Luna/kernel/src/thread/Scheduler.cpp

#include "thread/Scheduler.h"
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
#include "memory/KernelVM.h"
#include "memory/MemoryManager.h"
#include <luna/ScopeGuard.h>
#include <luna/Stack.h>

static Thread g_idle;
static Thread* g_current = nullptr;

static const usize TICKS_PER_TIMESLICE = 20;
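
// NOTE: g_threads (the list of schedulable threads) is used by the functions below, but its declaration is
// not part of this excerpt. Based on the methods called on it (append, last, next, expect_first, for_each),
// an assumed minimal declaration would look something like:
//     static LinkedList<Thread> g_threads;
// (the actual container type and its header live elsewhere in the Luna sources.)
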
namespace Scheduler
{
    void init()
    {
        g_idle.id = 0;
        g_idle.init_regs_kernel();
        g_idle.set_ip((u64)CPU::idle_loop);
        g_idle.state = ThreadState::Idle;
        g_idle.ticks_left = 1;

        // Map some stack for the idle task
        u64 idle_stack_vm = MemoryManager::alloc_for_kernel(1, MMU::NoExecute | MMU::ReadWrite).release_value();

        Stack idle_stack{idle_stack_vm, ARCH_PAGE_SIZE};
        g_idle.set_sp(idle_stack.top());

        kinfoln("CREATED IDLE THREAD: id %lu with ip %lx and sp %lx", g_idle.id, g_idle.ip(), g_idle.sp());

        g_current = &g_idle;
    }
    Thread* current()
    {
        return g_current;
    }

    Thread* idle()
    {
        return &g_idle;
    }
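
    // Shared tail of the new_kernel_thread() overloads below: allocates a 4-page kernel stack for the
    // already-initialized thread, points its stack pointer at the top, and appends it to the thread list.
    // If the stack allocation fails, the scope guard deletes the Thread so nothing leaks.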
    Result<void> new_kernel_thread_impl(Thread* thread)
    {
        // If anything fails, make sure to clean up.
        auto guard = make_scope_guard([&] { delete thread; });

        u64 thread_stack_vm = TRY(MemoryManager::alloc_for_kernel(4, MMU::NoExecute | MMU::ReadWrite));

        guard.deactivate();

        Stack thread_stack{thread_stack_vm, ARCH_PAGE_SIZE * 4};
        thread->set_sp(thread_stack.top());

        g_threads.append(thread);

        kinfoln("CREATED THREAD: id %lu with ip %lx and sp %lx", thread->id, thread->ip(), thread->sp());

        return {};
    }
    Result<void> new_kernel_thread(u64 address)
    {
        Thread* thread = TRY(new_thread());
        thread->init_regs_kernel();
        thread->set_ip(address);

        return new_kernel_thread_impl(thread);
    }

    Result<void> new_kernel_thread(void (*func)(void))
    {
        Thread* thread = TRY(new_thread());
        thread->init_regs_kernel();
        thread->set_ip((u64)func);

        return new_kernel_thread_impl(thread);
    }

    Result<void> new_kernel_thread(void (*func)(void*), void* arg)
    {
        Thread* thread = TRY(new_thread());
        thread->init_regs_kernel();
        thread->set_ip((u64)func);
        thread->set_arguments((u64)arg, 0, 0, 0);

        return new_kernel_thread_impl(thread);
    }
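
    // Round-robin selection: walk g_threads circularly, starting after the current thread, and return the
    // first Runnable one; if we come back around without finding any, fall back to the idle thread.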
    Thread* pick_task()
    {
        Thread* old = g_current;
        if (old->is_idle())
        {
            auto maybe_last = g_threads.last();
            if (!maybe_last.has_value()) // No threads!!
                return &g_idle;
            g_current = old = maybe_last.value();
        }

        bool has_found_thread = false;

        do {
            auto maybe_next = g_threads.next(g_current);
            if (!maybe_next.has_value()) g_current = g_threads.expect_first();
            else
                g_current = maybe_next.value();

            if (g_current->state == ThreadState::Runnable)
            {
                has_found_thread = true;
                break;
            }
        } while (g_current != old);

        if (!has_found_thread) g_current = &g_idle;

        return g_current;
    }
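
    // Performs the actual context switch (only if the thread actually changed) and hands the incoming
    // thread its timeslice.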
    void generic_switch_context(Thread* old_thread, Thread* new_thread, Registers* regs)
    {
        if (old_thread != new_thread) switch_context(old_thread, new_thread, regs);

        if (new_thread->is_idle())
        {
            new_thread->ticks_left = 1; // The idle task only runs for 1 tick so we can check for new runnable
                                        // tasks as fast as possible.
        }
        else
            new_thread->ticks_left = TICKS_PER_TIMESLICE;
    }
    void switch_task(Registers* regs)
    {
        Thread* old_thread = g_current;
        Thread* new_thread = pick_task();
        generic_switch_context(old_thread, new_thread, regs);
    }
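
    // Scheduler tick handler (presumably entered from the timer interrupt): updates accounting for the
    // current thread, wakes sleeping threads whose sleep has expired, and switches tasks once the current
    // timeslice is used up.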
    void invoke(Registers* regs)
    {
        CPU::disable_interrupts();

        g_current->ticks++;
        if (is_in_kernel(regs)) g_current->ticks_in_kernel++;
        else
            g_current->ticks_in_user++;
        g_current->ticks_left--;

        g_threads.for_each([](Thread* thread) {
            if (thread->state == ThreadState::Sleeping)
            {
                if (--thread->sleep_ticks_left == 0) thread->state = ThreadState::Runnable;
            }
        });

        if (!g_current->ticks_left) switch_task(regs);
    }
}
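
// Puts the calling kernel thread to sleep. sleep_ticks_left is decremented once per scheduler tick in
// Scheduler::invoke(), so passing `ms` straight through assumes one tick per millisecond (the timer
// frequency is not shown in this excerpt).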
void kernel_sleep(u64 ms)
{
    g_current->sleep_ticks_left = ms;
    g_current->state = ThreadState::Sleeping;
    kernel_yield();
}