// Luna/kernel/src/thread/Scheduler.cpp
// (web-viewer header — "270 lines / 8.6 KiB / C++ / Raw Normal View History" — commented out)
#define MODULE "sched"
#include "thread/Scheduler.h"
// blame: 2022-09-20 18:02:08 +00:00 (scraper artifact, commented out)
#include "assert.h"
#include "interrupts/Interrupts.h"
#include "log/Log.h"
#include "memory/KernelMemoryManager.h"
// blame: 2022-09-21 15:56:53 +00:00 (scraper artifact, commented out)
#include "misc/hang.h"
#include "std/string.h"
// blame: 2022-09-21 15:56:53 +00:00 (scraper artifact, commented out)
#include "thread/PIT.h"
#include "thread/Task.h"
// Number of tasks in the scheduler's circular list (the idle task is not counted here).
static uint64_t task_num = 0;
// Fallback task that halts the CPU when nothing is runnable; lives outside the circular list.
static Task idle_task;
// Monotonically increasing source of task IDs; IDs are never reused.
static uint64_t free_tid = 0;
// The task whose context is currently (or was most recently) running on the CPU.
static Task* sched_current_task;
// Head of the circular doubly-linked task list.
static Task* base_task;
// Tail of the circular task list (always base_task->prev_task).
static Task* end_task;
// Body of the idle task: re-enable interrupts, then halt until the next one arrives.
static void idle_task_function()
{
    Interrupts::enable();
    for (;;) asm volatile("hlt");
}
// Milliseconds elapsed per PIT tick (presumably 1000 / PIT::frequency() in Hz — see init()).
static uint64_t frequency;
// Initialize the scheduler: set up the idle task and adopt the currently
// executing kernel flow as the bootstrap task (tid 1), forming a one-element
// circular list. Must be called once before any task switching occurs.
void Scheduler::init()
{
    // --- idle task: runs only when nothing else is runnable ---
    memset(&idle_task, 0, sizeof(Task));
    idle_task.id = free_tid++;
    idle_task.regs.rip = (uint64_t)&idle_task_function;
    // FIX: point rsp at the *top* of the page minus one slot — stacks grow down.
    // The old code used the page base directly, so the very first push (e.g. an
    // interrupt frame) would write below the allocation. add_kernel_task() already
    // computes its stack top the same way as this.
    idle_task.regs.rsp = (uint64_t)KernelMemoryManager::get_page() + 4096 - sizeof(uintptr_t);
    idle_task.regs.cs = 0x08; // kernel code segment selector
    idle_task.regs.ss = 0x10; // kernel data segment selector
    // Capture the current RFLAGS as a sane initial value for the idle task.
    asm volatile("pushfq; movq (%%rsp), %%rax; movq %%rax, %0; popfq;" : "=m"(idle_task.regs.rflags)::"%rax");
    idle_task.regs.rflags |= 0x200; // set IF: idle task runs with interrupts enabled
    idle_task.task_sleep = 1000;
    idle_task.state = idle_task.Idle;

    // --- bootstrap task: the code path we are executing right now ---
    base_task = (Task*)KernelMemoryManager::get_page();
    memset(base_task, 0, sizeof(Task));
    end_task = base_task;
    sched_current_task = base_task;
    sched_current_task->id = free_tid++;
    sched_current_task->task_time = 20; // gets 20 ms of cpu time before next switch
    // A single task forms the whole ring: it links to itself.
    sched_current_task->next_task = sched_current_task;
    sched_current_task->prev_task = sched_current_task;
    sched_current_task->state = sched_current_task->Running;
    task_num++;
    // the other registers will be saved next task switch
    frequency = 1000 / PIT::frequency(); // ms per tick; assumes PIT::frequency() (Hz) divides 1000
}
void Scheduler::add_kernel_task(void (*task)(void))
{
Task* new_task = (Task*)KernelMemoryManager::get_page(); // FIXME: allocate memory the size of Task, not 4 KB for
// each task (YES, I know, I need malloc)
memset(new_task, 0, sizeof(Task));
new_task->id = free_tid++;
new_task->regs.rip = (uint64_t)task;
new_task->allocated_stack = (uint64_t)KernelMemoryManager::get_pages(4); // 16 KB is enough for everyone, right?
new_task->regs.rsp = new_task->allocated_stack + (4096 * 4) - sizeof(uintptr_t);
new_task->regs.cs = 0x08;
new_task->regs.ss = 0x10;
asm volatile("pushfq; movq (%%rsp), %%rax; movq %%rax, %0; popfq;" : "=m"(new_task->regs.rflags)::"%rax");
2022-09-20 18:02:08 +00:00
new_task->regs.rflags |= 0x200; // enable interrupts
new_task->task_sleep = 0;
new_task->task_time = 0;
end_task->next_task = new_task;
new_task->prev_task = end_task;
base_task->prev_task = new_task;
new_task->next_task = base_task;
end_task = new_task;
new_task->state = new_task->Running;
task_num++;
kinfoln("Adding kernel task: starts at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
new_task->id, new_task->regs.rsp, task_num);
}
// Create a new user-mode (ring 3) task that begins executing `task`,
// and splice it into the tail of the circular run queue.
void Scheduler::add_user_task(void (*task)(void))
{
    Task* new_task =
        (Task*)KernelMemoryManager::get_page(); // FIXME: allocate memory the size of Task, not 4 KB for each task
    // FIX: zero the Task record like add_kernel_task() does. Without this,
    // fields we don't explicitly set below start out as whatever garbage was
    // in the freshly mapped page.
    memset(new_task, 0, sizeof(Task));
    new_task->id = free_tid++;
    // Remap the entry point so it is reachable from user mode.
    new_task->regs.rip = (uint64_t)KernelMemoryManager::get_unaligned_mapping((void*)(uint64_t)task, MAP_USER);
    new_task->allocated_stack =
        (uint64_t)KernelMemoryManager::get_pages(4, MAP_READ_WRITE | MAP_USER); // 16 KB is enough for everyone, right?
    // Stack top, one slot below the end of the allocation (stacks grow down).
    new_task->regs.rsp = new_task->allocated_stack + (4096 * 4) - sizeof(uintptr_t);
    new_task->regs.cs = 0x18 | 0x03; // user code segment selector, RPL 3
    new_task->regs.ss = 0x20 | 0x03; // user data segment selector, RPL 3
    new_task->regs.rflags = (1 << 21) | (1 << 9); // enable interrupts (bit 9 = IF)
    new_task->task_sleep = 0;
    new_task->task_time = 0;
    // Splice into the ring between end_task and base_task, becoming the new tail.
    end_task->next_task = new_task;
    new_task->prev_task = end_task;
    base_task->prev_task = new_task;
    new_task->next_task = base_task;
    end_task = new_task;
    new_task->state = new_task->Running;
    task_num++;
    kinfoln("Adding user task: starts at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
            new_task->id, new_task->regs.rsp, task_num);
}
// Release the resources owned by an already-unlinked, exited task:
// its kernel/user stack (if it allocated one) and the Task record itself.
void Scheduler::reap_task(Task* task)
{
    ASSERT(!Interrupts::is_in_handler());
    task_num--;
    kinfoln("reaping task %ld", task->id);
    if (task->allocated_stack) KernelMemoryManager::release_pages((void*)task->allocated_stack, 4);
    KernelMemoryManager::release_page((void*)task);
}
// Interrupt-context handler for a task that exited normally: mark it Exited
// (so reap_tasks() can free it later) and immediately schedule another task.
void Scheduler::task_exit(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    Task* const current = sched_current_task;
    kdbgln("exit: task %ld finished running", current->id);
    current->state = current->Exited;
    task_yield(context);
}
// Interrupt-context handler for a task that faulted/misbehaved: mark it Exited
// (so reap_tasks() can free it later) and immediately schedule another task.
void Scheduler::task_misbehave(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    Task* const current = sched_current_task;
    kdbgln("exit: task %ld misbehaved", current->id);
    current->state = current->Exited;
    task_yield(context);
}
// Walk the circular task list once, unlink every Exited task onto a private
// singly-linked "reap list", then free each of them via reap_task().
// Runs outside interrupt context with interrupts disabled so the ring cannot
// change underneath the traversal.
void Scheduler::reap_tasks()
{
Interrupts::disable();
ASSERT(!Interrupts::is_in_handler());
// Head and tail of the detached list of exited tasks (chained via next_task).
Task* reap_base = nullptr;
Task* reap_end = nullptr;
Task* task = base_task;
Task* task_reaping;
uint64_t iter_index = 0;
do {
if (task->state == task->Exited)
{
// Keep base_task/end_task valid when the removed task sits at either end of the ring.
if (task == base_task && task == end_task) { PANIC("Last task exited"); }
else if (task == base_task) { base_task = task->next_task; }
else if (task == end_task) { end_task = task->prev_task; }
if (!reap_base)
{
// First exited task found: it becomes both head and tail of the reap list.
reap_base = task;
reap_end = task;
// Unlink from the ring; neighbours bypass this task.
task->prev_task->next_task = task->next_task;
task->next_task->prev_task = task->prev_task;
task->prev_task = nullptr;
// Advance the cursor BEFORE severing next_task, which still points into the ring.
task_reaping = task;
task = task->next_task;
task_reaping->next_task = nullptr;
}
else
{
// Append to the tail of the reap list, then unlink from the ring as above.
reap_end->next_task = task;
task->prev_task->next_task = task->next_task;
task->next_task->prev_task = task->prev_task;
task->prev_task = nullptr;
reap_end = task;
task_reaping = task;
task = task->next_task;
task_reaping->next_task = nullptr;
}
}
else { task = task->next_task; }
iter_index++;
// Visit exactly task_num nodes; task_num only changes inside reap_task() below.
} while (iter_index < task_num);
// Free everything that was moved onto the reap list.
task = reap_base;
while (task)
{
Task* reaped_task = task;
task = task->next_task;
reap_task(reaped_task);
}
Interrupts::enable();
}
static void sched_decrement_sleep_times()
{
Task* task = base_task;
if (!task) return;
do {
if (task->task_sleep > 0)
{
task->task_sleep -= frequency;
if (task->task_sleep < 0) task->task_sleep = 0;
}
if (task->task_sleep == 0 && task->state == task->Sleeping) task->state = task->Running;
task = task->next_task;
} while (task != base_task);
}
// PIT tick handler: advance sleep timers, charge the current task for the
// elapsed tick, and preempt it once its timeslice is exhausted.
void Scheduler::task_tick(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    Interrupts::disable();
    sched_decrement_sleep_times();
    // Tid 0 (the idle task) never keeps the CPU across a tick: always rescan.
    if (sched_current_task->id == 0) return task_yield(context);
    sched_current_task->task_time -= frequency;
    if (sched_current_task->task_time < 0)
    {
        // Timeslice used up: reset and pick the next runnable task.
        sched_current_task->task_time = 0;
        task_yield(context);
    }
    Interrupts::enable();
}
// Core context switch: save the outgoing task's context, round-robin scan the
// ring for the next Running task, and load its context. Falls back to the
// idle task when nothing is runnable.
void Scheduler::task_yield(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    Interrupts::disable();

    // Save the interrupted CPU state into whichever task was running.
    get_context_to_task(*sched_current_task, context);

    bool came_from_idle = false;
    if (sched_current_task->state == sched_current_task->Idle)
    {
        // The idle task is not a ring member; restart the scan from the tail
        // so the loop below begins at base_task.
        sched_current_task = end_task;
        came_from_idle = true;
    }

    // Scan the whole ring, starting just after the current position.
    Task* const scan_start = sched_current_task;
    do {
        sched_current_task = sched_current_task->next_task;
        if (sched_current_task->state == sched_current_task->Running)
        {
            sched_current_task->task_time = 20; // fresh 20 ms timeslice
            set_context_from_task(*sched_current_task, context);
            Interrupts::enable();
            return;
        }
    } while (sched_current_task != scan_start);

    // No runnable task: hand the CPU to the idle task for one tick. If we were
    // already idle, the saved context IS the idle task's, so skip the reload.
    sched_current_task = &idle_task;
    sched_current_task->task_time = frequency;
    if (!came_from_idle) { set_context_from_task(*sched_current_task, context); }
    Interrupts::enable();
    return;
}
// Voluntarily give up the CPU by raising the scheduler's yield interrupt (vector 48).
void Scheduler::yield()
{
asm volatile("int $48");
}
// Terminate the calling task by raising the scheduler's exit interrupt (vector 49);
// the handler marks the task Exited and switches away (see task_exit()).
void Scheduler::exit()
{
asm volatile("int $49");
}
void Scheduler::sleep(unsigned long ms)
{
ASSERT(!Interrupts::is_in_handler());
Interrupts::disable();
Task* task = current_task();
task->task_sleep = ms;
task->state = task->Sleeping;
Interrupts::enable();
yield();
}
// Accessor for the task currently scheduled on the CPU.
Task* Scheduler::current_task()
{
return sched_current_task;
}