Scheduler: Replace the fixed Task array with (highly inefficient) dynamic memory allocation, add userspace task support (which instantly crashes, since the memory is mapped kernel-only), and add support for exiting a task (marking it as Exited and reaping it later)
parent c6c2e286e7
commit ec01dc2927
@@ -5,11 +5,19 @@ namespace Scheduler
{
    void init();
    void yield();
    void exit();
    void sleep(unsigned long ms);
    void add_kernel_task(void (*task)(void));
    void add_user_task(void (*task)(void));

    void task_exit(Context* context);
    void task_misbehave(Context* context);

    Task* current_task();

    void task_yield(Context* context);
    void task_tick(Context* context);

    void reap_task(Task* task);
    void reap_tasks();
}
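The new declarations (exit, add_user_task, task_exit/task_misbehave and the reap_* functions) define a simple lifecycle: a task runs, marks itself Exited via Scheduler::exit(), and a separate kernel task later frees its memory with reap_tasks(). A minimal usage sketch, mirroring the tasks registered in _start() further down in this commit; the function name and the "thread/Scheduler.h" include path are assumptions, and the lambda bodies are placeholders:

// Hypothetical usage of the new Scheduler API; mirrors the tasks set up in
// _start() later in this commit. Non-capturing lambdas decay to void (*)(void).
#include "thread/Scheduler.h" // assumed location, alongside thread/Task.h

static void spawn_demo_tasks()
{
    // A long-lived kernel task that periodically frees Exited tasks.
    Scheduler::add_kernel_task([]() {
        while (1)
        {
            Scheduler::sleep(400);
            Scheduler::reap_tasks(); // must run outside an interrupt handler
        }
    });

    // A short-lived kernel task: does its work, then asks to be torn down.
    Scheduler::add_kernel_task([]() {
        Scheduler::sleep(2000);
        Scheduler::exit(); // int $49 -> task_exit() marks it Exited; the reaper frees it
    });
}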
@@ -3,6 +3,14 @@

struct Task
{
    enum TaskState
    {
        Idle,
        Running,
        Sleeping,
        Exited
    };

    uint64_t id;
    Context regs;

@@ -10,6 +18,11 @@ struct Task
    int64_t task_time = 0;

    Task* next_task = nullptr;
    Task* prev_task = nullptr;

    uint64_t allocated_stack = 0;

    TaskState state;
};

void set_context_from_task(Task& task, Context* ctx);
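The next_task/prev_task pointers are what replace the old fixed tasks[32] array: tasks now live on a circular doubly-linked list with base_task at the head and end_task at the tail. A stripped-down sketch of the insertion that add_kernel_task/add_user_task perform below, with a trimmed Node type standing in for Task (not the kernel's exact code):

// Simplified illustration of the circular list insertion the scheduler now uses;
// Node stands in for Task, head/tail for base_task/end_task.
struct Node
{
    Node* next = nullptr;
    Node* prev = nullptr;
};

static Node* head = nullptr; // first task in the ring (base_task)
static Node* tail = nullptr; // last task in the ring (end_task)

// Append a node at the end of the ring, keeping it circular in both directions.
static void append(Node* node)
{
    if (!head)
    {
        head = tail = node;
        node->next = node->prev = node; // a single node points at itself
        return;
    }
    tail->next = node;  // old tail links forward to the new node
    node->prev = tail;  // new node links back to the old tail
    node->next = head;  // new node closes the ring forward
    head->prev = node;  // head links back to the new tail
    tail = node;        // the new node becomes the tail
}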
@@ -15,6 +15,8 @@ extern "C" void common_handler(Context* context)
    ASSERT(Interrupts::is_in_handler());
    if (context->number >= 0x20 && context->number < 0x30)
    {
        Interrupts::ensure_handler(); // restore the "in interrupt flag" if an interrupt happened in the middle of this
                                      // one
        IRQ::interrupt_handler(context);
        return;
    }
@@ -25,19 +27,30 @@ extern "C" void common_handler(Context* context)
        kinfoln("Stack trace:");

        StackTracer tracer(context->rbp);
        tracer.trace();
        while (1) halt();
        tracer.trace_with_ip(context->rip);
        if (context->cs == 0x8) { PANIC("Fatal: GPF in kernel task"); }
        else
        {
            Interrupts::ensure_handler();
            Scheduler::task_misbehave(context);
        }
    }
    if (context->number == 14)
    {
        Interrupts::disable();
        kerrorln("Page fault in %s (RIP %lx), while trying to access %lx, error code %ld",
                 context->cs == 8 ? "ring 0" : "ring 3", context->rip, context->cr2, context->error_code);
        kinfoln("Stack trace:");

        StackTracer tracer(context->rbp);
        tracer.trace();
        tracer.trace_with_ip(context->rip);

        hang();
        if (context->cs == 0x8) { PANIC("Fatal: Page fault in kernel task"); }
        else
        {
            Interrupts::ensure_handler();
            Scheduler::task_misbehave(context);
        }
    }
    if (context->number == 8)
    {
@@ -45,10 +58,20 @@ extern "C" void common_handler(Context* context)
        kinfoln("Stack trace:");

        StackTracer tracer(context->rbp);
        tracer.trace();
        tracer.trace_with_ip(context->rip);

        hang();
    }
    if (context->number == 48) { Scheduler::task_yield(context); }
    if (context->number == 48)
    {
        Interrupts::ensure_handler();
        Scheduler::task_yield(context);
    }
    if (context->number == 49)
    {
        Interrupts::ensure_handler();
        Scheduler::task_exit(context);
    }
    if (context->number == 256) { kwarnln("Unused interrupt"); }
    return;
}
@@ -46,6 +46,7 @@ extern "C"
    void isr46();
    void isr47();
    void isr48();
    void isr49();
}

#define INSTALL_TRAP(x) IDT::add_handler(x, (void*)&isr##x, IDT_TA_TrapGate)
@@ -104,8 +105,9 @@ void Interrupts::install()
    INSTALL_ISR(45);
    INSTALL_ISR(46);
    INSTALL_ISR(47);
    kdbgln("Installing handler stub for software interrupt 48");
    kdbgln("Installing handler stub for software interrupt 48, 49");
    INSTALL_ISR(48);
    INSTALL_ISR(49);
    kdbgln("Installing unused handler stubs for the rest of the IDT");
    for (int i = 49; i < 256; i++) { INSTALL_UNUSED(i); }
    for (int i = 50; i < 256; i++) { INSTALL_UNUSED(i); }
}
@@ -134,4 +134,5 @@ IRQ 44, 12
IRQ 45, 13
IRQ 46, 14
IRQ 47, 15
SOFT 48
SOFT 48
SOFT 49
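Vectors 48 and 49 are the glue between the public Scheduler::yield()/exit() calls and the handlers installed above: the caller executes a software interrupt, and common_handler() routes the vector number back into the scheduler. A condensed sketch of that round trip; every sketch_* name and the reduced context struct are illustrative, the real dispatch lives inside common_handler():

// Condensed sketch of the yield/exit round trip; SketchContext is reduced to
// the single field this path needs, and the dispatch helper is illustrative.
struct SketchContext { unsigned long number; /* saved registers elided */ };

void sketch_task_yield(SketchContext*); // pick the next Running task
void sketch_task_exit(SketchContext*);  // mark the current task Exited, then yield

// Running code requests a reschedule or an exit by trapping into the kernel.
inline void sketch_yield() { asm volatile("int $48"); }
inline void sketch_exit() { asm volatile("int $49"); }

// The interrupt dispatcher turns the vector number back into a scheduler call.
inline void sketch_dispatch(SketchContext* context)
{
    if (context->number == 48) sketch_task_yield(context);
    if (context->number == 49) sketch_task_exit(context);
}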
@@ -117,6 +117,22 @@ extern "C" void _start()
        }
    });

    Scheduler::add_kernel_task([]() {
        while (1)
        {
            sleep(400);
            Scheduler::reap_tasks();
        }
    });

    Scheduler::add_user_task([]() { Interrupts::disable(); });

    Scheduler::add_kernel_task([]() {
        sleep(2000);
        Scheduler::add_user_task([]() { Interrupts::disable(); });
        Scheduler::exit();
    });

    kinfoln("Prepared scheduler tasks");

    ACPI::SDTHeader* rootSDT = ACPI::GetRSDTOrXSDT();
@@ -134,5 +150,5 @@ extern "C" void _start()
                dev_type.dev_class, dev_type.dev_subclass, dev_type.prog_if, dev_type.revision);
    });

    while (1) Scheduler::sleep(200);
    Scheduler::exit();
}
@@ -10,7 +10,6 @@
#include "thread/PIT.h"
#include "thread/Task.h"

static Task tasks[32];
static uint64_t task_num = 0;

static Task idle_task;
@@ -24,7 +23,7 @@ static Task* end_task;
static void idle_task_function()
{
    Interrupts::enable();
    while (1) halt();
    while (1) asm volatile("hlt");
}

static uint64_t frequency;
@@ -40,14 +39,18 @@ void Scheduler::init()
    asm volatile("pushfq; movq (%%rsp), %%rax; movq %%rax, %0; popfq;" : "=m"(idle_task.regs.rflags)::"%rax");
    idle_task.regs.rflags |= 0x200;
    idle_task.task_sleep = 1000;
    idle_task.state = idle_task.Idle;

    base_task = &tasks[task_num++];
    base_task = (Task*)KernelMemoryManager::get_page();
    memset(base_task, 0, sizeof(Task));
    end_task = base_task;
    sched_current_task = base_task;
    sched_current_task->id = free_tid++;
    sched_current_task->task_time = 20; // gets 20 ms of cpu time before next switch
    sched_current_task->next_task = nullptr;
    sched_current_task->next_task = sched_current_task;
    sched_current_task->prev_task = sched_current_task;
    sched_current_task->state = sched_current_task->Running;
    task_num++;
    // the other registers will be saved next task switch

    frequency = 1000 / PIT::frequency();
@@ -55,12 +58,13 @@ void Scheduler::init()

void Scheduler::add_kernel_task(void (*task)(void))
{
    if (task_num == 32) return; // FIXME: allow for dynamically allocated linked list instead of a fixed array of Tasks
    Task* new_task = &tasks[task_num++];
    Task* new_task = (Task*)KernelMemoryManager::get_page(); // FIXME: allocate memory the size of Task, not 4 KB for
                                                             // each task (YES, I know, I need malloc)
    memset(new_task, 0, sizeof(Task));
    new_task->id = free_tid++;
    new_task->regs.rip = (uint64_t)task;
    new_task->regs.rsp = (uint64_t)KernelMemoryManager::get_pages(2); // 8 KB is enough for everyone, right?
    new_task->allocated_stack = (uint64_t)KernelMemoryManager::get_pages(4); // 16 KB is enough for everyone, right?
    new_task->regs.rsp = new_task->allocated_stack + (4096 * 4) - sizeof(uintptr_t);
    new_task->regs.cs = 0x08;
    new_task->regs.ss = 0x10;
    asm volatile("pushfq; movq (%%rsp), %%rax; movq %%rax, %0; popfq;" : "=m"(new_task->regs.rflags)::"%rax");
@@ -68,66 +72,173 @@ void Scheduler::add_kernel_task(void (*task)(void))
    new_task->task_sleep = 0;
    new_task->task_time = 0;
    end_task->next_task = new_task;
    new_task->prev_task = end_task;
    base_task->prev_task = new_task;
    new_task->next_task = base_task;
    end_task = new_task;
    kinfoln("Adding task: starts at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip, new_task->id,
            new_task->regs.rsp, task_num);
    new_task->state = new_task->Running;
    task_num++;
    kinfoln("Adding kernel task: starts at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
            new_task->id, new_task->regs.rsp, task_num);
}

void Scheduler::add_user_task(void (*task)(void))
{
    Task* new_task =
        (Task*)KernelMemoryManager::get_page(); // FIXME: allocate memory the size of Task, not 4 KB for each task
    new_task->id = free_tid++;
    new_task->regs.rip = (uint64_t)task;
    new_task->allocated_stack = (uint64_t)KernelMemoryManager::get_pages(4); // 16 KB is enough for everyone, right?
    new_task->regs.rsp = new_task->allocated_stack + (4096 * 4) - sizeof(uintptr_t);
    new_task->regs.cs = 0x18 | 0x03;
    new_task->regs.ss = 0x20 | 0x03;
    new_task->regs.rflags = (1 << 21) | (1 << 9); // enable interrupts
    new_task->task_sleep = 0;
    new_task->task_time = 0;
    end_task->next_task = new_task;
    new_task->prev_task = end_task;
    base_task->prev_task = new_task;
    new_task->next_task = base_task;
    end_task = new_task;
    new_task->state = new_task->Running;
    task_num++;
    kinfoln("Adding user task: starts at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
            new_task->id, new_task->regs.rsp, task_num);
}
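add_user_task() enters the new task in ring 3 (CS = 0x18 | 3, SS = 0x20 | 3), but the Task itself, its code and its stack all come from KernelMemoryManager, whose pages are (per the commit message) mapped kernel-only. The very first ring-3 instruction fetch or stack access therefore page-faults, which is why the user tasks registered in _start() crash instantly. A rough illustration of the paging bit involved, assuming the conventional x86-64 page-table-entry layout (the kernel's own VMM flags may be named differently):

#include <cstdint>

// Conventional x86-64 page-table-entry flag bits (illustrative constants;
// this kernel's memory manager may expose different names).
constexpr uint64_t PTE_PRESENT = 1ull << 0;  // page is mapped
constexpr uint64_t PTE_WRITABLE = 1ull << 1; // writes allowed
constexpr uint64_t PTE_USER = 1ull << 2;     // ring 3 may access the page

// A page reachable from ring 3 must have PTE_USER set at every level of the
// page-table walk; kernel-only mappings leave it clear, so a ring-3 access
// (like the first instruction fetch of the user task above) raises #PF.
constexpr bool user_accessible(uint64_t pte)
{
    return (pte & PTE_PRESENT) && (pte & PTE_USER);
}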
void Scheduler::reap_task(Task* task)
{
    ASSERT(!Interrupts::is_in_handler());
    task_num--;
    Task* exiting_task = task;
    kinfoln("reaping task %ld", exiting_task->id);
    if (exiting_task->allocated_stack) KernelMemoryManager::release_pages((void*)exiting_task->allocated_stack, 4);
    KernelMemoryManager::release_page((void*)exiting_task);
}

void Scheduler::task_exit(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    kdbgln("exit: task %ld finished running", sched_current_task->id);
    sched_current_task->state = sched_current_task->Exited;
    task_yield(context);
}

void Scheduler::task_misbehave(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    kdbgln("exit: task %ld misbehaved", sched_current_task->id);
    sched_current_task->state = sched_current_task->Exited;
    task_yield(context);
}

void Scheduler::reap_tasks()
{
    Interrupts::disable();
    ASSERT(!Interrupts::is_in_handler());
    Task* reap_base = nullptr;
    Task* reap_end = nullptr;
    Task* task = base_task;
    Task* task_reaping;
    uint64_t iter_index = 0;
    do {
        if (task->state == task->Exited)
        {
            if (task == base_task && task == end_task) { PANIC("Last task exited"); }
            else if (task == base_task) { base_task = task->next_task; }
            else if (task == end_task) { end_task = task->prev_task; }
            if (!reap_base)
            {
                reap_base = task;
                reap_end = task;
                task->prev_task->next_task = task->next_task;
                task->next_task->prev_task = task->prev_task;
                task->prev_task = nullptr;
                task_reaping = task;
                task = task->next_task;
                task_reaping->next_task = nullptr;
            }
            else
            {
                reap_end->next_task = task;
                task->prev_task->next_task = task->next_task;
                task->next_task->prev_task = task->prev_task;
                task->prev_task = nullptr;
                reap_end = task;
                task_reaping = task;
                task = task->next_task;
                task_reaping->next_task = nullptr;
            }
        }
        else { task = task->next_task; }
        iter_index++;
    } while (iter_index < task_num);
    task = reap_base;
    while (task)
    {
        Task* reaped_task = task;
        task = task->next_task;
        reap_task(reaped_task);
    }
    Interrupts::enable();
}
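reap_tasks() works in two passes: it first unlinks every Exited task from the circular ring onto a temporary reap list (fixing up base_task/end_task as it goes), and only then releases their pages via reap_task(), so the ring is never traversed while nodes are being freed. A stripped-down sketch of the same idea, using an illustrative Node type rather than the kernel's Task (the list order and helper names here are my own):

// Stripped-down version of the reap pass: unlink dead nodes from the ring
// first, free them afterwards.
struct ReapNode
{
    ReapNode* next = nullptr;
    ReapNode* prev = nullptr;
    bool exited = false;
};

static void reap_ring(ReapNode*& head, ReapNode*& tail, unsigned long count, void (*release)(ReapNode*))
{
    ReapNode* reap_list = nullptr; // singly-linked list of nodes to free
    ReapNode* node = head;
    for (unsigned long i = 0; i < count; i++)
    {
        ReapNode* next = node->next;
        if (node->exited)
        {
            if (node == head) head = node->next; // keep head/tail valid
            if (node == tail) tail = node->prev;
            node->prev->next = node->next;       // unlink from the ring
            node->next->prev = node->prev;
            node->next = reap_list;              // push onto the reap list
            reap_list = node;
        }
        node = next;
    }
    while (reap_list) // second pass: actually release the memory
    {
        ReapNode* dead = reap_list;
        reap_list = reap_list->next;
        release(dead);
    }
}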

static void sched_decrement_sleep_times()
{
    Task* task = base_task;
    while (task)
    {
    if (!task) return;
    do {
        if (task->task_sleep > 0)
        {
            task->task_sleep -= frequency;
            if (task->task_sleep < 0) task->task_sleep = 0;
        }
        if (task->task_sleep == 0 && task->state == task->Sleeping) task->state = task->Running;
        task = task->next_task;
    }
    } while (task != base_task);
}

void Scheduler::task_tick(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    Interrupts::disable();
    sched_decrement_sleep_times();
    if (sched_current_task->id == 0) return task_yield(context);
    sched_current_task->task_time -= frequency;
    if (sched_current_task->task_time < 0)
    {
        sched_current_task->task_time = 0;
        task_yield(context);
    }
    Interrupts::enable();
}
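task_tick() is the PIT-driven bookkeeping: frequency is the number of milliseconds per timer tick (1000 / PIT::frequency(), set in init()), each tick subtracts that from the running task's 20 ms slice, and a used-up slice forces a yield. A tiny worked example of the arithmetic, assuming a 250 Hz PIT (the real rate is whatever PIT::frequency() returns):

#include <cstdint>

// Illustrative numbers only: with a 250 Hz PIT, each tick is 4 ms,
// so a 20 ms slice survives 5 ticks before the scheduler switches away.
constexpr uint64_t pit_hz = 250;
constexpr uint64_t ms_per_tick = 1000 / pit_hz; // matches frequency = 1000 / PIT::frequency()
constexpr int64_t slice_ms = 20;                // task_time granted on each switch

constexpr int64_t ticks_per_slice = slice_ms / (int64_t)ms_per_tick;

static_assert(ticks_per_slice == 5, "a 20 ms slice at 250 Hz lasts 5 ticks");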

void Scheduler::task_yield(Context* context)
{
    ASSERT(Interrupts::is_in_handler());
    Interrupts::disable();
    get_context_to_task(*sched_current_task, context);
    bool was_idle = false;
    if (sched_current_task->id == 0) // idle task
    if (sched_current_task->state == sched_current_task->Idle)
    {
        sched_current_task = base_task;
        sched_current_task = end_task;
        was_idle = true;
    }
    Task* original_task = sched_current_task;
    do {
        sched_current_task = sched_current_task->next_task;
        if (!sched_current_task) { sched_current_task = base_task; }
        if (sched_current_task->task_sleep == 0)
        if (sched_current_task->state == sched_current_task->Running)
        {
            sched_current_task->task_time = 20;
            set_context_from_task(*sched_current_task, context);
            Interrupts::enable();
            return;
        }
    } while (sched_current_task != original_task);
    if (original_task->task_sleep > 0)
    {
        sched_current_task = &idle_task;
        sched_current_task->task_time = frequency;
        if (!was_idle) { set_context_from_task(*sched_current_task, context); }
        return;
    }
    original_task->task_time = 20; // grant 30 more ms, there is no other task available
    sched_current_task = &idle_task;
    sched_current_task->task_time = frequency;
    if (!was_idle) { set_context_from_task(*sched_current_task, context); }
    Interrupts::enable();
    return;
}

@@ -136,9 +247,19 @@ void Scheduler::yield()
    asm volatile("int $48");
}

void Scheduler::exit()
{
    asm volatile("int $49");
}

void Scheduler::sleep(unsigned long ms)
{
    current_task()->task_sleep = ms;
    ASSERT(!Interrupts::is_in_handler());
    Interrupts::disable();
    Task* task = current_task();
    task->task_sleep = ms;
    task->state = task->Sleeping;
    Interrupts::enable();
    yield();
}