// Luna/kernel/src/thread/Scheduler.cpp — round-robin kernel/userspace thread scheduler.
#include "thread/Scheduler.h"
#include "ELF.h"
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
#include "memory/MemoryManager.h"
#include "thread/ThreadImage.h"
#include <luna/Alignment.h>
#include <luna/ScopeGuard.h>
#include <luna/Stack.h>
static Thread g_idle;
static Thread* g_current = nullptr;
static Thread* g_init = nullptr;
static Thread* g_reap = nullptr;

static const usize TICKS_PER_TIMESLICE = 20;
namespace Scheduler
{
void init()
{
g_idle.id = 0;
g_idle.init_regs_kernel();
g_idle.set_ip((u64)CPU::idle_loop);
2022-12-07 15:14:58 +01:00
g_idle.state = ThreadState::Idle;
g_idle.is_kernel = true;
g_idle.parent = nullptr;
2023-03-24 21:05:38 +01:00
g_idle.name = "[idle]";
2022-12-07 15:03:34 +01:00
g_idle.ticks_left = 1;
// Map some stack for the idle task
2022-12-16 19:44:33 +01:00
u64 idle_stack_vm = MemoryManager::alloc_for_kernel(1, MMU::NoExecute | MMU::ReadWrite)
.expect_value("Error while setting up the idle task, cannot continue");
2022-12-07 15:03:34 +01:00
2022-12-21 20:22:44 +01:00
Stack idle_stack { idle_stack_vm, ARCH_PAGE_SIZE };
2022-12-07 15:03:34 +01:00
g_idle.set_sp(idle_stack.top());
2022-12-18 18:43:17 +01:00
g_idle.stack = idle_stack;
kinfoln("Created idle thread: id %lu with ip %#lx and sp %#lx", g_idle.id, g_idle.ip(), g_idle.sp());
2022-12-07 15:03:34 +01:00
g_current = &g_idle;
}
// Return the thread currently scheduled on this CPU (never null after init()).
Thread* current()
{
    return g_current;
}
// Return the idle thread, which runs only when no other thread is runnable.
Thread* idle()
{
    return &g_idle;
}
// Return the userspace init thread (recorded by new_userspace_thread),
// or null if it has not been created yet.
Thread* init_thread()
{
    return g_init;
}
// Register the kernel thread responsible for reaping dying threads.
void set_reap_thread(Thread* thread)
{
    g_reap = thread;
}
// Wake the reap thread, if one was registered, so it can collect dying threads.
void signal_reap_thread()
{
    if (g_reap == nullptr) return;
    g_reap->wake_up();
}
// Finish constructing a kernel thread: allocate a 4-page stack, give it
// root credentials, queue it in the scheduling list and mark it Runnable.
// If the stack allocation fails, the partially-built thread is deleted.
Result<Thread*> new_kernel_thread_impl(Thread* thread, const char* name)
{
    // Until the stack allocation succeeds, make sure the thread cannot leak.
    auto cleanup = make_scope_guard([&] { delete thread; });

    const u64 stack_base = TRY(MemoryManager::alloc_for_kernel(4, MMU::NoExecute | MMU::ReadWrite));

    cleanup.deactivate();

    const Stack stack { stack_base, ARCH_PAGE_SIZE * 4 };
    thread->set_sp(stack.top());
    thread->stack = stack;
    thread->name = name;
    thread->is_kernel = true;
    thread->auth = Credentials { .uid = 0, .euid = 0, .suid = 0, .gid = 0, .egid = 0, .sgid = 0 };

    g_threads.append(thread);
    thread->state = ThreadState::Runnable;

    kinfoln("Created kernel thread: id %lu with ip %#lx and sp %#lx", thread->id, thread->ip(), thread->sp());

    return thread;
}
// Create a kernel thread that begins execution at a raw instruction address.
Result<Thread*> new_kernel_thread(u64 address, const char* name)
{
    Thread* const t = TRY(new_thread());

    t->init_regs_kernel();
    t->set_ip(address);

    return new_kernel_thread_impl(t, name);
}
// Create a kernel thread that begins execution at the given function.
// Thin wrapper: delegates to the address-based overload.
Result<Thread*> new_kernel_thread(void (*func)(void), const char* name)
{
    return new_kernel_thread((u64)func, name);
}
// Create a kernel thread that begins execution at `func`, receiving `arg`
// as its first argument.
Result<Thread*> new_kernel_thread(void (*func)(void*), void* arg, const char* name)
{
    Thread* const t = TRY(new_thread());

    t->init_regs_kernel();
    t->set_ip((u64)func);
    t->set_arguments((u64)arg, 0, 0, 0);

    return new_kernel_thread_impl(t, name);
}
2023-03-18 23:45:48 +01:00
Result<Thread*> new_userspace_thread(SharedPtr<VFS::Inode> inode, const char* name)
{
Thread* const thread = TRY(make<Thread>());
thread->state = ThreadState::None;
thread->is_kernel = false;
thread->id = 1;
2023-03-24 21:05:38 +01:00
thread->name = name;
thread->auth = Credentials { .uid = 0, .euid = 0, .suid = 0, .gid = 0, .egid = 0, .sgid = 0 };
Vector<String> args;
auto name_string = TRY(String::from_cstring(name));
TRY(args.try_append(move(name_string)));
Vector<String> env;
auto guard = make_scope_guard([&] { delete thread; });
auto image = TRY(ThreadImage::try_load_from_elf(inode));
u64 argv = TRY(image->push_string_vector_on_stack(args));
u64 envp = TRY(image->push_string_vector_on_stack(env));
guard.deactivate();
image->apply(thread);
thread->set_arguments(args.size(), argv, env.size(), envp);
kinfoln("Created userspace thread: id %lu with ip %#.16lx and sp %#.16lx (ksp %#lx)", thread->id, thread->ip(),
thread->sp(), thread->kernel_stack.top());
g_threads.append(thread);
g_init = thread;
return thread;
}
// Insert an already-constructed thread into the global scheduling list.
void add_thread(Thread* thread)
{
    g_threads.append(thread);
}
2022-12-19 12:24:15 +01:00
void reap_thread(Thread* thread)
{
2023-03-18 23:45:48 +01:00
CPU::disable_interrupts();
2022-12-19 12:24:15 +01:00
kinfoln("reap: reaping thread with id %zu", thread->id);
if (thread->is_kernel)
{
auto stack = thread->stack;
MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
}
else
{
auto stack = thread->kernel_stack;
MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
for (int i = 0; i < FD_MAX; i++)
{
if (thread->fd_table[i].has_value()) thread->fd_table[i]->inode->remove_handle();
}
}
if (!thread->is_kernel) MMU::delete_userspace_page_directory(thread->directory);
2022-12-19 12:24:15 +01:00
delete thread;
2023-03-18 23:45:48 +01:00
CPU::enable_interrupts();
2022-12-19 12:24:15 +01:00
}
// Choose the next thread to run, round-robin.
//
// Walks g_threads starting just after g_current (wrapping around at the end)
// and picks the first Runnable thread. If the walk comes all the way back to
// where it started without finding one, the idle thread is chosen instead.
// Note that g_current is mutated during the walk; on return it always names
// the chosen thread.
Thread* pick_task()
{
    Thread* old = g_current;
    if (old->is_idle())
    {
        auto maybe_last = g_threads.last();
        if (!maybe_last.has_value()) // No threads!!
            return &g_idle;

        // Pretend the last thread in the list was running, so the do/while
        // below starts its scan from the first thread.
        g_current = old = maybe_last.value();
    }

    bool has_found_thread = false;

    do {
        // Advance to the next thread, wrapping to the front of the list.
        auto maybe_next = g_threads.next(g_current);
        if (!maybe_next.has_value()) g_current = g_threads.expect_first();
        else
            g_current = maybe_next.value();

        if (g_current->state == ThreadState::Runnable)
        {
            has_found_thread = true;
            break;
        }
    } while (g_current != old);

    // Nothing runnable anywhere: fall back to the idle thread.
    if (!has_found_thread) g_current = &g_idle;

    return g_current;
}
// Perform the bookkeeping for a context switch between two threads and hand
// the incoming thread a fresh timeslice.
//
// NOTE(review): old_thread's FPU state is saved *after* switch_context() is
// called — confirm switch_context only records/loads the saved register
// state via `regs` (and does not resume the new thread directly), otherwise
// this ordering would matter.
void generic_switch_context(Thread* old_thread, Thread* new_thread, Registers* regs)
{
    if (old_thread != new_thread)
    {
        switch_context(old_thread, new_thread, regs);
        // Only userspace threads carry FPU state, their own page directory,
        // and a dedicated kernel stack.
        if (!old_thread->is_kernel) old_thread->fp_data.save();
        if (!new_thread->is_kernel)
        {
            MMU::switch_page_directory(new_thread->directory);
            CPU::switch_kernel_stack(new_thread->kernel_stack.top());
            new_thread->fp_data.restore();
        }
    }

    if (new_thread->is_idle())
    {
        new_thread->ticks_left = 1; // The idle task only runs for 1 tick so we can check for new runnable tasks
                                    // as fast as possible.
    }
    else
        new_thread->ticks_left = TICKS_PER_TIMESLICE;
}
// Pick the next runnable thread and switch to it.
void switch_task(Registers* regs)
{
    Thread* const previous = g_current;
    Thread* const next = pick_task();
    generic_switch_context(previous, next, regs);
}
// Scheduler tick handler: account CPU time to the current thread, advance
// every sleeper's timer, and switch tasks once the current timeslice is
// exhausted.
void invoke(Registers* regs)
{
    CPU::disable_interrupts();

    // Charge this tick as kernel or user time depending on where the
    // interrupted code was running.
    if (is_in_kernel(regs)) g_current->kernel_ticks_self++;
    else
        g_current->user_ticks_self++;

    g_current->ticks_left--;

    // Tick down every sleeping thread and wake those whose timer expired.
    // A sleep_ticks_left of 0 wakes the thread on the very next tick.
    for (auto* const thread : g_threads)
    {
        if (thread->state == ThreadState::Sleeping)
        {
            if (thread->sleep_ticks_left == 0 || --thread->sleep_ticks_left == 0) thread->wake_up();
        }
    }

    if (!g_current->ticks_left) switch_task(regs);
}
2022-12-19 12:43:23 +01:00
LinkedList<Thread> check_for_dying_threads()
{
2022-12-19 12:43:23 +01:00
LinkedList<Thread> result;
g_threads.delayed_for_each([&](Thread* thread) {
if (thread->state == ThreadState::Dying)
{
g_threads.remove(thread);
result.append(thread);
}
});
return result;
}
2023-03-23 22:42:24 +01:00
Option<Thread*> find_by_pid(pid_t pid)
{
for (auto* const thread : g_threads)
{
if (thread->id == (u64)pid && thread->state != ThreadState::Dying) return thread;
}
2023-03-23 22:42:24 +01:00
return {};
2023-03-23 22:42:24 +01:00
}
// Report whether any thread in the system has `thread` as its parent.
bool has_children(Thread* thread)
{
    bool found = false;
    // Returning false from the callback stops the iteration at the first child.
    for_each_child(thread, [&](Thread*) {
        found = true;
        return false;
    });
    return found;
}
// Return the first child of `thread` that has already exited, if any.
Option<Thread*> find_exited_child(Thread* thread)
{
    Option<Thread*> exited;
    for_each_child(thread, [&](Thread* child) {
        // Keep scanning until an exited child shows up.
        if (exited.has_value() || child->state != ThreadState::Exited) return true;
        exited = child;
        return false; // stop: one match is enough
    });
    return exited;
}
void dump_state()
{
CPU::disable_interrupts();
kdbgln("--- BEGIN SCHEDULER DUMP ---");
kdbgln("current at %p, id = %zu", g_current, g_current->id);
for (const auto* thread : g_threads)
{
kdbgln("%p %c [%-20s] %4zu, parent = (%-18p,%zu), state = %d, ticks: (k:%04zu,u:%04zu), status = "
"%d, cwd = %s",
thread, thread->is_kernel ? 'k' : 'u', thread->name.chars(), thread->id, thread->parent,
thread->parent ? thread->parent->id : 0, (int)thread->state, thread->kernel_ticks_self,
thread->user_ticks_self, thread->status,
thread->current_directory_path.is_empty() ? "/" : thread->current_directory_path.chars());
}
kdbgln("--- END SCHEDULER DUMP ---");
CPU::enable_interrupts();
}
2022-12-07 15:55:58 +01:00
}
// Put the current thread to sleep for the given duration, then yield.
// NOTE(review): the value is stored directly into sleep_ticks_left, which
// invoke() decrements once per scheduler tick — this is only milliseconds
// if the timer ticks at 1000 Hz; confirm against the timer configuration.
void kernel_sleep(u64 ms)
{
    g_current->sleep_ticks_left = ms;
    g_current->state = ThreadState::Sleeping;
    kernel_yield();
}
// Block the current thread waiting on the child identified by `pid`,
// then yield. Something else is responsible for waking it up.
void kernel_wait(pid_t pid)
{
    g_current->child_being_waited_for = pid;
    g_current->state = ThreadState::Waiting;
    kernel_yield();
}
// Block the current thread until some event wakes it, then yield.
void kernel_wait_for_event()
{
    g_current->state = ThreadState::Waiting;
    kernel_yield();
}
// Terminate the current thread: mark it Dying, signal the reap thread so its
// resources get collected, and yield away for good. Never returns.
[[noreturn]] void kernel_exit()
{
    g_current->state = ThreadState::Dying;

    Scheduler::signal_reap_thread();

    kernel_yield();
    unreachable();
}