2022-09-20 17:58:04 +00:00
|
|
|
#define MODULE "sched"
|
|
|
|
|
|
|
|
#include "thread/Scheduler.h"
|
|
|
|
#include "interrupts/Interrupts.h"
|
|
|
|
#include "log/Log.h"
|
2022-09-24 19:45:13 +00:00
|
|
|
#include "memory/MemoryManager.h"
|
2022-10-16 16:48:35 +00:00
|
|
|
#include "memory/PMM.h"
|
2022-09-23 14:41:43 +00:00
|
|
|
#include "memory/VMM.h"
|
2022-09-21 15:56:53 +00:00
|
|
|
#include "misc/hang.h"
|
2022-10-19 18:33:41 +00:00
|
|
|
#include "misc/reboot.h"
|
2022-10-12 11:18:35 +00:00
|
|
|
#include "misc/utils.h"
|
2022-09-25 14:56:00 +00:00
|
|
|
#include "panic/Panic.h"
|
2022-10-19 15:41:23 +00:00
|
|
|
#include "std/assert.h"
|
|
|
|
#include "std/errno.h"
|
2022-09-24 21:18:33 +00:00
|
|
|
#include "std/stdlib.h"
|
2022-09-20 17:58:04 +00:00
|
|
|
#include "std/string.h"
|
2022-10-19 15:15:30 +00:00
|
|
|
#include "sys/UserMemory.h"
|
2022-10-01 10:28:32 +00:00
|
|
|
#include "sys/elf/ELFLoader.h"
|
2022-09-21 15:56:53 +00:00
|
|
|
#include "thread/PIT.h"
|
2022-09-20 17:58:04 +00:00
|
|
|
#include "thread/Task.h"
|
2022-10-14 15:26:47 +00:00
|
|
|
#include "utils/Addresses.h"
|
2022-10-14 15:21:16 +00:00
|
|
|
#include "utils/Registers.h"
|
2022-09-20 17:58:04 +00:00
|
|
|
|
|
|
|
static uint64_t task_num = 0;
|
|
|
|
|
|
|
|
static Task idle_task;
|
|
|
|
|
2022-10-27 06:07:34 +00:00
|
|
|
static uint64_t free_pid = 0;
|
2022-09-20 17:58:04 +00:00
|
|
|
|
|
|
|
static Task* sched_current_task;
|
|
|
|
static Task* base_task;
|
|
|
|
static Task* end_task;
|
|
|
|
|
2022-10-01 10:16:30 +00:00
|
|
|
extern "C" void idle_task_function();
|
2022-09-20 17:58:04 +00:00
|
|
|
|
|
|
|
static uint64_t frequency;
|
|
|
|
|
2022-10-17 18:40:38 +00:00
|
|
|
template <typename Callback> void sched_for_each_task(Callback callback)
|
|
|
|
{
|
|
|
|
Task* task = base_task;
|
|
|
|
if (!task) return;
|
|
|
|
do {
|
|
|
|
bool will_continue = callback(task);
|
|
|
|
if (!will_continue) break;
|
|
|
|
task = task->next_task;
|
|
|
|
} while (task != base_task);
|
|
|
|
}
|
|
|
|
|
2022-10-19 15:16:01 +00:00
|
|
|
// Invokes callback(Task*) for every task whose parent PID matches task->id.
// The callback returns true to keep iterating, false to stop early.
template <typename Callback> void sched_for_each_child(Task* task, Callback callback)
{
    sched_for_each_task([&](Task* candidate) {
        if (candidate->ppid != task->id) return true; // not our child, keep scanning
        return callback(candidate);
    });
}
|
|
|
|
|
2022-10-17 18:40:38 +00:00
|
|
|
// Linearly scans the run queue for a task with the given PID.
// Returns nullptr when no such task exists.
Task* Scheduler::find_by_pid(uint64_t pid)
{
    Task* found = nullptr;
    sched_for_each_task([&](Task* candidate) {
        if (candidate->id != pid) return true; // keep scanning
        found = candidate;
        return false; // match found, stop iterating
    });
    return found;
}
|
|
|
|
|
2022-10-17 15:14:22 +00:00
|
|
|
// Links `task` into the circular doubly-linked run queue as the new tail.
// Does not touch task state, PID, or task_num — callers handle those.
void Scheduler::append_task(Task* task)
{
    if (base_task)
    {
        // Non-empty list: splice the new task between the current tail and the head.
        task->prev_task = end_task;
        task->next_task = base_task;
        end_task->next_task = task;
        base_task->prev_task = task;
        end_task = task;
        return;
    }
    // Empty list: the task becomes both head and tail, linked to itself to keep
    // the list circular.
    ensure(!end_task);
    base_task = task;
    end_task = task;
    task->next_task = task;
    task->prev_task = task;
}
|
|
|
|
|
2022-09-20 17:58:04 +00:00
|
|
|
// Initializes the scheduler: sets up the kernel idle task (PID 0), makes it the
// current task, and derives the scheduler tick period from the PIT frequency.
void Scheduler::init()
{
    // idle_task is a static global; zero it so every field has a known value.
    memset(&idle_task, 0, sizeof(Task));
    idle_task.id = free_pid++; // PID 0 — the idle task is always the first one created
    idle_task.regs.rip = (uint64_t)idle_task_function;
    // The idle task gets a single dedicated stack page.
    idle_task.regs.rsp = get_top_of_stack((uint64_t)MemoryManager::get_page(), 1);
    idle_task.regs.cs = 0x08; // kernel code segment selector
    idle_task.regs.ss = 0x10; // kernel data segment selector
    // RFLAGS: bit 9 = IF (interrupts enabled), bit 21 = ID (CPUID available).
    idle_task.regs.rflags = (1 << 21) | (1 << 9);
    idle_task.task_sleep = 1000;
    idle_task.user_task = false;
    idle_task.block_reason = BlockReason::None;
    idle_task.state = idle_task.Idle; // special state: only scheduled when nothing else can run

    strlcpy(idle_task.name, "[cpu-idle]", sizeof(idle_task.name));

    // Until the first real task is scheduled, the idle task is "current".
    sched_current_task = &idle_task;

    // Milliseconds elapsed per PIT tick. NOTE(review): integer division — if
    // PIT::frequency() exceeds 1000 Hz this becomes 0 and sleep/timeslice
    // accounting would stop advancing; confirm the PIT is configured <= 1000 Hz.
    frequency = 1000 / PIT::frequency();
}
|
|
|
|
|
2022-10-17 17:12:47 +00:00
|
|
|
// Creates and enqueues a new kernel-mode task that starts executing at `task`.
// The task runs in ring 0 with a freshly allocated kernel stack and root
// credentials. Panics (via ensure) if allocation fails.
void Scheduler::add_kernel_task(const char* taskname, void (*task)(void))
{
    Task* new_task = new Task;
    ensure(new_task);
    new_task->user_task = false;
    new_task->id = free_pid++;
    new_task->ppid = 0; // kernel tasks are parented to PID 0
    new_task->uid = new_task->euid = new_task->gid = new_task->egid = 0; // root credentials
    new_task->regs.rip = (uint64_t)task;
    new_task->allocated_stack =
        (uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK); // 16 KB is enough for everyone, right?
    new_task->regs.rsp = get_top_of_stack(new_task->allocated_stack, TASK_PAGES_IN_STACK);
    new_task->regs.cs = 0x08; // kernel code segment
    new_task->regs.ss = 0x10; // kernel data segment
    new_task->regs.ds = 0x10;
    new_task->regs.rflags = read_rflags() | 0x200; // enable interrupts
    new_task->task_sleep = 0;
    new_task->task_time = 0; // no timeslice yet; task_yield assigns one when scheduled
    new_task->cpu_time = 0;
    strlcpy(new_task->name, taskname, sizeof(new_task->name));
    append_task(new_task);
    new_task->block_reason = BlockReason::None;
    // Mark Running last, so the task only becomes schedulable once fully set up.
    new_task->state = new_task->Running;
    task_num++;
    kinfoln("Adding kernel task: %s, starts at %lx, PID %ld, stack at %lx, total tasks: %ld", new_task->name,
            new_task->regs.rip, new_task->id, new_task->regs.rsp, task_num);
}
|
|
|
|
|
2022-10-14 16:17:57 +00:00
|
|
|
// Allocates a bare-bones user task shell and enqueues it: PID assigned,
// registers zeroed, counters reset. The caller is responsible for filling in
// the address space, entry point, stack, segments, credentials and state
// before the task is marked Running. Returns nullptr on allocation failure.
// NOTE(review): unlike add_kernel_task/load_user_task, uid/euid/gid/egid are
// not initialized here — presumably the caller sets them; confirm.
Task* Scheduler::create_user_task()
{
    Task* new_task = new Task;
    if (!new_task) return nullptr;
    memset(&new_task->regs, 0, sizeof(Context));
    new_task->user_task = true;
    new_task->id = free_pid++;
    new_task->ppid = 0;
    new_task->task_sleep = 0;
    new_task->task_time = 0;
    new_task->cpu_time = 0;
    new_task->block_reason = BlockReason::None;
    append_task(new_task);
    task_num++;
    return new_task;
}
|
|
|
|
|
2022-10-16 16:48:35 +00:00
|
|
|
long Scheduler::load_user_task(const char* filename)
|
2022-10-01 10:28:32 +00:00
|
|
|
{
|
|
|
|
kinfoln("Loading user task: %s", filename);
|
2022-10-12 16:37:00 +00:00
|
|
|
Interrupts::push_and_disable();
|
2022-10-01 10:28:32 +00:00
|
|
|
Task* new_task = new Task;
|
2022-11-02 18:38:15 +00:00
|
|
|
ensure(new_task);
|
2022-10-12 15:12:06 +00:00
|
|
|
memset(&new_task->regs, 0, sizeof(Context));
|
2022-10-27 06:07:34 +00:00
|
|
|
new_task->id = free_pid++;
|
2022-10-18 15:18:37 +00:00
|
|
|
new_task->ppid = 0;
|
2022-10-28 15:10:28 +00:00
|
|
|
new_task->uid = new_task->euid = new_task->gid = new_task->egid = 0;
|
2022-10-17 16:43:35 +00:00
|
|
|
if (!new_task->allocator.init())
|
|
|
|
{
|
|
|
|
delete new_task;
|
2022-10-27 06:07:34 +00:00
|
|
|
free_pid--;
|
2022-10-17 16:43:35 +00:00
|
|
|
Interrupts::pop();
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2022-10-13 19:14:39 +00:00
|
|
|
new_task->address_space = AddressSpace::create();
|
|
|
|
VMM::switch_to_user_address_space(new_task->address_space);
|
2022-10-27 06:07:34 +00:00
|
|
|
long result;
|
|
|
|
if ((result = ELFLoader::check_elf_image_from_filesystem(filename)) < 0)
|
|
|
|
{
|
|
|
|
delete new_task;
|
|
|
|
free_pid--;
|
|
|
|
kerrorln("Failed to load %s from initrd", filename);
|
|
|
|
Interrupts::pop();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
if ((uint64_t)result > PMM::get_free())
|
|
|
|
{
|
|
|
|
delete new_task;
|
|
|
|
free_pid--;
|
|
|
|
kerrorln("Not enough memory for task %s", filename);
|
|
|
|
Interrupts::pop();
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ELFImage* image = ELFLoader::load_elf_from_filesystem(filename);
|
2022-11-02 18:38:15 +00:00
|
|
|
ensure(image);
|
2022-10-12 15:07:39 +00:00
|
|
|
new_task->user_task = true;
|
2022-10-07 15:54:05 +00:00
|
|
|
new_task->regs.rip = image->entry;
|
|
|
|
new_task->image = image;
|
2022-10-17 15:30:05 +00:00
|
|
|
new_task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(
|
Kernel: Introduce page ownership
Some pages, such as framebuffer pages, are not physical memory frames reserved for the current process.
Some, such as the framebuffer, may be shared between all processes.
Yet, on exit() or on exec(), a process frees all frames mapped into its address spaces.
And on fork(), it copies all data between frames. So how could we map framebuffers.
Simple: we use one of the bits in page table entries which are available to the OS, and mark whether that page is owned by the current process.
If it is owned, it will be:
- Freed on address space destruction
- Its data will be copied to a new page owned by the child process on fork()
If it is not owned, it will be:
- Left alone on address space destruction
- On fork(), the child's virtual page will be mapped to the same physical frame as the parent
This still needs a bit more work, such as keeping a reference of how many processes use a page to free it when all processes using it exit/exec.
This should be done for MAP_SHARED mappings, for example, since they are not permanent forever,
unlike the framebuffer for example.
2022-11-02 18:32:28 +00:00
|
|
|
0x100000, TASK_PAGES_IN_STACK, MAP_READ_WRITE | MAP_USER | MAP_AS_OWNED_BY_TASK); // 16 KB is enough for everyone, right?
|
2022-10-16 16:48:35 +00:00
|
|
|
if (!new_task->allocated_stack)
|
|
|
|
{
|
2022-10-17 16:43:35 +00:00
|
|
|
new_task->address_space.destroy();
|
2022-10-16 16:48:35 +00:00
|
|
|
delete new_task;
|
2022-10-27 06:07:34 +00:00
|
|
|
free_pid--;
|
2022-10-16 16:48:35 +00:00
|
|
|
ELFLoader::release_elf_image(image);
|
2022-10-17 15:30:05 +00:00
|
|
|
VMM::switch_back_to_kernel_address_space();
|
|
|
|
Interrupts::pop();
|
2022-10-16 16:48:35 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2022-10-14 15:26:47 +00:00
|
|
|
new_task->regs.rsp = get_top_of_stack(new_task->allocated_stack, TASK_PAGES_IN_STACK);
|
2022-10-01 10:28:32 +00:00
|
|
|
new_task->regs.cs = 0x18 | 0x03;
|
|
|
|
new_task->regs.ss = 0x20 | 0x03;
|
|
|
|
new_task->regs.ds = 0x20 | 0x03;
|
|
|
|
new_task->regs.rflags = (1 << 21) | (1 << 9); // enable interrupts
|
|
|
|
new_task->task_sleep = 0;
|
|
|
|
new_task->task_time = 0;
|
|
|
|
new_task->cpu_time = 0;
|
2022-10-17 17:12:47 +00:00
|
|
|
strlcpy(new_task->name, filename, sizeof(new_task->name));
|
2022-10-17 15:14:22 +00:00
|
|
|
append_task(new_task);
|
2022-10-30 19:51:32 +00:00
|
|
|
new_task->block_reason = BlockReason::None;
|
2022-10-01 10:28:32 +00:00
|
|
|
new_task->state = new_task->Running;
|
|
|
|
task_num++;
|
2022-10-17 17:12:47 +00:00
|
|
|
kinfoln("Adding user task: %s, loaded at %lx, PID %ld, stack at %lx, total tasks: %ld", new_task->name,
|
|
|
|
new_task->regs.rip, new_task->id, new_task->regs.rsp, task_num);
|
2022-10-13 19:14:39 +00:00
|
|
|
VMM::switch_back_to_kernel_address_space();
|
2022-10-12 16:37:00 +00:00
|
|
|
Interrupts::pop();
|
2022-10-16 16:48:35 +00:00
|
|
|
return (long)new_task->id;
|
2022-10-01 10:28:32 +00:00
|
|
|
}
|
|
|
|
|
2022-10-12 15:08:45 +00:00
|
|
|
// Re-initializes an existing user task to start executing a new ELF image
// (exec-style): registers are wiped, the entry point and stack pointer reset,
// and counters cleared. The task's PID, name, address space and stack
// allocation are kept.
// NOTE(review): the previous task->image is overwritten without being released
// here — presumably the caller frees it; confirm.
void Scheduler::reset_task(Task* task, ELFImage* new_image)
{
    memset(&task->regs, 0, sizeof(Context));
    task->state = task->Running;
    task->regs.rip = new_image->entry;
    task->image = new_image;
    // Reuse the task's existing stack allocation, reset to the top.
    task->regs.rsp = get_top_of_stack(task->allocated_stack, TASK_PAGES_IN_STACK);
    // Ring-3 segment selectors (RPL 3).
    task->regs.cs = 0x18 | 0x03;
    task->regs.ss = 0x20 | 0x03;
    task->regs.ds = 0x20 | 0x03;
    task->regs.rflags = (1 << 21) | (1 << 9); // enable interrupts
    task->task_sleep = 0;
    task->cpu_time = 0;
    task->block_reason = BlockReason::None;
    kinfoln("Resetting task: %s, loaded at %lx, PID %ld, stack at %lx, total tasks: %ld", task->name, task->regs.rip,
            task->id, task->regs.rsp, task_num);
}
|
|
|
|
|
2022-09-21 19:06:00 +00:00
|
|
|
// Frees all resources of an exited task: stack, ELF image, heap allocator,
// address space, and open file descriptors. The task must already have been
// unlinked from the run queue (see reap_tasks). Must NOT be called from an
// interrupt handler, since destroying an address space may need interrupts.
void Scheduler::reap_task(Task* task)
{
    ensure(!Interrupts::is_in_handler());
    task_num--;
    Task* exiting_task = task;
    ensure(task->id != 0); // WHY IN THE WORLD WOULD WE BE REAPING THE IDLE TASK?
    if (exiting_task->is_user_task())
    {
        // Make sure the kernel mappings are current, then activate the dying
        // task's address space so its owned pages can be walked below.
        VMM::switch_back_to_kernel_address_space();
        VMM::apply_address_space();
        VMM::switch_to_user_address_space(exiting_task->address_space);
    }
    kinfoln("reaping task %s, PID %ld, exited with code %ld", exiting_task->name, exiting_task->id,
            exiting_task->exit_status);
    if (exiting_task->id == (free_pid - 1)) free_pid--; // If we are the last spawned thread, free our PID.
    // Kernel task stacks were allocated with MemoryManager::get_pages; user
    // stacks live in (and die with) the task's address space.
    if (exiting_task->allocated_stack && !exiting_task->is_user_task())
        MemoryManager::release_pages((void*)exiting_task->allocated_stack, TASK_PAGES_IN_STACK);
    // NOTE(review): the image is released with kfree here but created via
    // ELFLoader::load_elf_from_filesystem (and released elsewhere with
    // ELFLoader::release_elf_image) — confirm kfree is the right deallocator.
    if (exiting_task->image) kfree(exiting_task->image);
    if (exiting_task->is_user_task())
    {
        exiting_task->allocator.free();
        // Leave the dying address space before destroying it.
        VMM::switch_back_to_kernel_address_space();
        VMM::apply_address_space();
        Interrupts::push_and_enable();
        exiting_task->address_space.destroy();
        Interrupts::pop();
    }
    // Close every file descriptor slot (close() is presumably a no-op on
    // unused slots — confirm).
    for (int i = 0; i < TASK_MAX_FDS; i++) { exiting_task->files[i].close(); }
    delete exiting_task;
}
|
|
|
|
|
2022-10-19 18:51:54 +00:00
|
|
|
// Shared exit path for task_exit and task_misbehave: records the exit status,
// transitions the current task to Dying (so a parent can waitpid() it) or
// straight to Exited for init (PID 1), reparents any children to init, and
// yields so another task is scheduled. If init itself exits, the system
// reboots (or hangs in test builds).
void sched_common_exit(Context* context, int64_t status)
{
    // init (PID 1) has no parent to wait for it, so it skips the Dying state.
    if (sched_current_task->id == 1) sched_current_task->state = sched_current_task->Exited;
    else
        sched_current_task->state = sched_current_task->Dying;
    sched_current_task->exit_status = status;
    if (sched_current_task->id != 1)
    {
        // Orphaned children are reparented to init, POSIX-style.
        sched_for_each_child(sched_current_task, [](Task* child) {
            if (child->state != child->Exited) child->ppid = 1;
            return true;
        });
    }
    else
    {
#ifndef RUN_TEST_AS_INIT
        reboot();
#else
        // Test builds hang instead so the exit status/log remains inspectable.
        hang();
#endif
    }
    // Never returns to this task's user code in the Dying/Exited state.
    Scheduler::task_yield(context);
}
|
|
|
|
|
|
|
|
// Voluntary task exit (exit() syscall path). Must be called from an interrupt
// handler since it ends with a context switch via task_yield.
void Scheduler::task_exit(Context* context, int64_t status)
{
    ensure(Interrupts::is_in_handler());
    kdbgln("exit: task %ld finished running, used %ld ms of cpu time", sched_current_task->id,
           sched_current_task->cpu_time);
    sched_common_exit(context, status);
}
|
|
|
|
|
2022-10-08 15:56:40 +00:00
|
|
|
// Involuntary task termination (e.g. after a fault). Identical to task_exit
// except for the log message. Must be called from an interrupt handler.
void Scheduler::task_misbehave(Context* context, int64_t status)
{
    ensure(Interrupts::is_in_handler());
    kdbgln("exit: task %ld misbehaved, used %ld ms of cpu time", sched_current_task->id, sched_current_task->cpu_time);
    sched_common_exit(context, status);
}
|
|
|
|
|
|
|
|
// Walks the run queue, unlinks every Exited task into a separate singly-linked
// "reap list", then frees each of them via reap_task. Runs with interrupts
// disabled during the list surgery; reap_task itself must not run inside an
// interrupt handler.
// NOTE(review): the two unlink branches below are near-duplicates (they differ
// only in how the reap list head/tail is updated) — candidate for a helper.
void Scheduler::reap_tasks()
{
    Interrupts::disable();
    ensure(!Interrupts::is_in_handler());
    Task* reap_base = nullptr; // head of the reap list
    Task* reap_end = nullptr;  // tail of the reap list
    Task* task = base_task;
    Task* task_reaping;
    uint64_t iter_index = 0;
    // Bounded by task_num rather than a wrap-around check, since base_task may
    // itself be unlinked during the walk.
    do {
        if (task->state == task->Exited)
        {
            if (task == base_task && task == end_task) { panic("Last task exited"); }
            // Keep the queue's head/tail pointers valid before unlinking.
            else if (task == base_task) { base_task = task->next_task; }
            else if (task == end_task) { end_task = task->prev_task; }
            if (!reap_base)
            {
                // First exited task found: start the reap list with it.
                reap_base = task;
                reap_end = task;
                task->prev_task->next_task = task->next_task;
                task->next_task->prev_task = task->prev_task;
                task->prev_task = nullptr;
                // Advance before severing next_task, which becomes the
                // reap-list terminator.
                task_reaping = task;
                task = task->next_task;
                task_reaping->next_task = nullptr;
            }
            else
            {
                // Append to the existing reap list.
                reap_end->next_task = task;
                task->prev_task->next_task = task->next_task;
                task->next_task->prev_task = task->prev_task;
                task->prev_task = nullptr;
                reap_end = task;
                task_reaping = task;
                task = task->next_task;
                task_reaping->next_task = nullptr;
            }
        }
        else { task = task->next_task; }
        iter_index++;
    } while (iter_index < task_num);
    // Free every task collected above. reap_task updates task_num.
    task = reap_base;
    while (task)
    {
        Task* reaped_task = task;
        task = task->next_task;
        reap_task(reaped_task);
    }
    Interrupts::enable();
}
|
|
|
|
|
|
|
|
// Called once per scheduler tick: decrements every sleeping task's remaining
// sleep time by the tick period and wakes tasks whose sleep has elapsed.
static void sched_decrement_sleep_times()
{
    sched_for_each_task([](Task* task) {
        if (task->task_sleep > 0)
        {
            task->task_sleep -= frequency;
            // Clamp to zero if the tick overshot the remaining sleep.
            // NOTE(review): this only works if task_sleep is a signed type —
            // if it is unsigned the subtraction wraps and `< 0` is never true;
            // confirm the declaration in Task.h.
            if (task->task_sleep < 0) task->task_sleep = 0;
        }
        if (task->task_sleep == 0 && task->state == task->Sleeping) task->state = task->Running;
        return true;
    });
}
|
|
|
|
|
|
|
|
// Timer-interrupt entry point: advances sleep/CPU-time accounting and yields
// when the current task's timeslice is used up. The idle task (PID 0) yields
// on every tick so real work is picked up as soon as it becomes runnable.
void Scheduler::task_tick(Context* context)
{
    ensure(Interrupts::is_in_handler());
    Interrupts::disable();
    sched_decrement_sleep_times();
    // `frequency` is the ms-per-tick period computed in init().
    sched_current_task->task_time -= frequency;
    sched_current_task->cpu_time += frequency;
    if (sched_current_task->id == 0) return task_yield(context);
    if (sched_current_task->task_time <= 0)
    {
        sched_current_task->task_time = 0;
        // task_yield restores another task's context into `context`.
        task_yield(context);
    }
    Interrupts::enable();
}
|
|
|
|
|
|
|
|
// Core context switch: saves the current task's context, scans the run queue
// for the next Running task (waking tasks whose block condition cleared), and
// restores its context. Falls back to the idle task when nothing is runnable.
// Must be called from an interrupt handler — the actual switch happens when
// the handler returns with the modified `context`.
void Scheduler::task_yield(Context* context)
{
    ensure(Interrupts::is_in_handler());
    Interrupts::disable();
    sched_current_task->save_context(context);
    bool was_idle = false;
    if (sched_current_task->state == sched_current_task->Idle)
    {
        // The idle task is not on the run queue; resume the scan from the
        // queue's tail so the do-loop below starts at base_task.
        sched_current_task = end_task;
        was_idle = true;
    }
    Task* original_task = sched_current_task;
    do {
        sched_current_task = sched_current_task->next_task;
        if (sched_current_task->state == sched_current_task->Blocking)
        {
            // Re-check the block condition; resume() transitions to Running.
            if (!sched_current_task->is_still_blocking()) sched_current_task->resume();
        }
        if (sched_current_task->state == sched_current_task->Running)
        {
            // Only do the expensive FPU/address-space work on an actual switch
            // (different task, or coming off the idle task whose state differs).
            if (sched_current_task->id != original_task->id || was_idle)
            {
                if (!was_idle && original_task->is_user_task() && !original_task->has_died())
                {
                    original_task->save_floating();
                }
                if (sched_current_task->is_user_task())
                {
                    sched_current_task->switch_to_address_space();
                    sched_current_task->restore_floating();
                }
                // user -> kernel switch: drop back to the kernel address space.
                else if (!was_idle && original_task->is_user_task() && !sched_current_task->is_user_task())
                {
                    VMM::switch_back_to_kernel_address_space();
                    VMM::apply_address_space();
                }
            }
            sched_current_task->task_time = 20; // fresh timeslice, in ms
            sched_current_task->restore_context(context);
            return;
        }
    } while (sched_current_task != original_task);
    // No runnable task found: park on the idle task until the next tick.
    if (!was_idle && original_task->is_user_task() && original_task->state != original_task->Exited)
    {
        original_task->save_floating();
    }
    sched_current_task = &idle_task;
    sched_current_task->task_time = frequency;
    // If we were already idle, `context` still holds the idle task's state.
    if (!was_idle) { sched_current_task->restore_context(context); }
    return;
}
|
|
|
|
|
|
|
|
// Voluntarily gives up the CPU: raises the scheduler software interrupt
// (vector 0x42) with function code 1 (yield) in rax.
void Scheduler::yield()
{
    asm volatile("int $0x42" : : "a"(1));
}
|
|
|
|
|
2022-10-08 15:56:40 +00:00
|
|
|
// Terminates the current task: raises the scheduler software interrupt
// (vector 0x42) with function code 0 (exit) in rax and the status in rdi.
// Does not return.
void Scheduler::exit(int status)
{
    asm volatile("int $0x42" : : "a"(0), "D"(status));
}
|
|
|
|
|
2022-09-20 17:58:04 +00:00
|
|
|
// Puts the current task to sleep for at least `ms` milliseconds: raises the
// scheduler software interrupt (vector 0x42) with function code 2 (sleep) in
// rax and the duration in rdi.
void Scheduler::sleep(unsigned long ms)
{
    asm volatile("int $0x42" : : "D"(ms), "a"(2));
}
|
|
|
|
|
|
|
|
// Returns the task currently scheduled on the CPU (the idle task counts).
Task* Scheduler::current_task()
{
    return sched_current_task;
}
|
|
|
|
|
2022-10-27 15:05:42 +00:00
|
|
|
#define WNOHANG 1
|
|
|
|
|
2022-10-19 15:15:30 +00:00
|
|
|
// waitpid() syscall: waits for a child task to die. pid == -1 waits for any
// child; otherwise waits for the specific child. With WNOHANG set, returns 0
// immediately instead of blocking. On success, stores the low byte of the
// child's exit status into *wstatus (if non-null), marks the child Exited so
// the reaper can free it, and returns its PID in rax.
// NOTE(review): the two blocking setups below (any-child and specific-pid)
// are near-duplicates — candidate for a shared helper.
void sys_waitpid(Context* context, long pid, int* wstatus,
                 int options) // FIXME: only allow waiting for child processes when specifying a PID.
{
    Task* child = nullptr;
    if (pid == -1)
    {
        // Look for any child that is already Dying.
        sched_for_each_child(sched_current_task, [&](Task* task) {
            if (task->state == task->Dying)
            {
                child = task;
                return false;
            }
            return true;
        });
        if (!child)
        {
            if (options & WNOHANG)
            {
                context->rax = 0; // No child has exited, let's return 0.
                return;
            }
            // Pin the user's wstatus pointer now; it is released later in
            // Task::resume_wait when the wait completes.
            int* kwstatus;
            if (wstatus)
            {
                kwstatus = obtain_user_ref(wstatus);
                if (!kwstatus)
                {
                    context->rax = -EFAULT;
                    return;
                }
            }
            kdbgln("blocking wait on any child");
            sched_current_task->state = sched_current_task->Blocking;
            sched_current_task->block_reason = BlockReason::Waiting;
            sched_current_task->blocking_wait_info.pid = -1;
            if (wstatus) sched_current_task->blocking_wait_info.wstatus = kwstatus;
            else
                sched_current_task->blocking_wait_info.wstatus = nullptr;
            return Scheduler::task_yield(context);
        }
    }
    else
    {
        child = Scheduler::find_by_pid(pid);
        if (!child)
        {
            context->rax = -ECHILD;
            return;
        }
    }
    if (child->ppid != sched_current_task->id)
    {
        // We are trying to call waitpid() on a task that isn't a child of ours. This is not allowed.
        context->rax = -ECHILD;
        return;
    }
    if (child->state != child->Dying)
    {
        if (options & WNOHANG)
        {
            context->rax = 0; // No child has exited, let's return 0.
            return;
        }
        // Same pin-and-block sequence as the any-child path above, but with a
        // concrete PID recorded for Task::is_wait_still_blocking to check.
        int* kwstatus;
        if (wstatus)
        {
            kwstatus = obtain_user_ref(wstatus);
            if (!kwstatus)
            {
                context->rax = -EFAULT;
                return;
            }
        }
        sched_current_task->state = sched_current_task->Blocking;
        sched_current_task->block_reason = BlockReason::Waiting;
        sched_current_task->blocking_wait_info.pid = pid;
        if (wstatus) sched_current_task->blocking_wait_info.wstatus = kwstatus;
        else
            sched_current_task->blocking_wait_info.wstatus = nullptr;
        return Scheduler::task_yield(context);
    }
    // Non-blocking completion: a Dying child was found immediately.
    if (wstatus)
    {
        int* kwstatus = obtain_user_ref(wstatus);
        if (kwstatus)
        {
            *kwstatus = (int)(child->exit_status & 0xff);
            release_user_ref(kwstatus);
        }
        else
        {
            kinfoln("wstatus ptr is invalid: %p", (void*)wstatus);
            // Still mark the child Exited so it does not linger unreaped.
            child->state = child->Exited;
            context->rax = -EFAULT;
            return;
        }
    }
    // Exited means "waited for": the reaper is now free to destroy the child.
    child->state = child->Exited;
    context->rax = (long)child->id;
}
|
|
|
|
|
2022-10-27 15:05:42 +00:00
|
|
|
bool Task::is_wait_still_blocking()
|
|
|
|
{
|
|
|
|
Task* child = nullptr;
|
2022-10-30 18:28:43 +00:00
|
|
|
if (blocking_wait_info.pid == -1)
|
2022-10-27 15:05:42 +00:00
|
|
|
{
|
|
|
|
sched_for_each_child(sched_current_task, [&](Task* task) {
|
|
|
|
if (task->state == task->Dying)
|
|
|
|
{
|
|
|
|
child = task;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
});
|
|
|
|
if (!child) return true;
|
|
|
|
else
|
|
|
|
{
|
2022-10-30 18:28:43 +00:00
|
|
|
blocking_wait_info.pid = child->id; // We're committed to this child now.
|
2022-10-27 15:05:42 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-10-30 18:28:43 +00:00
|
|
|
child = Scheduler::find_by_pid(blocking_wait_info.pid);
|
2022-11-02 18:38:15 +00:00
|
|
|
ensure(child); // since sys_waitpid should have validated this child, and the only way for it to disappear from
|
2022-10-30 18:24:56 +00:00
|
|
|
// the process list is for someone to wait for it, this should be pretty safe.
|
2022-10-27 15:05:42 +00:00
|
|
|
if (child->state != child->Dying) return true;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Completes a blocking waitpid(): copies the dead child's exit status to the
// pinned user pointer (releasing the reference obtained in sys_waitpid),
// marks the child Exited for the reaper, and stores the child's PID as this
// task's syscall return value.
void Task::resume_wait()
{
    ensure(blocking_wait_info.pid != -1); // is_wait_still_blocking should have chosen a child for us if the user
                                          // process told us to wait for any child.
    Task* child = Scheduler::find_by_pid(blocking_wait_info.pid);
    ensure(child); // This should also already have been validated.

    if (blocking_wait_info.wstatus)
    {
        // Low byte only, matching the non-blocking sys_waitpid path.
        *blocking_wait_info.wstatus = (int)(child->exit_status & 0xff);
        release_user_ref(blocking_wait_info.wstatus);
    }

    child->state = child->Exited;
    // rax is the syscall return value restored when this task is rescheduled.
    regs.rax = (long)child->id;
}
|
|
|
|
|
2022-10-22 12:26:29 +00:00
|
|
|
// Userspace-visible process status record filled in by sys_pstat. Layout is
// part of the syscall ABI — do not reorder or resize fields.
struct pstat
{
    long pt_pid;       // process ID
    long pt_ppid;      // parent process ID
    char pt_name[128]; // task name (NUL-terminated, truncated if longer)
    int pt_state;      // raw Task state enum value
    long pt_time;      // accumulated CPU time (ms)
    uid_t pt_uid;      // real user ID
    gid_t pt_gid;      // real group ID
};
|
|
|
|
|
|
|
|
// pstat() syscall: fills `buf` (if non-null) with status information about a
// task. pid == 0 selects the idle task; pid == -1 selects the most recently
// spawned task (highest PID); otherwise a specific PID. Returns the task's
// PID in rax, or -ESRCH / -EFAULT on error.
void sys_pstat(Context* context, long pid, struct pstat* buf)
{
    Task* task;
    if (pid == -1) task = Scheduler::find_by_pid(free_pid - 1);
    else if (pid == 0)
        task = &idle_task;
    else
        task = Scheduler::find_by_pid(pid);
    if (!task)
    {
        context->rax = -ESRCH;
        return;
    }
    if (task->state == task->Exited) // we're just waiting for the reaper to reap it
    {
        context->rax = -ESRCH;
        return;
    }
    if (buf)
    {
        // Pin the user buffer before writing to it.
        struct pstat* kpstat = obtain_user_ref(buf);
        if (!kpstat)
        {
            context->rax = -EFAULT;
            return;
        }
        kpstat->pt_pid = task->id;
        kpstat->pt_ppid = task->ppid;
        kpstat->pt_state = (int)task->state;
        kpstat->pt_time = (long)task->cpu_time;
        kpstat->pt_uid = task->uid;
        kpstat->pt_gid = task->gid;
        strlcpy(kpstat->pt_name, task->name, sizeof(kpstat->pt_name));
        release_user_ref(kpstat);
    }
    context->rax = task->id;
    return;
}
|