core: Add priority-based scheduling
parent 6be2261519
commit 9c92b3de4b
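In short, as far as the diff shows: each thread gains a user_priority byte (0 for the idle thread and for freshly created threads, 255 for the init task) plus an internal current_priority, the per-core thread_list becomes a priority queue ordered through delta-encoded priorities, and two new syscalls, SetPriority and GetPriority, expose the value to userspace. The old scheduleNewTask(regs) is split into fetchNewTask, which dequeues the most favoured runnable thread (optionally falling back to the idle thread), and a new scheduleNewTask, which performs the switch and hands back the previously running thread so the caller can re-queue it.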
@@ -28,11 +28,10 @@ pub fn setupCore(allocator: *pmm.FrameAllocator) !void {
     idle_thread.mapper = null;
     idle_thread.regs = std.mem.zeroes(@TypeOf(idle_thread.regs));
     idle_thread.state = .Running;
+    idle_thread.user_priority = 0;
     thread.arch.initKernelRegisters(&idle_thread.regs);
     thread.arch.setAddress(&idle_thread.regs, @intFromPtr(&thread.arch.idleLoop));
 
-    core.thread_list.append(&core.idle_thread);
-
     const stack = try pmm.allocFrame(allocator);
     thread.arch.setStack(&idle_thread.regs, stack.virtualAddress(vmm.PHYSICAL_MAPPING_BASE) + (platform.PAGE_SIZE - 16));
 
@@ -69,6 +69,7 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
     };
 
     init.mapper = mapper;
+    init.user_priority = 255;
     thread.arch.initUserRegisters(&init.regs);
     thread.arch.setArgument(&init.regs, base);
 
@@ -1,7 +1,21 @@
 const interrupts = @import("../arch/interrupts.zig").arch;
 const sys = @import("syscall.zig");
 const thread = @import("../thread.zig");
+const cpu = @import("../arch/cpu.zig");
 
 pub fn yield(regs: *interrupts.InterruptStackFrame, _: *sys.Arguments, _: *isize) anyerror!void {
-    thread.scheduleNewTask(regs);
+    const core = cpu.thisCore();
+    const new_thread = thread.fetchNewTask(core, false) orelse return;
+    const current_thread = thread.scheduleNewTask(core, regs, new_thread);
+    thread.addThreadToPriorityQueue(core, current_thread);
+}
+
+pub fn setPriority(_: *interrupts.InterruptStackFrame, args: *sys.Arguments, _: *isize) anyerror!void {
+    const core = cpu.thisCore();
+    core.current_thread.user_priority = @truncate(args.arg0);
+}
+
+pub fn getPriority(_: *interrupts.InterruptStackFrame, _: *sys.Arguments, retval: *isize) anyerror!void {
+    const core = cpu.thisCore();
+    retval.* = core.current_thread.user_priority;
 }
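The yield handler above now spells out the whole cycle: fetch the next runnable thread from this core's queue (returning without a switch when the queue is empty), switch to it through scheduleNewTask, then put the previously running thread back into the priority queue. setPriority truncates args.arg0 into the current thread's u8 user_priority, and getPriority reports it back through retval.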
@@ -15,7 +15,7 @@ pub const Arguments = struct {
 
 const SystemCall = *const fn (frame: *interrupts.InterruptStackFrame, args: *Arguments, retval: *isize) anyerror!void;
 
-const syscalls = [_]SystemCall{ print, mem.allocFrame, mem.lockFrame, mem.freeFrame, sched.yield };
+const syscalls = [_]SystemCall{ print, mem.allocFrame, mem.lockFrame, mem.freeFrame, sched.yield, sched.setPriority, sched.getPriority };
 
 pub fn invokeSyscall(number: usize, frame: *interrupts.InterruptStackFrame, args: *Arguments, retval: *isize) void {
     if (number >= syscalls.len) {
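The syscall table above is indexed positionally by the incoming syscall number, so the two new entries must stay in the same order as the SetPriority and GetPriority members appended to the userspace SystemCall enum later in this commit.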
@@ -18,6 +18,9 @@ pub const ThreadControlBlock = struct {
     state: ThreadState,
 
     ticks: u64,
+
+    user_priority: u8,
+    current_priority: u32,
 };
 
 pub const ThreadList = std.DoublyLinkedList(ThreadControlBlock);
@@ -53,28 +56,47 @@ pub fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock
     core.current_thread = new_task;
 }
 
-pub fn scheduleNewTask(regs: *interrupts.InterruptStackFrame) void {
-    const core = cpu.thisCore();
+pub fn fetchNewTask(core: *cpu.arch.Core, should_idle_if_not_found: bool) ?*ThreadControlBlock {
+    const last = core.thread_list.last orelse {
+        if (should_idle_if_not_found) {
+            return &core.idle_thread;
+        } else return null;
+    };
 
-    const new_task = core.thread_list.popFirst() orelse return;
-    core.thread_list.append(new_task);
+    const new_task = &last.data;
 
-    switchTask(regs, &new_task.data);
+    removeThreadFromPriorityQueue(core, new_task);
+
+    return new_task;
+}
+
+pub fn scheduleNewTask(core: *cpu.arch.Core, regs: *interrupts.InterruptStackFrame, new_thread: *ThreadControlBlock) *ThreadControlBlock {
+    if (core.thread_list.first) |first| {
+        first.data.current_priority +|= 4;
+    }
+
+    const current_thread = core.current_thread;
+
+    switchTask(regs, new_thread);
+
+    return current_thread;
 }
 
 pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
     const core = cpu.thisCore();
 
-    core.current_thread.ticks -= 1;
+    core.current_thread.ticks -|= 1;
     if (core.current_thread.ticks == 0) {
-        scheduleNewTask(regs);
+        const new_thread = fetchNewTask(core, false) orelse return;
+        const current_thread = scheduleNewTask(core, regs, new_thread);
+        addThreadToPriorityQueue(core, current_thread);
     }
 }
 
 var next_id: std.atomic.Value(u64) = std.atomic.Value(u64).init(1);
 
 pub fn addThreadToScheduler(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
-    core.thread_list.append(@fieldParentPtr("data", thread));
+    addThreadToPriorityQueue(core, thread);
 }
 
 pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlBlock {
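Two easy-to-miss details in the scheduler hunk above: the tick countdown switches from -= to the saturating -|=, so a stray tick while ticks is already 0 clamps instead of underflowing, and scheduleNewTask bumps the head of the queue with +|= 4 on every switch. Because the queue stores priority deltas (see the helpers in the next hunk), that bump appears to act as a simple aging step, favouring threads that are already waiting over ones re-queued afterwards. A minimal standalone sketch of Zig's saturating assignment operators, with made-up values, in case they are unfamiliar:

const std = @import("std");

test "saturating assignments clamp instead of wrapping" {
    // ticks -|= 1 stays at 0 where ticks -= 1 would underflow.
    var ticks: u64 = 0;
    ticks -|= 1;
    try std.testing.expectEqual(@as(u64, 0), ticks);

    // +|= clamps at the integer's maximum instead of overflowing.
    var priority: u32 = std.math.maxInt(u32);
    priority +|= 4;
    try std.testing.expectEqual(@as(u32, std.math.maxInt(u32)), priority);
}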
@@ -86,6 +108,33 @@ pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlBlock {
     thread.mapper = null;
     thread.regs = std.mem.zeroes(@TypeOf(thread.regs));
     thread.state = .Inactive;
+    thread.user_priority = 0;
 
     return thread;
 }
+
+pub fn addThreadToPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
+    thread.current_priority = thread.user_priority;
+
+    var it: ?*ThreadList.Node = core.thread_list.first;
+    while (it) |n| : (it = n.next) {
+        if (thread.current_priority <= n.data.current_priority) {
+            n.data.current_priority -|= thread.current_priority;
+            core.thread_list.insertBefore(n, @fieldParentPtr("data", thread));
+            return;
+        }
+        thread.current_priority -|= n.data.current_priority;
+    }
+
+    core.thread_list.append(@fieldParentPtr("data", thread));
+}
+
+pub fn removeThreadFromPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
+    const node: *ThreadList.Node = @fieldParentPtr("data", thread);
+
+    if (node.next) |n| {
+        n.data.current_priority +|= thread.current_priority;
+    }
+
+    core.thread_list.remove(node);
+}
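addThreadToPriorityQueue keeps the list sorted by storing each node's priority as a delta on top of the sum of all deltas before it: the incoming priority walks the list, subtracting each predecessor's delta, and at the insertion point the follower's delta is reduced by the newcomer's so the cumulative sums stay intact; removeThreadFromPriorityQueue hands the removed delta back to its successor for the same reason. With the lowest cumulative value at the front, fetchNewTask's pick of thread_list.last is the waiting thread with the highest effective priority. A small standalone model of that arithmetic (a plain ArrayList of u32 deltas; the names are illustrative, not kernel code):

const std = @import("std");

// Insert a priority into a delta-encoded list: each element stores its
// absolute priority minus the cumulative sum of the elements before it.
fn insertByDelta(list: *std.ArrayList(u32), user_priority: u32) !void {
    var remaining = user_priority;
    for (list.items, 0..) |*delta, i| {
        if (remaining <= delta.*) {
            delta.* -|= remaining; // shrink the follower's delta
            try list.insert(i, remaining); // insert before it
            return;
        }
        remaining -|= delta.*; // walk past this node
    }
    try list.append(remaining);
}

test "deltas accumulate back to sorted absolute priorities" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();

    try insertByDelta(&list, 255);
    try insertByDelta(&list, 0);
    try insertByDelta(&list, 128);

    // Stored deltas {0, 128, 127} sum up to 0, 128, 255 front to back.
    try std.testing.expectEqualSlices(u32, &.{ 0, 128, 127 }, list.items);
}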
@@ -4,6 +4,8 @@ const SystemCall = enum(u64) {
     LockFrame,
     FreeFrame,
     Yield,
+    SetPriority,
+    GetPriority,
 };
 
 const SystemError = error{
@@ -39,3 +41,11 @@ pub fn freeFrame(address: u64) void {
 pub fn yield() void {
     _ = syscall(.Yield, 0);
 }
+
+pub fn setPriority(priority: u8) void {
+    _ = syscall(.SetPriority, priority);
+}
+
+pub fn getPriority() u8 {
+    return @truncate(@as(u64, @bitCast(syscall(.GetPriority, 0))));
+}
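The userspace getPriority wrapper bit-casts the (presumably isize) value returned by syscall to u64 before truncating it to u8, mirroring the kernel side writing the priority into retval.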
@@ -2,6 +2,10 @@ const kernel = @import("kernel.zig");
 
 export fn _start(base: u64) callconv(.C) noreturn {
     kernel.print(base);
+    kernel.print(kernel.getPriority());
+
+    kernel.setPriority(128);
+    kernel.print(kernel.getPriority());
 
     while (true) {
         kernel.yield();
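In the updated user program above, the first getPriority call should report the 255 assigned to the init task earlier in this commit, and the second should report 128 after the setPriority(128) round trip, assuming print simply echoes the value it is given.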