Compare commits


6 Commits

SHA1        Date                        Message
b2d89ab4fd  2025-02-20 18:05:11 +01:00  init: Add basic virtual memory code
2ecd24df82  2025-02-20 17:59:27 +01:00  core: Add some annotations to managed thread variables
            Some thread variables don't need to be set manually, since they are
            updated whenever needed by some functions. Add comments to make that clear.
8ea5df2e4e  2025-02-20 17:57:50 +01:00  core: Give init a pointer to its own page tables
0782ff5fe7  2025-02-20 17:55:56 +01:00  core/x86_64: Fix function naming style
b14c473a29  2025-02-18 20:10:14 +01:00  core: Make some functions in thread.zig private
0ee587a142  2025-02-18 20:06:40 +01:00  core: Add sleep support to the scheduler
12 changed files with 322 additions and 38 deletions

View File

@@ -20,7 +20,8 @@ pub fn setupCore(allocator: *pmm.FrameAllocator) !void {
     const core: *arch.Core = @ptrFromInt(frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE));
     core.id = 0; // FIXME: Actually check core id
-    core.thread_list = .{};
+    core.active_thread_list = .{};
+    core.sleeping_thread_list = .{};
 
     const idle_thread = &core.idle_thread.data;

View File

@@ -1,3 +1,3 @@
 const thread = @import("../../thread.zig");
 
-pub const Core = struct { id: u32, thread_list: thread.ThreadList, current_thread: *thread.ThreadControlBlock, idle_thread: thread.ThreadList.Node };
+pub const Core = struct { id: u32, active_thread_list: thread.ThreadList, sleeping_thread_list: thread.ThreadList, current_thread: *thread.ThreadControlBlock, idle_thread: thread.ThreadList.Node };

View File

@@ -51,8 +51,9 @@ pub fn setAddress(regs: *interrupts.InterruptStackFrame, address: u64) void {
     regs.rip = address;
 }
 
-pub fn setArgument(regs: *interrupts.InterruptStackFrame, argument: u64) void {
-    regs.rdi = argument;
+pub fn setArguments(regs: *interrupts.InterruptStackFrame, arg0: u64, arg1: u64) void {
+    regs.rdi = arg0;
+    regs.rsi = arg1;
 }
 
 pub fn setStack(regs: *interrupts.InterruptStackFrame, stack: u64) void {
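rdi and rsi are the first two integer argument registers of the System V AMD64 calling convention, so values written by setArguments() arrive as the first two parameters of a C-calling-convention entry point. A minimal sketch of the receiving side (the parameter names here are illustrative, not taken from the repository):

    // Hypothetical user-side entry point: arg0 (rdi) and arg1 (rsi), as filled in by
    // setArguments() on the kernel side, arrive as the first two parameters.
    export fn _start(arg0: u64, arg1: u64) callconv(.C) noreturn {
        _ = arg0;
        _ = arg1;
        while (true) {}
    }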

View File

@@ -23,11 +23,11 @@ pub const PageTableEntry = packed struct {
     available2: u3,
     no_execute: u1,
 
-    pub fn set_address(self: *PageTableEntry, address: u64) void {
+    pub fn setAddress(self: *PageTableEntry, address: u64) void {
         self.address = @intCast(address >> 12);
     }
 
-    pub fn get_address(self: *PageTableEntry) u64 {
+    pub fn getAddress(self: *PageTableEntry) u64 {
         return self.address << 12;
     }
 
@@ -86,7 +86,7 @@ fn updatePageTableEntry(entry: *PageTableEntry, phys: pmm.PhysFrame, flags: u32)
     entry.cache_disabled = hasFlag(flags, Flags.CacheDisable);
     entry.no_execute = hasFlag(flags, Flags.NoExecute);
     entry.global = hasFlag(flags, Flags.Global);
-    entry.set_address(phys.address);
+    entry.setAddress(phys.address);
 }
@@ -94,7 +94,7 @@ fn setUpParentPageTableEntry(allocator: *pmm.FrameAllocator, pte: *PageTableEntr
         pte.clear();
         const frame = try pmm.allocFrame(allocator);
         pte.present = 1;
-        pte.set_address(frame.address);
+        pte.setAddress(frame.address);
         getTable(pte, base).* = std.mem.zeroes(PageDirectory);
     }
     if (hasFlag(flags, Flags.ReadWrite) == 1) pte.read_write = 1;
@@ -102,7 +102,7 @@ fn setUpParentPageTableEntry(allocator: *pmm.FrameAllocator, pte: *PageTableEntr
 }
 
 fn getTable(pte: *PageTableEntry, base: usize) *allowzero PageDirectory {
-    const frame = pmm.PhysFrame{ .address = pte.get_address() };
+    const frame = pmm.PhysFrame{ .address = pte.getAddress() };
     return @ptrFromInt(frame.virtualAddress(base));
 }
@@ -160,7 +160,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con
     if (user_address != user_page) {
         const pte = getEntry(mapper, base, user_page) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.get_address() };
+        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
 
         const amount: usize = @min((platform.PAGE_SIZE - remainder), count);
         const virt = frame.virtualAddress(base) + remainder;
@@ -173,7 +173,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con
     while (count > 0) {
         const pte = getEntry(mapper, base, user_address) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.get_address() };
+        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
 
         const amount: usize = @min(platform.PAGE_SIZE, count);
         const virt = frame.virtualAddress(base);
@@ -196,7 +196,7 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size
     if (user_address != user_page) {
         const pte = getEntry(mapper, base, user_page) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.get_address() };
+        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
 
         const amount: usize = @min((platform.PAGE_SIZE - remainder), count);
         const virt = frame.virtualAddress(base) + remainder;
@@ -208,7 +208,7 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size
     while (count > 0) {
         const pte = getEntry(mapper, base, user_address) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.get_address() };
+        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
 
         const amount: usize = @min(platform.PAGE_SIZE, count);
         const virt = frame.virtualAddress(base);
@@ -251,9 +251,9 @@ fn lockPageDirectoryFrames(allocator: *pmm.FrameAllocator, directory: *PageDirec
         if (pte.present == 0) continue;
         if ((index < 4) and (pte.larger_pages == 1)) continue;
-        try pmm.lockFrame(allocator, pte.get_address());
+        try pmm.lockFrame(allocator, pte.getAddress());
 
-        const child_table: *PageDirectory = @ptrFromInt(pte.get_address());
+        const child_table: *PageDirectory = @ptrFromInt(pte.getAddress());
         try lockPageDirectoryFrames(allocator, child_table, index - 1);
     }

View File

@@ -71,9 +71,7 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
     init.mapper = mapper;
     init.user_priority = 255;
     thread.arch.initUserRegisters(&init.regs);
-    thread.arch.setArgument(&init.regs, base);
-    thread.addThreadToScheduler(cpu.thisCore(), init);
+    thread.arch.setArguments(&init.regs, base, mapper.phys.address);
 
     const ctx = Context{ .allocator = &allocator, .mapper = mapper, .regs = &init.regs };

View File

@@ -19,3 +19,7 @@ pub fn getPriority(_: *interrupts.InterruptStackFrame, _: *sys.Arguments, retval
     const core = cpu.thisCore();
     retval.* = core.current_thread.user_priority;
 }
+
+pub fn sleep(regs: *interrupts.InterruptStackFrame, args: *sys.Arguments, _: *isize) anyerror!void {
+    _ = thread.startSleep(regs, args.arg0);
+}

View File

@@ -15,7 +15,7 @@ pub const Arguments = struct {
 const SystemCall = *const fn (frame: *interrupts.InterruptStackFrame, args: *Arguments, retval: *isize) anyerror!void;
 
-const syscalls = [_]SystemCall{ print, mem.allocFrame, mem.lockFrame, mem.freeFrame, sched.yield, sched.setPriority, sched.getPriority };
+const syscalls = [_]SystemCall{ print, mem.allocFrame, mem.lockFrame, mem.freeFrame, sched.yield, sched.setPriority, sched.getPriority, sched.sleep };
 
 pub fn invokeSyscall(number: usize, frame: *interrupts.InterruptStackFrame, args: *Arguments, retval: *isize) void {
     if (number >= syscalls.len) {
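invokeSyscall() dispatches purely by index into this array, so the userspace SystemCall enum (in the library hunk further down) must list its members in exactly the same order; sched.sleep lands on index 7. A small sketch of that invariant (the member names before Yield are assumed here, since the earlier part of the userspace enum is not visible in this compare):

    const std = @import("std");

    // Illustrative only: an enum whose members mirror the order of the kernel's
    // syscalls array above. If the two orders ever disagree, Sleep would dispatch
    // to the wrong handler.
    const SyscallNumber = enum(u64) { Print, AllocFrame, LockFrame, FreeFrame, Yield, SetPriority, GetPriority, Sleep };

    test "Sleep dispatches to index 7" {
        try std.testing.expectEqual(@as(u64, 7), @intFromEnum(SyscallNumber.Sleep));
    }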

View File

@@ -9,6 +9,7 @@ pub const ThreadState = enum {
     Inactive,
     Running,
     Blocked,
+    Sleeping,
 };
 
 pub const ThreadControlBlock = struct {
@@ -16,11 +17,14 @@ pub const ThreadControlBlock = struct {
     mapper: ?vmm.MemoryMapper,
     regs: interrupts.InterruptStackFrame,
     state: ThreadState,
-    ticks: u64,
     user_priority: u8,
+
+    // Managed by scheduleNewTask(), no need to set manually.
+    ticks: u64,
+    // Managed by addThreadToPriorityQueue(), no need to set manually.
     current_priority: u32,
+    // Managed by startSleep(), no need to set manually.
+    sleep_ticks: u64,
 };
 
 pub const ThreadList = std.DoublyLinkedList(ThreadControlBlock);
@@ -41,7 +45,7 @@ pub fn enterTask(task: *ThreadControlBlock) noreturn {
     arch.enterTask(&task.regs, vmm.PHYSICAL_MAPPING_BASE, directory.address);
 }
 
-pub fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock) void {
+fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock) void {
     const core = cpu.thisCore();
 
     core.current_thread.regs = regs.*;
@@ -57,7 +61,7 @@ pub fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadContro
 }
 
 pub fn fetchNewTask(core: *cpu.arch.Core, should_idle_if_not_found: bool) ?*ThreadControlBlock {
-    const last = core.thread_list.last orelse {
+    const last = core.active_thread_list.last orelse {
         if (should_idle_if_not_found) {
             return &core.idle_thread.data;
         } else return null;
@@ -71,7 +75,7 @@ pub fn fetchNewTask(core: *cpu.arch.Core, should_idle_if_not_found: bool) ?*Thre
 }
 
 pub fn scheduleNewTask(core: *cpu.arch.Core, regs: *interrupts.InterruptStackFrame, new_thread: *ThreadControlBlock) *ThreadControlBlock {
-    if (core.thread_list.first) |first| {
+    if (core.active_thread_list.first) |first| {
         first.data.current_priority +|= 4;
     }
@@ -85,6 +89,11 @@ pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
 pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
     const core = cpu.thisCore();
 
+    updateSleepQueue(core);
+    while (popSleepQueue(core)) |thread| {
+        reviveThread(core, thread);
+    }
+
     core.current_thread.ticks -|= 1;
     if (core.current_thread.ticks == 0) {
         const new_thread = fetchNewTask(core, false) orelse return;
@@ -93,12 +102,81 @@ pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
     }
 }
 
-var next_id: std.atomic.Value(u64) = std.atomic.Value(u64).init(1);
+pub fn block(regs: *interrupts.InterruptStackFrame) *ThreadControlBlock {
+    const core = cpu.thisCore();
 
-pub fn addThreadToScheduler(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
+    // fetchNewTask() always returns a thread if should_idle_if_not_found is set to true.
+    const new_thread = fetchNewTask(core, true) orelse unreachable;
+
+    const current_thread = scheduleNewTask(core, regs, new_thread);
+
+    current_thread.state = .Blocked;
+
+    return current_thread;
+}
+
+pub fn startSleep(regs: *interrupts.InterruptStackFrame, ticks: u64) *ThreadControlBlock {
+    const core = cpu.thisCore();
+
+    // fetchNewTask() always returns a thread if should_idle_if_not_found is set to true.
+    const new_thread = fetchNewTask(core, true) orelse unreachable;
+
+    const current_thread = scheduleNewTask(core, regs, new_thread);
+
+    current_thread.state = .Sleeping;
+
+    addThreadToSleepQueue(core, current_thread, ticks);
+
+    return current_thread;
+}
+
+fn addThreadToSleepQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock, ticks: u64) void {
+    thread.sleep_ticks = ticks;
+
+    var it: ?*ThreadList.Node = core.sleeping_thread_list.first;
+    while (it) |n| : (it = n.next) {
+        if (thread.sleep_ticks <= n.data.sleep_ticks) {
+            n.data.sleep_ticks -|= thread.sleep_ticks;
+            core.sleeping_thread_list.insertBefore(n, @fieldParentPtr("data", thread));
+            return;
+        }
+        thread.sleep_ticks -|= n.data.sleep_ticks;
+    }
+
+    core.sleeping_thread_list.append(@fieldParentPtr("data", thread));
+}
+
+pub fn removeThreadFromSleepQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
+    const node: *ThreadList.Node = @fieldParentPtr("data", thread);
+
+    if (node.next) |n| {
+        n.data.sleep_ticks +|= thread.sleep_ticks;
+    }
+
+    core.sleeping_thread_list.remove(node);
+
+    reviveThread(core, thread);
+}
+
+fn updateSleepQueue(core: *cpu.arch.Core) void {
+    const first = core.sleeping_thread_list.first orelse return;
+
+    first.data.sleep_ticks -|= 1;
+}
+
+fn popSleepQueue(core: *cpu.arch.Core) ?*ThreadControlBlock {
+    const first = core.sleeping_thread_list.first orelse return null;
+
+    if (first.data.sleep_ticks == 0) {
+        core.sleeping_thread_list.remove(first);
+        return &first.data;
+    }
+
+    return null;
+}
+
+pub fn reviveThread(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
     thread.state = .Running;
     addThreadToPriorityQueue(core, thread);
 }
+
+var next_id: std.atomic.Value(u64) = std.atomic.Value(u64).init(1);
 
 pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlBlock {
     const frame = try pmm.allocFrame(allocator);
@@ -116,25 +194,25 @@ pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlB
 pub fn addThreadToPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
     thread.current_priority = thread.user_priority;
 
-    var it: ?*ThreadList.Node = core.thread_list.first;
+    var it: ?*ThreadList.Node = core.active_thread_list.first;
     while (it) |n| : (it = n.next) {
         if (thread.current_priority <= n.data.current_priority) {
             n.data.current_priority -|= thread.current_priority;
-            core.thread_list.insertBefore(n, @fieldParentPtr("data", thread));
+            core.active_thread_list.insertBefore(n, @fieldParentPtr("data", thread));
             return;
         }
         thread.current_priority -|= n.data.current_priority;
     }
 
-    core.thread_list.append(@fieldParentPtr("data", thread));
+    core.active_thread_list.append(@fieldParentPtr("data", thread));
 }
 
-pub fn removeThreadFromPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
+fn removeThreadFromPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
     const node: *ThreadList.Node = @fieldParentPtr("data", thread);
 
     if (node.next) |n| {
         n.data.current_priority +|= thread.current_priority;
     }
 
-    core.thread_list.remove(node);
+    core.active_thread_list.remove(node);
 }
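
The sleep queue added above is a delta queue: addThreadToSleepQueue() stores each thread's sleep_ticks relative to the thread in front of it, so updateSleepQueue() only ever decrements the head and popSleepQueue() wakes every entry whose delta has reached zero. A standalone sketch of the same bookkeeping (illustrative, not repository code; it assumes the managed std.ArrayList API of the Zig version this repository appears to target):

    const std = @import("std");

    // Each entry stores its wake-up time relative to the entry in front of it.
    const Sleeper = struct { name: []const u8, delta: u64 };

    fn insert(list: *std.ArrayList(Sleeper), name: []const u8, ticks: u64) !void {
        var remaining = ticks;
        for (list.items, 0..) |*s, i| {
            if (remaining <= s.delta) {
                // Insert in front of the later sleeper and shrink its delta,
                // exactly like addThreadToSleepQueue() does with insertBefore().
                s.delta -= remaining;
                try list.insert(i, .{ .name = name, .delta = remaining });
                return;
            }
            remaining -= s.delta;
        }
        try list.append(.{ .name = name, .delta = remaining });
    }

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var list = std.ArrayList(Sleeper).init(gpa.allocator());
        defer list.deinit();

        try insert(&list, "a", 30); // wants to wake at tick 30
        try insert(&list, "b", 10); // wants to wake at tick 10
        try insert(&list, "c", 25); // wants to wake at tick 25

        // Prints b: +10, c: +15, a: +5 -- only the head's delta has to be
        // decremented on every timer tick, which is all updateSleepQueue() does.
        for (list.items) |s| std.debug.print("{s}: +{d}\n", .{ s.name, s.delta });
    }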

system/init/arch/vm.zig (new file, +9 lines)
View File

@@ -0,0 +1,9 @@
const std = @import("std");
const target = @import("builtin").target;

pub const arch = switch (target.cpu.arch) {
    .x86_64 => @import("x86_64/vm.zig"),
    else => {
        @compileError("unsupported architecture");
    },
};

View File

@@ -0,0 +1,182 @@
const std = @import("std");
const kernel = @import("../../kernel.zig");

const MapError = error{
    MemoryAlreadyInUse,
    MemoryNotInUse,
    OutOfMemory,
};

const PhysFrame = struct {
    address: u64,

    pub fn virtualAddress(self: *const PhysFrame, base: usize) usize {
        return base + self.address;
    }

    pub fn virtualPointer(self: *const PhysFrame, comptime T: type, base: usize) *T {
        return @ptrFromInt(self.virtualAddress(base));
    }
};

const PageTableEntry = packed struct {
    present: u1,
    read_write: u1,
    user: u1,
    write_through: u1,
    cache_disabled: u1,
    accessed: u1,
    reserved: u1,
    larger_pages: u1,
    global: u1,
    available: u3,
    address: u48,
    available2: u3,
    no_execute: u1,

    pub fn setAddress(self: *PageTableEntry, address: u64) void {
        self.address = @intCast(address >> 12);
    }

    pub fn getAddress(self: *PageTableEntry) u64 {
        return self.address << 12;
    }

    pub fn clear(self: *PageTableEntry) void {
        self.* = std.mem.zeroes(PageTableEntry);
    }
};

const PageDirectory = struct {
    entries: [512]PageTableEntry,
};

pub const Flags = enum(u32) {
    None = 0,
    ReadWrite = 1,
    User = 2,
    NoExecute = 4,
    WriteThrough = 8,
    CacheDisable = 16,
    Global = 32,
};

const PageTableIndexes = struct {
    level4: u24,
    level3: u24,
    level2: u24,
    level1: u24,
};

pub const MemoryMapper = struct {
    cr3: PhysFrame,
    directory: *PageDirectory,
    base: u64,

    pub fn create(frame: PhysFrame, base: usize) MemoryMapper {
        return .{ .cr3 = frame, .directory = frame.virtualPointer(PageDirectory, base), .base = base };
    }
};

fn calculatePageTableIndexes(address: usize) PageTableIndexes {
    return .{ .level4 = @intCast((address >> 39) & 0o777), .level3 = @intCast((address >> 30) & 0o777), .level2 = @intCast((address >> 21) & 0o777), .level1 = @intCast((address >> 12) & 0o777) };
}

fn hasFlag(flags: u32, flag: Flags) u1 {
    return switch ((flags & @intFromEnum(flag)) > 0) {
        true => 1,
        false => 0,
    };
}

fn updatePageTableEntry(entry: *PageTableEntry, phys: PhysFrame, flags: u32) void {
    entry.clear();
    entry.present = 1;
    entry.read_write = hasFlag(flags, Flags.ReadWrite);
    entry.user = hasFlag(flags, Flags.User);
    entry.write_through = hasFlag(flags, Flags.WriteThrough);
    entry.cache_disabled = hasFlag(flags, Flags.CacheDisable);
    entry.no_execute = hasFlag(flags, Flags.NoExecute);
    entry.global = hasFlag(flags, Flags.Global);
    entry.setAddress(phys.address);
}

fn setUpParentPageTableEntry(mapper: *const MemoryMapper, pte: *PageTableEntry, flags: u32) !void {
    if (pte.present == 0) {
        pte.clear();
        const frame = PhysFrame{ .address = try kernel.allocFrame() };
        pte.present = 1;
        pte.setAddress(frame.address);
        getTable(pte, mapper.base).* = std.mem.zeroes(PageDirectory);
    }
    if (hasFlag(flags, Flags.ReadWrite) == 1) pte.read_write = 1;
    if (hasFlag(flags, Flags.User) == 1) pte.user = 1;
}

fn getTable(mapper: *const MemoryMapper, pte: *PageTableEntry) *PageDirectory {
    const frame = PhysFrame{ .address = pte.getAddress() };
    return frame.virtualPointer(PageDirectory, mapper.base);
}

pub fn map(mapper: *const MemoryMapper, virt_address: u64, phys: PhysFrame, flags: u32) !void {
    const indexes = calculatePageTableIndexes(virt_address);

    const l4 = &mapper.directory.entries[indexes.level4];
    try setUpParentPageTableEntry(mapper, l4, flags);

    const l3 = &getTable(mapper, l4).entries[indexes.level3];
    if (l3.larger_pages == 1) return error.MemoryAlreadyInUse;
    try setUpParentPageTableEntry(mapper, l3, flags);

    const l2 = &getTable(mapper, l3).entries[indexes.level2];
    if (l2.larger_pages == 1) return error.MemoryAlreadyInUse;
    try setUpParentPageTableEntry(mapper, l2, flags);

    const l1 = &getTable(mapper, l2).entries[indexes.level1];
    if (l1.present == 1) return error.MemoryAlreadyInUse;

    updatePageTableEntry(l1, phys, flags);
}

pub fn remap(mapper: *const MemoryMapper, virt_address: u64, phys: ?PhysFrame, flags: u32) !PhysFrame {
    const entry = getEntry(mapper, virt_address) orelse return error.MemoryNotInUse;

    const old_frame = PhysFrame{ .address = entry.getAddress() };
    const frame = phys orelse old_frame;

    updatePageTableEntry(entry, frame, flags);

    return old_frame;
}

pub fn unmap(mapper: *const MemoryMapper, virt_address: u64) !PhysFrame {
    const entry = getEntry(mapper, virt_address) orelse return error.MemoryNotInUse;

    const frame = PhysFrame{ .address = entry.getAddress() };

    entry.clear();

    return frame;
}

pub fn getEntry(mapper: MemoryMapper, virt_address: u64) ?*PageTableEntry {
    const indexes = calculatePageTableIndexes(virt_address);

    const l4 = &mapper.directory.entries[indexes.level4];
    if (l4.present == 0) return null;

    const l3 = &getTable(mapper, l4).entries[indexes.level3];
    if (l3.present == 0) return null;
    if (l3.larger_pages == 1) return l3;

    const l2 = &getTable(mapper, l3).entries[indexes.level2];
    if (l2.present == 0) return null;
    if (l2.larger_pages == 1) return l2;

    const l1 = &getTable(mapper, l2).entries[indexes.level1];
    if (l1.present == 0) return null;

    return l1;
}

pub fn getPhysical(mapper: MemoryMapper, virt_address: u64) ?PhysFrame {
    const entry = getEntry(mapper, virt_address) orelse return null;

    return PhysFrame{ .address = entry.getAddress() };
}
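
calculatePageTableIndexes() splits a virtual address into four 9-bit table indexes, one per level of the x86_64 4-level paging hierarchy (bits 39-47, 30-38, 21-29, 12-20). A small standalone check of that bit math (not from the repository), using the address init maps in the last file of this compare:

    const std = @import("std");

    // 0x6000000 is 96 MiB: it sits in the first 512 GiB (level 4 index 0) and the
    // first 1 GiB (level 3 index 0), in the 48th 2 MiB region (level 2 index 48),
    // at a 2 MiB-aligned offset (level 1 index 0).
    test "page table indexes of 0x6000000" {
        const address: usize = 0x6000000;
        try std.testing.expectEqual(@as(usize, 0), (address >> 39) & 0o777);
        try std.testing.expectEqual(@as(usize, 0), (address >> 30) & 0o777);
        try std.testing.expectEqual(@as(usize, 48), (address >> 21) & 0o777);
        try std.testing.expectEqual(@as(usize, 0), (address >> 12) & 0o777);
    }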

View File

@@ -6,6 +6,7 @@ const SystemCall = enum(u64) {
     Yield,
     SetPriority,
     GetPriority,
+    Sleep,
 };
 
 const SystemError = error{
@@ -49,3 +50,7 @@ pub fn setPriority(priority: u8) void {
 pub fn getPriority() u8 {
     return @truncate(@as(u64, @bitCast(syscall(.GetPriority, 0))));
 }
+
+pub fn sleep(ms: u64) void {
+    _ = syscall(.Sleep, ms);
+}
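
Note that the argument is forwarded unchanged through the Sleep syscall into startSleep(), so it is counted in scheduler ticks; it corresponds to milliseconds only if the preemption timer fires once per millisecond, which this compare does not show. A usage sketch under that assumption (not repository code):

    const kernel = @import("kernel.zig");

    // Blocks the calling thread for roughly one second instead of busy-looping on
    // yield(), assuming a 1 kHz preemption timer.
    pub fn waitAboutOneSecond() void {
        kernel.sleep(1000);
    }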

View File

@@ -1,13 +1,19 @@
 const kernel = @import("kernel.zig");
+const vm = @import("arch/vm.zig").arch;
+
+export fn _start(base: u64, address: u64) callconv(.C) noreturn {
+    const mapper = vm.MemoryMapper.create(.{ .address = address }, base);
 
-export fn _start(base: u64) callconv(.C) noreturn {
     kernel.print(base);
-    kernel.print(kernel.getPriority());
+    kernel.print(address);
+    kernel.print(@intFromPtr(mapper.directory));
     kernel.setPriority(128);
     kernel.print(kernel.getPriority());
+    vm.map(&mapper, 0x6000000, kernel.allocFrame(), @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.User));
 
-    while (true) {
-        kernel.yield();
+    var counter: u64 = 0;
+
+    while (true) : (counter += 4) {
+        kernel.sleep(1000);
+        kernel.print(counter);
     }
 }