Compare commits
No commits in common. "b2d89ab4fd33a8683e667d4a7f12a523eaa90285" and "e27ef97682dcba3038f9e47e4c3d1685c70ad30a" have entirely different histories.
b2d89ab4fd...e27ef97682
@@ -20,8 +20,7 @@ pub fn setupCore(allocator: *pmm.FrameAllocator) !void {
     const core: *arch.Core = @ptrFromInt(frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE));
     core.id = 0; // FIXME: Actually check core id
-    core.active_thread_list = .{};
-    core.sleeping_thread_list = .{};
+    core.thread_list = .{};

     const idle_thread = &core.idle_thread.data;
@@ -1,3 +1,3 @@
 const thread = @import("../../thread.zig");

-pub const Core = struct { id: u32, active_thread_list: thread.ThreadList, sleeping_thread_list: thread.ThreadList, current_thread: *thread.ThreadControlBlock, idle_thread: thread.ThreadList.Node };
+pub const Core = struct { id: u32, thread_list: thread.ThreadList, current_thread: *thread.ThreadControlBlock, idle_thread: thread.ThreadList.Node };
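In both versions of Core the idle thread is stored by value as a full list node, so the scheduler can always fall back to `&core.idle_thread.data` without allocating. A minimal self-contained sketch of that layout, using stand-in types rather than the repo's ThreadList:

    const std = @import("std");

    // Stand-in types; the real Core embeds a thread.ThreadList.Node.
    const Tcb = struct { id: u32 = 0 };
    const Node = struct { data: Tcb };

    const Core = struct {
        id: u32,
        idle_thread: Node, // lives inside the per-core struct, never allocated
    };

    fn idleOf(core: *Core) *Tcb {
        return &core.idle_thread.data;
    }

    test "the idle thread needs no allocation" {
        var core = Core{ .id = 0, .idle_thread = .{ .data = .{ .id = 42 } } };
        try std.testing.expectEqual(@as(u32, 42), idleOf(&core).id);
    }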
@@ -51,9 +51,8 @@ pub fn setAddress(regs: *interrupts.InterruptStackFrame, address: u64) void {
     regs.rip = address;
 }

-pub fn setArguments(regs: *interrupts.InterruptStackFrame, arg0: u64, arg1: u64) void {
-    regs.rdi = arg0;
-    regs.rsi = arg1;
+pub fn setArgument(regs: *interrupts.InterruptStackFrame, argument: u64) void {
+    regs.rdi = argument;
 }

 pub fn setStack(regs: *interrupts.InterruptStackFrame, stack: u64) void {
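setArguments seeds the saved rdi and rsi registers because those are the first two integer-argument registers in the System V AMD64 calling convention, so whatever is written there arrives as the new thread's entry-point parameters. A trimmed-down sketch (the Frame type below keeps only the fields used and is not the repo's InterruptStackFrame):

    const std = @import("std");

    // Illustrative stand-in for the saved register frame.
    const Frame = struct {
        rdi: u64 = 0,
        rsi: u64 = 0,
        rip: u64 = 0,
    };

    fn setArguments(regs: *Frame, arg0: u64, arg1: u64) void {
        regs.rdi = arg0; // first integer argument
        regs.rsi = arg1; // second integer argument
    }

    test "arguments land in rdi and rsi" {
        var frame = Frame{};
        setArguments(&frame, 0x1000, 0xdead);
        try std.testing.expectEqual(@as(u64, 0x1000), frame.rdi);
        try std.testing.expectEqual(@as(u64, 0xdead), frame.rsi);
    }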
@@ -23,11 +23,11 @@ pub const PageTableEntry = packed struct {
     available2: u3,
     no_execute: u1,

-    pub fn setAddress(self: *PageTableEntry, address: u64) void {
+    pub fn set_address(self: *PageTableEntry, address: u64) void {
         self.address = @intCast(address >> 12);
     }

-    pub fn getAddress(self: *PageTableEntry) u64 {
+    pub fn get_address(self: *PageTableEntry) u64 {
         return self.address << 12;
     }

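Both spellings store the physical frame number rather than the raw address: the low 12 bits of a 4 KiB-aligned physical address are always zero, so the entry keeps address >> 12 and shifts back on read. A stand-alone sketch with an illustrative field layout (not the repo's exact bit widths):

    const std = @import("std");

    // Hypothetical entry: low flag bits collapsed, frame number in the middle.
    const Entry = packed struct(u64) {
        present: u1 = 0,
        writable: u1 = 0,
        other_flags: u10 = 0, // remaining low flag bits, collapsed for brevity
        frame: u40 = 0,       // physical address bits 12..51
        reserved: u11 = 0,
        no_execute: u1 = 0,

        fn setAddress(self: *Entry, phys: u64) void {
            self.frame = @intCast(phys >> 12);
        }

        fn getAddress(self: Entry) u64 {
            return @as(u64, self.frame) << 12;
        }
    };

    test "frame number round-trips a page-aligned address" {
        var e = Entry{};
        e.setAddress(0x1234_5000);
        try std.testing.expectEqual(@as(u64, 0x1234_5000), e.getAddress());
    }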
@@ -86,7 +86,7 @@ fn updatePageTableEntry(entry: *PageTableEntry, phys: pmm.PhysFrame, flags: u32)
     entry.cache_disabled = hasFlag(flags, Flags.CacheDisable);
     entry.no_execute = hasFlag(flags, Flags.NoExecute);
     entry.global = hasFlag(flags, Flags.Global);
-    entry.setAddress(phys.address);
+    entry.set_address(phys.address);
 }

 fn setUpParentPageTableEntry(allocator: *pmm.FrameAllocator, pte: *PageTableEntry, flags: u32, base: usize) !void {
@@ -94,7 +94,7 @@ fn setUpParentPageTableEntry(allocator: *pmm.FrameAllocator, pte: *PageTableEntr
         pte.clear();
         const frame = try pmm.allocFrame(allocator);
         pte.present = 1;
-        pte.setAddress(frame.address);
+        pte.set_address(frame.address);
         getTable(pte, base).* = std.mem.zeroes(PageDirectory);
     }
     if (hasFlag(flags, Flags.ReadWrite) == 1) pte.read_write = 1;
@@ -102,7 +102,7 @@ fn setUpParentPageTableEntry(allocator: *pmm.FrameAllocator, pte: *PageTableEntr
 }

 fn getTable(pte: *PageTableEntry, base: usize) *allowzero PageDirectory {
-    const frame = pmm.PhysFrame{ .address = pte.getAddress() };
+    const frame = pmm.PhysFrame{ .address = pte.get_address() };
     return @ptrFromInt(frame.virtualAddress(base));
 }

@@ -160,7 +160,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con

     if (user_address != user_page) {
         const pte = getEntry(mapper, base, user_page) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
+        const frame = pmm.PhysFrame{ .address = pte.get_address() };
         const amount: usize = @min((platform.PAGE_SIZE - remainder), count);
         const virt = frame.virtualAddress(base) + remainder;
@@ -173,7 +173,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con

     while (count > 0) {
         const pte = getEntry(mapper, base, user_address) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
+        const frame = pmm.PhysFrame{ .address = pte.get_address() };
         const amount: usize = @min(platform.PAGE_SIZE, count);
         const virt = frame.virtualAddress(base);
@@ -196,7 +196,7 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size

     if (user_address != user_page) {
         const pte = getEntry(mapper, base, user_page) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
+        const frame = pmm.PhysFrame{ .address = pte.get_address() };
         const amount: usize = @min((platform.PAGE_SIZE - remainder), count);
         const virt = frame.virtualAddress(base) + remainder;
@@ -208,7 +208,7 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size

     while (count > 0) {
         const pte = getEntry(mapper, base, user_address) orelse return error.MemoryNotInUse;
-        const frame = pmm.PhysFrame{ .address = pte.getAddress() };
+        const frame = pmm.PhysFrame{ .address = pte.get_address() };
         const amount: usize = @min(platform.PAGE_SIZE, count);
         const virt = frame.virtualAddress(base);
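copyToUser and memsetUser walk the user range one page at a time: a first, shorter chunk up to the next page boundary when the start address is unaligned, then whole pages. A sketch of just that chunk arithmetic (PAGE_SIZE assumed to be 4096; firstChunk is an illustrative helper, not a function from the diff):

    const std = @import("std");

    const PAGE_SIZE: usize = 4096;

    // Size of the first chunk of a user copy: either the remainder of the
    // current page, or everything if the request is small enough.
    fn firstChunk(user: usize, count: usize) usize {
        const remainder = user % PAGE_SIZE;
        if (remainder == 0) return @min(PAGE_SIZE, count);
        return @min(PAGE_SIZE - remainder, count);
    }

    test "unaligned head is capped at the page boundary" {
        try std.testing.expectEqual(@as(usize, 0x1000 - 0xabc), firstChunk(0x4000_0abc, 0x5000));
        try std.testing.expectEqual(@as(usize, 4096), firstChunk(0x4000_0000, 0x5000));
    }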
@@ -251,9 +251,9 @@ fn lockPageDirectoryFrames(allocator: *pmm.FrameAllocator, directory: *PageDirec
         if (pte.present == 0) continue;
         if ((index < 4) and (pte.larger_pages == 1)) continue;

-        try pmm.lockFrame(allocator, pte.getAddress());
+        try pmm.lockFrame(allocator, pte.get_address());

-        const child_table: *PageDirectory = @ptrFromInt(pte.getAddress());
+        const child_table: *PageDirectory = @ptrFromInt(pte.get_address());

         try lockPageDirectoryFrames(allocator, child_table, index - 1);
     }
@@ -71,7 +71,9 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
     init.mapper = mapper;
     init.user_priority = 255;
     thread.arch.initUserRegisters(&init.regs);
-    thread.arch.setArguments(&init.regs, base, mapper.phys.address);
+    thread.arch.setArgument(&init.regs, base);

     thread.addThreadToScheduler(cpu.thisCore(), init);

+    const ctx = Context{ .allocator = &allocator, .mapper = mapper, .regs = &init.regs };
+
@@ -19,7 +19,3 @@ pub fn getPriority(_: *interrupts.InterruptStackFrame, _: *sys.Arguments, retval
     const core = cpu.thisCore();
     retval.* = core.current_thread.user_priority;
 }
-
-pub fn sleep(regs: *interrupts.InterruptStackFrame, args: *sys.Arguments, _: *isize) anyerror!void {
-    _ = thread.startSleep(regs, args.arg0);
-}
@@ -15,7 +15,7 @@ pub const Arguments = struct {

 const SystemCall = *const fn (frame: *interrupts.InterruptStackFrame, args: *Arguments, retval: *isize) anyerror!void;

-const syscalls = [_]SystemCall{ print, mem.allocFrame, mem.lockFrame, mem.freeFrame, sched.yield, sched.setPriority, sched.getPriority, sched.sleep };
+const syscalls = [_]SystemCall{ print, mem.allocFrame, mem.lockFrame, mem.freeFrame, sched.yield, sched.setPriority, sched.getPriority };

 pub fn invokeSyscall(number: usize, frame: *interrupts.InterruptStackFrame, args: *Arguments, retval: *isize) void {
     if (number >= syscalls.len) {
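invokeSyscall dispatches through a fixed array of function pointers, so adding or dropping sched.sleep in the table is all it takes to make that syscall number valid or invalid. A minimal sketch of the pattern with simplified handler signatures (the real handlers take the interrupt frame, an argument struct, and a return-value pointer):

    const std = @import("std");

    const Handler = *const fn (arg: u64) anyerror!u64;

    fn sysIdentity(arg: u64) anyerror!u64 {
        return arg;
    }

    fn sysIncrement(arg: u64) anyerror!u64 {
        return arg + 1;
    }

    const handlers = [_]Handler{ sysIdentity, sysIncrement };

    fn invoke(number: usize, arg: u64) !u64 {
        // Bounds check before indexing, as invokeSyscall does above.
        if (number >= handlers.len) return error.InvalidSyscall;
        return handlers[number](arg);
    }

    test "unknown syscall numbers are rejected" {
        try std.testing.expectError(error.InvalidSyscall, invoke(99, 0));
        try std.testing.expectEqual(@as(u64, 8), try invoke(1, 7));
    }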
@@ -9,7 +9,6 @@ pub const ThreadState = enum {
     Inactive,
     Running,
     Blocked,
-    Sleeping,
 };

 pub const ThreadControlBlock = struct {
@@ -17,14 +16,11 @@ pub const ThreadControlBlock = struct {
     mapper: ?vmm.MemoryMapper,
     regs: interrupts.InterruptStackFrame,
     state: ThreadState,
-    user_priority: u8,

     // Managed by scheduleNewTask(), no need to set manually.
     ticks: u64,
     // Managed by addThreadToPriorityQueue(), no need to set manually.

+    user_priority: u8,
     current_priority: u32,
-    // Managed by startSleep(), no need to set manually.
-    sleep_ticks: u64,
 };

 pub const ThreadList = std.DoublyLinkedList(ThreadControlBlock);
@@ -45,7 +41,7 @@ pub fn enterTask(task: *ThreadControlBlock) noreturn {
     arch.enterTask(&task.regs, vmm.PHYSICAL_MAPPING_BASE, directory.address);
 }

-fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock) void {
+pub fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock) void {
     const core = cpu.thisCore();

     core.current_thread.regs = regs.*;
@@ -61,7 +57,7 @@ fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlo
 }

 pub fn fetchNewTask(core: *cpu.arch.Core, should_idle_if_not_found: bool) ?*ThreadControlBlock {
-    const last = core.active_thread_list.last orelse {
+    const last = core.thread_list.last orelse {
         if (should_idle_if_not_found) {
             return &core.idle_thread.data;
         } else return null;
@@ -75,7 +71,7 @@
 }

 pub fn scheduleNewTask(core: *cpu.arch.Core, regs: *interrupts.InterruptStackFrame, new_thread: *ThreadControlBlock) *ThreadControlBlock {
-    if (core.active_thread_list.first) |first| {
+    if (core.thread_list.first) |first| {
         first.data.current_priority +|= 4;
     }

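The +|= and -|= operators used throughout the scheduler hunks are Zig's saturating assignments: tick and priority bookkeeping clamps at the integer's bounds instead of wrapping or trapping on overflow. A small illustration:

    const std = @import("std");

    test "saturating arithmetic clamps instead of overflowing" {
        var ticks: u8 = 3;
        ticks -|= 10; // a plain -= here would be an integer underflow
        try std.testing.expectEqual(@as(u8, 0), ticks);

        var priority: u8 = 250;
        priority +|= 10; // clamps at 255, the maximum of u8
        try std.testing.expectEqual(@as(u8, 255), priority);
    }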
@@ -89,11 +85,6 @@
 pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
     const core = cpu.thisCore();

-    updateSleepQueue(core);
-    while (popSleepQueue(core)) |thread| {
-        reviveThread(core, thread);
-    }
-
     core.current_thread.ticks -|= 1;
     if (core.current_thread.ticks == 0) {
         const new_thread = fetchNewTask(core, false) orelse return;
@@ -102,81 +93,12 @@ pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
     }
 }

-pub fn block(regs: *interrupts.InterruptStackFrame) *ThreadControlBlock {
-    const core = cpu.thisCore();
-
-    // fetchNewTask() always returns a thread if should_idle_if_not_found is set to true.
-    const new_thread = fetchNewTask(core, true) orelse unreachable;
-    const current_thread = scheduleNewTask(core, regs, new_thread);
-    current_thread.state = .Blocked;
-
-    return current_thread;
-}
-
-pub fn startSleep(regs: *interrupts.InterruptStackFrame, ticks: u64) *ThreadControlBlock {
-    const core = cpu.thisCore();
-
-    // fetchNewTask() always returns a thread if should_idle_if_not_found is set to true.
-    const new_thread = fetchNewTask(core, true) orelse unreachable;
-    const current_thread = scheduleNewTask(core, regs, new_thread);
-    current_thread.state = .Sleeping;
-    addThreadToSleepQueue(core, current_thread, ticks);
-
-    return current_thread;
-}
-
-fn addThreadToSleepQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock, ticks: u64) void {
-    thread.sleep_ticks = ticks;
-
-    var it: ?*ThreadList.Node = core.sleeping_thread_list.first;
-    while (it) |n| : (it = n.next) {
-        if (thread.sleep_ticks <= n.data.sleep_ticks) {
-            n.data.sleep_ticks -|= thread.sleep_ticks;
-            core.sleeping_thread_list.insertBefore(n, @fieldParentPtr("data", thread));
-            return;
-        }
-        thread.sleep_ticks -|= n.data.sleep_ticks;
-    }
-
-    core.sleeping_thread_list.append(@fieldParentPtr("data", thread));
-}
-
-pub fn removeThreadFromSleepQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
-    const node: *ThreadList.Node = @fieldParentPtr("data", thread);
-
-    if (node.next) |n| {
-        n.data.sleep_ticks +|= thread.sleep_ticks;
-    }
-
-    core.sleeping_thread_list.remove(node);
-
-    reviveThread(core, thread);
-}
-
-fn updateSleepQueue(core: *cpu.arch.Core) void {
-    const first = core.sleeping_thread_list.first orelse return;
-
-    first.data.sleep_ticks -|= 1;
-}
-
-fn popSleepQueue(core: *cpu.arch.Core) ?*ThreadControlBlock {
-    const first = core.sleeping_thread_list.first orelse return null;
-
-    if (first.data.sleep_ticks == 0) {
-        core.sleeping_thread_list.remove(first);
-        return &first.data;
-    }
-
-    return null;
-}
-
-pub fn reviveThread(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
-    thread.state = .Running;
+pub fn addThreadToScheduler(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
     addThreadToPriorityQueue(core, thread);
 }

 var next_id: std.atomic.Value(u64) = std.atomic.Value(u64).init(1);

 pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlBlock {
     const frame = try pmm.allocFrame(allocator);
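addThreadToSleepQueue above (and addThreadToPriorityQueue below) keep a differential queue: each node stores its wait relative to the node in front of it, so the timer only ever decrements the head, and insertion subtracts as it walks the list. A self-contained sketch of that insertion with a stand-in node type, not the repo's ThreadList:

    const std = @import("std");

    const Node = struct {
        ticks: u64,
        next: ?*Node = null,
    };

    // Insert with an absolute tick count; the queue stores deltas.
    fn insert(head: *?*Node, new_node: *Node, ticks: u64) void {
        new_node.ticks = ticks;
        new_node.next = null;
        var it = head;
        while (it.*) |n| : (it = &n.next) {
            if (new_node.ticks <= n.ticks) {
                // Everything behind us now waits relative to us.
                n.ticks -= new_node.ticks;
                new_node.next = n;
                break;
            }
            // We wait relative to everything already in front of us.
            new_node.ticks -= n.ticks;
        }
        it.* = new_node;
    }

    test "absolute waits become deltas" {
        var head: ?*Node = null;
        var a = Node{ .ticks = 0 };
        var b = Node{ .ticks = 0 };
        var c = Node{ .ticks = 0 };
        insert(&head, &a, 10);
        insert(&head, &b, 25);
        insert(&head, &c, 15);
        // Queue order: a (10), c (5), b (10); the deltas sum back to 10, 15, 25.
        try std.testing.expectEqual(@as(u64, 10), head.?.ticks);
        try std.testing.expectEqual(@as(u64, 5), head.?.next.?.ticks);
        try std.testing.expectEqual(@as(u64, 10), head.?.next.?.next.?.ticks);
    }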
@@ -194,25 +116,25 @@ pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlB
 pub fn addThreadToPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
     thread.current_priority = thread.user_priority;

-    var it: ?*ThreadList.Node = core.active_thread_list.first;
+    var it: ?*ThreadList.Node = core.thread_list.first;
     while (it) |n| : (it = n.next) {
         if (thread.current_priority <= n.data.current_priority) {
             n.data.current_priority -|= thread.current_priority;
-            core.active_thread_list.insertBefore(n, @fieldParentPtr("data", thread));
+            core.thread_list.insertBefore(n, @fieldParentPtr("data", thread));
             return;
         }
         thread.current_priority -|= n.data.current_priority;
     }

-    core.active_thread_list.append(@fieldParentPtr("data", thread));
+    core.thread_list.append(@fieldParentPtr("data", thread));
 }

-fn removeThreadFromPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
+pub fn removeThreadFromPriorityQueue(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
     const node: *ThreadList.Node = @fieldParentPtr("data", thread);

     if (node.next) |n| {
         n.data.current_priority +|= thread.current_priority;
     }

-    core.active_thread_list.remove(node);
+    core.thread_list.remove(node);
 }
@@ -1,9 +0,0 @@
-const std = @import("std");
-const target = @import("builtin").target;
-
-pub const arch = switch (target.cpu.arch) {
-    .x86_64 => @import("x86_64/vm.zig"),
-    else => {
-        @compileError("unsupported architecture");
-    },
-};
@@ -1,182 +0,0 @@
-const std = @import("std");
-const kernel = @import("../../kernel.zig");
-
-const MapError = error{
-    MemoryAlreadyInUse,
-    MemoryNotInUse,
-    OutOfMemory,
-};
-
-const PhysFrame = struct {
-    address: u64,
-
-    pub fn virtualAddress(self: *const PhysFrame, base: usize) usize {
-        return base + self.address;
-    }
-
-    pub fn virtualPointer(self: *const PhysFrame, comptime T: type, base: usize) *T {
-        return @ptrFromInt(self.virtualAddress(base));
-    }
-};
-
-const PageTableEntry = packed struct {
-    present: u1,
-    read_write: u1,
-    user: u1,
-    write_through: u1,
-    cache_disabled: u1,
-    accessed: u1,
-    reserved: u1,
-    larger_pages: u1,
-    global: u1,
-    available: u3,
-    address: u48,
-    available2: u3,
-    no_execute: u1,
-
-    pub fn setAddress(self: *PageTableEntry, address: u64) void {
-        self.address = @intCast(address >> 12);
-    }
-
-    pub fn getAddress(self: *PageTableEntry) u64 {
-        return self.address << 12;
-    }
-
-    pub fn clear(self: *PageTableEntry) void {
-        self.* = std.mem.zeroes(PageTableEntry);
-    }
-};
-
-const PageDirectory = struct {
-    entries: [512]PageTableEntry,
-};
-
-pub const Flags = enum(u32) {
-    None = 0,
-    ReadWrite = 1,
-    User = 2,
-    NoExecute = 4,
-    WriteThrough = 8,
-    CacheDisable = 16,
-    Global = 32,
-};
-
-const PageTableIndexes = struct {
-    level4: u24,
-    level3: u24,
-    level2: u24,
-    level1: u24,
-};
-
-pub const MemoryMapper = struct {
-    cr3: PhysFrame,
-    directory: *PageDirectory,
-    base: u64,
-
-    pub fn create(frame: PhysFrame, base: usize) MemoryMapper {
-        return .{ .cr3 = frame, .directory = frame.virtualPointer(PageDirectory, base), .base = base };
-    }
-};
-
-fn calculatePageTableIndexes(address: usize) PageTableIndexes {
-    return .{ .level4 = @intCast((address >> 39) & 0o777), .level3 = @intCast((address >> 30) & 0o777), .level2 = @intCast((address >> 21) & 0o777), .level1 = @intCast((address >> 12) & 0o777) };
-}
-
-fn hasFlag(flags: u32, flag: Flags) u1 {
-    return switch ((flags & @intFromEnum(flag)) > 0) {
-        true => 1,
-        false => 0,
-    };
-}
-
-fn updatePageTableEntry(entry: *PageTableEntry, phys: PhysFrame, flags: u32) void {
-    entry.clear();
-    entry.present = 1;
-    entry.read_write = hasFlag(flags, Flags.ReadWrite);
-    entry.user = hasFlag(flags, Flags.User);
-    entry.write_through = hasFlag(flags, Flags.WriteThrough);
-    entry.cache_disabled = hasFlag(flags, Flags.CacheDisable);
-    entry.no_execute = hasFlag(flags, Flags.NoExecute);
-    entry.global = hasFlag(flags, Flags.Global);
-    entry.setAddress(phys.address);
-}
-
-fn setUpParentPageTableEntry(mapper: *const MemoryMapper, pte: *PageTableEntry, flags: u32) !void {
-    if (pte.present == 0) {
-        pte.clear();
-        const frame = PhysFrame{ .address = try kernel.allocFrame() };
-        pte.present = 1;
-        pte.setAddress(frame.address);
-        getTable(pte, mapper.base).* = std.mem.zeroes(PageDirectory);
-    }
-    if (hasFlag(flags, Flags.ReadWrite) == 1) pte.read_write = 1;
-    if (hasFlag(flags, Flags.User) == 1) pte.user = 1;
-}
-
-fn getTable(mapper: *const MemoryMapper, pte: *PageTableEntry) *PageDirectory {
-    const frame = PhysFrame{ .address = pte.getAddress() };
-    return frame.virtualPointer(PageDirectory, mapper.base);
-}
-
-pub fn map(mapper: *const MemoryMapper, virt_address: u64, phys: PhysFrame, flags: u32) !void {
-    const indexes = calculatePageTableIndexes(virt_address);
-    const l4 = &mapper.directory.entries[indexes.level4];
-    try setUpParentPageTableEntry(mapper, l4, flags);
-
-    const l3 = &getTable(mapper, l4).entries[indexes.level3];
-    if (l3.larger_pages == 1) return error.MemoryAlreadyInUse;
-    try setUpParentPageTableEntry(mapper, l3, flags);
-
-    const l2 = &getTable(mapper, l3).entries[indexes.level2];
-    if (l2.larger_pages == 1) return error.MemoryAlreadyInUse;
-    try setUpParentPageTableEntry(mapper, l2, flags);
-
-    const l1 = &getTable(mapper, l2).entries[indexes.level1];
-    if (l1.present == 1) return error.MemoryAlreadyInUse;
-    updatePageTableEntry(l1, phys, flags);
-}
-
-pub fn remap(mapper: *const MemoryMapper, virt_address: u64, phys: ?PhysFrame, flags: u32) !PhysFrame {
-    const entry = getEntry(mapper, virt_address) orelse return error.MemoryNotInUse;
-    const old_frame = PhysFrame{ .address = entry.getAddress() };
-    const frame = phys orelse old_frame;
-
-    updatePageTableEntry(entry, frame, flags);
-
-    return old_frame;
-}
-
-pub fn unmap(mapper: *const MemoryMapper, virt_address: u64) !PhysFrame {
-    const entry = getEntry(mapper, virt_address) orelse return error.MemoryNotInUse;
-
-    const frame = PhysFrame{ .address = entry.getAddress() };
-
-    entry.clear();
-
-    return frame;
-}
-
-pub fn getEntry(mapper: MemoryMapper, virt_address: u64) ?*PageTableEntry {
-    const indexes = calculatePageTableIndexes(virt_address);
-    const l4 = &mapper.directory.entries[indexes.level4];
-    if (l4.present == 0) return null;
-
-    const l3 = &getTable(mapper, l4).entries[indexes.level3];
-    if (l3.present == 0) return null;
-    if (l3.larger_pages == 1) return l3;
-
-    const l2 = &getTable(mapper, l3).entries[indexes.level2];
-    if (l2.present == 0) return null;
-    if (l2.larger_pages == 1) return l2;
-
-    const l1 = &getTable(mapper, l2).entries[indexes.level1];
-    if (l1.present == 0) return null;
-
-    return l1;
-}
-
-pub fn getPhysical(mapper: MemoryMapper, virt_address: u64) ?PhysFrame {
-    const entry = getEntry(mapper, virt_address) orelse return null;
-
-    return PhysFrame{ .address = entry.getAddress() };
-}
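calculatePageTableIndexes in the deleted file splits a virtual address into four 9-bit indexes (0o777 == 0x1ff), one per paging level, starting at bit 12. A worked example with an assumed higher-half address:

    const std = @import("std");

    // Extract the 9-bit page-table index that starts at the given bit.
    fn indexAt(address: u64, shift: u6) u64 {
        return (address >> shift) & 0o777;
    }

    test "0xffff_8000_0012_3456 splits into the expected indexes" {
        const addr: u64 = 0xffff_8000_0012_3456;
        try std.testing.expectEqual(@as(u64, 0o400), indexAt(addr, 39)); // level 4
        try std.testing.expectEqual(@as(u64, 0), indexAt(addr, 30)); // level 3
        try std.testing.expectEqual(@as(u64, 0), indexAt(addr, 21)); // level 2
        try std.testing.expectEqual(@as(u64, 0x123), indexAt(addr, 12)); // level 1
    }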
@@ -6,7 +6,6 @@ const SystemCall = enum(u64) {
     Yield,
     SetPriority,
     GetPriority,
-    Sleep,
 };

 const SystemError = error{
@@ -50,7 +49,3 @@ pub fn setPriority(priority: u8) void {
 pub fn getPriority() u8 {
     return @truncate(@as(u64, @bitCast(syscall(.GetPriority, 0))));
 }
-
-pub fn sleep(ms: u64) void {
-    _ = syscall(.Sleep, ms);
-}
@@ -1,19 +1,13 @@
 const kernel = @import("kernel.zig");
-const vm = @import("arch/vm.zig").arch;

-export fn _start(base: u64, address: u64) callconv(.C) noreturn {
-    const mapper = vm.MemoryMapper.create(.{ .address = address }, base);
-
+export fn _start(base: u64) callconv(.C) noreturn {
     kernel.print(base);
-    kernel.print(address);
-    kernel.print(@intFromPtr(mapper.directory));
+    kernel.print(kernel.getPriority());

-    vm.map(&mapper, 0x6000000, kernel.allocFrame(), @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.User));
+    kernel.setPriority(128);
+    kernel.print(kernel.getPriority());

-    var counter: u64 = 0;
-
-    while (true) : (counter += 4) {
-        kernel.sleep(1000);
-        kernel.print(counter);
+    while (true) {
+        kernel.yield();
     }
 }