Compare commits
No commits in common. "614ed80980e0ac685a3c2f6a561fe7580a685664" and "654b228f105530614983f50cf3f3850d9b8ee62c" have entirely different histories.
614ed80980
...
654b228f10
@ -1,39 +0,0 @@
|
||||
const std = @import("std");
|
||||
const target = @import("builtin").target;
|
||||
const thread = @import("../thread.zig");
|
||||
const pmm = @import("../pmm.zig");
|
||||
const vmm = @import("vmm.zig").arch;
|
||||
|
||||
/// Architecture-specific CPU support, selected at compile time from the
/// build target. Adding a new architecture means adding an arm here.
pub const arch = switch (target.cpu.arch) {
    .x86_64 => @import("x86_64/cpu.zig"),
    else => @compileError("unsupported architecture"),
};
|
||||
|
||||
// FIXME: single-core hack, we need a proper way to figure which core this is when SMP support is added.
// Set exactly once by setupCore(); reading it before that call dereferences
// undefined memory.
var this_core: *arch.Core = undefined;
|
||||
|
||||
/// Allocates and initializes the per-core scheduler structure (arch.Core)
/// for this CPU core, including its idle thread, and records it in
/// `this_core` for later lookup via thisCore().
///
/// The Core lives in a freshly allocated physical frame accessed through the
/// kernel's physical-memory mapping; assumes @sizeOf(arch.Core) fits in one
/// frame — TODO confirm.
pub fn setupCore(allocator: *pmm.FrameAllocator) !void {
    const frame = try pmm.allocFrame(allocator);

    // View the frame through the higher-half physical mapping.
    const core: *arch.Core = @ptrFromInt(frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE));
    core.id = 0; // FIXME: Actually check core id
    core.thread_list = .{};

    const idle_thread = &core.idle_thread.data;

    // Idle thread: id 0, no page directory of its own (null = keep whatever
    // directory is active), zeroed register state pointing at the arch idle loop.
    idle_thread.id = 0;
    idle_thread.directory = null;
    idle_thread.regs = std.mem.zeroes(@TypeOf(idle_thread.regs));
    thread.arch.initKernelRegisters(&idle_thread.regs);
    thread.arch.setAddress(&idle_thread.regs, @intFromPtr(&thread.arch.idleLoop));

    core.thread_list.append(&core.idle_thread);

    // NOTE(review): core.current_thread is left uninitialized here; it is
    // first assigned in thread.enterTask() — confirm nothing reads it earlier.
    this_core = core;
}
|
||||
|
||||
/// Returns the Core structure of the CPU core we are running on.
/// Only valid after setupCore() has completed (single-core assumption, see
/// the FIXME on this_core).
pub fn thisCore() *arch.Core {
    return this_core;
}
|
@ -1,9 +0,0 @@
|
||||
const std = @import("std");
|
||||
const target = @import("builtin").target;
|
||||
|
||||
/// Architecture-specific thread/context primitives, chosen at compile time
/// from the build target.
pub const arch = switch (target.cpu.arch) {
    .x86_64 => @import("x86_64/thread.zig"),
    else => @compileError("unsupported architecture"),
};
|
@ -1,3 +0,0 @@
|
||||
const thread = @import("../../thread.zig");

/// Per-CPU-core scheduler bookkeeping.
pub const Core = struct {
    /// Core index; currently always 0 (single-core).
    id: u32,
    /// Round-robin run queue of threads on this core.
    thread_list: thread.ThreadList,
    /// Thread currently executing on this core.
    current_thread: *thread.ThreadControlBlock,
    /// Embedded list node for the always-present idle thread.
    idle_thread: thread.ThreadList.Node,
};
|
@ -79,8 +79,9 @@ fn stackTop(begin: usize, size: usize) usize {
|
||||
return (begin + size) - 16;
|
||||
}
|
||||
|
||||
/// Initializes the TSS and points the GDT's TSS descriptor at it.
/// `stack`/`stack_length` supply the IST0 emergency stack, used by interrupt
/// gates that must run on a known-good stack.
/// (The scrape contained both the old 2-arg and new 4-arg signatures of this
/// function from a diff; this is the newer 4-arg version.)
fn setupTSS(gdt: *GlobalDescriptorTable, tss: *TSS, stack: [*]u8, stack_length: usize) void {
    // Placing iomap_base at the end of the TSS means "no I/O permission bitmap".
    tss.iomap_base = @sizeOf(TSS);
    tss.ist0 = stackTop(@intFromPtr(stack), stack_length);
    setTSSBase(&gdt.tss, &gdt.tss2, @intFromPtr(tss));
    setLimit(&gdt.tss, @sizeOf(TSS) - 1);
}
||||
@ -119,15 +120,12 @@ pub fn setupGDT() void {
|
||||
var gdt = GlobalDescriptorTable{ .null = std.mem.zeroes(GDTEntry), .kernel_code = createGDTEntry(0xffff, 0x0000, 0x00, 0x9a, 0xaf, 0x00), .kernel_data = createGDTEntry(0xffff, 0x0000, 0x00, 0x92, 0xcf, 0x00), .user_code = createGDTEntry(0xffff, 0x0000, 0x00, 0xfa, 0xaf, 0x00), .user_data = createGDTEntry(0xffff, 0x0000, 0x00, 0xf2, 0xcf, 0x00), .tss = createGDTEntry(0x0000, 0x0000, 0x00, 0xe9, 0x0f, 0x00), .tss2 = HighGDTEntry{ .base_high = 0x00000000, .reserved = 0x00000000 } };
|
||||
var gdtr = std.mem.zeroes(GDTR);
|
||||
var tss = std.mem.zeroes(TSS);
|
||||
var interrupt_stack: [platform.PAGE_SIZE * 4]u8 = std.mem.zeroes([platform.PAGE_SIZE * 4]u8);
|
||||
var syscall_stack: [platform.PAGE_SIZE * 4]u8 = std.mem.zeroes([platform.PAGE_SIZE * 4]u8);
|
||||
var alternate_stack: [platform.PAGE_SIZE * 4]u8 = std.mem.zeroes([platform.PAGE_SIZE * 4]u8);
|
||||
};
|
||||
|
||||
state.gdtr.offset = @intFromPtr(&state.gdt);
|
||||
state.gdtr.size = @sizeOf(GlobalDescriptorTable);
|
||||
setupTSS(&state.gdt, &state.tss);
|
||||
state.tss.ist0 = stackTop(@intFromPtr(&state.interrupt_stack[0]), @sizeOf(@TypeOf(state.interrupt_stack)));
|
||||
state.tss.rsp0 = stackTop(@intFromPtr(&state.syscall_stack[0]), @sizeOf(@TypeOf(state.syscall_stack)));
|
||||
setupTSS(&state.gdt, &state.tss, @ptrCast(&state.alternate_stack[0]), @sizeOf(@TypeOf(state.alternate_stack)));
|
||||
|
||||
// Hackish way to call naked functions which we know conform to SysV ABI.
|
||||
const lgdt: *const fn (g: *GDTR) callconv(.C) void = @ptrCast(&loadGDT);
|
||||
|
@ -1,74 +0,0 @@
|
||||
const std = @import("std");
|
||||
const interrupts = @import("interrupts.zig");
|
||||
|
||||
/// Enters a task by hand-building an iretq frame on the current stack.
///
/// Sequence (order matters):
///  1. `addq %[base], %rsp` rebases the stack pointer into the higher-half
///     physical mapping so the stack stays valid after cr3 is switched.
///  2. ss, rsp, rflags, cs, rip are pushed in the order iretq pops them.
///  3. cr3 is loaded with `directory`, switching address spaces.
///  4. All general-purpose registers are zeroed so no kernel values leak
///     into the new task; rdi is exempt — it carries the task's first
///     argument via the `[arg]` input constraint (SysV ABI).
///  5. iretq transfers control. This function never returns.
///
/// `base` must be comptime-known; `directory` is the physical page-directory
/// pointer to install.
pub inline fn enterTask(regs: *interrupts.InterruptStackFrame, comptime base: u64, directory: *anyopaque) noreturn {
    asm volatile (
        \\ addq %[base], %rsp
        \\ push %[ss]
        \\ push %[rsp]
        \\ push %[rflags]
        \\ push %[cs]
        \\ push %[rip]
        \\ mov %[directory], %cr3
        \\ mov $0, %rax
        \\ mov $0, %rbx
        \\ mov $0, %rcx
        \\ mov $0, %rdx
        \\ mov $0, %rsi
        \\ mov $0, %rbp
        \\ mov $0, %r8
        \\ mov $0, %r9
        \\ mov $0, %r10
        \\ mov $0, %r11
        \\ mov $0, %r12
        \\ mov $0, %r13
        \\ mov $0, %r14
        \\ mov $0, %r15
        \\ iretq
        :
        : [ss] "r" (regs.ss),
          [rsp] "r" (regs.rsp),
          [rflags] "r" (regs.rflags),
          [cs] "r" (regs.cs),
          [rip] "r" (regs.rip),
          [arg] "{rdi}" (regs.rdi),
          [base] "r" (base),
          [directory] "r" (directory),
    );
    unreachable;
}
|
||||
|
||||
/// Idle loop run by each core's idle thread: enable interrupts and halt
/// until the next one arrives, forever. Naked because it is entered via a
/// raw instruction pointer (see setupCore), not a normal call.
pub fn idleLoop() callconv(.Naked) noreturn {
    asm volatile (
        \\.loop:
        \\ sti
        \\ hlt
        \\ jmp .loop
    );
}
|
||||
|
||||
/// Points a saved register frame's instruction pointer at `entry`.
pub fn setAddress(regs: *interrupts.InterruptStackFrame, entry: u64) void {
    regs.rip = entry;
}
|
||||
|
||||
/// Stores the task's first argument in the saved frame's rdi (SysV ABI
/// first-argument register, forwarded by enterTask).
pub fn setArgument(regs: *interrupts.InterruptStackFrame, value: u64) void {
    regs.rdi = value;
}
|
||||
|
||||
/// Sets the saved frame's stack pointer.
pub fn setStack(regs: *interrupts.InterruptStackFrame, stack_pointer: u64) void {
    regs.rsp = stack_pointer;
}
|
||||
|
||||
/// Resets `regs` to a pristine kernel-mode state: kernel code/data segment
/// selectors, interrupts enabled, everything else zero.
pub fn initKernelRegisters(regs: *interrupts.InterruptStackFrame) void {
    var fresh = std.mem.zeroes(interrupts.InterruptStackFrame);
    fresh.cs = 0x08; // kernel code selector
    fresh.ss = 0x10; // kernel data selector
    fresh.rflags = 1 << 9; // IF (Interrupt enable flag)
    regs.* = fresh;
}
|
||||
|
||||
/// Resets `regs` to a pristine user-mode state: user code/data selectors
/// with RPL 3, interrupts enabled, everything else zero.
pub fn initUserRegisters(regs: *interrupts.InterruptStackFrame) void {
    var fresh = std.mem.zeroes(interrupts.InterruptStackFrame);
    fresh.cs = 0x18 | 3; // user code selector, RPL 3
    fresh.ss = 0x20 | 3; // user data selector, RPL 3
    fresh.rflags = 1 << 9; // IF (Interrupt enable flag)
    regs.* = fresh;
}
|
152
core/src/elf.zig
152
core/src/elf.zig
@ -1,152 +0,0 @@
|
||||
const std = @import("std");
|
||||
const target = @import("builtin").target;
|
||||
const vmm = @import("arch/vmm.zig").arch;
|
||||
const platform = @import("arch/platform.zig").arch;
|
||||
const pmm = @import("pmm.zig");
|
||||
const debug = @import("arch/debug.zig");
|
||||
|
||||
// ELF identification and header constants (subset of <elf.h>).
const ELFMAG = "\x7fELF";
const SELFMAG = 4;
const EI_CLASS = 4; // File class byte index
const ELFCLASS64 = 2; // 64-bit objects
const EI_DATA = 5; // Data encoding byte index
const ELFDATA2LSB = 1; // 2's complement, little endian
const ET_EXEC = 2; // Executable file
const PT_LOAD = 1; // Loadable program segment
// Expected e_machine value for the architecture this kernel was built for
// (62 == EM_X86_64).
const EM_MACH = switch (target.cpu.arch) {
    .x86_64 => 62,
    else => @compileError("unsupported architecture"),
};
|
||||
|
||||
/// ELF64 file header (Elf64_Ehdr).
/// NOTE(review): the 16-byte e_ident array is stored as a single u128 so the
/// struct can be packed; individual ident bytes are read through a separate
/// byte pointer in loadElf rather than through this field.
const Elf64_Ehdr align(8) = packed struct {
    e_ident: u128, // Magic number and other info
    e_type: u16, // Object file type
    e_machine: u16, // Architecture
    e_version: u32, // Object file version
    e_entry: u64, // Entry point virtual address
    e_phoff: u64, // Program header table file offset
    e_shoff: u64, // Section header table file offset
    e_flags: u32, // Processor-specific flags
    e_ehsize: u16, // ELF header size in bytes
    e_phentsize: u16, // Program header table entry size
    e_phnum: u16, // Program header table entry count
    e_shentsize: u16, // Section header table entry size
    e_shnum: u16, // Section header table entry count
    e_shstrndx: u16, // Section header string table index
};
|
||||
|
||||
/// ELF64 program header (Elf64_Phdr), one per loadable/other segment.
const Elf64_Phdr align(8) = packed struct {
    p_type: u32, // Segment type
    p_flags: u32, // Segment flags
    p_offset: u64, // Segment file offset
    p_vaddr: u64, // Segment virtual address
    p_paddr: u64, // Segment physical address
    p_filesz: u64, // Segment size in file
    p_memsz: u64, // Segment size in memory
    p_align: u64, // Segment alignment
};
|
||||
|
||||
/// Errors produced while validating or loading an ELF image.
const ElfError = error{
    InvalidExecutable,
};
|
||||
|
||||
/// True when the segment's p_flags carries PF_X (bit 0, executable).
fn canExecuteSegment(flags: u32) bool {
    return (flags & 1) != 0;
}
|
||||
|
||||
/// True when the segment's p_flags carries PF_W (bit 1, writable).
fn canWriteSegment(flags: u32) bool {
    return (flags & 2) != 0;
}
|
||||
|
||||
/// Loads an ELF64 executable image into the address space described by
/// `directory` and returns its entry-point virtual address.
///
/// `base_address` is the physical frame where the raw ELF file begins; it is
/// read through the kernel's physical-memory mapping. Validates magic, class,
/// endianness, type, machine and program-header count before loading, then
/// maps and copies every PT_LOAD segment with page permissions derived from
/// its p_flags.
///
/// Returns error.InvalidExecutable on any validation failure; propagates
/// allocation/mapping errors from vmm.
pub fn loadElf(allocator: *pmm.FrameAllocator, directory: *vmm.PageDirectory, base_address: pmm.PhysFrame) !usize {
    const address = base_address.virtualAddress(vmm.PHYSICAL_MAPPING_BASE);

    debug.print("Address: {}\n", .{address});

    const elf_header: *Elf64_Ehdr = @ptrFromInt(address);

    debug.print("ELF header: {}\n", .{elf_header});

    // Byte view of the ident array (the packed header stores it as a u128).
    const e_ident: [*]u8 = @ptrFromInt(address);

    if (!std.mem.eql(u8, e_ident[0..SELFMAG], ELFMAG[0..SELFMAG])) {
        debug.print("Error while loading ELF: ELF header has no valid magic\n", .{});
        return error.InvalidExecutable;
    }

    if (e_ident[EI_CLASS] != ELFCLASS64) {
        debug.print("Error while loading ELF: ELF object is not 64-bit\n", .{});
        return error.InvalidExecutable;
    }

    if (e_ident[EI_DATA] != ELFDATA2LSB) {
        debug.print("Error while loading ELF: ELF object is not 2's complement little-endian\n", .{});
        return error.InvalidExecutable;
    }

    if (elf_header.e_type != ET_EXEC) {
        debug.print("Error while loading ELF: ELF object is not an executable\n", .{});
        return error.InvalidExecutable;
    }

    if (elf_header.e_machine != EM_MACH) {
        debug.print("Error while loading ELF: ELF object's target architecture does not match the current one\n", .{});
        return error.InvalidExecutable;
    }

    if (elf_header.e_phnum == 0) {
        debug.print("Error while loading ELF: ELF object has no program headers\n", .{});
        return error.InvalidExecutable;
    }

    var i: usize = 0;
    // Program headers may be unaligned within the file image, hence align(1).
    var program_header: *align(1) Elf64_Phdr = @ptrFromInt(address + elf_header.e_phoff);

    debug.print("Program header address: {x}\n", .{address + elf_header.e_phoff});

    while (i < elf_header.e_phnum) {
        debug.print("Program header: {}\n", .{program_header.*});
        if (program_header.p_type == PT_LOAD) {
            debug.print("ELF: Loading segment (offset={d}, base={x}, filesize={d}, memsize={d})\n", .{ program_header.p_offset, program_header.p_vaddr, program_header.p_filesz, program_header.p_memsz });

            // Round the segment base down to a page boundary; vaddr_diff is
            // the in-page offset that must be zero-filled before the data.
            const vaddr_diff: u64 = @rem(program_header.p_vaddr, platform.PAGE_SIZE);
            const base_vaddr: u64 = program_header.p_vaddr - vaddr_diff;

            // Default to user, non-writable, non-executable; then widen from
            // the segment's PF_W / PF_X flags.
            var flags: u32 = @intFromEnum(vmm.Flags.User) | @intFromEnum(vmm.Flags.NoExecute);
            if (canWriteSegment(program_header.p_flags)) flags |= @intFromEnum(vmm.Flags.ReadWrite);
            if (canExecuteSegment(program_header.p_flags)) flags &= ~@as(u32, @intFromEnum(vmm.Flags.NoExecute));

            // Allocate physical memory for the segment
            try vmm.allocAndMap(allocator, directory, base_vaddr, try std.math.divCeil(usize, program_header.p_memsz + vaddr_diff, platform.PAGE_SIZE), flags);

            // Zero the slack before the segment start within the first page.
            try vmm.memsetUser(directory, vmm.PHYSICAL_MAPPING_BASE, base_vaddr, 0, vaddr_diff);

            try vmm.copyToUser(directory, vmm.PHYSICAL_MAPPING_BASE, program_header.p_vaddr, @ptrFromInt(address + program_header.p_offset), program_header.p_filesz);

            // NOTE(review): underflows if p_filesz > p_memsz (malformed ELF);
            // no earlier check rejects that — consider validating.
            const bss_size = program_header.p_memsz - program_header.p_filesz;

            // Zero the BSS portion (memory beyond the file-backed bytes).
            try vmm.memsetUser(directory, vmm.PHYSICAL_MAPPING_BASE, program_header.p_vaddr + program_header.p_filesz, 0, bss_size);
        } else {
            debug.print("ELF: Encountered non-loadable program header, skipping\n", .{});
        }

        i += 1;

        // Step by e_phentsize (not @sizeOf(Elf64_Phdr)) as the file dictates
        // the table stride.
        const new_address = address + elf_header.e_phoff + (i * elf_header.e_phentsize);

        debug.print("Program header address: {x}\n", .{new_address});

        program_header = @ptrFromInt(new_address);
    }

    return elf_header.e_entry;
}
|
||||
|
||||
/// Maps a user-mode stack of at least `stack_size` bytes ending at
/// `stack_top` and returns the initial stack pointer (16 bytes below the
/// top, matching the slack stackTop-style code leaves elsewhere).
pub fn allocateStack(allocator: *pmm.FrameAllocator, directory: *vmm.PageDirectory, stack_top: usize, stack_size: usize) !usize {
    const page_count = try std.math.divCeil(usize, stack_size, platform.PAGE_SIZE);
    const stack_bottom = stack_top - (page_count * platform.PAGE_SIZE);

    // Writable user data that must never be executed.
    const stack_flags = @intFromEnum(vmm.Flags.ReadWrite) | @intFromEnum(vmm.Flags.User) | @intFromEnum(vmm.Flags.NoExecute);
    try vmm.allocAndMap(allocator, directory, stack_bottom, page_count, stack_flags);

    return stack_top - 16;
}
|
@ -1,23 +1,14 @@
|
||||
const std = @import("std");
|
||||
const easyboot = @cImport(@cInclude("easyboot.h"));
|
||||
const debug = @import("arch/debug.zig");
|
||||
const cpu = @import("arch/cpu.zig");
|
||||
const platform = @import("arch/platform.zig").arch;
|
||||
const interrupts = @import("arch/interrupts.zig").arch;
|
||||
const vmm = @import("arch/vmm.zig").arch;
|
||||
const multiboot = @import("multiboot.zig");
|
||||
const pmm = @import("pmm.zig");
|
||||
const thread = @import("thread.zig");
|
||||
const elf = @import("elf.zig");
|
||||
|
||||
const MultibootInfo = [*c]u8;
|
||||
|
||||
/// Bundle of state threaded through the opaque pointer of the multiboot
/// module-iteration callback in _start.
const Context = struct {
    allocator: *pmm.FrameAllocator,
    directory: *vmm.PageDirectory,
    regs: *interrupts.InterruptStackFrame,
};
|
||||
|
||||
export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
|
||||
interrupts.disableInterrupts();
|
||||
|
||||
@ -26,87 +17,40 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
|
||||
while (true) {}
|
||||
}
|
||||
|
||||
debug.print("Hello world from the kernel!\n", .{});
|
||||
|
||||
multiboot.parseMultibootTags(@ptrCast(info));
|
||||
|
||||
platform.platformInit();
|
||||
|
||||
const tag = multiboot.findMultibootTag(easyboot.multiboot_tag_mmap_t, @ptrCast(info)) orelse {
|
||||
debug.print("error: No memory map multiboot tag found!\n", .{});
|
||||
while (true) {}
|
||||
unreachable;
|
||||
};
|
||||
debug.print("GDT initialized\n", .{});
|
||||
|
||||
var allocator = pmm.initializeFrameAllocator(tag) catch |err| {
|
||||
debug.print("Error while initializing frame allocator: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
if (multiboot.findMultibootTag(easyboot.multiboot_tag_mmap_t, @ptrCast(info))) |tag| {
|
||||
var allocator = pmm.initializeFrameAllocator(tag) catch |err| {
|
||||
debug.print("Error while initializing frame allocator: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
|
||||
var dir: vmm.PageDirectory = std.mem.zeroes(vmm.PageDirectory);
|
||||
const base: usize = vmm.createInitialMappings(&allocator, tag, &dir) catch |err| {
|
||||
debug.print("Error while creating initial mappings: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
var init_directory = std.mem.zeroes(vmm.PageDirectory);
|
||||
const base: usize = vmm.createInitialMappings(&allocator, tag, &init_directory) catch |err| {
|
||||
debug.print("Error while creating initial mappings: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
|
||||
debug.print("Physical memory base mapping for init: {x}\n", .{base});
|
||||
|
||||
const frame = pmm.allocFrame(&allocator) catch |err| {
|
||||
debug.print("Error while creating frame for user page directory: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
|
||||
// At this point the physical address space is already mapped into kernel virtual memory.
|
||||
const init_directory: *vmm.PageDirectory = @ptrFromInt(frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE));
|
||||
init_directory.* = dir;
|
||||
|
||||
cpu.setupCore(&allocator) catch |err| {
|
||||
debug.print("Error while setting up core-specific scheduler structures: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
|
||||
const init = thread.createThreadControlBlock(&allocator) catch |err| {
|
||||
debug.print("Error while creating thread control block for init: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
|
||||
init.directory = init_directory;
|
||||
thread.arch.initUserRegisters(&init.regs);
|
||||
thread.arch.setArgument(&init.regs, base);
|
||||
|
||||
thread.addThreadToScheduler(cpu.thisCore(), init);
|
||||
|
||||
const ctx = Context{ .allocator = &allocator, .directory = init_directory, .regs = &init.regs };
|
||||
|
||||
multiboot.findMultibootTags(easyboot.multiboot_tag_module_t, @ptrCast(info), struct {
|
||||
fn handler(mod: *easyboot.multiboot_tag_module_t, c: *const anyopaque) void {
|
||||
const context: *const Context = @alignCast(@ptrCast(c));
|
||||
const name = "init";
|
||||
if (std.mem.eql(u8, mod.string()[0..name.len], name[0..name.len])) {
|
||||
const phys_frame = pmm.PhysFrame{ .address = mod.mod_start };
|
||||
debug.print("Loading init from module at address {x}, virtual {x}\n", .{ mod.mod_start, phys_frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE) });
|
||||
const entry = elf.loadElf(context.allocator, context.directory, pmm.PhysFrame{ .address = mod.mod_start }) catch |err| {
|
||||
debug.print("Error while loading ELF file for init: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
thread.arch.setAddress(context.regs, entry);
|
||||
}
|
||||
}
|
||||
}.handler, &ctx);
|
||||
|
||||
const default_stack_size = 0x80000; // 512 KiB.
|
||||
const stack = elf.allocateStack(&allocator, init_directory, base - platform.PAGE_SIZE, default_stack_size) catch |err| {
|
||||
debug.print("Error while creating stack for init: {}\n", .{err});
|
||||
while (true) {}
|
||||
};
|
||||
thread.arch.setStack(&init.regs, stack);
|
||||
debug.print("Physical memory base mapping for init: {x}\n", .{base});
|
||||
} else {
|
||||
debug.print("No memory map multiboot tag found!\n", .{});
|
||||
}
|
||||
|
||||
platform.platformEndInit();
|
||||
|
||||
thread.enterTask(init);
|
||||
asm volatile ("int3");
|
||||
|
||||
while (true) {}
|
||||
}
|
||||
|
||||
/// Kernel panic handler (overrides std's default): print the message and the
/// panicking return address over the debug channel, then spin forever.
/// Stack trace and address arguments are ignored.
pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noreturn {
    debug.print("--- KERNEL PANIC! ---\n", .{});
    debug.print("{s}\n", .{message});
    debug.print("return address: {x}\n", .{@returnAddress()});
    while (true) {}
}
|
||||
|
@ -1,89 +0,0 @@
|
||||
const std = @import("std");
|
||||
const vmm = @import("arch/vmm.zig").arch;
|
||||
const interrupts = @import("arch/interrupts.zig").arch;
|
||||
pub const arch = @import("arch/thread.zig").arch;
|
||||
const pmm = @import("pmm.zig");
|
||||
const cpu = @import("arch/cpu.zig");
|
||||
|
||||
/// Lifecycle states a thread can be in. Only Running exists so far;
/// blocked/sleeping states are presumably planned.
pub const ThreadState = enum {
    Running,
};
|
||||
|
||||
/// Per-thread scheduler state. Instances are embedded as the `data` field of
/// a ThreadList.Node (see createThreadControlBlock / addThreadToScheduler).
pub const ThreadControlBlock = struct {
    // Unique thread id; 0 is used by per-core idle threads.
    id: u64,
    // Address space of the thread; null means "keep the current directory"
    // (kernel threads).
    directory: ?*vmm.PageDirectory,
    // Saved register state, restored on context switch.
    regs: interrupts.InterruptStackFrame,

    // Remaining scheduling-quantum ticks; recharged on every switch.
    ticks: u64,
};
|
||||
|
||||
/// Intrusive run-queue type used by each core.
pub const ThreadList = std.DoublyLinkedList(ThreadControlBlock);

// NOTE(review): appears unused in this file — scheduling goes through the
// per-core thread_list; confirm before removing.
var g_threads: ThreadList = .{};

// Scheduling quantum, in timer ticks, granted to a task on each switch.
const ALLOCATED_TICKS_PER_TASK = 20;
|
||||
|
||||
/// Makes `task` the current thread of this core and jumps into it.
/// Charges a full quantum, resolves which page directory to install
/// (the task's own, or the currently active one for kernel threads), and
/// hands off to the architecture layer. Never returns.
pub fn enterTask(task: *ThreadControlBlock) noreturn {
    cpu.thisCore().current_thread = task;

    task.ticks = ALLOCATED_TICKS_PER_TASK;

    // Default: keep whatever directory is active (kernel-only thread).
    var directory = vmm.readPageDirectory();

    if (task.directory) |dir| {
        directory = vmm.getPhysicalPageDirectory(dir);
    }

    arch.enterTask(&task.regs, vmm.PHYSICAL_MAPPING_BASE, @ptrCast(directory));
}
|
||||
|
||||
/// Context-switches from the current thread to `new_task`.
/// Saves the interrupted register frame into the outgoing thread, installs
/// the incoming thread's saved frame into `regs` (so the interrupt return
/// resumes it), switches page directories only when they differ, and
/// recharges the incoming thread's quantum.
pub fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock) void {
    const core = cpu.thisCore();

    core.current_thread.regs = regs.*;
    regs.* = new_task.regs;

    if (new_task.directory) |directory| {
        // Skip the cr3 reload (and TLB flush) if it is already active.
        if (vmm.readPageDirectory() != directory) vmm.setPageDirectory(directory);
    }

    new_task.ticks = ALLOCATED_TICKS_PER_TASK;

    core.current_thread = new_task;
}
|
||||
|
||||
/// Round-robin scheduling step: rotate this core's run queue and switch to
/// whichever thread comes up front. No-op when the queue is empty.
pub fn scheduleNewTask(regs: *interrupts.InterruptStackFrame) void {
    const core = cpu.thisCore();

    if (core.thread_list.popFirst()) |next| {
        core.thread_list.append(next);
        switchTask(regs, &next.data);
    }
}
|
||||
|
||||
/// Timer-tick hook: burn one tick of the current thread's quantum and
/// reschedule once it is exhausted.
///
/// Uses a saturating decrement: with plain `-=`, a thread whose `ticks` is
/// already 0 (e.g. one that was never charged a quantum) would trip the
/// integer-underflow panic in safe builds; `-|=` clamps at 0 and the thread
/// is simply rescheduled on this tick instead.
pub fn preempt(regs: *interrupts.InterruptStackFrame) void {
    const core = cpu.thisCore();

    core.current_thread.ticks -|= 1;
    if (core.current_thread.ticks == 0) {
        scheduleNewTask(regs);
    }
}
|
||||
|
||||
// Monotonically increasing thread-id source; starts at 1 because id 0 is
// taken by per-core idle threads (see cpu.setupCore).
var next_id: std.atomic.Value(u64) = std.atomic.Value(u64).init(1);
|
||||
|
||||
/// Appends `thread` to `core`'s run queue. `thread` must be the `data` field
/// of a ThreadList.Node (as returned by createThreadControlBlock), since the
/// enclosing node is recovered via @fieldParentPtr.
pub fn addThreadToScheduler(core: *cpu.arch.Core, thread: *ThreadControlBlock) void {
    core.thread_list.append(@fieldParentPtr("data", thread));
}
|
||||
|
||||
/// Allocates a new thread control block in its own physical frame and
/// initializes it with a fresh id, no page directory, and zeroed registers.
/// The returned pointer is the `data` field of a ThreadList.Node so it can
/// later be linked into a run queue (see addThreadToScheduler).
/// Assumes @sizeOf(ThreadList.Node) fits in one frame — TODO confirm.
pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlBlock {
    const frame = try pmm.allocFrame(allocator);

    // Access the frame through the higher-half physical mapping.
    const node: *ThreadList.Node = @ptrFromInt(frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE));
    const thread = &node.data;
    thread.id = next_id.fetchAdd(1, .seq_cst);
    thread.directory = null;
    thread.regs = std.mem.zeroes(@TypeOf(thread.regs));

    return thread;
}
|
Loading…
x
Reference in New Issue
Block a user