core: Change naming for virtual memory structures to reflect x86_64 terms

parent 43bb21af0b
commit 14047e0c3c
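
For context, the new names follow the x86_64 4-level paging terminology: a virtual address is translated through the PML4, PDPT, PD and PT, each a 512-entry table selected by a 9-bit slice of the address, which is why a single PageTable type with 512 entries now covers all four levels. The snippet below is an illustrative sketch only, not part of this commit: calculatePageTableIndexes and its level1..level4 fields do appear in the diff, but the struct name PageTableIndexes, the bit arithmetic and the main function are assumptions added here for demonstration.

const std = @import("std");

// Illustrative sketch (not the repository's actual implementation): how the
// four 9-bit indexes into the PML4, PDPT, PD and PT are carved out of a
// canonical 48-bit x86_64 virtual address; the low 12 bits are the page offset.
const PageTableIndexes = struct {
    level4: u9, // PML4 index, bits 39..47
    level3: u9, // PDPT index, bits 30..38
    level2: u9, // PD index, bits 21..29
    level1: u9, // PT index, bits 12..20
};

fn calculatePageTableIndexes(virt_address: u64) PageTableIndexes {
    return .{
        .level4 = @as(u9, @truncate(virt_address >> 39)),
        .level3 = @as(u9, @truncate(virt_address >> 30)),
        .level2 = @as(u9, @truncate(virt_address >> 21)),
        .level1 = @as(u9, @truncate(virt_address >> 12)),
    };
}

pub fn main() void {
    const indexes = calculatePageTableIndexes(0xffff_8000_0012_3000);
    std.debug.print("PML4 {d}, PDPT {d}, PD {d}, PT {d}\n", .{ indexes.level4, indexes.level3, indexes.level2, indexes.level1 });
}
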
@@ -26,7 +26,7 @@ pub fn setupCore(allocator: *pmm.FrameAllocator) !void {
     const idle_thread = &core.idle_thread.data;

     idle_thread.id = 0;
-    idle_thread.mapper = null;
+    idle_thread.address_space = null;
     idle_thread.regs = std.mem.zeroes(@TypeOf(idle_thread.regs));
     idle_thread.state = .Running;
     idle_thread.user_priority = 0;
@@ -36,16 +36,17 @@ pub const PageTableEntry = packed struct {
     }
 };

-pub const PageDirectory = struct {
+// Used for all page tables (PML4, PDPT, PD and PT).
+pub const PageTable = struct {
     entries: [512]PageTableEntry,
 };

-pub const MemoryMapper = struct {
+pub const AddressSpace = struct {
     phys: pmm.PhysFrame,
-    directory: *PageDirectory,
+    table: *PageTable,

-    pub fn create(frame: pmm.PhysFrame, base: usize) MemoryMapper {
-        return .{ .phys = frame, .directory = @ptrFromInt(frame.virtualAddress(base)) };
+    pub fn create(frame: pmm.PhysFrame, base: usize) AddressSpace {
+        return .{ .phys = frame, .table = @ptrFromInt(frame.virtualAddress(base)) };
     }
 };

@@ -95,62 +96,62 @@ fn setUpParentPageTableEntry(allocator: *pmm.FrameAllocator, pte: *PageTableEntr
         const frame = try pmm.allocFrame(allocator);
         pte.present = 1;
         pte.setAddress(frame.address);
-        getTable(pte, base).* = std.mem.zeroes(PageDirectory);
+        getTable(pte, base).* = std.mem.zeroes(PageTable);
     }
     if (hasFlag(flags, Flags.ReadWrite) == 1) pte.read_write = 1;
     if (hasFlag(flags, Flags.User) == 1) pte.user = 1;
 }

-fn getTable(pte: *PageTableEntry, base: usize) *allowzero PageDirectory {
+fn getTable(pte: *PageTableEntry, base: usize) *allowzero PageTable {
     const frame = pmm.PhysFrame{ .address = pte.getAddress() };
     return @ptrFromInt(frame.virtualAddress(base));
 }

-pub fn map(allocator: *pmm.FrameAllocator, mapper: MemoryMapper, base: usize, virt_address: u64, phys: pmm.PhysFrame, flags: u32, use_huge_pages: bool) !void {
+pub fn map(allocator: *pmm.FrameAllocator, space: AddressSpace, base: usize, virt_address: u64, phys: pmm.PhysFrame, flags: u32, use_huge_pages: bool) !void {
     const indexes = calculatePageTableIndexes(virt_address);
-    const l4 = &mapper.directory.entries[indexes.level4];
-    try setUpParentPageTableEntry(allocator, l4, flags, base);
+    const pml4_entry = &space.table.entries[indexes.level4];
+    try setUpParentPageTableEntry(allocator, pml4_entry, flags, base);

-    const l3 = &getTable(l4, base).entries[indexes.level3];
-    if (l3.larger_pages == 1) return error.MemoryAlreadyInUse;
-    try setUpParentPageTableEntry(allocator, l3, flags, base);
+    const pdpt_entry = &getTable(pml4_entry, base).entries[indexes.level3];
+    if (pdpt_entry.larger_pages == 1) return error.MemoryAlreadyInUse;
+    try setUpParentPageTableEntry(allocator, pdpt_entry, flags, base);

-    const l2 = &getTable(l3, base).entries[indexes.level2];
-    if (l2.larger_pages == 1) return error.MemoryAlreadyInUse;
+    const pd_entry = &getTable(pdpt_entry, base).entries[indexes.level2];
+    if (pd_entry.larger_pages == 1) return error.MemoryAlreadyInUse;

     if (use_huge_pages) {
-        updatePageTableEntry(l2, phys, flags);
-        l2.larger_pages = 1;
+        updatePageTableEntry(pd_entry, phys, flags);
+        pd_entry.larger_pages = 1;
         return;
     }

-    try setUpParentPageTableEntry(allocator, l2, flags, base);
+    try setUpParentPageTableEntry(allocator, pd_entry, flags, base);

-    const l1 = &getTable(l2, base).entries[indexes.level1];
-    if (l1.present == 1) return error.MemoryAlreadyInUse;
-    updatePageTableEntry(l1, phys, flags);
+    const pt_entry = &getTable(pd_entry, base).entries[indexes.level1];
+    if (pt_entry.present == 1) return error.MemoryAlreadyInUse;
+    updatePageTableEntry(pt_entry, phys, flags);
 }

-pub fn getEntry(mapper: MemoryMapper, base: usize, virt_address: u64) ?*PageTableEntry {
+pub fn getEntry(space: AddressSpace, base: usize, virt_address: u64) ?*PageTableEntry {
     const indexes = calculatePageTableIndexes(virt_address);
-    const l4 = &mapper.directory.entries[indexes.level4];
-    if (l4.present == 0) return null;
+    const pml4_entry = &space.table.entries[indexes.level4];
+    if (pml4_entry.present == 0) return null;

-    const l3 = &getTable(l4, base).entries[indexes.level3];
-    if (l3.present == 0) return null;
-    if (l3.larger_pages == 1) return l3;
+    const pdpt_entry = &getTable(pml4_entry, base).entries[indexes.level3];
+    if (pdpt_entry.present == 0) return null;
+    if (pdpt_entry.larger_pages == 1) return pdpt_entry;

-    const l2 = &getTable(l3, base).entries[indexes.level2];
-    if (l2.present == 0) return null;
-    if (l2.larger_pages == 1) return l2;
+    const pd_entry = &getTable(pdpt_entry, base).entries[indexes.level2];
+    if (pd_entry.present == 0) return null;
+    if (pd_entry.larger_pages == 1) return pd_entry;

-    const l1 = &getTable(l2, base).entries[indexes.level1];
-    if (l1.present == 0) return null;
+    const pt_entry = &getTable(pd_entry, base).entries[indexes.level1];
+    if (pt_entry.present == 0) return null;

-    return l1;
+    return pt_entry;
 }

-pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]const u8, size: usize) !void {
+pub fn copyToUser(space: AddressSpace, base: usize, user: usize, kernel: [*]const u8, size: usize) !void {
     const remainder: usize = @rem(user, platform.PAGE_SIZE);
     const user_page = user - remainder;

@@ -159,7 +160,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con
     var count = size;

     if (user_address != user_page) {
-        const pte = getEntry(mapper, base, user_page) orelse return error.MemoryNotInUse;
+        const pte = getEntry(space, base, user_page) orelse return error.MemoryNotInUse;
         const frame = pmm.PhysFrame{ .address = pte.getAddress() };
         const amount: usize = @min((platform.PAGE_SIZE - remainder), count);
         const virt = frame.virtualAddress(base) + remainder;
@@ -172,7 +173,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con
     }

     while (count > 0) {
-        const pte = getEntry(mapper, base, user_address) orelse return error.MemoryNotInUse;
+        const pte = getEntry(space, base, user_address) orelse return error.MemoryNotInUse;
         const frame = pmm.PhysFrame{ .address = pte.getAddress() };
         const amount: usize = @min(platform.PAGE_SIZE, count);
         const virt = frame.virtualAddress(base);
@@ -187,7 +188,7 @@ pub fn copyToUser(mapper: MemoryMapper, base: usize, user: usize, kernel: [*]con
     return;
 }

-pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size: usize) !void {
+pub fn memsetUser(space: AddressSpace, base: usize, user: usize, elem: u8, size: usize) !void {
     const remainder: usize = @rem(user, platform.PAGE_SIZE);
     const user_page = user - remainder;

@@ -195,7 +196,7 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size
     var count = size;

     if (user_address != user_page) {
-        const pte = getEntry(mapper, base, user_page) orelse return error.MemoryNotInUse;
+        const pte = getEntry(space, base, user_page) orelse return error.MemoryNotInUse;
         const frame = pmm.PhysFrame{ .address = pte.getAddress() };
         const amount: usize = @min((platform.PAGE_SIZE - remainder), count);
         const virt = frame.virtualAddress(base) + remainder;
@@ -207,7 +208,7 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size
     }

     while (count > 0) {
-        const pte = getEntry(mapper, base, user_address) orelse return error.MemoryNotInUse;
+        const pte = getEntry(space, base, user_address) orelse return error.MemoryNotInUse;
         const frame = pmm.PhysFrame{ .address = pte.getAddress() };
         const amount: usize = @min(platform.PAGE_SIZE, count);
         const virt = frame.virtualAddress(base);
@@ -221,99 +222,100 @@ pub fn memsetUser(mapper: MemoryMapper, base: usize, user: usize, elem: u8, size
     return;
 }

-pub fn allocAndMap(allocator: *pmm.FrameAllocator, mapper: MemoryMapper, base: u64, pages: usize, flags: u32) !void {
+pub fn allocAndMap(allocator: *pmm.FrameAllocator, space: AddressSpace, base: u64, pages: usize, flags: u32) !void {
     var virt = base;
     var i: usize = 0;

     while (i < pages) {
         const frame = try pmm.allocFrame(allocator);
-        try map(allocator, mapper, PHYSICAL_MAPPING_BASE, virt, frame, flags, false);
+        try map(allocator, space, PHYSICAL_MAPPING_BASE, virt, frame, flags, false);
         virt += platform.PAGE_SIZE;
         i += 1;
     }
 }

-fn mapPhysicalMemory(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t, mapper: MemoryMapper, base: usize, flags: u32) !void {
+fn mapPhysicalMemory(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t, space: AddressSpace, base: usize, flags: u32) !void {
     const address_space_size = mmap.getAddressSpaceSize(tag) orelse return error.InvalidMemoryMap;
     const address_space_pages = address_space_size / HUGE_PAGE_SIZE;

     var index: usize = 0;
     while (index < address_space_pages) : (index += 1) {
-        try map(allocator, mapper, 0, base + index * HUGE_PAGE_SIZE, pmm.PhysFrame{ .address = index * HUGE_PAGE_SIZE }, flags, true);
+        try map(allocator, space, 0, base + index * HUGE_PAGE_SIZE, pmm.PhysFrame{ .address = index * HUGE_PAGE_SIZE }, flags, true);
     }
 }

-fn lockPageDirectoryFrames(allocator: *pmm.FrameAllocator, directory: *PageDirectory, index: u8) !void {
+fn lockPageTableFrames(allocator: *pmm.FrameAllocator, table: *PageTable, index: u8) !void {
     if (index > 1) {
         var i: u64 = 0;
         while (i < 512) : (i += 1) {
-            const pte = &directory.entries[i];
+            const pte = &table.entries[i];
             if (pte.present == 0) continue;
             if ((index < 4) and (pte.larger_pages == 1)) continue;

             try pmm.lockFrame(allocator, pte.getAddress());

-            const child_table: *PageDirectory = @ptrFromInt(pte.getAddress());
+            const child_table: *PageTable = @ptrFromInt(pte.getAddress());

-            try lockPageDirectoryFrames(allocator, child_table, index - 1);
+            try lockPageTableFrames(allocator, child_table, index - 1);
         }
     }
 }

-fn lockPageDirectory(allocator: *pmm.FrameAllocator, mapper: MemoryMapper) !void {
-    try pmm.lockFrame(allocator, mapper.phys.address);
-    try lockPageDirectoryFrames(allocator, mapper.directory, 4);
+fn lockPageTable(allocator: *pmm.FrameAllocator, space: AddressSpace) !void {
+    try pmm.lockFrame(allocator, space.phys.address);
+    try lockPageTableFrames(allocator, space.table, 4);
 }

 fn setUpKernelPageDirectory(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t) !pmm.PhysFrame {
-    const directory = readPageDirectory();
+    const table = readPageTable();

-    const mapper = MemoryMapper.create(directory, 0);
+    const space = AddressSpace.create(table, 0);

-    try lockPageDirectory(allocator, mapper);
-    try mapPhysicalMemory(allocator, tag, mapper, PHYSICAL_MAPPING_BASE, @intFromEnum(Flags.ReadWrite) | @intFromEnum(Flags.NoExecute) | @intFromEnum(Flags.Global));
+    try lockPageTable(allocator, space);
+    try mapPhysicalMemory(allocator, tag, space, PHYSICAL_MAPPING_BASE, @intFromEnum(Flags.ReadWrite) | @intFromEnum(Flags.NoExecute) | @intFromEnum(Flags.Global));

-    return directory;
+    return table;
 }

-fn setUpInitialUserPageDirectory(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t, kernel_directory: *PageDirectory, user_directory: *PageDirectory) !usize {
+fn setUpInitialUserPageDirectory(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t, kernel_table: *PageTable, user_table: *PageTable) !usize {
     const physical_address_space_size = mmap.getAddressSpaceSize(tag) orelse return error.InvalidMemoryMap;

-    user_directory.* = std.mem.zeroes(PageDirectory);
+    user_table.* = std.mem.zeroes(PageTable);

-    const directory_upper_half: *[256]PageTableEntry = kernel_directory.entries[256..];
-    const user_directory_upper_half: *[256]PageTableEntry = user_directory.entries[256..];
+    const directory_upper_half: *[256]PageTableEntry = kernel_table.entries[256..];
+    const user_directory_upper_half: *[256]PageTableEntry = user_table.entries[256..];
     @memcpy(user_directory_upper_half, directory_upper_half);

     const user_physical_address_base = (USER_ADDRESS_RANGE_END + 1) - physical_address_space_size;

-    const mapper = MemoryMapper.create(.{ .address = @intFromPtr(user_directory) }, 0);
+    const space = AddressSpace.create(.{ .address = @intFromPtr(user_table) }, 0);

-    try mapPhysicalMemory(allocator, tag, mapper, user_physical_address_base, @intFromEnum(Flags.ReadWrite) | @intFromEnum(Flags.NoExecute) | @intFromEnum(Flags.User));
+    try mapPhysicalMemory(allocator, tag, space, user_physical_address_base, @intFromEnum(Flags.ReadWrite) | @intFromEnum(Flags.NoExecute) | @intFromEnum(Flags.User));

     return user_physical_address_base;
 }

-pub fn createInitialMappings(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t, user_directory: *PageDirectory) !usize {
+pub fn createInitialMappings(allocator: *pmm.FrameAllocator, tag: *easyboot.multiboot_tag_mmap_t, user_table: *PageTable) !usize {
     const frame = try setUpKernelPageDirectory(allocator, tag);
-    const mapper = MemoryMapper.create(frame, 0);
-    const base = try setUpInitialUserPageDirectory(allocator, tag, mapper.directory, user_directory);
+    const space = AddressSpace.create(frame, 0);
+    const base = try setUpInitialUserPageDirectory(allocator, tag, space.table, user_table);

-    setPageDirectory(mapper.phys);
+    setPageTable(space.phys);

     allocator.bitmap.location = @ptrFromInt(@as(usize, PHYSICAL_MAPPING_BASE) + @intFromPtr(allocator.bitmap.location));

     return base;
 }

-pub fn readPageDirectory() pmm.PhysFrame {
+pub fn readPageTable() pmm.PhysFrame {
     var address: u64 = undefined;
     asm volatile ("mov %%cr3, %[dir]"
         : [dir] "=r" (address),
     );
     return .{ .address = address };
 }
-pub fn setPageDirectory(directory: pmm.PhysFrame) void {
+
+pub fn setPageTable(directory: pmm.PhysFrame) void {
     asm volatile ("mov %[dir], %%cr3"
         :
         : [dir] "{rdi}" (directory.address),
@@ -58,7 +58,7 @@ fn canWriteSegment(flags: u32) bool {
     return (flags & 2) > 0;
 }

-pub fn loadElf(allocator: *pmm.FrameAllocator, mapper: vmm.MemoryMapper, base_address: pmm.PhysFrame) !usize {
+pub fn loadElf(allocator: *pmm.FrameAllocator, space: vmm.AddressSpace, base_address: pmm.PhysFrame) !usize {
     const address = base_address.virtualAddress(vmm.PHYSICAL_MAPPING_BASE);

     debug.print("Address: {}\n", .{address});
@@ -117,15 +117,15 @@ pub fn loadElf(allocator: *pmm.FrameAllocator, mapper: vmm.MemoryMapper, base_ad
             if (canExecuteSegment(program_header.p_flags)) flags &= ~@as(u32, @intFromEnum(vmm.Flags.NoExecute));

             // Allocate physical memory for the segment
-            try vmm.allocAndMap(allocator, mapper, base_vaddr, try std.math.divCeil(usize, program_header.p_memsz + vaddr_diff, platform.PAGE_SIZE), flags);
+            try vmm.allocAndMap(allocator, space, base_vaddr, try std.math.divCeil(usize, program_header.p_memsz + vaddr_diff, platform.PAGE_SIZE), flags);

-            try vmm.memsetUser(mapper, vmm.PHYSICAL_MAPPING_BASE, base_vaddr, 0, vaddr_diff);
+            try vmm.memsetUser(space, vmm.PHYSICAL_MAPPING_BASE, base_vaddr, 0, vaddr_diff);

-            try vmm.copyToUser(mapper, vmm.PHYSICAL_MAPPING_BASE, program_header.p_vaddr, @ptrFromInt(address + program_header.p_offset), program_header.p_filesz);
+            try vmm.copyToUser(space, vmm.PHYSICAL_MAPPING_BASE, program_header.p_vaddr, @ptrFromInt(address + program_header.p_offset), program_header.p_filesz);

             const bss_size = program_header.p_memsz - program_header.p_filesz;

-            try vmm.memsetUser(mapper, vmm.PHYSICAL_MAPPING_BASE, program_header.p_vaddr + program_header.p_filesz, 0, bss_size);
+            try vmm.memsetUser(space, vmm.PHYSICAL_MAPPING_BASE, program_header.p_vaddr + program_header.p_filesz, 0, bss_size);
         } else {
             debug.print("ELF: Encountered non-loadable program header, skipping\n", .{});
         }
@@ -142,11 +142,11 @@ pub fn loadElf(allocator: *pmm.FrameAllocator, mapper: vmm.MemoryMapper, base_ad
     return elf_header.e_entry;
 }

-pub fn allocateStack(allocator: *pmm.FrameAllocator, mapper: vmm.MemoryMapper, stack_top: usize, stack_size: usize) !usize {
+pub fn allocateStack(allocator: *pmm.FrameAllocator, space: vmm.AddressSpace, stack_top: usize, stack_size: usize) !usize {
     const pages = try std.math.divCeil(usize, stack_size, platform.PAGE_SIZE);
     const stack_bottom = stack_top - (pages * platform.PAGE_SIZE);

-    try vmm.allocAndMap(allocator, mapper, stack_bottom, pages, @intFromEnum(vmm.Flags.ReadWrite) | @intFromEnum(vmm.Flags.User) | @intFromEnum(vmm.Flags.NoExecute));
+    try vmm.allocAndMap(allocator, space, stack_bottom, pages, @intFromEnum(vmm.Flags.ReadWrite) | @intFromEnum(vmm.Flags.User) | @intFromEnum(vmm.Flags.NoExecute));

     return stack_top - 16;
 }
@@ -14,7 +14,7 @@ const MultibootInfo = [*c]u8;

 const Context = struct {
     allocator: *pmm.FrameAllocator,
-    mapper: vmm.MemoryMapper,
+    space: vmm.AddressSpace,
     regs: *interrupts.InterruptStackFrame,
 };

@@ -41,8 +41,8 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
         while (true) {}
     };

-    var dir: vmm.PageDirectory = std.mem.zeroes(vmm.PageDirectory);
-    const base: usize = vmm.createInitialMappings(&allocator, tag, &dir) catch |err| {
+    var table: vmm.PageTable = std.mem.zeroes(vmm.PageTable);
+    const base: usize = vmm.createInitialMappings(&allocator, tag, &table) catch |err| {
         debug.print("Error while creating initial mappings: {}\n", .{err});
         while (true) {}
     };
@@ -55,8 +55,8 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
     };

     // At this point the physical address space is already mapped into kernel virtual memory.
-    const mapper = vmm.MemoryMapper.create(frame, vmm.PHYSICAL_MAPPING_BASE);
-    mapper.directory.* = dir;
+    const space = vmm.AddressSpace.create(frame, vmm.PHYSICAL_MAPPING_BASE);
+    space.table.* = table;

     cpu.setupCore(&allocator) catch |err| {
         debug.print("Error while setting up core-specific scheduler structures: {}\n", .{err});
@@ -68,12 +68,12 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
         while (true) {}
     };

-    init.mapper = mapper;
+    init.address_space = space;
     init.user_priority = 255;
     thread.arch.initUserRegisters(&init.regs);
-    thread.arch.setArguments(&init.regs, base, mapper.phys.address);
+    thread.arch.setArguments(&init.regs, base, space.phys.address);

-    const ctx = Context{ .allocator = &allocator, .mapper = mapper, .regs = &init.regs };
+    const ctx = Context{ .allocator = &allocator, .space = space, .regs = &init.regs };

     multiboot.findMultibootTags(easyboot.multiboot_tag_module_t, @ptrCast(info), struct {
         fn handler(mod: *easyboot.multiboot_tag_module_t, c: *const anyopaque) void {
@@ -82,7 +82,7 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
             if (std.mem.eql(u8, mod.string()[0..name.len], name[0..name.len])) {
                 const phys_frame = pmm.PhysFrame{ .address = mod.mod_start };
                 debug.print("Loading init from module at address {x}, virtual {x}\n", .{ mod.mod_start, phys_frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE) });
-                const entry = elf.loadElf(context.allocator, context.mapper, pmm.PhysFrame{ .address = mod.mod_start }) catch |err| {
+                const entry = elf.loadElf(context.allocator, context.space, pmm.PhysFrame{ .address = mod.mod_start }) catch |err| {
                     debug.print("Error while loading ELF file for init: {}\n", .{err});
                     while (true) {}
                 };
@@ -92,7 +92,7 @@ export fn _start(magic: u32, info: MultibootInfo) callconv(.C) noreturn {
     }.handler, &ctx);

     const default_stack_size = 0x80000; // 512 KiB.
-    const stack = elf.allocateStack(&allocator, mapper, base - platform.PAGE_SIZE, default_stack_size) catch |err| {
+    const stack = elf.allocateStack(&allocator, space, base - platform.PAGE_SIZE, default_stack_size) catch |err| {
         debug.print("Error while creating stack for init: {}\n", .{err});
         while (true) {}
     };
@@ -14,7 +14,7 @@ pub const ThreadState = enum {

 pub const ThreadControlBlock = struct {
     id: u64,
-    mapper: ?vmm.MemoryMapper,
+    address_space: ?vmm.AddressSpace,
     regs: interrupts.InterruptStackFrame,
     state: ThreadState,
     user_priority: u8,
@@ -36,13 +36,13 @@ pub fn enterTask(task: *ThreadControlBlock) noreturn {

     task.ticks = ALLOCATED_TICKS_PER_TASK;

-    var directory = vmm.readPageDirectory();
+    var table = vmm.readPageTable();

-    if (task.mapper) |mapper| {
-        directory = mapper.phys;
+    if (task.address_space) |space| {
+        table = space.phys;
     }

-    arch.enterTask(&task.regs, vmm.PHYSICAL_MAPPING_BASE, directory.address);
+    arch.enterTask(&task.regs, vmm.PHYSICAL_MAPPING_BASE, table.address);
 }

 fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlock) void {
@@ -51,8 +51,8 @@ fn switchTask(regs: *interrupts.InterruptStackFrame, new_task: *ThreadControlBlo
     core.current_thread.regs = regs.*;
     regs.* = new_task.regs;

-    if (new_task.mapper) |mapper| {
-        if (vmm.readPageDirectory().address != mapper.phys.address) vmm.setPageDirectory(mapper.phys);
+    if (new_task.address_space) |space| {
+        if (vmm.readPageTable().address != space.phys.address) vmm.setPageTable(space.phys);
     }

     new_task.ticks = ALLOCATED_TICKS_PER_TASK;
@@ -183,7 +183,7 @@ pub fn createThreadControlBlock(allocator: *pmm.FrameAllocator) !*ThreadControlB
     const node: *ThreadList.Node = @ptrFromInt(frame.virtualAddress(vmm.PHYSICAL_MAPPING_BASE));
     const thread = &node.data;
     thread.id = next_id.fetchAdd(1, .seq_cst);
-    thread.mapper = null;
+    thread.address_space = null;
     thread.regs = std.mem.zeroes(@TypeOf(thread.regs));
     thread.state = .Inactive;
     thread.user_priority = 127;