Compare commits

...

3 Commits

6 changed files with 447 additions and 6 deletions


@@ -3,9 +3,9 @@ const system = @import("system");
 const vm = system.vm;
 const syscalls = system.syscalls;
 const buffer = system.ring_buffer;
+const heap = system.heap;

-// FIXME: Make arch-specific.
-const PAGE_SIZE = 4096;
+const PAGE_SIZE = vm.PAGE_SIZE;

 fn setupKernelRingBuffer(base: u64) !buffer.RingBuffer {
     const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
@@ -33,6 +33,44 @@ fn discoverThreadLimit() u64 {
     }
 }

+fn setupKernelRingBufferForThread(mapper: *const vm.MemoryMapper, pid: u64, virt: u64) !void {
+    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
+    try vm.map(mapper, virt, phys, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
+    try syscalls.setEventQueue(pid, phys.address);
+}
+
+fn setupRingBufferForThread(mapper: *const vm.MemoryMapper, base: u64, virt: u64) !buffer.RingBuffer {
+    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
+    try vm.map(mapper, virt, phys, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
+    const data: [*]u8 = @ptrCast(phys.virtualPointer(u8, base));
+    return buffer.RingBuffer.init(data, PAGE_SIZE, true);
+}
+
+fn setupThread(pid: u64, base: u64) !system.ipc.Connection {
+    const space = try syscalls.getAddressSpace(pid);
+    const mapper = vm.MemoryMapper.create(.{ .address = space }, base);
+    const ipc_base = 0x1000; // FIXME: Find a good place in the address space and guarantee this is free.
+
+    try setupKernelRingBufferForThread(&mapper, pid, ipc_base + system.ipc.KERNEL_BUFFER_ADDRESS_OFFSET);
+
+    // INIT_WRITE and INIT_READ are inverted here because when the process writes, init reads.
+    const read_buffer = try setupRingBufferForThread(&mapper, base, ipc_base + system.ipc.INIT_WRITE_BUFFER_ADDRESS_OFFSET);
+    const write_buffer = try setupRingBufferForThread(&mapper, base, ipc_base + system.ipc.INIT_READ_BUFFER_ADDRESS_OFFSET);
+    const connection: system.ipc.Connection = .{ .pid = pid, .read_buffer = read_buffer, .write_buffer = write_buffer };
+
+    try syscalls.setThreadArguments(pid, base, ipc_base);
+    try syscalls.startThread(pid);
+
+    return connection;
+}
+
 export fn _start(base: u64, address: u64) callconv(.C) noreturn {
     setTokens();
@@ -42,15 +80,25 @@ export fn _start(base: u64, address: u64) callconv(.C) noreturn {
     const threads = discoverThreadLimit();
     syscalls.print(threads);

+    const pid: u64 = 2;
+    var connection = setupThread(pid, base) catch {
+        while (true) {}
+    };
+
     const event_queue = setupKernelRingBuffer(base) catch {
         while (true) {}
     };
-    _ = event_queue;
+    system.ipc.setKernelBuffer(event_queue);

     var counter: u64 = 0;
     while (true) : (counter += 4) {
+        var data: u8 = undefined;
+        if (connection.read_buffer.read(@ptrCast(&data), 1)) {
+            syscalls.print(data);
+        }
+
         syscalls.sleep(1000);
         syscalls.print(counter);
     }


@@ -7,6 +7,8 @@ const MapError = error{
     OutOfMemory,
 };

+pub const PAGE_SIZE = 4096;
+
 pub const PhysFrame = struct {
     address: u64,

@@ -157,7 +159,7 @@ pub fn unmap(mapper: *const MemoryMapper, virt_address: u64) !PhysFrame {
     return frame;
 }

-pub fn getEntry(mapper: MemoryMapper, virt_address: u64) ?*PageTableEntry {
+pub fn getEntry(mapper: *const MemoryMapper, virt_address: u64) ?*PageTableEntry {
     const indexes = calculatePageTableIndexes(virt_address);
     const l4 = &mapper.directory.entries[indexes.level4];
     if (l4.present == 0) return null;

@@ -176,7 +178,7 @@ pub fn getEntry(mapper: MemoryMapper, virt_address: u64) ?*PageTableEntry {
     return l1;
 }

-pub fn getPhysical(mapper: MemoryMapper, virt_address: u64) ?PhysFrame {
+pub fn getPhysical(mapper: *const MemoryMapper, virt_address: u64) ?PhysFrame {
     const entry = getEntry(mapper, virt_address) orelse return null;
     return PhysFrame{ .address = entry.getAddress() };
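The switch to *const MemoryMapper above matches how the new heap code calls these functions: it keeps a MemoryMapper inside a struct and passes its address rather than a copy. A minimal sketch of that call pattern (pageIsFree is a hypothetical helper, not part of this change):

const vm = @import("arch/vm.zig");

// Hypothetical helper: a page is treated as free when no physical frame is mapped at it.
fn pageIsFree(mapper: *const vm.MemoryMapper, virt: u64) bool {
    return vm.getPhysical(mapper, virt) == null;
}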

system/lib/heap.zig (new file)

@@ -0,0 +1,344 @@
const std = @import("std");
const syscalls = @import("syscalls.zig");
const vm = @import("arch/vm.zig");

const PAGE_SIZE = vm.PAGE_SIZE;

const VirtualMemoryAllocator = struct {
    mapper: vm.MemoryMapper,
    base: usize,
    end: usize,
    start: usize,

    pub fn create(mapper: vm.MemoryMapper, base: usize, end: usize) VirtualMemoryAllocator {
        return .{ .mapper = mapper, .base = base, .end = end, .start = base };
    }

    fn isAvailable(self: *VirtualMemoryAllocator, page: usize) bool {
        if (vm.getPhysical(&self.mapper, page)) |_| {
            return false;
        } else {
            return true;
        }
    }

    fn findFreeVirtualMemory(self: *VirtualMemoryAllocator, count: u64) ?usize {
        var page = self.start;
        var first_free_page: usize = 0;
        var free_contiguous_pages: u64 = 0;
        while (page < self.end) : (page += PAGE_SIZE) {
            if (!self.isAvailable(page)) {
                free_contiguous_pages = 0;
                continue;
            }
            if (free_contiguous_pages == 0) first_free_page = page;
            free_contiguous_pages += 1;
            // Found enough contiguous free pages!!
            if (free_contiguous_pages == count) {
                self.start = first_free_page + (PAGE_SIZE * count);
                return first_free_page;
            }
        }
        return null;
    }

    pub fn allocAndMap(self: *VirtualMemoryAllocator, count: u64) !usize {
        const base = self.findFreeVirtualMemory(count) orelse return error.OutOfMemory;
        var virtual_address = base;
        var pages_mapped: u64 = 0;
        while (pages_mapped < count) : (pages_mapped += 1) {
            const address = try syscalls.allocFrame();
            try vm.map(&self.mapper, virtual_address, .{ .address = address }, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
            virtual_address += PAGE_SIZE;
        }
        return base;
    }

    pub fn unmapAndFree(self: *VirtualMemoryAllocator, base: usize, count: u64) !void {
        var virtual_address = base;
        var pages_unmapped: u64 = 0;
        while (pages_unmapped < count) : (pages_unmapped += 1) {
            const frame = try vm.unmap(&self.mapper, virtual_address);
            syscalls.freeFrame(frame.address);
            virtual_address += PAGE_SIZE;
        }
        self.start = @min(self.start, base);
    }
};
const MEMORY_BLOCK_FREE_TAG = 0xffeeffcc;
const MEMORY_BLOCK_USED_TAG = 0xee55ee66;

const MemoryBlockStatus = enum(u16) {
    BlockDefault = 0,
    BlockUsed = 1 << 0,
    BlockStartOfMemory = 1 << 1,
    BlockEndOfMemory = 1 << 2,
};

const MemoryBlockTag = packed struct {
    tag: u32,
    status: u16,
    alignment: u16,
    base_address: u64,
    used: u64,
    allocated: u64,
};

const MemoryBlockList = std.DoublyLinkedList(MemoryBlockTag);

// Not thread-safe and depends on userspace memory manipulation; should only be used in
// single-threaded system processes (such as init).
pub const SystemAllocator = struct {
    tags: MemoryBlockList,
    underlying_alloc: VirtualMemoryAllocator,

    pub fn init(mapper: vm.MemoryMapper, base: usize, end: usize) SystemAllocator {
        return .{ .tags = .{}, .underlying_alloc = VirtualMemoryAllocator.create(mapper, base, end) };
    }
    pub fn allocator(self: *SystemAllocator) std.mem.Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = resize,
                .free = free,
            },
        };
    }

    fn ptrFromBlockNode(block: *MemoryBlockList.Node) [*]u8 {
        return @ptrFromInt(@intFromPtr(block) + @sizeOf(MemoryBlockList.Node));
    }

    fn blockNodeFromPtr(ptr: [*]u8) *MemoryBlockList.Node {
        return @ptrFromInt(@intFromPtr(ptr) - @sizeOf(MemoryBlockList.Node));
    }

    fn isBlockFree(block: *MemoryBlockList.Node) bool {
        return (block.data.status & @intFromEnum(MemoryBlockStatus.BlockUsed)) == 0;
    }

    fn checkStatus(block: *MemoryBlockList.Node, status: MemoryBlockStatus) bool {
        return (block.data.status & @intFromEnum(status)) == @intFromEnum(status);
    }

    fn spaceAvailable(block: *MemoryBlockList.Node) u64 {
        return block.data.allocated - block.data.used;
    }

    fn alignBlockAddressDownwards(block_address: usize, alignment: usize) usize {
        var object_address = block_address + @sizeOf(MemoryBlockList.Node);
        object_address -= @rem(object_address, alignment);
        return object_address - @sizeOf(MemoryBlockList.Node);
    }

    fn alignBlockAddressUpwards(block_address: usize, alignment: usize) usize {
        var object_address = block_address + @sizeOf(MemoryBlockList.Node);
        const unalignment = @rem(object_address, alignment);
        if (unalignment != 0) object_address += (alignment - unalignment);
        return object_address - @sizeOf(MemoryBlockList.Node);
    }

    fn getSplitOffset(block: *MemoryBlockList.Node, min: usize, alignment: usize) ?u64 {
        var available = spaceAvailable(block);
        available -= min; // reserve only min size for the new block.
        var block_offset = available + block.data.used;
        var block_address = @intFromPtr(ptrFromBlockNode(block)) + block_offset;
        block_address = alignBlockAddressDownwards(block_address, alignment);
        block_offset = block_address - @intFromPtr(ptrFromBlockNode(block));
        if (block_offset < block.data.used) return null;
        return block_offset;
    }
    fn split(list: *MemoryBlockList, block: *MemoryBlockList.Node, len: usize, alignment: usize) ?*MemoryBlockList.Node {
        const available = spaceAvailable(block);
        const old_size = block.data.allocated;
        if (available < (len + @sizeOf(MemoryBlockList.Node))) return null; // Not enough space in this block.
        const offset = getSplitOffset(block, len + @sizeOf(MemoryBlockList.Node), alignment) orelse return null;
        block.data.allocated = offset;

        const new_node: *MemoryBlockList.Node = @ptrFromInt(@as(usize, @intFromPtr(block)) + offset + @sizeOf(MemoryBlockList.Node));
        new_node.* = std.mem.zeroes(MemoryBlockList.Node);
        new_node.data.tag = MEMORY_BLOCK_USED_TAG;
        if (checkStatus(block, MemoryBlockStatus.BlockEndOfMemory)) {
            new_node.data.status = @intFromEnum(MemoryBlockStatus.BlockEndOfMemory);
        } else {
            new_node.data.status = @intFromEnum(MemoryBlockStatus.BlockDefault);
        }
        new_node.data.allocated = old_size - (offset + @sizeOf(MemoryBlockList.Node));
        new_node.data.alignment = @truncate(alignment);
        new_node.data.base_address = block.data.base_address;
        list.insertAfter(block, new_node);
        block.data.status &= ~@intFromEnum(MemoryBlockStatus.BlockEndOfMemory); // This block is no longer the last block in its memory range.

        return new_node;
    }

    fn combineForward(list: *MemoryBlockList, block: *MemoryBlockList.Node) void {
        // This block ends a memory range; it cannot be combined with blocks outside its range.
        if (checkStatus(block, MemoryBlockStatus.BlockEndOfMemory)) return;

        // The caller needs to ensure there is a next block.
        const next = block.next.?;
        // The next block starts a memory range; it cannot be combined with blocks outside its range.
        if (checkStatus(next, MemoryBlockStatus.BlockStartOfMemory)) return;

        list.remove(next);
        next.data.tag = MEMORY_BLOCK_FREE_TAG;

        block.data.allocated += next.data.allocated + @sizeOf(MemoryBlockList.Node);
        if (checkStatus(next, MemoryBlockStatus.BlockEndOfMemory)) {
            block.data.status |= @intFromEnum(MemoryBlockStatus.BlockEndOfMemory);
        }
    }

    fn combineBackward(list: *MemoryBlockList, block: *MemoryBlockList.Node) *MemoryBlockList.Node {
        // This block starts a memory range; it cannot be combined with blocks outside its range.
        if (checkStatus(block, MemoryBlockStatus.BlockStartOfMemory)) return block;

        // The caller needs to ensure there is a previous block.
        const last = block.prev.?;
        // The previous block ends a memory range; it cannot be combined with blocks outside its range.
        if (checkStatus(last, MemoryBlockStatus.BlockEndOfMemory)) return block;

        list.remove(block);
        block.data.tag = MEMORY_BLOCK_FREE_TAG;

        last.data.allocated += block.data.allocated + @sizeOf(MemoryBlockList.Node);
        if (checkStatus(block, MemoryBlockStatus.BlockEndOfMemory)) {
            last.data.status |= @intFromEnum(MemoryBlockStatus.BlockEndOfMemory);
        }
        return last;
    }
    const MINIMUM_PAGES_PER_ALLOCATION = 4;

    fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, _: usize) ?[*]u8 {
        const self: *SystemAllocator = @ptrCast(@alignCast(ctx));
        if (len == 0) return null;

        var alignment = @as(usize, 1) << @truncate(ptr_align);
        if (alignment < 16) alignment = 16;

        var iter = self.tags.first;
        while (iter) |tag| {
            iter = tag.next;
            if (isBlockFree(tag)) {
                if (tag.data.allocated < len) continue;
                iter = tag; // Reuse this free block (iter was already advanced above).
                break;
            }
            iter = split(&self.tags, tag, len, alignment) orelse continue;
            break;
        }

        if (iter == null) {
            // Round up so allocations that do not end exactly on a page boundary still get enough pages.
            const pages_needed = std.math.divCeil(usize, len + @sizeOf(MemoryBlockList.Node), PAGE_SIZE) catch return null;
            const pages: usize = @max(MINIMUM_PAGES_PER_ALLOCATION, pages_needed);
            const base_address = self.underlying_alloc.allocAndMap(pages) catch return null;
            const address = alignBlockAddressUpwards(base_address, alignment);
            const padding = address - base_address;

            const node: *MemoryBlockList.Node = @ptrFromInt(address);
            node.* = std.mem.zeroes(MemoryBlockList.Node);
            node.data.allocated = (pages * PAGE_SIZE) - (@sizeOf(MemoryBlockList.Node) + padding);
            node.data.tag = MEMORY_BLOCK_USED_TAG;
            node.data.status = @intFromEnum(MemoryBlockStatus.BlockStartOfMemory) | @intFromEnum(MemoryBlockStatus.BlockEndOfMemory);
            node.data.alignment = @truncate(alignment);
            node.data.base_address = base_address;
            self.tags.append(node);
            iter = node;
        }

        const tag = iter.?;
        tag.data.used = len;
        tag.data.status |= @intFromEnum(MemoryBlockStatus.BlockUsed);
        return ptrFromBlockNode(tag);
    }
    fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, _: usize) bool {
        _ = ctx;

        var alignment: usize = @as(usize, 1) << @truncate(buf_align);
        if (alignment < 16) alignment = 16;

        const block = blockNodeFromPtr(buf.ptr);
        if (block.data.tag != MEMORY_BLOCK_USED_TAG) return false;
        if (block.data.alignment != alignment) return false;
        if (isBlockFree(block)) return false; // Cannot resize a block that is not in use.
        if (new_len > block.data.allocated) return false;

        block.data.used = new_len;
        return true;
    }

    fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, _: usize) void {
        const self: *SystemAllocator = @ptrCast(@alignCast(ctx));

        var alignment: usize = @as(usize, 1) << @truncate(buf_align);
        if (alignment < 16) alignment = 16;

        var block = blockNodeFromPtr(buf.ptr);
        if (block.data.tag != MEMORY_BLOCK_USED_TAG) return;
        if (block.data.alignment != alignment) return;
        if (isBlockFree(block)) return; // Guard against double frees.

        block.data.status &= ~@intFromEnum(MemoryBlockStatus.BlockUsed);

        const maybe_next = block.next;
        if (maybe_next) |next| {
            if (isBlockFree(next)) combineForward(&self.tags, block);
        }

        const maybe_last = block.prev;
        if (maybe_last) |last| {
            if (isBlockFree(last)) block = combineBackward(&self.tags, block);
        }

        if (checkStatus(block, MemoryBlockStatus.BlockStartOfMemory) and checkStatus(block, MemoryBlockStatus.BlockEndOfMemory)) {
            self.tags.remove(block);

            const base_address = block.data.base_address;
            const block_address = @intFromPtr(block);
            const padding = block_address - base_address;
            const pages = std.math.divCeil(usize, block.data.allocated + padding + @sizeOf(MemoryBlockList.Node), PAGE_SIZE) catch return;
            self.underlying_alloc.unmapAndFree(base_address, pages) catch return;
        }
    }
};
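Nothing in this change exercises the allocator yet; per the comment above it is intended for single-threaded system processes such as init. A rough usage sketch under that assumption (the mapper argument and the 0x4000_0000..0x5000_0000 range are placeholders, not addresses used anywhere in this branch):

const system = @import("system");

fn heapExample(mapper: system.vm.MemoryMapper) !void {
    // Placeholder range; a real caller must pick a region it knows is unmapped.
    var sys_heap = system.heap.SystemAllocator.init(mapper, 0x4000_0000, 0x5000_0000);
    const alloc = sys_heap.allocator();

    const bytes = try alloc.alloc(u8, 64);
    defer alloc.free(bytes);
    @memset(bytes, 0);
}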

system/lib/ipc.zig (new file)

@@ -0,0 +1,38 @@
const buffer = @import("ring_buffer.zig");
const vm = @import("arch/vm.zig");

pub const Connection = struct {
    pid: u64,
    read_buffer: buffer.RingBuffer,
    write_buffer: buffer.RingBuffer,
};

pub const KERNEL_BUFFER_ADDRESS_OFFSET = 0x0000;
pub const INIT_WRITE_BUFFER_ADDRESS_OFFSET = 0x1000;
pub const INIT_READ_BUFFER_ADDRESS_OFFSET = 0x2000;

var kernel_buffer: ?buffer.RingBuffer = null;

pub fn setKernelBuffer(buf: buffer.RingBuffer) void {
    kernel_buffer = buf;
}

pub fn getKernelBuffer() ?buffer.RingBuffer {
    return kernel_buffer;
}

const PAGE_SIZE = vm.PAGE_SIZE;

fn createPageBufferFromAddress(address: u64) buffer.RingBuffer {
    const data: [*]u8 = @ptrFromInt(address);
    return buffer.RingBuffer.init(data, PAGE_SIZE, false);
}

pub fn readInitBuffers(base_address: u64) Connection {
    kernel_buffer = createPageBufferFromAddress(base_address + KERNEL_BUFFER_ADDRESS_OFFSET);
    const read_buffer = createPageBufferFromAddress(base_address + INIT_READ_BUFFER_ADDRESS_OFFSET);
    const write_buffer = createPageBufferFromAddress(base_address + INIT_WRITE_BUFFER_ADDRESS_OFFSET);
    return .{ .pid = 0, .read_buffer = read_buffer, .write_buffer = write_buffer };
}

@@ -2,3 +2,5 @@ pub const kernel = @import("kernel.zig");
 pub const ring_buffer = @import("ring_buffer.zig");
 pub const syscalls = @import("syscalls.zig");
 pub const vm = @import("arch/vm.zig");
+pub const ipc = @import("ipc.zig");
+pub const heap = @import("heap.zig");


@@ -9,8 +9,15 @@ fn setTokens() void {
     syscalls.setTokens(syscalls.getThreadId(), tokens) catch {};
 }

-export fn _start(_: u64, _: u64) callconv(.C) noreturn {
+export fn _start(_: u64, ipc_base: u64) callconv(.C) noreturn {
     setTokens();

+    var connection = system.ipc.readInitBuffers(ipc_base);
+
+    const byte: u8 = 127;
+    _ = connection.write_buffer.write(@ptrCast(&byte), 1);
+    syscalls.yield();
+
     while (true) {}
 }
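The Connection returned by readInitBuffers is duplex, and init's loop in the first file polls its read end the same way. A sketch of the equivalent polling helper on the process side (pollConnection is hypothetical, not part of this change, and assumes the same system and syscalls imports as the file above):

fn pollConnection(connection: *system.ipc.Connection) noreturn {
    // Mirrors init's loop: read one byte at a time from the duplex connection and print it.
    while (true) {
        var incoming: u8 = undefined;
        if (connection.read_buffer.read(@ptrCast(&incoming), 1)) {
            syscalls.print(incoming);
        }
        syscalls.yield();
    }
}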