core+init+system: Add basic IPC
parent e699eb4cfa
commit ed69833361

core/src/sys/ipc.zig (new file, 34 lines)
@@ -0,0 +1,34 @@
+const std = @import("std");
+const system = @import("system");
+const platform = @import("../arch/platform.zig");
+const sys = @import("syscall.zig");
+const thread = @import("../thread.zig");
+const cpu = @import("../arch/cpu.zig");
+const pmm = @import("../pmm.zig");
+const vmm = @import("../arch/vmm.zig");
+
+pub fn send(regs: *platform.Registers, args: *sys.Arguments, retval: *isize) anyerror!void {
+    try asyncSend(regs, args, retval);
+
+    _ = thread.block(regs);
+}
+
+pub fn asyncSend(_: *platform.Registers, args: *sys.Arguments, _: *isize) anyerror!void {
+    const core = cpu.thisCore();
+    const target = thread.lookupThreadById(args.arg0) orelse return error.NoSuchThread;
+
+    var queue = target.event_queue orelse return error.ThreadMessagingNotAvailable;
+
+    var data: [2]u64 = std.mem.zeroes([2]u64);
+    data[0] = @intFromEnum(system.kernel.KernelMessage.MessageReceived);
+    data[1] = core.current_thread.id;
+    _ = queue.writeType([2]u64, &data);
+
+    if (target.state == .Blocked) {
+        thread.reviveThread(core, target);
+    }
+}
+
+pub fn wait(regs: *platform.Registers, _: *sys.Arguments, _: *isize) anyerror!void {
+    _ = thread.block(regs);
+}
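Note: asyncSend carries no payload. It enqueues a fixed [2]u64 record (message type, sender thread id) into the target's kernel event queue and revives the target if it is Blocked; send is the same operation followed by blocking the caller. The payload itself travels through the shared ring buffers that init maps. A minimal sketch of the resulting client flow, assuming the userspace wrappers added further down in this commit:

// Client side (hypothetical): stage a byte in the shared ring, then notify init (pid 1).
var value: u8 = 42;
_ = connection.write(u8, &value); // payload goes through the Connection ring
syscalls.asyncSend(1);            // kernel queues { MessageReceived, our pid } for init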
core/src/sys/syscall.zig

@@ -5,6 +5,7 @@ const print = @import("print.zig");
 const mem = @import("mem.zig");
 const sched = @import("sched.zig");
 const tokens = @import("token.zig");
+const ipc = @import("ipc.zig");
 const cpu = @import("../arch/cpu.zig");
 
 pub const Arguments = struct {
@@ -37,6 +38,9 @@ const syscalls = [_]SystemCall{
     sched.startThread,
     sched.getThreadId,
     mem.getAddressSpace,
+    ipc.send,
+    ipc.asyncSend,
+    ipc.wait,
 };
 
 pub fn invokeSyscall(number: usize, frame: *platform.Registers, args: *Arguments, retval: *isize) void {
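Note: the syscalls table is positional, so these three entries must stay in the same order as Send, AsyncSend and Wait in the SystemCall enum below; the enum value is the table index. A rough sketch of what invokeSyscall presumably does with it (the error-to-retval mapping is an assumption, not shown in this hunk):

// Handler lookup by syscall number (sketch).
const handler = syscalls[number];
handler(frame, args, retval) catch {
    retval.* = -1; // hypothetical: report failure to userspace
};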
system/init/boot.zig (new file, 25 lines)

@@ -0,0 +1,25 @@
+const system = @import("system");
+
+const vm = system.vm;
+const syscalls = system.syscalls;
+const buffer = system.ring_buffer;
+
+pub fn setupKernelRingBuffer(base: u64) !void {
+    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
+
+    const data: [*]u8 = @ptrCast(phys.virtualPointer(u8, base));
+
+    try syscalls.setEventQueue(syscalls.getThreadId(), phys.address);
+
+    const event_queue = buffer.RingBuffer.init(data, vm.PAGE_SIZE, true);
+
+    system.ipc.setKernelBuffer(event_queue);
+}
+
+pub fn discoverThreadLimit() u64 {
+    var pid: u64 = 1;
+    while (true) {
+        _ = syscalls.getPriority(pid) catch return (pid - 1);
+        pid += 1;
+    }
+}
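Note: discoverThreadLimit assumes thread ids are dense and start at 1: it probes getPriority(pid) upward and returns the last pid that did not error. For example, with threads 1 through 4 alive, getPriority(5) fails and the function returns 4.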
system/init/main.zig

@@ -1,4 +1,7 @@
+const std = @import("std");
 const system = @import("system");
+const thread = @import("thread.zig");
+const boot = @import("boot.zig");
 
 const vm = system.vm;
 const syscalls = system.syscalls;
@@ -7,16 +10,6 @@ const heap = system.heap;
 
 const PAGE_SIZE = vm.PAGE_SIZE;
 
-fn setupKernelRingBuffer(base: u64) !buffer.RingBuffer {
-    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
-
-    const data: [*]u8 = @ptrCast(phys.virtualPointer(u8, base));
-
-    try syscalls.setEventQueue(syscalls.getThreadId(), phys.address);
-
-    return buffer.RingBuffer.init(data, PAGE_SIZE, true);
-}
-
 fn setTokens() void {
     var tokens: u64 = 0;
     tokens |= @intFromEnum(system.kernel.Token.Root);
@@ -25,81 +18,56 @@ fn setTokens() void {
     syscalls.setTokens(syscalls.getThreadId(), tokens) catch {};
 }
 
-fn discoverThreadLimit() u64 {
-    var pid: u64 = 1;
-    while (true) {
-        _ = syscalls.getPriority(pid) catch return (pid - 1);
-        pid += 1;
-    }
-}
-
-fn setupKernelRingBufferForThread(mapper: *const vm.MemoryMapper, pid: u64, virt: u64) !void {
-    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
-
-    try vm.map(mapper, virt, phys, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
-
-    try syscalls.setEventQueue(pid, phys.address);
-}
-
-fn setupRingBufferForThread(mapper: *const vm.MemoryMapper, base: u64, virt: u64) !buffer.RingBuffer {
-    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
-
-    try vm.map(mapper, virt, phys, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
-
-    const data: [*]u8 = @ptrCast(phys.virtualPointer(u8, base));
-
-    return buffer.RingBuffer.init(data, PAGE_SIZE, true);
-}
-
-fn setupThread(pid: u64, base: u64) !system.ipc.Connection {
-    const space = try syscalls.getAddressSpace(pid);
-    const mapper = vm.MemoryMapper.create(.{ .address = space }, base);
-
-    const ipc_base = 0x1000; // FIXME: Find a good place in the address space and guarantee this is free.
-
-    try setupKernelRingBufferForThread(&mapper, pid, ipc_base + system.ipc.KERNEL_BUFFER_ADDRESS_OFFSET);
-    // INIT_WRITE and INIT_READ are inverted here because when the process writes, init reads.
-    const read_buffer = try setupRingBufferForThread(&mapper, base, ipc_base + system.ipc.INIT_WRITE_BUFFER_ADDRESS_OFFSET);
-    const write_buffer = try setupRingBufferForThread(&mapper, base, ipc_base + system.ipc.INIT_READ_BUFFER_ADDRESS_OFFSET);
-
-    const connection: system.ipc.Connection = .{ .pid = pid, .read_buffer = read_buffer, .write_buffer = write_buffer };
-
-    try syscalls.setThreadArguments(pid, base, ipc_base);
-
-    try syscalls.startThread(pid);
-
-    return connection;
-}
-
 export fn _start(base: u64, address: u64) callconv(.C) noreturn {
     main(base, address) catch {
         while (true) {}
     };
 }
 
 inline fn main(base: u64, address: u64) !void {
     setTokens();
 
     const mapper = vm.MemoryMapper.create(.{ .address = address }, base);
-    _ = mapper;
+    var sys_alloc = heap.SystemAllocator.init(mapper, 0x200000, base - 0x200000); // FIXME: Let's not hardcode these.
+    const allocator = sys_alloc.allocator();
 
-    const threads = discoverThreadLimit();
-    syscalls.print(threads);
+    const threads = boot.discoverThreadLimit();
 
-    const pid: u64 = 2;
-    var connection = setupThread(pid, base) catch {
-        while (true) {}
-    };
+    var thread_list = std.AutoHashMap(u64, thread.Thread).init(allocator);
+    try thread_list.ensureTotalCapacity(@intCast(threads));
+    errdefer thread_list.deinit();
 
-    const event_queue = setupKernelRingBuffer(base) catch {
-        while (true) {}
-    };
+    var pid: u64 = 1;
+    while (pid <= threads) : (pid += 1) {
+        if (pid == syscalls.getThreadId()) continue;
+        const t = try thread.setupThread(pid, base);
+        try thread_list.put(pid, t);
+    }
 
-    system.ipc.setKernelBuffer(event_queue);
+    try boot.setupKernelRingBuffer(base);
 
     var counter: u64 = 0;
+    var kernel_queue = system.ipc.getKernelBuffer().?;
 
     while (true) : (counter += 4) {
-        var data: u8 = undefined;
-        if (connection.read_buffer.read(@ptrCast(&data), 1)) {
-            syscalls.print(data);
-        }
+        outer: while (true) {
+            var msg_type: u64 = undefined;
+            while (kernel_queue.readType(u64, &msg_type)) {
+                switch (msg_type) {
+                    @intFromEnum(system.kernel.KernelMessage.MessageReceived) => {
+                        var id: u64 = undefined;
+                        if (!kernel_queue.readType(u64, &id)) continue :outer;
+
+                        var sender = thread_list.getPtr(id).?;
+
+                        var data: u8 = undefined;
+                        if (sender.connection.read(u8, &data)) {
+                            syscalls.print(id);
+                            syscalls.print(data);
+                        }
+                    },
+                    else => {},
+                }
+            }
 
-        syscalls.sleep(1000);
-        syscalls.print(counter);
+            syscalls.wait();
+        }
     }
 }
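Note: init's receive path is now two-layered: the kernel event queue carries fixed-size [2]u64 records written by core/src/sys/ipc.zig above, and the per-thread Connection rings carry the payload bytes. A sketch of the record layout as a single read, assuming readType's all-or-nothing behavior:

// Each kernel event is two u64 words: { message type, sender pid }.
var event: [2]u64 = undefined;
if (kernel_queue.readType([2]u64, &event)) {
    // event[0] == @intFromEnum(KernelMessage.MessageReceived), event[1] == sender pid
}

Reading the record in one readType([2]u64, ...) call like this would also avoid the torn-read case that the `continue :outer` above has to handle.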
system/init/thread.zig (new file, 49 lines)

@@ -0,0 +1,49 @@
+const std = @import("std");
+const system = @import("system");
+
+const vm = system.vm;
+const syscalls = system.syscalls;
+const buffer = system.ring_buffer;
+
+pub const Thread = struct {
+    address: ?[]u8,
+    connection: system.ipc.Connection,
+};
+
+pub fn setupKernelRingBufferForThread(mapper: *const vm.MemoryMapper, pid: u64, virt: u64) !void {
+    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
+
+    try vm.map(mapper, virt, phys, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
+
+    try syscalls.setEventQueue(pid, phys.address);
+}
+
+pub fn setupRingBufferForThread(mapper: *const vm.MemoryMapper, base: u64, virt: u64) !buffer.RingBuffer {
+    const phys = vm.PhysFrame{ .address = try syscalls.allocFrame() };
+
+    try vm.map(mapper, virt, phys, @intFromEnum(vm.Flags.User) | @intFromEnum(vm.Flags.ReadWrite) | @intFromEnum(vm.Flags.NoExecute));
+
+    const data: [*]u8 = @ptrCast(phys.virtualPointer(u8, base));
+
+    return buffer.RingBuffer.init(data, vm.PAGE_SIZE, true);
+}
+
+pub fn setupThread(pid: u64, base: u64) !Thread {
+    const space = try syscalls.getAddressSpace(pid);
+    const mapper = vm.MemoryMapper.create(.{ .address = space }, base);
+
+    const ipc_base = 0x1000; // FIXME: Find a good place in the address space and guarantee this is free.
+
+    try setupKernelRingBufferForThread(&mapper, pid, ipc_base + system.ipc.KERNEL_BUFFER_ADDRESS_OFFSET);
+    // INIT_WRITE and INIT_READ are inverted here because when the process writes, init reads.
+    const read_buffer = try setupRingBufferForThread(&mapper, base, ipc_base + system.ipc.INIT_WRITE_BUFFER_ADDRESS_OFFSET);
+    const write_buffer = try setupRingBufferForThread(&mapper, base, ipc_base + system.ipc.INIT_READ_BUFFER_ADDRESS_OFFSET);
+
+    const connection: system.ipc.Connection = .{ .pid = pid, .read_buffer = read_buffer, .write_buffer = write_buffer };
+
+    try syscalls.setThreadArguments(pid, base, ipc_base);
+
+    try syscalls.startThread(pid);
+
+    return .{ .address = null, .connection = connection };
+}
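Note: setupThread maps three pages at a fixed ipc_base in the target's address space (the kernel event queue plus one ring per direction) before starting it. A sketch of how init might greet a freshly created thread, assuming the Thread value returned above:

var t = try setupThread(2, base);
var greeting: u8 = 1;
_ = t.connection.write(u8, &greeting); // lands in the ring the process reads from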
system/ipc.zig

@@ -5,6 +5,22 @@ pub const Connection = struct {
     pid: u64,
     read_buffer: buffer.RingBuffer,
     write_buffer: buffer.RingBuffer,
+
+    pub fn read(self: *Connection, comptime T: type, out: *T) bool {
+        return self.read_buffer.readType(T, out);
+    }
+
+    pub fn write(self: *Connection, comptime T: type, in: *const T) bool {
+        return self.write_buffer.writeType(T, in);
+    }
+
+    pub fn readBytes(self: *Connection, bytes: [*]u8, length: usize) bool {
+        return self.read_buffer.read(bytes, length);
+    }
+
+    pub fn writeBytes(self: *Connection, bytes: []u8) bool {
+        return self.write_buffer.writeSlice(bytes);
+    }
 };
 
 pub const KERNEL_BUFFER_ADDRESS_OFFSET = 0x0000;
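Note: Connection now hides the ring pair behind typed helpers, so neither side touches read_buffer/write_buffer directly. A sketch of both ends of a transfer, assuming each side holds a Connection over the same two rings:

// Process side: stage a typed value, then notify init (pid 1).
var x: u32 = 1234;
_ = connection.write(u32, &x);
syscalls.asyncSend(1);

// Init side, inside the MessageReceived handler: pull the same bytes back out.
var y: u32 = undefined;
if (sender.connection.read(u32, &y)) syscalls.print(y);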
system/kernel.zig

@@ -17,6 +17,9 @@ pub const SystemCall = enum(u64) {
     StartThread, // requires Token.CreateProcess
     GetThreadId,
     GetAddressSpace, // requires Token.CreateProcess
+    Send,
+    AsyncSend,
+    Wait,
 };
 
 pub const Token = enum(u64) {
@@ -31,3 +34,7 @@ pub const SystemError = error{
     NoSuchThread,
     NotAuthorized,
 };
+
+pub const KernelMessage = enum(u8) {
+    MessageReceived = 0,
+};
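Note: SystemCall values are the userspace ABI: the enum value is the raw syscall number and doubles as the index into the kernel's syscalls table. Appending Send, AsyncSend and Wait after GetAddressSpace keeps every existing number stable; inserting in the middle would silently renumber older binaries. A hypothetical comptime guard, assuming a place where both the enum and the table are visible:

comptime {
    std.debug.assert(@intFromEnum(SystemCall.Wait) + 1 == syscalls.len);
}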
system/ring_buffer.zig

@@ -102,6 +102,22 @@ pub const RingBuffer = struct {
         return true;
     }
 
+    pub fn readType(self: *RingBuffer, comptime T: type, out: *T) bool {
+        return self.read(@ptrCast(out), @sizeOf(@TypeOf(out.*)));
+    }
+
+    pub fn peekType(self: *RingBuffer, comptime T: type, out: *T) bool {
+        return self.peek(@ptrCast(out), @sizeOf(@TypeOf(out.*)));
+    }
+
+    pub fn writeType(self: *RingBuffer, comptime T: type, in: *const T) bool {
+        return self.write(@ptrCast(in), @sizeOf(@TypeOf(in.*)));
+    }
+
+    pub fn writeSlice(self: *RingBuffer, bytes: []const u8) bool {
+        return self.write(bytes.ptr, bytes.len);
+    }
+
     fn dataPointer(self: *RingBuffer) [*]u8 {
         return @ptrCast(&self.data.data_start);
     }
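Note: the typed helpers are thin casts over the byte API: writeType moves exactly @sizeOf(T) bytes and returns false when the ring cannot take them, so (assuming the underlying write is all-or-nothing) a [2]u64 event is either fully enqueued or not at all. Round trip:

var in_val: u64 = 1234;
_ = ring.writeType(u64, &in_val);

var out_val: u64 = undefined;
if (ring.readType(u64, &out_val)) {
    // out_val == 1234
}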
system/syscalls.zig

@@ -101,3 +101,15 @@ pub fn getAddressSpace(pid: u64) !u64 {
     if (retval < 0) return error.NoSuchThread;
     return @bitCast(retval);
 }
+
+pub fn send(pid: u64) void {
+    _ = syscall(.Send, pid, 0, 0);
+}
+
+pub fn asyncSend(pid: u64) void {
+    _ = syscall(.AsyncSend, pid, 0, 0);
+}
+
+pub fn wait() void {
+    _ = syscall(.Wait, 0, 0, 0);
+}
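Note: send and asyncSend take the same kernel path; the difference is that send blocks the caller after queueing the event (the kernel calls thread.block), while asyncSend returns immediately. wait is the receiving half: it blocks until a sender's event revives the thread. A sketch of a blocking request, assuming a server_pid that eventually answers with an asyncSend back to us:

syscalls.send(server_pid); // queue the event, then block
// execution resumes only after the server calls asyncSend(our_pid)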
@@ -14,10 +14,12 @@ export fn _start(_: u64, ipc_base: u64) callconv(.C) noreturn {
 
     var connection = system.ipc.readInitBuffers(ipc_base);
 
-    const byte: u8 = 127;
+    var byte: u8 = 127;
 
-    _ = connection.write_buffer.write(@ptrCast(&byte), 1);
-    syscalls.yield();
+    while (byte > 0) : (byte -= 1) {
+        _ = connection.write(u8, &byte);
+        syscalls.asyncSend(1);
+    }
 
     while (true) {}
 }
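Note: the test program above now streams 127 bytes (127 down to 1), pairing every Connection.write with asyncSend(1) so init is woken per byte rather than relying on yield and polling. Each wakeup lets init drain the pending MessageReceived records and print the corresponding bytes.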