Compare commits

...

7 Commits

Author SHA1 Message Date
0ea9974512
First user process!
All checks were successful
continuous-integration/drone/push Build is passing
Putting it all together, we have a user process that successfully calls sys_exit() w/o crashing.
2023-01-05 21:55:21 +01:00
0aac6c888d
x86_64: Basic exit() syscall!
User processes need to do something, amirite?
2023-01-05 21:53:48 +01:00
a33a72915e
Scheduler: Creation, destruction and switching of userspace tasks :))
From a TarStream. Not optimal, but OK for the moment.
2023-01-05 21:52:26 +01:00
ea89b92675
Store a bit more stuff in a thread :) 2023-01-05 21:50:53 +01:00
31ea030c7f
MMU: Add functions to create and delete userspace page directories 2023-01-05 21:50:26 +01:00
c53bba0392
MemoryManager: Add an unmap_weak_and_free_vm() helper function
This function mirrors unmap_owned_and_free_vm(), but using weak unmapping (does not free the underlying physical memory)
2023-01-05 21:50:06 +01:00
d3c414af4e
ELFLoader: Do not keep track of segments
This reduces calls to kmalloc() since segment data is heap-allocated, and the segments loaded will be deleted when deleting the page directory.
2023-01-05 21:46:03 +01:00
14 changed files with 236 additions and 77 deletions

View File

@ -1,18 +1,4 @@
section .text
global _start
_start:
mov eax, ecx
push rdx
mov eax, 1
mov edi, hello_world
mov esi, 14
int 42h
nop
section .rodata
hello_world:
db 'Hello, world!', 0xa, 0
section .bss
array:
resb 10

View File

@ -23,19 +23,11 @@ static bool can_write_segment(u32 flags)
return can_write_segment(flags) && can_execute_segment(flags);
}*/
// Construct a segment descriptor from its virtual load address and byte size.
// Purely a value holder; owns no memory.
ELFSegment::ELFSegment(u64 base, usize size) : m_base(base), m_size(size)
{
}
namespace ELFLoader
{
// FIXME: Check that all calls to read_contents() read the proper amount of bytes.
Result<ELFData> load(const TarStream::Entry& elf_entry, const TarStream& stream)
{
LinkedList<ELFSegment> segments;
auto guard = make_scope_guard([&] { segments.consume([](ELFSegment* segment) { delete segment; }); });
Elf64_Ehdr elf_header;
usize nread = stream.read_contents(elf_entry, &elf_header, 0, sizeof elf_header);
if (nread < sizeof elf_header)
@ -101,9 +93,6 @@ namespace ELFLoader
/*expect(!can_write_and_execute_segment(program_header.p_flags),
"Segment is both writable and executable");*/
ELFSegment* segment = TRY(make<ELFSegment>(program_header.p_vaddr, program_header.p_memsz));
segments.append(segment);
int flags = MMU::User | MMU::NoExecute;
if (can_write_segment(program_header.p_flags)) flags |= MMU::ReadWrite;
else if (can_execute_segment(program_header.p_flags))
@ -124,14 +113,6 @@ namespace ELFLoader
else { kdbgln("ELF: Encountered non-loadable program header, skipping"); }
}
if (segments.count() == 0)
{
kdbgln("Error while loading ELF: No loadable segments");
return err(ENOEXEC);
}
guard.deactivate();
return ELFData { segments, elf_header.e_entry };
return ELFData { elf_header.e_entry };
}
}

View File

@ -47,28 +47,8 @@ typedef struct
u64 p_align; /* Segment alignment */
} Elf64_Phdr;
// Descriptor for one loadable ELF segment: the virtual address it is loaded
// at and its size in bytes. Kept in an intrusive linked list while loading.
struct ELFSegment : public LinkedListNode<ELFSegment>
{
    ELFSegment(u64 base, usize size);

    // Virtual address the segment is loaded at.
    u64 base() const { return m_base; }

    // Size of the segment, in bytes.
    usize size() const { return m_size; }

  private:
    u64 m_base;
    usize m_size;
};
struct ELFData
{
LinkedList<ELFSegment> segments;
u64 entry;
};

View File

@ -31,6 +31,7 @@ namespace MMU
void flush_all();
Result<PageDirectory*> create_page_directory_for_userspace();
Result<void> delete_userspace_page_directory(PageDirectory* directory);
void setup_initial_page_directory();
PageDirectory* kernel_page_directory();

View File

@ -187,4 +187,5 @@ ISR 20 ; virtualization exception (#VE)
ISR_ERROR 21 ; control-protection exception (#CP)
; ISR 22-31 reserved
IRQ 32, 0 ; timer interrupt
IRQ 33, 0 ; keyboard interrupt
IRQ 33, 0 ; keyboard interrupt
ISR 66 ; user exit

View File

@ -107,6 +107,12 @@ extern "C" void arch_interrupt_entry(Registers* regs)
scancode_queue.try_push(scancode);
pic_eoi(regs);
}
else if (regs->isr == 66) // Exit!!
{
kdbgln("exiting from user task!!");
Scheduler::current()->state = ThreadState::Dying;
kernel_yield();
}
else
{
kwarnln("IRQ catched! Halting.");

View File

@ -2,6 +2,7 @@
#include "memory/MemoryManager.h"
#include <luna/CString.h>
#include <luna/Result.h>
#include <luna/ScopeGuard.h>
#include <luna/SystemError.h>
#pragma GCC push_options
@ -50,11 +51,16 @@ namespace MMU
return l4_table()->entries[l4_index(addr)];
}
// Virtual address (through the recursive slot `rindex`) of the L3 table
// selected by top-level index `l4`.
constexpr PageDirectory* raw_l3_table(u64 l4)
{
    u64 table = sign;
    table |= (rindex << 39) | (rindex << 30) | (rindex << 21);
    table |= l4 << 12;
    return (PageDirectory*)table;
}
constexpr PageDirectory* l3_table(u64 addr)
{
const u64 l4 = l4_index(addr);
const u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
return (PageDirectory*)l3;
return raw_l3_table(l4);
}
constexpr u64 l3_index(u64 addr)
@ -67,12 +73,17 @@ namespace MMU
return l3_table(addr)->entries[l3_index(addr)];
}
// Virtual address (through the recursive slot `rindex`) of the L2 table
// selected by indices `l4` and `l3`.
constexpr PageDirectory* raw_l2_table(u64 l4, u64 l3)
{
    u64 table = sign;
    table |= (rindex << 39) | (rindex << 30);
    table |= (l4 << 21) | (l3 << 12);
    return (PageDirectory*)table;
}
constexpr PageDirectory* l2_table(u64 addr)
{
const u64 l4 = l4_index(addr);
const u64 l3 = l3_index(addr);
const u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
return (PageDirectory*)l2;
return raw_l2_table(l4, l3);
}
constexpr u64 l2_index(u64 addr)
@ -85,13 +96,18 @@ namespace MMU
return l2_table(addr)->entries[l2_index(addr)];
}
// Virtual address (through the recursive slot `rindex`) of the L1 table
// selected by indices `l4`, `l3` and `l2`.
constexpr PageDirectory* raw_l1_table(u64 l4, u64 l3, u64 l2)
{
    u64 table = sign;
    table |= rindex << 39;
    table |= (l4 << 30) | (l3 << 21) | (l2 << 12);
    return (PageDirectory*)table;
}
constexpr PageDirectory* l1_table(u64 addr)
{
const u64 l4 = l4_index(addr);
const u64 l3 = l3_index(addr);
const u64 l2 = l2_index(addr);
const u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
return (PageDirectory*)l1;
return raw_l1_table(l4, l3, l2);
}
constexpr u64 l1_index(u64 addr)
@ -270,6 +286,96 @@ namespace MMU
flush_all();
}
// Allocate and initialize a fresh top-level page directory for a userspace
// address space. Returns the directory's *physical* address; callers access it
// afterwards only through the recursive mapping.
Result<PageDirectory*> create_page_directory_for_userspace()
{
// Map one scratch kernel page so we can fill in the new directory's contents.
u64 directory_virt = TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite | MMU::NoExecute));
u64 directory_phys = MMU::get_physical(directory_virt).value();
PageDirectory* directory = (PageDirectory*)directory_virt;
memset(directory, 0, ARCH_PAGE_SIZE);
// Install the recursive entry: slot `rindex` points back at the directory
// itself, which is what lets the raw_l*_table() helpers reach page tables.
PageTableEntry& recursive_entry = directory->entries[rindex];
recursive_entry.read_write = true;
recursive_entry.present = true;
recursive_entry.set_address(directory_phys);
// Share the kernel's top-level entry 511 so every address space sees the
// same kernel mappings (the deletion path skips this entry accordingly).
directory->entries[511] = g_kernel_directory->entries[511];
// From now on, we're only going to use the physical address, since accessing the PageDirectory will be dealt
// with using recursive mapping. So let's make sure we don't leak any VM.
// NOTE(review): the Result of this unmap is discarded — presumably a
// best-effort cleanup; confirm that failure here is harmless.
MemoryManager::unmap_weak_and_free_vm(directory_virt, 1);
return (PageDirectory*)directory_phys;
}
// Tear down a userspace address space: walk the four paging levels through the
// recursive mapping, free every mapped physical frame, then free the
// page-table frames themselves. The top-level directory frame is released by
// the scope guard, which also switches back to the kernel directory.
//
// Fix: when an entry has `larger_pages` set it maps a huge page, not a page
// table. The previous code freed the frame but still fell through, descending
// into the huge page as if it were a table and then freeing the same address a
// second time at the end of the loop. We now `continue` after freeing it.
Result<void> delete_userspace_page_directory(PageDirectory* directory)
{
    check(directory);

    // Needed in order to access page tables using the recursive mapping system.
    switch_page_directory(directory);

    auto guard = make_scope_guard([&] {
        check(g_kernel_directory);
        switch_page_directory(g_kernel_directory);
        MemoryManager::free_frame((u64)directory);
    });

    PageDirectory* table = l4_table();

    // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages)
    for (u64 i = 0; i < 510; i++)
    {
        PageTableEntry& l4 = table->entries[i];
        if (!l4.present) continue;
        PageDirectory* pdp = raw_l3_table(i);
        for (u64 j = 0; j < 512; j++)
        {
            PageTableEntry& l3 = pdp->entries[j];
            if (!l3.present) continue;
            if (l3.larger_pages)
            {
                // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                TRY(MemoryManager::free_frame(l3.get_address()));
                // Huge page, not a page table: do not descend into it, and do
                // not free its address again below.
                continue;
            }
            PageDirectory* pd = raw_l2_table(i, j);
            for (u64 k = 0; k < 512; k++)
            {
                PageTableEntry& l2 = pd->entries[k];
                if (!l2.present) continue;
                if (l2.larger_pages)
                {
                    // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                    TRY(MemoryManager::free_frame(l2.get_address()));
                    // Large page: skip the descent and the duplicate free below.
                    continue;
                }
                PageDirectory* pt = raw_l1_table(i, j, k);
                for (u64 l = 0; l < 512; l++)
                {
                    PageTableEntry& l1 = pt->entries[l];
                    if (!l1.present) continue;
                    // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                    TRY(MemoryManager::free_frame(l1.get_address()));
                }
                // Free the L1 page-table frame itself.
                TRY(MemoryManager::free_frame(l2.get_address()));
            }
            // Free the L2 page-table frame itself.
            TRY(MemoryManager::free_frame(l3.get_address()));
        }
        // Free the L3 page-table frame itself.
        TRY(MemoryManager::free_frame(l4.get_address()));
    }

    // No need to clean up manually, the ScopeGuard we set up earlier will do that for us.
    return {};
}
PageDirectory* kernel_page_directory()
{
return g_kernel_directory;

View File

@ -36,7 +36,7 @@ u64 IDTEntry::get_offset() const
static IDTEntry idt[256];
#define IDT_TA_InterruptGate 0b10001110
#define IDT_TA_UserInterruptGate 0b11101110
#define IDT_TA_UserCallableInterruptGate 0b11101110
#define IDT_TA_TrapGate 0b10001111
struct [[gnu::packed]] IDTR
@ -60,6 +60,7 @@ static void idt_add_handler(short num, void* handler, u8 type_attr)
#define INT(x) extern "C" void _isr##x()
#define TRAP(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_TrapGate)
#define IRQ(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_InterruptGate)
#define SYS(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_UserCallableInterruptGate)
INT(0);
INT(1);
@ -83,6 +84,7 @@ INT(20);
INT(21);
INT(32);
INT(33);
INT(66);
void setup_idt()
{
@ -110,6 +112,7 @@ void setup_idt()
TRAP(21);
IRQ(32);
IRQ(33);
SYS(66);
static IDTR idtr;
idtr.limit = 0x0FFF;

View File

@ -51,7 +51,8 @@ Result<void> init()
kinfoln("Used memory: %s", to_dynamic_unit(MemoryManager::used()).release_value().chars());
kinfoln("Reserved memory: %s", to_dynamic_unit(MemoryManager::reserved()).release_value().chars());
MMU::unmap(0x400000);
Thread::init();
Scheduler::init();
TarStream::Entry entry;
while (TRY(g_initrd.read_next_entry().try_set_value_with_specific_error(entry, 0)))
@ -61,18 +62,10 @@ Result<void> init()
kinfoln("Found file %s in initial ramdisk, of size %s", entry.name,
to_dynamic_unit(entry.size).release_value().chars());
if (!strcmp(entry.name, "bin/app"))
{
auto data = TRY(ELFLoader::load(entry, g_initrd));
data.segments.consume([](ELFSegment* segment) { delete segment; });
kinfoln("Loaded ELF with entry=%#.16lx", data.entry);
}
if (!strcmp(entry.name, "bin/app")) { TRY(Scheduler::new_userspace_thread(entry, g_initrd)); }
}
}
Thread::init();
Scheduler::init();
TRY(Scheduler::new_kernel_thread(heap_thread));
TRY(Scheduler::new_kernel_thread(reap_thread));

View File

@ -309,6 +309,15 @@ namespace MemoryManager
return {};
}
// Unmap `count` pages starting at `virt` and return the kernel VM range to the
// allocator, WITHOUT freeing the underlying physical frames (weak unmapping).
// Mirrors unmap_owned_and_free_vm() for memory the caller does not own.
Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
// NOTE(review): the VM range is released before the pages are unmapped; fine
// as long as nothing can re-allocate the range in between — confirm.
KernelVM::free_several_pages(virt, count);
return unmap_weak(virt, count);
}
Result<void> remap_unaligned(u64 address, usize count, int flags)
{
if (!is_aligned<ARCH_PAGE_SIZE>(address)) count++;

View File

@ -30,6 +30,7 @@ namespace MemoryManager
Result<void> unmap_owned(u64 virt, usize count);
Result<void> unmap_owned_and_free_vm(u64 virt, usize count);
Result<void> unmap_weak(u64 virt, usize count);
Result<void> unmap_weak_and_free_vm(u64 virt, usize count);
usize free();
usize used();

View File

@ -1,4 +1,5 @@
#include "thread/Scheduler.h"
#include "ELF.h"
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
@ -19,6 +20,7 @@ namespace Scheduler
g_idle.init_regs_kernel();
g_idle.set_ip((u64)CPU::idle_loop);
g_idle.state = ThreadState::Idle;
g_idle.is_kernel = true;
g_idle.ticks_left = 1;
@ -60,6 +62,8 @@ namespace Scheduler
thread->stack = thread_stack;
thread->is_kernel = true;
g_threads.append(thread);
kinfoln("CREATED THREAD: id %lu with ip %lx and sp %lx", thread->id, thread->ip(), thread->sp());
@ -95,13 +99,84 @@ namespace Scheduler
return new_kernel_thread_impl(thread);
}
// Allocate the user and kernel stacks for a new userspace thread.
// On success, fills `user_stack` (fixed user VM at THREAD_STACK_BASE in the
// currently-active directory) and `kernel_stack` (kernel VM).
// Fix: the page count `4` was repeated five times; it is now a single named
// constant so the sizes cannot silently drift apart.
static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack)
{
    constexpr usize STACK_PAGES = 4;
    const u64 THREAD_STACK_BASE = 0x10000;

    TRY(MemoryManager::alloc_at(THREAD_STACK_BASE, STACK_PAGES, MMU::ReadWrite | MMU::NoExecute | MMU::User));

    // If the kernel stack allocation fails, undo the user stack mapping.
    auto guard = make_scope_guard([&] { MemoryManager::unmap_owned(THREAD_STACK_BASE, STACK_PAGES); });

    u64 kernel_stack_base = TRY(MemoryManager::alloc_for_kernel(STACK_PAGES, MMU::ReadWrite | MMU::NoExecute));

    guard.deactivate();

    user_stack = { THREAD_STACK_BASE, STACK_PAGES * ARCH_PAGE_SIZE };
    kernel_stack = { kernel_stack_base, STACK_PAGES * ARCH_PAGE_SIZE };

    return {};
}
// Create and enqueue a userspace thread whose program is loaded from `entry`
// inside the tar stream `stream`. On any failure the scope guards below undo
// everything: delete the thread, switch back to the kernel directory and free
// the new page directory's frame.
Result<void> new_userspace_thread(const TarStream::Entry& entry, const TarStream& stream)
{
Thread* thread = TRY(new_thread());
thread->is_kernel = false;
auto guard = make_scope_guard([&] { delete thread; });
auto directory = TRY(MMU::create_page_directory_for_userspace());
auto directory_guard = make_scope_guard([&] {
MMU::switch_page_directory(MMU::kernel_page_directory());
MemoryManager::free_frame((u64)directory);
});
// Activate the new directory first: the ELF segments and the user stack must
// be mapped into this address space, not the kernel's.
MMU::switch_page_directory(directory);
thread->init_regs_user();
auto data = TRY(ELFLoader::load(entry, stream));
thread->set_ip(data.entry);
TRY(create_stacks(thread->stack, thread->kernel_stack));
thread->set_sp(thread->stack.top());
thread->directory = directory;
guard.deactivate();
directory_guard.deactivate();
// NOTE(review): on success we stay in the new address space here —
// presumably fine because the scheduler switches directories on context
// switch; confirm.
kinfoln("CREATED USERSPACE THREAD: id %lu with ip %lx and sp %lx (ksp %lx)", thread->id, thread->ip(),
thread->sp(), thread->kernel_stack.top());
g_threads.append(thread);
return {};
}
void reap_thread(Thread* thread)
{
kinfoln("reap: reaping thread with id %zu", thread->id);
auto stack = thread->stack;
kinfoln("deleting thread stack @ %#lx, has %zu bytes of stack", stack.bottom(), stack.bytes());
// FIXME: Propagate errors I guess?
MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
if (thread->is_kernel)
{
auto stack = thread->stack;
// FIXME: Propagate errors I guess?
kinfoln("deleting stack @ %#lx", stack.bottom());
MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
}
else
{
auto stack = thread->kernel_stack;
kinfoln("deleting kstack @ %#lx", stack.bottom());
// FIXME: Propagate errors I guess?
MemoryManager::unmap_owned_and_free_vm(stack.bottom(), stack.bytes() / ARCH_PAGE_SIZE).release_value();
}
if (!thread->is_kernel) MMU::delete_userspace_page_directory(thread->directory);
delete thread;
}
@ -138,7 +213,15 @@ namespace Scheduler
void generic_switch_context(Thread* old_thread, Thread* new_thread, Registers* regs)
{
if (old_thread != new_thread) switch_context(old_thread, new_thread, regs);
if (old_thread != new_thread)
{
switch_context(old_thread, new_thread, regs);
if (!new_thread->is_kernel)
{
MMU::switch_page_directory(new_thread->directory);
CPU::switch_kernel_stack(new_thread->kernel_stack.top());
}
}
if (new_thread->is_idle())
{

View File

@ -1,5 +1,6 @@
#pragma once
#include "thread/Thread.h"
#include <luna/TarStream.h>
namespace Scheduler
{
@ -12,6 +13,8 @@ namespace Scheduler
Result<void> new_kernel_thread(void (*func)(void));
Result<void> new_kernel_thread(void (*func)(void*), void* arg);
Result<void> new_userspace_thread(const TarStream::Entry& entry, const TarStream& stream);
Thread* pick_task();
void reap_thread(Thread* thread);

View File

@ -1,5 +1,6 @@
#pragma once
#include "arch/MMU.h"
#include <luna/LinkedList.h>
#include <luna/Result.h>
#include <luna/Stack.h>
@ -32,9 +33,14 @@ struct Thread : public LinkedListNode<Thread>
u64 sleep_ticks_left;
Stack stack;
Stack kernel_stack;
ThreadState state = ThreadState::Runnable;
bool is_kernel { true };
PageDirectory* directory;
bool is_idle()
{
return state == ThreadState::Idle;