diff --git a/kernel/include/memory/MemoryManager.h b/kernel/include/memory/MemoryManager.h index c6cd4b37..3cecf2ae 100644 --- a/kernel/include/memory/MemoryManager.h +++ b/kernel/include/memory/MemoryManager.h @@ -1,4 +1,5 @@ #pragma once +#include "utils/Result.h" #include #ifndef PAGE_SIZE @@ -16,19 +17,19 @@ namespace MemoryManager void protect_kernel_sections(); - void* get_mapping(void* physicalAddress, int flags = MAP_READ_WRITE); + Result get_mapping(void* physicalAddress, int flags = MAP_READ_WRITE); void release_mapping(void* mapping); - void* get_unaligned_mapping(void* physicalAddress, int flags = MAP_READ_WRITE); - void* get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags = MAP_READ_WRITE); + Result get_unaligned_mapping(void* physicalAddress, int flags = MAP_READ_WRITE); + Result get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags = MAP_READ_WRITE); void release_unaligned_mapping(void* mapping); void release_unaligned_mappings(void* mapping, uint64_t count); - void* get_page(int flags = MAP_READ_WRITE); - void* get_pages(uint64_t count, int flags = MAP_READ_WRITE); + Result get_page(int flags = MAP_READ_WRITE); + Result get_pages(uint64_t count, int flags = MAP_READ_WRITE); - void* get_page_at(uint64_t addr, int flags = MAP_READ_WRITE); - void* get_pages_at(uint64_t addr, uint64_t count, int flags = MAP_READ_WRITE); + Result get_page_at(uint64_t addr, int flags = MAP_READ_WRITE); + Result get_pages_at(uint64_t addr, uint64_t count, int flags = MAP_READ_WRITE); void release_page(void* page); void release_pages(void* pages, uint64_t count); diff --git a/kernel/include/sys/UserMemory.h b/kernel/include/sys/UserMemory.h index c2c9936e..8d33e861 100644 --- a/kernel/include/sys/UserMemory.h +++ b/kernel/include/sys/UserMemory.h @@ -10,7 +10,7 @@ #include "misc/utils.h" #include -char* strdup_from_user(const char* user_string); +Result strdup_from_user(const char* user_string); bool 
validate_user_readable_page(uintptr_t address); bool validate_user_writable_page(uintptr_t address); bool validate_user_read(uintptr_t address, size_t size); diff --git a/kernel/include/utils/Result.h b/kernel/include/utils/Result.h index 11cd1f2a..6abb0f8a 100644 --- a/kernel/include/utils/Result.h +++ b/kernel/include/utils/Result.h @@ -1,5 +1,6 @@ #pragma once #include "std/ensure.h" +#include "std/errno.h" #include "std/string.h" #include "utils/move.h" #include "utils/new.h" @@ -80,14 +81,26 @@ template class Result return m_storage.fetch_reference(); } + T value_or(T other) + { + if (has_value()) return m_storage.fetch_reference(); + return other; + } + T release_value() { ensure(has_value()); T item = m_storage.fetch_reference(); m_has_value = false; + m_storage.destroy(); return move(item); } + ~Result() + { + if (has_value()) m_storage.destroy(); + } + private: struct Storage { @@ -103,6 +116,16 @@ template class Result return *fetch_ptr(); } + const T* fetch_ptr() const + { + return (const T*)buffer; + } + + const T& fetch_reference() const + { + return *fetch_ptr(); + } + void store_ptr(T* ptr) { new (buffer) T(*ptr); @@ -117,6 +140,11 @@ template class Result { new (buffer) T(ref); } + + void destroy() + { + fetch_reference().~T(); + } }; Storage m_storage; int m_error; diff --git a/kernel/src/acpi/RSDT.cpp b/kernel/src/acpi/RSDT.cpp index 410644ba..98f78afc 100644 --- a/kernel/src/acpi/RSDT.cpp +++ b/kernel/src/acpi/RSDT.cpp @@ -10,6 +10,8 @@ extern BOOTBOOT bootboot; +// FIXME: Propagate errors. 
+ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt() { static SDTHeader* cache = nullptr; @@ -19,7 +21,7 @@ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt() void* physical = (void*)bootboot.arch.x86_64.acpi_ptr; kdbgln("RSDT/XSDT physical address: %p", physical); - SDTHeader* rsdt = (SDTHeader*)MemoryManager::get_unaligned_mapping(physical); + SDTHeader* rsdt = (SDTHeader*)MemoryManager::get_unaligned_mapping(physical).release_value(); uint64_t offset = (uint64_t)physical % PAGE_SIZE; uint64_t rsdt_pages = Utilities::get_blocks_from_size(PAGE_SIZE, (offset + rsdt->Length)); @@ -27,7 +29,7 @@ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt() if (rsdt_pages > 1) { MemoryManager::release_unaligned_mapping(rsdt); - rsdt = (SDTHeader*)MemoryManager::get_unaligned_mappings(cache, rsdt_pages); + rsdt = (SDTHeader*)MemoryManager::get_unaligned_mappings(cache, rsdt_pages).release_value(); } kdbgln("Mapped RSDT/XSDT to virtual address %p, uses %ld pages", (void*)rsdt, rsdt_pages); @@ -82,7 +84,7 @@ void* ACPI::find_table(ACPI::SDTHeader* root_sdt, const char* signature) continue; } kdbgln("Physical address of entry: %p", (void*)h); - SDTHeader* realHeader = (SDTHeader*)MemoryManager::get_unaligned_mapping(h); + SDTHeader* realHeader = (SDTHeader*)MemoryManager::get_unaligned_mapping(h).release_value(); kdbgln("Mapped entry to virtual address %p", (void*)realHeader); if (!validate_sdt_header(realHeader)) { diff --git a/kernel/src/fs/InitRD.cpp b/kernel/src/fs/InitRD.cpp index 6235f91a..08584815 100644 --- a/kernel/src/fs/InitRD.cpp +++ b/kernel/src/fs/InitRD.cpp @@ -410,7 +410,8 @@ static void initrd_initialize_root() void InitRD::init() { initrd_base = MemoryManager::get_unaligned_mappings( - (void*)bootboot.initrd_ptr, Utilities::get_blocks_from_size(PAGE_SIZE, bootboot.initrd_size)); + (void*)bootboot.initrd_ptr, Utilities::get_blocks_from_size(PAGE_SIZE, bootboot.initrd_size)) + .release_value(); // FIXME: Propagate errors. 
kdbgln("physical base at %lx, size %lx, mapped to %p", bootboot.initrd_ptr, bootboot.initrd_size, initrd_base); kdbgln("total blocks: %ld", get_total_blocks()); void* leak = kmalloc(4); // leak some memory so that kmalloc doesn't continously allocate and free pages diff --git a/kernel/src/gdt/GDT.cpp b/kernel/src/gdt/GDT.cpp index 23580b74..0c2ecca4 100644 --- a/kernel/src/gdt/GDT.cpp +++ b/kernel/src/gdt/GDT.cpp @@ -5,6 +5,7 @@ #include "memory/MemoryManager.h" #include "std/ensure.h" #include "std/string.h" +#include "utils/Addresses.h" #include struct GDTR @@ -87,8 +88,9 @@ static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, uint64_t addr) static void setup_tss() { memset(&main_tss, 0, sizeof(TSS)); - main_tss.rsp[0] = - (uint64_t)MemoryManager::get_pages(4) + (PAGE_SIZE * 4) - 8; // allocate 16KB for the syscall stack + main_tss.rsp[0] = get_top_of_stack((uint64_t)MemoryManager::get_pages(4).release_value(), + 4); // FIXME: Propagate errors, we should use 1 kernel stack + // per task, and it probably shouldn't be so big. 
main_tss.iomap_base = sizeof(TSS); set_tss_base(&internal_gdt.tss, &internal_gdt.tss2, (uint64_t)&main_tss); set_limit(&internal_gdt.tss, sizeof(TSS) - 1); diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp index c8b794f6..b8bee795 100644 --- a/kernel/src/memory/MemoryManager.cpp +++ b/kernel/src/memory/MemoryManager.cpp @@ -33,7 +33,7 @@ void MemoryManager::protect_kernel_sections() MAP_READ_WRITE); } -void* MemoryManager::get_mapping(void* physicalAddress, int flags) +Result MemoryManager::get_mapping(void* physicalAddress, int flags) { uint64_t virtualAddress = KernelHeap::request_virtual_page(); if (!virtualAddress) @@ -42,13 +42,13 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags) kwarnln("No kernel heap space (virtual address space from -128M to -64M) left"); #endif KernelHeap::dump_usage(); - return 0; + return {ENOMEM}; } VMM::map(virtualAddress, (uint64_t)physicalAddress, flags); return (void*)virtualAddress; } -void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags) +Result MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags) { uint64_t offset = (uint64_t)physicalAddress % PAGE_SIZE; uint64_t virtualAddress = KernelHeap::request_virtual_page(); @@ -58,13 +58,13 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags) kwarnln("No kernel heap space (virtual address space from -128M to -64M) left"); #endif KernelHeap::dump_usage(); - return 0; + return {ENOMEM}; } VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags); return (void*)(virtualAddress + offset); } -void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags) +Result MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags) { if (!count) return 0; if (count == 1) return get_unaligned_mapping(physicalAddress, flags); @@ -78,7 +78,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, 
uint64_t coun count); #endif KernelHeap::dump_usage(); - return 0; + return {ENOMEM}; } for (uint64_t i = 0; i < count; i++) { @@ -109,7 +109,7 @@ void MemoryManager::release_mapping(void* mapping) KernelHeap::free_virtual_page((uint64_t)mapping); } -void* MemoryManager::get_page(int flags) +Result MemoryManager::get_page(int flags) { uint64_t virtualAddress = KernelHeap::request_virtual_page(); if (!virtualAddress) @@ -118,12 +118,12 @@ void* MemoryManager::get_page(int flags) kwarnln("No kernel heap space (virtual address space from -128M to -64M) left"); #endif KernelHeap::dump_usage(); - return 0; + return {ENOMEM}; } return get_page_at(virtualAddress, flags); } -void* MemoryManager::get_page_at(uint64_t addr, int flags) +Result MemoryManager::get_page_at(uint64_t addr, int flags) { auto paddr = PMM::request_page(); if (paddr.has_error()) @@ -131,7 +131,7 @@ void* MemoryManager::get_page_at(uint64_t addr, int flags) #ifdef MM_DEBUG kwarnln("OOM while allocating one page of memory. this is not good..."); #endif - return 0; + return {ENOMEM}; } VMM::map(addr, (uint64_t)paddr.release_value(), flags); return (void*)addr; @@ -145,7 +145,7 @@ void MemoryManager::release_page(void* page) PMM::free_page((void*)physicalAddress); } -void* MemoryManager::get_pages(uint64_t count, int flags) +Result MemoryManager::get_pages(uint64_t count, int flags) { if (!count) return 0; if (count == 1) return get_page(flags); @@ -159,13 +159,13 @@ void* MemoryManager::get_pages(uint64_t count, int flags) kwarnln("No kernel heap space (virtual address space from -128M to -64M) left"); #endif KernelHeap::dump_usage(); - return 0; // Out of virtual address in the kernel heap range (-128M to -64M). This should be difficult to - // achieve... + return {ENOMEM}; // Out of virtual address in the kernel heap range (-128M to -64M). This should be difficult to + // achieve... 
} return get_pages_at(virtualAddress, count, flags); } -void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags) +Result MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags) { if (!count) return 0; if (count == 1) return get_page_at(addr, flags); @@ -184,7 +184,7 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags) #endif // FIXME: Weren't we supposed to free all previously allocated pages, to avoid leaks when failing large // allocations? - return 0; + return {ENOMEM}; } VMM::map(addr + (i * PAGE_SIZE), (uint64_t)paddr.release_value(), flags); } diff --git a/kernel/src/memory/PMM.cpp b/kernel/src/memory/PMM.cpp index 747042f2..ec62deb0 100644 --- a/kernel/src/memory/PMM.cpp +++ b/kernel/src/memory/PMM.cpp @@ -186,5 +186,6 @@ uint64_t PMM::get_bitmap_size() void PMM::map_bitmap_to_virtual() { virtual_bitmap_addr = (char*)MemoryManager::get_unaligned_mappings( - bitmap_addr, Utilities::get_blocks_from_size(PAGE_SIZE, bitmap_size)); + bitmap_addr, Utilities::get_blocks_from_size(PAGE_SIZE, bitmap_size)) + .release_value(); // If we can't do this, something has gone terribly wrong. 
} \ No newline at end of file diff --git a/kernel/src/memory/liballoc/bindings.cpp b/kernel/src/memory/liballoc/bindings.cpp index d3acdd69..91b53e54 100644 --- a/kernel/src/memory/liballoc/bindings.cpp +++ b/kernel/src/memory/liballoc/bindings.cpp @@ -20,7 +20,7 @@ extern "C" int liballoc_unlock() extern "C" void* liballoc_alloc(size_t count) { - return MemoryManager::get_pages(count); + return MemoryManager::get_pages(count).value_or(nullptr); } extern "C" int liballoc_free(void* addr, size_t count) diff --git a/kernel/src/sys/UserMemory.cpp b/kernel/src/sys/UserMemory.cpp index 218d0a8d..fc71fb40 100644 --- a/kernel/src/sys/UserMemory.cpp +++ b/kernel/src/sys/UserMemory.cpp @@ -59,7 +59,7 @@ bool validate_user_writable_page(uintptr_t address) return false; } -char* strdup_from_user(const char* user_string) +Result strdup_from_user(const char* user_string) { uintptr_t user_ptr = (uintptr_t)user_string; auto aligned = round_down_to_nearest_page(user_ptr); @@ -68,14 +68,16 @@ char* strdup_from_user(const char* user_string) if (aligned != user_ptr) // Otherwise, we already do this check below. { - if (!validate_user_readable_page(aligned)) return nullptr; + if (!validate_user_readable_page(aligned)) return {EFAULT}; - ptr = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0); + auto result = MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0); + if (result.has_error()) return result.release_error(); + ptr = (char*)result.release_value(); index = user_ptr - aligned; } dynamic_string str; if (!dynamic_init(&str)) { if (ptr) MemoryManager::release_mapping(ptr); - return nullptr; + return {ENOMEM}; } while (true) // FIXME: set a limit for this and fail with ENAMETOOLONG otherwise. 
{ @@ -86,16 +88,18 @@ char* strdup_from_user(const char* user_string) if (!validate_user_readable_page(user_ptr)) { kfree(str.buf); - return nullptr; + return {EFAULT}; } - ptr = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0); + auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0); + if (result.has_error()) return result.release_error(); + ptr = (char*)result.release_value(); } char c = ptr[index]; if (!dynamic_push(&str, c)) { MemoryManager::release_mapping(ptr); kfree(str.buf); - return nullptr; + return {ENOMEM}; } if (!c) // We reached the null terminator!! { @@ -152,7 +156,9 @@ bool do_copy_from_user(const char* uptr, char* ptr, size_t size) if (aligned != user_ptr) // Otherwise, we already do this check below. { if (!validate_user_readable_page(aligned)) return false; - mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0); + auto result = MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0); + if (result.has_error()) return false; // FIXME: Propagate errors. + mapping = (char*)result.release_value(); index = user_ptr - aligned; } while (size--) @@ -162,7 +168,9 @@ bool do_copy_from_user(const char* uptr, char* ptr, size_t size) if (mapping) MemoryManager::release_mapping(mapping); index = 0; if (!validate_user_readable_page(user_ptr)) return false; - mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0); + auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0); + if (result.has_error()) return false; // FIXME: Propagate errors. + mapping = (char*)result.release_value(); } *ptr = mapping[index]; user_ptr++; @@ -181,7 +189,9 @@ bool do_copy_to_user(char* uptr, const char* ptr, size_t size) if (aligned != user_ptr) // Otherwise, we already do this check below. 
{ if (!validate_user_writable_page(aligned)) return false; - mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(aligned)); + auto result = MemoryManager::get_mapping((void*)VMM::get_physical(aligned)); + if (result.has_error()) return false; + mapping = (char*)result.release_value(); index = user_ptr - aligned; } while (size--) { @@ -191,7 +201,9 @@ bool do_copy_to_user(char* uptr, const char* ptr, size_t size) if (mapping) MemoryManager::release_mapping(mapping); index = 0; if (!validate_user_writable_page(user_ptr)) return false; - mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr)); + auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr)); + if (result.has_error()) return false; + mapping = (char*)result.release_value(); } mapping[index] = *ptr; user_ptr++; diff --git a/kernel/src/sys/elf/ELFLoader.cpp b/kernel/src/sys/elf/ELFLoader.cpp index d20b15ec..2673b07e 100644 --- a/kernel/src/sys/elf/ELFLoader.cpp +++ b/kernel/src/sys/elf/ELFLoader.cpp @@ -104,8 +104,10 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node) uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, (phdr.p_vaddr % PAGE_SIZE) + phdr.p_memsz); void* buffer = (void*)((uint64_t)MemoryManager::get_pages_at(round_down_to_nearest_page(phdr.p_vaddr), - pages, MAP_READ_WRITE) + - (phdr.p_vaddr % PAGE_SIZE)); + pages, MAP_READ_WRITE) + .release_value() // FIXME: We check for enough space before loading the ELF, + // although a race condition could happen. 
+ + (phdr.p_vaddr % PAGE_SIZE)); if (VMM::is_using_kernel_address_space()) { VMM::switch_to_previous_user_address_space(); } VMM::apply_address_space(); diff --git a/kernel/src/sys/exec.cpp b/kernel/src/sys/exec.cpp index 8a325d44..962d0c42 100644 --- a/kernel/src/sys/exec.cpp +++ b/kernel/src/sys/exec.cpp @@ -73,12 +73,13 @@ void push_on_user_stack(uint64_t* rsp, char* value, void sys_execv(Context* context, const char* pathname, char** argv) { - char* kpathname = strdup_from_user(pathname); - if (!kpathname) + auto result = strdup_from_user(pathname); + if (result.has_error()) { - context->rax = -EFAULT; + context->rax = -result.error(); return; } + char* kpathname = result.release_value(); kinfoln("exec(): executing %s", kpathname); @@ -149,14 +150,14 @@ void sys_execv(Context* context, const char* pathname, char** argv) } if (arg) { - char* kcopy = strdup_from_user(arg); - if (!kcopy) // FIXME: This could also be EFAULT. + auto rc = strdup_from_user(arg); + if (rc.has_error()) // FIXME: This could also be EFAULT. { free_kernel_argv_copy(); - context->rax = -ENOMEM; + context->rax = -rc.error(); return; } - kargv[kargc] = kcopy; + kargv[kargc] = rc.release_value(); } else { @@ -202,17 +203,17 @@ void sys_execv(Context* context, const char* pathname, char** argv) Task* task = Scheduler::current_task(); ensure(task); - // At this point, pretty much nothing can fail. + // At this point, pretty much nothing can fail. (FIXME: Race conditions could happen) task->allocator.free(); task->allocator .init(); // If we had enough space for the old bitmap, we should have enough space for the new bitmap. task->address_space.clear(); - task->allocated_stack = (uint64_t)MemoryManager::get_pages_at( - 0x100000, TASK_PAGES_IN_STACK, - MAP_USER | MAP_READ_WRITE | MAP_AS_OWNED_BY_TASK); // If we had enough space for the old stack, there should be - // enough space for the new stack. 
+ task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(0x100000, TASK_PAGES_IN_STACK, + MAP_USER | MAP_READ_WRITE | MAP_AS_OWNED_BY_TASK) + .release_value(); // If we had enough space for the old stack, there should be + // enough space for the new stack. ELFImage* image = ELFLoader::load_elf_from_vfs(program); ensure(image); // If check_elf_image succeeded, load_elf_from_vfs MUST succeed, unless something has gone terribly diff --git a/kernel/src/sys/mem.cpp b/kernel/src/sys/mem.cpp index fe9cc7c3..99a6a0b0 100644 --- a/kernel/src/sys/mem.cpp +++ b/kernel/src/sys/mem.cpp @@ -80,18 +80,18 @@ void sys_mmap(Context* context, void* address, size_t size, int prot, int fd, of context->rax = file->mmap((uint64_t)address - addr_offset, size, real_flags, offset); return; } - void* result = MemoryManager::get_pages_at((uint64_t)address - addr_offset, - Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags); - if (result) + auto result = MemoryManager::get_pages_at((uint64_t)address - addr_offset, + Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags); + if (result.has_value()) { - kdbgln("mmap() succeeded: %p", result); - context->rax = (uint64_t)result; + kdbgln("mmap() succeeded: %p", result.value()); + context->rax = (uint64_t)result.release_value(); return; } else { kwarnln("mmap() failed: failed to allocate physical memory"); - context->rax = MAP_FAIL(ENOMEM); + context->rax = MAP_FAIL(result.error()); return; } } @@ -117,17 +117,17 @@ void sys_mmap(Context* context, void* address, size_t size, int prot, int fd, of context->rax = file->mmap(ptr, size, real_flags, offset); return; } - void* result = MemoryManager::get_pages_at(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags); - if (result) + auto result = MemoryManager::get_pages_at(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags); + if (result.has_value()) { - kdbgln("mmap() succeeded: %p", result); - context->rax = (uint64_t)result; + kdbgln("mmap() 
succeeded: %p", result.value()); + context->rax = (uint64_t)result.release_value(); return; } else { kwarnln("mmap() failed: failed to allocate physical memory"); - context->rax = MAP_FAIL(ENOMEM); + context->rax = MAP_FAIL(result.error()); return; } } diff --git a/kernel/src/sys/stat.cpp b/kernel/src/sys/stat.cpp index 9870f445..48fdcaf9 100644 --- a/kernel/src/sys/stat.cpp +++ b/kernel/src/sys/stat.cpp @@ -64,12 +64,13 @@ void sys_fstat(Context* context, int fd, struct stat* buf) void sys_stat(Context* context, const char* path, struct stat* buf) { - char* kpath = strdup_from_user(path); - if (!kpath) + auto result = strdup_from_user(path); + if (result.has_error()) { - context->rax = -EFAULT; + context->rax = -result.error(); return; } + char* kpath = result.release_value(); VFS::Node* node = VFS::resolve_path(kpath); kfree(kpath); if (!node) diff --git a/kernel/src/sys/stdio.cpp b/kernel/src/sys/stdio.cpp index adc8206b..453f0ff5 100644 --- a/kernel/src/sys/stdio.cpp +++ b/kernel/src/sys/stdio.cpp @@ -170,12 +170,13 @@ void sys_open(Context* context, const char* filename, int flags, mode_t) // FIXM return; } - char* kfilename = strdup_from_user(filename); - if (!kfilename) + auto result = strdup_from_user(filename); + if (result.has_error()) { - context->rax = -EFAULT; + context->rax = -result.error(); return; } + char* kfilename = result.release_value(); VFS::Node* node = VFS::resolve_path(kfilename); if (!node) @@ -322,12 +323,13 @@ void sys_close(Context* context, int fd) void sys_mkdir(Context* context, const char* filename, mode_t mode) { - char* kfilename = strdup_from_user(filename); - if (!kfilename) + auto result = strdup_from_user(filename); + if (result.has_error()) { - context->rax = -EFAULT; + context->rax = -result.error(); return; } + char* kfilename = result.release_value(); Task* current_task = Scheduler::current_task(); @@ -340,7 +342,13 @@ void sys_mkdir(Context* context, const char* filename, mode_t mode) void sys_access(Context* context, 
const char* path, int) // FIXME: Use the amode argument. { - char* kpath = strdup_from_user(path); + auto result = strdup_from_user(path); + if (result.has_error()) + { + context->rax = -result.error(); + return; + } + char* kpath = result.release_value(); if (!VFS::exists(kpath)) { context->rax = -ENOENT; } else context->rax = 0; diff --git a/kernel/src/thread/Scheduler.cpp b/kernel/src/thread/Scheduler.cpp index 0062c2cf..5bece786 100644 --- a/kernel/src/thread/Scheduler.cpp +++ b/kernel/src/thread/Scheduler.cpp @@ -93,7 +93,9 @@ void Scheduler::init() memset(&idle_task, 0, sizeof(Task)); idle_task.id = free_pid++; idle_task.regs.rip = (uint64_t)idle_task_function; - idle_task.regs.rsp = get_top_of_stack((uint64_t)MemoryManager::get_page(), 1); + idle_task.regs.rsp = + get_top_of_stack((uint64_t)MemoryManager::get_page().release_value(), + 1); // If we OOM while creating the idle task, that's NOT good and we can panic. idle_task.regs.cs = 0x08; idle_task.regs.ss = 0x10; idle_task.regs.rflags = (1 << 21) | (1 << 9); @@ -119,7 +121,7 @@ void Scheduler::add_kernel_task(const char* taskname, void (*task)(void)) new_task->uid = new_task->euid = new_task->gid = new_task->egid = 0; new_task->regs.rip = (uint64_t)task; new_task->allocated_stack = - (uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK); // 16 KB is enough for everyone, right? + (uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK).release_value(); // FIXME: Propagate errors. new_task->regs.rsp = get_top_of_stack(new_task->allocated_stack, TASK_PAGES_IN_STACK); new_task->regs.cs = 0x08; new_task->regs.ss = 0x10; @@ -195,9 +197,9 @@ long Scheduler::load_user_task(const char* filename) new_task->user_task = true; new_task->regs.rip = image->entry; new_task->image = image; - new_task->allocated_stack = (uint64_t)MemoryManager::get_pages_at( - 0x100000, TASK_PAGES_IN_STACK, - MAP_READ_WRITE | MAP_USER | MAP_AS_OWNED_BY_TASK); // 16 KB is enough for everyone, right? 
+ new_task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(0x100000, TASK_PAGES_IN_STACK, + MAP_READ_WRITE | MAP_USER | MAP_AS_OWNED_BY_TASK) + .release_value(); // FIXME: Propagate errors. if (!new_task->allocated_stack) { new_task->address_space.destroy();