Kernel: Return a Result in MemoryManager and strdup_from_user

parent 662afad426
commit 58fb422161
@@ -1,4 +1,5 @@
 #pragma once
+#include "utils/Result.h"
 #include <stdint.h>
 
 #ifndef PAGE_SIZE
@@ -16,19 +17,19 @@ namespace MemoryManager
 
     void protect_kernel_sections();
 
-    void* get_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
+    Result<void*> get_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
     void release_mapping(void* mapping);
 
-    void* get_unaligned_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
-    void* get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags = MAP_READ_WRITE);
+    Result<void*> get_unaligned_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
+    Result<void*> get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags = MAP_READ_WRITE);
     void release_unaligned_mapping(void* mapping);
     void release_unaligned_mappings(void* mapping, uint64_t count);
 
-    void* get_page(int flags = MAP_READ_WRITE);
-    void* get_pages(uint64_t count, int flags = MAP_READ_WRITE);
+    Result<void*> get_page(int flags = MAP_READ_WRITE);
+    Result<void*> get_pages(uint64_t count, int flags = MAP_READ_WRITE);
 
-    void* get_page_at(uint64_t addr, int flags = MAP_READ_WRITE);
-    void* get_pages_at(uint64_t addr, uint64_t count, int flags = MAP_READ_WRITE);
+    Result<void*> get_page_at(uint64_t addr, int flags = MAP_READ_WRITE);
+    Result<void*> get_pages_at(uint64_t addr, uint64_t count, int flags = MAP_READ_WRITE);
 
     void release_page(void* page);
     void release_pages(void* pages, uint64_t count);
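
Note: with these signatures, every allocation failure becomes explicit at the call site. A minimal caller-side sketch of the new contract (illustration only, not part of the commit; it assumes the kernel's own headers):

    #include "memory/MemoryManager.h"
    #include "utils/Result.h"

    // Hypothetical helper: map a physical frame, handing any error up to the caller.
    static Result<void*> map_frame_checked(void* physical)
    {
        auto mapping = MemoryManager::get_mapping(physical);
        if (mapping.has_error()) return mapping.release_error(); // forward ENOMEM unchanged
        return mapping.release_value();
    }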
@@ -10,7 +10,7 @@
 #include "misc/utils.h"
 #include <stddef.h>
 
-char* strdup_from_user(const char* user_string);
+Result<char*> strdup_from_user(const char* user_string);
 bool validate_user_readable_page(uintptr_t address);
 bool validate_user_writable_page(uintptr_t address);
 bool validate_user_read(uintptr_t address, size_t size);
@@ -1,5 +1,6 @@
 #pragma once
+#include "std/ensure.h"
 #include "std/errno.h"
 #include "std/string.h"
 #include "utils/move.h"
 #include "utils/new.h"
@@ -80,14 +81,26 @@ template <typename T> class Result
         return m_storage.fetch_reference();
     }
 
+    T value_or(T other)
+    {
+        if (has_value()) return m_storage.fetch_reference();
+        return other;
+    }
+
+    T release_value()
+    {
+        ensure(has_value());
+        T item = m_storage.fetch_reference();
+        m_has_value = false;
+        m_storage.destroy();
+        return move(item);
+    }
+
     ~Result()
     {
         if (has_value()) m_storage.destroy();
     }
 
   private:
     struct Storage
     {
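
The two new accessors target different callers: value_or() peeks and substitutes a fallback without consuming the Result, while release_value() asserts success (via ensure) and moves the value out, destroying the stored copy. In isolation (hypothetical calls, mirroring the API above):

    auto page = MemoryManager::get_page();
    void* a = page.value_or(nullptr); // nullptr on failure; `page` still owns its value on success

    void* b = MemoryManager::get_page().release_value(); // asserts has_value(): an OOM here panics the kernel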
@@ -103,6 +116,16 @@ template <typename T> class Result
             return *fetch_ptr();
         }
 
+        const T* fetch_ptr() const
+        {
+            return (const T*)buffer;
+        }
+
+        const T& fetch_reference() const
+        {
+            return *fetch_ptr();
+        }
+
         void store_ptr(T* ptr)
         {
             new (buffer) T(*ptr);
@@ -117,6 +140,11 @@ template <typename T> class Result
         {
             new (buffer) T(ref);
         }
+
+        void destroy()
+        {
+            fetch_reference().~T();
+        }
     };
     Storage m_storage;
     int m_error;
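
Storage keeps the value in a raw byte buffer and builds it with placement new, so no destructor ever runs automatically; destroy() has to invoke it by hand, which is what fetch_reference().~T() does. The idiom reduced to a standalone sketch (generic illustration, not the kernel's code):

    #include <new> // placement new

    template <typename T> struct Slot
    {
        alignas(T) unsigned char buffer[sizeof(T)];

        void store(const T& v) { new (buffer) T(v); }     // construct in place, no allocation
        T* get() { return reinterpret_cast<T*>(buffer); }
        void destroy() { get()->~T(); }                   // run the destructor manually
    };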
@@ -10,6 +10,8 @@
 
 extern BOOTBOOT bootboot;
 
+// FIXME: Propagate errors.
+
 ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt()
 {
     static SDTHeader* cache = nullptr;
@@ -19,7 +21,7 @@ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt()
     void* physical = (void*)bootboot.arch.x86_64.acpi_ptr;
     kdbgln("RSDT/XSDT physical address: %p", physical);
 
-    SDTHeader* rsdt = (SDTHeader*)MemoryManager::get_unaligned_mapping(physical);
+    SDTHeader* rsdt = (SDTHeader*)MemoryManager::get_unaligned_mapping(physical).release_value();
 
     uint64_t offset = (uint64_t)physical % PAGE_SIZE;
     uint64_t rsdt_pages = Utilities::get_blocks_from_size(PAGE_SIZE, (offset + rsdt->Length));
@@ -27,7 +29,7 @@ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt()
     if (rsdt_pages > 1)
     {
         MemoryManager::release_unaligned_mapping(rsdt);
-        rsdt = (SDTHeader*)MemoryManager::get_unaligned_mappings(cache, rsdt_pages);
+        rsdt = (SDTHeader*)MemoryManager::get_unaligned_mappings(cache, rsdt_pages).release_value();
     }
 
     kdbgln("Mapped RSDT/XSDT to virtual address %p, uses %ld pages", (void*)rsdt, rsdt_pages);
@@ -82,7 +84,7 @@ void* ACPI::find_table(ACPI::SDTHeader* root_sdt, const char* signature)
             continue;
         }
         kdbgln("Physical address of entry: %p", (void*)h);
-        SDTHeader* realHeader = (SDTHeader*)MemoryManager::get_unaligned_mapping(h);
+        SDTHeader* realHeader = (SDTHeader*)MemoryManager::get_unaligned_mapping(h).release_value();
         kdbgln("Mapped entry to virtual address %p", (void*)realHeader);
         if (!validate_sdt_header(realHeader))
         {
@@ -410,7 +410,8 @@ static void initrd_initialize_root()
 void InitRD::init()
 {
     initrd_base = MemoryManager::get_unaligned_mappings(
-        (void*)bootboot.initrd_ptr, Utilities::get_blocks_from_size(PAGE_SIZE, bootboot.initrd_size));
+        (void*)bootboot.initrd_ptr, Utilities::get_blocks_from_size(PAGE_SIZE, bootboot.initrd_size))
+                      .release_value(); // FIXME: Propagate errors.
     kdbgln("physical base at %lx, size %lx, mapped to %p", bootboot.initrd_ptr, bootboot.initrd_size, initrd_base);
     kdbgln("total blocks: %ld", get_total_blocks());
     void* leak = kmalloc(4); // leak some memory so that kmalloc doesn't continuously allocate and free pages
@@ -5,6 +5,7 @@
 #include "memory/MemoryManager.h"
 #include "std/ensure.h"
 #include "std/string.h"
+#include "utils/Addresses.h"
 #include <stdint.h>
 
 struct GDTR
@@ -87,8 +88,9 @@ static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, uint64_t addr)
 static void setup_tss()
 {
     memset(&main_tss, 0, sizeof(TSS));
-    main_tss.rsp[0] =
-        (uint64_t)MemoryManager::get_pages(4) + (PAGE_SIZE * 4) - 8; // allocate 16KB for the syscall stack
+    main_tss.rsp[0] = get_top_of_stack((uint64_t)MemoryManager::get_pages(4).release_value(),
+                                       4); // FIXME: Propagate errors, we should use 1 kernel stack
+                                           // per task, and it probably shouldn't be so big.
     main_tss.iomap_base = sizeof(TSS);
     set_tss_base(&internal_gdt.tss, &internal_gdt.tss2, (uint64_t)&main_tss);
     set_limit(&internal_gdt.tss, sizeof(TSS) - 1);
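
get_top_of_stack() replaces the hand-rolled `base + (PAGE_SIZE * 4) - 8`: x86-64 stacks grow downward, so the initial stack pointer sits just below the end of the mapping. A plausible shape for the helper, reconstructed from the arithmetic it replaces (the real definition lives elsewhere in the tree, presumably utils/Addresses.h):

    static inline uint64_t get_top_of_stack(uint64_t bottom, uint64_t pages)
    {
        return bottom + (pages * PAGE_SIZE) - sizeof(uintptr_t); // leave one slot below the mapping's end
    }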
@@ -33,7 +33,7 @@ void MemoryManager::protect_kernel_sections()
                  MAP_READ_WRITE);
 }
 
-void* MemoryManager::get_mapping(void* physicalAddress, int flags)
+Result<void*> MemoryManager::get_mapping(void* physicalAddress, int flags)
 {
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
     if (!virtualAddress)
@@ -42,13 +42,13 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags)
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
         KernelHeap::dump_usage();
-        return 0;
+        return {ENOMEM};
     }
     VMM::map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }
 
-void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
+Result<void*> MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 {
     uint64_t offset = (uint64_t)physicalAddress % PAGE_SIZE;
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
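
`return {ENOMEM};` works because Result<T> is constructible from a bare errno value, so the same function can hand back either a value or an error without spelling out the type. A simplified reconstruction of the assumed constructor pair (the real class stores the value in the placement-new buffer shown earlier, not a plain member):

    template <typename T> class Result
    {
      public:
        Result(int error) : m_has_value(false), m_error(error) {}          // selected by `return {ENOMEM};`
        Result(T value) : m_value(value), m_has_value(true), m_error(0) {} // selected by `return ptr;`

        bool has_value() const { return m_has_value; }
        bool has_error() const { return !m_has_value; }
        int error() const { return m_error; }

      private:
        T m_value {};
        bool m_has_value;
        int m_error;
    };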
@@ -58,13 +58,13 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
         KernelHeap::dump_usage();
-        return 0;
+        return {ENOMEM};
     }
     VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
     return (void*)(virtualAddress + offset);
 }
 
-void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
+Result<void*> MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
 {
     if (!count) return 0;
     if (count == 1) return get_unaligned_mapping(physicalAddress, flags);
@@ -78,7 +78,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
                  count);
 #endif
         KernelHeap::dump_usage();
-        return 0;
+        return {ENOMEM};
     }
     for (uint64_t i = 0; i < count; i++)
     {
@@ -109,7 +109,7 @@ void MemoryManager::release_mapping(void* mapping)
     KernelHeap::free_virtual_page((uint64_t)mapping);
 }
 
-void* MemoryManager::get_page(int flags)
+Result<void*> MemoryManager::get_page(int flags)
 {
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
     if (!virtualAddress)
@@ -118,12 +118,12 @@ void* MemoryManager::get_page(int flags)
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
         KernelHeap::dump_usage();
-        return 0;
+        return {ENOMEM};
     }
     return get_page_at(virtualAddress, flags);
 }
 
-void* MemoryManager::get_page_at(uint64_t addr, int flags)
+Result<void*> MemoryManager::get_page_at(uint64_t addr, int flags)
 {
     auto paddr = PMM::request_page();
     if (paddr.has_error())
@@ -131,7 +131,7 @@ void* MemoryManager::get_page_at(uint64_t addr, int flags)
 #ifdef MM_DEBUG
         kwarnln("OOM while allocating one page of memory. this is not good...");
 #endif
-        return 0;
+        return {ENOMEM};
     }
     VMM::map(addr, (uint64_t)paddr.release_value(), flags);
     return (void*)addr;
@@ -145,7 +145,7 @@ void MemoryManager::release_page(void* page)
     PMM::free_page((void*)physicalAddress);
 }
 
-void* MemoryManager::get_pages(uint64_t count, int flags)
+Result<void*> MemoryManager::get_pages(uint64_t count, int flags)
 {
     if (!count) return 0;
     if (count == 1) return get_page(flags);
@@ -159,13 +159,13 @@ void* MemoryManager::get_pages(uint64_t count, int flags)
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
         KernelHeap::dump_usage();
-        return 0; // Out of virtual address in the kernel heap range (-128M to -64M). This should be difficult to
-                  // achieve...
+        return {ENOMEM}; // Out of virtual addresses in the kernel heap range (-128M to -64M). This should be difficult
+                         // to achieve...
     }
     return get_pages_at(virtualAddress, count, flags);
 }
 
-void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
+Result<void*> MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
 {
     if (!count) return 0;
     if (count == 1) return get_page_at(addr, flags);
@@ -184,7 +184,7 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
 #endif
             // FIXME: Weren't we supposed to free all previously allocated pages, to avoid leaks when failing large
             // allocations?
-            return 0;
+            return {ENOMEM};
         }
         VMM::map(addr + (i * PAGE_SIZE), (uint64_t)paddr.release_value(), flags);
     }
@@ -186,5 +186,6 @@ uint64_t PMM::get_bitmap_size()
 void PMM::map_bitmap_to_virtual()
 {
     virtual_bitmap_addr = (char*)MemoryManager::get_unaligned_mappings(
-        bitmap_addr, Utilities::get_blocks_from_size(PAGE_SIZE, bitmap_size));
+        bitmap_addr, Utilities::get_blocks_from_size(PAGE_SIZE, bitmap_size))
+            .release_value(); // If we can't do this, something has gone terribly wrong.
 }
@@ -20,7 +20,7 @@ extern "C" int liballoc_unlock()
 
 extern "C" void* liballoc_alloc(size_t count)
 {
-    return MemoryManager::get_pages(count);
+    return MemoryManager::get_pages(count).value_or(nullptr);
 }
 
 extern "C" int liballoc_free(void* addr, size_t count)
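
liballoc speaks the C allocator contract, where failure is a null pointer rather than an error object, so value_or(nullptr) is the right adapter at this boundary: release_value() would panic the kernel on a routine OOM, while value_or quietly collapses the error into NULL for kmalloc's existing callers. As a generic sketch (hypothetical hook name):

    // Adapting a Result-returning allocator to a C-style hook.
    extern "C" void* alloc_hook(size_t count)
    {
        return MemoryManager::get_pages(count).value_or(nullptr); // the ENOMEM detail is dropped here
    }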
@@ -59,7 +59,7 @@ bool validate_user_writable_page(uintptr_t address)
     return false;
 }
 
-char* strdup_from_user(const char* user_string)
+Result<char*> strdup_from_user(const char* user_string)
 {
     uintptr_t user_ptr = (uintptr_t)user_string;
     auto aligned = round_down_to_nearest_page(user_ptr);
@@ -68,14 +68,16 @@ char* strdup_from_user(const char* user_string)
     if (aligned != user_ptr) // Otherwise, we already do this check below.
     {
         if (!validate_user_readable_page(aligned)) return nullptr;
-        ptr = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0);
+        auto result = MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0);
+        if (result.has_error()) return result.release_error();
+        ptr = (char*)result.release_value();
         index = user_ptr - aligned;
     }
     dynamic_string str;
     if (!dynamic_init(&str))
     {
         if (ptr) MemoryManager::release_mapping(ptr);
-        return nullptr;
+        return {ENOMEM};
     }
     while (true) // FIXME: set a limit for this and fail with ENAMETOOLONG otherwise.
     {
@@ -86,16 +88,18 @@ char* strdup_from_user(const char* user_string)
            if (!validate_user_readable_page(user_ptr))
            {
                kfree(str.buf);
-               return nullptr;
+               return {EFAULT};
            }
-           auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0);
+           auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0);
+           if (result.has_error()) return result.release_error();
+           ptr = (char*)result.release_value();
        }
        char c = ptr[index];
        if (!dynamic_push(&str, c))
        {
            MemoryManager::release_mapping(ptr);
            kfree(str.buf);
-           return nullptr;
+           return {ENOMEM};
        }
        if (!c) // We reached the null terminator!!
        {
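
Every syscall that used to null-check the returned pointer now follows one shape, repeated in the later hunks for execv, stat, open, mkdir and access: check has_error(), translate the errno into the return register, then take ownership of the copy. The pattern in isolation (hypothetical user_path/context, mirroring the hunks below):

    auto result = strdup_from_user(user_path);
    if (result.has_error())
    {
        context->rax = -result.error(); // -EFAULT or -ENOMEM, now chosen by the callee
        return;
    }
    char* kpath = result.release_value();
    // ... use kpath ...
    kfree(kpath); // the caller still owns the kernel copy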
@@ -152,7 +156,9 @@ bool do_copy_from_user(const char* uptr, char* ptr, size_t size)
     if (aligned != user_ptr) // Otherwise, we already do this check below.
     {
         if (!validate_user_readable_page(aligned)) return false;
-        mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0);
+        auto result = MemoryManager::get_mapping((void*)VMM::get_physical(aligned), 0);
+        if (result.has_error()) return false; // FIXME: Propagate errors.
+        mapping = (char*)result.release_value();
         index = user_ptr - aligned;
     }
     while (size--)
@@ -162,7 +168,9 @@ bool do_copy_from_user(const char* uptr, char* ptr, size_t size)
             if (mapping) MemoryManager::release_mapping(mapping);
             index = 0;
             if (!validate_user_readable_page(user_ptr)) return false;
-            mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0);
+            auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0);
+            if (result.has_error()) return false; // FIXME: Propagate errors.
+            mapping = (char*)result.release_value();
         }
         *ptr = mapping[index];
         user_ptr++;
@@ -181,7 +189,9 @@ bool do_copy_to_user(char* uptr, const char* ptr, size_t size)
     if (aligned != user_ptr) // Otherwise, we already do this check below.
     {
         if (!validate_user_writable_page(aligned)) return false;
-        mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(aligned));
+        auto result = MemoryManager::get_mapping((void*)VMM::get_physical(aligned));
+        if (result.has_error()) return false;
+        mapping = (char*)result.release_value();
         index = user_ptr - aligned;
     }
     while (size--)
@@ -191,7 +201,9 @@ bool do_copy_to_user(char* uptr, const char* ptr, size_t size)
             if (mapping) MemoryManager::release_mapping(mapping);
             index = 0;
             if (!validate_user_writable_page(user_ptr)) return false;
-            mapping = (char*)MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr));
+            auto result = MemoryManager::get_mapping((void*)VMM::get_physical(user_ptr), 0);
+            if (result.has_error()) return false;
+            mapping = (char*)result.release_value();
         }
         mapping[index] = *ptr;
         user_ptr++;
@@ -104,8 +104,10 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
 
             uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, (phdr.p_vaddr % PAGE_SIZE) + phdr.p_memsz);
             void* buffer = (void*)((uint64_t)MemoryManager::get_pages_at(round_down_to_nearest_page(phdr.p_vaddr),
-                                                                         pages, MAP_READ_WRITE) +
-                                   (phdr.p_vaddr % PAGE_SIZE));
+                                                                         pages, MAP_READ_WRITE)
+                                       .release_value() // FIXME: We check for enough space before loading the ELF,
+                                                        // although a race condition could happen.
+                                   + (phdr.p_vaddr % PAGE_SIZE));
 
             if (VMM::is_using_kernel_address_space()) { VMM::switch_to_previous_user_address_space(); }
             VMM::apply_address_space();
@@ -73,12 +73,13 @@ void push_on_user_stack(uint64_t* rsp, char* value,
 
 void sys_execv(Context* context, const char* pathname, char** argv)
 {
-    char* kpathname = strdup_from_user(pathname);
-    if (!kpathname)
+    auto result = strdup_from_user(pathname);
+    if (result.has_error())
     {
-        context->rax = -EFAULT;
+        context->rax = -result.error();
         return;
     }
+    char* kpathname = result.release_value();
 
     kinfoln("exec(): executing %s", kpathname);
 
@@ -149,14 +150,14 @@ void sys_execv(Context* context, const char* pathname, char** argv)
         }
         if (arg)
         {
-            char* kcopy = strdup_from_user(arg);
-            if (!kcopy) // FIXME: This could also be EFAULT.
+            auto rc = strdup_from_user(arg);
+            if (rc.has_error()) // FIXME: This could also be EFAULT.
             {
                 free_kernel_argv_copy();
-                context->rax = -ENOMEM;
+                context->rax = -rc.error();
                 return;
             }
-            kargv[kargc] = kcopy;
+            kargv[kargc] = rc.release_value();
         }
         else
         {
|
||||
Task* task = Scheduler::current_task();
|
||||
ensure(task);
|
||||
|
||||
// At this point, pretty much nothing can fail.
|
||||
// At this point, pretty much nothing can fail. (FIXME: Race conditions could happen)
|
||||
|
||||
task->allocator.free();
|
||||
task->allocator
|
||||
.init(); // If we had enough space for the old bitmap, we should have enough space for the new bitmap.
|
||||
|
||||
task->address_space.clear();
|
||||
task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(
|
||||
0x100000, TASK_PAGES_IN_STACK,
|
||||
MAP_USER | MAP_READ_WRITE | MAP_AS_OWNED_BY_TASK); // If we had enough space for the old stack, there should be
|
||||
// enough space for the new stack.
|
||||
task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(0x100000, TASK_PAGES_IN_STACK,
|
||||
MAP_USER | MAP_READ_WRITE | MAP_AS_OWNED_BY_TASK)
|
||||
.release_value(); // If we had enough space for the old stack, there should be
|
||||
// enough space for the new stack.
|
||||
|
||||
ELFImage* image = ELFLoader::load_elf_from_vfs(program);
|
||||
ensure(image); // If check_elf_image succeeded, load_elf_from_vfs MUST succeed, unless something has gone terribly
|
||||
|
@@ -80,18 +80,18 @@ void sys_mmap(Context* context, void* address, size_t size, int prot, int fd, off_t offset)
         context->rax = file->mmap((uint64_t)address - addr_offset, size, real_flags, offset);
         return;
     }
-    void* result = MemoryManager::get_pages_at((uint64_t)address - addr_offset,
-                                               Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags);
-    if (result)
+    auto result = MemoryManager::get_pages_at((uint64_t)address - addr_offset,
+                                              Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags);
+    if (result.has_value())
     {
-        kdbgln("mmap() succeeded: %p", result);
-        context->rax = (uint64_t)result;
+        kdbgln("mmap() succeeded: %p", result.value());
+        context->rax = (uint64_t)result.release_value();
         return;
     }
     else
     {
         kwarnln("mmap() failed: failed to allocate physical memory");
-        context->rax = MAP_FAIL(ENOMEM);
+        context->rax = MAP_FAIL(result.error());
         return;
     }
 }
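
Order matters in the success branch above: value() merely peeks at the stored pointer for the debug log, while release_value() moves it out and leaves the Result empty, so the peek has to come first. In isolation:

    auto result = MemoryManager::get_page();
    if (result.has_value())
    {
        kdbgln("allocated at %p", result.value()); // peek: the Result still holds the value
        void* addr = result.release_value();       // move out: the Result is empty afterwards
    }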
@@ -117,17 +117,17 @@ void sys_mmap(Context* context, void* address, size_t size, int prot, int fd, off_t offset)
         context->rax = file->mmap(ptr, size, real_flags, offset);
         return;
     }
-    void* result = MemoryManager::get_pages_at(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags);
-    if (result)
+    auto result = MemoryManager::get_pages_at(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags);
+    if (result.has_value())
     {
-        kdbgln("mmap() succeeded: %p", result);
-        context->rax = (uint64_t)result;
+        kdbgln("mmap() succeeded: %p", result.value());
+        context->rax = (uint64_t)result.release_value();
         return;
     }
     else
     {
         kwarnln("mmap() failed: failed to allocate physical memory");
-        context->rax = MAP_FAIL(ENOMEM);
+        context->rax = MAP_FAIL(result.error());
         return;
     }
 }
@@ -64,12 +64,13 @@ void sys_fstat(Context* context, int fd, struct stat* buf)
 
 void sys_stat(Context* context, const char* path, struct stat* buf)
 {
-    char* kpath = strdup_from_user(path);
-    if (!kpath)
+    auto result = strdup_from_user(path);
+    if (result.has_error())
     {
-        context->rax = -EFAULT;
+        context->rax = -result.error();
         return;
     }
+    char* kpath = result.release_value();
     VFS::Node* node = VFS::resolve_path(kpath);
     kfree(kpath);
     if (!node)
@@ -170,12 +170,13 @@ void sys_open(Context* context, const char* filename, int flags, mode_t) // FIXM
         return;
     }
 
-    char* kfilename = strdup_from_user(filename);
-    if (!kfilename)
+    auto result = strdup_from_user(filename);
+    if (result.has_error())
     {
-        context->rax = -EFAULT;
+        context->rax = -result.error();
         return;
     }
+    char* kfilename = result.release_value();
 
     VFS::Node* node = VFS::resolve_path(kfilename);
     if (!node)
@@ -322,12 +323,13 @@ void sys_close(Context* context, int fd)
 
 void sys_mkdir(Context* context, const char* filename, mode_t mode)
 {
-    char* kfilename = strdup_from_user(filename);
-    if (!kfilename)
+    auto result = strdup_from_user(filename);
+    if (result.has_error())
     {
-        context->rax = -EFAULT;
+        context->rax = -result.error();
         return;
     }
+    char* kfilename = result.release_value();
 
     Task* current_task = Scheduler::current_task();
 
@@ -340,7 +342,13 @@ void sys_mkdir(Context* context, const char* filename, mode_t mode)
 
 void sys_access(Context* context, const char* path, int) // FIXME: Use the amode argument.
 {
-    char* kpath = strdup_from_user(path);
+    auto result = strdup_from_user(path);
+    if (result.has_error())
+    {
+        context->rax = -result.error();
+        return;
+    }
+    char* kpath = result.release_value();
     if (!VFS::exists(kpath)) { context->rax = -ENOENT; }
     else
         context->rax = 0;
@@ -93,7 +93,9 @@ void Scheduler::init()
     memset(&idle_task, 0, sizeof(Task));
     idle_task.id = free_pid++;
     idle_task.regs.rip = (uint64_t)idle_task_function;
-    idle_task.regs.rsp = get_top_of_stack((uint64_t)MemoryManager::get_page(), 1);
+    idle_task.regs.rsp =
+        get_top_of_stack((uint64_t)MemoryManager::get_page().release_value(),
+                         1); // If we OOM while creating the idle task, that's NOT good and we can panic.
     idle_task.regs.cs = 0x08;
     idle_task.regs.ss = 0x10;
     idle_task.regs.rflags = (1 << 21) | (1 << 9);
@@ -119,7 +121,7 @@ void Scheduler::add_kernel_task(const char* taskname, void (*task)(void))
     new_task->uid = new_task->euid = new_task->gid = new_task->egid = 0;
     new_task->regs.rip = (uint64_t)task;
     new_task->allocated_stack =
-        (uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK); // 16 KB is enough for everyone, right?
+        (uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK).release_value(); // FIXME: Propagate errors.
     new_task->regs.rsp = get_top_of_stack(new_task->allocated_stack, TASK_PAGES_IN_STACK);
     new_task->regs.cs = 0x08;
     new_task->regs.ss = 0x10;
@@ -195,9 +197,9 @@ long Scheduler::load_user_task(const char* filename)
     new_task->user_task = true;
     new_task->regs.rip = image->entry;
     new_task->image = image;
-    new_task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(
-        0x100000, TASK_PAGES_IN_STACK,
-        MAP_READ_WRITE | MAP_USER | MAP_AS_OWNED_BY_TASK); // 16 KB is enough for everyone, right?
+    new_task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(0x100000, TASK_PAGES_IN_STACK,
+                                                                      MAP_READ_WRITE | MAP_USER | MAP_AS_OWNED_BY_TASK)
+                                    .release_value(); // FIXME: Propagate errors.
     if (!new_task->allocated_stack)
     {
         new_task->address_space.destroy();
|
Loading…
Reference in New Issue
Block a user