Luna/kernel/src/sys/mmap.cpp
kernel: Make sure addresses allocated by mmap() are ALWAYS page-aligned
Fixes a kernel crash. Thanks a lot, sysfuzz!

#include "Log.h"
#include "arch/MMU.h"
#include "memory/MemoryManager.h"
#include "sys/Syscall.h"
#include "thread/Scheduler.h"
#include <bits/mmap-flags.h>
#include <luna/Alignment.h>
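
// Base address for userspace heap mappings (not referenced in this file).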
constexpr uintptr_t USERSPACE_HEAP_BASE = 0x3000000;
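
// mmap() syscall: map pages of memory into the calling process's address space.
// Only private anonymous mappings are supported for now; file-backed and shared
// mappings return ENOTSUP. On success, returns the page-aligned address of the
// new mapping.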
Result<u64> sys_mmap(Registers*, SyscallArgs args)
{
    void* addr = (void*)args[0];
    usize len = (usize)args[1];
    int prot = (int)args[2];
    int flags = (int)args[3];

    if (len == 0) return err(EINVAL);
    if (flags < 0) return err(EINVAL);

    // We support only anonymous mappings for now.
    if ((flags & MAP_ANONYMOUS) != MAP_ANONYMOUS)
    {
        kwarnln("mmap: FIXME: attempt to mmap file instead of anonymous memory");
        return err(ENOTSUP);
    }

    if (flags & MAP_SHARED)
    {
        kwarnln("mmap: FIXME: attempt to mmap shared memory");
        return err(ENOTSUP);
    }
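
    // Round the requested length up to a whole number of pages; everything
    // below operates on page-sized blocks.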
    len = align_up<ARCH_PAGE_SIZE>(len);

    Thread* current = Scheduler::current();
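
    // With no hint from the caller, let the VM allocator pick a free region;
    // otherwise, page-align the hint downwards and try to reserve exactly that
    // region.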
    u64 address;
    if (!addr) address = TRY(current->vm_allocator->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
    else
    {
        // FIXME: We should be more flexible if MAP_FIXED was not specified.
        address = align_down<ARCH_PAGE_SIZE>((u64)addr);
        if (!TRY(current->vm_allocator->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
            return err(ENOMEM);
    }
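
    // Translate POSIX protection flags into MMU flags. Mappings are
    // non-executable unless PROT_EXEC is requested, and PROT_NONE leaves the
    // pages inaccessible to userspace.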
    int mmu_flags = MMU::User | MMU::NoExecute;
    if (prot & PROT_WRITE) mmu_flags |= MMU::ReadWrite;
    if (prot & PROT_EXEC) mmu_flags &= ~MMU::NoExecute;
    if (prot == PROT_NONE) mmu_flags = MMU::NoExecute;

#ifdef MMAP_DEBUG
    kdbgln("mmap: mapping memory at %#lx, size=%zu", address, len);
#endif

    // FIXME: This leaks VM if it fails.
    return MemoryManager::alloc_at_zeroed(address, get_blocks_from_size(len, ARCH_PAGE_SIZE), mmu_flags);
}
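
// munmap() syscall: remove a mapping from the calling process's address space.
// The size must be page-aligned; unmapping memory that was never mapped is a
// silent no-op, as POSIX requires.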
Result<u64> sys_munmap(Registers*, SyscallArgs args)
{
    u64 address = (u64)args[0];
    usize size = (usize)args[1];

    if (size == 0) return err(EINVAL);
    if (!is_aligned<ARCH_PAGE_SIZE>(size)) return err(EINVAL);

    Thread* current = Scheduler::current();

    bool ok = TRY(current->vm_allocator->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));

    // POSIX says munmap should silently do nothing if the memory was not already mapped.
    if (!ok) return 0;

#ifdef MMAP_DEBUG
    kdbgln("munmap: unmapping memory at %#lx, size=%zu", address, size);
#endif
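
    // The region has been released from the VM allocator; finally, unmap the
    // pages from the process's address space.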
    TRY(MemoryManager::unmap_owned(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));

    return { 0 };
}