2023-03-14 19:43:15 +00:00
|
|
|
#include "Log.h"
|
|
|
|
#include "arch/MMU.h"
|
|
|
|
#include "memory/MemoryManager.h"
|
2023-08-02 20:19:06 +00:00
|
|
|
#include "memory/SharedMemory.h"
|
2023-03-14 19:43:15 +00:00
|
|
|
#include "sys/Syscall.h"
|
|
|
|
#include "thread/Scheduler.h"
|
|
|
|
#include <bits/mmap-flags.h>
|
2023-08-02 20:19:06 +00:00
|
|
|
#include <bits/mmap.h>
|
|
|
|
#include <bits/open-flags.h>
|
2023-03-14 19:43:15 +00:00
|
|
|
#include <luna/Alignment.h>
|
|
|
|
|
|
|
|
// Base virtual address for userspace heap mappings. Not referenced in this
// chunk — presumably consumed by the address-space region allocator.
// TODO(review): confirm where this is used.
constexpr uintptr_t USERSPACE_HEAP_BASE = 0x3000000;
|
|
|
|
|
|
|
|
/**
 * @brief Map files or anonymous memory into the calling process's address
 *        space (mmap(2)).
 *
 * args[0] is a userspace pointer to an mmap_params structure describing the
 * request (addr, len, prot, flags, fd, offset).
 *
 * @return The chosen virtual address on success, or an error:
 *         EFAULT  - params pointer not readable from userspace
 *         EINVAL  - zero length, negative flags, or unaligned offset
 *         EACCES  - file descriptor lacks the required access mode
 *         ENOMEM  - no address range available for the mapping
 */
Result<u64> sys_mmap(Registers*, SyscallArgs args)
{
    mmap_params params;
    if (!MemoryManager::copy_from_user_typed((const mmap_params*)args[0], &params)) return err(EFAULT);

    if (params.len == 0) return err(EINVAL);

    if (params.flags < 0) return err(EINVAL);

    Thread* current = Scheduler::current();

    // File-backed mapping: resolve the descriptor and require read access,
    // since mapped pages are always at least readable. Append-only
    // descriptors cannot be mapped either.
    SharedPtr<OpenFileDescription> description;
    if ((params.flags & MAP_ANONYMOUS) != MAP_ANONYMOUS)
    {
        description = TRY(current->resolve_fd(params.fd))->description;
        if (!(description->flags & O_RDONLY)) return err(EACCES);
        if (description->flags & O_APPEND) return err(EACCES);
    }

    if (!is_aligned<ARCH_PAGE_SIZE>(params.offset)) return err(EINVAL);

    // Round the length up to whole pages; everything below works in pages.
    params.len = align_up<ARCH_PAGE_SIZE>(params.len);

    SharedMemory* shmem = nullptr;
    u64 shmid = 0;
    if (params.flags & MAP_SHARED)
    {
        if (!description)
        {
            // Anonymous shared mapping: create a fresh shared memory object
            // (the offset is meaningless here, so force it to zero).
            params.offset = 0;
            shmid = TRY(SharedMemory::create(nullptr, 0, params.len / ARCH_PAGE_SIZE));
        }
        else
        {
            // Writable shared file mappings write back to the file, so the
            // descriptor must have been opened with write access.
            if ((params.prot & PROT_WRITE) && !(description->flags & O_WRONLY)) return err(EACCES);

            shmid = TRY(description->inode->query_shared_memory(params.offset, params.len / ARCH_PAGE_SIZE));
        }

        shmem = g_shared_memory_map.try_get_ref(shmid);
        // The object was just created or queried above, but guard against a
        // failed lookup instead of dereferencing a null pointer.
        if (!shmem) return err(ENOMEM);
        // FIXME(review): this reference is not released if a TRY below
        // fails, which presumably leaks a refcount — confirm and clean up.
        shmem->refs++;
        shmem->prot |= params.prot;
    }

    u64 address;
    if (!params.addr)
        address = TRY(current->address_space->alloc_region(get_blocks_from_size(params.len, ARCH_PAGE_SIZE),
                                                           params.prot, params.flags, params.offset, shmid));
    else
    {
        // FIXME: We should be more flexible if MAP_FIXED was not specified.
        address = align_down<ARCH_PAGE_SIZE>((u64)params.addr);
        if (!TRY(current->address_space->test_and_alloc_region(address,
                                                               get_blocks_from_size(params.len, ARCH_PAGE_SIZE),
                                                               params.prot, params.flags, params.offset, shmid)))
            return err(ENOMEM);
    }

    // Translate POSIX protection bits into MMU flags. PROT_NONE drops the
    // User bit entirely, making the pages inaccessible from userspace.
    int mmu_flags = MMU::User | MMU::NoExecute;
    if (params.prot & PROT_WRITE) mmu_flags |= MMU::ReadWrite;
    if (params.prot & PROT_EXEC) mmu_flags &= ~MMU::NoExecute;
    if (params.prot == PROT_NONE) mmu_flags = MMU::NoExecute;

#ifdef MMAP_DEBUG
    kdbgln("mmap: mapping memory at %#lx, size=%zu", address, params.len);
#endif

    if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, params.len / ARCH_PAGE_SIZE)); }
    else
    {
        TRY(MemoryManager::alloc_at_zeroed(address, get_blocks_from_size(params.len, ARCH_PAGE_SIZE), mmu_flags));
        // Private file mapping: populate the freshly zeroed pages with the
        // file's contents.
        if (description) { TRY(description->inode->read((u8*)address, params.offset, params.len)); }
    }

    return address;
}
|
|
|
|
|
|
|
|
/**
 * @brief Remove a mapping from the calling process's address space
 *        (munmap(2)).
 *
 * args[0] is the start address of the range, args[1] its size in bytes.
 *
 * @return 0 on success (including when the range was not mapped at all),
 *         or EINVAL for a zero-sized or non-page-aligned size.
 */
Result<u64> sys_munmap(Registers*, SyscallArgs args)
{
    const u64 start = (u64)args[0];
    const usize length = (usize)args[1];

    // Reject empty and unaligned sizes up front.
    if (length == 0 || !is_aligned<ARCH_PAGE_SIZE>(length)) return err(EINVAL);

    Thread* const current = Scheduler::current();

    const usize pages = get_blocks_from_size(length, ARCH_PAGE_SIZE);

    // POSIX says munmap should silently do nothing if the memory was not already mapped.
    if (!TRY(current->address_space->free_region(start, pages))) return 0;

#ifdef MMAP_DEBUG
    kdbgln("munmap: unmapping memory at %#lx, size=%zu", start, length);
#endif

    TRY(MemoryManager::unmap_owned_if_possible(start, pages));

    return { 0 };
}
|
2023-08-02 20:39:07 +00:00
|
|
|
|
|
|
|
/**
 * @brief Flush shared mappings in a range back to their backing store
 *        (msync(2)).
 *
 * args[0] is the start address of the range, args[1] its size in bytes.
 *
 * @return 0 on success (a zero-sized range is a no-op), or EINVAL if the
 *         start address is not page-aligned.
 */
Result<u64> sys_msync(Registers*, SyscallArgs args)
{
    const u64 start = (u64)args[0];
    const usize length = (usize)args[1];

    // An empty range is trivially synced.
    if (!length) return 0;
    if (!is_aligned<ARCH_PAGE_SIZE>(start)) return err(EINVAL);

    Thread* const current = Scheduler::current();

    TRY(current->address_space->sync_regions(start, get_blocks_from_size(length, ARCH_PAGE_SIZE)));

    return { 0 };
}
|