#include "Log.h" #include "arch/MMU.h" #include "memory/MemoryManager.h" #include "memory/SharedMemory.h" #include "sys/Syscall.h" #include "thread/Scheduler.h" #include #include #include #include constexpr uintptr_t USERSPACE_HEAP_BASE = 0x3000000; Result sys_mmap(Registers*, SyscallArgs args) { mmap_params params; if (!MemoryManager::copy_from_user_typed((const mmap_params*)args[0], ¶ms)) return err(EFAULT); if (params.len == 0) return err(EINVAL); if (params.flags < 0) return err(EINVAL); Thread* current = Scheduler::current(); SharedPtr description; if ((params.flags & MAP_ANONYMOUS) != MAP_ANONYMOUS) { description = TRY(current->resolve_fd(params.fd))->description; if (!(description->flags & O_RDONLY)) return err(EACCES); if (description->flags & O_APPEND) return err(EACCES); } if (!is_aligned(params.offset)) return err(EINVAL); params.len = align_up(params.len); SharedMemory* shmem = nullptr; u64 shmid = 0; if (params.flags & MAP_SHARED) { if (!description) { params.offset = 0; shmid = TRY(SharedMemory::create(nullptr, 0, params.len / ARCH_PAGE_SIZE)); } else { if ((params.prot & PROT_WRITE) && !(description->flags & O_WRONLY)) return err(EACCES); shmid = TRY(description->inode->query_shared_memory(params.offset, params.len)); } shmem = g_shared_memory_map.try_get_ref(shmid); shmem->refs++; } u64 address; if (!params.addr) address = TRY(current->address_space->alloc_region(get_blocks_from_size(params.len, ARCH_PAGE_SIZE), params.prot, params.flags, params.offset, shmid)); else { // FIXME: We should be more flexible if MAP_FIXED was not specified. address = align_down((u64)params.addr); if (!TRY(current->address_space->test_and_alloc_region(address, get_blocks_from_size(params.len, ARCH_PAGE_SIZE), params.prot, params.flags, params.offset, shmid))) return err(ENOMEM); } int mmu_flags = MMU::User | MMU::NoExecute; if (params.prot & PROT_WRITE) mmu_flags |= MMU::ReadWrite; if (params.prot & PROT_EXEC) mmu_flags &= ~MMU::NoExecute; if (params.prot == PROT_NONE) mmu_flags = MMU::NoExecute; #ifdef MMAP_DEBUG kdbgln("mmap: mapping memory at %#lx, size=%zu", address, len); #endif if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, get_blocks_from_size(params.len, ARCH_PAGE_SIZE))); } else { TRY(MemoryManager::alloc_at_zeroed(address, get_blocks_from_size(params.len, ARCH_PAGE_SIZE), mmu_flags)); if (description) { TRY(description->inode->read((u8*)address, params.offset, params.len)); } } return address; } Result sys_munmap(Registers*, SyscallArgs args) { u64 address = (u64)args[0]; usize size = (usize)args[1]; if (size == 0) return err(EINVAL); if (!is_aligned(size)) return err(EINVAL); Thread* current = Scheduler::current(); bool ok = TRY(current->address_space->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE))); // POSIX says munmap should silently do nothing if the memory was not already mapped. if (!ok) return 0; #ifdef MMAP_DEBUG kdbgln("munmap: unmapping memory at %#lx, size=%zu", address, size); #endif TRY(MemoryManager::unmap_owned_if_possible(address, get_blocks_from_size(size, ARCH_PAGE_SIZE))); return { 0 }; } Result sys_msync(Registers*, SyscallArgs args) { u64 address = (u64)args[0]; usize size = (usize)args[1]; if (!size) return 0; if (!is_aligned(address)) return err(EINVAL); Thread* current = Scheduler::current(); TRY(current->address_space->sync_regions(address, get_blocks_from_size(size, ARCH_PAGE_SIZE))); return { 0 }; }