#include "Log.h"
#include "Pledge.h"
#include "arch/MMU.h"
#include "memory/MemoryManager.h"
#include "memory/SharedMemory.h"
#include "sys/Syscall.h"
#include "thread/Scheduler.h"
#include <bits/mmap-flags.h>
#include <bits/mmap.h>
#include <bits/open-flags.h>
#include <luna/Alignment.h>
#include <luna/Common.h>

Result<u64> sys_mmap(Registers*, SyscallArgs args)
{
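    // Copy the mmap parameter block from userspace; a bad pointer yields EFAULT.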
    mmap_params params;
    if (!MemoryManager::copy_from_user_typed((const mmap_params*)args[0], &params)) return err(EFAULT);

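    // Zero-length mappings and negative flag values are invalid.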
    if (params.len == 0) return err(EINVAL);

    if (params.flags < 0) return err(EINVAL);

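    // Executable mappings require the prot_exec promise; every mmap call also requires stdio.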
    Thread* current = Scheduler::current();
    if (params.prot & PROT_EXEC) TRY(check_pledge(current, Promise::p_prot_exec));
    TRY(check_pledge(current, Promise::p_stdio));

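    // Mappings that are not anonymous are backed by a file: the descriptor must be open for reading and must not be
    // in append mode.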
    SharedPtr<OpenFileDescription> description;
    if ((params.flags & MAP_ANONYMOUS) != MAP_ANONYMOUS)
    {
        description = TRY(current->resolve_fd(params.fd))->description;
        if (!(description->flags & O_RDONLY)) return err(EACCES);
        if (description->flags & O_APPEND) return err(EACCES);
    }

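    // The file offset must be page-aligned; round the length up to whole pages.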
    if (!is_aligned<ARCH_PAGE_SIZE>(params.offset)) return err(EINVAL);

    const usize pages = ceil_div(params.len, ARCH_PAGE_SIZE);

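    // For MAP_SHARED, obtain the backing shared memory object: a fresh anonymous one, or the inode's shared memory
    // for file-backed mappings.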
    SharedMemory* shmem = nullptr;
    u64 shmid = 0;
    if (params.flags & MAP_SHARED)
    {
        if (!description)
        {
            params.offset = 0;
            shmid = TRY(SharedMemory::create(nullptr, 0, pages));
        }
        else
        {
            if ((params.prot & PROT_WRITE) && !(description->flags & O_WRONLY)) return err(EACCES);
            shmid = TRY(description->inode->query_shared_memory(params.offset, pages));
        }
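        // The shmid obtained above refers to a live entry, so this lookup is expected to succeed; take a reference
        // and widen the stored protection.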
        shmem = g_shared_memory_map.try_get_ref(shmid);
        shmem->refs++;
        shmem->prot |= params.prot;
    }

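    // Choose a virtual address: either let the address space pick a free region, or honor the caller's hint
    // (rounded down to a page boundary) and fail with ENOMEM if that range is unavailable.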
    u64 address;
    if (!params.addr)
        address = TRY(current->address_space->alloc_region(pages, params.prot, params.flags, params.offset, shmid));
    else
    {
        // FIXME: We should be more flexible if MAP_FIXED was not specified.
        address = align_down<ARCH_PAGE_SIZE>((u64)params.addr);
        if (!TRY(current->address_space->test_and_alloc_region(address, pages, params.prot, params.flags, params.offset,
                                                               shmid)))
            return err(ENOMEM);
    }

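    // Translate PROT_* into MMU flags. Pages are user-accessible and non-executable by default; PROT_NONE drops the
    // User flag so the pages cannot be touched from userspace.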
    int mmu_flags = MMU::User | MMU::NoExecute;
    if (params.prot & PROT_WRITE) mmu_flags |= MMU::ReadWrite;
    if (params.prot & PROT_EXEC) mmu_flags &= ~MMU::NoExecute;
    if (params.prot == PROT_NONE) mmu_flags = MMU::NoExecute;

#ifdef MMAP_DEBUG
    kdbgln("mmap: mapping memory at %#lx, size=%zu", address, params.len);
#endif

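    // Shared mappings are materialized through the shared memory object; private ones get zeroed pages, filled from
    // the file if the mapping is file-backed.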
    if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, pages)); }
    else
    {
        TRY(MemoryManager::alloc_at_zeroed(address, pages, mmu_flags));
        if (description) { TRY(description->inode->read((u8*)address, params.offset, params.len)); }
    }

    return address;
}

Result<u64> sys_munmap(Registers*, SyscallArgs args)
{
    u64 address = (u64)args[0];
    usize size = (usize)args[1];

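    // A zero-length or misaligned unmap request is invalid.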
    if (size == 0) return err(EINVAL);
    if (!is_aligned<ARCH_PAGE_SIZE>(address)) return err(EINVAL);

    Thread* current = Scheduler::current();
    TRY(check_pledge(current, Promise::p_stdio));

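    // Remove the region from the calling thread's address space.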
    bool ok = TRY(current->address_space->free_region(address, ceil_div(size, ARCH_PAGE_SIZE)));

    // POSIX says munmap should succeed silently if the memory was not mapped in the first place.
    if (!ok) return 0;

#ifdef MMAP_DEBUG
    kdbgln("munmap: unmapping memory at %#lx, size=%zu", address, size);
#endif

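    // Unmap the pages, freeing the underlying physical frames where they are kernel-owned.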
    TRY(MemoryManager::unmap_owned_if_possible(address, ceil_div(size, ARCH_PAGE_SIZE)));

    return { 0 };
}

Result<u64> sys_msync(Registers*, SyscallArgs args)
{
    u64 address = (u64)args[0];
    usize size = (usize)args[1];

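    // Syncing zero bytes is a no-op; the address must be page-aligned.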
    if (!size) return 0;
    if (!is_aligned<ARCH_PAGE_SIZE>(address)) return err(EINVAL);

    Thread* current = Scheduler::current();
    TRY(check_pledge(current, Promise::p_stdio));

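    // Flush the regions overlapping the given range back to their backing storage.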
    TRY(current->address_space->sync_regions(address, ceil_div(size, ARCH_PAGE_SIZE)));

    return { 0 };
}