2022-10-02 20:45:04 +02:00
|
|
|
#define MODULE "mem"
|
|
|
|
|
|
|
|
#include "interrupts/Context.h"
|
|
|
|
#include "log/Log.h"
|
2022-10-17 19:32:24 +02:00
|
|
|
#include "memory/Memory.h"
|
2022-10-02 20:45:04 +02:00
|
|
|
#include "memory/MemoryManager.h"
|
|
|
|
#include "memory/VMM.h"
|
2022-10-12 12:15:12 +02:00
|
|
|
#include "misc/utils.h"
|
2022-10-19 17:41:23 +02:00
|
|
|
#include "std/errno.h"
|
2022-10-17 18:43:35 +02:00
|
|
|
#include "thread/Scheduler.h"
|
2022-10-02 20:45:04 +02:00
|
|
|
#include <stddef.h>
|
|
|
|
|
2022-11-02 18:34:57 +01:00
|
|
|
// Userspace memory protection bits, as passed to mmap()/mprotect().
#define PROT_NONE 0
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4

// Encodes an errno value into the mmap failure return convention: the top 56
// bits are all ones, the low byte is the errno. Fully parenthesized so the
// expansion stays intact inside larger expressions, and the parameter is not
// named `errno` (which <errno.h> defines as a macro).
#define MAP_FAIL(err) (0xffffffffffffff00 | (unsigned char)(err))

// Renders a protection bitmask as a human-readable "rwx"-style string for
// logging. Returns a pointer to a static buffer, overwritten on each call.
static const char* format_prot(int prot)
{
    static char prot_string[4];
    prot_string[3] = 0;
    prot_string[0] = ((prot & PROT_READ) > 0) ? 'r' : '-';
    prot_string[1] = ((prot & PROT_WRITE) > 0) ? 'w' : '-';
    prot_string[2] = ((prot & PROT_EXEC) > 0) ? 'x' : '-';
    return prot_string;
}
|
|
|
|
|
|
|
|
static int mman_flags_from_prot(int prot)
|
|
|
|
{
|
2022-11-02 18:34:57 +01:00
|
|
|
prot &= 0b111;
|
Kernel: Introduce page ownership
Some pages, such as framebuffer pages, are not physical memory frames reserved for the current process.
Some, such as the framebuffer, may be shared between all processes.
Yet, on exit() or on exec(), a process frees all frames mapped into its address spaces.
And on fork(), it copies all data between frames. So how could we map framebuffers.
Simple: we use one of the bits in page table entries which are available to the OS, and mark whether that page is owned by the current process.
If it is owned, it will be:
- Freed on address space destruction
- Its data will be copied to a new page owned by the child process on fork()
If it is not owned, it will be:
- Left alone on address space destruction
- On fork(), the child's virtual page will be mapped to the same physical frame as the parent
This still needs a bit more work, such as keeping a reference of how many processes use a page to free it when all processes using it exit/exec.
This should be done for MAP_SHARED mappings, for example, since they are not permanent forever,
unlike the framebuffer for example.
2022-11-02 19:32:28 +01:00
|
|
|
int flags = MAP_USER | MAP_AS_OWNED_BY_TASK;
|
|
|
|
if (prot == PROT_NONE) return MAP_AS_OWNED_BY_TASK;
|
2022-11-03 16:52:21 +01:00
|
|
|
if ((prot & PROT_WRITE) > 0) { flags |= MAP_READ_WRITE; }
|
|
|
|
if ((prot & PROT_EXEC) > 0) { flags |= MAP_EXEC; }
|
2022-11-02 18:34:57 +01:00
|
|
|
return flags;
|
2022-10-15 12:57:14 +02:00
|
|
|
}
|
|
|
|
|
2022-11-02 20:24:07 +01:00
|
|
|
void sys_mmap(Context* context, void* address, size_t size, int prot, int fd, off_t offset)
|
2022-10-02 20:45:04 +02:00
|
|
|
{
|
2022-10-08 14:52:28 +02:00
|
|
|
if (size < PAGE_SIZE)
|
2022-10-02 20:45:04 +02:00
|
|
|
{
|
2022-10-13 18:50:12 +02:00
|
|
|
kwarnln("mmap(): size too small");
|
|
|
|
context->rax = MAP_FAIL(EINVAL);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (size % PAGE_SIZE)
|
|
|
|
{
|
|
|
|
kwarnln("mmap(): size not a multiple of PAGE_SIZE");
|
2022-10-08 12:06:09 +02:00
|
|
|
context->rax = MAP_FAIL(EINVAL);
|
2022-10-02 20:45:04 +02:00
|
|
|
return;
|
|
|
|
}
|
2022-10-15 12:57:14 +02:00
|
|
|
int real_flags = mman_flags_from_prot(prot);
|
2022-10-02 20:45:04 +02:00
|
|
|
if (address)
|
|
|
|
{
|
2022-11-02 20:24:07 +01:00
|
|
|
kdbgln("mmap(): %ld pages at address %p, %s, fd %d", size / PAGE_SIZE, address, format_prot(prot), fd);
|
2022-10-17 19:32:24 +02:00
|
|
|
if (Memory::is_kernel_address((uintptr_t)address))
|
|
|
|
{
|
|
|
|
kwarnln("munmap() failed: attempted to unmap a kernel page");
|
|
|
|
context->rax = MAP_FAIL(ENOMEM);
|
|
|
|
return;
|
|
|
|
}
|
2022-10-13 18:42:53 +02:00
|
|
|
if (VMM::get_physical((uint64_t)address) != (uint64_t)-1) // Address is already used.
|
2022-10-02 20:45:04 +02:00
|
|
|
{
|
2022-10-13 18:50:12 +02:00
|
|
|
kwarnln("attempt to map an already mapped address");
|
2022-10-08 12:06:09 +02:00
|
|
|
context->rax = MAP_FAIL(ENOMEM);
|
2022-10-02 20:45:04 +02:00
|
|
|
return;
|
|
|
|
}
|
2022-11-02 20:24:07 +01:00
|
|
|
uint64_t addr_offset = (uint64_t)address % PAGE_SIZE;
|
2022-11-03 16:52:21 +01:00
|
|
|
if (fd >= 0)
|
2022-11-02 20:24:07 +01:00
|
|
|
{
|
|
|
|
int err;
|
2022-11-03 16:52:21 +01:00
|
|
|
Descriptor* file = Scheduler::current_task()->open_descriptor_from_fd(fd, err);
|
|
|
|
if (!file)
|
2022-11-02 20:24:07 +01:00
|
|
|
{
|
|
|
|
context->rax = MAP_FAIL(err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
context->rax = file->mmap((uint64_t)address - addr_offset, size, real_flags, offset);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
void* result = MemoryManager::get_pages_at((uint64_t)address - addr_offset,
|
2022-10-12 12:15:12 +02:00
|
|
|
Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags);
|
2022-10-02 20:45:04 +02:00
|
|
|
if (result)
|
|
|
|
{
|
2022-10-13 18:50:12 +02:00
|
|
|
kdbgln("mmap() succeeded: %p", result);
|
2022-10-02 20:45:04 +02:00
|
|
|
context->rax = (uint64_t)result;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-10-17 19:32:24 +02:00
|
|
|
kwarnln("mmap() failed: failed to allocate physical memory");
|
2022-10-08 12:06:09 +02:00
|
|
|
context->rax = MAP_FAIL(ENOMEM);
|
2022-10-02 20:45:04 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2022-11-03 16:52:21 +01:00
|
|
|
kdbgln("mmap(): %ld pages at any address, %s, fd %d", Utilities::get_blocks_from_size(PAGE_SIZE, size),
|
|
|
|
format_prot(prot), fd);
|
2022-10-17 18:43:35 +02:00
|
|
|
uint64_t ptr =
|
|
|
|
Scheduler::current_task()->allocator.request_virtual_pages(Utilities::get_blocks_from_size(PAGE_SIZE, size));
|
2022-10-17 19:32:24 +02:00
|
|
|
if (!ptr)
|
|
|
|
{
|
|
|
|
kwarnln("mmap() failed: failed to allocate virtual address");
|
|
|
|
context->rax = MAP_FAIL(ENOMEM);
|
|
|
|
return;
|
|
|
|
}
|
2022-11-03 16:52:21 +01:00
|
|
|
if (fd >= 0)
|
2022-11-02 20:24:07 +01:00
|
|
|
{
|
|
|
|
int err;
|
2022-11-03 16:52:21 +01:00
|
|
|
Descriptor* file = Scheduler::current_task()->open_descriptor_from_fd(fd, err);
|
|
|
|
if (!file)
|
2022-11-02 20:24:07 +01:00
|
|
|
{
|
|
|
|
context->rax = MAP_FAIL(err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
context->rax = file->mmap(ptr, size, real_flags, offset);
|
|
|
|
return;
|
|
|
|
}
|
2022-10-17 18:43:35 +02:00
|
|
|
void* result = MemoryManager::get_pages_at(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, size), real_flags);
|
2022-10-02 20:45:04 +02:00
|
|
|
if (result)
|
|
|
|
{
|
2022-10-13 18:50:12 +02:00
|
|
|
kdbgln("mmap() succeeded: %p", result);
|
2022-10-02 20:45:04 +02:00
|
|
|
context->rax = (uint64_t)result;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-10-17 19:32:24 +02:00
|
|
|
kwarnln("mmap() failed: failed to allocate physical memory");
|
2022-10-08 12:06:09 +02:00
|
|
|
context->rax = MAP_FAIL(ENOMEM);
|
2022-10-02 20:45:04 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// munmap() syscall: unmaps `size` bytes (a nonzero multiple of PAGE_SIZE)
// starting at `address`. Physical frames are released only when the page is
// owned by this task (MAP_AS_OWNED_BY_TASK); shared mappings (for example the
// framebuffer) are only unlinked from the address space. rax is 0 on success,
// -EINVAL on any validation failure.
void sys_munmap(Context* context, void* address, size_t size)
{
    kdbgln("munmap(): attempting to unmap %p", address);

    if (size < PAGE_SIZE)
    {
        kwarnln("munmap() failed: size is too small");
        context->rax = -EINVAL;
        return;
    }
    if (size % PAGE_SIZE)
    {
        kwarnln("munmap() failed: size is not a multiple of PAGE_SIZE");
        context->rax = -EINVAL;
        return;
    }
    if (!address)
    {
        kwarnln("munmap() failed: attempted to unmap page 0");
        context->rax = -EINVAL;
        return;
    }
    if (Memory::is_kernel_address((uintptr_t)address))
    {
        kwarnln("munmap() failed: attempted to unmap a kernel page");
        context->rax = -EINVAL;
        return;
    }

    // The page-table flags carry the ownership bit that decides below whether
    // the backing frames are freed or left alone.
    uint64_t page_flags = VMM::get_flags((uint64_t)address);
    if (page_flags == (uint64_t)-1)
    {
        kwarnln("munmap() failed: attempted to unmap a non-existent page");
        context->rax = -EINVAL;
        return;
    }

    uint64_t misalign = (uint64_t)address % PAGE_SIZE;
    uint64_t base = (uint64_t)address - misalign;
    uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, size);

    Scheduler::current_task()->allocator.free_virtual_pages(base, pages);

    if (page_flags & MAP_AS_OWNED_BY_TASK)
        MemoryManager::release_pages((void*)base, pages);
    else
        MemoryManager::release_unaligned_mappings((void*)base, pages);

    kdbgln("munmap() succeeded");
    context->rax = 0;
    return;
}
|
|
|
|
|
|
|
|
// mprotect() syscall: changes the protection of `size` bytes (a nonzero
// multiple of PAGE_SIZE) starting at `address`. The page's existing
// MAP_AS_OWNED_BY_TASK bit is preserved as-is. rax is 0 on success, -EINVAL
// on any validation failure.
void sys_mprotect(Context* context, void* address, size_t size, int prot)
{
    kdbgln("mprotect(): attempting to protect %p with %s", address, format_prot(prot));

    if (size < PAGE_SIZE)
    {
        kwarnln("mprotect() failed: size is too small");
        context->rax = -EINVAL;
        return;
    }
    if (size % PAGE_SIZE)
    {
        kwarnln("mprotect() failed: size is not a multiple of PAGE_SIZE");
        context->rax = -EINVAL;
        return;
    }
    if (!address)
    {
        kwarnln("mprotect() failed: attempted to protect page 0");
        context->rax = -EINVAL;
        return;
    }
    if (Memory::is_kernel_address((uintptr_t)address))
    {
        kwarnln("mprotect() failed: attempted to protect a kernel page");
        context->rax = -EINVAL;
        return;
    }

    uint64_t page_flags = VMM::get_flags((uint64_t)address);
    if (page_flags == (uint64_t)-1)
    {
        kwarnln("mprotect() failed: attempted to protect a non-existent page");
        context->rax = -EINVAL;
        return;
    }

    // Keep the ownership bit exactly as it was: a task must not gain
    // ownership of a shared page just by calling mprotect() on it.
    int new_flags = mman_flags_from_prot(prot);
    if (!(page_flags & MAP_AS_OWNED_BY_TASK)) new_flags &= ~(MAP_AS_OWNED_BY_TASK);

    uint64_t misalign = (uint64_t)address % PAGE_SIZE;
    MemoryManager::protect((void*)((uint64_t)address - misalign), Utilities::get_blocks_from_size(PAGE_SIZE, size),
                           new_flags);

    kdbgln("mprotect() succeeded");
    context->rax = 0;
    return;
}
|