Luna/kernel/src/memory/MemoryManager.cpp

281 lines
7.2 KiB
C++
Raw Normal View History

2022-11-19 16:59:49 +00:00
#include "memory/MemoryManager.h"
2022-11-30 16:16:36 +00:00
#include "Log.h"
2022-11-13 13:29:15 +00:00
#include "arch/MMU.h"
2022-11-19 16:59:49 +00:00
#include "boot/bootboot.h"
#include <luna/Alignment.h>
#include <luna/String.h>
#include <luna/SystemError.h>
#include <luna/Types.h>
2022-11-13 13:29:15 +00:00
extern BOOTBOOT bootboot;
2022-11-13 15:54:07 +00:00
extern u8 start_of_kernel_rodata[1];
extern u8 end_of_kernel_rodata[1];
extern u8 start_of_kernel_data[1];
extern u8 end_of_kernel_data[1];
2022-11-13 13:29:15 +00:00
// --- Physical frame bitmap (one bit per page frame, MSB-first in each byte) ---
static bool page_bitmap_read(u64 index);
static void page_bitmap_set(u64 index, bool value);

// Byte counts of physical memory, kept in sync by the allocator below.
static u64 free_mem = 0;
static u64 used_mem = 0;
static u64 reserved_mem = 0;

// Physical location of the bitmap, and the address we actually poke at
// (identical until the bitmap is remapped into virtual memory).
static char* page_bitmap_addr = nullptr;
static char* page_virtual_bitmap_addr = nullptr;
static u64 page_bitmap_size;

// Lowest bit index that could still be free; allocation scans start here.
static u64 start_index = 0;

// Returns whether the frame at `index` is currently marked as used.
static bool page_bitmap_read(u64 index)
{
    const u8 bit = (u8)(0b10000000 >> (index % 8));
    return (page_virtual_bitmap_addr[index / 8] & bit) != 0;
}

// Marks the frame at `index` as used (true) or free (false).
static void page_bitmap_set(u64 index, bool value)
{
    const u64 byte_index = index / 8;
    const u8 mask = (u8)(0b10000000 >> (index % 8));
    if (value) page_virtual_bitmap_addr[byte_index] |= mask;
    else page_virtual_bitmap_addr[byte_index] &= (u8)(~mask);
}

// Asserts that an address passed to the mapping functions is page-aligned.
#define CHECK_PAGE_ALIGNED(address) check(is_aligned(address, ARCH_PAGE_SIZE))
2022-11-13 13:29:15 +00:00
namespace MemoryManager
{
// Tightens the permissions on the kernel's own sections: rodata becomes
// non-executable (and stays read-only), data becomes non-executable
// read-write. Returns any error propagated from remap().
Result<void> protect_kernel_sections()
{
    const u64 rodata_bytes = (u64)(end_of_kernel_rodata - start_of_kernel_rodata);
    TRY(remap((u64)start_of_kernel_rodata, get_blocks_from_size(rodata_bytes, ARCH_PAGE_SIZE), MMU::NoExecute));

    const u64 data_bytes = (u64)(end_of_kernel_data - start_of_kernel_data);
    TRY(remap((u64)start_of_kernel_data, get_blocks_from_size(data_bytes, ARCH_PAGE_SIZE),
              MMU::NoExecute | MMU::ReadWrite));

    return {};
}
2022-11-13 13:29:15 +00:00
// Builds the physical frame allocator from the BOOTBOOT memory map:
// finds the largest free block, places the page bitmap there, clears the
// bits for every free page, and finally locks the frames that back the
// bitmap itself. Halts forever if no block can hold the bitmap.
void init_physical_allocator()
{
    u64 total_mem = 0;
    void* biggest_memory_block = nullptr;
    u64 biggest_memory_block_size = 0;

    // walk the memory map
    // (bootboot.size counts the whole header; the fixed part before the
    // map is 128 bytes and each MMapEnt is 16 bytes)
    MMapEnt* ptr = &bootboot.mmap;
    u64 mmap_entries = (bootboot.size - 128) / 16;

    // First pass: total memory, and the largest free block (bitmap home).
    for (u64 i = 0; i < mmap_entries; i++)
    {
        u64 size = MMapEnt_Size(ptr);
        total_mem += size;
        if (!MMapEnt_IsFree(ptr))
        {
            ptr++;
            continue;
        }
        if (size > biggest_memory_block_size)
        {
            biggest_memory_block = (void*)MMapEnt_Ptr(ptr);
            biggest_memory_block_size = MMapEnt_Size(ptr);
        }
        ptr++;
    }

    page_bitmap_addr = (char*)biggest_memory_block;
    page_virtual_bitmap_addr = page_bitmap_addr; // we'll map this to virtual memory as soon as the MMU is ready

    // One bit per page frame; without a block big enough to hold the
    // bitmap we cannot allocate at all, so halt.
    if ((total_mem / ARCH_PAGE_SIZE / 8) >= biggest_memory_block_size)
    {
        kerrorln("ERROR: No single memory block is enough to hold the page bitmap");
        for (;;)
            ;
    }

    page_bitmap_size = total_mem / ARCH_PAGE_SIZE / 8 + 1;
    // Start with every frame marked used, then clear the free ranges below.
    memset(page_bitmap_addr, 0xFF, page_bitmap_size);

    // Second pass: account reserved vs free memory and clear free bits.
    ptr = &bootboot.mmap;
    for (u64 i = 0; i < mmap_entries; i++)
    {
        u64 index = MMapEnt_Ptr(ptr) / ARCH_PAGE_SIZE;
        if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); }
        else
        {
            free_mem += MMapEnt_Size(ptr);
            for (u64 j = 0; j < (MMapEnt_Size(ptr) / ARCH_PAGE_SIZE); j++) { page_bitmap_set(index + j, false); }
        }
        ptr++;
    }

    // Lock the frames occupied by the bitmap so they are never handed out.
    lock_frames((u64)page_bitmap_addr, page_bitmap_size / ARCH_PAGE_SIZE + 1);
}
// Early memory-subsystem bring-up: the physical allocator must exist
// before the initial page directory is set up.
void init()
{
    init_physical_allocator();
    MMU::setup_initial_page_directory();
}
// Marks the physical frame containing `frame` as used and updates the
// memory accounting. Locking an already-used frame is a no-op.
void lock_frame(u64 frame)
{
    const u64 bit = ((u64)frame) / ARCH_PAGE_SIZE;
    if (page_bitmap_read(bit)) return; // already locked; counters stay untouched
    page_bitmap_set(bit, true);
    free_mem -= ARCH_PAGE_SIZE;
    used_mem += ARCH_PAGE_SIZE;
}
// Locks `count` consecutive physical frames starting at address `frames`.
void lock_frames(u64 frames, u64 count)
{
    for (u64 i = 0; i < count; i++) lock_frame(frames + i * ARCH_PAGE_SIZE);
}
// Finds the first free physical frame, marks it used, and returns its
// physical address. The scan begins at start_index (everything below it is
// known to be used). Returns ENOMEM when the bitmap has no free bit.
Result<u64> alloc_frame()
{
    const u64 total_bits = page_bitmap_size * 8;
    for (u64 bit = start_index; bit < total_bits; bit++)
    {
        if (page_bitmap_read(bit)) continue;
        page_bitmap_set(bit, true);
        start_index = bit + 1; // next scan can skip everything up to here
        used_mem += ARCH_PAGE_SIZE;
        free_mem -= ARCH_PAGE_SIZE;
        return bit * ARCH_PAGE_SIZE;
    }
    return err(ENOMEM);
}
// Releases a physical frame previously handed out by alloc_frame().
// Returns EFAULT if the address lies outside the bitmap or the frame is
// not currently allocated.
Result<void> free_frame(u64 frame)
{
    const u64 index = frame / ARCH_PAGE_SIZE;
    // Valid bit indices are [0, page_bitmap_size * 8). The previous check
    // used '>', which let index == page_bitmap_size * 8 slip through and
    // made page_bitmap_read() touch one byte past the end of the bitmap.
    if (index >= (page_bitmap_size * 8)) return err(EFAULT);
    if (!page_bitmap_read(index)) return err(EFAULT);
    page_bitmap_set(index, false);
    used_mem -= ARCH_PAGE_SIZE;
    free_mem += ARCH_PAGE_SIZE;
    // Let the next allocation scan reuse this low index.
    if (start_index > index) start_index = index;
    return {};
}
// Changes the MMU flags of `count` already-mapped pages starting at the
// page-aligned address `address`.
Result<void> remap(u64 address, usize count, int flags)
{
    CHECK_PAGE_ALIGNED(address);
    for (usize i = 0; i < count; i++) { TRY(MMU::remap(address + i * ARCH_PAGE_SIZE, flags)); }
    return {};
}
// Maps `count` consecutive physical frames starting at `phys` into the
// virtual range starting at `virt`. Both addresses must be page-aligned.
Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags)
{
    CHECK_PAGE_ALIGNED(virt);
    CHECK_PAGE_ALIGNED(phys);
    for (usize i = 0; i < count; i++)
    {
        const u64 offset = i * ARCH_PAGE_SIZE;
        TRY(MMU::map(virt + offset, phys + offset, flags));
    }
    return {};
}
// Allocates `count` fresh physical frames and maps them consecutively at
// the page-aligned address `virt`. Returns the start of the virtual range.
// NOTE(review): if an allocation or mapping fails partway through, frames
// mapped by earlier iterations are not rolled back — confirm callers
// tolerate this.
Result<u64> alloc_at(u64 virt, usize count, int flags)
{
    CHECK_PAGE_ALIGNED(virt);
    const u64 range_start = virt;
    for (usize i = 0; i < count; i++)
    {
        const u64 frame = TRY(alloc_frame());
        TRY(MMU::map(virt + i * ARCH_PAGE_SIZE, frame, flags));
    }
    return range_start;
}
// Unmaps `count` pages starting at `virt` and frees the backing physical
// frames (use for mappings that own their memory).
Result<void> unmap_owned(u64 virt, usize count)
{
    CHECK_PAGE_ALIGNED(virt);
    for (usize i = 0; i < count; i++)
    {
        const u64 frame = TRY(MMU::unmap(virt + i * ARCH_PAGE_SIZE));
        TRY(free_frame(frame));
    }
    return {};
}
// Unmaps `count` pages starting at `virt` but leaves the backing physical
// frames allocated (use for mappings that do not own their memory).
Result<void> unmap_weak(u64 virt, usize count)
{
    CHECK_PAGE_ALIGNED(virt);
    for (usize i = 0; i < count; i++) { TRY(MMU::unmap(virt + i * ARCH_PAGE_SIZE)); }
    return {};
}
// Like remap(), but accepts an unaligned start address: the range is
// widened down to a page boundary, with one extra page when `address`
// was unaligned so the tail is still covered.
Result<void> remap_unaligned(u64 address, usize count, int flags)
{
    if (!is_aligned(address, ARCH_PAGE_SIZE)) count++;
    u64 page = align_down(address, ARCH_PAGE_SIZE);
    for (usize i = 0; i < count; i++)
    {
        TRY(MMU::remap(page, flags));
        page += ARCH_PAGE_SIZE;
    }
    return {};
}
// Returns true if `address` is mapped (its MMU flags can be retrieved).
bool validate_readable_page(u64 address)
{
    return !MMU::get_flags(address).has_error();
}
// Returns true if `address` is mapped with write permission.
bool validate_writable_page(u64 address)
{
    auto flags = MMU::get_flags(address);
    if (flags.has_error()) return false;
    return (flags.release_value() & MMU::ReadWrite) != 0;
}
// Bytes of physical memory currently free.
u64 free()
{
    return free_mem;
}

// Bytes of physical memory currently allocated.
u64 used()
{
    return used_mem;
}

// Bytes of physical memory the boot memory map reported as not free.
u64 reserved()
{
    return reserved_mem;
}

// Total bytes of physical memory known to the allocator.
u64 total()
{
    return free_mem + used_mem + reserved_mem;
}
2022-11-13 13:29:15 +00:00
}