#include "MemoryManager.h" #include "arch/MMU.h" #include "arch/Serial.h" #include "bootboot.h" #include #include extern BOOTBOOT bootboot; static bool page_bitmap_read(u64 index); static void page_bitmap_set(u64 index, bool value); static u64 free_mem = 0; static u64 used_mem = 0; static u64 reserved_mem = 0; static char* page_bitmap_addr = nullptr; static char* page_virtual_bitmap_addr = nullptr; static u64 page_bitmap_size; static u64 start_index = 0; static bool page_bitmap_read(u64 index) { return (page_virtual_bitmap_addr[index / 8] & (0b10000000 >> (index % 8))) > 0; } static void page_bitmap_set(u64 index, bool value) { uint64_t byteIndex = index / 8; uint8_t bitIndexer = 0b10000000 >> (index % 8); page_virtual_bitmap_addr[byteIndex] &= (uint8_t)(~bitIndexer); if (value) { page_virtual_bitmap_addr[byteIndex] |= bitIndexer; } } namespace MemoryManager { void init_physical_allocator() { u64 total_mem = 0; void* biggest_memory_block = nullptr; u64 biggest_memory_block_size = 0; // walk the memory map MMapEnt* ptr = &bootboot.mmap; uint64_t mmap_entries = (bootboot.size - 128) / 16; for (u64 i = 0; i < mmap_entries; i++) { u64 size = MMapEnt_Size(ptr); total_mem += size; if (!MMapEnt_IsFree(ptr)) { ptr++; continue; } if (size > biggest_memory_block_size) { biggest_memory_block = (void*)MMapEnt_Ptr(ptr); biggest_memory_block_size = MMapEnt_Size(ptr); } ptr++; } page_bitmap_addr = (char*)biggest_memory_block; page_virtual_bitmap_addr = page_bitmap_addr; // we'll map this to virtual memory as soon as the MMU is ready if ((total_mem / MMU::page_size() / 8) >= biggest_memory_block_size) { Serial::println("ERROR: No single memory block is enough to hold the page bitmap"); for (;;) ; } page_bitmap_size = total_mem / MMU::page_size() / 8 + 1; memset(page_bitmap_addr, 0xFF, page_bitmap_size); ptr = &bootboot.mmap; for (uint64_t i = 0; i < mmap_entries; i++) { uint64_t index = MMapEnt_Ptr(ptr) / MMU::page_size(); if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); } else { free_mem += MMapEnt_Size(ptr); for (uint64_t j = 0; j < (MMapEnt_Size(ptr) / MMU::page_size()); j++) { page_bitmap_set(index + j, false); } } ptr++; } lock_pages((u64)page_bitmap_addr, page_bitmap_size / MMU::page_size() + 1); } void init() { init_physical_allocator(); MMU::setup_initial_page_directory(); } void lock_page(u64 page) { uint64_t index = ((uint64_t)page) / MMU::page_size(); if (page_bitmap_read(index)) return; page_bitmap_set(index, true); used_mem += MMU::page_size(); free_mem -= MMU::page_size(); } void lock_pages(u64 pages, u64 count) { for (u64 index = 0; index < count; index++) { lock_page(pages + (index * MMU::page_size())); } } Result alloc_physical_page() { for (u64 index = start_index; index < (page_bitmap_size * 8); index++) { if (page_bitmap_read(index)) continue; page_bitmap_set(index, true); start_index = index + 1; free_mem -= MMU::page_size(); used_mem += MMU::page_size(); return index * MMU::page_size(); } return err; // FIXME: ENOMEM. } Result free_physical_page(u64 page) { u64 index = page / MMU::page_size(); if (index > (page_bitmap_size * 8)) return err; if (!page_bitmap_read(index)) return err; page_bitmap_set(index, false); used_mem -= MMU::page_size(); free_mem += MMU::page_size(); if (start_index > index) start_index = index; return {}; } }