#include "memory/MemoryManager.h" #include "Log.h" #include "arch/MMU.h" #include "boot/bootboot.h" #include #include #include #include extern BOOTBOOT bootboot; extern u8 start_of_kernel_rodata[1]; extern u8 end_of_kernel_rodata[1]; extern u8 start_of_kernel_data[1]; extern u8 end_of_kernel_data[1]; static bool page_bitmap_read(u64 index); static void page_bitmap_set(u64 index, bool value); static u64 free_mem = 0; static u64 used_mem = 0; static u64 reserved_mem = 0; static char* page_bitmap_addr = nullptr; static char* page_virtual_bitmap_addr = nullptr; static u64 page_bitmap_size; static u64 start_index = 0; static bool page_bitmap_read(u64 index) { return (page_virtual_bitmap_addr[index / 8] & (0b10000000 >> (index % 8))) > 0; } static void page_bitmap_set(u64 index, bool value) { u64 byte_index = index / 8; u8 mask = 0b10000000 >> (index % 8); page_virtual_bitmap_addr[byte_index] &= (u8)(~mask); if (value) { page_virtual_bitmap_addr[byte_index] |= mask; } } #define CHECK_PAGE_ALIGNED(address) check(is_aligned(address, ARCH_PAGE_SIZE)) namespace MemoryManager { Result protect_kernel_sections() { const u64 rodata_size = (u64)(end_of_kernel_rodata - start_of_kernel_rodata); const u64 rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE); TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute)); const u64 data_size = (u64)(end_of_kernel_data - start_of_kernel_data); const u64 data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE); TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite)); return {}; } void init_physical_allocator() { u64 total_mem = 0; void* biggest_memory_block = nullptr; u64 biggest_memory_block_size = 0; // walk the memory map MMapEnt* ptr = &bootboot.mmap; u64 mmap_entries = (bootboot.size - 128) / 16; for (u64 i = 0; i < mmap_entries; i++) { u64 size = MMapEnt_Size(ptr); total_mem += size; if (!MMapEnt_IsFree(ptr)) { ptr++; continue; } if (size > biggest_memory_block_size) { biggest_memory_block = (void*)MMapEnt_Ptr(ptr); biggest_memory_block_size = MMapEnt_Size(ptr); } ptr++; } page_bitmap_addr = (char*)biggest_memory_block; page_virtual_bitmap_addr = page_bitmap_addr; // we'll map this to virtual memory as soon as the MMU is ready if ((total_mem / ARCH_PAGE_SIZE / 8) >= biggest_memory_block_size) { kerrorln("ERROR: No single memory block is enough to hold the page bitmap"); for (;;) ; } page_bitmap_size = total_mem / ARCH_PAGE_SIZE / 8 + 1; memset(page_bitmap_addr, 0xFF, page_bitmap_size); ptr = &bootboot.mmap; for (u64 i = 0; i < mmap_entries; i++) { u64 index = MMapEnt_Ptr(ptr) / ARCH_PAGE_SIZE; if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); } else { free_mem += MMapEnt_Size(ptr); for (u64 j = 0; j < (MMapEnt_Size(ptr) / ARCH_PAGE_SIZE); j++) { page_bitmap_set(index + j, false); } } ptr++; } lock_frames((u64)page_bitmap_addr, page_bitmap_size / ARCH_PAGE_SIZE + 1); } void init() { init_physical_allocator(); MMU::setup_initial_page_directory(); } void lock_frame(u64 frame) { const u64 index = ((u64)frame) / ARCH_PAGE_SIZE; if (page_bitmap_read(index)) return; page_bitmap_set(index, true); used_mem += ARCH_PAGE_SIZE; free_mem -= ARCH_PAGE_SIZE; } void lock_frames(u64 frames, u64 count) { for (u64 index = 0; index < count; index++) { lock_frame(frames + (index * ARCH_PAGE_SIZE)); } } Result alloc_frame() { for (u64 index = start_index; index < (page_bitmap_size * 8); index++) { if (page_bitmap_read(index)) continue; page_bitmap_set(index, true); start_index = index + 1; free_mem -= 
            used_mem += ARCH_PAGE_SIZE;
            return index * ARCH_PAGE_SIZE;
        }

        return err(ENOMEM);
    }

    Result<void> free_frame(u64 frame)
    {
        const u64 index = frame / ARCH_PAGE_SIZE;
        if (index >= (page_bitmap_size * 8)) return err(EFAULT);
        if (!page_bitmap_read(index)) return err(EFAULT);
        page_bitmap_set(index, false);
        used_mem -= ARCH_PAGE_SIZE;
        free_mem += ARCH_PAGE_SIZE;
        if (start_index > index) start_index = index;
        return {};
    }

    Result<void> remap(u64 address, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(address);

        while (count--)
        {
            TRY(MMU::remap(address, flags));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);
        CHECK_PAGE_ALIGNED(phys);

        while (count--)
        {
            TRY(MMU::map(virt, phys, flags));
            virt += ARCH_PAGE_SIZE;
            phys += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<u64> alloc_at(u64 virt, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);

        u64 start = virt;

        while (count--)
        {
            u64 frame = TRY(alloc_frame());
            TRY(MMU::map(virt, frame, flags));
            virt += ARCH_PAGE_SIZE;
        }

        return start;
    }

    Result<void> unmap_owned(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            // Owned mappings also give their physical frames back to the allocator.
            u64 frame = TRY(MMU::unmap(virt));
            TRY(free_frame(frame));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> unmap_weak(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            TRY(MMU::unmap(virt));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> remap_unaligned(u64 address, usize count, int flags)
    {
        // If the address is not page-aligned, the range spills into one extra page.
        if (!is_aligned(address, ARCH_PAGE_SIZE)) count++;
        address = align_down(address, ARCH_PAGE_SIZE);

        while (count--)
        {
            TRY(MMU::remap(address, flags));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }

    bool validate_readable_page(u64 address)
    {
        auto rc = MMU::get_flags(address);
        if (rc.has_error()) return false;
        return true;
    }

    bool validate_writable_page(u64 address)
    {
        auto rc = MMU::get_flags(address);
        if (rc.has_error()) return false;
        if (rc.release_value() & MMU::ReadWrite) return true;
        return false;
    }

    u64 free() { return free_mem; }
    u64 used() { return used_mem; }
    u64 reserved() { return reserved_mem; }
    u64 total() { return free_mem + used_mem + reserved_mem; }
}