#ifdef MM_DEBUG
#define MODULE "mm"
#include "log/Log.h"
#endif

#include "memory/KernelHeap.h"
#include "memory/MemoryManager.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
#include "misc/utils.h"
#include "std/ensure.h"

// FIXME: Use Result in here.

void MemoryManager::init()
{
    KernelHeap::clear();
    PMM::init();
    VMM::init();
    PMM::map_bitmap_to_virtual();
}

extern char start_of_kernel_rodata[1];
extern char end_of_kernel_rodata[1];
extern char start_of_kernel_data[1];
extern char end_of_kernel_data[1];

void MemoryManager::protect_kernel_sections()
{
    // Remap the kernel's rodata as read-only and its data as read-write, one page at a time.
    protect(start_of_kernel_rodata,
            Utilities::get_blocks_from_size(PAGE_SIZE, end_of_kernel_rodata - start_of_kernel_rodata), 0);
    protect(start_of_kernel_data,
            Utilities::get_blocks_from_size(PAGE_SIZE, end_of_kernel_data - start_of_kernel_data), MAP_READ_WRITE);
}

Result<void*> MemoryManager::get_mapping(void* physicalAddress, int flags)
{
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        KernelHeap::dump_usage();
        return {ENOMEM};
    }
    VMM::map(virtualAddress, (uint64_t)physicalAddress, flags);
    return (void*)virtualAddress;
}

Result<void*> MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
{
    uint64_t offset = (uint64_t)physicalAddress % PAGE_SIZE;
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        KernelHeap::dump_usage();
        return {ENOMEM};
    }
    VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
    return (void*)(virtualAddress + offset);
}

Result<void*> MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
{
    if (!count) return 0;
    if (count == 1) return get_unaligned_mapping(physicalAddress, flags);
    uint64_t offset = (uint64_t)physicalAddress % PAGE_SIZE;
    uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("Not enough contiguous pages (%ld) left in the kernel heap space (virtual address space from -128M to "
                "-64M)",
                count);
#endif
        KernelHeap::dump_usage();
        return {ENOMEM};
    }
    for (uint64_t i = 0; i < count; i++)
    {
        VMM::map(virtualAddress + (i * PAGE_SIZE), ((uint64_t)physicalAddress - offset) + (i * PAGE_SIZE), flags);
    }
    return (void*)(virtualAddress + offset);
}

void MemoryManager::release_unaligned_mapping(void* mapping)
{
    uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
    VMM::unmap((uint64_t)mapping - offset);
    KernelHeap::free_virtual_page((uint64_t)mapping - offset);
}

void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
{
    if (!count) return;
    if (count == 1) return release_unaligned_mapping(mapping);
    uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
    KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
    for (uint64_t i = 0; i < count; i++) { VMM::unmap(((uint64_t)mapping - offset) + (i * PAGE_SIZE)); }
}

void MemoryManager::release_mapping(void* mapping)
{
    VMM::unmap((uint64_t)mapping);
    KernelHeap::free_virtual_page((uint64_t)mapping);
}

Result<void*> MemoryManager::get_page(int flags)
{
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        KernelHeap::dump_usage();
        return {ENOMEM};
    }
    return get_page_at(virtualAddress, flags);
}
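
// Note: get_page() above only reserves a free virtual address in the kernel
// heap range; get_page_at() below does the real work of grabbing a physical
// page from the PMM and mapping it at a caller-chosen virtual address.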
Result<void*> MemoryManager::get_page_at(uint64_t addr, int flags)
{
    auto paddr = PMM::request_page();
    if (paddr.has_error())
    {
#ifdef MM_DEBUG
        kwarnln("OOM while allocating one page of memory. this is not good...");
#endif
        return {ENOMEM};
    }
    VMM::map(addr, (uint64_t)paddr.release_value(), flags);
    return (void*)addr;
}

void MemoryManager::release_page(void* page)
{
    uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
    ensure(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
    VMM::unmap((uint64_t)page);
    PMM::free_page((void*)physicalAddress);
}

Result<void*> MemoryManager::get_pages(uint64_t count, int flags)
{
    if (!count) return 0;
    if (count == 1) return get_page(flags);
#ifdef MM_DEBUG
    kdbgln("allocating several pages (%ld)", count);
#endif
    uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        KernelHeap::dump_usage();
        return {ENOMEM}; // Out of virtual addresses in the kernel heap range (-128M to -64M). This should be
                         // difficult to achieve...
    }
    return get_pages_at(virtualAddress, count, flags);
}

Result<void*> MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
{
    if (!count) return 0;
    if (count == 1) return get_page_at(addr, flags);
#ifdef MM_DEBUG
    kdbgln("allocating several pages (%ld), at address %lx", count, addr);
#endif
    for (uint64_t i = 0; i < count; i++)
    {
        auto paddr = PMM::request_page();
        if (paddr.has_error()) // OOM: No physical memory available! Since this might be at the end of a
                               // long allocation, we should be able to recover most of it and allocate a
                               // smaller range, so this might not be fatal.
        {
#ifdef MM_DEBUG
            kwarnln("OOM while allocating page %ld of memory. this might be recoverable...", i);
#endif
            // FIXME: Weren't we supposed to free all previously allocated pages, to avoid leaks when failing large
            // allocations?
            return {ENOMEM};
        }
        VMM::map(addr + (i * PAGE_SIZE), (uint64_t)paddr.release_value(), flags);
    }
    return (void*)addr;
}

void MemoryManager::release_pages(void* pages, uint64_t count)
{
    if (!count) return;
    if (count == 1) return release_page(pages);
#ifdef MM_DEBUG
    kdbgln("releasing several pages (%ld)", count);
#endif
    for (uint64_t i = 0; i < count; i++)
    {
        void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE));
        uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
        ensure(physicalAddress != UINT64_MAX);
        VMM::unmap((uint64_t)page);
        PMM::free_page((void*)physicalAddress);
    }
    KernelHeap::free_virtual_pages((uint64_t)pages, count);
}

void MemoryManager::protect(void* page, uint64_t count, int flags)
{
    for (uint64_t i = 0; i < count; i++) { VMM::remap((uint64_t)page + (i * PAGE_SIZE), flags); }
}

void MemoryManager::map_several_pages(uint64_t physicalAddress, uint64_t virtualAddress, uint64_t count, int flags)
{
    for (uint64_t i = 0; i < count; i++)
    {
        VMM::map(virtualAddress + (i * PAGE_SIZE), physicalAddress + (i * PAGE_SIZE), flags);
    }
}
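
// Illustrative usage sketch, not part of this file: a hypothetical caller
// allocating and releasing a 4-page kernel buffer. Only names that appear
// above (get_pages, release_pages, MAP_READ_WRITE, has_error, release_value)
// are assumed to exist; the surrounding function is made up.
//
//     auto result = MemoryManager::get_pages(4, MAP_READ_WRITE);
//     if (result.has_error()) return {ENOMEM}; // propagate the OOM upwards
//     void* buffer = result.release_value();
//     // ... use the 4-page buffer ...
//     MemoryManager::release_pages(buffer, 4); // unmaps and frees every page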