#include "memory/KernelMemoryManager.h" #include "assert.h" #include "memory/KernelHeap.h" #include "memory/RangeAllocator.h" #include "memory/VMM.h" RangeAllocator userVMMAllocator; void KernelMemoryManager::init() { userVMMAllocator.init((void*)0x1000, (void*)0xC000000); } void* KernelMemoryManager::get_mapping(void* physicalAddress, int flags) { uint64_t virtualAddress; if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page(); else virtualAddress = KernelHeap::request_virtual_page(); kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags); return (void*)virtualAddress; } void* KernelMemoryManager::get_unaligned_mapping(void* physicalAddress, int flags) { uint64_t offset = (uint64_t)physicalAddress % 4096; uint64_t virtualAddress; if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page(); else virtualAddress = KernelHeap::request_virtual_page(); kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags); return (void*)(virtualAddress + offset); } void* KernelMemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags) { uint64_t offset = (uint64_t)physicalAddress % 4096; uint64_t virtualAddress; if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_pages(count); else virtualAddress = KernelHeap::request_virtual_pages(count); for (uint64_t i = 0; i < count; i++) { kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags); } return (void*)(virtualAddress + offset); } void KernelMemoryManager::release_unaligned_mapping(void* mapping) { uint64_t offset = (uint64_t)mapping % 4096; uint64_t flags = kernelVMM.getFlags((uint64_t)mapping); kernelVMM.unmap((uint64_t)mapping - offset); if (flags & MAP_USER) userVMMAllocator.free_page((void*)((uint64_t)mapping - offset)); else KernelHeap::free_virtual_page((uint64_t)mapping - offset); } void KernelMemoryManager::release_unaligned_mappings(void* mapping, uint64_t count) { uint64_t offset = (uint64_t)mapping % 4096; uint64_t flags = kernelVMM.getFlags((uint64_t)mapping); if (flags & MAP_USER) userVMMAllocator.free_pages((void*)((uint64_t)mapping - offset), count); else KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count); for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); } } void KernelMemoryManager::release_mapping(void* mapping) { kernelVMM.unmap((uint64_t)mapping); uint64_t flags = kernelVMM.getFlags((uint64_t)mapping); if (flags & MAP_USER) userVMMAllocator.free_page(mapping); else KernelHeap::free_virtual_page((uint64_t)mapping); } void* KernelMemoryManager::get_page(int flags) { void* physicalAddress = kernelPMM.request_page(); uint64_t virtualAddress; if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page(); else virtualAddress = KernelHeap::request_virtual_page(); kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags); return (void*)virtualAddress; } void KernelMemoryManager::release_page(void* page) { uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page); ASSERT(physicalAddress != UINT64_MAX); uint64_t flags = kernelVMM.getFlags((uint64_t)page); kernelVMM.unmap((uint64_t)page); kernelPMM.free_page((void*)physicalAddress); if (flags & MAP_USER) userVMMAllocator.free_page(page); else KernelHeap::free_virtual_page((uint64_t)page); } void* KernelMemoryManager::get_pages(uint64_t count, int flags) { uint64_t virtualAddress; if (flags & MAP_USER) virtualAddress = 
(uint64_t)userVMMAllocator.request_pages(count); else virtualAddress = KernelHeap::request_virtual_pages(count); for (uint64_t i = 0; i < count; i++) { void* physicalAddress = kernelPMM.request_page(); kernelVMM.map(virtualAddress + (i * 4096), (uint64_t)physicalAddress, flags); } return (void*)virtualAddress; } void KernelMemoryManager::release_pages(void* pages, uint64_t count) { uint64_t flags = kernelVMM.getFlags((uint64_t)pages); for (uint64_t i = 0; i < count; i++) { void* page = (void*)((uint64_t)pages + (i * 4096)); uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page); ASSERT(physicalAddress != UINT64_MAX); kernelVMM.unmap((uint64_t)page); kernelPMM.free_page((void*)physicalAddress); } if (flags & MAP_USER) userVMMAllocator.free_pages(pages, count); else KernelHeap::free_virtual_pages((uint64_t)pages, count); }
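
// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the manager): mapping a
// device's MMIO registers that live at an arbitrary, possibly unaligned
// physical address. The physical address below is a hypothetical placeholder;
// passing 0 for the flags requests a kernel-only mapping (no MAP_USER).
//
//     void* regs = KernelMemoryManager::get_unaligned_mapping(
//         (void*)0xFEBD0040 /* hypothetical MMIO base */, 0);
//     // ... access device registers through `regs` ...
//     KernelMemoryManager::release_unaligned_mapping(regs);
//
// get_unaligned_mapping maps the page containing the physical address and
// returns a pointer adjusted by the sub-page offset, so callers never need
// to align MMIO bases themselves.
// ---------------------------------------------------------------------------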