// Luna/kernel/src/memory/MemoryManager.cpp

#ifdef MM_DEBUG
#define MODULE "mm"
#include "log/Log.h"
#endif
#include "assert.h"
#include "memory/KernelHeap.h"
#include "memory/MemoryManager.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
void MemoryManager::init()
{
    PMM::init();
    kernelVMM.init();
}
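
// Maps a page-aligned physical address into the kernel heap's virtual address space.
// Returns 0 if no virtual pages are left.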
void* MemoryManager::get_mapping(void* physicalAddress, int flags)
{
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        return 0;
    }
    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
    return (void*)virtualAddress;
}
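
// Maps a physical address that need not be page-aligned: the containing page is
// mapped, and a pointer preserving the original offset into the page is returned.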
void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
{
    uint64_t offset = (uint64_t)physicalAddress % 4096;
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        return 0;
    }
    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
    return (void*)(virtualAddress + offset);
}
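
// Like get_unaligned_mapping(), but maps count contiguous pages starting at the
// page containing physicalAddress.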
void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
{
    if (!count) return 0;
    if (count == 1) return get_unaligned_mapping(physicalAddress, flags);
    uint64_t offset = (uint64_t)physicalAddress % 4096;
    uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("Not enough contiguous pages (%ld) left in the kernel heap space (virtual address space from -128M to "
                "-64M)",
                count);
#endif
        return 0;
    }
    for (uint64_t i = 0; i < count; i++)
    {
        kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags);
    }
    return (void*)(virtualAddress + offset);
}
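
// Unmaps a mapping returned by get_unaligned_mapping() and returns its virtual
// page to the kernel heap. The physical page itself is not freed.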
void MemoryManager::release_unaligned_mapping(void* mapping)
{
    uint64_t offset = (uint64_t)mapping % 4096;
    kernelVMM.unmap((uint64_t)mapping - offset);
    KernelHeap::free_virtual_page((uint64_t)mapping - offset);
}
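
// Unmaps count pages previously mapped with get_unaligned_mappings().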
void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
{
    if (!count) return;
    if (count == 1) return release_unaligned_mapping(mapping);
    uint64_t offset = (uint64_t)mapping % 4096;
    KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
    for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); }
}
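
// Unmaps a page-aligned mapping returned by get_mapping() without freeing the
// underlying physical page.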
void MemoryManager::release_mapping(void* mapping)
{
    kernelVMM.unmap((uint64_t)mapping);
    KernelHeap::free_virtual_page((uint64_t)mapping);
}
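
// Allocates one physical page and maps it at a freshly allocated kernel heap
// virtual address. Returns 0 if either allocation fails.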
void* MemoryManager::get_page(int flags)
{
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        return 0;
    }
    return get_page_at(virtualAddress, flags);
}
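
// Allocates one physical page and maps it at the given virtual address.
// Returns 0 on OOM.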
void* MemoryManager::get_page_at(uint64_t addr, int flags)
{
    void* physicalAddress = PMM::request_page();
    if (PMM_DID_FAIL(physicalAddress))
    {
#ifdef MM_DEBUG
        kwarnln("OOM while allocating one page of memory. this is not good...");
#endif
        return 0;
    }
    kernelVMM.map(addr, (uint64_t)physicalAddress, flags);
    return (void*)addr;
}
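
// Unmaps a page allocated with get_page()/get_page_at(), frees its physical
// page, and returns its virtual page to the kernel heap.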
void MemoryManager::release_page(void* page)
{
    uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
    ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
    kernelVMM.unmap((uint64_t)page);
    KernelHeap::free_virtual_page((uint64_t)page); // return the virtual page to the heap, mirroring release_pages()
    PMM::free_page((void*)physicalAddress);
}
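
// Allocates count contiguous virtual pages in the kernel heap range, backed by
// physical pages. Returns 0 if the virtual or physical allocation fails.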
void* MemoryManager::get_pages(uint64_t count, int flags)
{
    if (!count) return 0;
    if (count == 1) return get_page(flags);
#ifdef MM_DEBUG
    kdbgln("allocating several pages (%ld)", count);
#endif
    uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
    if (!virtualAddress)
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        return 0; // Out of virtual addresses in the kernel heap range (-128M to -64M). This should be difficult to
                  // achieve...
    }
    return get_pages_at(virtualAddress, count, flags);
}
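
// Maps count freshly allocated physical pages at the given virtual address.
// Returns 0 on OOM, leaving any pages mapped so far in place.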
void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
{
    if (!count) return 0;
    if (count == 1) return get_page_at(addr, flags);
#ifdef MM_DEBUG
    kdbgln("allocating several pages (%ld), at address %lx", count, addr);
#endif
    for (uint64_t i = 0; i < count; i++)
    {
        void* physicalAddress = PMM::request_page();
        if (PMM_DID_FAIL(physicalAddress)) // OOM: No physical memory available! Since this might be at the end of a
                                           // long allocation, we should be able to recover most of it and allocate a
                                           // smaller range, so this might not be fatal.
        {
#ifdef MM_DEBUG
            kwarnln("OOM while allocating page %ld of memory. this might be recoverable...", i);
#endif
            return 0;
        }
        kernelVMM.map(addr + (i * 4096), (uint64_t)physicalAddress, flags);
#ifdef MM_DEBUG
        kdbgln("allocating virtual %lx, physical %lx", addr + (i * 4096), (uint64_t)physicalAddress);
#endif
    }
    return (void*)addr;
}
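
// Unmaps count pages starting at pages, frees their physical pages, and returns
// the virtual range to the kernel heap.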
void MemoryManager::release_pages(void* pages, uint64_t count)
{
    if (!count) return;
    if (count == 1) return release_page(pages);
#ifdef MM_DEBUG
    kdbgln("releasing several pages (%ld)", count);
#endif
    for (uint64_t i = 0; i < count; i++)
    {
        void* page = (void*)((uint64_t)pages + (i * 4096));
        uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
        ASSERT(physicalAddress != UINT64_MAX);
        kernelVMM.unmap((uint64_t)page);
#ifdef MM_DEBUG
        kdbgln("releasing virtual %lx, physical %lx", (uint64_t)page, physicalAddress);
#endif
        PMM::free_page((void*)physicalAddress);
    }
    KernelHeap::free_virtual_pages((uint64_t)pages, count);
}