Sanity checks

apio 2022-09-24 23:09:39 +02:00
parent 6bd3529f32
commit 704a23d0ad
4 changed files with 72 additions and 4 deletions

View File

@@ -1,6 +1,9 @@
 #pragma once
 #include <stdint.h>
+#define PMM_FAILED ((void*)-1)
+#define PMM_DID_FAIL(addr) ((void*)(addr) == PMM_FAILED)
 namespace PMM
 {
     void init();
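
For context, a minimal sketch of the calling convention these macros establish (illustrative only, not part of the commit; probe_physical_page is a hypothetical name, and the assumption that a null physical address can be a valid allocation is implied by the choice of -1 as the sentinel):

#include "memory/PMM.h"

// PMM::request_page() returns a physical address, and the -1 sentinel
// suggests that 0 can be a legitimate result, so a plain null check cannot
// detect failure; callers must test with PMM_DID_FAIL instead.
void* probe_physical_page()
{
    void* page = PMM::request_page();
    if (PMM_DID_FAIL(page)) return nullptr; // out of physical memory
    return page;                            // a usable physical page
}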

View File

@@ -12,6 +12,13 @@
 void* MemoryManager::get_mapping(void* physicalAddress, int flags)
 {
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0;
+    }
     kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }
@@ -20,6 +27,13 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 {
     uint64_t offset = (uint64_t)physicalAddress % 4096;
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0;
+    }
     kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
     return (void*)(virtualAddress + offset);
 }
@@ -28,6 +42,15 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
 {
     uint64_t offset = (uint64_t)physicalAddress % 4096;
     uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("Not enough contiguous pages (%ld) left in the kernel heap space (virtual address space from -128M to "
+                "-64M)",
+                count);
+#endif
+        return 0;
+    }
     for (uint64_t i = 0; i < count; i++)
     {
         kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags);
@@ -44,6 +67,13 @@ void MemoryManager::release_unaligned_mapping(void* mapping)
 void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
 {
+    if (!count)
+    {
+#ifdef MM_DEBUG
+        kwarnln("weird... got asked to free 0 pages of virtual memory");
+#endif
+        return;
+    }
     uint64_t offset = (uint64_t)mapping % 4096;
     KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
     for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); }
@@ -58,7 +88,21 @@ void MemoryManager::release_mapping(void* mapping)
 void* MemoryManager::get_page(int flags)
 {
     void* physicalAddress = PMM::request_page();
+    if (PMM_DID_FAIL(physicalAddress))
+    {
+#ifdef MM_DEBUG
+        kwarnln("OOM while allocating one page of memory. this is not good...");
+#endif
+        return 0;
+    }
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0;
+    }
     kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }
@@ -66,7 +110,7 @@ void* MemoryManager::get_page(int flags)
 void MemoryManager::release_page(void* page)
 {
     uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
-    ASSERT(physicalAddress != UINT64_MAX);
+    ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
     kernelVMM.unmap((uint64_t)page);
     PMM::free_page((void*)physicalAddress);
 }
@@ -77,9 +121,27 @@ void* MemoryManager::get_pages(uint64_t count, int flags)
     kdbgln("allocating several pages (%ld)", count);
 #endif
     uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0; // Out of virtual addresses in the kernel heap range (-128M to -64M). This should be difficult to
+                  // achieve...
+    }
     for (uint64_t i = 0; i < count; i++)
     {
         void* physicalAddress = PMM::request_page();
+        if (PMM_DID_FAIL(physicalAddress)) // OOM: No physical memory available! Since this might be at the end of a
+                                           // long allocation, we should be able to recover most of it and allocate a
+                                           // smaller range, so this might not be fatal.
+        {
+#ifdef MM_DEBUG
+            kwarnln("OOM while allocating page %ld of memory. this might be recoverable...", i);
+#endif
+            release_pages((void*)virtualAddress, i); // release the pages we have already mapped
+            return 0;
+        }
         kernelVMM.map(virtualAddress + (i * 4096), (uint64_t)physicalAddress, flags);
 #ifdef MM_DEBUG
         kdbgln("allocating virtual %lx, physical %lx", virtualAddress + (i * 4096), (uint64_t)physicalAddress);

View File

@@ -94,7 +94,7 @@ void* PMM::request_page()
         return (void*)(index * 4096);
     }

-    return (void*)-1;
+    return PMM_FAILED;
 }

 void* PMM::request_pages(uint64_t count)
@@ -124,7 +124,7 @@ void* PMM::request_pages(uint64_t count)
         }
     }

-    return (void*)-1;
+    return PMM_FAILED;
 }

 void PMM::free_page(void* address)
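
Both return sites now use the named sentinel instead of a bare (void*)-1, so the failure check is uniform across single- and multi-page requests. A sketch of a caller (grab_contiguous is a hypothetical name; assumes request_pages() searches for a contiguous run of frames, as its nested loops above suggest):

#include "memory/PMM.h"

// request_pages() can fail even while single pages remain available (no
// contiguous run of `count` frames), but the check looks the same.
void* grab_contiguous(uint64_t count)
{
    void* base = PMM::request_pages(count);
    if (PMM_DID_FAIL(base)) return nullptr; // no suitable run of frames
    return base;
}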

View File

@@ -1,5 +1,5 @@
 #include "memory/VMM.h"
-#include "log/Log.h"
+#include "assert.h"
 #include "memory/PMM.h"
 #include "std/string.h"
@@ -163,6 +163,7 @@ namespace Paging
         if (!PDE.Present)
         {
             PDP = (PageTable*)PMM::request_page();
+            ASSERT(!(PMM_DID_FAIL(PDP)));
             memset(PDP, 0, 0x1000);
             PDE.Address = (uint64_t)PDP >> 12;
             PDE.Present = true;
@@ -177,6 +178,7 @@ namespace Paging
         if (!PDE.Present)
         {
             PD = (PageTable*)PMM::request_page();
+            ASSERT(!(PMM_DID_FAIL(PD)));
             memset(PD, 0, 0x1000);
             PDE.Address = (uint64_t)PD >> 12;
             PDE.Present = true;
@@ -191,6 +193,7 @@ namespace Paging
         if (!PDE.Present)
        {
             PT = (PageTable*)PMM::request_page();
+            ASSERT(!(PMM_DID_FAIL(PT)));
             memset(PT, 0, 0x1000);
             PDE.Address = (uint64_t)PT >> 12;
             PDE.Present = true;
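
In this page-table walk a failed PMM allocation previously went straight into memset(); the new ASSERTs halt instead, since there is no error-return path at this depth. A sketch of the guarded pattern (allocate_page_table is a hypothetical helper; assumes ASSERT panics on failure and that PageTable comes from memory/VMM.h):

#include "assert.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
#include "std/string.h"

// If the PMM is exhausted mid-walk, there is no table we can safely zero
// or install, so the only sane option is to stop.
static PageTable* allocate_page_table()
{
    PageTable* table = (PageTable*)PMM::request_page();
    ASSERT(!(PMM_DID_FAIL(table))); // fatal: would otherwise memset((void*)-1, ...)
    memset(table, 0, 0x1000);       // page tables must start zeroed
    return table;
}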