From 704a23d0ad2c559da498a9b1c176f0d055e27659 Mon Sep 17 00:00:00 2001
From: apio
Date: Sat, 24 Sep 2022 23:09:39 +0200
Subject: [PATCH] Sanity checks

---
 kernel/include/memory/PMM.h         |  3 ++
 kernel/src/memory/MemoryManager.cpp | 64 ++++++++++++++++++++++++++++-
 kernel/src/memory/PMM.cpp           |  4 +-
 kernel/src/memory/VMM.cpp           |  5 ++-
 4 files changed, 72 insertions(+), 4 deletions(-)

diff --git a/kernel/include/memory/PMM.h b/kernel/include/memory/PMM.h
index cb6b8d05..f991e21f 100644
--- a/kernel/include/memory/PMM.h
+++ b/kernel/include/memory/PMM.h
@@ -1,6 +1,9 @@
 #pragma once
 #include <stdint.h>
 
+#define PMM_FAILED ((void*)-1)
+#define PMM_DID_FAIL(addr) ((void*)(addr) == PMM_FAILED)
+
 namespace PMM
 {
     void init();
diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index 526b615d..cecffc27 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -12,6 +12,13 @@
 void* MemoryManager::get_mapping(void* physicalAddress, int flags)
 {
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0;
+    }
     kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }
@@ -20,6 +27,13 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 {
     uint64_t offset = (uint64_t)physicalAddress % 4096;
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0;
+    }
     kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
     return (void*)(virtualAddress + offset);
 }
@@ -28,6 +42,15 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
 {
     uint64_t offset = (uint64_t)physicalAddress % 4096;
     uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("Not enough contiguous pages (%ld) left in the kernel heap space (virtual address space from -128M to "
+                "-64M)",
+                count);
+#endif
+        return 0;
+    }
     for (uint64_t i = 0; i < count; i++)
     {
         kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags);
@@ -44,6 +67,13 @@ void MemoryManager::release_unaligned_mapping(void* mapping)
 
 void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
 {
+    if (!count)
+    {
+#ifdef MM_DEBUG
+        kwarnln("weird... got asked to free 0 pages of virtual memory");
+#endif
+        return;
+    }
     uint64_t offset = (uint64_t)mapping % 4096;
     KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
     for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); }
@@ -58,7 +88,21 @@ void MemoryManager::release_mapping(void* mapping)
 void* MemoryManager::get_page(int flags)
 {
     void* physicalAddress = PMM::request_page();
+    if (PMM_DID_FAIL(physicalAddress))
+    {
+#ifdef MM_DEBUG
+        kwarnln("OOM while allocating one page of memory. this is not good...");
+#endif
+        return 0;
+    }
     uint64_t virtualAddress = KernelHeap::request_virtual_page();
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0;
+    }
     kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }
@@ -66,7 +110,7 @@ void* MemoryManager::get_page(int flags)
 void MemoryManager::release_page(void* page)
 {
     uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
-    ASSERT(physicalAddress != UINT64_MAX);
+    ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
     kernelVMM.unmap((uint64_t)page);
     PMM::free_page((void*)physicalAddress);
 }
@@ -77,9 +121,27 @@ void* MemoryManager::get_pages(uint64_t count, int flags)
     kdbgln("allocating several pages (%ld)", count);
 #endif
     uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
+    if (!virtualAddress)
+    {
+#ifdef MM_DEBUG
+        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
+#endif
+        return 0; // Out of virtual addresses in the kernel heap range (-128M to -64M). This should be difficult to
+                  // achieve...
+    }
     for (uint64_t i = 0; i < count; i++)
     {
         void* physicalAddress = PMM::request_page();
+        if (PMM_DID_FAIL(physicalAddress)) // OOM: No physical memory available! Since this might be at the end of a
+                                           // long allocation, we should be able to recover most of it and allocate a
+                                           // smaller range, so this might not be fatal.
+        {
+#ifdef MM_DEBUG
+            kwarnln("OOM while allocating page %ld of memory. this might be recoverable...", i);
+#endif
+            release_pages((void*)virtualAddress, i); // release the pages we have already mapped
+            return 0;
+        }
         kernelVMM.map(virtualAddress + (i * 4096), (uint64_t)physicalAddress, flags);
 #ifdef MM_DEBUG
         kdbgln("allocating virtual %lx, physical %lx", virtualAddress + (i * 4096), (uint64_t)physicalAddress);
diff --git a/kernel/src/memory/PMM.cpp b/kernel/src/memory/PMM.cpp
index 6c533519..a71e0cb5 100644
--- a/kernel/src/memory/PMM.cpp
+++ b/kernel/src/memory/PMM.cpp
@@ -94,7 +94,7 @@ void* PMM::request_page()
             return (void*)(index * 4096);
         }
 
-    return (void*)-1;
+    return PMM_FAILED;
 }
 
 void* PMM::request_pages(uint64_t count)
@@ -124,7 +124,7 @@
         }
     }
 
-    return (void*)-1;
+    return PMM_FAILED;
 }
 
 void PMM::free_page(void* address)
diff --git a/kernel/src/memory/VMM.cpp b/kernel/src/memory/VMM.cpp
index 9088ba26..21d4b972 100644
--- a/kernel/src/memory/VMM.cpp
+++ b/kernel/src/memory/VMM.cpp
@@ -1,5 +1,5 @@
 #include "memory/VMM.h"
-#include "log/Log.h"
+#include "assert.h"
 #include "memory/PMM.h"
 #include "std/string.h"
 
@@ -163,6 +163,7 @@ namespace Paging
         if (!PDE.Present)
         {
             PDP = (PageTable*)PMM::request_page();
+            ASSERT(!(PMM_DID_FAIL(PDP)));
             memset(PDP, 0, 0x1000);
             PDE.Address = (uint64_t)PDP >> 12;
             PDE.Present = true;
@@ -177,6 +178,7 @@ namespace Paging
         if (!PDE.Present)
        {
             PD = (PageTable*)PMM::request_page();
+            ASSERT(!(PMM_DID_FAIL(PD)));
             memset(PD, 0, 0x1000);
             PDE.Address = (uint64_t)PD >> 12;
             PDE.Present = true;
@@ -191,6 +193,7 @@ namespace Paging
         if (!PDE.Present)
         {
             PT = (PageTable*)PMM::request_page();
+            ASSERT(!(PMM_DID_FAIL(PT)));
             memset(PT, 0, 0x1000);
             PDE.Address = (uint64_t)PT >> 12;
             PDE.Present = true;
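
Caller-side note (editorial, not part of the commit): with these checks in place, allocation failures surface as a null return from MemoryManager and as PMM_FAILED from the physical allocator instead of silently mapping garbage. Below is a minimal sketch of how calling code might consume the new failure signals; the "memory/MemoryManager.h" include path, the helper names and the flags value of 0 are assumptions for illustration only.

    #include "memory/MemoryManager.h" // assumed header path for the functions patched above
    #include "memory/PMM.h"
    #include "assert.h"

    // Hypothetical helper: allocate a multi-page buffer, tolerating OOM.
    static void* try_allocate_buffer(uint64_t pages)
    {
        void* buffer = MemoryManager::get_pages(pages, 0); // 0 stands in for real mapping flags
        // get_pages() now returns 0 on failure and has already released any pages it
        // managed to map, so the caller only has to handle the null result (for
        // example by retrying with a smaller page count).
        return buffer;
    }

    // Hypothetical helper: grab a raw physical frame where failure is fatal.
    static void* frame_or_die()
    {
        void* frame = PMM::request_page();
        ASSERT(!PMM_DID_FAIL(frame)); // the PMM reports exhaustion as PMM_FAILED, not as a null pointer
        return frame;
    }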