KernelHeap: Add more debug logging

apio 2022-10-20 18:49:33 +02:00
parent 9d0dfbaedf
commit 712f4f5e51
3 changed files with 67 additions and 12 deletions

memory/KernelHeap.h

@@ -8,4 +8,8 @@ namespace KernelHeap
     void free_virtual_page(uint64_t address);
     void free_virtual_pages(uint64_t address, uint64_t count);
+    void clear();
+    void dump_usage();
 }
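Taken together, the interface now covers setup, allocation, release, and reporting. A minimal caller-side sketch of the API after this commit (illustrative only: the request_virtual_page/request_virtual_pages declarations sit above the hunk shown here, and the example function is hypothetical):

#include "memory/KernelHeap.h"

// Hypothetical usage sketch of the KernelHeap API after this commit.
void example()
{
    KernelHeap::clear();                                 // new: zero the page bitmap at startup
    uint64_t page = KernelHeap::request_virtual_page();  // reserve one 4 KiB virtual page
    uint64_t run = KernelHeap::request_virtual_pages(4); // reserve 4 contiguous virtual pages
    KernelHeap::free_virtual_page(page);
    KernelHeap::free_virtual_pages(run, 4);
    KernelHeap::dump_usage();                            // new: log used/free heap space in KB
}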

memory/KernelHeap.cpp

@@ -2,6 +2,8 @@
 #include "memory/KernelHeap.h"
 #include "assert.h"
+#include "log/Log.h"
+#include "std/string.h"
 #ifndef PAGE_SIZE
 #define PAGE_SIZE 4096
@@ -9,10 +11,13 @@
 static uint8_t page_bitmap[2048];
+static int64_t kheap_free = sizeof(page_bitmap) * 8 * PAGE_SIZE;
+static int64_t kheap_used = 0;
 #define ALLOC_BASE 0xfffffffff8000000
 #define ALLOC_END 0xfffffffffc000000
-static uint64_t start_index = 0;
+// static uint64_t start_index = 0;
 static bool bitmap_read(uint64_t index)
 {
@@ -27,13 +32,28 @@ static void bitmap_set(uint64_t index, bool value)
     if (value) { page_bitmap[byteIndex] |= bitIndexer; }
 }
+void KernelHeap::clear()
+{
+    memset(page_bitmap, 0, sizeof(page_bitmap));
+    kinfoln("page bitmap located at %p", (void*)page_bitmap);
+}
 uint64_t KernelHeap::request_virtual_page()
 {
-    for (uint64_t index = start_index; index < sizeof(page_bitmap) * 8; index++)
+    for (uint64_t index = 0; index < sizeof(page_bitmap) * 8; index++)
     {
         if (bitmap_read(index)) continue;
         bitmap_set(index, true);
-        start_index = index + 1;
+        // start_index = index + 1;
+#ifdef KHEAP_DEBUG
+        kinfoln("allocating one page for caller %p, returning %lx", __builtin_return_address(0),
+                ALLOC_BASE + (index * PAGE_SIZE));
+#endif
+        kheap_free -= PAGE_SIZE;
+        kheap_used += PAGE_SIZE;
+#ifdef KHEAP_DEBUG
+        dump_usage();
+#endif
         return ALLOC_BASE + (index * PAGE_SIZE);
     }
@@ -44,7 +64,7 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
 {
     uint64_t contiguous = 0;
     uint64_t contiguous_start = 0;
-    for (uint64_t index = start_index; index < sizeof(page_bitmap) * 8; index++)
+    for (uint64_t index = 0; index < sizeof(page_bitmap) * 8; index++)
     {
         if (bitmap_read(index))
         {
@@ -61,6 +81,15 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
         if (contiguous == count)
         {
             for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
+#ifdef KHEAP_DEBUG
+            kinfoln("allocating %lu pages for caller %p, returning %lx", count, __builtin_return_address(0),
+                    ALLOC_BASE + (contiguous_start * PAGE_SIZE));
+#endif
+            kheap_free -= (count * PAGE_SIZE);
+            kheap_used += (count * PAGE_SIZE);
+#ifdef KHEAP_DEBUG
+            dump_usage();
+#endif
             return ALLOC_BASE + (contiguous_start * PAGE_SIZE);
         }
     }
@@ -73,7 +102,15 @@ void KernelHeap::free_virtual_page(uint64_t address)
     if (address < ALLOC_BASE || address >= ALLOC_END) return;
     uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
     bitmap_set(index, false);
-    if (start_index > index) start_index = index;
+#ifdef KHEAP_DEBUG
+    kinfoln("releasing one page for caller %p, %lx", __builtin_return_address(0), address);
+#endif
+    kheap_free += PAGE_SIZE;
+    kheap_used -= PAGE_SIZE;
+#ifdef KHEAP_DEBUG
+    dump_usage();
+#endif
+    // if (start_index > index) start_index = index;
 }
 void KernelHeap::free_virtual_pages(uint64_t address, uint64_t count)
@@ -81,5 +118,19 @@ void KernelHeap::free_virtual_pages(uint64_t address, uint64_t count)
     if (address < ALLOC_BASE || address >= ALLOC_END) return;
     uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
     for (uint64_t i = 0; i < count; i++) { bitmap_set(index + i, false); }
-    if (start_index > index) start_index = index;
+#ifdef KHEAP_DEBUG
+    kinfoln("releasing %lu pages for caller %p, %lx", count, __builtin_return_address(0), address);
+#endif
+    kheap_free += (count * PAGE_SIZE);
+    kheap_used -= (count * PAGE_SIZE);
+#ifdef KHEAP_DEBUG
+    dump_usage();
+#endif
+    // if (start_index > index) start_index = index;
 }
+void KernelHeap::dump_usage()
+{
+    kinfoln("Used: %ld KB", kheap_used / 1024);
+    kinfoln("Free: %ld KB", kheap_free / 1024);
+}
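The new kheap_free initializer is consistent with the allocation window: sizeof(page_bitmap) * 8 * PAGE_SIZE = 2048 * 8 * 4096 bytes = 64 MiB, exactly ALLOC_END - ALLOC_BASE (0x4000000); in the sign-extended kernel address space, ALLOC_BASE is -128 MiB and ALLOC_END -64 MiB, matching the "-128M to -64M" wording in MemoryManager's log messages. Note also that commenting out start_index makes both allocators scan from index 0 every time, disabling the first-fit cursor while debugging. For reference, a standalone sketch of the byte/bit indexing the bitmap helpers use (bitmap_read's body lies outside the hunks; this assumes the same byte/bit split visible in the bitmap_set line above):

#include <stdint.h>

static uint8_t page_bitmap[2048]; // 2048 bytes * 8 bits = 16384 tracked pages

// Sketch, assuming the byte/bit split implied by the visible bitmap_set line.
static bool bitmap_read(uint64_t index)
{
    uint64_t byteIndex = index / 8;
    uint8_t bitIndexer = (uint8_t)(1 << (index % 8));
    return (page_bitmap[byteIndex] & bitIndexer) != 0;
}

static void bitmap_set(uint64_t index, bool value)
{
    uint64_t byteIndex = index / 8;
    uint8_t bitIndexer = (uint8_t)(1 << (index % 8));
    page_bitmap[byteIndex] &= (uint8_t)~bitIndexer;      // assumed: clear the bit first
    if (value) { page_bitmap[byteIndex] |= bitIndexer; } // as shown in the diff above
}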

memory/MemoryManager.cpp

@@ -11,6 +11,7 @@
 void MemoryManager::init()
 {
+    KernelHeap::clear();
     PMM::init();
     VMM::init();
     PMM::map_bitmap_to_virtual();
@@ -24,6 +25,7 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
+        KernelHeap::dump_usage();
         return 0;
     }
     VMM::map(virtualAddress, (uint64_t)physicalAddress, flags);
@@ -39,6 +41,7 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
+        KernelHeap::dump_usage();
         return 0;
     }
     VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
@@ -58,6 +61,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
                 "-64M)",
                 count);
 #endif
+        KernelHeap::dump_usage();
         return 0;
     }
     for (uint64_t i = 0; i < count; i++)
@@ -97,6 +101,7 @@ void* MemoryManager::get_page(int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
+        KernelHeap::dump_usage();
         return 0;
     }
     return get_page_at(virtualAddress, flags);
@@ -137,6 +142,7 @@ void* MemoryManager::get_pages(uint64_t count, int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
+        KernelHeap::dump_usage();
         return 0; // Out of virtual address in the kernel heap range (-128M to -64M). This should be difficult to
                   // achieve...
     }
@@ -163,9 +169,6 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
             return 0;
         }
         VMM::map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
-#ifdef MM_DEBUG
-        kdbgln("allocating virtual %lx, physical %p", addr + (i * PAGE_SIZE), physicalAddress);
-#endif
     }
     return (void*)addr;
 }
@@ -183,9 +186,6 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
         uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
         ASSERT(physicalAddress != UINT64_MAX);
         VMM::unmap((uint64_t)page);
-#ifdef MM_DEBUG
-        kdbgln("releasing virtual %p, physical %lx", page, physicalAddress);
-#endif
         PMM::free_page((void*)physicalAddress);
     }
     KernelHeap::free_virtual_pages((uint64_t)pages, count);
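The MemoryManager side of the change is uniform: every allocation path that can exhaust the kernel-heap virtual range now reports usage on failure. A condensed sketch of the pattern (illustrative, not the exact function body; the surrounding condition is outside the hunks, and this assumes request_virtual_page() returns 0 on exhaustion, as the return 0 branches suggest):

// Condensed sketch of the instrumented out-of-heap failure path.
void* get_page_sketch(int flags)
{
    uint64_t virtualAddress = KernelHeap::request_virtual_page();
    if (!virtualAddress) // assumed failure sentinel; ALLOC_BASE itself is never 0
    {
#ifdef MM_DEBUG
        kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
#endif
        KernelHeap::dump_usage(); // new: runs even without MM_DEBUG defined
        return 0;
    }
    return MemoryManager::get_page_at(virtualAddress, flags);
}

Note that, unlike the kwarnln, the dump_usage() call is not wrapped in #ifdef MM_DEBUG, so heap usage is still reported in builds with MM_DEBUG disabled.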