Remove RangeAllocator and make a PMM namespace

This commit is contained in:
apio 2022-09-24 21:27:45 +02:00
parent 198f4196c4
commit 46b7dab847
8 changed files with 84 additions and 173 deletions

View File

@ -7,8 +7,6 @@
namespace KernelMemoryManager
{
void init();
void* get_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
void release_mapping(void* mapping);

View File

@ -0,0 +1,20 @@
#pragma once
#include <stdint.h>
// Physical Memory Manager: free-function replacement for the old
// RangeAllocator class. Hands out physical page frames in 4 KiB units,
// tracked by a bitmap built from the boot memory map.
namespace PMM
{
// Build the allocator's bitmap from the firmware/boot memory map.
// Must be called once, before any other PMM function.
void init();
// Allocate one physical page. NOTE(review): the implementation in this
// commit returns (void*)-1 on exhaustion, not nullptr — callers must
// check for that sentinel.
void* request_page();
// Allocate `count` physically contiguous pages; same (void*)-1 sentinel
// on failure.
void* request_pages(uint64_t count);
// Return one page (identified by its physical address) to the free pool.
void free_page(void* address);
// Return `count` pages starting at `address` to the free pool.
void free_pages(void* address, uint64_t count);
// Mark one page as in use so the allocator never hands it out.
void lock_page(void* address);
// Mark `count` pages starting at `address` as in use.
void lock_pages(void* address, uint64_t count);
// Accounting, in bytes (adjusted in 4096-byte steps by the calls above).
uint64_t get_free();
uint64_t get_used();
uint64_t get_reserved();
};

View File

@ -1,42 +0,0 @@
#pragma once
#include <stdint.h>
// Legacy bitmap-based page allocator (this commit deletes it in favour of
// the PMM namespace). Manages 4 KiB pages within [alloc_base, ...), with
// one bitmap bit per page.
class RangeAllocator
{
public:
// Manage a fixed virtual range [start_address, end_address).
void init(void* start_address, void* end_address);
// Build state from the firmware memory map (physical-allocator mode).
void init_from_mmap();
// Allocate one page; see request_pages for multi-page allocation.
void* request_page();
void* request_pages(
uint64_t count); // Please don't use this in kernelPMM. That's what KernelMemoryManager/KernelHeap do.
// Return previously allocated pages to the free pool.
void free_page(void* address);
void free_pages(void* address, uint64_t count);
// Mark pages as used (counted in used_mem) without allocating them.
void lock_page(void* address);
void lock_pages(void* address, uint64_t count);
// Accounting, in bytes.
uint64_t get_free();
uint64_t get_used();
uint64_t get_reserved();
private:
// Like lock_page/lock_pages but counted in reserved_mem instead.
void reserve_page(void* address);
void reserve_pages(void* address, uint64_t count);
// One bit per 4 KiB page; true = used/reserved.
bool bitmap_read(uint64_t index);
void bitmap_set(uint64_t index, bool value);
uint64_t free_mem = 0;
uint64_t used_mem = 0;
uint64_t reserved_mem = 0;
char* bitmap_addr;
uint64_t bitmap_size;
// Search hint: lowest bitmap index that may still be free.
uint64_t start_index = 0;
// Physical/virtual base that bitmap index 0 corresponds to.
uint64_t alloc_base;
};
// Global physical allocator instance (replaced by PMM:: in this commit).
extern RangeAllocator kernelPMM;

View File

@ -7,7 +7,7 @@
#include "io/Serial.h"
#include "log/Log.h"
#include "memory/KernelMemoryManager.h"
#include "memory/RangeAllocator.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
#include "misc/hang.h"
#include "rand/Init.h"
@ -40,11 +40,9 @@ void Init::early_init()
framebuffer0.init((void*)bootboot.fb_ptr, bootboot.fb_type, bootboot.fb_scanline, bootboot.fb_width,
bootboot.fb_height);
kernelPMM.init_from_mmap();
PMM::init();
kernelVMM.init();
KernelMemoryManager::init();
InitRD::init();
ASSERT(TextRenderer::try_initialize());

View File

@ -17,7 +17,7 @@
#include "memory/KernelMemoryManager.h"
#include "memory/Memory.h"
#include "memory/MemoryMap.h"
#include "memory/RangeAllocator.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
#include "misc/PCITypes.h"
#include "misc/reboot.h"
@ -42,11 +42,6 @@ extern "C" void _start()
Memory::walk_memory_map();
kdbgln("System memory: %ld KB", Memory::get_system() / 1024);
kdbgln(" Free memory : %ld KB", kernelPMM.get_free() / 1024);
kdbgln(" Used memory : %ld KB", kernelPMM.get_used() / 1024);
kdbgln(" Reserved memory : %ld KB", kernelPMM.get_reserved() / 1024);
GDT::load();
kinfoln("Loaded GDT");
@ -71,7 +66,7 @@ extern "C" void _start()
kinfoln("Prepared scheduler");
/*Scheduler::add_kernel_task([]() {
Scheduler::add_kernel_task([]() {
int64_t x = 0;
int64_t y = 0;
int64_t xvel = 10;
@ -105,9 +100,9 @@ extern "C" void _start()
}
framebuffer0.paint_rect(x, y, 10, 10, *(Color*)colptr);
}
});*/
});
/*Scheduler::add_kernel_task([]() {
Scheduler::add_kernel_task([]() {
while (1)
{
sleep(200);
@ -117,11 +112,6 @@ extern "C" void _start()
Mersenne::get() % (framebuffer0.height() - 256), Mersenne::get() % 255,
Mersenne::get() % 255, *(Color*)colptr);
}
});*/
Scheduler::add_kernel_task([]() {
TextRenderer::putchar(':');
Scheduler::exit();
});
Scheduler::add_kernel_task([]() {
@ -132,12 +122,6 @@ extern "C" void _start()
}
});
InitRD::File hello = InitRD::open("sys/Hello.bin");
kernelVMM.map(0x7000, (uint64_t)kernelPMM.request_page(), MAP_USER);
memcpy((void*)0x7000, hello.addr, hello.size);
Scheduler::add_user_task((void*)0x7000);
kinfoln("Prepared scheduler tasks");
ACPI::SDTHeader* rootSDT = ACPI::GetRSDTOrXSDT();
@ -154,5 +138,10 @@ extern "C" void _start()
kinfoln("Found PCI device %x:%x, %s", dev.id().vendor, dev.id().device, pci_type_name(dev.type()));
});
kdbgln("System memory: %ld KB", Memory::get_system() / 1024);
kdbgln(" Free memory : %ld KB", PMM::get_free() / 1024);
kdbgln(" Used memory : %ld KB", PMM::get_used() / 1024);
kdbgln(" Reserved memory : %ld KB", PMM::get_reserved() / 1024);
Scheduler::exit();
}

View File

@ -1,22 +1,12 @@
#include "memory/KernelMemoryManager.h"
#include "assert.h"
#include "memory/KernelHeap.h"
#include "memory/RangeAllocator.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
RangeAllocator userVMMAllocator;
void KernelMemoryManager::init()
{
userVMMAllocator.init((void*)0x1000, (void*)0xC000000);
}
void* KernelMemoryManager::get_mapping(void* physicalAddress, int flags)
{
uint64_t virtualAddress;
if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page();
else
virtualAddress = KernelHeap::request_virtual_page();
uint64_t virtualAddress = KernelHeap::request_virtual_page();
kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
return (void*)virtualAddress;
}
@ -24,10 +14,7 @@ void* KernelMemoryManager::get_mapping(void* physicalAddress, int flags)
void* KernelMemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
{
uint64_t offset = (uint64_t)physicalAddress % 4096;
uint64_t virtualAddress;
if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page();
else
virtualAddress = KernelHeap::request_virtual_page();
uint64_t virtualAddress = KernelHeap::request_virtual_page();
kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
return (void*)(virtualAddress + offset);
}
@ -35,10 +22,7 @@ void* KernelMemoryManager::get_unaligned_mapping(void* physicalAddress, int flag
void* KernelMemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
{
uint64_t offset = (uint64_t)physicalAddress % 4096;
uint64_t virtualAddress;
if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_pages(count);
else
virtualAddress = KernelHeap::request_virtual_pages(count);
uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
for (uint64_t i = 0; i < count; i++)
{
kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags);
@ -49,39 +33,27 @@ void* KernelMemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_
void KernelMemoryManager::release_unaligned_mapping(void* mapping)
{
uint64_t offset = (uint64_t)mapping % 4096;
uint64_t flags = kernelVMM.getFlags((uint64_t)mapping);
kernelVMM.unmap((uint64_t)mapping - offset);
if (flags & MAP_USER) userVMMAllocator.free_page((void*)((uint64_t)mapping - offset));
else
KernelHeap::free_virtual_page((uint64_t)mapping - offset);
KernelHeap::free_virtual_page((uint64_t)mapping - offset);
}
void KernelMemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
{
uint64_t offset = (uint64_t)mapping % 4096;
uint64_t flags = kernelVMM.getFlags((uint64_t)mapping);
if (flags & MAP_USER) userVMMAllocator.free_pages((void*)((uint64_t)mapping - offset), count);
else
KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); }
}
void KernelMemoryManager::release_mapping(void* mapping)
{
kernelVMM.unmap((uint64_t)mapping);
uint64_t flags = kernelVMM.getFlags((uint64_t)mapping);
if (flags & MAP_USER) userVMMAllocator.free_page(mapping);
else
KernelHeap::free_virtual_page((uint64_t)mapping);
KernelHeap::free_virtual_page((uint64_t)mapping);
}
void* KernelMemoryManager::get_page(int flags)
{
void* physicalAddress = kernelPMM.request_page();
uint64_t virtualAddress;
if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page();
else
virtualAddress = KernelHeap::request_virtual_page();
void* physicalAddress = PMM::request_page();
uint64_t virtualAddress = KernelHeap::request_virtual_page();
kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
return (void*)virtualAddress;
}
@ -90,23 +62,16 @@ void KernelMemoryManager::release_page(void* page)
{
uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX);
uint64_t flags = kernelVMM.getFlags((uint64_t)page);
kernelVMM.unmap((uint64_t)page);
kernelPMM.free_page((void*)physicalAddress);
if (flags & MAP_USER) userVMMAllocator.free_page(page);
else
KernelHeap::free_virtual_page((uint64_t)page);
PMM::free_page((void*)physicalAddress);
}
void* KernelMemoryManager::get_pages(uint64_t count, int flags)
{
uint64_t virtualAddress;
if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_pages(count);
else
virtualAddress = KernelHeap::request_virtual_pages(count);
uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
for (uint64_t i = 0; i < count; i++)
{
void* physicalAddress = kernelPMM.request_page();
void* physicalAddress = PMM::request_page();
kernelVMM.map(virtualAddress + (i * 4096), (uint64_t)physicalAddress, flags);
}
return (void*)virtualAddress;
@ -114,16 +79,13 @@ void* KernelMemoryManager::get_pages(uint64_t count, int flags)
void KernelMemoryManager::release_pages(void* pages, uint64_t count)
{
uint64_t flags = kernelVMM.getFlags((uint64_t)pages);
for (uint64_t i = 0; i < count; i++)
{
void* page = (void*)((uint64_t)pages + (i * 4096));
uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX);
kernelVMM.unmap((uint64_t)page);
kernelPMM.free_page((void*)physicalAddress);
PMM::free_page((void*)physicalAddress);
}
if (flags & MAP_USER) userVMMAllocator.free_pages(pages, count);
else
KernelHeap::free_virtual_pages((uint64_t)pages, count);
KernelHeap::free_virtual_pages((uint64_t)pages, count);
}

View File

@ -1,6 +1,6 @@
#define MODULE "mem"
#include "memory/RangeAllocator.h"
#include "memory/PMM.h"
#include "assert.h"
#include "bootboot.h"
#include "memory/KernelMemoryManager.h"
@ -9,9 +9,20 @@
extern BOOTBOOT bootboot;
RangeAllocator kernelPMM;
static bool bitmap_read(uint64_t index);
static void bitmap_set(uint64_t index, bool value);
void RangeAllocator::init_from_mmap()
static uint64_t free_mem = 0;
static uint64_t used_mem = 0;
static uint64_t reserved_mem = 0;
static char* bitmap_addr;
static char* virtual_bitmap_addr;
static uint64_t bitmap_size;
static uint64_t start_index = 0;
void PMM::init()
{
uint64_t total_mem = Memory::get_system();
@ -36,6 +47,7 @@ void RangeAllocator::init_from_mmap()
}
bitmap_addr = (char*)biggest_chunk;
virtual_bitmap_addr = bitmap_addr;
ASSERT((total_mem / 4096 / 8) < biggest_chunk_size);
bitmap_size = total_mem / 4096 / 8 + 1;
memset(bitmap_addr, 0xFF, bitmap_size);
@ -54,34 +66,22 @@ void RangeAllocator::init_from_mmap()
}
lock_pages(bitmap_addr, bitmap_size / 4096 + 1);
alloc_base = 0;
}
void RangeAllocator::init(void* start_address, void* end_address)
static bool bitmap_read(uint64_t index)
{
ASSERT(((int64_t)end_address - (int64_t)start_address) > 0);
uint64_t total_size = (uint64_t)end_address - (uint64_t)start_address;
bitmap_size = total_size / 4096 / 8 + 1;
bitmap_addr = (char*)KernelMemoryManager::get_pages(bitmap_size / 4096 + 1);
memset(bitmap_addr, 0, bitmap_size);
alloc_base = (uint64_t)start_address;
return (virtual_bitmap_addr[index / 8] & (0b10000000 >> (index % 8))) > 0;
}
bool RangeAllocator::bitmap_read(uint64_t index)
{
return (bitmap_addr[index / 8] & (0b10000000 >> (index % 8))) > 0;
}
void RangeAllocator::bitmap_set(uint64_t index, bool value)
static void bitmap_set(uint64_t index, bool value)
{
uint64_t byteIndex = index / 8;
uint8_t bitIndexer = 0b10000000 >> (index % 8);
bitmap_addr[byteIndex] &= ~bitIndexer;
if (value) { bitmap_addr[byteIndex] |= bitIndexer; }
virtual_bitmap_addr[byteIndex] &= ~bitIndexer;
if (value) { virtual_bitmap_addr[byteIndex] |= bitIndexer; }
}
void* RangeAllocator::request_page()
void* PMM::request_page()
{
for (uint64_t index = start_index; index < (bitmap_size * 8); index++)
{
@ -90,13 +90,13 @@ void* RangeAllocator::request_page()
start_index = index + 1;
free_mem -= 4096;
used_mem += 4096;
return (void*)(alloc_base + (index * 4096));
return (void*)(index * 4096);
}
return 0;
return (void*)-1;
}
void* RangeAllocator::request_pages(uint64_t count)
void* PMM::request_pages(uint64_t count)
{
uint64_t contiguous = 0;
uint64_t contiguous_start = 0;
@ -119,16 +119,16 @@ void* RangeAllocator::request_pages(uint64_t count)
for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
free_mem -= (count * 4096);
used_mem += (count * 4096);
return (void*)(alloc_base + (contiguous_start * 4096));
return (void*)(contiguous_start * 4096);
}
}
return 0;
return (void*)-1;
}
void RangeAllocator::free_page(void* address)
void PMM::free_page(void* address)
{
uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
uint64_t index = (uint64_t)address / 4096;
if (!bitmap_read(index)) return;
bitmap_set(index, false);
used_mem -= 4096;
@ -136,50 +136,36 @@ void RangeAllocator::free_page(void* address)
if (start_index > index) start_index = index;
}
void RangeAllocator::free_pages(void* address, uint64_t count)
// Returns `count` physical pages starting at `address` to the free pool.
// Fix: the loop previously advanced the address by `index` BYTES
// (address + index). Since free_page() computes the bitmap slot as
// address / 4096, every iteration after the first resolved to a page
// free_page() had already seen, so only the first page of the range was
// ever freed. Step by the 4096-byte page size instead.
void PMM::free_pages(void* address, uint64_t count)
{
    for (uint64_t index = 0; index < count; index++) { free_page((void*)((uint64_t)address + (index * 4096))); }
}
void RangeAllocator::lock_page(void* address)
void PMM::lock_page(void* address)
{
uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
uint64_t index = ((uint64_t)address) / 4096;
if (bitmap_read(index)) return;
bitmap_set(index, true);
used_mem += 4096;
free_mem -= 4096;
}
void RangeAllocator::reserve_page(void* address)
{
uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
if (bitmap_read(index)) return;
bitmap_set(index, true);
reserved_mem += 4096;
free_mem -= 4096;
}
void RangeAllocator::lock_pages(void* address, uint64_t count)
// Marks `count` physical pages starting at `address` as in use.
// Fix: the loop previously advanced the address by `index` BYTES
// (address + index). Since lock_page() computes the bitmap slot as
// address / 4096, only the first page of the range was actually locked —
// notably, init()'s lock_pages(bitmap_addr, ...) left most of the bitmap
// itself allocatable. Step by the 4096-byte page size instead.
void PMM::lock_pages(void* address, uint64_t count)
{
    for (uint64_t index = 0; index < count; index++) { lock_page((void*)((uint64_t)address + (index * 4096))); }
}
void RangeAllocator::reserve_pages(void* address, uint64_t count)
{
for (uint64_t index = 0; index < count; index++) { reserve_page((void*)((uint64_t)address + index)); }
}
uint64_t RangeAllocator::get_free()
// Bytes of physical memory currently available for allocation.
uint64_t PMM::get_free()
{
return free_mem;
}
uint64_t RangeAllocator::get_used()
// Bytes of physical memory handed out via request_page/request_pages
// or marked via lock_page/lock_pages.
uint64_t PMM::get_used()
{
return used_mem;
}
uint64_t RangeAllocator::get_reserved()
// Bytes of physical memory marked reserved during init() (memory-map
// regions that were never free to begin with).
uint64_t PMM::get_reserved()
{
return reserved_mem;
}

View File

@ -1,6 +1,6 @@
#include "memory/VMM.h"
#include "log/Log.h"
#include "memory/RangeAllocator.h"
#include "memory/PMM.h"
#include "std/string.h"
Paging::VirtualMemoryManager kernelVMM;
@ -162,7 +162,7 @@ namespace Paging
PageTable* PDP;
if (!PDE.Present)
{
PDP = (PageTable*)kernelPMM.request_page();
PDP = (PageTable*)PMM::request_page();
memset(PDP, 0, 0x1000);
PDE.Address = (uint64_t)PDP >> 12;
PDE.Present = true;
@ -176,7 +176,7 @@ namespace Paging
PageTable* PD;
if (!PDE.Present)
{
PD = (PageTable*)kernelPMM.request_page();
PD = (PageTable*)PMM::request_page();
memset(PD, 0, 0x1000);
PDE.Address = (uint64_t)PD >> 12;
PDE.Present = true;
@ -190,7 +190,7 @@ namespace Paging
PageTable* PT;
if (!PDE.Present)
{
PT = (PageTable*)kernelPMM.request_page();
PT = (PageTable*)PMM::request_page();
memset(PT, 0, 0x1000);
PDE.Address = (uint64_t)PT >> 12;
PDE.Present = true;