// Luna/kernel/src/memory/RangeAllocator.cpp
#define MODULE "mem"
#include "memory/RangeAllocator.h"
#include "assert.h"
#include "bootboot.h"
#include "memory/KernelMemoryManager.h"
#include "memory/Memory.h"
#include "std/string.h"
extern BOOTBOOT bootboot;
RangeAllocator kernelPMM;
void RangeAllocator::init_from_mmap()
{
uint64_t total_mem = Memory::get_system();
void* biggest_chunk = nullptr;
uint64_t biggest_chunk_size = 0;
MMapEnt* ptr = &bootboot.mmap;
uint64_t mmap_entries = (bootboot.size - 128) / 16;
for (uint64_t i = 0; i < mmap_entries; i++)
{
2022-09-11 06:23:32 +00:00
if (!MMapEnt_IsFree(ptr))
{
ptr++;
continue;
}
if (MMapEnt_Size(ptr) > biggest_chunk_size)
{
biggest_chunk = (void*)MMapEnt_Ptr(ptr);
biggest_chunk_size = MMapEnt_Size(ptr);
}
ptr++;
}
bitmap_addr = (char*)biggest_chunk;
ASSERT((total_mem / 4096 / 8) < biggest_chunk_size);
bitmap_size = total_mem / 4096 / 8 + 1;
memset(bitmap_addr, 0xFF, bitmap_size);
ptr = &bootboot.mmap;
for (uint64_t i = 0; i < mmap_entries; i++)
{
uint64_t index = MMapEnt_Ptr(ptr) / 4096;
if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); }
else
{
free_mem += MMapEnt_Size(ptr);
for (uint64_t i = 0; i < (MMapEnt_Size(ptr) / 4096); i++) { bitmap_set(index + i, false); }
}
ptr++;
}
lock_pages(bitmap_addr, bitmap_size / 4096 + 1);
2022-09-22 05:57:30 +00:00
alloc_base = 0;
}
// Initialize an allocator managing the range [start_address, end_address).
// The tracking bitmap is allocated from kernel memory and zeroed (all free).
void RangeAllocator::init(void* start_address, void* end_address)
{
    // The range must be well-formed: end above start. The original check was
    // inverted (start - end > 0), which rejects every valid range and would
    // make total_size underflow for the natural calling convention.
    ASSERT(((int64_t)end_address - (int64_t)start_address) > 0);
    uint64_t total_size = (uint64_t)end_address - (uint64_t)start_address;
    // One bit per 4 KiB page, rounded up.
    bitmap_size = total_size / 4096 / 8 + 1;
    bitmap_addr = (char*)KernelMemoryManager::get_pages(bitmap_size / 4096 + 1);
    memset(bitmap_addr, 0, bitmap_size); // all pages start out free
    alloc_base = (uint64_t)start_address;
}
// Returns true if the page at the given bitmap index is marked used.
// Bits are stored MSB-first within each byte.
bool RangeAllocator::bitmap_read(uint64_t index)
{
    const uint64_t byte = index / 8;
    const uint8_t mask = (uint8_t)(0x80 >> (index % 8));
    return (bitmap_addr[byte] & mask) != 0;
}
// Set (value=true) or clear (value=false) the used-bit for the page at the
// given bitmap index. Bits are stored MSB-first within each byte.
void RangeAllocator::bitmap_set(uint64_t index, bool value)
{
    const uint64_t byte = index / 8;
    const uint8_t mask = (uint8_t)(0x80 >> (index % 8));
    if (value) { bitmap_addr[byte] |= mask; }
    else { bitmap_addr[byte] &= (uint8_t)~mask; }
}
void* RangeAllocator::request_page()
{
for (uint64_t index = start_index; index < (bitmap_size * 8); index++)
{
if (bitmap_read(index)) continue;
bitmap_set(index, true);
start_index = index + 1;
free_mem -= 4096;
used_mem += 4096;
2022-09-22 05:57:30 +00:00
return (void*)(alloc_base + (index * 4096));
}
return 0;
}
// Allocate `count` contiguous 4 KiB pages. Returns the address of the first
// page, or nullptr if no contiguous run of that length is available.
void* RangeAllocator::request_pages(uint64_t count)
{
    uint64_t contiguous = 0;       // length of the current free run
    uint64_t contiguous_start = 0; // bitmap index where the run began
    for (uint64_t index = start_index; index < (bitmap_size * 8); index++)
    {
        if (bitmap_read(index))
        {
            contiguous = 0; // run broken by a used page
            continue;
        }
        if (contiguous == 0) contiguous_start = index; // a new run begins here
        contiguous++;
        if (contiguous == count)
        {
            for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
            free_mem -= (count * 4096);
            used_mem += (count * 4096);
            return (void*)(alloc_base + (contiguous_start * 4096));
        }
    }
    return nullptr; // no run long enough (was 0; nullptr is the idiomatic form)
}
void RangeAllocator::free_page(void* address)
{
2022-09-22 05:57:30 +00:00
uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
if (!bitmap_read(index)) return;
bitmap_set(index, false);
used_mem -= 4096;
free_mem += 4096;
if (start_index > index) start_index = index;
}
// Free `count` consecutive pages starting at `address`.
void RangeAllocator::free_pages(void* address, uint64_t count)
{
    // Advance by whole pages (4096 bytes). The original advanced by single
    // bytes, so every page of the range except the first stayed allocated.
    for (uint64_t i = 0; i < count; i++) { free_page((void*)((uint64_t)address + i * 4096)); }
}
void RangeAllocator::lock_page(void* address)
{
2022-09-22 05:57:30 +00:00
uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
if (bitmap_read(index)) return;
bitmap_set(index, true);
used_mem += 4096;
free_mem -= 4096;
}
void RangeAllocator::reserve_page(void* address)
{
2022-09-22 05:57:30 +00:00
uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
if (bitmap_read(index)) return;
bitmap_set(index, true);
reserved_mem += 4096;
free_mem -= 4096;
}
// Lock `count` consecutive pages starting at `address`.
void RangeAllocator::lock_pages(void* address, uint64_t count)
{
    // Advance by whole pages (4096 bytes). The original advanced by single
    // bytes, so only the page(s) touched by the first `count` bytes were
    // locked — init_from_mmap's self-protection of the bitmap relied on this
    // working correctly.
    for (uint64_t i = 0; i < count; i++) { lock_page((void*)((uint64_t)address + i * 4096)); }
}
// Reserve `count` consecutive pages starting at `address`.
void RangeAllocator::reserve_pages(void* address, uint64_t count)
{
    // Advance by whole pages (4096 bytes); the original advanced by single
    // bytes and so reserved only the first page(s) of the range.
    for (uint64_t i = 0; i < count; i++) { reserve_page((void*)((uint64_t)address + i * 4096)); }
}
// Bytes currently free (available for allocation) in this range.
uint64_t RangeAllocator::get_free()
{
return free_mem;
}
// Bytes currently allocated (requested or locked) in this range.
uint64_t RangeAllocator::get_used()
{
return used_mem;
}
// Bytes marked reserved (firmware/unusable regions) in this range.
uint64_t RangeAllocator::get_reserved()
{
return reserved_mem;
}