Add a Bitmap class to provide common functionality and use that in the MemoryManager

apio 2022-12-04 15:14:07 +01:00
parent ee76bdf84d
commit 5aa2d1fa18
Signed by: apio
GPG Key ID: B8A7D06E42258954
5 changed files with 192 additions and 45 deletions


@@ -31,5 +31,5 @@ void Init::early_init()
CPU::platform_init();
MemoryManager::protect_kernel_sections().release_value();
MemoryManager::protect_kernel_sections().expect_release_value("We should succeed to protect sections");
}
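
Judging by the expect(...) helpers introduced later in this commit, expect_release_value presumably behaves like release_value but halts with the supplied message when the Result holds an error instead of a value. A hypothetical, simplified sketch of that assumed behaviour (for illustration only, not Luna's actual Result implementation):

#include <cstdio>
#include <cstdlib>
// Hypothetical, simplified Result<T>, for illustration only (not Luna's implementation).
template <typename T> class Result
{
  public:
    Result(T value) : m_value(value), m_has_value(true)
    {
    }
    static Result error()
    {
        return Result();
    }
    // Assumed semantics: return the value, or halt with the given message if there is none.
    T expect_release_value(const char* message)
    {
        if (!m_has_value)
        {
            std::fprintf(stderr, "%s\n", message);
            std::abort();
        }
        return m_value;
    }
  private:
    Result() = default;
    T m_value {};
    bool m_has_value { false };
};
int main()
{
    Result<int> ok(42);
    std::printf("%d\n", ok.expect_release_value("should not fail")); // prints 42
    Result<int> bad = Result<int>::error();
    bad.expect_release_value("We should succeed to protect sections"); // aborts with this message
}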


@@ -1,11 +1,14 @@
#include "memory/MemoryManager.h"
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
#include "boot/bootboot.h"
#include <luna/Alignment.h>
#include <luna/Bitmap.h>
#include <luna/String.h>
#include <luna/SystemError.h>
#include <luna/Types.h>
#include <luna/Units.h>
extern BOOTBOOT bootboot;
@@ -14,34 +17,37 @@ extern u8 end_of_kernel_rodata[1]
extern u8 start_of_kernel_data[1];
extern u8 end_of_kernel_data[1];
static bool page_bitmap_read(u64 index);
static void page_bitmap_set(u64 index, bool value);
static u64 free_mem = 0;
static u64 used_mem = 0;
static u64 reserved_mem = 0;
static char* page_bitmap_addr = nullptr;
static char* page_virtual_bitmap_addr = nullptr;
static u64 page_bitmap_size;
static u64 start_index = 0;
static bool page_bitmap_read(u64 index)
{
return (page_virtual_bitmap_addr[index / 8] & (0b10000000 >> (index % 8))) > 0;
}
static void page_bitmap_set(u64 index, bool value)
{
u64 byte_index = index / 8;
u8 mask = 0b10000000 >> (index % 8);
page_virtual_bitmap_addr[byte_index] &= (u8)(~mask);
if (value) { page_virtual_bitmap_addr[byte_index] |= mask; }
}
static Bitmap g_frame_bitmap;
#define CHECK_PAGE_ALIGNED(address) check(is_aligned(address, ARCH_PAGE_SIZE))
static usize get_physical_address_space_size()
{
MMapEnt* ptr = &bootboot.mmap;
u64 biggest_ptr = 0;
u64 biggest_ptr_size = 0;
u64 mmap_entries = (bootboot.size - 128) / 16;
for (u64 i = 0; i < mmap_entries; i++)
{
if (MMapEnt_Ptr(ptr) > biggest_ptr)
{
biggest_ptr = MMapEnt_Ptr(ptr);
biggest_ptr_size = MMapEnt_Size(ptr);
}
ptr++;
}
return biggest_ptr + biggest_ptr_size; // This would be the address at the end of the last entry, thus the whole
// address space that was passed to us.
}
namespace MemoryManager
{
Result<void> protect_kernel_sections()
@@ -57,12 +63,12 @@ namespace MemoryManager
return {};
}
void init_physical_allocator()
void init_physical_frame_allocator()
{
u64 total_mem = 0;
void* biggest_memory_block = nullptr;
u64 biggest_memory_block_size = 0;
void* biggest_usable_memory_block = nullptr;
u64 biggest_usable_memory_block_size = 0;
// walk the memory map
MMapEnt* ptr = &bootboot.mmap;
@@ -76,53 +82,62 @@ namespace MemoryManager
ptr++;
continue;
}
if (size > biggest_memory_block_size)
if (size > biggest_usable_memory_block_size)
{
biggest_memory_block = (void*)MMapEnt_Ptr(ptr);
biggest_memory_block_size = MMapEnt_Size(ptr);
biggest_usable_memory_block = (void*)MMapEnt_Ptr(ptr);
biggest_usable_memory_block_size = MMapEnt_Size(ptr);
}
ptr++;
}
page_bitmap_addr = (char*)biggest_memory_block;
page_virtual_bitmap_addr = page_bitmap_addr; // we'll map this to virtual memory as soon as the MMU is ready
if ((total_mem / ARCH_PAGE_SIZE / 8) >= biggest_memory_block_size)
// The entire physical address space. It may contain holes of nonexistent memory, so it differs from
// total_mem, which only counts memory that actually exists. The bitmap must cover the whole physical
// address space, since usable addresses are scattered across it.
usize physical_address_space_size = get_physical_address_space_size();
char* frame_bitmap_addr = (char*)biggest_usable_memory_block;
usize frame_bitmap_size = physical_address_space_size / ARCH_PAGE_SIZE / 8 + 1;
// This should never happen unless memory is very fragmented. Usually there is one very large block of
// usable memory with a few tiny blocks around it.
if (frame_bitmap_size >= biggest_usable_memory_block_size) [[unlikely]]
{
kerrorln("ERROR: No single memory block is enough to hold the page bitmap");
for (;;)
;
kerrorln("ERROR: No single memory block is enough to hold the frame bitmap");
CPU::efficient_halt();
}
page_bitmap_size = total_mem / ARCH_PAGE_SIZE / 8 + 1;
memset(page_bitmap_addr, 0xFF, page_bitmap_size);
g_frame_bitmap.initialize(frame_bitmap_addr, frame_bitmap_size);
g_frame_bitmap.clear(true); // Set all pages to used/reserved by default, then clear out the free ones
ptr = &bootboot.mmap;
for (u64 i = 0; i < mmap_entries; i++)
{
u64 index = MMapEnt_Ptr(ptr) / ARCH_PAGE_SIZE;
u64 pages = MMapEnt_Size(ptr) / ARCH_PAGE_SIZE;
if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); }
else
{
free_mem += MMapEnt_Size(ptr);
for (u64 j = 0; j < (MMapEnt_Size(ptr) / ARCH_PAGE_SIZE); j++) { page_bitmap_set(index + j, false); }
g_frame_bitmap.clear_region(index, pages, false);
}
ptr++;
}
lock_frames((u64)page_bitmap_addr, page_bitmap_size / ARCH_PAGE_SIZE + 1);
lock_frames((u64)frame_bitmap_addr, frame_bitmap_size / ARCH_PAGE_SIZE + 1);
}
void init()
{
init_physical_allocator();
init_physical_frame_allocator();
MMU::setup_initial_page_directory();
}
void lock_frame(u64 frame)
{
const u64 index = ((u64)frame) / ARCH_PAGE_SIZE;
if (page_bitmap_read(index)) return;
page_bitmap_set(index, true);
if (g_frame_bitmap.get(index)) return;
g_frame_bitmap.set(index, true);
used_mem += ARCH_PAGE_SIZE;
free_mem -= ARCH_PAGE_SIZE;
}
@@ -134,10 +149,10 @@ namespace MemoryManager
Result<u64> alloc_frame()
{
for (u64 index = start_index; index < (page_bitmap_size * 8); index++)
for (u64 index = start_index; index < g_frame_bitmap.size(); index++)
{
if (page_bitmap_read(index)) continue;
page_bitmap_set(index, true);
if (g_frame_bitmap.get(index)) continue;
g_frame_bitmap.set(index, true);
start_index = index + 1;
free_mem -= ARCH_PAGE_SIZE;
used_mem += ARCH_PAGE_SIZE;
@@ -150,9 +165,9 @@ namespace MemoryManager
Result<void> free_frame(u64 frame)
{
const u64 index = frame / ARCH_PAGE_SIZE;
if (index > (page_bitmap_size * 8)) return err(EFAULT);
if (!page_bitmap_read(index)) return err(EFAULT);
page_bitmap_set(index, false);
if (index > g_frame_bitmap.size()) return err(EFAULT);
if (!g_frame_bitmap.get(index)) return err(EFAULT);
g_frame_bitmap.set(index, false);
used_mem -= ARCH_PAGE_SIZE;
free_mem += ARCH_PAGE_SIZE;
if (start_index > index) start_index = index;
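
With the ad-hoc page_bitmap_* helpers gone, the allocator is a first-fit scan over g_frame_bitmap that uses start_index as a hint for the lowest possibly-free frame. A minimal hosted sketch of that pattern, for illustration only: std::vector, PAGE_SIZE and std::optional stand in for the kernel's bitmap storage, ARCH_PAGE_SIZE and Result<u64>.

#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>
constexpr uint64_t PAGE_SIZE = 4096;
struct FrameAllocator
{
    std::vector<uint8_t> bitmap; // one bit per physical frame, 1 = used
    uint64_t start_index = 0;    // hint: no free frame exists below this index
    bool get(uint64_t i) const
    {
        return bitmap[i / 8] & (0x80 >> (i % 8));
    }
    void set(uint64_t i, bool value)
    {
        uint8_t mask = 0x80 >> (i % 8);
        bitmap[i / 8] &= (uint8_t)(~mask);
        if (value) bitmap[i / 8] |= mask;
    }
    std::optional<uint64_t> alloc_frame()
    {
        for (uint64_t i = start_index; i < bitmap.size() * 8; i++)
        {
            if (get(i)) continue;
            set(i, true);
            start_index = i + 1; // the next search can skip everything before this frame
            return i * PAGE_SIZE; // physical address of the allocated frame
        }
        return std::nullopt; // out of memory
    }
    void free_frame(uint64_t address)
    {
        uint64_t index = address / PAGE_SIZE;
        set(index, false);
        if (start_index > index) start_index = index; // a frame below the hint is free again
    }
};
int main()
{
    FrameAllocator alloc;
    alloc.bitmap.assign(4, 0); // 32 frames, all free
    auto a = alloc.alloc_frame(); // 0x0
    auto b = alloc.alloc_frame(); // 0x1000
    alloc.free_frame(*a);
    auto c = alloc.alloc_frame(); // 0x0 again, because the hint was moved back
    std::printf("%#llx %#llx %#llx\n", (unsigned long long)*a, (unsigned long long)*b,
                (unsigned long long)*c);
}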


@@ -6,6 +6,7 @@ set(FREESTANDING_SOURCES
src/String.cpp
src/Units.cpp
src/SystemError.cpp
src/Bitmap.cpp
)
set(SOURCES


@@ -0,0 +1,43 @@
#pragma once
#include <luna/Types.h>
class Bitmap
{
public:
Bitmap();
Bitmap(void* location, usize size_in_bytes);
void initialize(void* location, usize size_in_bytes);
void* move(void* new_location, usize new_location_size_in_bytes);
void set(usize index, bool value);
bool get(usize index);
// size() returns size in bits! If you want the size in bytes, call size_in_bytes().
usize size()
{
return m_size_in_bytes * 8;
}
usize size_in_bytes()
{
return m_size_in_bytes;
}
void* location()
{
return (void*)m_location;
}
bool initialized()
{
return m_location;
}
void clear(bool value);
void clear_region(usize start, usize bits, bool value);
private:
char* m_location = nullptr;
usize m_size_in_bytes = 0;
};
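
The class never owns its storage: it is a thin view over a caller-provided buffer, with size() expressed in bits and size_in_bytes() mirroring the underlying allocation. A minimal hosted usage sketch, assuming <luna/Bitmap.h> is reachable on the include path; the static buffer and the printed values are illustrative, not part of the commit:

#include <luna/Bitmap.h>
#include <cstdio>
static char storage[16]; // 16 bytes of backing storage => 128 bits (illustrative)
int main()
{
    Bitmap bitmap;
    bitmap.initialize(storage, sizeof(storage));
    bitmap.clear(false);               // every bit starts out cleared
    bitmap.set(3, true);               // mark a single bit
    bitmap.clear_region(64, 32, true); // mark bits 64..95 in one call
    std::printf("%d %d %u\n", bitmap.get(3), bitmap.get(4), (unsigned)bitmap.size()); // 1 0 128
}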

luna/src/Bitmap.cpp (new file, 88 lines)

@@ -0,0 +1,88 @@
#include <luna/Bitmap.h>
#include <luna/Check.h>
#include <luna/String.h>
Bitmap::Bitmap()
{
}
Bitmap::Bitmap(void* location, usize size_in_bytes) : m_location((char*)location), m_size_in_bytes(size_in_bytes)
{
}
void Bitmap::initialize(void* location, usize size_in_bytes)
{
m_location = (char*)location;
m_size_in_bytes = size_in_bytes;
}
void* Bitmap::move(void* new_location, usize new_location_size_in_bytes)
{
expect(initialized(), "Bitmap was never initialized");
if (new_location_size_in_bytes > m_size_in_bytes) memcpy(new_location, m_location, m_size_in_bytes);
else
memcpy(new_location, m_location, new_location_size_in_bytes);
void* old_location = (void*)m_location;
m_location = (char*)new_location;
m_size_in_bytes = new_location_size_in_bytes;
return old_location;
}
void Bitmap::set(usize index, bool value)
{
expect(initialized(), "Bitmap was never initialized");
expect(index < size(), "Bitmap access out of range");
u64 byte_index = index / 8;
u8 bit_mask = 0b10000000 >> (index % 8);
m_location[byte_index] &= (u8)(~bit_mask);
if (value) { m_location[byte_index] |= bit_mask; }
}
bool Bitmap::get(usize index)
{
expect(initialized(), "Bitmap was never initialized");
expect(index < size(), "Bitmap access out of range");
usize byte_index = index / 8;
usize bit_mask = 0b10000000 >> (index % 8);
return (m_location[byte_index] & bit_mask) > 0;
}
void Bitmap::clear(bool value)
{
expect(initialized(), "Bitmap was never initialized");
u8 value_byte = value ? 0xff : 0;
memset(m_location, value_byte, m_size_in_bytes);
}
void Bitmap::clear_region(usize start, usize bits, bool value)
{
expect(initialized(), "Bitmap was never initialized");
expect((start + bits) <= size(), "Bitmap clear out of range");
u8 value_byte = value ? 0xff : 0;
// Set individual bits while not on a byte boundary.
while ((start % 8) && bits--)
{
set(start, value);
start++;
}
// Clear out the rest in bytes.
usize bytes = bits / 8;
memset(&m_location[start / 8], value_byte, bytes);
start += bytes * 8;
bits -= bytes * 8;
// Set the remaining individual bits.
while (bits--)
{
set(start, value);
start++;
}
}
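
clear_region() works in three phases: it sets leading bits one at a time until start reaches a byte boundary, memsets as many whole bytes as fit, then sets the remaining tail bits individually. A small worked sketch of that behaviour, assuming <luna/Bitmap.h> is reachable on the include path (the buffer and assertions are illustrative):

#include <luna/Bitmap.h>
#include <cassert>
int main()
{
    char storage[8]; // 64 bits
    Bitmap bitmap(storage, sizeof(storage));
    bitmap.clear(false);
    // clear_region(5, 20, true) touches bits 5..24:
    //   head: bits 5, 6, 7 are set one by one, until start is byte-aligned
    //   body: bytes 1 and 2 are memset in one go (bits 8..23)
    //   tail: bit 24 is set one by one
    bitmap.clear_region(5, 20, true);
    assert(!bitmap.get(4));
    assert(bitmap.get(5) && bitmap.get(23) && bitmap.get(24));
    assert(!bitmap.get(25));
    return 0;
}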