Add memory manager

This commit is contained in:
apio 2022-11-13 14:29:15 +01:00
parent 6b95307b54
commit 705c2747de
8 changed files with 452 additions and 2 deletions

View File

@ -2,6 +2,7 @@ set(SOURCES
src/main.cpp
src/string.cpp
src/Framebuffer.cpp
src/MemoryManager.cpp
src/Init.cpp
src/arch/Serial.cpp
)
@ -10,6 +11,7 @@ set(SOURCES
${SOURCES}
src/arch/x86_64/IO.cpp
src/arch/x86_64/Serial.cpp
src/arch/x86_64/MMU.cpp
)
add_compile_options(-Os)

View File

@ -1,5 +1,6 @@
#include "Init.h"
#include "Framebuffer.h"
#include "arch/MMU.h"
#include "arch/Serial.h"
#include "bootboot.h"
#include <String.h>
@ -19,4 +20,5 @@ void Init::check_magic()
// Early boot initialization: bring up the framebuffer first (so output works),
// then install the recursive mapping in the initial page directory so the MMU
// helpers become usable.
void Init::early_init()
{
Framebuffer::init();
MMU::setup_initial_page_directory();
}

View File

@ -0,0 +1,142 @@
#include "MemoryManager.h"
#include "arch/MMU.h"
#include "arch/Serial.h"
#include "bootboot.h"
#include <String.h>
#include <Types.h>
extern BOOTBOOT bootboot; // BOOTBOOT boot info structure, provided by the loader
static bool page_bitmap_read(u64 index);
static void page_bitmap_set(u64 index, bool value);
// Memory accounting, in bytes.
static u64 free_mem = 0;
static u64 used_mem = 0;
static u64 reserved_mem = 0;
// The page bitmap: one bit per physical page, set = used/reserved, clear = free.
// Physical and virtual addresses are identical until the bitmap is remapped
// once the MMU is ready.
static char* page_bitmap_addr = nullptr;
static char* page_virtual_bitmap_addr = nullptr;
static u64 page_bitmap_size; // bitmap size in bytes (zero-initialized static)
static u64 start_index = 0;  // search hint: lowest index that might be free
// Returns true when the page at bitmap position `index` is marked used/reserved.
static bool page_bitmap_read(u64 index)
{
    const u64 byte_index = index / 8;
    const int bit_mask = 0b10000000 >> (index % 8);
    return (page_virtual_bitmap_addr[byte_index] & bit_mask) != 0;
}
// Sets (value = true) or clears (value = false) the bitmap bit for page `index`.
static void page_bitmap_set(u64 index, bool value)
{
    const uint64_t byte_index = index / 8;
    const uint8_t bit_mask = 0b10000000 >> (index % 8);
    // Clear the bit unconditionally, then re-set it if requested.
    page_virtual_bitmap_addr[byte_index] &= (uint8_t)(~bit_mask);
    if (value) page_virtual_bitmap_addr[byte_index] |= bit_mask;
}
namespace MemoryManager
{
    // Scans the BOOTBOOT memory map, places the page bitmap in the largest free
    // block, and marks every physical page used or free accordingly.
    void init_physical_allocator()
    {
        u64 total_mem = 0;
        void* biggest_memory_block = nullptr;
        u64 biggest_memory_block_size = 0;

        // First pass over the memory map: total memory, and the largest free
        // block (which will host the bitmap).
        MMapEnt* ptr = &bootboot.mmap;
        uint64_t mmap_entries = (bootboot.size - 128) / 16;
        for (u64 i = 0; i < mmap_entries; i++)
        {
            u64 size = MMapEnt_Size(ptr);
            total_mem += size;
            if (!MMapEnt_IsFree(ptr))
            {
                ptr++;
                continue;
            }
            if (size > biggest_memory_block_size)
            {
                biggest_memory_block = (void*)MMapEnt_Ptr(ptr);
                biggest_memory_block_size = MMapEnt_Size(ptr);
            }
            ptr++;
        }

        page_bitmap_addr = (char*)biggest_memory_block;
        page_virtual_bitmap_addr = page_bitmap_addr; // we'll map this to virtual memory as soon as the MMU is ready

        // One bit per page; bail out if the bitmap cannot fit in the block.
        if ((total_mem / MMU::page_size() / 8) >= biggest_memory_block_size)
        {
            Serial::println("ERROR: No single memory block is enough to hold the page bitmap");
            for (;;)
                ;
        }

        page_bitmap_size = total_mem / MMU::page_size() / 8 + 1;
        // Start with everything marked used; free regions are cleared below, so
        // holes in the memory map stay permanently reserved.
        memset(page_bitmap_addr, 0xFF, page_bitmap_size);

        // Second pass: clear the bit of every free page and update accounting.
        ptr = &bootboot.mmap;
        for (uint64_t i = 0; i < mmap_entries; i++)
        {
            uint64_t index = MMapEnt_Ptr(ptr) / MMU::page_size();
            if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); }
            else
            {
                free_mem += MMapEnt_Size(ptr);
                for (uint64_t j = 0; j < (MMapEnt_Size(ptr) / MMU::page_size()); j++)
                {
                    page_bitmap_set(index + j, false);
                }
            }
            ptr++;
        }

        // The bitmap itself lives in a free block; keep its pages allocated.
        lock_pages((u64)page_bitmap_addr, page_bitmap_size / MMU::page_size() + 1);
    }

    // Full memory subsystem initialization: physical allocator, then paging.
    void init()
    {
        init_physical_allocator();
        MMU::setup_initial_page_directory();
    }

    // Marks the physical page containing address `page` as used.
    void lock_page(u64 page)
    {
        uint64_t index = ((uint64_t)page) / MMU::page_size();
        if (page_bitmap_read(index)) return; // already locked; avoid double-counting
        page_bitmap_set(index, true);
        used_mem += MMU::page_size();
        free_mem -= MMU::page_size();
    }

    // Marks `count` consecutive pages starting at physical address `pages`.
    void lock_pages(u64 pages, u64 count)
    {
        for (u64 index = 0; index < count; index++) { lock_page(pages + (index * MMU::page_size())); }
    }

    // Reserves the first free physical page and returns its physical address.
    Result<u64> alloc_physical_page()
    {
        for (u64 index = start_index; index < (page_bitmap_size * 8); index++)
        {
            if (page_bitmap_read(index)) continue;
            page_bitmap_set(index, true);
            start_index = index + 1; // everything below is known to be in use
            free_mem -= MMU::page_size();
            used_mem += MMU::page_size();
            return index * MMU::page_size();
        }
        return err; // FIXME: ENOMEM.
    }

    // Returns a physical page to the free pool.
    // Errors on an out-of-range address or on freeing an already-free page.
    Result<void> free_physical_page(u64 page)
    {
        u64 index = page / MMU::page_size();
        // FIX: valid bit indices are 0 .. (page_bitmap_size * 8) - 1, so the
        // one-past-the-end index must be rejected too (was `>`).
        if (index >= (page_bitmap_size * 8)) return err;
        if (!page_bitmap_read(index)) return err; // double free
        page_bitmap_set(index, false);
        used_mem -= MMU::page_size();
        free_mem += MMU::page_size();
        if (start_index > index) start_index = index;
        return {};
    }
}

View File

@ -0,0 +1,14 @@
#pragma once
#include <Result.h>
#include <Types.h>
// Physical page frame allocator, backed by a one-bit-per-page bitmap.
namespace MemoryManager
{
// Initializes the physical allocator from the boot memory map, then paging.
void init();
// Reserves the first free physical page; returns its physical address.
Result<u64> alloc_physical_page();
// Returns a page to the free pool; errors on out-of-range or double free.
Result<void> free_physical_page(u64 page);
// Marks the page containing physical address `page` as used.
void lock_page(u64 page);
// Marks `count` consecutive pages starting at physical address `pages` as used.
void lock_pages(u64 pages, u64 count);
}

33
kernel/src/arch/MMU.h Normal file
View File

@ -0,0 +1,33 @@
#pragma once
#include <Result.h>
struct PageDirectory;
// Architecture-independent interface to the paging hardware.
namespace MMU
{
// Bitmask of page attributes (combine with |).
enum Flags
{
None = 0,
ReadWrite = 1,
User = 2,
NoExecute = 4,
WriteThrough = 8,
CacheDisable = 16,
};
// Maps `virt` to `phys` with the given flags, creating intermediate tables.
Result<void> map(u64 virt, u64 phys, Flags flags);
// NOTE(review): unmap/remap/create_page_directory are declared here but no
// definition is visible in this commit — presumably implemented later.
Result<u64> unmap(u64 virt);
// Returns the physical address `virt` translates to, or an error if unmapped.
Result<u64> get_physical(u64 virt);
// Returns the flags of the entry that maps `virt`, or an error if unmapped.
Result<Flags> get_flags(u64 virt);
Result<void> remap(u64 virt, Flags flags);
// Loads the given page directory into the hardware (e.g. CR3 on x86_64).
void switch_page_directory(PageDirectory* dir);
// Returns the currently active page directory.
PageDirectory* get_page_directory();
// Invalidates all cached translations.
void flush_all();
Result<PageDirectory*> create_page_directory();
// Prepares the boot-time page directory for use by this interface.
void setup_initial_page_directory();
// Size of one page in bytes.
size_t page_size();
}

View File

@ -0,0 +1,248 @@
#include "arch/MMU.h"
#include "MemoryManager.h"
#define PAGE_SIZE 4096
const u64 rindex = 0776; // recursive index
const u64 sign = 0177777UL << 48; // sign extension
// One x86_64 page-table entry; the same layout is used at every paging level.
// Bit positions follow the hardware format (low bits are control flags, the
// address field holds the page-aligned frame address shifted right by 12).
struct [[gnu::packed]] PageTableEntry
{
bool present : 1;        // entry is valid; hardware ignores the rest if clear
bool read_write : 1;     // writable
bool user : 1;           // accessible from user mode
bool write_through : 1;
bool cache_disabled : 1;
bool accessed : 1;       // set by hardware on access
bool ignore0 : 1;
bool larger_pages : 1;   // at L3/L2: this entry maps a huge/large page directly
bool ignore1 : 1;
u8 available : 3;        // free for OS use
u64 address : 48;        // frame address >> 12 (see set_address/get_address)
u8 available2 : 3;       // free for OS use
bool no_execute : 1;     // requires EFER.NXE
void set_address(uint64_t addr);
uint64_t get_address();
};
// Helpers converting between a full 64-bit page-aligned address and the
// 48-bit `address` bitfield (which drops the low 12 bits).
// FIX: use diagnostic push/pop. `#pragma GCC push_options`/`pop_options` saves
// only optimization and target options, NOT diagnostic state, so the ignored
// -Wconversion was silently leaking into the rest of the translation unit.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
void PageTableEntry::set_address(uint64_t addr)
{
    this->address = (addr >> 12);
}
uint64_t PageTableEntry::get_address()
{
    return (uint64_t)this->address << 12;
}
#pragma GCC diagnostic pop
// One page-sized table of 512 entries; used for all four paging levels.
struct alignas(PAGE_SIZE) PageDirectory
{
PageTableEntry entries[512];
};
// Lock in the hardware-mandated sizes at compile time.
static_assert(sizeof(PageTableEntry) == 8UL);
static_assert(sizeof(PageDirectory) == PAGE_SIZE);
namespace MMU
{
    size_t page_size()
    {
        return PAGE_SIZE;
    }

    // Recursive-mapping helpers. With the recursive entry installed at `rindex`
    // of the top-level table, building a virtual address out of `rindex`
    // repetitions walks back into the page-table hierarchy itself, making every
    // table accessible through virtual memory.
    PageDirectory* l4_table()
    {
        u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12);
        return (PageDirectory*)l4;
    }

    u64 l4_index(u64 addr)
    {
        return (addr >> 39) & 0777;
    }

    PageTableEntry& l4_entry(u64 addr)
    {
        return l4_table()->entries[l4_index(addr)];
    }

    PageDirectory* l3_table(u64 addr)
    {
        u64 l4 = l4_index(addr);
        u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
        return (PageDirectory*)l3;
    }

    u64 l3_index(u64 addr)
    {
        return (addr >> 30) & 0777;
    }

    PageTableEntry& l3_entry(u64 addr)
    {
        return l3_table(addr)->entries[l3_index(addr)];
    }

    PageDirectory* l2_table(u64 addr)
    {
        u64 l4 = l4_index(addr);
        u64 l3 = l3_index(addr);
        u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
        return (PageDirectory*)l2;
    }

    u64 l2_index(u64 addr)
    {
        return (addr >> 21) & 0777;
    }

    PageTableEntry& l2_entry(u64 addr)
    {
        return l2_table(addr)->entries[l2_index(addr)];
    }

    PageDirectory* l1_table(u64 addr)
    {
        u64 l4 = l4_index(addr);
        u64 l3 = l3_index(addr);
        u64 l2 = l2_index(addr);
        u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
        return (PageDirectory*)l1;
    }

    u64 l1_index(u64 addr)
    {
        return (addr >> 12) & 0777;
    }

    PageTableEntry& l1_entry(u64 addr)
    {
        return l1_table(addr)->entries[l1_index(addr)];
    }

    void switch_page_directory(PageDirectory* dir)
    {
        asm volatile("mov %0, %%cr3" : : "r"(dir));
    }

    PageDirectory* get_page_directory()
    {
        PageDirectory* value;
        asm volatile("mov %%cr3, %0" : "=r"(value));
        return value;
    }

    // Reloading CR3 invalidates all (non-global) TLB entries.
    void flush_all()
    {
        switch_page_directory(get_page_directory());
    }

    // Invalidates the TLB entry for a single virtual address.
    void flush_page(u64 page)
    {
        asm volatile("invlpg (%0)" : : "r"(page) : "memory");
    }

    // Translates hardware PTE bits into the arch-independent Flags bitmask.
    Flags arch_flags_to_mmu(PageTableEntry& entry)
    {
        int result = Flags::None;
        if (entry.read_write) result |= Flags::ReadWrite;
        if (entry.user) result |= Flags::User;
        if (entry.no_execute) result |= Flags::NoExecute;
        if (entry.write_through) result |= Flags::WriteThrough;
        if (entry.cache_disabled) result |= Flags::CacheDisable;
        return (Flags)result;
    }

    // Maps `virt` -> `phys` with the given flags, allocating any missing
    // intermediate page tables on the way down. Fails if a huge/large page
    // already covers `virt`, or if a table allocation fails.
    Result<void> map(u64 virt, u64 phys, Flags flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present)
        {
            auto addr = MemoryManager::alloc_physical_page();
            if (addr.has_error()) return addr.release_error();
            l4.present = true;
            if (flags & Flags::ReadWrite) l4.read_write = true;
            if (flags & Flags::User) l4.user = true;
            l4.set_address(addr.release_value());
            // NOTE(review): freshly allocated tables are not zeroed here, so
            // stale bytes could read as valid entries — confirm that
            // alloc_physical_page returns zeroed pages, or memset new tables.
        }
        auto& l3 = l3_entry(virt);
        if (!l3.present)
        {
            auto addr = MemoryManager::alloc_physical_page();
            if (addr.has_error()) return addr.release_error();
            l3.present = true;
            if (flags & Flags::ReadWrite) l3.read_write = true;
            if (flags & Flags::User) l3.user = true;
            l3.set_address(addr.release_value());
        }
        if (l3.larger_pages) return err; // FIXME: Replacing larger pages is not supported ATM
        auto& l2 = l2_entry(virt);
        if (!l2.present)
        {
            auto addr = MemoryManager::alloc_physical_page();
            if (addr.has_error()) return addr.release_error();
            l2.present = true;
            if (flags & Flags::ReadWrite) l2.read_write = true;
            if (flags & Flags::User) l2.user = true;
            l2.set_address(addr.release_value());
        }
        if (l2.larger_pages) return err; // FIXME: Replacing larger pages is not supported ATM
        auto& l1 = l1_entry(virt);
        bool was_present = l1.present;
        // FIX: the leaf entry was never marked present, so a brand-new mapping
        // wrote flags and address into an entry the hardware ignores.
        l1.present = true;
        if (flags & Flags::ReadWrite) l1.read_write = true;
        if (flags & Flags::User) l1.user = true;
        if (flags & Flags::WriteThrough) l1.write_through = true;
        if (flags & Flags::CacheDisable) l1.cache_disabled = true;
        if (flags & Flags::NoExecute) l1.no_execute = true;
        l1.set_address(phys);
        // Only an existing translation can be cached in the TLB; a previously
        // non-present entry needs no invalidation.
        if (was_present) flush_page(virt);
        return {};
    }

    // Returns the physical address `virt` translates to (the large-page base
    // when a L3/L2 entry maps it directly), or an error if unmapped.
    Result<u64> get_physical(u64 virt)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present) return err;
        auto& l3 = l3_entry(virt);
        if (!l3.present) return err;
        if (l3.larger_pages) return l3.get_address();
        auto& l2 = l2_entry(virt);
        if (!l2.present) return err;
        if (l2.larger_pages) return l2.get_address();
        auto& l1 = l1_entry(virt);
        if (!l1.present) return err;
        return l1.get_address();
    }

    // Returns the flags of whichever entry actually maps `virt` (which may be
    // a larger-page entry at L3 or L2), or an error if unmapped.
    Result<Flags> get_flags(u64 virt)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present) return err;
        auto& l3 = l3_entry(virt);
        if (!l3.present) return err;
        if (l3.larger_pages) return arch_flags_to_mmu(l3);
        auto& l2 = l2_entry(virt);
        if (!l2.present) return err;
        if (l2.larger_pages) return arch_flags_to_mmu(l2);
        auto& l1 = l1_entry(virt);
        if (!l1.present) return err;
        return arch_flags_to_mmu(l1);
    }

    // Installs the recursive entry into the boot page directory so the
    // l*_table() helpers above become usable, then flushes the TLB.
    // Assumes the current tables are reachable at the raw CR3 address
    // (identity/boot-mapped) — TODO confirm against the bootloader's mapping.
    void setup_initial_page_directory()
    {
        PageDirectory* dir = get_page_directory();
        u64 paddr = (u64)dir;
        PageTableEntry& recursive_entry = dir->entries[rindex];
        recursive_entry.read_write = true;
        recursive_entry.present = true;
        recursive_entry.set_address(paddr);
        flush_all();
    }
}

View File

@ -1,7 +1,10 @@
#include "Framebuffer.h"
#include "Init.h"
#include "arch/MMU.h"
#include "arch/Serial.h"
extern u8 fb[1];
extern "C" void _start()
{
Init::check_magic();
@ -11,6 +14,8 @@ extern "C" void _start()
Framebuffer::rect(0, 0, 200, 200, 0xFF00FF00);
Serial::println(MMU::get_physical((u64)fb).has_error() ? "fb is not mapped" : "fb is mapped!!");
for (;;)
;
}

View File

@ -25,7 +25,7 @@ template <typename T> class Result
Result(T&& value)
{
m_storage.store_movable_reference(value);
m_storage.store_movable_reference(move(value));
m_has_value = true;
m_has_error = false;
}
@ -232,3 +232,7 @@ template <> class Result<void>
int m_error;
bool m_has_error;
};
// clang-format off
// Shorthand used by Result-returning functions to signal a generic failure.
// FIXME: carry real error codes (e.g. ENOMEM) instead of the placeholder 0.
#define err Error{0}
// clang-format on