Compare commits


2 Commits

SHA1 Message Date
9d318d50aa Use TRY in MMU.cpp 2022-11-16 20:37:41 +01:00
2c9329928c Replace page_size (function) with ARCH_PAGE_SIZE (constant) 2022-11-16 20:37:32 +01:00
3 changed files with 36 additions and 49 deletions


@@ -43,10 +43,10 @@ namespace MemoryManager
 {
     Result<void> protect_kernel_sections()
     {
-        u64 rodata_pages = get_blocks_from_size((u64)(end_of_kernel_rodata - start_of_kernel_rodata), MMU::page_size());
+        u64 rodata_pages = get_blocks_from_size((u64)(end_of_kernel_rodata - start_of_kernel_rodata), ARCH_PAGE_SIZE);
         TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
-        u64 data_pages = get_blocks_from_size((u64)(end_of_kernel_data - start_of_kernel_data), MMU::page_size());
+        u64 data_pages = get_blocks_from_size((u64)(end_of_kernel_data - start_of_kernel_data), ARCH_PAGE_SIZE);
         TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
         return {};
@@ -81,25 +81,25 @@ namespace MemoryManager
         page_bitmap_addr = (char*)biggest_memory_block;
         page_virtual_bitmap_addr = page_bitmap_addr; // we'll map this to virtual memory as soon as the MMU is ready
-        if ((total_mem / MMU::page_size() / 8) >= biggest_memory_block_size)
+        if ((total_mem / ARCH_PAGE_SIZE / 8) >= biggest_memory_block_size)
         {
             Serial::println("ERROR: No single memory block is enough to hold the page bitmap");
             for (;;)
                 ;
         }
-        page_bitmap_size = total_mem / MMU::page_size() / 8 + 1;
+        page_bitmap_size = total_mem / ARCH_PAGE_SIZE / 8 + 1;
         memset(page_bitmap_addr, 0xFF, page_bitmap_size);
         ptr = &bootboot.mmap;
         for (uint64_t i = 0; i < mmap_entries; i++)
         {
-            uint64_t index = MMapEnt_Ptr(ptr) / MMU::page_size();
+            uint64_t index = MMapEnt_Ptr(ptr) / ARCH_PAGE_SIZE;
             if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); }
             else
             {
                 free_mem += MMapEnt_Size(ptr);
-                for (uint64_t j = 0; j < (MMapEnt_Size(ptr) / MMU::page_size()); j++)
+                for (uint64_t j = 0; j < (MMapEnt_Size(ptr) / ARCH_PAGE_SIZE); j++)
                 {
                     page_bitmap_set(index + j, false);
                 }
@@ -107,7 +107,7 @@ namespace MemoryManager
             ptr++;
         }
-        lock_pages((u64)page_bitmap_addr, page_bitmap_size / MMU::page_size() + 1);
+        lock_pages((u64)page_bitmap_addr, page_bitmap_size / ARCH_PAGE_SIZE + 1);
     }

     void init()
@@ -118,16 +118,16 @@ namespace MemoryManager
     void lock_page(u64 page)
     {
-        uint64_t index = ((uint64_t)page) / MMU::page_size();
+        uint64_t index = ((uint64_t)page) / ARCH_PAGE_SIZE;
         if (page_bitmap_read(index)) return;
         page_bitmap_set(index, true);
-        used_mem += MMU::page_size();
-        free_mem -= MMU::page_size();
+        used_mem += ARCH_PAGE_SIZE;
+        free_mem -= ARCH_PAGE_SIZE;
     }

     void lock_pages(u64 pages, u64 count)
     {
-        for (u64 index = 0; index < count; index++) { lock_page(pages + (index * MMU::page_size())); }
+        for (u64 index = 0; index < count; index++) { lock_page(pages + (index * ARCH_PAGE_SIZE)); }
     }

     Result<u64> alloc_physical_page()
@@ -137,9 +137,9 @@ namespace MemoryManager
             if (page_bitmap_read(index)) continue;
             page_bitmap_set(index, true);
             start_index = index + 1;
-            free_mem -= MMU::page_size();
-            used_mem += MMU::page_size();
-            return index * MMU::page_size();
+            free_mem -= ARCH_PAGE_SIZE;
+            used_mem += ARCH_PAGE_SIZE;
+            return index * ARCH_PAGE_SIZE;
         }
         return err; // FIXME: ENOMEM.
@@ -147,24 +147,24 @@ namespace MemoryManager
     Result<void> free_physical_page(u64 page)
    {
-        u64 index = page / MMU::page_size();
+        u64 index = page / ARCH_PAGE_SIZE;
         if (index > (page_bitmap_size * 8)) return err;
         if (!page_bitmap_read(index)) return err;
         page_bitmap_set(index, false);
-        used_mem -= MMU::page_size();
-        free_mem += MMU::page_size();
+        used_mem -= ARCH_PAGE_SIZE;
+        free_mem += ARCH_PAGE_SIZE;
         if (start_index > index) start_index = index;
         return {};
     }

     Result<void> remap(u64 address, usize count, int flags)
     {
-        check(is_aligned(address, MMU::page_size()));
+        check(is_aligned(address, ARCH_PAGE_SIZE));
         while (count--)
         {
             TRY(MMU::remap(address, flags));
-            address += MMU::page_size();
+            address += ARCH_PAGE_SIZE;
         }
         return {};
@@ -172,13 +172,13 @@ namespace MemoryManager
     Result<void> remap_unaligned(u64 address, usize count, int flags)
     {
-        if (!is_aligned(address, MMU::page_size())) count++;
-        address = align_down(address, MMU::page_size());
+        if (!is_aligned(address, ARCH_PAGE_SIZE)) count++;
+        address = align_down(address, ARCH_PAGE_SIZE);
         while (count--)
         {
             TRY(MMU::remap(address, flags));
-            address += MMU::page_size();
+            address += ARCH_PAGE_SIZE;
         }
         return {};
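
Note on the first hunk: protect_kernel_sections() sizes the rodata and data regions with get_blocks_from_size(), which is not part of this comparison. From its call sites it behaves like a ceiling division over the page size; the sketch below is only an illustration under that assumption (the u64 alias and the function body are not taken from this repository).

// Hedged sketch: get_blocks_from_size() is not shown in this comparison. Its call
// sites above suggest a ceiling division: the number of ARCH_PAGE_SIZE-sized blocks
// needed to cover `size` bytes. Types and names here are illustrative only.
using u64 = unsigned long long;

static u64 get_blocks_from_size(u64 size, u64 block_size)
{
    // Round up so a partial trailing block still occupies a whole page.
    return (size + block_size - 1) / block_size;
}

// Example: get_blocks_from_size(5000, 4096) == 2.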


@@ -3,6 +3,8 @@
 struct PageDirectory;
+extern const usize ARCH_PAGE_SIZE;
 namespace MMU
 {
     enum Flags
@@ -28,6 +30,4 @@ namespace MMU
     Result<PageDirectory*> create_page_directory_for_userspace();
     void setup_initial_page_directory();
-    usize page_size();
 }
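
This header now exposes the page size as an extern constant instead of a page_size() function. The extern matters: a namespace-scope const has internal linkage by default in C++, so the extern declaration is what lets the single definition in the architecture-specific source file be visible to every other translation unit. A compressed, single-file sketch of the pattern (the usize alias and the helper are illustrative, not from this repository):

// Hedged sketch of the declaration/definition split collapsed into one file; in the
// real tree the extern declaration lives in the shared header and the definition in
// the per-architecture source file.
using usize = unsigned long;

extern const usize ARCH_PAGE_SIZE; // header: gives the constant external linkage
const usize ARCH_PAGE_SIZE = 4096; // one source file: the single definition

// Every file that includes the header can now write ARCH_PAGE_SIZE directly where it
// previously called MMU::page_size().
static usize pages_for(usize bytes) { return (bytes + ARCH_PAGE_SIZE - 1) / ARCH_PAGE_SIZE; }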


@@ -4,6 +4,8 @@
 #define PAGE_SIZE 4096
+const usize ARCH_PAGE_SIZE = PAGE_SIZE;
 const u64 rindex = 0776; // recursive index
 const u64 sign = 0177777UL << 48; // sign extension
@@ -52,10 +54,6 @@ static_assert(sizeof(PageDirectory) == PAGE_SIZE);
 namespace MMU
 {
-    usize page_size()
-    {
-        return PAGE_SIZE;
-    }
     PageDirectory* l4_table()
     {
@@ -198,10 +196,9 @@ namespace MMU
         auto& l4 = l4_entry(virt);
         if (!l4.present)
         {
-            auto addr = MemoryManager::alloc_physical_page();
-            if (addr.has_error()) return addr.release_error();
+            u64 addr = TRY(MemoryManager::alloc_physical_page());
             l4.present = true;
-            l4.set_address(addr.release_value());
+            l4.set_address(addr);
             memset(l3_table(virt), 0, PAGE_SIZE);
             l4.ignore0 = l4.ignore1 = 0;
         }
@@ -211,10 +208,9 @@ namespace MMU
         auto& l3 = l3_entry(virt);
         if (!l3.present)
         {
-            auto addr = MemoryManager::alloc_physical_page();
-            if (addr.has_error()) return addr.release_error();
+            u64 addr = TRY(MemoryManager::alloc_physical_page());
             l3.present = true;
-            l3.set_address(addr.release_value());
+            l3.set_address(addr);
             memset(l2_table(virt), 0, PAGE_SIZE);
             l3.ignore0 = l3.ignore1 = 0;
         }
@@ -226,10 +222,9 @@ namespace MMU
         auto& l2 = l2_entry(virt);
         if (!l2.present)
         {
-            auto addr = MemoryManager::alloc_physical_page();
-            if (addr.has_error()) return addr.release_error();
+            u64 addr = TRY(MemoryManager::alloc_physical_page());
             l2.present = true;
-            l2.set_address(addr.release_value());
+            l2.set_address(addr);
             memset(l1_table(virt), 0, PAGE_SIZE);
             l2.ignore0 = l2.ignore1 = 0;
         }
@@ -253,9 +248,7 @@ namespace MMU
     Result<void> remap(u64 virt, int flags)
     {
-        auto rc = apply_cascading_flags(virt, flags);
-        if (rc.has_error()) return rc.release_error();
-        auto& l1 = *rc.release_value();
+        auto& l1 = *TRY(apply_cascading_flags(virt, flags));
         if (!l1.present) return err;
         l1.read_write = (flags & Flags::ReadWrite);
         l1.user = (flags & Flags::User);
@@ -267,9 +260,7 @@ namespace MMU
     Result<u64> unmap(u64 virt)
     {
-        auto rc = find_entry(virt);
-        if (rc.has_error()) return rc.release_error();
-        auto& l1 = *rc.release_value();
+        auto& l1 = *TRY(find_entry(virt));
         if (!l1.present) return err;
         u64 address = l1.get_address();
         memset(&l1, 0, sizeof(l1));
@@ -279,18 +270,14 @@ namespace MMU
     Result<u64> get_physical(u64 virt)
     {
-        auto rc = find_entry(virt);
-        if (rc.has_error()) return rc.release_error();
-        auto& l1 = *rc.release_value();
+        auto& l1 = *TRY(find_entry(virt));
         if (!l1.present) return err;
         return l1.get_address();
     }

     Result<int> get_flags(u64 virt)
     {
-        auto rc = find_entry(virt);
-        if (rc.has_error()) return rc.release_error();
-        auto& l1 = *rc.release_value();
+        auto& l1 = *TRY(find_entry(virt));
         if (!l1.present) return err;
         return arch_flags_to_mmu(l1);
     }
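
The "Use TRY in MMU.cpp" commit collapses each fallible call above from the three-line has_error()/release_error()/release_value() sequence into a single TRY(...) expression. The kernel's actual Result<T> and TRY definitions are not included in this comparison; the sketch below only illustrates the usual shape of such a macro (GCC/Clang statement expressions, with illustrative names and a stub allocator) so the replacements above are easier to read.

// Hedged sketch, not the kernel's actual definitions: a minimal Result<T> plus a
// statement-expression TRY macro (a GCC/Clang extension) showing the pattern that
// the "Use TRY in MMU.cpp" commit applies above. All names here are illustrative.
struct Error
{
    int code;
};

template <typename T> class Result
{
public:
    Result(T value) : m_value(value), m_has_error(false) {}
    Result(Error error) : m_error(error), m_has_error(true) {}

    bool has_error() const { return m_has_error; }
    T release_value() { return m_value; }
    Error release_error() { return m_error; }

private:
    T m_value {};
    Error m_error {};
    bool m_has_error;
};

// Evaluate a fallible expression; on error, return it from the enclosing function,
// otherwise yield the unwrapped value.
#define TRY(expr)                                                                  \
    ({                                                                             \
        auto _result = (expr);                                                     \
        if (_result.has_error()) return _result.release_error();                   \
        _result.release_value();                                                   \
    })

// A stand-in for MemoryManager::alloc_physical_page(), only to keep the sketch
// self-contained.
static Result<unsigned long> alloc_physical_page_stub()
{
    static unsigned long next = 0x1000;
    if (next == 0) return Error { 12 }; // pretend we ran out of memory
    unsigned long page = next;
    next += 4096;
    return page;
}

static Result<unsigned long> map_one_page()
{
    // Before: auto addr = ...; if (addr.has_error()) return addr.release_error();
    //         then use addr.release_value().
    // After: a single line, with any error propagated to the caller automatically.
    unsigned long addr = TRY(alloc_physical_page_stub());
    return addr;
}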