From 995d1bc36e1323c75ddaad25741c5e2afbdb9618 Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 27 Feb 2023 12:24:21 +0100
Subject: [PATCH 1/6] x86_64/MMU: Map all physical memory into the higher half
 instead of using recursive mapping

This still has a few problems. Notably, we are not using huge pages for
this mapping, which would be a lot more efficient: right now, used memory
at boot is 8.1 MiB on a 256 MiB system.

Even so, once that is improved, this scheme will be much better than
recursive mapping. fork(), for example, will be MUCH easier to implement.
---
 kernel/src/arch/x86_64/MMU.cpp | 225 +++++++++++++++------------------
 1 file changed, 99 insertions(+), 126 deletions(-)
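The memory figure in the message above is mostly page-table bookkeeping. A rough standalone model of that cost (an editor's illustration, not part of the patch series; exact numbers depend on what else the kernel maps): with 4 KiB pages, every 2 MiB of mapped physical memory needs its own 4 KiB L1 table, while 2 MiB huge pages stop the walk at L2 and drop that entire level.

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t KiB = 1024, MiB = 1024 * KiB, GiB = 1024 * MiB;

    constexpr std::uint64_t ceil_div(std::uint64_t a, std::uint64_t b) { return (a + b - 1) / b; }

    // 4 KiB mappings: one L1 table per 2 MiB, one L2 per 1 GiB, one L3 per 512 GiB.
    constexpr std::uint64_t tables_with_4k_pages(std::uint64_t bytes)
    {
        return ceil_div(bytes, 2 * MiB) + ceil_div(bytes, GiB) + ceil_div(bytes, 512 * GiB);
    }

    // 2 MiB mappings: the L1 level disappears entirely.
    constexpr std::uint64_t tables_with_2m_pages(std::uint64_t bytes)
    {
        return ceil_div(bytes, GiB) + ceil_div(bytes, 512 * GiB);
    }

    int main()
    {
        // For 256 MiB: 130 tables (~520 KiB) vs 2 tables (8 KiB).
        std::printf("4 KiB pages: %llu KiB of tables\n",
                    (unsigned long long)(tables_with_4k_pages(256 * MiB) * 4));
        std::printf("2 MiB pages: %llu KiB of tables\n",
                    (unsigned long long)(tables_with_2m_pages(256 * MiB) * 4));
    }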
diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index 2bd6ee49..2f85f238 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -1,6 +1,8 @@
 #include "arch/MMU.h"
 #include "Log.h"
 #include "memory/MemoryManager.h"
+#include "memory/MemoryMap.h"
+#include
 #include
 #include
 #include
@@ -12,6 +14,10 @@
 PageDirectory* g_kernel_directory;
 u64 g_kernel_directory_virt;
 
+// The bootloader maps up to 16GiB of physical memory for us at address 0. Using this bootstrap mapping, we'll map (all)
+// physical memory at 0xFFFF800000000000.
+u64 g_physical_mapping_base = 0;
+
 void PageTableEntry::set_address(u64 addr)
 {
     this->address = (addr >> 12);
@@ -36,90 +42,9 @@ static bool has_flag(int flags, MMU::Flags flag)
 
 namespace MMU
 {
-
-    constexpr PageDirectory* l4_table()
+    template <typename T> T translate_physical(T phys)
     {
-        constexpr u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12);
-        return (PageDirectory*)l4;
-    }
-
-    constexpr u64 l4_index(u64 addr)
-    {
-        return (addr >> 39) & 0777;
-    }
-
-    PageTableEntry& l4_entry(u64 addr)
-    {
-        return l4_table()->entries[l4_index(addr)];
-    }
-
-    constexpr PageDirectory* raw_l3_table(u64 l4)
-    {
-        const u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
-        return (PageDirectory*)l3;
-    }
-
-    constexpr PageDirectory* l3_table(u64 addr)
-    {
-        const u64 l4 = l4_index(addr);
-        return raw_l3_table(l4);
-    }
-
-    constexpr u64 l3_index(u64 addr)
-    {
-        return (addr >> 30) & 0777;
-    }
-
-    PageTableEntry& l3_entry(u64 addr)
-    {
-        return l3_table(addr)->entries[l3_index(addr)];
-    }
-
-    constexpr PageDirectory* raw_l2_table(u64 l4, u64 l3)
-    {
-        const u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
-        return (PageDirectory*)l2;
-    }
-
-    constexpr PageDirectory* l2_table(u64 addr)
-    {
-        const u64 l4 = l4_index(addr);
-        const u64 l3 = l3_index(addr);
-        return raw_l2_table(l4, l3);
-    }
-
-    constexpr u64 l2_index(u64 addr)
-    {
-        return (addr >> 21) & 0777;
-    }
-
-    PageTableEntry& l2_entry(u64 addr)
-    {
-        return l2_table(addr)->entries[l2_index(addr)];
-    }
-
-    constexpr PageDirectory* raw_l1_table(u64 l4, u64 l3, u64 l2)
-    {
-        const u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
-        return (PageDirectory*)l1;
-    }
-
-    constexpr PageDirectory* l1_table(u64 addr)
-    {
-        const u64 l4 = l4_index(addr);
-        const u64 l3 = l3_index(addr);
-        const u64 l2 = l2_index(addr);
-        return raw_l1_table(l4, l3, l2);
-    }
-
-    constexpr u64 l1_index(u64 addr)
-    {
-        return (addr >> 12) & 0777;
-    }
-
-    PageTableEntry& l1_entry(u64 addr)
-    {
-        return l1_table(addr)->entries[l1_index(addr)];
+        return (T)(g_physical_mapping_base + (u64)phys);
     }
 
     void switch_page_directory(PageDirectory* dir)
@@ -134,6 +59,11 @@ namespace MMU
         return value;
     }
 
+    PageDirectory* get_virtual_page_directory()
+    {
+        return translate_physical(get_page_directory());
+    }
+
     void flush_all()
    {
         switch_page_directory(get_page_directory());
     }
@@ -144,6 +74,26 @@ namespace MMU
         asm volatile("invlpg (%0)" : : "r"(page) : "memory");
     }
 
+    constexpr u64 l4_index(u64 addr)
+    {
+        return (addr >> 39) & 0777;
+    }
+
+    constexpr u64 l3_index(u64 addr)
+    {
+        return (addr >> 30) & 0777;
+    }
+
+    constexpr u64 l2_index(u64 addr)
+    {
+        return (addr >> 21) & 0777;
+    }
+
+    constexpr u64 l1_index(u64 addr)
+    {
+        return (addr >> 12) & 0777;
+    }
+
     int arch_flags_to_mmu(const PageTableEntry& entry)
     {
         int result = Flags::None;
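The four index helpers above slice a canonical x86_64 virtual address into 9-bit pieces (0777 octal == 511; each level indexes 512 eight-byte entries). A standalone illustration of what they return for the new direct-map base (editor's sketch, not part of the patch; the helpers are exactly the shifts shown in the hunk):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const std::uint64_t virt = 0xFFFF800000000000; // the direct-map base chosen below
        std::printf("l4=%llu l3=%llu l2=%llu l1=%llu\n",
                    (unsigned long long)((virt >> 39) & 0777),  // 256
                    (unsigned long long)((virt >> 30) & 0777),  // 0
                    (unsigned long long)((virt >> 21) & 0777),  // 0
                    (unsigned long long)((virt >> 12) & 0777)); // 0
        // l4=256: the direct map begins at the first L4 slot of the higher
        // half, so slots 256..511 are kernel territory in every address space.
    }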
@@ -155,17 +105,46 @@ namespace MMU
         return result;
     }
 
+    PageTableEntry& l4_entry(u64 virt)
+    {
+        auto index = l4_index(virt);
+        return get_virtual_page_directory()->entries[index];
+    }
+
+    PageDirectory& page_table(const PageTableEntry& entry)
+    {
+        return *translate_physical((PageDirectory*)entry.get_address());
+    }
+
+    PageTableEntry& l3_entry(const PageTableEntry& entry, u64 virt)
+    {
+        auto index = l3_index(virt);
+        return page_table(entry).entries[index];
+    }
+
+    PageTableEntry& l2_entry(const PageTableEntry& entry, u64 virt)
+    {
+        auto index = l2_index(virt);
+        return page_table(entry).entries[index];
+    }
+
+    PageTableEntry& l1_entry(const PageTableEntry& entry, u64 virt)
+    {
+        auto index = l1_index(virt);
+        return page_table(entry).entries[index];
+    }
+
     Result<PageTableEntry*> find_entry(u64 virt)
     {
         const auto& l4 = l4_entry(virt);
         if (!l4.present) return err(EFAULT);
-        auto& l3 = l3_entry(virt);
+        auto& l3 = l3_entry(l4, virt);
         if (!l3.present) return err(EFAULT);
         if (l3.larger_pages) return &l3;
-        auto& l2 = l2_entry(virt);
+        auto& l2 = l2_entry(l3, virt);
         if (!l2.present) return err(EFAULT);
         if (l2.larger_pages) return &l2;
-        return &l1_entry(virt);
+        return &l1_entry(l2, virt);
     }
 
     Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags)
@@ -174,17 +153,17 @@ namespace MMU
         if (!l4.present) return err(EFAULT);
         if (flags & Flags::ReadWrite) l4.read_write = true;
         if (flags & Flags::User) l4.user = true;
-        auto& l3 = l3_entry(virt);
+        auto& l3 = l3_entry(l4, virt);
         if (!l3.present) return err(EFAULT);
         if (l3.larger_pages) return &l3;
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;
-        auto& l2 = l2_entry(virt);
+        auto& l2 = l2_entry(l3, virt);
         if (!l2.present) return err(EFAULT);
         if (l2.larger_pages) return &l2;
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
-        auto& l1 = l1_entry(virt);
+        auto& l1 = l1_entry(l2, virt);
         return &l1;
     }
 
@@ -196,38 +175,38 @@ namespace MMU
             const u64 addr = TRY(MemoryManager::alloc_frame());
             l4.present = true;
             l4.set_address(addr);
-            memset(l3_table(virt), 0, ARCH_PAGE_SIZE);
+            memset(&page_table(l4), 0, ARCH_PAGE_SIZE);
         }
         if (flags & Flags::ReadWrite) l4.read_write = true;
         if (flags & Flags::User) l4.user = true;
 
-        auto& l3 = l3_entry(virt);
+        auto& l3 = l3_entry(l4, virt);
         if (!l3.present)
         {
             const u64 addr = TRY(MemoryManager::alloc_frame());
             l3.present = true;
             l3.set_address(addr);
-            memset(l2_table(virt), 0, ARCH_PAGE_SIZE);
+            memset(&page_table(l3), 0, ARCH_PAGE_SIZE);
         }
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;
         if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
 
-        auto& l2 = l2_entry(virt);
+        auto& l2 = l2_entry(l3, virt);
         if (!l2.present)
         {
             const u64 addr = TRY(MemoryManager::alloc_frame());
             l2.present = true;
             l2.set_address(addr);
-            memset(l1_table(virt), 0, ARCH_PAGE_SIZE);
+            memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
         }
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
         if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
 
-        auto& l1 = l1_entry(virt);
+        auto& l1 = l1_entry(l2, virt);
         if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
         l1.present = true;
         l1.read_write = has_flag(flags, Flags::ReadWrite);
@@ -281,39 +260,38 @@ namespace MMU
         PageDirectory* const dir = get_page_directory();
         g_kernel_directory = dir;
 
-        const u64 paddr = (u64)dir;
-        PageTableEntry& recursive_entry = dir->entries[rindex];
-        recursive_entry.read_write = true;
-        recursive_entry.present = true;
-        recursive_entry.set_address(paddr);
-        flush_all();
+        const u64 physical_memory_base = 0xFFFF800000000000;
 
-        g_kernel_directory_virt =
-            MemoryManager::get_kernel_mapping_for_frames((u64)dir, 1, MMU::ReadWrite | MMU::NoExecute).value();
+        MemoryMapIterator iter;
+        const MemoryMapEntry highest_entry = iter.highest();
+
+        const u64 physical_memory_size = highest_entry.address() + highest_entry.size();
+
+        // FIXME: Do this using 2MiB huge pages.
+        MemoryManager::map_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_PAGE_SIZE,
+                                     MMU::ReadWrite | MMU::NoExecute);
+
+        g_physical_mapping_base = physical_memory_base;
+
+        g_kernel_directory_virt = translate_physical((u64)g_kernel_directory);
 
         kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory);
     }
 
     Result<PageDirectory*> create_page_directory_for_userspace()
     {
-        const u64 directory_virt = TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite | MMU::NoExecute));
-        const u64 directory_phys = MMU::get_physical(directory_virt).value();
+        const u64 directory_phys = TRY(MemoryManager::alloc_frame());
+        const u64 directory_virt = translate_physical(directory_phys);
 
         PageDirectory* const directory = (PageDirectory*)directory_virt;
         memset(directory, 0, ARCH_PAGE_SIZE);
-        PageTableEntry& recursive_entry = directory->entries[rindex];
-        recursive_entry.read_write = true;
-        recursive_entry.present = true;
-        recursive_entry.set_address(directory_phys);
+
+        constexpr auto HALF_PAGE = ARCH_PAGE_SIZE / 2;
+        // Copy the upper part of the page directory (higher half)
+        memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr(g_kernel_directory, HALF_PAGE), HALF_PAGE);
 
         kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);
 
-        directory->entries[511] = ((PageDirectory*)g_kernel_directory_virt)->entries[511];
-
-        // From now on, we're only going to use the physical address, since accessing the PageDirectory will be dealt
-        // with using recursive mapping. So let's make sure we don't leak any VM.
-        MemoryManager::unmap_weak_and_free_vm(directory_virt, 1);
-
         return (PageDirectory*)directory_phys;
     }
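Why copying HALF_PAGE (2048 bytes, i.e. entries 256..511) is enough to give a new user directory the whole kernel: each L4 entry covers 512 GiB, and copying an entry aliases the same lower-level tables rather than duplicating them. A simplified sketch of the invariant (editor's illustration; the real entries are bitfields, not plain integers):

    struct PageDirectory { unsigned long long entries[512]; };

    // Share the higher half: the new directory points at the *same* kernel L3
    // tables, so kernel mappings made later through those tables are visible
    // in every address space without further synchronization.
    void share_kernel_half(PageDirectory& user, const PageDirectory& kernel)
    {
        for (int i = 256; i < 512; i++) user.entries[i] = kernel.entries[i];
    }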
@@ -321,16 +299,11 @@ namespace MMU
     {
         check(directory);
 
-        // Needed in order to access page tables using the recursive mapping system.
-        switch_page_directory(directory);
+        switch_page_directory(g_kernel_directory);
 
-        auto guard = make_scope_guard([&] {
-            check(g_kernel_directory);
-            switch_page_directory(g_kernel_directory);
-            MemoryManager::free_frame((u64)directory);
-        });
+        auto guard = make_scope_guard([directory] { MemoryManager::free_frame((u64)directory); });
 
-        PageDirectory* const table = l4_table();
+        PageDirectory* const table = translate_physical(directory);
 
         // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages)
         for (u64 i = 0; i < 510; i++)
@@ -338,7 +311,7 @@ namespace MMU
             PageTableEntry& l4 = table->entries[i];
             if (!l4.present) continue;
 
-            PageDirectory* const pdp = raw_l3_table(i);
+            PageDirectory* const pdp = &page_table(l4);
 
             for (u64 j = 0; j < 512; j++)
             {
@@ -350,7 +323,7 @@ namespace MMU
                     TRY(MemoryManager::free_frame(l3.get_address()));
                 }
 
-                PageDirectory* const pd = raw_l2_table(i, j);
+                PageDirectory* const pd = &page_table(l3);
 
                 for (u64 k = 0; k < 512; k++)
                 {
@@ -362,7 +335,7 @@ namespace MMU
                     TRY(MemoryManager::free_frame(l2.get_address()));
                 }
 
-                    PageDirectory* const pt = raw_l1_table(i, j, k);
+                    PageDirectory* const pt = &page_table(l2);
 
                     for (u64 l = 0; l < 512; l++)
                     {
-- 
2.34.1

From 837d483e0b895b19072538b00072f508379870ed Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 27 Feb 2023 12:30:52 +0100
Subject: [PATCH 2/6] x86_64/MMU: Copy from the mapped kernel directory
 instead of the physical version
---
 kernel/src/arch/x86_64/MMU.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index 2f85f238..0e04332b 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -288,7 +288,8 @@ namespace MMU
 
         constexpr auto HALF_PAGE = ARCH_PAGE_SIZE / 2;
         // Copy the upper part of the page directory (higher half)
-        memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr(g_kernel_directory, HALF_PAGE), HALF_PAGE);
+        memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr((PageDirectory*)g_kernel_directory_virt, HALF_PAGE),
+               HALF_PAGE);
 
         kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);
 
-- 
2.34.1

From 426f6e4da82d12abdb6f6658118edf2c2f35d73a Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 27 Feb 2023 12:41:28 +0100
Subject: [PATCH 3/6] x86_64: Allow mapping huge pages, but don't do it yet
---
 kernel/src/arch/MMU.h               | 10 +++++++++-
 kernel/src/arch/x86_64/MMU.cpp      | 31 +++++++++++++++++++----------
 kernel/src/arch/x86_64/MMU.h        |  1 +
 kernel/src/memory/MemoryManager.cpp |  8 ++++----
 4 files changed, 35 insertions(+), 15 deletions(-)
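Background for this patch (editor's note): a "huge page" is an L2 entry with the PS bit set — `larger_pages` in this codebase — which maps 2 MiB directly instead of pointing at an L1 table. An illustrative raw bit layout (a sketch only; the kernel manipulates these bits through PageTableEntry's bitfields instead):

    #include <cstdint>

    constexpr std::uint64_t PRESENT   = 1ull << 0;
    constexpr std::uint64_t HUGE_PAGE = 1ull << 7; // the "PS" bit, i.e. larger_pages

    // One L2 entry like this replaces an entire 4 KiB L1 table (512 entries).
    // phys must be 2 MiB-aligned so the low 21 bits are free for flags.
    constexpr std::uint64_t make_huge_l2_entry(std::uint64_t phys)
    {
        return phys | PRESENT | HUGE_PAGE;
    }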
diff --git a/kernel/src/arch/MMU.h b/kernel/src/arch/MMU.h
index 031c038e..addd5863 100644
--- a/kernel/src/arch/MMU.h
+++ b/kernel/src/arch/MMU.h
@@ -7,6 +7,8 @@
 #error "Unknown architecture."
 #endif
 
+constexpr u64 PAGES_PER_HUGE_PAGE = ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE;
+
 namespace MMU
 {
     enum Flags
@@ -19,7 +21,13 @@ namespace MMU
         CacheDisable = 16,
     };
 
-    Result<void> map(u64 virt, u64 phys, int flags);
+    enum class UseHugePages
+    {
+        No = 0,
+        Yes = 1
+    };
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages);
     Result<void> unmap(u64 virt);
     Result<u64> get_physical(u64 virt);
     Result<int> get_flags(u64 virt);

diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index 0e04332b..de239a22 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -167,7 +167,18 @@ namespace MMU
         return &l1;
     }
 
-    Result<void> map(u64 virt, u64 phys, int flags)
+    void set_page_table_entry_properties(PageTableEntry& entry, u64 phys, int flags)
+    {
+        entry.present = true;
+        entry.read_write = has_flag(flags, Flags::ReadWrite);
+        entry.user = has_flag(flags, Flags::User);
+        entry.write_through = has_flag(flags, Flags::WriteThrough);
+        entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
+        entry.no_execute = has_flag(flags, Flags::NoExecute);
+        entry.set_address(phys);
+    }
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages)
     {
         auto& l4 = l4_entry(virt);
         if (!l4.present)
@@ -191,7 +202,7 @@ namespace MMU
 
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;
-        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l3.larger_pages) return err(EEXIST);
 
         auto& l2 = l2_entry(l3, virt);
         if (!l2.present)
@@ -204,17 +215,17 @@ namespace MMU
 
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
-        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l2.larger_pages) return err(EEXIST);
+        else if (use_huge_pages == UseHugePages::Yes)
+        {
+            l2.larger_pages = true;
+            set_page_table_entry_properties(l2, phys, flags);
+            return {};
+        }
 
         auto& l1 = l1_entry(l2, virt);
         if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
-        l1.present = true;
-        l1.read_write = has_flag(flags, Flags::ReadWrite);
-        l1.user = has_flag(flags, Flags::User);
-        l1.write_through = has_flag(flags, Flags::WriteThrough);
-        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
-        l1.no_execute = has_flag(flags, Flags::NoExecute);
-        l1.set_address(phys);
+        set_page_table_entry_properties(l1, phys, flags);
 
         return {};
     }
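The call-site contract after this change (editor's sketch of a hypothetical caller): every caller now states explicitly whether it is mapping a 2 MiB huge page. A huge-page mapping stops the walk at L2, so virt and phys should be 2 MiB-aligned — this version of map() does not verify that itself.

    // Map one huge page, read-write, non-executable (illustrative only):
    TRY(MMU::map(virt, phys, MMU::ReadWrite | MMU::NoExecute, MMU::UseHugePages::Yes));

    // Existing 4 KiB call sites simply pass UseHugePages::No, as the
    // MemoryManager changes below do.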
diff --git a/kernel/src/arch/x86_64/MMU.h b/kernel/src/arch/x86_64/MMU.h
index 198485f0..d1f8c362 100644
--- a/kernel/src/arch/x86_64/MMU.h
+++ b/kernel/src/arch/x86_64/MMU.h
@@ -2,6 +2,7 @@
 #include
 
 const usize ARCH_PAGE_SIZE = 4096;
+const usize ARCH_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // 2 MiB
 
 const u64 rindex = 0776; // recursive index
 const u64 sign = 0177777UL << 48; // sign extension

diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index 96818eb7..35e6b80a 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -202,7 +202,7 @@ namespace MemoryManager
 
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
@@ -225,7 +225,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -250,7 +250,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -275,7 +275,7 @@ namespace MemoryManager
 
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
-- 
2.34.1

From 8df441064f58e173b0092f8b8172c3044569b61d Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 27 Feb 2023 12:47:17 +0100
Subject: [PATCH 4/6] x86_64/MMU: Map the physical address space using huge
 pages
---
 kernel/src/arch/x86_64/MMU.cpp      |  6 ++---
 kernel/src/memory/MemoryManager.cpp | 36 +++++++++++++++++++++++++++++
 kernel/src/memory/MemoryManager.h   |  3 +++
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index de239a22..7d037bef 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -278,9 +278,9 @@ namespace MMU
 
         const u64 physical_memory_size = highest_entry.address() + highest_entry.size();
 
-        // FIXME: Do this using 2MiB huge pages.
-        MemoryManager::map_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_PAGE_SIZE,
-                                     MMU::ReadWrite | MMU::NoExecute);
+        check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
+        MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
+                                          MMU::ReadWrite | MMU::NoExecute);
 
         g_physical_mapping_base = physical_memory_base;
 
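The check() above asserts that the top of physical memory already falls on a 2 MiB boundary; a firmware memory map where that does not hold would trip it. A more forgiving variant would round the size up and map slightly past the last usable frame (editor's sketch, not what the patch does):

    constexpr u64 round_up_to_huge_page(u64 size)
    {
        return (size + ARCH_HUGE_PAGE_SIZE - 1) & ~(u64)(ARCH_HUGE_PAGE_SIZE - 1);
    }
    // e.g. map_huge_frames_at(base, 0, round_up_to_huge_page(size) / ARCH_HUGE_PAGE_SIZE, ...);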
diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index 35e6b80a..bfa4e497 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -213,6 +213,29 @@ namespace MemoryManager
         return {};
     }
 
+    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+        CHECK_PAGE_ALIGNED(phys);
+
+        usize pages_mapped = 0;
+
+        // Let's clean up after ourselves if we fail.
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak_huge(virt, pages_mapped); });
+
+        while (pages_mapped < count)
+        {
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::Yes));
+            virt += ARCH_HUGE_PAGE_SIZE;
+            phys += ARCH_HUGE_PAGE_SIZE;
+            pages_mapped++;
+        }
+
+        guard.deactivate();
+
+        return {};
+    }
+
     Result<u64> alloc_at(u64 virt, usize count, int flags)
     {
         CHECK_PAGE_ALIGNED(virt);
@@ -322,6 +345,19 @@ namespace MemoryManager
         return {};
     }
 
+    Result<void> unmap_weak_huge(u64 virt, usize count)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+
+        while (count--)
+        {
+            TRY(MMU::unmap(virt));
+            virt += ARCH_HUGE_PAGE_SIZE;
+        }
+
+        return {};
+    }
+
     Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
     {
         CHECK_PAGE_ALIGNED(virt);

diff --git a/kernel/src/memory/MemoryManager.h b/kernel/src/memory/MemoryManager.h
index 8f2b9bbc..3ceecffb 100644
--- a/kernel/src/memory/MemoryManager.h
+++ b/kernel/src/memory/MemoryManager.h
@@ -53,6 +53,7 @@ namespace MemoryManager
     }
 
     Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags);
+    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags);
 
     Result<u64> alloc_at(u64 virt, usize count, int flags);
     Result<u64> alloc_for_kernel(usize count, int flags);
@@ -64,6 +65,8 @@ namespace MemoryManager
     Result<void> unmap_weak(u64 virt, usize count);
     Result<void> unmap_weak_and_free_vm(u64 virt, usize count);
 
+    Result<void> unmap_weak_huge(u64 virt, usize count);
+
     usize free();
     usize used();
     usize reserved();
-- 
2.34.1
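map_huge_frames_at above leans on the scope-guard idiom the rest of MemoryManager already uses: the guard rolls back partial work whenever a TRY() returns early, and is deactivated once the whole loop succeeds. The general shape of such a guard, for readers following along (editor's sketch; the kernel's actual make_scope_guard is defined elsewhere in the tree and may differ in detail):

    template <typename F> class ScopeGuard
    {
      public:
        ScopeGuard(F f) : m_f(f) {}
        ~ScopeGuard()
        {
            if (m_active) m_f(); // run the cleanup unless deactivated
        }
        void deactivate() { m_active = false; }

      private:
        F m_f;
        bool m_active { true };
    };

    template <typename F> ScopeGuard<F> make_scope_guard(F f)
    {
        return ScopeGuard<F>(f);
    }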
From 752dfdbf1c77b995cd54cbe904ea8e0f9ccb85bb Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 27 Feb 2023 12:51:29 +0100
Subject: [PATCH 5/6] MemoryManager: Reuse the existing mapping of physical
 memory that MMU has
---
 kernel/src/arch/MMU.h               |  2 ++
 kernel/src/arch/x86_64/MMU.cpp      |  5 +++++
 kernel/src/memory/MemoryManager.cpp | 14 ++------------
 3 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/kernel/src/arch/MMU.h b/kernel/src/arch/MMU.h
index addd5863..8a46e6fa 100644
--- a/kernel/src/arch/MMU.h
+++ b/kernel/src/arch/MMU.h
@@ -27,6 +27,8 @@ namespace MMU
         Yes = 1
     };
 
+    u64 translate_physical_address(u64 phys);
+
     Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages);
     Result<void> unmap(u64 virt);
     Result<u64> get_physical(u64 virt);

diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index 7d037bef..760508c4 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -47,6 +47,11 @@ namespace MMU
         return (T)(g_physical_mapping_base + (u64)phys);
     }
 
+    u64 translate_physical_address(u64 phys)
+    {
+        return g_physical_mapping_base + phys;
+    }
+
     void switch_page_directory(PageDirectory* dir)
     {
         asm volatile("mov %0, %%cr3" : : "r"(dir));

diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index bfa4e497..3160f808 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -103,20 +103,10 @@ namespace MemoryManager
         KernelVM::init();
         MMU::setup_initial_page_directory();
 
-        // NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
-        // there's no point in continuing.
-        auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;
-
-        auto virtual_bitmap_base =
-            KernelVM::alloc_several_pages(bitmap_pages)
-                .expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");
-
-        u64 phys = (u64)g_frame_bitmap.lock()->location();
-        map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
-            .expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");
-
         auto frame_bitmap = g_frame_bitmap.lock();
+        u64 phys = (u64)frame_bitmap->location();
+        auto virtual_bitmap_base = MMU::translate_physical_address(phys);
 
         frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
     }
-- 
2.34.1

From 3ec54fafdeff07d58562bfe65d1320193f03e124 Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 27 Feb 2023 12:55:15 +0100
Subject: [PATCH 6/6] x86_64/MMU: Do not allocate level 1 page tables for huge
 page entries

This would leak memory, since we would never end up using them.
---
 kernel/src/arch/x86_64/MMU.cpp | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index 760508c4..cc91b6c3 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -212,11 +212,16 @@ namespace MMU
         auto& l2 = l2_entry(l3, virt);
         if (!l2.present)
         {
-            const u64 addr = TRY(MemoryManager::alloc_frame());
             l2.present = true;
-            l2.set_address(addr);
-            memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
+
+            if (use_huge_pages == UseHugePages::No)
+            {
+                const u64 addr = TRY(MemoryManager::alloc_frame());
+                l2.set_address(addr);
+                memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
+            }
         }
+
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
 
-- 
2.34.1
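Net effect of the series (editor's closing sketch): once g_physical_mapping_base is set, any physical frame can be inspected through the direct map with plain pointer arithmetic — no temporary mappings and no CR3 switches — which is what both the frame-bitmap change in patch 5 and the fork()-friendliness promised in patch 1 rely on. peek_phys below is a hypothetical helper using the kernel's u8/u64 typedefs, not part of the series:

    // Read one byte of physical memory through the higher-half direct map.
    u8 peek_phys(u64 phys)
    {
        return *(const u8*)MMU::translate_physical_address(phys);
    }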