Compare commits


3 Commits

SHA1        Message                                                                     Date
752dfdbf1c  MemoryManager: Reuse the existing mapping of physical memory that MMU has  2023-02-27 12:51:29 +01:00
8df441064f  x86_64/MMU: Map the physical address space using huge pages                 2023-02-27 12:47:17 +01:00
426f6e4da8  x86_64: Allow mapping huge pages, but don't do it yet                       2023-02-27 12:41:28 +01:00
5 changed files with 86 additions and 30 deletions

File 1/5: architecture-independent MMU interface header

@@ -7,6 +7,8 @@
 #error "Unknown architecture."
 #endif

+constexpr u64 PAGES_PER_HUGE_PAGE = ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE;
+
 namespace MMU
 {
     enum Flags
@@ -19,7 +21,15 @@ namespace MMU
         CacheDisable = 16,
     };

-    Result<void> map(u64 virt, u64 phys, int flags);
+    enum class UseHugePages
+    {
+        No = 0,
+        Yes = 1
+    };
+
+    u64 translate_physical_address(u64 phys);
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages);
     Result<u64> unmap(u64 virt);
     Result<u64> get_physical(u64 virt);
     Result<int> get_flags(u64 virt);
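
For orientation, here is a hedged sketch of how a caller might use the widened interface. It assumes Luna's Result<T>/TRY machinery and the flag values from this header; the function name and the addresses are made up for illustration.

    // Sketch only: assumes Luna's <arch/MMU.h>, Result<T> and TRY(). The
    // addresses below are hypothetical and must respect the usual alignment
    // rules (4 KiB for normal pages, 2 MiB for huge pages).
    Result<void> map_example()
    {
        // Existing call sites keep their old behavior by passing UseHugePages::No.
        TRY(MMU::map(0xffffffffc0000000, 0x100000, MMU::ReadWrite | MMU::NoExecute, MMU::UseHugePages::No));

        // A 2 MiB mapping stops the page-table walk at the L2 level, so no L1
        // table is allocated for this range.
        TRY(MMU::map(0xffffffffc0200000, 0x200000, MMU::ReadWrite | MMU::NoExecute, MMU::UseHugePages::Yes));

        return {};
    }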

File 2/5: x86_64 MMU implementation

@@ -47,6 +47,11 @@ namespace MMU
         return (T)(g_physical_mapping_base + (u64)phys);
     }

+    u64 translate_physical_address(u64 phys)
+    {
+        return g_physical_mapping_base + phys;
+    }
+
     void switch_page_directory(PageDirectory* dir)
     {
         asm volatile("mov %0, %%cr3" : : "r"(dir));
@@ -167,7 +172,18 @@ namespace MMU
         return &l1;
     }

-    Result<void> map(u64 virt, u64 phys, int flags)
+    void set_page_table_entry_properties(PageTableEntry& entry, u64 phys, int flags)
+    {
+        entry.present = true;
+        entry.read_write = has_flag(flags, Flags::ReadWrite);
+        entry.user = has_flag(flags, Flags::User);
+        entry.write_through = has_flag(flags, Flags::WriteThrough);
+        entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
+        entry.no_execute = has_flag(flags, Flags::NoExecute);
+        entry.set_address(phys);
+    }
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages)
     {
         auto& l4 = l4_entry(virt);
         if (!l4.present)
@@ -191,7 +207,7 @@ namespace MMU
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;

-        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l3.larger_pages) return err(EEXIST);

         auto& l2 = l2_entry(l3, virt);
         if (!l2.present)
@@ -204,17 +220,17 @@ namespace MMU
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;

-        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l2.larger_pages) return err(EEXIST);
+        else if (use_huge_pages == UseHugePages::Yes)
+        {
+            l2.larger_pages = true;
+            set_page_table_entry_properties(l2, phys, flags);
+            return {};
+        }

         auto& l1 = l1_entry(l2, virt);
         if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.

-        l1.present = true;
-        l1.read_write = has_flag(flags, Flags::ReadWrite);
-        l1.user = has_flag(flags, Flags::User);
-        l1.write_through = has_flag(flags, Flags::WriteThrough);
-        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
-        l1.no_execute = has_flag(flags, Flags::NoExecute);
-        l1.set_address(phys);
+        set_page_table_entry_properties(l1, phys, flags);

         return {};
     }
@@ -267,8 +283,8 @@ namespace MMU
         const u64 physical_memory_size = highest_entry.address() + highest_entry.size();

-        // FIXME: Do this using 2MiB huge pages.
-        MemoryManager::map_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_PAGE_SIZE,
+        check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
+        MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
                                      MMU::ReadWrite | MMU::NoExecute);

         g_physical_mapping_base = physical_memory_base;
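
The payoff of the last hunk is easiest to see with numbers. A standalone back-of-the-envelope calculation (the 1 GiB figure is an arbitrary example, not taken from the diff):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t page_size = 4096;                  // ARCH_PAGE_SIZE
        const uint64_t huge_page_size = 2 * 1024 * 1024;  // ARCH_HUGE_PAGE_SIZE
        const uint64_t physical_memory_size = 1ull << 30; // example: 1 GiB of RAM

        // Old approach: one MMU::map() call per 4 KiB frame, plus L1 tables for the whole range.
        printf("4 KiB mappings needed: %llu\n", (unsigned long long)(physical_memory_size / page_size)); // 262144

        // New approach: one call per 2 MiB huge page, and no L1 tables at all.
        printf("2 MiB mappings needed: %llu\n", (unsigned long long)(physical_memory_size / huge_page_size)); // 512

        return 0;
    }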

File 3/5: x86_64 architecture constants

@@ -2,6 +2,7 @@
 #include <luna/Types.h>

 const usize ARCH_PAGE_SIZE = 4096;
+const usize ARCH_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // 2 MiB

 const u64 rindex = 0776; // recursive index
 const u64 sign = 0177777UL << 48; // sign extension
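
With this constant, PAGES_PER_HUGE_PAGE from the first file comes out to 512, matching the 512-entry L1 table that a huge-page L2 entry replaces. A compile-time sanity check of that arithmetic, with usize aliased locally as a stand-in for luna/Types.h:

    #include <cstddef>

    using usize = std::size_t; // stand-in; in Luna this comes from <luna/Types.h>

    const usize ARCH_PAGE_SIZE = 4096;
    const usize ARCH_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // 2 MiB

    // An L2 entry with the PS ("larger pages") bit set covers as much address
    // space as a full 512-entry L1 table: 512 * 4 KiB = 2 MiB.
    static_assert(ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE == 512, "PAGES_PER_HUGE_PAGE should be 512");
    static_assert(ARCH_HUGE_PAGE_SIZE % ARCH_PAGE_SIZE == 0, "a huge page must be a whole number of pages");

    int main() { return 0; }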

File 4/5: MemoryManager implementation

@@ -103,20 +103,10 @@ namespace MemoryManager
         KernelVM::init();
         MMU::setup_initial_page_directory();

-        // NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
-        // there's no point in continuing.
-        auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;
-
-        auto virtual_bitmap_base =
-            KernelVM::alloc_several_pages(bitmap_pages)
-                .expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");
-
-        u64 phys = (u64)g_frame_bitmap.lock()->location();
-        map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
-            .expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");
-
         auto frame_bitmap = g_frame_bitmap.lock();
+        u64 phys = (u64)frame_bitmap->location();
+        auto virtual_bitmap_base = MMU::translate_physical_address(phys);
+
         frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
     }
@@ -202,7 +192,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
@@ -213,6 +203,29 @@ namespace MemoryManager
         return {};
     }

+    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+        CHECK_PAGE_ALIGNED(phys);
+
+        usize pages_mapped = 0;
+
+        // Let's clean up after ourselves if we fail.
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak_huge(virt, pages_mapped); });
+
+        while (pages_mapped < count)
+        {
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::Yes));
+            virt += ARCH_HUGE_PAGE_SIZE;
+            phys += ARCH_HUGE_PAGE_SIZE;
+            pages_mapped++;
+        }
+
+        guard.deactivate();
+
+        return {};
+    }
+
     Result<u64> alloc_at(u64 virt, usize count, int flags)
     {
         CHECK_PAGE_ALIGNED(virt);
@@ -225,7 +238,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -250,7 +263,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -275,7 +288,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
@@ -322,6 +335,19 @@ namespace MemoryManager
         return {};
     }

+    Result<void> unmap_weak_huge(u64 virt, usize count)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+
+        while (count--)
+        {
+            TRY(MMU::unmap(virt));
+            virt += ARCH_HUGE_PAGE_SIZE;
+        }
+
+        return {};
+    }
+
     Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
     {
         CHECK_PAGE_ALIGNED(virt);
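
map_huge_frames_at carries over the rollback idiom from map_frames_at: arm a guard that undoes partial work if an error propagates out of the loop, and disarm it on success. Below is a minimal self-contained illustration of that pattern; it is not necessarily how luna/ScopeGuard.h implements make_scope_guard.

    #include <cstdio>
    #include <utility>

    // Minimal scope guard: runs `f` on destruction unless deactivated.
    // (Relies on C++17 guaranteed copy elision when returned from the factory.)
    template <typename F> class ScopeGuard
    {
      public:
        explicit ScopeGuard(F f) : m_f(std::move(f)) {}
        ~ScopeGuard()
        {
            if (m_active) m_f();
        }
        void deactivate() { m_active = false; }

      private:
        F m_f;
        bool m_active { true };
    };

    template <typename F> ScopeGuard<F> make_scope_guard(F f)
    {
        return ScopeGuard<F>(std::move(f));
    }

    int main()
    {
        int pages_mapped = 0;
        {
            // If this scope is left early (as TRY does by returning an error),
            // the guard would roll back the pages mapped so far.
            auto guard = make_scope_guard([&pages_mapped] { printf("rolling back %d pages\n", pages_mapped); });
            pages_mapped = 3;
            // Success path: disarm the guard so the mappings stay.
            guard.deactivate();
        }
        return 0;
    }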

File 5/5: MemoryManager interface header

@@ -53,6 +53,7 @@ namespace MemoryManager
     }

     Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags);
+    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags);

     Result<u64> alloc_at(u64 virt, usize count, int flags);
     Result<u64> alloc_for_kernel(usize count, int flags);
@@ -64,6 +65,8 @@ namespace MemoryManager
     Result<void> unmap_weak(u64 virt, usize count);
     Result<void> unmap_weak_and_free_vm(u64 virt, usize count);

+    Result<void> unmap_weak_huge(u64 virt, usize count);
+
     usize free();
     usize used();
     usize reserved();