Map all physical memory into the higher half instead of using recursive mapping #23

Merged
apio merged 6 commits from map-physical-world into main 2023-02-27 11:59:51 +00:00
4 changed files with 35 additions and 15 deletions
Showing only changes of commit 426f6e4da8


@@ -7,6 +7,8 @@
 #error "Unknown architecture."
 #endif
 
+constexpr u64 PAGES_PER_HUGE_PAGE = ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE;
+
 namespace MMU
 {
     enum Flags
@@ -19,7 +21,13 @@ namespace MMU
         CacheDisable = 16,
     };
 
-    Result<void> map(u64 virt, u64 phys, int flags);
+    enum class UseHugePages
+    {
+        No = 0,
+        Yes = 1
+    };
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages);
     Result<u64> unmap(u64 virt);
     Result<u64> get_physical(u64 virt);
     Result<int> get_flags(u64 virt);
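
For illustration, a minimal sketch of how a caller might use the new overload (map_huge_example is a hypothetical name, not part of this commit, and the flag combination is arbitrary):

// Sketch only: map a single 2 MiB huge page. Assumes virt and phys are both
// 2 MiB-aligned; map() returns EEXIST if the range is already mapped.
Result<void> map_huge_example(u64 virt, u64 phys)
{
    int flags = MMU::Flags::ReadWrite | MMU::Flags::NoExecute;
    return MMU::map(virt, phys, flags, MMU::UseHugePages::Yes);
}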


@@ -167,7 +167,18 @@ namespace MMU
         return &l1;
     }
 
-    Result<void> map(u64 virt, u64 phys, int flags)
+    void set_page_table_entry_properties(PageTableEntry& entry, u64 phys, int flags)
+    {
+        entry.present = true;
+        entry.read_write = has_flag(flags, Flags::ReadWrite);
+        entry.user = has_flag(flags, Flags::User);
+        entry.write_through = has_flag(flags, Flags::WriteThrough);
+        entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
+        entry.no_execute = has_flag(flags, Flags::NoExecute);
+        entry.set_address(phys);
+    }
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages)
     {
         auto& l4 = l4_entry(virt);
         if (!l4.present)
@@ -191,7 +202,7 @@ namespace MMU
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;
 
-        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l3.larger_pages) return err(EEXIST);
 
         auto& l2 = l2_entry(l3, virt);
         if (!l2.present)
@@ -204,17 +215,17 @@ namespace MMU
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
 
-        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l2.larger_pages) return err(EEXIST);
+        else if (use_huge_pages == UseHugePages::Yes)
+        {
+            l2.larger_pages = true;
+            set_page_table_entry_properties(l2, phys, flags);
+            return {};
+        }
 
         auto& l1 = l1_entry(l2, virt);
         if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
-        l1.present = true;
-        l1.read_write = has_flag(flags, Flags::ReadWrite);
-        l1.user = has_flag(flags, Flags::User);
-        l1.write_through = has_flag(flags, Flags::WriteThrough);
-        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
-        l1.no_execute = has_flag(flags, Flags::NoExecute);
-        l1.set_address(phys);
+        set_page_table_entry_properties(l1, phys, flags);
 
         return {};
     }
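
The PageTableEntry type itself does not appear in this diff. As a rough sketch, assuming the standard x86_64 layout, larger_pages corresponds to the PS bit (bit 7), which makes an L2 entry map a 2 MiB region directly instead of pointing to an L1 table:

// Rough sketch of an x86_64 page table entry; the real PageTableEntry in this
// codebase is not shown here and may differ in naming.
struct PageTableEntry
{
    u64 present : 1;        // bit 0: entry is valid
    u64 read_write : 1;     // bit 1: writable if set
    u64 user : 1;           // bit 2: accessible from ring 3
    u64 write_through : 1;  // bit 3: PWT
    u64 cache_disabled : 1; // bit 4: PCD
    u64 accessed : 1;       // bit 5: set by the CPU on access
    u64 dirty : 1;          // bit 6: set by the CPU on write
    u64 larger_pages : 1;   // bit 7: PS; in an L2 entry, maps 2 MiB directly
    u64 ignore : 4;         // bits 8-11: available to the OS
    u64 address : 40;       // bits 12-51: physical frame number
    u64 available : 11;     // bits 52-62: available to the OS
    u64 no_execute : 1;     // bit 63: NX

    void set_address(u64 addr) { address = addr >> 12; }
};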


@@ -2,6 +2,7 @@
 #include <luna/Types.h>
 
 const usize ARCH_PAGE_SIZE = 4096;
+const usize ARCH_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // 2 MiB
 const u64 rindex = 0776; // recursive index
 const u64 sign = 0177777UL << 48; // sign extension
 
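
Two derived values are worth spelling out: ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE = 2 MiB / 4 KiB = 512, so one huge-page L2 entry covers exactly one full L1 table, and sign = 0xffff000000000000 fills bits 63:48 so that higher-half addresses stay canonical. The sketch below shows the phys-to-virt translation a full direct map enables, which is the point of dropping recursive mapping; PHYSICAL_MAP_BASE is a hypothetical name and value, since the base this PR actually chooses is not visible in this hunk:

// Sketch, not from this commit: with all physical memory mapped at a fixed
// higher-half base, translating a physical address is plain addition rather
// than a walk through the recursive mapping.
constexpr u64 PHYSICAL_MAP_BASE = sign | (256UL << 39); // hypothetical: 0xffff800000000000

inline void* phys_to_virt(u64 phys)
{
    return reinterpret_cast<void*>(PHYSICAL_MAP_BASE + phys);
}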


@@ -202,7 +202,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -225,7 +225,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -250,7 +250,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -275,7 +275,7 @@ namespace MemoryManager
         while (pages_mapped < count)
        {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
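
All four call sites above map page-by-page, so they pass MMU::UseHugePages::No. A hypothetical helper (map_range below is a sketch, not part of this commit) could opt into 2 MiB pages whenever alignment and the remaining size allow:

// Sketch, not from this commit: maps count 4 KiB pages, using a 2 MiB huge
// page whenever virt, phys and the remaining count permit it.
Result<void> map_range(u64 virt, u64 phys, usize count, int flags)
{
    while (count > 0)
    {
        bool aligned = (virt % ARCH_HUGE_PAGE_SIZE == 0) && (phys % ARCH_HUGE_PAGE_SIZE == 0);
        if (aligned && count >= PAGES_PER_HUGE_PAGE)
        {
            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::Yes));
            virt += ARCH_HUGE_PAGE_SIZE;
            phys += ARCH_HUGE_PAGE_SIZE;
            count -= PAGES_PER_HUGE_PAGE;
        }
        else
        {
            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
            virt += ARCH_PAGE_SIZE;
            phys += ARCH_PAGE_SIZE;
            count -= 1;
        }
    }
    return {};
}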