Map all physical memory into the higher half instead of using recursive mapping #23
@@ -7,6 +7,8 @@
 #error "Unknown architecture."
 #endif
 
+constexpr u64 PAGES_PER_HUGE_PAGE = ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE;
+
 namespace MMU
 {
     enum Flags
@@ -19,7 +21,15 @@ namespace MMU
         CacheDisable = 16,
     };
 
-    Result<void> map(u64 virt, u64 phys, int flags);
+    enum class UseHugePages
+    {
+        No = 0,
+        Yes = 1
+    };
+
+    u64 translate_physical_address(u64 phys);
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages);
     Result<u64> unmap(u64 virt);
     Result<u64> get_physical(u64 virt);
     Result<int> get_flags(u64 virt);
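API note: callers must now choose the page size explicitly, and the direct-map translation is exposed to the rest of the kernel. A hedged caller-side sketch against the declarations above (the addresses are examples only, not values from this PR):

```cpp
// Map one 4 KiB kernel page, then one 2 MiB huge page (example addresses).
// Both calls return Result<void>, so real code would wrap them in TRY().
auto r1 = MMU::map(0xFFFFFFFF90000000, 0x100000,
                   MMU::ReadWrite | MMU::NoExecute, MMU::UseHugePages::No);
auto r2 = MMU::map(0xFFFF800000000000, 0x0,
                   MMU::ReadWrite | MMU::NoExecute,
                   MMU::UseHugePages::Yes); // virt and phys should be 2 MiB-aligned
```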
@@ -1,6 +1,8 @@
 #include "arch/MMU.h"
+#include "Log.h"
 #include "memory/MemoryManager.h"
+#include "memory/MemoryMap.h"
 #include <luna/Alignment.h>
 #include <luna/CString.h>
 #include <luna/Result.h>
 #include <luna/ScopeGuard.h>
@@ -12,6 +14,10 @@
 PageDirectory* g_kernel_directory;
 u64 g_kernel_directory_virt;
 
+// The bootloader maps up to 16GiB of physical memory for us at address 0. Using this bootstrap mapping, we'll map all
+// physical memory at 0xFFFF800000000000.
+u64 g_physical_mapping_base = 0;
+
 void PageTableEntry::set_address(u64 addr)
 {
     this->address = (addr >> 12);
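The comment above is the heart of the change: once all of physical memory is mapped at a fixed higher-half base, physical-to-virtual translation degenerates into a single addition. A standalone illustration (the helper name `phys_to_virt` is mine; the constant is the one named in the comment):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t PHYSICAL_MAPPING_BASE = 0xFFFF800000000000;

// Physical address 0 maps to the base; everything else is offset linearly.
constexpr uint64_t phys_to_virt(uint64_t phys) { return PHYSICAL_MAPPING_BASE + phys; }

int main()
{
    printf("%#lx\n", phys_to_virt(0x1000)); // prints 0xffff800000001000
}
```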
@@ -36,90 +42,14 @@ static bool has_flag(int flags, MMU::Flags flag)
 
 namespace MMU
 {
-    constexpr PageDirectory* l4_table()
+    template <typename T> T translate_physical(T phys)
     {
-        constexpr u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12);
-        return (PageDirectory*)l4;
+        return (T)(g_physical_mapping_base + (u64)phys);
     }
 
-    constexpr u64 l4_index(u64 addr)
+    u64 translate_physical_address(u64 phys)
     {
-        return (addr >> 39) & 0777;
-    }
-
-    PageTableEntry& l4_entry(u64 addr)
-    {
-        return l4_table()->entries[l4_index(addr)];
-    }
-
-    constexpr PageDirectory* raw_l3_table(u64 l4)
-    {
-        const u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
-        return (PageDirectory*)l3;
-    }
-
-    constexpr PageDirectory* l3_table(u64 addr)
-    {
-        const u64 l4 = l4_index(addr);
-        return raw_l3_table(l4);
-    }
-
-    constexpr u64 l3_index(u64 addr)
-    {
-        return (addr >> 30) & 0777;
-    }
-
-    PageTableEntry& l3_entry(u64 addr)
-    {
-        return l3_table(addr)->entries[l3_index(addr)];
-    }
-
-    constexpr PageDirectory* raw_l2_table(u64 l4, u64 l3)
-    {
-        const u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
-        return (PageDirectory*)l2;
-    }
-
-    constexpr PageDirectory* l2_table(u64 addr)
-    {
-        const u64 l4 = l4_index(addr);
-        const u64 l3 = l3_index(addr);
-        return raw_l2_table(l4, l3);
-    }
-
-    constexpr u64 l2_index(u64 addr)
-    {
-        return (addr >> 21) & 0777;
-    }
-
-    PageTableEntry& l2_entry(u64 addr)
-    {
-        return l2_table(addr)->entries[l2_index(addr)];
-    }
-
-    constexpr PageDirectory* raw_l1_table(u64 l4, u64 l3, u64 l2)
-    {
-        const u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
-        return (PageDirectory*)l1;
-    }
-
-    constexpr PageDirectory* l1_table(u64 addr)
-    {
-        const u64 l4 = l4_index(addr);
-        const u64 l3 = l3_index(addr);
-        const u64 l2 = l2_index(addr);
-        return raw_l1_table(l4, l3, l2);
-    }
-
-    constexpr u64 l1_index(u64 addr)
-    {
-        return (addr >> 12) & 0777;
-    }
-
-    PageTableEntry& l1_entry(u64 addr)
-    {
-        return l1_table(addr)->entries[l1_index(addr)];
+        return g_physical_mapping_base + phys;
     }
 
     void switch_page_directory(PageDirectory* dir)
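Everything deleted here existed only to manufacture special virtual addresses through the recursive PML4 slot. For contrast, a standalone re-derivation of the deleted `l4_table()` arithmetic, which the one-line `translate_physical()` above makes unnecessary:

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t rindex = 0776;          // recursive PML4 slot (octal 0776 = 510), as in the old code
constexpr uint64_t sign = 0177777UL << 48; // sign extension for canonical addresses

int main()
{
    // Repeating the recursive index at every level made the PML4 itself addressable.
    const uint64_t l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12);
    printf("PML4 was visible at %#lx\n", l4); // every table lookup needed such a synthetic address
}
```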
@@ -134,6 +64,11 @@ namespace MMU
         return value;
     }
 
+    PageDirectory* get_virtual_page_directory()
+    {
+        return translate_physical(get_page_directory());
+    }
+
     void flush_all()
     {
         switch_page_directory(get_page_directory());
@@ -144,6 +79,26 @@ namespace MMU
         asm volatile("invlpg (%0)" : : "r"(page) : "memory");
     }
 
+    constexpr u64 l4_index(u64 addr)
+    {
+        return (addr >> 39) & 0777;
+    }
+
+    constexpr u64 l3_index(u64 addr)
+    {
+        return (addr >> 30) & 0777;
+    }
+
+    constexpr u64 l2_index(u64 addr)
+    {
+        return (addr >> 21) & 0777;
+    }
+
+    constexpr u64 l1_index(u64 addr)
+    {
+        return (addr >> 12) & 0777;
+    }
+
     int arch_flags_to_mmu(const PageTableEntry& entry)
     {
         int result = Flags::None;
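The index helpers survive the rewrite unchanged (each paging level consumes 9 bits of the virtual address, masked with octal 0777 = 511); only their position in the file moved. A standalone check, using the direct-map base chosen later in this PR as the sample address:

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t l4_index(uint64_t a) { return (a >> 39) & 0777; }
constexpr uint64_t l3_index(uint64_t a) { return (a >> 30) & 0777; }
constexpr uint64_t l2_index(uint64_t a) { return (a >> 21) & 0777; }
constexpr uint64_t l1_index(uint64_t a) { return (a >> 12) & 0777; }

int main()
{
    const uint64_t virt = 0xFFFF800000000000; // base of the new physical mapping
    // Lands in PML4 slot 256 with zero at every lower level.
    printf("%lu %lu %lu %lu\n", l4_index(virt), l3_index(virt), l2_index(virt), l1_index(virt));
}
```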
@@ -155,17 +110,46 @@ namespace MMU
         return result;
     }
 
+    PageTableEntry& l4_entry(u64 virt)
+    {
+        auto index = l4_index(virt);
+        return get_virtual_page_directory()->entries[index];
+    }
+
+    PageDirectory& page_table(const PageTableEntry& entry)
+    {
+        return *translate_physical((PageDirectory*)entry.get_address());
+    }
+
+    PageTableEntry& l3_entry(const PageTableEntry& entry, u64 virt)
+    {
+        auto index = l3_index(virt);
+        return page_table(entry).entries[index];
+    }
+
+    PageTableEntry& l2_entry(const PageTableEntry& entry, u64 virt)
+    {
+        auto index = l2_index(virt);
+        return page_table(entry).entries[index];
+    }
+
+    PageTableEntry& l1_entry(const PageTableEntry& entry, u64 virt)
+    {
+        auto index = l1_index(virt);
+        return page_table(entry).entries[index];
+    }
+
     Result<PageTableEntry*> find_entry(u64 virt)
     {
         const auto& l4 = l4_entry(virt);
         if (!l4.present) return err(EFAULT);
-        auto& l3 = l3_entry(virt);
+        auto& l3 = l3_entry(l4, virt);
         if (!l3.present) return err(EFAULT);
         if (l3.larger_pages) return &l3;
-        auto& l2 = l2_entry(virt);
+        auto& l2 = l2_entry(l3, virt);
         if (!l2.present) return err(EFAULT);
         if (l2.larger_pages) return &l2;
-        return &l1_entry(virt);
+        return &l1_entry(l2, virt);
     }
 
     Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags)
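These helpers replace address synthesis with an explicit parent-to-child walk: each level's entry is reached through `page_table(parent_entry)`, which is just a direct-map dereference. A hedged sketch of a translation composed from them, as it might look inside this file (present-bit checks elided, and `get_address()` is assumed to return the frame base, as `set_address()` above implies):

```cpp
// Sketch only: mirrors the find_entry() walk plus the offset math get_physical() needs.
u64 virt_to_phys_unchecked(u64 virt)
{
    auto& l4 = l4_entry(virt);        // top level comes from the CR3-backed directory
    auto& l3 = l3_entry(l4, virt);    // each lower level goes through page_table(parent)
    if (l3.larger_pages) // 1 GiB leaf: offset is the low 30 bits
        return l3.get_address() + (virt & (512 * ARCH_HUGE_PAGE_SIZE - 1));
    auto& l2 = l2_entry(l3, virt);
    if (l2.larger_pages) // 2 MiB leaf: offset is the low 21 bits
        return l2.get_address() + (virt & (ARCH_HUGE_PAGE_SIZE - 1));
    auto& l1 = l1_entry(l2, virt);
    return l1.get_address() + (virt & (ARCH_PAGE_SIZE - 1)); // 4 KiB leaf
}
```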
@@ -174,21 +158,32 @@ namespace MMU
         if (!l4.present) return err(EFAULT);
         if (flags & Flags::ReadWrite) l4.read_write = true;
         if (flags & Flags::User) l4.user = true;
-        auto& l3 = l3_entry(virt);
+        auto& l3 = l3_entry(l4, virt);
         if (!l3.present) return err(EFAULT);
         if (l3.larger_pages) return &l3;
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;
-        auto& l2 = l2_entry(virt);
+        auto& l2 = l2_entry(l3, virt);
         if (!l2.present) return err(EFAULT);
         if (l2.larger_pages) return &l2;
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
-        auto& l1 = l1_entry(virt);
+        auto& l1 = l1_entry(l2, virt);
         return &l1;
     }
 
-    Result<void> map(u64 virt, u64 phys, int flags)
+    void set_page_table_entry_properties(PageTableEntry& entry, u64 phys, int flags)
+    {
+        entry.present = true;
+        entry.read_write = has_flag(flags, Flags::ReadWrite);
+        entry.user = has_flag(flags, Flags::User);
+        entry.write_through = has_flag(flags, Flags::WriteThrough);
+        entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
+        entry.no_execute = has_flag(flags, Flags::NoExecute);
+        entry.set_address(phys);
+    }
+
+    Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages)
     {
         auto& l4 = l4_entry(virt);
         if (!l4.present)
@@ -196,46 +191,51 @@ namespace MMU
             const u64 addr = TRY(MemoryManager::alloc_frame());
             l4.present = true;
             l4.set_address(addr);
-            memset(l3_table(virt), 0, ARCH_PAGE_SIZE);
+            memset(&page_table(l4), 0, ARCH_PAGE_SIZE);
         }
         if (flags & Flags::ReadWrite) l4.read_write = true;
         if (flags & Flags::User) l4.user = true;
 
-        auto& l3 = l3_entry(virt);
+        auto& l3 = l3_entry(l4, virt);
         if (!l3.present)
         {
             const u64 addr = TRY(MemoryManager::alloc_frame());
             l3.present = true;
             l3.set_address(addr);
-            memset(l2_table(virt), 0, ARCH_PAGE_SIZE);
+            memset(&page_table(l3), 0, ARCH_PAGE_SIZE);
         }
         if (flags & Flags::ReadWrite) l3.read_write = true;
         if (flags & Flags::User) l3.user = true;
 
-        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l3.larger_pages) return err(EEXIST);
 
-        auto& l2 = l2_entry(virt);
+        auto& l2 = l2_entry(l3, virt);
         if (!l2.present)
         {
-            const u64 addr = TRY(MemoryManager::alloc_frame());
             l2.present = true;
-            l2.set_address(addr);
-            memset(l1_table(virt), 0, ARCH_PAGE_SIZE);
+
+            if (use_huge_pages == UseHugePages::No)
+            {
+                const u64 addr = TRY(MemoryManager::alloc_frame());
+                l2.set_address(addr);
+                memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
+            }
         }
+
         if (flags & Flags::ReadWrite) l2.read_write = true;
         if (flags & Flags::User) l2.user = true;
 
-        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM
+        if (l2.larger_pages) return err(EEXIST);
+        else if (use_huge_pages == UseHugePages::Yes)
+        {
+            l2.larger_pages = true;
+            set_page_table_entry_properties(l2, phys, flags);
+            return {};
+        }
 
-        auto& l1 = l1_entry(virt);
+        auto& l1 = l1_entry(l2, virt);
         if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
-        l1.present = true;
-        l1.read_write = has_flag(flags, Flags::ReadWrite);
-        l1.user = has_flag(flags, Flags::User);
-        l1.write_through = has_flag(flags, Flags::WriteThrough);
-        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
-        l1.no_execute = has_flag(flags, Flags::NoExecute);
-        l1.set_address(phys);
+        set_page_table_entry_properties(l1, phys, flags);
         return {};
     }
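A consequence of this control flow: with `UseHugePages::Yes` the L2 entry itself becomes the leaf (`larger_pages` set, its address pointing at a 2 MiB frame), so no L1 table is ever allocated for that range, and an existing larger mapping now reports `EEXIST` instead of the old `EFIXME`. A hedged caller-side sketch; alignment is the caller's responsibility, since `set_address()` only stores bits 12 and up:

```cpp
// One call maps 2 MiB at once; the same region would otherwise take
// PAGES_PER_HUGE_PAGE (512) 4 KiB map() calls plus an extra page-table frame.
TRY(MMU::map(virt, phys, MMU::ReadWrite | MMU::NoExecute, MMU::UseHugePages::Yes));
```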
@@ -281,39 +281,39 @@ namespace MMU
         PageDirectory* const dir = get_page_directory();
         g_kernel_directory = dir;
 
-        const u64 paddr = (u64)dir;
-        PageTableEntry& recursive_entry = dir->entries[rindex];
-        recursive_entry.read_write = true;
-        recursive_entry.present = true;
-        recursive_entry.set_address(paddr);
-        flush_all();
-
-        g_kernel_directory_virt =
-            MemoryManager::get_kernel_mapping_for_frames((u64)dir, 1, MMU::ReadWrite | MMU::NoExecute).value();
+        const u64 physical_memory_base = 0xFFFF800000000000;
+
+        MemoryMapIterator iter;
+        const MemoryMapEntry highest_entry = iter.highest();
+
+        const u64 physical_memory_size = highest_entry.address() + highest_entry.size();
+
+        check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
+        MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
+                                          MMU::ReadWrite | MMU::NoExecute);
+
+        g_physical_mapping_base = physical_memory_base;
+
+        g_kernel_directory_virt = translate_physical((u64)g_kernel_directory);
 
         kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory);
     }
 
     Result<PageDirectory*> create_page_directory_for_userspace()
     {
-        const u64 directory_virt = TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite | MMU::NoExecute));
-        const u64 directory_phys = MMU::get_physical(directory_virt).value();
+        const u64 directory_phys = TRY(MemoryManager::alloc_frame());
+        const u64 directory_virt = translate_physical(directory_phys);
 
         PageDirectory* const directory = (PageDirectory*)directory_virt;
         memset(directory, 0, ARCH_PAGE_SIZE);
-        PageTableEntry& recursive_entry = directory->entries[rindex];
-        recursive_entry.read_write = true;
-        recursive_entry.present = true;
-        recursive_entry.set_address(directory_phys);
-
-        constexpr auto HALF_PAGE = ARCH_PAGE_SIZE / 2;
-        // Copy the upper part of the page directory (higher half)
-        memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr((PageDirectory*)g_kernel_directory_virt, HALF_PAGE),
-               HALF_PAGE);
 
         kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);
 
-        // From now on, we're only going to use the physical address, since accessing the PageDirectory will be dealt
-        // with using recursive mapping. So let's make sure we don't leak any VM.
-        MemoryManager::unmap_weak_and_free_vm(directory_virt, 1);
+        directory->entries[511] = ((PageDirectory*)g_kernel_directory_virt)->entries[511];
 
         return (PageDirectory*)directory_phys;
     }
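Two things happen here. The direct map is sized from the end of the highest physical memory-map entry and built entirely out of 2 MiB pages, with a hard `check` that the total is huge-page divisible rather than rounding up. And user directories now share the kernel half by aliasing PML4 entry 511 instead of memcpy'ing half the directory, so since the entry points at the same L3 table, later kernel mappings under that slot appear in every process automatically. A standalone sketch of the sizing with hypothetical firmware values:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t huge = 2 * 1024 * 1024;
    // Hypothetical memory map whose highest entry ends exactly at the 4 GiB boundary.
    const uint64_t highest_address = 0xFFE00000, highest_size = 0x200000;
    const uint64_t physical_memory_size = highest_address + highest_size;
    // setup_initial_page_directory() asserts divisibility instead of rounding:
    if (physical_memory_size % huge != 0) return 1;
    printf("%lu huge pages\n", physical_memory_size / huge); // 2048 for 4 GiB
}
```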
@@ -321,16 +321,11 @@ namespace MMU
     {
         check(directory);
 
-        // Needed in order to access page tables using the recursive mapping system.
-        switch_page_directory(directory);
+        switch_page_directory(g_kernel_directory);
 
-        auto guard = make_scope_guard([&] {
-            check(g_kernel_directory);
-            switch_page_directory(g_kernel_directory);
-            MemoryManager::free_frame((u64)directory);
-        });
+        auto guard = make_scope_guard([directory] { MemoryManager::free_frame((u64)directory); });
 
-        PageDirectory* const table = l4_table();
+        PageDirectory* const table = translate_physical(directory);
 
         // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages)
         for (u64 i = 0; i < 510; i++)
@@ -338,7 +333,7 @@ namespace MMU
             PageTableEntry& l4 = table->entries[i];
             if (!l4.present) continue;
 
-            PageDirectory* const pdp = raw_l3_table(i);
+            PageDirectory* const pdp = &page_table(l4);
 
             for (u64 j = 0; j < 512; j++)
             {
@@ -350,7 +345,7 @@ namespace MMU
                     TRY(MemoryManager::free_frame(l3.get_address()));
                 }
 
                PageDirectory* const pd = raw_l2_table(i, j);
+                PageDirectory* const pd = &page_table(l3);
 
                 for (u64 k = 0; k < 512; k++)
                 {
@@ -362,7 +357,7 @@ namespace MMU
                         TRY(MemoryManager::free_frame(l2.get_address()));
                     }
 
-                    PageDirectory* const pt = raw_l1_table(i, j, k);
+                    PageDirectory* const pt = &page_table(l2);
 
                     for (u64 l = 0; l < 512; l++)
                     {
@@ -2,6 +2,7 @@
 #include <luna/Types.h>
 
 const usize ARCH_PAGE_SIZE = 4096;
+const usize ARCH_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // 2 MiB
 
 const u64 rindex = 0776; // recursive index
 const u64 sign = 0177777UL << 48; // sign extension
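For reference, with these two constants the `PAGES_PER_HUGE_PAGE` value introduced in the first hunk works out to 512, exactly one L1 table's worth of 4 KiB entries. A trivial standalone check:

```cpp
#include <cstdio>

int main()
{
    const unsigned long page = 4096, huge = 2 * 1024 * 1024;
    printf("%lu\n", huge / page); // 512: one full L1 table per huge page
}
```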
@@ -103,20 +103,10 @@ namespace MemoryManager
         KernelVM::init();
         MMU::setup_initial_page_directory();
 
-        // NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
-        // there's no point in continuing.
-        auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;
-
-        auto virtual_bitmap_base =
-            KernelVM::alloc_several_pages(bitmap_pages)
-                .expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");
-
-        u64 phys = (u64)g_frame_bitmap.lock()->location();
-        map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
-            .expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");
+        auto frame_bitmap = g_frame_bitmap.lock();
+        u64 phys = (u64)frame_bitmap->location();
+
+        auto virtual_bitmap_base = MMU::translate_physical_address(phys);
+        frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
     }
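Since the frame bitmap's backing memory is already covered by the direct map, relocating it stops being a mapping operation and becomes pointer arithmetic: the two fallible steps (`KernelVM::alloc_several_pages` plus `map_frames_at`) collapse into one computation that cannot fail. A minimal sketch of the idea, assuming `Bitmap::initialize` just rebinds the storage pointer as the call above suggests:

```cpp
#include <cstdint>

constexpr uint64_t g_physical_mapping_base = 0xFFFF800000000000;

// No VM allocation, no page-table writes: the frames are already mapped.
void* bitmap_view(uint64_t bitmap_phys)
{
    return reinterpret_cast<void*>(g_physical_mapping_base + bitmap_phys);
}
```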
@@ -202,7 +192,7 @@ namespace MemoryManager
 
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
@@ -213,6 +203,29 @@ namespace MemoryManager
         return {};
     }
 
+    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+        CHECK_PAGE_ALIGNED(phys);
+
+        usize pages_mapped = 0;
+
+        // Let's clean up after ourselves if we fail.
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak_huge(virt, pages_mapped); });
+
+        while (pages_mapped < count)
+        {
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::Yes));
+            virt += ARCH_HUGE_PAGE_SIZE;
+            phys += ARCH_HUGE_PAGE_SIZE;
+            pages_mapped++;
+        }
+
+        guard.deactivate();
+
+        return {};
+    }
+
     Result<u64> alloc_at(u64 virt, usize count, int flags)
     {
         CHECK_PAGE_ALIGNED(virt);
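The new bulk helper has the same shape as `map_frames_at`, but `count` is in huge pages and the cleanup guard unwinds partial work with the matching 2 MiB stride. A hedged usage sketch; the 16 GiB figure is illustrative, echoing the bootstrap limit mentioned in the MMU.cpp comment:

```cpp
// Direct-map the first 16 GiB of RAM: 16 GiB / 2 MiB = 8192 huge pages.
const u64 bytes = 16ULL * 1024 * 1024 * 1024;
TRY(MemoryManager::map_huge_frames_at(0xFFFF800000000000, 0, bytes / ARCH_HUGE_PAGE_SIZE,
                                      MMU::ReadWrite | MMU::NoExecute));
```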
@@ -225,7 +238,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -250,7 +263,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags));
+            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -275,7 +288,7 @@ namespace MemoryManager
 
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags));
+            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
@@ -322,6 +335,19 @@ namespace MemoryManager
         return {};
     }
 
+    Result<void> unmap_weak_huge(u64 virt, usize count)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+
+        while (count--)
+        {
+            TRY(MMU::unmap(virt));
+            virt += ARCH_HUGE_PAGE_SIZE;
+        }
+
+        return {};
+    }
+
     Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
     {
         CHECK_PAGE_ALIGNED(virt);
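The huge variant differs from `unmap_weak` only in stride: one `MMU::unmap` call retires a whole 2 MiB leaf, so the cursor advances by `ARCH_HUGE_PAGE_SIZE` instead of `ARCH_PAGE_SIZE`. For example, the scope guard in `map_huge_frames_at` firing after three successful iterations effectively runs:

```cpp
// Unmaps virt, virt + 2 MiB and virt + 4 MiB. "Weak" means the underlying
// frames are not freed; ownership of the physical memory stays with the caller.
MemoryManager::unmap_weak_huge(virt, 3);
```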
@@ -53,6 +53,7 @@ namespace MemoryManager
     }
 
     Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags);
+    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags);
 
     Result<u64> alloc_at(u64 virt, usize count, int flags);
     Result<u64> alloc_for_kernel(usize count, int flags);
@@ -64,6 +65,8 @@ namespace MemoryManager
     Result<void> unmap_weak(u64 virt, usize count);
     Result<void> unmap_weak_and_free_vm(u64 virt, usize count);
 
+    Result<void> unmap_weak_huge(u64 virt, usize count);
+
     usize free();
     usize used();
     usize reserved();