#include "arch/MMU.h"
|
|
#include "Log.h"
|
|
#include "memory/MemoryManager.h"
|
|
#include "memory/MemoryMap.h"
|
|
#include <luna/Alignment.h>
|
|
#include <luna/CString.h>
|
|
#include <luna/Result.h>
|
|
#include <luna/ScopeGuard.h>
|
|
#include <luna/SystemError.h>
|
|
|
|
PageDirectory* g_kernel_directory;
u64 g_kernel_directory_virt;

// The bootloader maps up to 16GiB of physical memory for us at address 0. Using this bootstrap mapping, we'll map all
// physical memory at 0xFFFF800000000000.
u64 g_physical_mapping_base = 0;

#pragma GCC push_options
#pragma GCC diagnostic ignored "-Wconversion"

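// Page table entries store a physical frame number: bits 12 and up of a
// 4KiB-aligned physical address. The pragmas above silence -Wconversion for
// the narrowing stores into the 'address' bitfield.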
void PageTableEntry::set_address(u64 addr)
{
    this->address = (addr >> 12);
}

u64 PageTableEntry::get_address() const
{
    return (u64)this->address << 12;
}

#pragma GCC pop_options

void PageTableEntry::clear()
{
    raw = 0;
}

static bool has_flag(int flags, MMU::Flags flag)
{
    return flags & flag;
}

namespace MMU
{
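    // All physical memory is mapped contiguously at g_physical_mapping_base
    // (0 during early boot, 0xFFFF800000000000 once setup_initial_page_directory
    // has run), so accessing a physical address is a single addition.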
template <typename T> T translate_physical(T phys)
|
|
{
|
|
return (T)(g_physical_mapping_base + (u64)phys);
|
|
}
|
|
|
|
u64 translate_physical_address(u64 phys)
|
|
{
|
|
return g_physical_mapping_base + phys;
|
|
}
|
|
|
|
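    // The root of the paging hierarchy lives in CR3, so switching address
    // spaces is a single register write. Writing CR3 also flushes all
    // non-global TLB entries, which is exactly what flush_all() relies on.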
    void switch_page_directory(PageDirectory* dir)
    {
        asm volatile("mov %0, %%cr3" : : "r"(dir));
    }

    PageDirectory* get_page_directory()
    {
        PageDirectory* value;
        asm volatile("mov %%cr3, %0" : "=r"(value));
        return value;
    }

    PageDirectory* get_virtual_page_directory()
    {
        return translate_physical(get_page_directory());
    }

    void flush_all()
    {
        switch_page_directory(get_page_directory());
    }

    void flush_page(u64 page)
    {
        asm volatile("invlpg (%0)" : : "r"(page) : "memory");
    }

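    // A 48-bit virtual address decomposes into four 9-bit table indices plus
    // a 12-bit page offset; 0777 (octal) masks the low 9 bits (0x1FF):
    //   bits 39-47: L4 index, 30-38: L3, 21-29: L2, 12-20: L1.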
    constexpr u64 l4_index(u64 addr)
    {
        return (addr >> 39) & 0777;
    }

    constexpr u64 l3_index(u64 addr)
    {
        return (addr >> 30) & 0777;
    }

    constexpr u64 l2_index(u64 addr)
    {
        return (addr >> 21) & 0777;
    }

    constexpr u64 l1_index(u64 addr)
    {
        return (addr >> 12) & 0777;
    }

    int arch_flags_to_mmu(const PageTableEntry& entry)
    {
        int result = Flags::None;
        if (entry.read_write) result |= Flags::ReadWrite;
        if (entry.user) result |= Flags::User;
        if (entry.no_execute) result |= Flags::NoExecute;
        if (entry.write_through) result |= Flags::WriteThrough;
        if (entry.cache_disabled) result |= Flags::CacheDisable;
        return result;
    }

PageTableEntry& l4_entry(u64 virt)
|
|
{
|
|
auto index = l4_index(virt);
|
|
return get_virtual_page_directory()->entries[index];
|
|
}
|
|
|
|
PageDirectory& page_table(const PageTableEntry& entry)
|
|
{
|
|
return *translate_physical((PageDirectory*)entry.get_address());
|
|
}
|
|
|
|
PageTableEntry& l3_entry(const PageTableEntry& entry, u64 virt)
|
|
{
|
|
auto index = l3_index(virt);
|
|
return page_table(entry).entries[index];
|
|
}
|
|
|
|
PageTableEntry& l2_entry(const PageTableEntry& entry, u64 virt)
|
|
{
|
|
auto index = l2_index(virt);
|
|
return page_table(entry).entries[index];
|
|
}
|
|
|
|
PageTableEntry& l1_entry(const PageTableEntry& entry, u64 virt)
|
|
{
|
|
auto index = l1_index(virt);
|
|
return page_table(entry).entries[index];
|
|
}
|
|
|
|
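    // Walk the hierarchy for 'virt' and return the entry that actually maps
    // it: an L3 or L2 entry if the address is covered by a huge page,
    // otherwise the L1 entry. Fails with EFAULT if a level is not present.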
Result<PageTableEntry*> find_entry(u64 virt)
|
|
{
|
|
const auto& l4 = l4_entry(virt);
|
|
if (!l4.present) return err(EFAULT);
|
|
auto& l3 = l3_entry(l4, virt);
|
|
if (!l3.present) return err(EFAULT);
|
|
if (l3.larger_pages) return &l3;
|
|
auto& l2 = l2_entry(l3, virt);
|
|
if (!l2.present) return err(EFAULT);
|
|
if (l2.larger_pages) return &l2;
|
|
return &l1_entry(l2, virt);
|
|
}
|
|
|
|
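    // x86_64 ANDs the ReadWrite and User bits across every level of the walk:
    // a page is only writable or user-accessible if all entries on its path
    // permit it. So these permissive flags must be propagated down the
    // hierarchy; the restrictive per-page flags are applied at the final
    // entry by the caller.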
Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags)
|
|
{
|
|
auto& l4 = l4_entry(virt);
|
|
if (!l4.present) return err(EFAULT);
|
|
if (flags & Flags::ReadWrite) l4.read_write = true;
|
|
if (flags & Flags::User) l4.user = true;
|
|
auto& l3 = l3_entry(l4, virt);
|
|
if (!l3.present) return err(EFAULT);
|
|
if (l3.larger_pages) return &l3;
|
|
if (flags & Flags::ReadWrite) l3.read_write = true;
|
|
if (flags & Flags::User) l3.user = true;
|
|
auto& l2 = l2_entry(l3, virt);
|
|
if (!l2.present) return err(EFAULT);
|
|
if (l2.larger_pages) return &l2;
|
|
if (flags & Flags::ReadWrite) l2.read_write = true;
|
|
if (flags & Flags::User) l2.user = true;
|
|
auto& l1 = l1_entry(l2, virt);
|
|
return &l1;
|
|
}
|
|
|
|
    void set_page_table_entry_properties(PageTableEntry& entry, u64 phys, int flags)
    {
        entry.present = true;
        entry.read_write = has_flag(flags, Flags::ReadWrite);
        entry.user = has_flag(flags, Flags::User);
        entry.write_through = has_flag(flags, Flags::WriteThrough);
        entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
        entry.no_execute = has_flag(flags, Flags::NoExecute);
        entry.set_address(phys);
    }

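    // Map one 4KiB page (or one 2MiB huge page when use_huge_pages is
    // UseHugePages::Yes) at 'virt', allocating any missing intermediate page
    // tables along the way. Fails with ENOMEM if the address is already
    // mapped or covered by an existing huge page.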
Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages)
|
|
{
|
|
auto& l4 = l4_entry(virt);
|
|
if (!l4.present)
|
|
{
|
|
const u64 addr = TRY(MemoryManager::alloc_frame());
|
|
l4.present = true;
|
|
l4.set_address(addr);
|
|
memset(&page_table(l4), 0, ARCH_PAGE_SIZE);
|
|
}
|
|
if (flags & Flags::ReadWrite) l4.read_write = true;
|
|
if (flags & Flags::User) l4.user = true;
|
|
|
|
auto& l3 = l3_entry(l4, virt);
|
|
if (!l3.present)
|
|
{
|
|
const u64 addr = TRY(MemoryManager::alloc_frame());
|
|
l3.present = true;
|
|
l3.set_address(addr);
|
|
memset(&page_table(l3), 0, ARCH_PAGE_SIZE);
|
|
}
|
|
if (flags & Flags::ReadWrite) l3.read_write = true;
|
|
if (flags & Flags::User) l3.user = true;
|
|
|
|
if (l3.larger_pages) return err(ENOMEM);
|
|
|
|
auto& l2 = l2_entry(l3, virt);
|
|
if (!l2.present)
|
|
{
|
|
l2.present = true;
|
|
|
|
if (use_huge_pages == UseHugePages::No)
|
|
{
|
|
const u64 addr = TRY(MemoryManager::alloc_frame());
|
|
l2.set_address(addr);
|
|
memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
|
|
}
|
|
}
|
|
|
|
if (flags & Flags::ReadWrite) l2.read_write = true;
|
|
if (flags & Flags::User) l2.user = true;
|
|
|
|
if (l2.larger_pages) return err(ENOMEM);
|
|
else if (use_huge_pages == UseHugePages::Yes)
|
|
{
|
|
l2.larger_pages = true;
|
|
set_page_table_entry_properties(l2, phys, flags);
|
|
return {};
|
|
}
|
|
|
|
auto& l1 = l1_entry(l2, virt);
|
|
if (l1.present) return err(ENOMEM);
|
|
set_page_table_entry_properties(l1, phys, flags);
|
|
return {};
|
|
}
|
|
|
|
Result<void> remap(u64 virt, int flags)
|
|
{
|
|
auto& l1 = *TRY(apply_cascading_flags(virt, flags));
|
|
if (!l1.present) return err(EFAULT);
|
|
l1.read_write = has_flag(flags, Flags::ReadWrite);
|
|
l1.user = has_flag(flags, Flags::User);
|
|
l1.write_through = has_flag(flags, Flags::WriteThrough);
|
|
l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
|
|
l1.no_execute = has_flag(flags, Flags::NoExecute);
|
|
flush_page(virt);
|
|
return {};
|
|
}
|
|
|
|
Result<u64> unmap(u64 virt)
|
|
{
|
|
auto& l1 = *TRY(find_entry(virt));
|
|
if (!l1.present) return err(EFAULT);
|
|
const u64 address = l1.get_address();
|
|
l1.clear();
|
|
flush_page(virt);
|
|
return address;
|
|
}
|
|
|
|
Result<u64> get_physical(u64 virt)
|
|
{
|
|
const auto& l1 = *TRY(find_entry(virt));
|
|
if (!l1.present) return err(EFAULT);
|
|
return l1.get_address();
|
|
}
|
|
|
|
Result<int> get_flags(u64 virt)
|
|
{
|
|
const auto& l1 = *TRY(find_entry(virt));
|
|
if (!l1.present) return err(EFAULT);
|
|
return arch_flags_to_mmu(l1);
|
|
}
|
|
|
|
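    // Take over the page directory the bootloader handed us and extend it with
    // a direct map of all physical memory at 0xFFFF800000000000, built out of
    // huge pages. The table walks below still go through the bootloader's
    // bootstrap mapping, since g_physical_mapping_base is only switched over
    // at the end.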
    void setup_initial_page_directory()
    {
        PageDirectory* const dir = get_page_directory();
        g_kernel_directory = dir;

        const u64 physical_memory_base = 0xFFFF800000000000;

        MemoryMapIterator iter;
        const MemoryMapEntry highest_entry = iter.highest();

        const u64 physical_memory_size = highest_entry.address() + highest_entry.size();

        check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
        MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
                                          MMU::ReadWrite | MMU::NoExecute);

        g_physical_mapping_base = physical_memory_base;

        g_kernel_directory_virt = translate_physical((u64)g_kernel_directory);

#ifdef MMU_DEBUG
        kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory);
#endif
    }

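    // A fresh userspace directory starts out empty in the lower half; the
    // upper (kernel) half is copied from the kernel directory, so kernel
    // mappings are shared by every address space. Returns the directory's
    // physical address.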
Result<PageDirectory*> create_page_directory_for_userspace()
|
|
{
|
|
const u64 directory_phys = TRY(MemoryManager::alloc_frame());
|
|
const u64 directory_virt = translate_physical(directory_phys);
|
|
|
|
PageDirectory* const directory = (PageDirectory*)directory_virt;
|
|
memset(directory, 0, ARCH_PAGE_SIZE);
|
|
|
|
constexpr auto HALF_PAGE = ARCH_PAGE_SIZE / 2;
|
|
// Copy the upper part of the page directory (higher half)
|
|
memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr((PageDirectory*)g_kernel_directory_virt, HALF_PAGE),
|
|
HALF_PAGE);
|
|
|
|
#ifdef MMU_DEBUG
|
|
kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);
|
|
#endif
|
|
|
|
return (PageDirectory*)directory_phys;
|
|
}
|
|
|
|
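    // Tear down a userspace address space: after switching back to the kernel
    // directory, walk the lower half, free every mapped frame and every
    // intermediate table, then free the directory itself (via the scope
    // guard). The shared kernel half is left untouched.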
Result<void> delete_userspace_page_directory(PageDirectory* directory)
|
|
{
|
|
check(directory);
|
|
|
|
switch_page_directory(g_kernel_directory);
|
|
|
|
auto guard = make_scope_guard([directory] { MemoryManager::free_frame((u64)directory); });
|
|
|
|
PageDirectory* const table = translate_physical(directory);
|
|
|
|
// Let's iterate over every top-level entry in the lower half
|
|
for (u64 i = 0; i < 256; i++)
|
|
{
|
|
PageTableEntry& l4 = table->entries[i];
|
|
if (!l4.present) continue;
|
|
|
|
PageDirectory* const pdp = &page_table(l4);
|
|
|
|
for (u64 j = 0; j < 512; j++)
|
|
{
|
|
PageTableEntry& l3 = pdp->entries[j];
|
|
if (!l3.present) continue;
|
|
if (l3.larger_pages)
|
|
{
|
|
// FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
|
|
TRY(MemoryManager::free_frame(l3.get_address()));
|
|
}
|
|
|
|
PageDirectory* const pd = &page_table(l3);
|
|
|
|
for (u64 k = 0; k < 512; k++)
|
|
{
|
|
PageTableEntry& l2 = pd->entries[k];
|
|
if (!l2.present) continue;
|
|
if (l2.larger_pages)
|
|
{
|
|
// FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
|
|
TRY(MemoryManager::free_frame(l2.get_address()));
|
|
}
|
|
|
|
PageDirectory* const pt = &page_table(l2);
|
|
|
|
for (u64 l = 0; l < 512; l++)
|
|
{
|
|
PageTableEntry& l1 = pt->entries[l];
|
|
if (!l1.present) continue;
|
|
|
|
// FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
|
|
TRY(MemoryManager::free_frame(l1.get_address()));
|
|
}
|
|
|
|
TRY(MemoryManager::free_frame(l2.get_address()));
|
|
}
|
|
|
|
TRY(MemoryManager::free_frame(l3.get_address()));
|
|
}
|
|
|
|
TRY(MemoryManager::free_frame(l4.get_address()));
|
|
}
|
|
|
|
// No need to clean up manually, the ScopeGuard we set up earlier will do that for us.
|
|
return {};
|
|
}
|
|
|
|
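    // Deep-copy an address space (for fork()): every present entry in the
    // lower half is duplicated eagerly, frame by frame. The kernel half is
    // already shared by create_page_directory_for_userspace().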
    // FIXME: Use the ancient magic of CoW (copy-on-write)
    Result<PageDirectory*> clone_userspace_page_directory(PageDirectory* directory)
    {
        PageDirectory* result = TRY(create_page_directory_for_userspace());

        PageDirectory* const old_table = translate_physical(directory);
        PageDirectory* const new_table = translate_physical(result);

        // FIXME: Do not leak the WIP new page directory on OOM.

        memcpy(new_table, old_table, sizeof(*old_table));

        // Let's iterate over every top-level entry in the lower half
        for (u64 i = 0; i < 256; i++)
        {
            PageTableEntry& old_l4 = old_table->entries[i];
            if (!old_l4.present) continue;
            PageTableEntry& new_l4 = new_table->entries[i];
            new_l4.set_address(TRY(MemoryManager::alloc_frame()));

            PageDirectory* const old_pdp = &page_table(old_l4);
            PageDirectory* const new_pdp = &page_table(new_l4);

            memcpy(new_pdp, old_pdp, sizeof(*old_pdp));

            for (u64 j = 0; j < 512; j++)
            {
                PageTableEntry& old_l3 = old_pdp->entries[j];
                if (!old_l3.present) continue;
                PageTableEntry& new_l3 = new_pdp->entries[j];
                new_l3.set_address(TRY(MemoryManager::alloc_frame()));

                PageDirectory* const old_pd = &page_table(old_l3);
                PageDirectory* const new_pd = &page_table(new_l3);

                memcpy(new_pd, old_pd, sizeof(*old_pd));

                // FIXME: If this is a huge page, only its first 4KiB was copied by the memcpy above.
                if (old_l3.larger_pages) continue;

                for (u64 k = 0; k < 512; k++)
                {
                    PageTableEntry& old_l2 = old_pd->entries[k];
                    if (!old_l2.present) continue;
                    PageTableEntry& new_l2 = new_pd->entries[k];
                    new_l2.set_address(TRY(MemoryManager::alloc_frame()));

                    PageDirectory* const old_pt = &page_table(old_l2);
                    PageDirectory* const new_pt = &page_table(new_l2);

                    memcpy(new_pt, old_pt, sizeof(*old_pt));

                    // FIXME: If this is a huge page, only its first 4KiB was copied by the memcpy above.
                    if (old_l2.larger_pages) continue;

                    for (u64 l = 0; l < 512; l++)
                    {
                        PageTableEntry& old_l1 = old_pt->entries[l];
                        if (!old_l1.present) continue;
                        PageTableEntry& new_l1 = new_pt->entries[l];
                        new_l1.set_address(TRY(MemoryManager::alloc_frame()));

                        memcpy(&page_table(new_l1), &page_table(old_l1), ARCH_PAGE_SIZE);
                    }
                }
            }
        }

        return result;
    }

    PageDirectory* kernel_page_directory()
    {
        return g_kernel_directory;
    }
}