// Luna/kernel/src/arch/x86_64/MMU.cpp

#include "arch/MMU.h"
#include "Log.h"
#include "memory/MemoryManager.h"
#include "memory/MemoryMap.h"
#include <luna/Alignment.h>
#include <luna/CString.h>
#include <luna/Result.h>
#include <luna/ScopeGuard.h>
#include <luna/SystemError.h>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"

PageDirectory* g_kernel_directory;
u64 g_kernel_directory_virt;

// The bootloader maps up to 16GiB of physical memory for us at address 0. Using this bootstrap mapping, we'll map
// all physical memory at 0xFFFF800000000000.
u64 g_physical_mapping_base = 0;
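
// set_address() and get_address() store and recover a physical address via its frame number (address >> 12),
// since the low 12 bits of a page table entry hold flags rather than address bits.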
void PageTableEntry::set_address(u64 addr)
{
    this->address = (addr >> 12);
}

u64 PageTableEntry::get_address() const
{
    return (u64)this->address << 12;
}
#pragma GCC diagnostic pop

void PageTableEntry::clear()
{
    raw = 0;
}

static bool has_flag(int flags, MMU::Flags flag)
{
    return flags & flag;
}

namespace MMU
{
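    // Convert a physical address to a virtual one by offsetting it into the complete physical memory mapping.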
    template <typename T> T translate_physical(T phys)
    {
        return (T)(g_physical_mapping_base + (u64)phys);
    }

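    // Load a new top-level page directory into CR3. 'dir' must be a physical address.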
    void switch_page_directory(PageDirectory* dir)
    {
        asm volatile("mov %0, %%cr3" : : "r"(dir));
    }

    PageDirectory* get_page_directory()
    {
        PageDirectory* value;
        asm volatile("mov %%cr3, %0" : "=r"(value));
        return value;
    }

    PageDirectory* get_virtual_page_directory()
    {
        return translate_physical(get_page_directory());
    }

    void flush_all()
    {
        switch_page_directory(get_page_directory());
    }

    void flush_page(u64 page)
    {
        asm volatile("invlpg (%0)" : : "r"(page) : "memory");
    }

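    // Each paging level is indexed by 9 bits of the virtual address (0777 octal == 511). For example,
    // 0xffff800000200000 decodes to l4 index 256, l3 index 0, l2 index 1 and l1 index 0.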
    constexpr u64 l4_index(u64 addr)
    {
        return (addr >> 39) & 0777;
    }

    constexpr u64 l3_index(u64 addr)
    {
        return (addr >> 30) & 0777;
    }

    constexpr u64 l2_index(u64 addr)
    {
        return (addr >> 21) & 0777;
    }

    constexpr u64 l1_index(u64 addr)
    {
        return (addr >> 12) & 0777;
    }

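    // Translate the architecture-specific bits of a page table entry into arch-independent MMU::Flags.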
    int arch_flags_to_mmu(const PageTableEntry& entry)
    {
        int result = Flags::None;
        if (entry.read_write) result |= Flags::ReadWrite;
        if (entry.user) result |= Flags::User;
        if (entry.no_execute) result |= Flags::NoExecute;
        if (entry.write_through) result |= Flags::WriteThrough;
        if (entry.cache_disabled) result |= Flags::CacheDisable;
        return result;
    }

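    // Helpers for walking the paging hierarchy: each entry stores the physical address of the next-level table,
    // which we access through the physical memory mapping via page_table().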
    PageTableEntry& l4_entry(u64 virt)
    {
        auto index = l4_index(virt);
        return get_virtual_page_directory()->entries[index];
    }

    PageDirectory& page_table(const PageTableEntry& entry)
    {
        return *translate_physical((PageDirectory*)entry.get_address());
    }

    PageTableEntry& l3_entry(const PageTableEntry& entry, u64 virt)
    {
        auto index = l3_index(virt);
        return page_table(entry).entries[index];
    }

    PageTableEntry& l2_entry(const PageTableEntry& entry, u64 virt)
    {
        auto index = l2_index(virt);
        return page_table(entry).entries[index];
    }

    PageTableEntry& l1_entry(const PageTableEntry& entry, u64 virt)
    {
        auto index = l1_index(virt);
        return page_table(entry).entries[index];
    }

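    // Walk down to the entry that maps 'virt', stopping early at huge pages (1GiB at l3, 2MiB at l2).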
    Result<PageTableEntry*> find_entry(u64 virt)
    {
        const auto& l4 = l4_entry(virt);
        if (!l4.present) return err(EFAULT);
        auto& l3 = l3_entry(l4, virt);
        if (!l3.present) return err(EFAULT);
        if (l3.larger_pages) return &l3;
        auto& l2 = l2_entry(l3, virt);
        if (!l2.present) return err(EFAULT);
        if (l2.larger_pages) return &l2;
        return &l1_entry(l2, virt);
    }

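    // On x86_64, the effective permissions of a page are the AND of the permissions at every level, so making a
    // page writable or user-accessible requires setting those bits on all of its parent entries as well.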
    Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present) return err(EFAULT);
        if (flags & Flags::ReadWrite) l4.read_write = true;
        if (flags & Flags::User) l4.user = true;
        auto& l3 = l3_entry(l4, virt);
        if (!l3.present) return err(EFAULT);
        if (l3.larger_pages) return &l3;
        if (flags & Flags::ReadWrite) l3.read_write = true;
        if (flags & Flags::User) l3.user = true;
        auto& l2 = l2_entry(l3, virt);
        if (!l2.present) return err(EFAULT);
        if (l2.larger_pages) return &l2;
        if (flags & Flags::ReadWrite) l2.read_write = true;
        if (flags & Flags::User) l2.user = true;
        auto& l1 = l1_entry(l2, virt);
        return &l1;
    }

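    // Map 'virt' to 'phys' with the given flags, allocating and zeroing any intermediate page tables that are
    // missing along the way. A typical (hypothetical) call, e.g. for an MMIO region, might look like:
    //     TRY(MMU::map(virt_addr, mmio_phys, MMU::ReadWrite | MMU::NoExecute | MMU::CacheDisable));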
    Result<void> map(u64 virt, u64 phys, int flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present)
        {
            const u64 addr = TRY(MemoryManager::alloc_frame());
            l4.present = true;
            l4.set_address(addr);
            memset(&page_table(l4), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l4.read_write = true;
        if (flags & Flags::User) l4.user = true;

        auto& l3 = l3_entry(l4, virt);
        if (!l3.present)
        {
            const u64 addr = TRY(MemoryManager::alloc_frame());
            l3.present = true;
            l3.set_address(addr);
            memset(&page_table(l3), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l3.read_write = true;
        if (flags & Flags::User) l3.user = true;
        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM

        auto& l2 = l2_entry(l3, virt);
        if (!l2.present)
        {
            const u64 addr = TRY(MemoryManager::alloc_frame());
            l2.present = true;
            l2.set_address(addr);
            memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l2.read_write = true;
        if (flags & Flags::User) l2.user = true;
        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM

        auto& l1 = l1_entry(l2, virt);
        if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
        l1.present = true;
        l1.read_write = has_flag(flags, Flags::ReadWrite);
        l1.user = has_flag(flags, Flags::User);
        l1.write_through = has_flag(flags, Flags::WriteThrough);
        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
        l1.no_execute = has_flag(flags, Flags::NoExecute);
        l1.set_address(phys);
        return {};
    }

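    // Update the flags of an existing mapping, making sure the parent levels also allow the new permissions.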
    Result<void> remap(u64 virt, int flags)
    {
        auto& l1 = *TRY(apply_cascading_flags(virt, flags));
        if (!l1.present) return err(EFAULT);
        l1.read_write = has_flag(flags, Flags::ReadWrite);
        l1.user = has_flag(flags, Flags::User);
        l1.write_through = has_flag(flags, Flags::WriteThrough);
        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
        l1.no_execute = has_flag(flags, Flags::NoExecute);
        flush_page(virt);
        return {};
    }

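    // Remove the mapping for 'virt' and return the physical address it pointed to, so the caller can decide
    // whether to free the underlying frame.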
    Result<u64> unmap(u64 virt)
    {
        auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        const u64 address = l1.get_address();
        l1.clear();
        flush_page(virt);
        return address;
    }

    Result<u64> get_physical(u64 virt)
    {
        const auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        return l1.get_address();
    }

    Result<int> get_flags(u64 virt)
    {
        const auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        return arch_flags_to_mmu(l1);
    }

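    // Using the bootloader's bootstrap identity mapping (g_physical_mapping_base == 0), map all detected physical
    // memory at 0xFFFF800000000000, then switch g_physical_mapping_base over to the new mapping.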
    void setup_initial_page_directory()
    {
        PageDirectory* const dir = get_page_directory();
        g_kernel_directory = dir;

        const u64 physical_memory_base = 0xFFFF800000000000;

        MemoryMapIterator iter;
        const MemoryMapEntry highest_entry = iter.highest();
        const u64 physical_memory_size = highest_entry.address() + highest_entry.size();

        // FIXME: Do this using 2MiB huge pages.
        MemoryManager::map_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_PAGE_SIZE,
                                     MMU::ReadWrite | MMU::NoExecute);

        g_physical_mapping_base = physical_memory_base;

        g_kernel_directory_virt = translate_physical((u64)g_kernel_directory);
        kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory);
    }

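    // Create a fresh page directory for a userspace process. The higher half (kernel mappings) is shared with the
    // kernel directory by copying its upper 256 entries; the lower half starts out empty.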
    Result<PageDirectory*> create_page_directory_for_userspace()
    {
        const u64 directory_phys = TRY(MemoryManager::alloc_frame());
        const u64 directory_virt = translate_physical(directory_phys);

        PageDirectory* const directory = (PageDirectory*)directory_virt;
        memset(directory, 0, ARCH_PAGE_SIZE);

        constexpr auto HALF_PAGE = ARCH_PAGE_SIZE / 2;
        // Copy the upper part of the page directory (higher half)
        memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr((PageDirectory*)g_kernel_directory_virt, HALF_PAGE),
               HALF_PAGE);

        kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);

        return (PageDirectory*)directory_phys;
    }

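    // Tear down a userspace page directory: free every frame mapped in the lower half, along with the page tables
    // themselves, and finally the directory frame itself (via the scope guard).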
    Result<void> delete_userspace_page_directory(PageDirectory* directory)
    {
        check(directory);

        switch_page_directory(g_kernel_directory);

        auto guard = make_scope_guard([directory] { MemoryManager::free_frame((u64)directory); });

        PageDirectory* const table = translate_physical(directory);

        // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages)
        for (u64 i = 0; i < 510; i++)
        {
            PageTableEntry& l4 = table->entries[i];
            if (!l4.present) continue;
            PageDirectory* const pdp = &page_table(l4);

            for (u64 j = 0; j < 512; j++)
            {
                PageTableEntry& l3 = pdp->entries[j];
                if (!l3.present) continue;
                if (l3.larger_pages)
                {
                    // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                    TRY(MemoryManager::free_frame(l3.get_address()));
                    continue; // A huge page has no page table underneath it, so don't descend (or free it twice).
                }
                PageDirectory* const pd = &page_table(l3);

                for (u64 k = 0; k < 512; k++)
                {
                    PageTableEntry& l2 = pd->entries[k];
                    if (!l2.present) continue;
                    if (l2.larger_pages)
                    {
                        // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                        TRY(MemoryManager::free_frame(l2.get_address()));
                        continue; // Same here: this entry maps a 2MiB page, not a page table.
                    }
                    PageDirectory* const pt = &page_table(l2);

                    for (u64 l = 0; l < 512; l++)
                    {
                        PageTableEntry& l1 = pt->entries[l];
                        if (!l1.present) continue;
                        // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                        TRY(MemoryManager::free_frame(l1.get_address()));
                    }

                    TRY(MemoryManager::free_frame(l2.get_address()));
                }

                TRY(MemoryManager::free_frame(l3.get_address()));
            }

            TRY(MemoryManager::free_frame(l4.get_address()));
        }

        // No need to clean up manually, the ScopeGuard we set up earlier will do that for us.
        return {};
    }

    PageDirectory* kernel_page_directory()
    {
        return g_kernel_directory;
    }
}