// Luna/kernel/src/arch/x86_64/MMU.cpp

#include "arch/MMU.h"
#include "Log.h"
#include "memory/MemoryManager.h"
#include <luna/CString.h>
#include <luna/Result.h>
#include <luna/ScopeGuard.h>
#include <luna/SystemError.h>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
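
// The kernel's initial page directory: `g_kernel_directory` holds its physical address (the
// value loaded into CR3), and `g_kernel_directory_virt` a kernel virtual mapping of that frame.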
PageDirectory* g_kernel_directory;
u64 g_kernel_directory_virt;
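
// The `address` bit-field of a PageTableEntry stores the physical frame number, i.e. the
// physical address shifted right by 12 bits (4 KiB page granularity), hence the shifts below.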
void PageTableEntry::set_address(u64 addr)
{
    this->address = (addr >> 12);
}

u64 PageTableEntry::get_address() const
{
    return (u64)this->address << 12;
}
#pragma GCC diagnostic pop

void PageTableEntry::clear()
{
    raw = 0;
}

static bool has_flag(int flags, MMU::Flags flag)
{
    return flags & flag;
}

namespace MMU
{
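    // This file accesses page tables through recursive mapping: PML4 slot `rindex` points back
    // at the PML4 itself, so repeating `rindex` in the upper index positions of a virtual
    // address walks "up" the hierarchy and exposes any paging structure as ordinary memory.
    // `sign` supplies the canonical sign-extension bits required for such higher-half addresses.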

    constexpr PageDirectory* l4_table()
    {
        constexpr u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12);
        return (PageDirectory*)l4;
    }

    constexpr u64 l4_index(u64 addr)
    {
        return (addr >> 39) & 0777;
    }

    PageTableEntry& l4_entry(u64 addr)
    {
        return l4_table()->entries[l4_index(addr)];
    }

    constexpr PageDirectory* raw_l3_table(u64 l4)
    {
        const u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
        return (PageDirectory*)l3;
    }

    constexpr PageDirectory* l3_table(u64 addr)
    {
        const u64 l4 = l4_index(addr);
        return raw_l3_table(l4);
    }

    constexpr u64 l3_index(u64 addr)
    {
        return (addr >> 30) & 0777;
    }

    PageTableEntry& l3_entry(u64 addr)
    {
        return l3_table(addr)->entries[l3_index(addr)];
    }

    constexpr PageDirectory* raw_l2_table(u64 l4, u64 l3)
    {
        const u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
        return (PageDirectory*)l2;
    }

    constexpr PageDirectory* l2_table(u64 addr)
    {
        const u64 l4 = l4_index(addr);
        const u64 l3 = l3_index(addr);
        return raw_l2_table(l4, l3);
    }

    constexpr u64 l2_index(u64 addr)
    {
        return (addr >> 21) & 0777;
    }

    PageTableEntry& l2_entry(u64 addr)
    {
        return l2_table(addr)->entries[l2_index(addr)];
    }

    constexpr PageDirectory* raw_l1_table(u64 l4, u64 l3, u64 l2)
    {
        const u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
        return (PageDirectory*)l1;
    }

    constexpr PageDirectory* l1_table(u64 addr)
    {
        const u64 l4 = l4_index(addr);
        const u64 l3 = l3_index(addr);
        const u64 l2 = l2_index(addr);
        return raw_l1_table(l4, l3, l2);
    }

    constexpr u64 l1_index(u64 addr)
    {
        return (addr >> 12) & 0777;
    }

    PageTableEntry& l1_entry(u64 addr)
    {
        return l1_table(addr)->entries[l1_index(addr)];
    }
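
    // CR3 helpers and TLB maintenance: reloading CR3 flushes all non-global TLB entries, while
    // `invlpg` invalidates the cached translation for a single page.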
    void switch_page_directory(PageDirectory* dir)
    {
        asm volatile("mov %0, %%cr3" : : "r"(dir));
    }

    PageDirectory* get_page_directory()
    {
        PageDirectory* value;
        asm volatile("mov %%cr3, %0" : "=r"(value));
        return value;
    }

    void flush_all()
    {
        switch_page_directory(get_page_directory());
    }

    void flush_page(u64 page)
    {
        asm volatile("invlpg (%0)" : : "r"(page) : "memory");
    }

    int arch_flags_to_mmu(const PageTableEntry& entry)
    {
        int result = Flags::None;
        if (entry.read_write) result |= Flags::ReadWrite;
        if (entry.user) result |= Flags::User;
        if (entry.no_execute) result |= Flags::NoExecute;
        if (entry.write_through) result |= Flags::WriteThrough;
        if (entry.cache_disabled) result |= Flags::CacheDisable;
        return result;
    }
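
    // Walks the paging hierarchy for `virt` and returns the entry that actually maps it,
    // stopping early at entries marked `larger_pages` (1 GiB / 2 MiB mappings).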
    Result<PageTableEntry*> find_entry(u64 virt)
    {
        const auto& l4 = l4_entry(virt);
        if (!l4.present) return err(EFAULT);

        auto& l3 = l3_entry(virt);
        if (!l3.present) return err(EFAULT);
        if (l3.larger_pages) return &l3;

        auto& l2 = l2_entry(virt);
        if (!l2.present) return err(EFAULT);
        if (l2.larger_pages) return &l2;

        return &l1_entry(virt);
    }
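
    // On x86_64 a page is only writable or user-accessible if those bits are set at every level
    // of the walk, so ReadWrite/User requests cascade down through the upper-level entries
    // before the final entry is returned for the caller to update.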
    Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present) return err(EFAULT);
        if (flags & Flags::ReadWrite) l4.read_write = true;
        if (flags & Flags::User) l4.user = true;

        auto& l3 = l3_entry(virt);
        if (!l3.present) return err(EFAULT);
        if (l3.larger_pages) return &l3;
        if (flags & Flags::ReadWrite) l3.read_write = true;
        if (flags & Flags::User) l3.user = true;

        auto& l2 = l2_entry(virt);
        if (!l2.present) return err(EFAULT);
        if (l2.larger_pages) return &l2;
        if (flags & Flags::ReadWrite) l2.read_write = true;
        if (flags & Flags::User) l2.user = true;

        auto& l1 = l1_entry(virt);
        return &l1;
    }
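
    // Maps `virt` to `phys` with the given flags, allocating and zeroing any missing
    // intermediate page tables along the way.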
    Result<void> map(u64 virt, u64 phys, int flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present)
        {
            const u64 addr = TRY(MemoryManager::alloc_frame());
            l4.present = true;
            l4.set_address(addr);
            memset(l3_table(virt), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l4.read_write = true;
        if (flags & Flags::User) l4.user = true;

        auto& l3 = l3_entry(virt);
        if (!l3.present)
        {
            const u64 addr = TRY(MemoryManager::alloc_frame());
            l3.present = true;
            l3.set_address(addr);
            memset(l2_table(virt), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l3.read_write = true;
        if (flags & Flags::User) l3.user = true;
        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM

        auto& l2 = l2_entry(virt);
        if (!l2.present)
        {
            const u64 addr = TRY(MemoryManager::alloc_frame());
            l2.present = true;
            l2.set_address(addr);
            memset(l1_table(virt), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l2.read_write = true;
        if (flags & Flags::User) l2.user = true;
        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM

        auto& l1 = l1_entry(virt);
        if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
        l1.present = true;
        l1.read_write = has_flag(flags, Flags::ReadWrite);
        l1.user = has_flag(flags, Flags::User);
        l1.write_through = has_flag(flags, Flags::WriteThrough);
        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
        l1.no_execute = has_flag(flags, Flags::NoExecute);
        l1.set_address(phys);
        return {};
    }
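
    // Changes the protection flags of an existing mapping in place and flushes its TLB entry.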
    Result<void> remap(u64 virt, int flags)
    {
        auto& l1 = *TRY(apply_cascading_flags(virt, flags));
        if (!l1.present) return err(EFAULT);
        l1.read_write = has_flag(flags, Flags::ReadWrite);
        l1.user = has_flag(flags, Flags::User);
        l1.write_through = has_flag(flags, Flags::WriteThrough);
        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
        l1.no_execute = has_flag(flags, Flags::NoExecute);
        flush_page(virt);
        return {};
    }
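
    // Removes the mapping for `virt` and returns the physical address it pointed to, so the
    // caller can decide whether to free the underlying frame.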
    Result<u64> unmap(u64 virt)
    {
        auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        const u64 address = l1.get_address();
        l1.clear();
        flush_page(virt);
        return address;
    }
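
    // Translation and flag queries for already-mapped addresses.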
    Result<u64> get_physical(u64 virt)
    {
        const auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        return l1.get_address();
    }

    Result<int> get_flags(u64 virt)
    {
        const auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        return arch_flags_to_mmu(l1);
    }
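
    // Adopts the page directory that is active when the kernel starts, installs the recursive
    // entry at slot `rindex`, and records both its physical and kernel-virtual addresses.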
    void setup_initial_page_directory()
    {
        PageDirectory* const dir = get_page_directory();
        g_kernel_directory = dir;

        const u64 paddr = (u64)dir;
        PageTableEntry& recursive_entry = dir->entries[rindex];
        recursive_entry.read_write = true;
        recursive_entry.present = true;
        recursive_entry.set_address(paddr);
        flush_all();

        g_kernel_directory_virt =
            MemoryManager::get_kernel_mapping_for_frames((u64)dir, 1, MMU::ReadWrite | MMU::NoExecute).value();

        kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory);
    }
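
    // Creates a fresh top-level page directory for a userspace process: it starts out empty
    // except for its own recursive entry and a copy of the kernel's top-level entry (slot 511),
    // so kernel mappings are shared with every address space.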
    Result<PageDirectory*> create_page_directory_for_userspace()
    {
        const u64 directory_virt = TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite | MMU::NoExecute));
        const u64 directory_phys = MMU::get_physical(directory_virt).value();

        PageDirectory* const directory = (PageDirectory*)directory_virt;
        memset(directory, 0, ARCH_PAGE_SIZE);

        PageTableEntry& recursive_entry = directory->entries[rindex];
        recursive_entry.read_write = true;
        recursive_entry.present = true;
        recursive_entry.set_address(directory_phys);

        kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);

        directory->entries[511] = ((PageDirectory*)g_kernel_directory_virt)->entries[511];

        // From now on, we're only going to use the physical address, since accessing the PageDirectory will be dealt
        // with using recursive mapping. So let's make sure we don't leak any VM.
        MemoryManager::unmap_weak_and_free_vm(directory_virt, 1);

        return (PageDirectory*)directory_phys;
    }
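
    // Tears down a userspace page directory: frees every frame mapped in it and every
    // intermediate page-table frame, skipping the last two top-level slots (the recursive
    // mapping and the shared kernel pages). The scope guard switches back to the kernel
    // directory and frees the directory frame itself even if an error is returned mid-walk.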
    Result<void> delete_userspace_page_directory(PageDirectory* directory)
    {
        check(directory);

        // Needed in order to access page tables using the recursive mapping system.
        switch_page_directory(directory);

        auto guard = make_scope_guard([&] {
            check(g_kernel_directory);
            switch_page_directory(g_kernel_directory);
            MemoryManager::free_frame((u64)directory);
        });

        PageDirectory* const table = l4_table();

        // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages)
        for (u64 i = 0; i < 510; i++)
        {
            PageTableEntry& l4 = table->entries[i];
            if (!l4.present) continue;

            PageDirectory* const pdp = raw_l3_table(i);

            for (u64 j = 0; j < 512; j++)
            {
                PageTableEntry& l3 = pdp->entries[j];
                if (!l3.present) continue;
                if (l3.larger_pages)
                {
                    // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                    TRY(MemoryManager::free_frame(l3.get_address()));
                    continue; // A huge page has no lower levels to walk.
                }

                PageDirectory* const pd = raw_l2_table(i, j);

                for (u64 k = 0; k < 512; k++)
                {
                    PageTableEntry& l2 = pd->entries[k];
                    if (!l2.present) continue;
                    if (l2.larger_pages)
                    {
                        // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                        TRY(MemoryManager::free_frame(l2.get_address()));
                        continue; // A huge page has no lower levels to walk.
                    }

                    PageDirectory* const pt = raw_l1_table(i, j, k);

                    for (u64 l = 0; l < 512; l++)
                    {
                        PageTableEntry& l1 = pt->entries[l];
                        if (!l1.present) continue;
                        // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                        TRY(MemoryManager::free_frame(l1.get_address()));
                    }

                    TRY(MemoryManager::free_frame(l2.get_address()));
                }

                TRY(MemoryManager::free_frame(l3.get_address()));
            }

            TRY(MemoryManager::free_frame(l4.get_address()));
        }

        // No need to clean up manually, the ScopeGuard we set up earlier will do that for us.
        return {};
    }

    PageDirectory* kernel_page_directory()
    {
        return g_kernel_directory;
    }
}