// Luna/kernel/src/arch/x86_64/MMU.cpp

#include "arch/MMU.h"
#include "MemoryManager.h"
#define PAGE_SIZE 4096
const u64 rindex = 0776; // recursive index
const u64 sign = 0177777UL << 48; // sign extension
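
// One 64-bit x86_64 page table entry. The `address` bitfield stores the page-aligned
// physical address of the next-level table (or of the mapped frame), so
// set_address()/get_address() shift by 12 to drop/restore the page offset.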
struct [[gnu::packed]] PageTableEntry
{
    bool present : 1;
    bool read_write : 1;
    bool user : 1;
    bool write_through : 1;
    bool cache_disabled : 1;
    bool accessed : 1;
    bool ignore0 : 1;
    bool larger_pages : 1;
    bool ignore1 : 1;
    u8 available : 3;
    u64 address : 48;
    u8 available2 : 3;
    bool no_execute : 1;

    void set_address(uint64_t addr);
    uint64_t get_address();
};
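
// Assigning a 64-bit value into the 48-bit `address` bitfield narrows it; the
// truncation is intentional here, so -Wconversion is silenced for these two helpers.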
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"

void PageTableEntry::set_address(uint64_t addr)
{
    this->address = (addr >> 12);
}

uint64_t PageTableEntry::get_address()
{
    return (uint64_t)this->address << 12;
}

#pragma GCC diagnostic pop
struct alignas(PAGE_SIZE) PageDirectory
{
    PageTableEntry entries[512];
};

static_assert(sizeof(PageTableEntry) == 8UL);
static_assert(sizeof(PageDirectory) == PAGE_SIZE);
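
// The accessors below rely on recursive paging: entry 0776 (510) of the top-level
// table points back at that table itself, so every page table of the current
// address space is reachable at a fixed, computable virtual address.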
namespace MMU
{
    size_t page_size()
    {
        return PAGE_SIZE;
    }
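
    // Virtual address of the top-level (PML4) table: every index field of the address
    // selects the recursive slot, so the hardware walk ends on the PML4 itself.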
    PageDirectory* l4_table()
    {
        u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12);
        return (PageDirectory*)l4;
    }

    u64 l4_index(u64 addr)
    {
        return (addr >> 39) & 0777;
    }

    PageTableEntry& l4_entry(u64 addr)
    {
        return l4_table()->entries[l4_index(addr)];
    }
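
    // Each lower level swaps one recursive index for the real index derived from
    // `addr`, peeling off one level of the recursive walk.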
    PageDirectory* l3_table(u64 addr)
    {
        u64 l4 = l4_index(addr);
        u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
        return (PageDirectory*)l3;
    }

    u64 l3_index(u64 addr)
    {
        return (addr >> 30) & 0777;
    }

    PageTableEntry& l3_entry(u64 addr)
    {
        return l3_table(addr)->entries[l3_index(addr)];
    }

    PageDirectory* l2_table(u64 addr)
    {
        u64 l4 = l4_index(addr);
        u64 l3 = l3_index(addr);
        u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
        return (PageDirectory*)l2;
    }

    u64 l2_index(u64 addr)
    {
        return (addr >> 21) & 0777;
    }

    PageTableEntry& l2_entry(u64 addr)
    {
        return l2_table(addr)->entries[l2_index(addr)];
    }

    PageDirectory* l1_table(u64 addr)
    {
        u64 l4 = l4_index(addr);
        u64 l3 = l3_index(addr);
        u64 l2 = l2_index(addr);
        u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
        return (PageDirectory*)l1;
    }

    u64 l1_index(u64 addr)
    {
        return (addr >> 12) & 0777;
    }

    PageTableEntry& l1_entry(u64 addr)
    {
        return l1_table(addr)->entries[l1_index(addr)];
    }
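
    // CR3 holds the physical address of the top-level table; reloading it also flushes
    // all non-global TLB entries, which is what flush_all() relies on.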
    void switch_page_directory(PageDirectory* dir)
    {
        asm volatile("mov %0, %%cr3" : : "r"(dir));
    }

    PageDirectory* get_page_directory()
    {
        PageDirectory* value;
        asm volatile("mov %%cr3, %0" : "=r"(value));
        return value;
    }

    void flush_all()
    {
        switch_page_directory(get_page_directory());
    }
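
    // Invalidate the TLB entry covering a single virtual page.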
    void flush_page(u64 page)
    {
        asm volatile("invlpg (%0)" : : "r"(page) : "memory");
    }
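
    // Translate the hardware bits of a PTE back into the arch-independent Flags bitmask.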
    Flags arch_flags_to_mmu(PageTableEntry& entry)
    {
        int result = Flags::None;
        if (entry.read_write) result |= Flags::ReadWrite;
        if (entry.user) result |= Flags::User;
        if (entry.no_execute) result |= Flags::NoExecute;
        if (entry.write_through) result |= Flags::WriteThrough;
        if (entry.cache_disabled) result |= Flags::CacheDisable;
        return (Flags)result;
    }
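
    // Map one 4 KiB page at `virt` to the physical frame `phys`, allocating any missing
    // intermediate tables on the way down.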
    Result<void> map(u64 virt, u64 phys, Flags flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present)
        {
            auto addr = MemoryManager::alloc_physical_page();
            if (addr.has_error()) return addr.release_error();
            l4.present = true;
            if (flags & Flags::ReadWrite) l4.read_write = true;
            if (flags & Flags::User) l4.user = true;
            l4.set_address(addr.release_value());
        }

        auto& l3 = l3_entry(virt);
        if (!l3.present)
        {
            auto addr = MemoryManager::alloc_physical_page();
            if (addr.has_error()) return addr.release_error();
            l3.present = true;
            if (flags & Flags::ReadWrite) l3.read_write = true;
            if (flags & Flags::User) l3.user = true;
            l3.set_address(addr.release_value());
        }
        if (l3.larger_pages) return err; // FIXME: Replacing larger pages is not supported ATM

        auto& l2 = l2_entry(virt);
        if (!l2.present)
        {
            auto addr = MemoryManager::alloc_physical_page();
            if (addr.has_error()) return addr.release_error();
            l2.present = true;
            if (flags & Flags::ReadWrite) l2.read_write = true;
            if (flags & Flags::User) l2.user = true;
            l2.set_address(addr.release_value());
        }
        if (l2.larger_pages) return err; // FIXME: Replacing larger pages is not supported ATM

        auto& l1 = l1_entry(virt);
        bool was_present = l1.present;
        l1.present = true; // mark the final entry present, otherwise the mapping is never visible
        if (flags & Flags::ReadWrite) l1.read_write = true;
        if (flags & Flags::User) l1.user = true;
        if (flags & Flags::WriteThrough) l1.write_through = true;
        if (flags & Flags::CacheDisable) l1.cache_disabled = true;
        if (flags & Flags::NoExecute) l1.no_execute = true;
        l1.set_address(phys);
        if (was_present) flush_page(virt);
        return {};
    }
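
    // Walk the tables for `virt` and return the physical address stored in the final
    // entry, stopping early at 1 GiB (l3) or 2 MiB (l2) large pages.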
    Result<u64> get_physical(u64 virt)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present) return err;

        auto& l3 = l3_entry(virt);
        if (!l3.present) return err;
        if (l3.larger_pages) return l3.get_address();

        auto& l2 = l2_entry(virt);
        if (!l2.present) return err;
        if (l2.larger_pages) return l2.get_address();

        auto& l1 = l1_entry(virt);
        if (!l1.present) return err;
        return l1.get_address();
    }
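
    // Same walk as get_physical(), but report the entry's flags instead of its address.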
    Result<Flags> get_flags(u64 virt)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present) return err;

        auto& l3 = l3_entry(virt);
        if (!l3.present) return err;
        if (l3.larger_pages) return arch_flags_to_mmu(l3);

        auto& l2 = l2_entry(virt);
        if (!l2.present) return err;
        if (l2.larger_pages) return arch_flags_to_mmu(l2);

        auto& l1 = l1_entry(virt);
        if (!l1.present) return err;
        return arch_flags_to_mmu(l1);
    }
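
    // Point slot 0776 of the currently active top-level table back at itself, making
    // the recursive table accessors above usable.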
    void setup_initial_page_directory()
    {
        PageDirectory* dir = get_page_directory();
        u64 paddr = (u64)dir;
        PageTableEntry& recursive_entry = dir->entries[rindex];
        recursive_entry.read_write = true;
        recursive_entry.present = true;
        recursive_entry.set_address(paddr);
        flush_all();
    }
}