#include "arch/MMU.h" #include "memory/MemoryManager.h" #include #include #include #include #pragma GCC push_options #pragma GCC diagnostic ignored "-Wconversion" PageDirectory* g_kernel_directory; void PageTableEntry::set_address(u64 addr) { this->address = (addr >> 12); } u64 PageTableEntry::get_address() const { return (u64)this->address << 12; } #pragma GCC pop_options void PageTableEntry::clear() { raw = 0; } static bool has_flag(int flags, MMU::Flags flag) { return flags & flag; } namespace MMU { constexpr PageDirectory* l4_table() { constexpr u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12); return (PageDirectory*)l4; } constexpr u64 l4_index(u64 addr) { return (addr >> 39) & 0777; } PageTableEntry& l4_entry(u64 addr) { return l4_table()->entries[l4_index(addr)]; } constexpr PageDirectory* raw_l3_table(u64 l4) { const u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12); return (PageDirectory*)l3; } constexpr PageDirectory* l3_table(u64 addr) { const u64 l4 = l4_index(addr); return raw_l3_table(l4); } constexpr u64 l3_index(u64 addr) { return (addr >> 30) & 0777; } PageTableEntry& l3_entry(u64 addr) { return l3_table(addr)->entries[l3_index(addr)]; } constexpr PageDirectory* raw_l2_table(u64 l4, u64 l3) { const u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12); return (PageDirectory*)l2; } constexpr PageDirectory* l2_table(u64 addr) { const u64 l4 = l4_index(addr); const u64 l3 = l3_index(addr); return raw_l2_table(l4, l3); } constexpr u64 l2_index(u64 addr) { return (addr >> 21) & 0777; } PageTableEntry& l2_entry(u64 addr) { return l2_table(addr)->entries[l2_index(addr)]; } constexpr PageDirectory* raw_l1_table(u64 l4, u64 l3, u64 l2) { const u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12); return (PageDirectory*)l1; } constexpr PageDirectory* l1_table(u64 addr) { const u64 l4 = l4_index(addr); const u64 l3 = l3_index(addr); const u64 l2 = l2_index(addr); return raw_l1_table(l4, l3, l2); } constexpr u64 l1_index(u64 addr) { return (addr >> 12) & 0777; } PageTableEntry& l1_entry(u64 addr) { return l1_table(addr)->entries[l1_index(addr)]; } void switch_page_directory(PageDirectory* dir) { asm volatile("mov %0, %%cr3" : : "r"(dir)); } PageDirectory* get_page_directory() { PageDirectory* value; asm volatile("mov %%cr3, %0" : "=r"(value)); return value; } void flush_all() { switch_page_directory(get_page_directory()); } void flush_page(u64 page) { asm volatile("invlpg (%0)" : : "r"(page) : "memory"); } int arch_flags_to_mmu(const PageTableEntry& entry) { int result = Flags::None; if (entry.read_write) result |= Flags::ReadWrite; if (entry.user) result |= Flags::User; if (entry.no_execute) result |= Flags::NoExecute; if (entry.write_through) result |= Flags::WriteThrough; if (entry.cache_disabled) result |= Flags::CacheDisable; return result; } Result find_entry(u64 virt) { const auto& l4 = l4_entry(virt); if (!l4.present) return err(EFAULT); auto& l3 = l3_entry(virt); if (!l3.present) return err(EFAULT); if (l3.larger_pages) return &l3; auto& l2 = l2_entry(virt); if (!l2.present) return err(EFAULT); if (l2.larger_pages) return &l2; return &l1_entry(virt); } Result apply_cascading_flags(u64 virt, int flags) { auto& l4 = l4_entry(virt); if (!l4.present) return err(EFAULT); if (flags & Flags::ReadWrite) l4.read_write = true; if (flags & Flags::User) l4.user = true; auto& l3 = l3_entry(virt); if (!l3.present) return err(EFAULT); if (l3.larger_pages) return &l3; if (flags & 
    Result<void> map(u64 virt, u64 phys, int flags)
    {
        auto& l4 = l4_entry(virt);
        if (!l4.present)
        {
            u64 addr = TRY(MemoryManager::alloc_frame());
            l4.present = true;
            l4.set_address(addr);
            memset(l3_table(virt), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l4.read_write = true;
        if (flags & Flags::User) l4.user = true;

        auto& l3 = l3_entry(virt);
        if (!l3.present)
        {
            u64 addr = TRY(MemoryManager::alloc_frame());
            l3.present = true;
            l3.set_address(addr);
            memset(l2_table(virt), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l3.read_write = true;
        if (flags & Flags::User) l3.user = true;
        if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM

        auto& l2 = l2_entry(virt);
        if (!l2.present)
        {
            u64 addr = TRY(MemoryManager::alloc_frame());
            l2.present = true;
            l2.set_address(addr);
            memset(l1_table(virt), 0, ARCH_PAGE_SIZE);
        }
        if (flags & Flags::ReadWrite) l2.read_write = true;
        if (flags & Flags::User) l2.user = true;
        if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM

        auto& l1 = l1_entry(virt);
        if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
        l1.present = true;
        l1.read_write = has_flag(flags, Flags::ReadWrite);
        l1.user = has_flag(flags, Flags::User);
        l1.write_through = has_flag(flags, Flags::WriteThrough);
        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
        l1.no_execute = has_flag(flags, Flags::NoExecute);
        l1.set_address(phys);
        return {};
    }

    Result<void> remap(u64 virt, int flags)
    {
        auto& l1 = *TRY(apply_cascading_flags(virt, flags));
        if (!l1.present) return err(EFAULT);
        l1.read_write = has_flag(flags, Flags::ReadWrite);
        l1.user = has_flag(flags, Flags::User);
        l1.write_through = has_flag(flags, Flags::WriteThrough);
        l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
        l1.no_execute = has_flag(flags, Flags::NoExecute);
        flush_page(virt);
        return {};
    }

    Result<u64> unmap(u64 virt)
    {
        auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        const u64 address = l1.get_address();
        l1.clear();
        flush_page(virt);
        return address;
    }

    Result<u64> get_physical(u64 virt)
    {
        const auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        return l1.get_address();
    }

    Result<int> get_flags(u64 virt)
    {
        const auto& l1 = *TRY(find_entry(virt));
        if (!l1.present) return err(EFAULT);
        return arch_flags_to_mmu(l1);
    }

    void setup_initial_page_directory()
    {
        PageDirectory* const dir = get_page_directory();
        g_kernel_directory = dir;

        // Install the recursive entry in the boot page directory so the table
        // accessors above start working.
        const u64 paddr = (u64)dir;
        PageTableEntry& recursive_entry = dir->entries[rindex];
        recursive_entry.read_write = true;
        recursive_entry.present = true;
        recursive_entry.set_address(paddr);
        flush_all();
    }

    Result<PageDirectory*> create_page_directory_for_userspace()
    {
        u64 directory_virt = TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite | MMU::NoExecute));
        u64 directory_phys = MMU::get_physical(directory_virt).value();

        PageDirectory* directory = (PageDirectory*)directory_virt;
        memset(directory, 0, ARCH_PAGE_SIZE);

        PageTableEntry& recursive_entry = directory->entries[rindex];
        recursive_entry.read_write = true;
        recursive_entry.present = true;
        recursive_entry.set_address(directory_phys);

        // Share the kernel mappings (the highest L4 entry) with the new address space.
        directory->entries[511] = g_kernel_directory->entries[511];

        // From now on, we're only going to use the physical address, since accessing the PageDirectory will be dealt
        // with using recursive mapping. So let's make sure we don't leak any VM.
        MemoryManager::unmap_weak_and_free_vm(directory_virt, 1);

        return (PageDirectory*)directory_phys;
    }
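    // Hedged lifecycle sketch (not part of this file; `user_virt` and `user_phys`
    // are hypothetical values). An address space created by the function above
    // would typically be used and torn down roughly like this:
    //
    //   PageDirectory* dir = TRY(create_page_directory_for_userspace());
    //   switch_page_directory(dir);  // dir is a physical address; CR3 takes physical addresses
    //   TRY(map(user_virt, user_phys, Flags::ReadWrite | Flags::User));
    //   // ... run userspace code ...
    //   TRY(delete_userspace_page_directory(dir));
    //
    // Note that map()/unmap() always operate on the *active* address space, since
    // every table access goes through the recursive slot of the current CR3. That
    // is also why the deletion routine below must switch into the dying directory
    // before walking it.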
    Result<void> delete_userspace_page_directory(PageDirectory* directory)
    {
        check(directory);

        // Needed in order to access page tables using the recursive mapping system.
        switch_page_directory(directory);

        auto guard = make_scope_guard([&] {
            check(g_kernel_directory);
            switch_page_directory(g_kernel_directory);
            MemoryManager::free_frame((u64)directory);
        });

        PageDirectory* table = l4_table();

        // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages).
        for (u64 i = 0; i < 510; i++)
        {
            PageTableEntry& l4 = table->entries[i];
            if (!l4.present) continue;
            PageDirectory* pdp = raw_l3_table(i);

            for (u64 j = 0; j < 512; j++)
            {
                PageTableEntry& l3 = pdp->entries[j];
                if (!l3.present) continue;
                if (l3.larger_pages)
                {
                    // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                    TRY(MemoryManager::free_frame(l3.get_address()));
                    // A huge page has no L2 table beneath it, so don't walk (or double-free) one.
                    continue;
                }
                PageDirectory* pd = raw_l2_table(i, j);

                for (u64 k = 0; k < 512; k++)
                {
                    PageTableEntry& l2 = pd->entries[k];
                    if (!l2.present) continue;
                    if (l2.larger_pages)
                    {
                        // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                        TRY(MemoryManager::free_frame(l2.get_address()));
                        // Likewise, a large page has no L1 table beneath it.
                        continue;
                    }
                    PageDirectory* pt = raw_l1_table(i, j, k);

                    for (u64 l = 0; l < 512; l++)
                    {
                        PageTableEntry& l1 = pt->entries[l];
                        if (!l1.present) continue;

                        // FIXME: Maybe we shouldn't delete some pages in an address space, such as shared memory.
                        TRY(MemoryManager::free_frame(l1.get_address()));
                    }

                    TRY(MemoryManager::free_frame(l2.get_address()));
                }

                TRY(MemoryManager::free_frame(l3.get_address()));
            }

            TRY(MemoryManager::free_frame(l4.get_address()));
        }

        // No need to clean up manually, the ScopeGuard we set up earlier will do that for us.
        return {};
    }

    PageDirectory* kernel_page_directory()
    {
        return g_kernel_directory;
    }
}