Kernel: Move VMM from a class to a namespace

Also, rename the ugly Paging::VirtualMemoryManager name to just 'VMM', which is now used instead of kernelVMM.
This commit is contained in:
apio 2022-10-12 20:02:25 +02:00
parent 5f8376409d
commit 69a9f7f06a
7 changed files with 308 additions and 322 deletions

View File

@ -5,10 +5,8 @@
#define PAGE_SIZE 4096 #define PAGE_SIZE 4096
#endif #endif
namespace Paging struct PageDirectoryEntry
{ {
struct PageDirectoryEntry
{
bool Present : 1; bool Present : 1;
bool ReadWrite : 1; bool ReadWrite : 1;
bool UserSuper : 1; bool UserSuper : 1;
@ -22,10 +20,9 @@ namespace Paging
uint64_t Address : 52; uint64_t Address : 52;
void set_address(uint64_t addr); void set_address(uint64_t addr);
}; };
struct PageTable struct PageTable
{ {
PageDirectoryEntry entries[512]; PageDirectoryEntry entries[512];
} __attribute__((aligned(PAGE_SIZE))); } __attribute__((aligned(PAGE_SIZE)));
}

View File

@ -1,17 +1,14 @@
#pragma once #pragma once
#include "memory/Paging.h" #include "memory/Paging.h"
namespace Paging enum Flags
{ {
enum Flags
{
ReadWrite = 1 << 0, ReadWrite = 1 << 0,
User = 1 << 1, User = 1 << 1,
Execute = 1 << 2 Execute = 1 << 2
}; };
class VirtualMemoryManager namespace VMM
{ {
public:
void init(); // fetch page table from cr3 void init(); // fetch page table from cr3
void init(PageTable* cr3); void init(PageTable* cr3);
@ -21,9 +18,4 @@ namespace Paging
uint64_t getPhysical(uint64_t virtualAddress); uint64_t getPhysical(uint64_t virtualAddress);
uint64_t getFlags(uint64_t virtualAddress); uint64_t getFlags(uint64_t virtualAddress);
private: };
PageTable* PML4;
};
}
extern Paging::VirtualMemoryManager kernelVMM;

View File

@ -12,7 +12,7 @@
void MemoryManager::init() void MemoryManager::init()
{ {
PMM::init(); PMM::init();
kernelVMM.init(); VMM::init();
PMM::map_bitmap_to_virtual(); PMM::map_bitmap_to_virtual();
} }
@ -26,7 +26,7 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags)
#endif #endif
return 0; return 0;
} }
kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags); VMM::map(virtualAddress, (uint64_t)physicalAddress, flags);
return (void*)virtualAddress; return (void*)virtualAddress;
} }
@ -41,7 +41,7 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
#endif #endif
return 0; return 0;
} }
kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags); VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
return (void*)(virtualAddress + offset); return (void*)(virtualAddress + offset);
} }
@ -62,7 +62,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
} }
for (uint64_t i = 0; i < count; i++) for (uint64_t i = 0; i < count; i++)
{ {
kernelVMM.map(virtualAddress + (i * PAGE_SIZE), ((uint64_t)physicalAddress - offset) + (i * PAGE_SIZE), flags); VMM::map(virtualAddress + (i * PAGE_SIZE), ((uint64_t)physicalAddress - offset) + (i * PAGE_SIZE), flags);
} }
return (void*)(virtualAddress + offset); return (void*)(virtualAddress + offset);
} }
@ -70,7 +70,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
void MemoryManager::release_unaligned_mapping(void* mapping) void MemoryManager::release_unaligned_mapping(void* mapping)
{ {
uint64_t offset = (uint64_t)mapping % PAGE_SIZE; uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
kernelVMM.unmap((uint64_t)mapping - offset); VMM::unmap((uint64_t)mapping - offset);
KernelHeap::free_virtual_page((uint64_t)mapping - offset); KernelHeap::free_virtual_page((uint64_t)mapping - offset);
} }
@ -80,12 +80,12 @@ void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
if (count == 1) return release_unaligned_mapping(mapping); if (count == 1) return release_unaligned_mapping(mapping);
uint64_t offset = (uint64_t)mapping % PAGE_SIZE; uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count); KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * PAGE_SIZE)); } for (uint64_t i = 0; i < count; i++) { VMM::unmap(((uint64_t)mapping - offset) + (i * PAGE_SIZE)); }
} }
void MemoryManager::release_mapping(void* mapping) void MemoryManager::release_mapping(void* mapping)
{ {
kernelVMM.unmap((uint64_t)mapping); VMM::unmap((uint64_t)mapping);
KernelHeap::free_virtual_page((uint64_t)mapping); KernelHeap::free_virtual_page((uint64_t)mapping);
} }
@ -112,15 +112,15 @@ void* MemoryManager::get_page_at(uint64_t addr, int flags)
#endif #endif
return 0; return 0;
} }
kernelVMM.map(addr, (uint64_t)physicalAddress, flags); VMM::map(addr, (uint64_t)physicalAddress, flags);
return (void*)addr; return (void*)addr;
} }
void MemoryManager::release_page(void* page) void MemoryManager::release_page(void* page)
{ {
uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page); uint64_t physicalAddress = VMM::getPhysical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space... ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
kernelVMM.unmap((uint64_t)page); VMM::unmap((uint64_t)page);
PMM::free_page((void*)physicalAddress); PMM::free_page((void*)physicalAddress);
} }
@ -162,7 +162,7 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
#endif #endif
return 0; return 0;
} }
kernelVMM.map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags); VMM::map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
#ifdef MM_DEBUG #ifdef MM_DEBUG
kdbgln("allocating virtual %lx, physical %p", addr + (i * PAGE_SIZE), physicalAddress); kdbgln("allocating virtual %lx, physical %p", addr + (i * PAGE_SIZE), physicalAddress);
#endif #endif
@ -180,9 +180,9 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
for (uint64_t i = 0; i < count; i++) for (uint64_t i = 0; i < count; i++)
{ {
void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE)); void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE));
uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page); uint64_t physicalAddress = VMM::getPhysical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX); ASSERT(physicalAddress != UINT64_MAX);
kernelVMM.unmap((uint64_t)page); VMM::unmap((uint64_t)page);
#ifdef MM_DEBUG #ifdef MM_DEBUG
kdbgln("releasing virtual %p, physical %lx", page, physicalAddress); kdbgln("releasing virtual %p, physical %lx", page, physicalAddress);
#endif #endif

View File

@ -3,7 +3,7 @@
#pragma GCC push_options #pragma GCC push_options
#pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wconversion"
void Paging::PageDirectoryEntry::set_address(uint64_t addr) void PageDirectoryEntry::set_address(uint64_t addr)
{ {
this->Address = (addr >> 12); this->Address = (addr >> 12);
} }

View File

@ -3,24 +3,22 @@
#include "memory/PMM.h" #include "memory/PMM.h"
#include "std/string.h" #include "std/string.h"
Paging::VirtualMemoryManager kernelVMM;
// FIXME: There is a lot of duplicate code in this file. This should probably be refactored. // FIXME: There is a lot of duplicate code in this file. This should probably be refactored.
namespace Paging static PageTable* PML4;
void VMM::init()
{ {
void VirtualMemoryManager::init()
{
asm volatile("mov %%cr3, %0" : "=r"(PML4)); asm volatile("mov %%cr3, %0" : "=r"(PML4));
} }
void VirtualMemoryManager::init(PageTable* cr3) void VMM::init(PageTable* cr3)
{ {
PML4 = cr3; PML4 = cr3;
} }
void VirtualMemoryManager::unmap(uint64_t virtualAddress) void VMM::unmap(uint64_t virtualAddress)
{ {
virtualAddress >>= 12; virtualAddress >>= 12;
uint64_t P_i = virtualAddress & 0x1ff; uint64_t P_i = virtualAddress & 0x1ff;
virtualAddress >>= 9; virtualAddress >>= 9;
@ -89,12 +87,12 @@ namespace Paging
PDE = PT->entries[P_i]; PDE = PT->entries[P_i];
PDE.Present = false; PDE.Present = false;
PT->entries[P_i] = PDE; PT->entries[P_i] = PDE;
invalidate: invalidate:
asm volatile("invlpg (%0)" : : "r"(virtualAddress) : "memory"); asm volatile("invlpg (%0)" : : "r"(virtualAddress) : "memory");
} }
uint64_t VirtualMemoryManager::getPhysical(uint64_t virtualAddress) uint64_t VMM::getPhysical(uint64_t virtualAddress)
{ {
virtualAddress >>= 12; virtualAddress >>= 12;
uint64_t P_i = virtualAddress & 0x1ff; uint64_t P_i = virtualAddress & 0x1ff;
virtualAddress >>= 9; virtualAddress >>= 9;
@ -145,10 +143,10 @@ namespace Paging
PDE = PT->entries[P_i]; PDE = PT->entries[P_i];
if (!PDE.Present) return UINT64_MAX; if (!PDE.Present) return UINT64_MAX;
return PDE.Address << 12 | (virtualAddress & PAGE_SIZE); return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
} }
uint64_t VirtualMemoryManager::getFlags(uint64_t virtualAddress) // FIXME: Add support for larger pages to getFlags. uint64_t VMM::getFlags(uint64_t virtualAddress) // FIXME: Add support for larger pages to getFlags.
{ {
virtualAddress >>= 12; virtualAddress >>= 12;
uint64_t P_i = virtualAddress & 0x1ff; uint64_t P_i = virtualAddress & 0x1ff;
virtualAddress >>= 9; virtualAddress >>= 9;
@ -190,10 +188,10 @@ namespace Paging
if (PDE.UserSuper) flags |= User; if (PDE.UserSuper) flags |= User;
if (PDE.ReadWrite) flags |= ReadWrite; if (PDE.ReadWrite) flags |= ReadWrite;
return flags; return flags;
} }
void VirtualMemoryManager::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags) void VMM::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags)
{ {
virtualAddress >>= 12; virtualAddress >>= 12;
uint64_t P_i = virtualAddress & 0x1ff; uint64_t P_i = virtualAddress & 0x1ff;
virtualAddress >>= 9; virtualAddress >>= 9;
@ -319,5 +317,4 @@ namespace Paging
PDE.UserSuper = flags & User; PDE.UserSuper = flags & User;
PDE.set_address(physicalAddress); PDE.set_address(physicalAddress);
PT->entries[P_i] = PDE; PT->entries[P_i] = PDE;
}
} }

View File

@ -47,7 +47,7 @@ void sys_exec(Context* context, const char* pathname)
return; return;
} }
if (memusage > PMM::get_free()) if ((uint64_t)memusage > PMM::get_free())
{ {
MemoryManager::release_pages((void*)allocated_stack, TASK_PAGES_IN_STACK); MemoryManager::release_pages((void*)allocated_stack, TASK_PAGES_IN_STACK);
context->rax = -ENOMEM; context->rax = -ENOMEM;

View File

@ -24,7 +24,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
{ {
kdbgln("sys_mmap: %ld pages at address %p, %s", size / PAGE_SIZE, address, kdbgln("sys_mmap: %ld pages at address %p, %s", size / PAGE_SIZE, address,
real_flags & MAP_READ_WRITE ? "rw" : "ro"); real_flags & MAP_READ_WRITE ? "rw" : "ro");
if (kernelVMM.getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used. if (VMM::getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used.
{ {
kdbgln("attempt to mmap an already mapped address"); kdbgln("attempt to mmap an already mapped address");
context->rax = MAP_FAIL(ENOMEM); context->rax = MAP_FAIL(ENOMEM);
@ -78,7 +78,7 @@ void sys_munmap(Context* context, void* address, size_t size)
context->rax = -EINVAL; context->rax = -EINVAL;
return; return;
} }
uint64_t flags = kernelVMM.getFlags((uint64_t)address); uint64_t flags = VMM::getFlags((uint64_t)address);
if (!(flags & MAP_USER)) if (!(flags & MAP_USER))
{ {
kdbgln("munmap failed: attempted to unmap a kernel page"); kdbgln("munmap failed: attempted to unmap a kernel page");