Kernel: Move VMM from a class to a namespace

Also, rename the ugly Paging::VirtualMemoryManager to just 'VMM', which is now used instead of the kernelVMM global.
apio 2022-10-12 20:02:25 +02:00
parent 5f8376409d
commit 69a9f7f06a
7 changed files with 308 additions and 322 deletions
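The pattern, in brief: the old code kept the page-table root as a private member of a Paging::VirtualMemoryManager class with a single global instance (kernelVMM); the new code keeps it as a file-local static in the implementation file and exposes free functions. A minimal sketch of the two shapes (simplified names, not the kernel's exact code):

    #include <cstdint>

    struct PageTable; // defined in memory/Paging.h

    // Before: a class whose only state is the root pointer, plus one global.
    class VirtualMemoryManager
    {
      public:
        void map(uint64_t virt, uint64_t phys, int flags);

      private:
        PageTable* PML4;
    };
    extern VirtualMemoryManager kernelVMM; // callers write kernelVMM.map(...)

    // After: free functions; the root pointer becomes a file-local
    // "static PageTable* PML4;" in the implementation file.
    namespace VMM
    {
        void map(uint64_t virt, uint64_t phys, int flags);
    } // callers write VMM::map(...)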

View File

@@ -5,27 +5,24 @@
 #define PAGE_SIZE 4096
 #endif
 
-namespace Paging
+struct PageDirectoryEntry
 {
-    struct PageDirectoryEntry
-    {
-        bool Present : 1;
-        bool ReadWrite : 1;
-        bool UserSuper : 1;
-        bool WriteThrough : 1;
-        bool CacheDisabled : 1;
-        bool Accessed : 1;
-        bool ignore0 : 1;
-        bool LargerPages : 1;
-        bool ignore1 : 1;
-        uint8_t Available : 3;
-        uint64_t Address : 52;
+    bool Present : 1;
+    bool ReadWrite : 1;
+    bool UserSuper : 1;
+    bool WriteThrough : 1;
+    bool CacheDisabled : 1;
+    bool Accessed : 1;
+    bool ignore0 : 1;
+    bool LargerPages : 1;
+    bool ignore1 : 1;
+    uint8_t Available : 3;
+    uint64_t Address : 52;
 
-        void set_address(uint64_t addr);
-    };
+    void set_address(uint64_t addr);
+};
 
-    struct PageTable
-    {
-        PageDirectoryEntry entries[512];
-    } __attribute__((aligned(PAGE_SIZE)));
-}
+struct PageTable
+{
+    PageDirectoryEntry entries[512];
+} __attribute__((aligned(PAGE_SIZE)));
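Each entry is a single 64-bit word: nine flag bits, three bits available to the OS, and a 52-bit field that stores the physical address with its low 12 bits (the page offset) shifted off. A self-contained sketch of that behavior, assuming the usual GCC/Clang x86_64 bit-field layout the kernel relies on:

    #include <cassert>
    #include <cstdint>

    struct Entry // same layout as PageDirectoryEntry above
    {
        bool Present : 1;
        bool ReadWrite : 1;
        bool UserSuper : 1;
        bool WriteThrough : 1;
        bool CacheDisabled : 1;
        bool Accessed : 1;
        bool ignore0 : 1;
        bool LargerPages : 1;
        bool ignore1 : 1;
        uint8_t Available : 3;
        uint64_t Address : 52;

        // Truncates to 52 bits; the real definition (in Paging.cpp, later in
        // this commit) sits behind a -Wconversion suppression for this reason.
        void set_address(uint64_t addr) { Address = addr >> 12; }
    };

    int main()
    {
        static_assert(sizeof(Entry) == 8, "flags + 52-bit frame number fit one word");
        Entry e = {};
        e.set_address(0x200000); // a page-aligned physical address (2 MiB)
        e.Present = true;
        e.ReadWrite = true;
        assert(e.Address == 0x200);                      // stored frame number
        assert(((uint64_t)e.Address << 12) == 0x200000); // round-trips exactly
    }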

View File

@@ -1,29 +1,21 @@
 #pragma once
 #include "memory/Paging.h"
 
-namespace Paging
+enum Flags
 {
-    enum Flags
-    {
-        ReadWrite = 1 << 0,
-        User = 1 << 1,
-        Execute = 1 << 2
-    };
-    class VirtualMemoryManager
-    {
-      public:
-        void init(); // fetch page table from cr3
-        void init(PageTable* cr3);
+    ReadWrite = 1 << 0,
+    User = 1 << 1,
+    Execute = 1 << 2
+};
 
-        void map(uint64_t virtualAddress, uint64_t physicalAddress, int flags);
-        void remap(uint64_t virtualAddress, int flags);
-        void unmap(uint64_t virtualAddress);
-        uint64_t getPhysical(uint64_t virtualAddress);
-        uint64_t getFlags(uint64_t virtualAddress);
+namespace VMM
+{
+    void init(); // fetch page table from cr3
+    void init(PageTable* cr3);
 
-      private:
-        PageTable* PML4;
-    };
-}
-
-extern Paging::VirtualMemoryManager kernelVMM;
+    void map(uint64_t virtualAddress, uint64_t physicalAddress, int flags);
+    void remap(uint64_t virtualAddress, int flags);
+    void unmap(uint64_t virtualAddress);
+    uint64_t getPhysical(uint64_t virtualAddress);
+    uint64_t getFlags(uint64_t virtualAddress);
+}
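Callers now go through the namespace and combine Flags values as a bitmask. A hypothetical call site (this view does not show file paths, so the include line is a guess by analogy with "memory/Paging.h", and the addresses are made up):

    #include <cstdint>
    #include "memory/VMM.h" // assumed path

    void demo()
    {
        VMM::map(0xffff800000001000, 0x200000, ReadWrite);  // kernel-only rw page
        VMM::map(0x400000, 0x201000, ReadWrite | User);     // user-accessible page

        uint64_t fl = VMM::getFlags(0x400000); // reports the same bit values
        bool writable = fl & ReadWrite;
        (void)writable;
    }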

View File

@@ -12,7 +12,7 @@
 void MemoryManager::init()
 {
     PMM::init();
-    kernelVMM.init();
+    VMM::init();
     PMM::map_bitmap_to_virtual();
 }
@@ -26,7 +26,7 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags)
 #endif
         return 0;
     }
-    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
+    VMM::map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }
@@ -41,7 +41,7 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 #endif
         return 0;
     }
-    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
+    VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
     return (void*)(virtualAddress + offset);
 }
@@ -62,7 +62,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
     }
     for (uint64_t i = 0; i < count; i++)
    {
-        kernelVMM.map(virtualAddress + (i * PAGE_SIZE), ((uint64_t)physicalAddress - offset) + (i * PAGE_SIZE), flags);
+        VMM::map(virtualAddress + (i * PAGE_SIZE), ((uint64_t)physicalAddress - offset) + (i * PAGE_SIZE), flags);
     }
     return (void*)(virtualAddress + offset);
 }
@@ -70,7 +70,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
 void MemoryManager::release_unaligned_mapping(void* mapping)
 {
     uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
-    kernelVMM.unmap((uint64_t)mapping - offset);
+    VMM::unmap((uint64_t)mapping - offset);
     KernelHeap::free_virtual_page((uint64_t)mapping - offset);
 }
@@ -80,12 +80,12 @@ void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
     if (count == 1) return release_unaligned_mapping(mapping);
     uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
     KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
-    for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * PAGE_SIZE)); }
+    for (uint64_t i = 0; i < count; i++) { VMM::unmap(((uint64_t)mapping - offset) + (i * PAGE_SIZE)); }
 }
 
 void MemoryManager::release_mapping(void* mapping)
 {
-    kernelVMM.unmap((uint64_t)mapping);
+    VMM::unmap((uint64_t)mapping);
     KernelHeap::free_virtual_page((uint64_t)mapping);
 }
@@ -112,15 +112,15 @@ void* MemoryManager::get_page_at(uint64_t addr, int flags)
 #endif
         return 0;
     }
-    kernelVMM.map(addr, (uint64_t)physicalAddress, flags);
+    VMM::map(addr, (uint64_t)physicalAddress, flags);
     return (void*)addr;
 }
 
 void MemoryManager::release_page(void* page)
 {
-    uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
+    uint64_t physicalAddress = VMM::getPhysical((uint64_t)page);
     ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
-    kernelVMM.unmap((uint64_t)page);
+    VMM::unmap((uint64_t)page);
     PMM::free_page((void*)physicalAddress);
 }
@@ -162,7 +162,7 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
 #endif
             return 0;
         }
-        kernelVMM.map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
+        VMM::map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
 #ifdef MM_DEBUG
         kdbgln("allocating virtual %lx, physical %p", addr + (i * PAGE_SIZE), physicalAddress);
 #endif
@@ -180,9 +180,9 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
     for (uint64_t i = 0; i < count; i++)
     {
         void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE));
-        uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
+        uint64_t physicalAddress = VMM::getPhysical((uint64_t)page);
         ASSERT(physicalAddress != UINT64_MAX);
-        kernelVMM.unmap((uint64_t)page);
+        VMM::unmap((uint64_t)page);
 #ifdef MM_DEBUG
         kdbgln("releasing virtual %p, physical %lx", page, physicalAddress);
 #endif
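The unaligned helpers above work by page-aligning the physical address, mapping the aligned base, and adding the in-page offset back onto the returned pointer. The offset arithmetic in isolation (a standalone sketch, not kernel code):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t PAGE_SIZE = 4096;

    int main()
    {
        uint64_t physicalAddress = 0x201a30;             // not page-aligned
        uint64_t offset = physicalAddress % PAGE_SIZE;   // 0xa30
        uint64_t alignedPhys = physicalAddress - offset; // 0x201000, what gets mapped
        uint64_t virtualAddress = 0xffff800000042000;    // page-aligned, from the heap allocator

        assert(alignedPhys % PAGE_SIZE == 0);
        // The caller receives virtualAddress + offset, so the returned pointer
        // refers to the same byte that physicalAddress names in physical memory.
        assert((virtualAddress + offset) % PAGE_SIZE == offset);
    }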

View File

@@ -3,7 +3,7 @@
 #pragma GCC push_options
 #pragma GCC diagnostic ignored "-Wconversion"
 
-void Paging::PageDirectoryEntry::set_address(uint64_t addr)
+void PageDirectoryEntry::set_address(uint64_t addr)
 {
     this->Address = (addr >> 12);
 }

View File

@@ -3,212 +3,225 @@
 #include "memory/PMM.h"
 #include "std/string.h"
 
-Paging::VirtualMemoryManager kernelVMM;
-
 // FIXME: There is a lot of duplicate code in this file. This should probably be refactored.
 
-namespace Paging
-{
-    void VirtualMemoryManager::init()
+static PageTable* PML4;
+
+void VMM::init()
 {
     asm volatile("mov %%cr3, %0" : "=r"(PML4));
 }
 
-    void VirtualMemoryManager::init(PageTable* cr3)
+void VMM::init(PageTable* cr3)
 {
     PML4 = cr3;
 }
 
-    void VirtualMemoryManager::unmap(uint64_t virtualAddress)
+void VMM::unmap(uint64_t virtualAddress)
 {
     virtualAddress >>= 12;
     uint64_t P_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PT_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PD_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PDP_i = virtualAddress & 0x1ff;
 
     PageDirectoryEntry PDE;
 
     PDE = PML4->entries[PDP_i];
     PageTable* PDP;
     if (!PDE.Present)
     {
         return; // Already unmapped
     }
     else
     {
         if (PDE.LargerPages)
         {
             PDE.Present = false;
             PDE.LargerPages = false;
             PML4->entries[PDP_i] = PDE;
             goto invalidate;
         }
         PDP = (PageTable*)((uint64_t)PDE.Address << 12);
     }
 
     PDE = PDP->entries[PD_i];
     PageTable* PD;
     if (!PDE.Present)
     {
         return; // Already unmapped
     }
     else
     {
         if (PDE.LargerPages)
         {
             PDE.Present = false;
             PDE.LargerPages = false;
             PDP->entries[PD_i] = PDE;
             goto invalidate;
         }
         PD = (PageTable*)((uint64_t)PDE.Address << 12);
     }
 
     PDE = PD->entries[PT_i];
     PageTable* PT;
     if (!PDE.Present)
     {
         return; // Already unmapped
     }
     else
     {
         if (PDE.LargerPages)
         {
             PDE.LargerPages = false;
             PDE.Present = false;
             PD->entries[PT_i] = PDE;
             goto invalidate;
         }
         PT = (PageTable*)((uint64_t)PDE.Address << 12);
     }
 
     PDE = PT->entries[P_i];
     PDE.Present = false;
     PT->entries[P_i] = PDE;
 
 invalidate:
     asm volatile("invlpg (%0)" : : "r"(virtualAddress) : "memory");
 }
 
-    uint64_t VirtualMemoryManager::getPhysical(uint64_t virtualAddress)
+uint64_t VMM::getPhysical(uint64_t virtualAddress)
 {
     virtualAddress >>= 12;
     uint64_t P_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PT_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PD_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PDP_i = virtualAddress & 0x1ff;
 
     PageDirectoryEntry PDE;
 
     PDE = PML4->entries[PDP_i];
     PageTable* PDP;
     if (!PDE.Present)
     {
         return UINT64_MAX; // Not mapped
     }
     else
     {
         if (PDE.LargerPages) return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
         PDP = (PageTable*)((uint64_t)PDE.Address << 12);
     }
 
     PDE = PDP->entries[PD_i];
     PageTable* PD;
     if (!PDE.Present)
     {
         return UINT64_MAX; // Not mapped
     }
     else
     {
         if (PDE.LargerPages) return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
         PD = (PageTable*)((uint64_t)PDE.Address << 12);
     }
 
     PDE = PD->entries[PT_i];
     PageTable* PT;
     if (!PDE.Present)
     {
         return UINT64_MAX; // Not mapped
     }
     else
     {
         if (PDE.LargerPages) return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
         PT = (PageTable*)((uint64_t)PDE.Address << 12);
     }
 
     PDE = PT->entries[P_i];
     if (!PDE.Present) return UINT64_MAX;
     return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
 }
 
-    uint64_t VirtualMemoryManager::getFlags(uint64_t virtualAddress) // FIXME: Add support for larger pages to getFlags.
+uint64_t VMM::getFlags(uint64_t virtualAddress) // FIXME: Add support for larger pages to getFlags.
 {
     virtualAddress >>= 12;
     uint64_t P_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PT_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PD_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PDP_i = virtualAddress & 0x1ff;
 
     PageDirectoryEntry PDE;
 
     PDE = PML4->entries[PDP_i];
     PageTable* PDP;
     if (!PDE.Present)
     {
         return 0; // Not mapped
     }
     else { PDP = (PageTable*)((uint64_t)PDE.Address << 12); }
 
     PDE = PDP->entries[PD_i];
     PageTable* PD;
     if (!PDE.Present)
     {
         return 0; // Not mapped
     }
     else { PD = (PageTable*)((uint64_t)PDE.Address << 12); }
 
     PDE = PD->entries[PT_i];
     PageTable* PT;
     if (!PDE.Present)
     {
         return 0; // Not mapped
     }
     else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
 
     uint64_t flags = 0;
 
     PDE = PT->entries[P_i];
     if (PDE.UserSuper) flags |= User;
     if (PDE.ReadWrite) flags |= ReadWrite;
 
     return flags;
 }
 
-    void VirtualMemoryManager::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags)
+void VMM::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags)
 {
     virtualAddress >>= 12;
     uint64_t P_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PT_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PD_i = virtualAddress & 0x1ff;
     virtualAddress >>= 9;
     uint64_t PDP_i = virtualAddress & 0x1ff;
 
     PageDirectoryEntry PDE;
 
     PDE = PML4->entries[PDP_i];
     PageTable* PDP;
     if (!PDE.Present)
     {
         PDP = (PageTable*)PMM::request_page();
         ASSERT(!(PMM_DID_FAIL(PDP)));
         memset(PDP, 0, PAGE_SIZE);
@@ -218,33 +231,33 @@ namespace Paging
         if (flags & User) PDE.UserSuper = true;
         PML4->entries[PDP_i] = PDE;
     }
     else
     {
         if (PDE.LargerPages)
         {
             unmap(virtualAddress);
             PDE.LargerPages = false;
             PDP = (PageTable*)PMM::request_page();
             ASSERT(!(PMM_DID_FAIL(PDP)));
             memset(PDP, 0, PAGE_SIZE);
             PDE.set_address((uint64_t)PDP);
             PDE.Present = true;
             PDE.ReadWrite = true;
             if (flags & User) PDE.UserSuper = true;
             PML4->entries[PDP_i] = PDE;
         }
         PDP = (PageTable*)((uint64_t)PDE.Address << 12);
     }
     if ((flags & User) && !PDE.UserSuper)
     {
         PDE.UserSuper = true;
         PML4->entries[PDP_i] = PDE;
     }
 
     PDE = PDP->entries[PD_i];
     PageTable* PD;
     if (!PDE.Present)
     {
         PD = (PageTable*)PMM::request_page();
         ASSERT(!(PMM_DID_FAIL(PD)));
         memset(PD, 0, PAGE_SIZE);
@@ -254,33 +267,33 @@ namespace Paging
         if (flags & User) PDE.UserSuper = true;
         PDP->entries[PD_i] = PDE;
     }
     else
     {
         if (PDE.LargerPages)
         {
             unmap(virtualAddress);
             PDE.LargerPages = false;
             PD = (PageTable*)PMM::request_page();
             ASSERT(!(PMM_DID_FAIL(PD)));
             memset(PD, 0, PAGE_SIZE);
             PDE.set_address((uint64_t)PD);
             PDE.Present = true;
             PDE.ReadWrite = true;
             if (flags & User) PDE.UserSuper = true;
             PDP->entries[PD_i] = PDE;
         }
         PD = (PageTable*)((uint64_t)PDE.Address << 12);
     }
     if ((flags & User) && !PDE.UserSuper)
     {
         PDE.UserSuper = true;
         PDP->entries[PD_i] = PDE;
     }
 
     PDE = PD->entries[PT_i];
     PageTable* PT;
     if (!PDE.Present)
     {
         PT = (PageTable*)PMM::request_page();
         ASSERT(!(PMM_DID_FAIL(PT)));
         memset(PT, 0, PAGE_SIZE);
@@ -290,34 +303,18 @@ namespace Paging
         if (flags & User) PDE.UserSuper = true;
         PD->entries[PT_i] = PDE;
     }
     else
     {
         if (PDE.LargerPages)
         {
             unmap(virtualAddress);
             PDE.LargerPages = false;
             PT = (PageTable*)PMM::request_page();
             ASSERT(!(PMM_DID_FAIL(PT)));
             memset(PT, 0, PAGE_SIZE);
             PDE.set_address((uint64_t)PT);
             PDE.Present = true;
             PDE.ReadWrite = true;
             if (flags & User) PDE.UserSuper = true;
             PD->entries[PT_i] = PDE;
         }
         PT = (PageTable*)((uint64_t)PDE.Address << 12);
     }
     if ((flags & User) && !PDE.UserSuper)
     {
         PDE.UserSuper = true;
         PD->entries[PT_i] = PDE;
     }
 
     PDE = PT->entries[P_i];
     PDE.Present = true;
     PDE.ReadWrite = flags & ReadWrite;
     PDE.UserSuper = flags & User;
     PDE.set_address(physicalAddress);
     PT->entries[P_i] = PDE;
-    }
-}
+}
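All four walk routines share the same prologue: shift off the 12-bit page offset, then peel off four 9-bit indices, one per paging level. A worked standalone example of that decomposition (the sample address is arbitrary):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t virtualAddress = 0xffff800000201000;

        virtualAddress >>= 12;                   // drop the page offset
        uint64_t P_i = virtualAddress & 0x1ff;   // index into the page table
        virtualAddress >>= 9;
        uint64_t PT_i = virtualAddress & 0x1ff;  // index into the page directory
        virtualAddress >>= 9;
        uint64_t PD_i = virtualAddress & 0x1ff;  // index into the PDPT
        virtualAddress >>= 9;
        uint64_t PDP_i = virtualAddress & 0x1ff; // index into the PML4

        // For 0xffff800000201000 this prints PML4[256] -> PDPT[0] -> PD[1] -> PT[1].
        printf("PML4[%lu] -> PDPT[%lu] -> PD[%lu] -> PT[%lu]\n", PDP_i, PD_i, PT_i, P_i);
    }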

View File

@@ -47,7 +47,7 @@ void sys_exec(Context* context, const char* pathname)
         return;
     }
 
-    if (memusage > PMM::get_free())
+    if ((uint64_t)memusage > PMM::get_free())
     {
         MemoryManager::release_pages((void*)allocated_stack, TASK_PAGES_IN_STACK);
         context->rax = -ENOMEM;
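The only change here is an explicit cast, presumably because memusage is a signed integer while PMM::get_free() returns an unsigned one, a mix that warns under -Wsign-compare. A minimal illustration of the pitfall the cast documents (types are hypothetical, not the kernel's):

    #include <cstdint>

    int main()
    {
        int64_t memusage = 4096;    // hypothetical signed byte count
        uint64_t free_mem = 1 << 20;

        // The usual arithmetic conversions would turn the signed operand into
        // an unsigned one here anyway; the explicit cast makes that conversion
        // visible and silences the compiler warning.
        bool overcommitted = (uint64_t)memusage > free_mem;
        return overcommitted ? 1 : 0;
    }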

View File

@@ -24,7 +24,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
     {
         kdbgln("sys_mmap: %ld pages at address %p, %s", size / PAGE_SIZE, address,
                real_flags & MAP_READ_WRITE ? "rw" : "ro");
-        if (kernelVMM.getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used.
+        if (VMM::getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used.
         {
             kdbgln("attempt to mmap an already mapped address");
             context->rax = MAP_FAIL(ENOMEM);
@@ -78,7 +78,7 @@ void sys_munmap(Context* context, void* address, size_t size)
         context->rax = -EINVAL;
         return;
     }
-    uint64_t flags = kernelVMM.getFlags((uint64_t)address);
+    uint64_t flags = VMM::getFlags((uint64_t)address);
     if (!(flags & MAP_USER))
     {
         kdbgln("munmap failed: attempted to unmap a kernel page");