Add support for user pages

parent ec01dc2927
commit 543fe2885f
@@ -1,18 +1,23 @@
 #pragma once
 #include <stdint.h>

+#define MAP_READ_WRITE (1 << 0)
+#define MAP_USER (1 << 1)
+
 namespace KernelMemoryManager
 {
-    void* get_mapping(void* physicalAddress);
+    void init();
+
+    void* get_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
     void release_mapping(void* mapping);

-    void* get_unaligned_mapping(void* physicalAddress);
-    void* get_unaligned_mappings(void* physicalAddress, uint64_t count);
+    void* get_unaligned_mapping(void* physicalAddress, int flags = MAP_READ_WRITE);
+    void* get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags = MAP_READ_WRITE);
     void release_unaligned_mapping(void* mapping);
     void release_unaligned_mappings(void* mapping, uint64_t count);

-    void* get_page();
-    void* get_pages(uint64_t count);
+    void* get_page(int flags = MAP_READ_WRITE);
+    void* get_pages(uint64_t count, int flags = MAP_READ_WRITE);

     void release_page(void* page);
     void release_pages(void* pages, uint64_t count);
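Both new parameters default to MAP_READ_WRITE, so existing callers keep compiling and behaving exactly as before; only code that wants user-accessible memory opts in. A minimal usage sketch (the variable names are illustrative, not part of this commit):

    void* kpage = KernelMemoryManager::get_page();                          // kernel-only, same as before
    void* upage = KernelMemoryManager::get_page(MAP_READ_WRITE | MAP_USER); // user-accessible page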
@@ -4,10 +4,12 @@
 class RangeAllocator
 {
   public:
     void init(void* start_address, uint64_t count);
+    void init(void* start_address, void* end_address);
     void init_from_mmap();

     void* request_page();
-    void* request_pages(uint64_t count);
+    void* request_pages(
+        uint64_t count); // Please don't use this in kernelPMM. That's what KernelMemoryManager/KernelHeap do.
     void free_page(void* address);
     void free_pages(void* address, uint64_t count);
@@ -33,6 +35,8 @@ class RangeAllocator
     uint64_t bitmap_size;

     uint64_t start_index = 0;
+
+    uint64_t alloc_base;
 };

 extern RangeAllocator kernelPMM;
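The new alloc_base field is what turns RangeAllocator from a physical-frame allocator fixed at address 0 into an allocator over an arbitrary range: init_from_mmap() sets it to 0 (preserving kernelPMM's old behavior), the new init(start_address, end_address) overload sets it to the range start, and every address-to-index conversion in the .cpp changes below is offset by it.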
@@ -3,13 +3,19 @@

 namespace Paging
 {
+    enum Flags
+    {
+        ReadWrite = 1 << 0,
+        User = 1 << 1,
+    };
     class VirtualMemoryManager
     {
       public:
         void init(); // fetch page table from cr3
         void init(PageTable* PML4);

-        void map(uint64_t virtualAddress, uint64_t physicalAddress);
+        void map(uint64_t virtualAddress, uint64_t physicalAddress, int flags);
+        void remap(uint64_t virtualAddress, int flags);
         void unmap(uint64_t virtualAddress);
         uint64_t getPhysical(uint64_t virtualAddress);

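Note that the Paging::Flags values deliberately mirror the MAP_* defines (ReadWrite = 1 << 0 matches MAP_READ_WRITE, User = 1 << 1 matches MAP_USER), which is why KernelMemoryManager can forward its flags argument straight into VirtualMemoryManager::map() without any translation, as the .cpp changes below do.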
@@ -4,28 +4,44 @@
 #include "memory/RangeAllocator.h"
 #include "memory/VMM.h"

-void* KernelMemoryManager::get_mapping(void* physicalAddress)
+RangeAllocator userVMMAllocator;
+
+void KernelMemoryManager::init()
 {
-    uint64_t virtualAddress = KernelHeap::request_virtual_page();
-    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress);
+    userVMMAllocator.init((void*)0x1000, (void*)0xC000000);
+}
+
+void* KernelMemoryManager::get_mapping(void* physicalAddress, int flags)
+{
+    uint64_t virtualAddress;
+    if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page();
+    else
+        virtualAddress = KernelHeap::request_virtual_page();
+    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }

-void* KernelMemoryManager::get_unaligned_mapping(void* physicalAddress)
+void* KernelMemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 {
     uint64_t offset = (uint64_t)physicalAddress % 4096;
-    uint64_t virtualAddress = KernelHeap::request_virtual_page();
-    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset);
+    uint64_t virtualAddress;
+    if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page();
+    else
+        virtualAddress = KernelHeap::request_virtual_page();
+    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
     return (void*)(virtualAddress + offset);
 }

-void* KernelMemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count)
+void* KernelMemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t count, int flags)
 {
     uint64_t offset = (uint64_t)physicalAddress % 4096;
-    uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
+    uint64_t virtualAddress;
+    if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_pages(count);
+    else
+        virtualAddress = KernelHeap::request_virtual_pages(count);
     for (uint64_t i = 0; i < count; i++)
     {
-        kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096));
+        kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags);
     }
     return (void*)(virtualAddress + offset);
 }
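KernelMemoryManager::init() carves out 0x1000–0xC000000 (192 MiB) as the virtual window for MAP_USER allocations; starting at 0x1000 rather than 0 keeps the null page unmapped. Presumably it runs once during early boot, after the physical allocator and kernel heap are up, since userVMMAllocator's bitmap is itself allocated through them. A sketch of the assumed call order (the calls exist in this codebase, but their exact boot placement is an assumption):

    kernelPMM.init_from_mmap();
    // ... kernel heap setup ...
    KernelMemoryManager::init(); // prepares the user virtual range allocator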
@@ -50,11 +66,14 @@ void KernelMemoryManager::release_mapping(void* mapping)
     KernelHeap::free_virtual_page((uint64_t)mapping);
 }

-void* KernelMemoryManager::get_page()
+void* KernelMemoryManager::get_page(int flags)
 {
     void* physicalAddress = kernelPMM.request_page();
-    uint64_t virtualAddress = KernelHeap::request_virtual_page();
-    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress);
+    uint64_t virtualAddress;
+    if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_page();
+    else
+        virtualAddress = KernelHeap::request_virtual_page();
+    kernelVMM.map(virtualAddress, (uint64_t)physicalAddress, flags);
     return (void*)virtualAddress;
 }

@@ -67,13 +86,16 @@ void KernelMemoryManager::release_page(void* page)
     KernelHeap::free_virtual_page((uint64_t)page);
 }

-void* KernelMemoryManager::get_pages(uint64_t count)
+void* KernelMemoryManager::get_pages(uint64_t count, int flags)
 {
-    uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
+    uint64_t virtualAddress;
+    if (flags & MAP_USER) virtualAddress = (uint64_t)userVMMAllocator.request_pages(count);
+    else
+        virtualAddress = KernelHeap::request_virtual_pages(count);
     for (uint64_t i = 0; i < count; i++)
     {
         void* physicalAddress = kernelPMM.request_page();
-        kernelVMM.map(virtualAddress + (i * 4096), (uint64_t)physicalAddress);
+        kernelVMM.map(virtualAddress + (i * 4096), (uint64_t)physicalAddress, flags);
     }
     return (void*)virtualAddress;
 }
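As with get_page(), get_pages() picks the virtual range first and then backs each page with whatever frame kernelPMM hands out, so the result is contiguous in virtual memory only; callers that need a mapping of a specific physical region use get_mapping()/get_unaligned_mappings() instead. Note also that the release_* paths above still return addresses to KernelHeap unconditionally; nothing in the visible hunks frees back into userVMMAllocator, so user-range virtual addresses apparently are not reclaimed yet.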
@@ -3,6 +3,7 @@
 #include "memory/RangeAllocator.h"
+#include "assert.h"
 #include "bootboot.h"
 #include "memory/KernelMemoryManager.h"
 #include "memory/Memory.h"
 #include "std/string.h"

@@ -53,6 +54,18 @@ void RangeAllocator::init_from_mmap()
     }

     lock_pages(bitmap_addr, bitmap_size / 4096 + 1);
+
+    alloc_base = 0;
 }

+void RangeAllocator::init(void* start_address, void* end_address)
+{
+    ASSERT(((int64_t)end_address - (int64_t)start_address) > 0);
+    uint64_t total_size = (uint64_t)end_address - (uint64_t)start_address;
+    bitmap_size = total_size / 4096 / 8 + 1;
+    bitmap_addr = (char*)KernelMemoryManager::get_pages(bitmap_size / 4096 + 1);
+    memset(bitmap_addr, 0, bitmap_size);
+    alloc_base = (uint64_t)start_address;
+}
+
 bool RangeAllocator::bitmap_read(uint64_t index)
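For the user range set up in KernelMemoryManager::init() above, the arithmetic works out as: total_size = 0xC000000 - 0x1000 = 0xBFF F000 bytes, or 49151 pages; bitmap_size = 49151 / 8 + 1 = 6144 bytes (one bit per page); and the bitmap itself takes get_pages(6144 / 4096 + 1), i.e. two kernel-heap pages.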
@@ -77,7 +90,37 @@ void* RangeAllocator::request_page()
         start_index = index + 1;
         free_mem -= 4096;
         used_mem += 4096;
-        return (void*)(index * 4096);
+        return (void*)(alloc_base + (index * 4096));
     }

     return 0;
+}
+
+void* RangeAllocator::request_pages(uint64_t count)
+{
+    uint64_t contiguous = 0;
+    uint64_t contiguous_start = 0;
+    for (uint64_t index = start_index; index < (bitmap_size * 8); index++)
+    {
+        if (bitmap_read(index))
+        {
+            contiguous = 0;
+            continue;
+        }
+        if (contiguous == 0)
+        {
+            contiguous_start = index;
+            contiguous++;
+        }
+        else
+            contiguous++;
+        if (contiguous == count)
+        {
+            for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
+            free_mem -= (count * 4096);
+            used_mem += (count * 4096);
+            return (void*)(alloc_base + (contiguous_start * 4096));
+        }
+    }
+
+    return 0;
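request_pages() is a straightforward first-fit scan: it walks the bitmap from start_index, counting a run of free pages and resetting the count whenever it hits a used one; the first run of length count is marked used and returned as a single block. Unlike request_page(), it does not advance start_index, so repeated multi-page allocations rescan from the same starting point.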
@@ -85,7 +128,7 @@ void* RangeAllocator::request_page()

 void RangeAllocator::free_page(void* address)
 {
-    uint64_t index = (uint64_t)address / 4096;
+    uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
     if (!bitmap_read(index)) return;
     bitmap_set(index, false);
     used_mem -= 4096;
@@ -100,7 +143,7 @@ void RangeAllocator::free_pages(void* address, uint64_t count)

 void RangeAllocator::lock_page(void* address)
 {
-    uint64_t index = (uint64_t)address / 4096;
+    uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
     if (bitmap_read(index)) return;
     bitmap_set(index, true);
     used_mem += 4096;
@@ -109,7 +152,7 @@ void RangeAllocator::lock_page(void* address)

 void RangeAllocator::reserve_page(void* address)
 {
-    uint64_t index = (uint64_t)address / 4096;
+    uint64_t index = ((uint64_t)address - (uint64_t)alloc_base) / 4096;
     if (bitmap_read(index)) return;
     bitmap_set(index, true);
     reserved_mem += 4096;
@@ -55,8 +55,7 @@ namespace Paging
         else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }

         PDE = PT->entries[P_i];
-        PDE.Present = false;
-        PDE.ReadWrite = false;
+        memset(&PDE, 0, sizeof(PDE));
         PT->entries[P_i] = PDE;
     }

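Replacing the two explicit flag clears with memset() zeroes the local copy of the entire entry before it is written back, so unmap() now also clears the Address bits and the new UserSuper bit; with the old code, a stale frame address and a lingering UserSuper bit could have been left behind in a non-present entry.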
@@ -101,7 +100,7 @@ namespace Paging
         return PDE.Address;
     }

-    void VirtualMemoryManager::map(uint64_t virtualAddress, uint64_t physicalAddress)
+    void VirtualMemoryManager::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags)
     {
         virtualAddress >>= 12;
         uint64_t P_i = virtualAddress & 0x1ff;
@@ -123,6 +122,7 @@ namespace Paging
             PDE.Address = (uint64_t)PDP >> 12;
             PDE.Present = true;
             PDE.ReadWrite = true;
+            if (flags & User) PDE.UserSuper = true;
             PML4->entries[PDP_i] = PDE;
         }
         else { PDP = (PageTable*)((uint64_t)PDE.Address << 12); }
@@ -136,6 +136,7 @@ namespace Paging
             PDE.Address = (uint64_t)PD >> 12;
             PDE.Present = true;
             PDE.ReadWrite = true;
+            if (flags & User) PDE.UserSuper = true;
             PDP->entries[PD_i] = PDE;
         }
         else { PD = (PageTable*)((uint64_t)PDE.Address << 12); }
@@ -149,13 +150,15 @@ namespace Paging
             PDE.Address = (uint64_t)PT >> 12;
             PDE.Present = true;
             PDE.ReadWrite = true;
+            if (flags & User) PDE.UserSuper = true;
             PD->entries[PT_i] = PDE;
         }
         else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }

         PDE = PT->entries[P_i];
         PDE.Present = true;
-        PDE.ReadWrite = true;
+        if (flags & ReadWrite) PDE.ReadWrite = true;
+        if (flags & User) PDE.UserSuper = true;
         PDE.Address = physicalAddress >> 12;
         PT->entries[P_i] = PDE;
     }
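The asymmetry here matches how x86-64 evaluates permissions: the effective access rights of a page are the intersection of the flags at every level of the hierarchy, so the intermediate PML4/PDP/PD entries stay unconditionally writable and merely gain UserSuper when a user mapping passes through them, while the leaf PT entry is the one that precisely honors the requested ReadWrite/User flags.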