Scheduler: add proper support for user tasks (still faults though)
parent 543fe2885f
commit 11dd165a8e
@@ -18,6 +18,7 @@ namespace Paging
         void remap(uint64_t virtualAddress, int flags);
         void unmap(uint64_t virtualAddress);
         uint64_t getPhysical(uint64_t virtualAddress);
+        uint64_t getFlags(uint64_t virtualAddress);
 
       private:
         PageTable* PML4;
@@ -6,6 +6,7 @@
 #include "interrupts/Interrupts.h"
 #include "io/Serial.h"
 #include "log/Log.h"
+#include "memory/KernelMemoryManager.h"
 #include "memory/RangeAllocator.h"
 #include "memory/VMM.h"
 #include "misc/hang.h"
@@ -42,6 +43,8 @@ void Init::early_init()
     kernelPMM.init_from_mmap();
     kernelVMM.init();
 
+    KernelMemoryManager::init();
+
     InitRD::init();
 
     ASSERT(TextRenderer::try_initialize());
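Note: the commit does not show the body of the new KernelMemoryManager::init(), but it has to run after kernelPMM.init_from_mmap() and kernelVMM.init(), since anything it sets up (e.g. the bitmap behind userVMMAllocator, see RangeAllocator::init below) is backed by get_pages(), which pulls physical frames from kernelPMM and maps them through kernelVMM. A hypothetical sketch of what it might do, with the address window purely an assumption:

    // Hypothetical: carve out a user-space virtual window and hand it to the
    // user virtual-range allocator. The window bounds are illustrative only.
    void KernelMemoryManager::init()
    {
        userVMMAllocator.init((void*)0x0000000000400000, (void*)0x0000000000800000);
    }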
@@ -49,21 +49,30 @@ void* KernelMemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_
 void KernelMemoryManager::release_unaligned_mapping(void* mapping)
 {
     uint64_t offset = (uint64_t)mapping % 4096;
+    uint64_t flags = kernelVMM.getFlags((uint64_t)mapping);
     kernelVMM.unmap((uint64_t)mapping - offset);
-    KernelHeap::free_virtual_page((uint64_t)mapping - offset);
+    if (flags & MAP_USER) userVMMAllocator.free_page((void*)((uint64_t)mapping - offset));
+    else
+        KernelHeap::free_virtual_page((uint64_t)mapping - offset);
 }
 
 void KernelMemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
 {
     uint64_t offset = (uint64_t)mapping % 4096;
-    KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
+    uint64_t flags = kernelVMM.getFlags((uint64_t)mapping);
+    if (flags & MAP_USER) userVMMAllocator.free_pages((void*)((uint64_t)mapping - offset), count);
+    else
+        KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
     for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); }
 }
 
 void KernelMemoryManager::release_mapping(void* mapping)
 {
     kernelVMM.unmap((uint64_t)mapping);
-    KernelHeap::free_virtual_page((uint64_t)mapping);
+    uint64_t flags = kernelVMM.getFlags((uint64_t)mapping);
+    if (flags & MAP_USER) userVMMAllocator.free_page(mapping);
+    else
+        KernelHeap::free_virtual_page((uint64_t)mapping);
 }
 
 void* KernelMemoryManager::get_page(int flags)
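Review note: release_unaligned_mapping, release_unaligned_mappings and release_pages all read the mapping's flags before tearing it down, but release_mapping queries getFlags() only after kernelVMM.unmap(). That still returns the right answer only because unmap (changed in the Paging hunks below) now clears just the Present bit and leaves the flag bits intact. A minimal sketch of the query-first pattern these functions share, using this repository's names (an illustration, not the committed code):

    // Query flags first, tear the mapping down second, then return the virtual
    // range to whichever allocator handed it out (user window vs. kernel heap).
    static void release_virtual_range(void* base, uint64_t count)
    {
        uint64_t flags = kernelVMM.getFlags((uint64_t)base);             // 1. query
        for (uint64_t i = 0; i < count; i++)
            kernelVMM.unmap((uint64_t)base + (i * 4096));                // 2. unmap
        if (flags & MAP_USER) userVMMAllocator.free_pages(base, count);  // 3a. user range
        else
            KernelHeap::free_virtual_pages((uint64_t)base, count);      // 3b. kernel range
    }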
@@ -81,9 +90,12 @@ void KernelMemoryManager::release_page(void* page)
 {
     uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
     ASSERT(physicalAddress != UINT64_MAX);
+    uint64_t flags = kernelVMM.getFlags((uint64_t)page);
     kernelVMM.unmap((uint64_t)page);
     kernelPMM.free_page((void*)physicalAddress);
-    KernelHeap::free_virtual_page((uint64_t)page);
+    if (flags & MAP_USER) userVMMAllocator.free_page(page);
+    else
+        KernelHeap::free_virtual_page((uint64_t)page);
 }
 
 void* KernelMemoryManager::get_pages(uint64_t count, int flags)
@@ -102,6 +114,7 @@ void* KernelMemoryManager::get_pages(uint64_t count, int flags)
 
 void KernelMemoryManager::release_pages(void* pages, uint64_t count)
 {
+    uint64_t flags = kernelVMM.getFlags((uint64_t)pages);
     for (uint64_t i = 0; i < count; i++)
     {
         void* page = (void*)((uint64_t)pages + (i * 4096));
@@ -110,5 +123,7 @@ void KernelMemoryManager::release_pages(void* pages, uint64_t count)
         kernelVMM.unmap((uint64_t)page);
         kernelPMM.free_page((void*)physicalAddress);
     }
-    KernelHeap::free_virtual_pages((uint64_t)pages, count);
+    if (flags & MAP_USER) userVMMAllocator.free_pages(pages, count);
+    else
+        KernelHeap::free_virtual_pages((uint64_t)pages, count);
 }
@@ -60,8 +60,8 @@ void RangeAllocator::init_from_mmap()
 
 void RangeAllocator::init(void* start_address, void* end_address)
 {
-    ASSERT(((int64_t)start_address - (int64_t)end_address) > 0);
-    uint64_t total_size = (uint64_t)start_address - (uint64_t)end_address;
+    ASSERT(((int64_t)end_address - (int64_t)start_address) > 0);
+    uint64_t total_size = (uint64_t)end_address - (uint64_t)start_address;
     bitmap_size = total_size / 4096 / 8 + 1;
     bitmap_addr = (char*)KernelMemoryManager::get_pages(bitmap_size / 4096 + 1);
     memset(bitmap_addr, 0, bitmap_size);
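With the operands the right way around, the bitmap sizing works out as intended: one bit tracks one 4 KiB page. A worked example for a hypothetical 16 MiB range:

    uint64_t total_size  = 16 * 1024 * 1024;       // 16 MiB range (assumed)
    uint64_t pages       = total_size / 4096;      // 4096 pages to track
    uint64_t bitmap_size = pages / 8 + 1;          // 513 bytes (+1 covers the
                                                   // integer-division remainder)
    uint64_t backing     = bitmap_size / 4096 + 1; // 1 page from get_pages()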
@@ -55,7 +55,7 @@ namespace Paging
         else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
 
         PDE = PT->entries[P_i];
-        memset(&PDE, 0, sizeof(PDE));
+        PDE.Present = false;
         PT->entries[P_i] = PDE;
     }
 
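This change is what keeps getFlags() usable on a just-unmapped entry: only Present is cleared, so the flag bits survive. Roughly:

    // before unmap: Present = 1, ReadWrite = 1, UserSuper = 1, Address = ...
    // after  unmap: Present = 0, ReadWrite = 1, UserSuper = 1, Address = ...
    // The old memset() zeroed the whole entry, so getFlags() on a freed page
    // would have reported 0 and release_mapping() would always have taken the
    // KernelHeap branch, even for MAP_USER pages.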
@@ -100,6 +100,51 @@ namespace Paging
         return PDE.Address;
     }
 
+    uint64_t VirtualMemoryManager::getFlags(uint64_t virtualAddress)
+    {
+        virtualAddress >>= 12;
+        uint64_t P_i = virtualAddress & 0x1ff;
+        virtualAddress >>= 9;
+        uint64_t PT_i = virtualAddress & 0x1ff;
+        virtualAddress >>= 9;
+        uint64_t PD_i = virtualAddress & 0x1ff;
+        virtualAddress >>= 9;
+        uint64_t PDP_i = virtualAddress & 0x1ff;
+
+        PageDirectoryEntry PDE;
+
+        PDE = PML4->entries[PDP_i];
+        PageTable* PDP;
+        if (!PDE.Present)
+        {
+            return 0; // Not mapped
+        }
+        else { PDP = (PageTable*)((uint64_t)PDE.Address << 12); }
+
+        PDE = PDP->entries[PD_i];
+        PageTable* PD;
+        if (!PDE.Present)
+        {
+            return 0; // Not mapped
+        }
+        else { PD = (PageTable*)((uint64_t)PDE.Address << 12); }
+
+        PDE = PD->entries[PT_i];
+        PageTable* PT;
+        if (!PDE.Present)
+        {
+            return 0; // Not mapped
+        }
+        else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
+
+        uint64_t flags = 0;
+
+        PDE = PT->entries[P_i];
+        if (PDE.UserSuper) flags |= User;
+        if (PDE.ReadWrite) flags |= ReadWrite;
+        return flags;
+    }
+
     void VirtualMemoryManager::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags)
     {
         virtualAddress >>= 12;
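The repeated shift-and-mask in getFlags() splits the canonical virtual address into the four 9-bit table indices, the low 12 bits being the page offset. Equivalent one-liners for a hypothetical address:

    uint64_t va    = 0x00007FFFDEADB000;  // hypothetical canonical address
    uint64_t P_i   = (va >> 12) & 0x1ff;  // index into the PT   (selects the 4 KiB page)
    uint64_t PT_i  = (va >> 21) & 0x1ff;  // index into the PD   (selects a PT)
    uint64_t PD_i  = (va >> 30) & 0x1ff;  // index into the PDP  (selects a PD)
    uint64_t PDP_i = (va >> 39) & 0x1ff;  // index into the PML4 (selects a PDP)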
@@ -156,6 +201,7 @@ namespace Paging
         else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
 
         PDE = PT->entries[P_i];
         memset(&PDE, 0, sizeof(PDE));
         PDE.Present = true;
         if (flags & ReadWrite) PDE.ReadWrite = true;
+        if (flags & User) PDE.UserSuper = true;
@@ -87,8 +87,9 @@ void Scheduler::add_user_task(void (*task)(void))
     Task* new_task =
         (Task*)KernelMemoryManager::get_page(); // FIXME: allocate memory the size of Task, not 4 KB for each task
     new_task->id = free_tid++;
-    new_task->regs.rip = (uint64_t)task;
-    new_task->allocated_stack = (uint64_t)KernelMemoryManager::get_pages(4); // 16 KB is enough for everyone, right?
+    new_task->regs.rip = (uint64_t)KernelMemoryManager::get_unaligned_mapping((void*)(uint64_t)task, MAP_USER);
+    new_task->allocated_stack =
+        (uint64_t)KernelMemoryManager::get_pages(4, MAP_READ_WRITE | MAP_USER); // 16 KB is enough for everyone, right?
     new_task->regs.rsp = new_task->allocated_stack + (4096 * 4) - sizeof(uintptr_t);
     new_task->regs.cs = 0x18 | 0x03;
     new_task->regs.ss = 0x20 | 0x03;
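On the selectors: OR-ing in 0x03 sets the requested privilege level (RPL) to ring 3, which is what makes the task run as user code once these values are loaded on the return into the task. This assumes GDT entries 3 and 4 hold the user code and data descriptors:

    // x86_64 selector layout: [index:13][TI:1][RPL:2]
    // 0x18 | 0x03 = 0x1b -> GDT entry 3 (user code, assumed), RPL 3
    // 0x20 | 0x03 = 0x23 -> GDT entry 4 (user data, assumed), RPL 3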