Kernel: Start preparing for recursive paging

Author: apio
Date:   2022-11-08 17:12:59 +01:00
Parent: 2230ebd969
Commit: 6f72f92493

2 changed files with 25 additions and 8 deletions


@@ -38,6 +38,7 @@ namespace VMM
     void propagate_user(PageTable* root, uint64_t vaddr);
     void flush_tlb(uint64_t addr);
+    void flush_tlb_full();
     void decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_index, uint64_t& pd_index,
                          uint64_t& pdp_index);
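
Note: decompose_vaddr() (declared above, unchanged by this commit) presumably splits a virtual address into the four 9-bit x86-64 paging indices; its implementation is not part of this diff. A minimal sketch of that split, assuming standard 4-level paging and that each parameter names the index used to select that structure:

// Sketch only: the real implementation is not shown in this diff.
// Each level of a 4-level x86-64 page table indexes 512 entries (9 bits).
void decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_index, uint64_t& pd_index,
                     uint64_t& pdp_index)
{
    page_index = (vaddr >> 12) & 0x1FF; // entry inside the page table (PT)
    pt_index   = (vaddr >> 21) & 0x1FF; // entry inside the page directory (selects the PT)
    pd_index   = (vaddr >> 30) & 0x1FF; // entry inside the PDPT (selects the PD)
    pdp_index  = (vaddr >> 39) & 0x1FF; // entry inside the PML4 (selects the PDPT)
}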


@@ -61,6 +61,16 @@ void VMM::init()
 {
     kernel_pml4 = (PageTable*)read_cr3();
     current_pml4 = kernel_pml4;
+
+    // Set up recursive paging
+    PageDirectoryEntry& recursive_pde = kernel_pml4->entries[510];
+    memset(&recursive_pde, 0, sizeof(PageDirectoryEntry));
+    recursive_pde.present = true;
+    recursive_pde.read_write = true;
+    recursive_pde.larger_pages = false;
+    recursive_pde.no_execute = true;
+    recursive_pde.set_address((uint64_t)kernel_pml4);
+    flush_tlb_full();
 }
 
 void VMM::unmap(uint64_t vaddr)
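
With entry 510 of the PML4 pointing back at the PML4 itself, every paging structure becomes reachable through a fixed virtual address range, with no temporary mappings needed. The entry is writable but marked no_execute and supervisor-only, so the tables appear as plain kernel data. This commit only installs the entry; the address arithmetic it enables is not part of the diff. A rough sketch of that arithmetic, assuming the recursive slot stays at index 510 and using hypothetical helper names:

// Sketch only: helper names and placement are made up for illustration.
#include <stdint.h>

constexpr uint64_t RECURSIVE_INDEX = 510;

// Canonicalize a 48-bit address (sign-extend bit 47 into bits 48-63).
static uint64_t sign_extend(uint64_t addr)
{
    return (addr & (1ULL << 47)) ? (addr | 0xFFFF000000000000ULL) : addr;
}

// Virtual address of the page table (PT) that maps 'vaddr': the hardware walk
// goes recursive slot -> PML4 index -> PDPT index -> PD index, landing on the PT.
static uint64_t recursive_pt_vaddr(uint64_t vaddr)
{
    uint64_t pml4_i = (vaddr >> 39) & 0x1FF;
    uint64_t pdpt_i = (vaddr >> 30) & 0x1FF;
    uint64_t pd_i   = (vaddr >> 21) & 0x1FF;
    return sign_extend((RECURSIVE_INDEX << 39) | (pml4_i << 30) | (pdpt_i << 21) | (pd_i << 12));
}

// Virtual address of the PML4 itself: the recursive index at every level.
// With index 510 this works out to 0xFFFFFF7FBFDFE000.
static uint64_t recursive_pml4_vaddr()
{
    return sign_extend((RECURSIVE_INDEX << 39) | (RECURSIVE_INDEX << 30) | (RECURSIVE_INDEX << 21) |
                       (RECURSIVE_INDEX << 12));
}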
@@ -87,10 +97,10 @@ void VMM::remap(uint64_t vaddr, int flags)
     if (flags & ReadWrite) propagate_read_write(current_pml4, vaddr);
     else
         pde->read_write = false;
-    if(flags & Execute) pde->no_execute = false;
+    if (flags & Execute) pde->no_execute = false;
     else
         pde->no_execute = true;
-    if(flags & OwnedByTask) pde->owned_by_task = true;
+    if (flags & OwnedByTask) pde->owned_by_task = true;
     else
         pde->owned_by_task = false;
     flush_tlb(vaddr);
@@ -112,8 +122,8 @@ uint64_t VMM::get_flags(uint64_t vaddr)
     uint64_t flags = 0;
     if (pde->user) flags |= User;
     if (pde->read_write) flags |= ReadWrite;
-    if(!pde->no_execute) flags |= Execute;
-    if(pde->owned_by_task) flags |= OwnedByTask;
+    if (!pde->no_execute) flags |= Execute;
+    if (pde->owned_by_task) flags |= OwnedByTask;
     return flags;
 }
@@ -131,6 +141,7 @@ void VMM::map(uint64_t vaddr, uint64_t paddr, int flags)
     {
         unmap(vaddr);
         pde = create_pde_if_not_exists(current_pml4, vaddr);
+        will_flush_tlb = false; // unmap() already flushes the TLB for us
     }
 
     pde->set_address(round_down_to_nearest_page(paddr));
@@ -140,10 +151,10 @@ void VMM::map(uint64_t vaddr, uint64_t paddr, int flags)
     if (flags & ReadWrite) propagate_read_write(current_pml4, vaddr);
     else
         pde->read_write = false;
-    if(flags & Execute) pde->no_execute = false;
+    if (flags & Execute) pde->no_execute = false;
     else
         pde->no_execute = true;
-    if(flags & OwnedByTask) pde->owned_by_task = true;
+    if (flags & OwnedByTask) pde->owned_by_task = true;
     else
         pde->owned_by_task = false;
     if (will_flush_tlb) flush_tlb(vaddr);
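
map(), remap() and get_flags() all speak the same flag bits (ReadWrite, Execute, User, OwnedByTask). A hypothetical usage sketch, assuming the enumerators live in the VMM namespace and that the virtual and physical addresses below are made up:

// Hypothetical call pattern only; addresses and flag combinations are invented.
uint64_t vaddr = 0xffffffff90000000;
uint64_t paddr = 0x42000; // some free physical page

VMM::map(vaddr, paddr, VMM::ReadWrite);              // mapped read/write, non-executable
VMM::remap(vaddr, VMM::ReadWrite | VMM::Execute);    // now executable as well

uint64_t flags = VMM::get_flags(vaddr);
if (flags & VMM::Execute)
{
    // ...
}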
@@ -295,6 +306,11 @@ void VMM::flush_tlb(uint64_t addr)
     asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
 }
 
+void VMM::flush_tlb_full()
+{
+    write_cr3(current_pml4);
+}
+
 void VMM::decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_index, uint64_t& pd_index,
                           uint64_t& pdp_index)
 {
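
flush_tlb_full() relies on the fact that reloading CR3 invalidates all non-global TLB entries, which is what is needed after editing a PML4 entry (a per-page invlpg would not cover the whole affected range). The read_cr3()/write_cr3() helpers are not part of this diff; a minimal sketch of what they typically look like, assuming GCC/Clang inline assembly and that write_cr3() accepts the table pointer:

// Sketch only: the kernel's real helpers may differ in signature and placement.
#include <stdint.h>

static inline uint64_t read_cr3()
{
    uint64_t value;
    asm volatile("mov %%cr3, %0" : "=r"(value));
    return value;
}

static inline void write_cr3(void* value)
{
    // Writing CR3 flushes all non-global TLB entries as a side effect.
    asm volatile("mov %0, %%cr3" : : "r"(value) : "memory");
}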