VMM: Fix naming convention
parent 57482e4e93
commit 83e6b8cd21
@@ -7,19 +7,20 @@
 struct PageDirectoryEntry
 {
-    bool Present : 1;
-    bool ReadWrite : 1;
-    bool UserSuper : 1;
-    bool WriteThrough : 1;
-    bool CacheDisabled : 1;
-    bool Accessed : 1;
+    bool present : 1;
+    bool read_write : 1;
+    bool user : 1;
+    bool write_through : 1;
+    bool cache_disabled : 1;
+    bool accessed : 1;
     bool ignore0 : 1;
-    bool LargerPages : 1;
+    bool larger_pages : 1;
     bool ignore1 : 1;
-    uint8_t Available : 3;
-    uint64_t Address : 52;
+    uint8_t available : 3;
+    uint64_t address : 52;

     void set_address(uint64_t addr);
+    uint64_t get_address();
 };

 struct PageTable
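Note (not part of the diff): the renamed bit-fields mirror the x86-64 page-table-entry layout bit for bit. Below is a minimal, host-buildable sketch of that correspondence; bit-field packing is implementation-defined, so this assumes the System V x86-64 ABI (GCC/Clang), where the struct packs into a single 64-bit word.

// Standalone sketch, not kernel code from this commit.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct PageDirectoryEntry
{
    bool present : 1;        // bit 0  - P
    bool read_write : 1;     // bit 1  - R/W
    bool user : 1;           // bit 2  - U/S
    bool write_through : 1;  // bit 3  - PWT
    bool cache_disabled : 1; // bit 4  - PCD
    bool accessed : 1;       // bit 5  - A
    bool ignore0 : 1;        // bit 6
    bool larger_pages : 1;   // bit 7  - PS
    bool ignore1 : 1;        // bit 8
    uint8_t available : 3;   // bits 9-11
    uint64_t address : 52;   // bits 12-63 - physical frame number
};

static_assert(sizeof(PageDirectoryEntry) == 8, "entry must be one 64-bit word");

int main()
{
    PageDirectoryEntry pde {};
    pde.present = true;
    pde.read_write = true;
    pde.address = 0x200000 >> 12; // store the frame number of a 4 KiB-aligned page

    uint64_t raw;
    memcpy(&raw, &pde, sizeof raw);
    printf("raw entry: %#lx\n", raw); // prints 0x200003 (address | R/W | P)
}

The static_assert documents the assumption the paging hardware relies on: the whole entry must fit in one 64-bit word.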
@@ -9,19 +9,19 @@ enum Flags
 };
 namespace VMM
 {
-    void init(); // fetch page table from cr3
+    void init(); // Fetch page table from cr3

-    void map(uint64_t virtualAddress, uint64_t physicalAddress, int flags);
-    void remap(uint64_t virtualAddress, int flags);
-    void unmap(uint64_t virtualAddress);
-    uint64_t getPhysical(uint64_t virtualAddress);
-    uint64_t getFlags(uint64_t virtualAddress);
+    void map(uint64_t vaddr, uint64_t paddr, int flags);
+    void remap(uint64_t vaddr, int flags);
+    void unmap(uint64_t vaddr);
+    uint64_t get_physical(uint64_t vaddr);
+    uint64_t get_flags(uint64_t vaddr);

-    PageDirectoryEntry* find_pde(PageTable* root, uint64_t virtualAddress);
-    PageDirectoryEntry* create_pde_if_not_exists(PageTable* root, uint64_t virtualAddress);
+    PageDirectoryEntry* find_pde(PageTable* root, uint64_t vaddr);
+    PageDirectoryEntry* create_pde_if_not_exists(PageTable* root, uint64_t vaddr);

-    void propagate_read_write(PageTable* root, uint64_t virtualAddress);
-    void propagate_user(PageTable* root, uint64_t virtualAddress);
+    void propagate_read_write(PageTable* root, uint64_t vaddr);
+    void propagate_user(PageTable* root, uint64_t vaddr);

     void flush_tlb(uint64_t addr);
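A hypothetical call site (not from this commit) exercising the renamed API; PMM::request_page(), ASSERT, and the ReadWrite flag are used here as they appear elsewhere in this diff, and vaddr is assumed page-aligned.

void map_one_kernel_page(uint64_t vaddr)
{
    uint64_t paddr = (uint64_t)PMM::request_page(); // grab a free physical frame (page-aligned)
    VMM::map(vaddr, paddr, ReadWrite);              // install the mapping

    ASSERT(VMM::get_physical(vaddr) == paddr);      // round-trip sanity check
    ASSERT(VMM::get_flags(vaddr) & ReadWrite);

    VMM::unmap(vaddr);                              // tear it down again
    PMM::free_page((void*)paddr);
}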
@@ -118,7 +118,7 @@ void* MemoryManager::get_page_at(uint64_t addr, int flags)

 void MemoryManager::release_page(void* page)
 {
-    uint64_t physicalAddress = VMM::getPhysical((uint64_t)page);
+    uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
     ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
     VMM::unmap((uint64_t)page);
     PMM::free_page((void*)physicalAddress);
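For reference, release_page() asserts when the address is not mapped. A more tolerant variant, sketched here for illustration only (the commit keeps the ASSERT), would skip unmapped pages instead of panicking:

void release_page_if_mapped(void* page)
{
    uint64_t paddr = VMM::get_physical((uint64_t)page);
    if (paddr == UINT64_MAX) return; // not mapped, nothing to release
    VMM::unmap((uint64_t)page);
    PMM::free_page((void*)paddr);
}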
@@ -180,7 +180,7 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
     for (uint64_t i = 0; i < count; i++)
     {
         void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE));
-        uint64_t physicalAddress = VMM::getPhysical((uint64_t)page);
+        uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
         ASSERT(physicalAddress != UINT64_MAX);
         VMM::unmap((uint64_t)page);
 #ifdef MM_DEBUG
@@ -5,7 +5,12 @@

 void PageDirectoryEntry::set_address(uint64_t addr)
 {
-    this->Address = (addr >> 12);
+    this->address = (addr >> 12);
 }

+uint64_t PageDirectoryEntry::get_address()
+{
+    return (uint64_t)this->address << 12;
+}
+
 #pragma GCC pop_options
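Worked example (illustrative only, not part of the commit): set_address() stores the 4 KiB frame number, so the low 12 bits are discarded on the way in and come back as zeroes from get_address().

void address_packing_example()
{
    PageDirectoryEntry pde {};
    pde.set_address(0x12345678);      // stores 0x12345678 >> 12 == 0x12345
    uint64_t out = pde.get_address(); // yields 0x12345 << 12  == 0x12345000
    // The low 12 bits (0x678) are gone: an entry can only point at a page-aligned
    // frame, which is why VMM::map() rounds paddr down before calling set_address().
    (void)out;
}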
@@ -16,66 +16,66 @@ void VMM::init()
     asm volatile("mov %%cr3, %0" : "=r"(PML4));
 }

-void VMM::unmap(uint64_t virtualAddress)
+void VMM::unmap(uint64_t vaddr)
 {
-    virtualAddress = Utilities::round_down_to_nearest_page(virtualAddress);
+    vaddr = Utilities::round_down_to_nearest_page(vaddr);

-    PageDirectoryEntry* pde = find_pde(PML4, virtualAddress);
+    PageDirectoryEntry* pde = find_pde(PML4, vaddr);
     if (!pde) return; // Already unmapped

     memset(pde, 0, sizeof(PageDirectoryEntry));
-    flush_tlb(virtualAddress);
+    flush_tlb(vaddr);
 }

-uint64_t VMM::getPhysical(uint64_t virtualAddress)
+uint64_t VMM::get_physical(uint64_t vaddr)
 {
-    PageDirectoryEntry* pde = find_pde(PML4, Utilities::round_down_to_nearest_page(virtualAddress));
+    PageDirectoryEntry* pde = find_pde(PML4, Utilities::round_down_to_nearest_page(vaddr));
     if (!pde) return UINT64_MAX; // Not mapped

-    return pde->Address << 12 | (virtualAddress % PAGE_SIZE);
+    return pde->get_address() | (vaddr % PAGE_SIZE);
 }

-uint64_t VMM::getFlags(uint64_t virtualAddress)
+uint64_t VMM::get_flags(uint64_t vaddr)
 {
-    PageDirectoryEntry* pde = find_pde(PML4, Utilities::round_down_to_nearest_page(virtualAddress));
+    PageDirectoryEntry* pde = find_pde(PML4, Utilities::round_down_to_nearest_page(vaddr));
     if (!pde) return 0; // Not mapped

     uint64_t flags = 0;
-    if (pde->UserSuper) flags |= User;
-    if (pde->ReadWrite) flags |= ReadWrite;
+    if (pde->user) flags |= User;
+    if (pde->read_write) flags |= ReadWrite;
     return flags;
 }

-void VMM::map(uint64_t virtualAddress, uint64_t physicalAddress, int flags)
+void VMM::map(uint64_t vaddr, uint64_t paddr, int flags)
 {
-    virtualAddress = Utilities::round_down_to_nearest_page(virtualAddress);
-    PageDirectoryEntry* pde = find_pde(PML4, virtualAddress);
+    vaddr = Utilities::round_down_to_nearest_page(vaddr);
+    PageDirectoryEntry* pde = find_pde(PML4, vaddr);
     bool will_flush_tlb = true;
     if (!pde)
     {
-        pde = create_pde_if_not_exists(PML4, virtualAddress);
+        pde = create_pde_if_not_exists(PML4, vaddr);
         will_flush_tlb = false;
     }
-    else if (pde->LargerPages)
+    else if (pde->larger_pages)
     {
-        unmap(virtualAddress);
-        pde = create_pde_if_not_exists(PML4, virtualAddress);
+        unmap(vaddr);
+        pde = create_pde_if_not_exists(PML4, vaddr);
         will_flush_tlb = false;
     }

-    pde->set_address(Utilities::round_down_to_nearest_page(physicalAddress));
-    if (flags & User) propagate_user(PML4, virtualAddress);
-    if (flags & ReadWrite) propagate_read_write(PML4, virtualAddress);
-    if (will_flush_tlb) flush_tlb(virtualAddress);
+    pde->set_address(Utilities::round_down_to_nearest_page(paddr));
+    if (flags & User) propagate_user(PML4, vaddr);
+    if (flags & ReadWrite) propagate_read_write(PML4, vaddr);
+    if (will_flush_tlb) flush_tlb(vaddr);
 }

-PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t virtualAddress)
+PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t vaddr)
 {
     uint64_t page_index, pt_index, pd_index, pdp_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;

-    decompose_vaddr(virtualAddress, page_index, pt_index, pd_index, pdp_index);
+    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);

     uint64_t indexes[3] = {pdp_index, pd_index, pt_index};
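decompose_vaddr() is referenced above but not touched by this commit; the following is only a plausible sketch of what it computes, assuming standard 4-level x86-64 paging (9 bits per table index, 12-bit page offset), and matching the way the walk below uses pdp_index against the PML4 first.

static void decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_index,
                            uint64_t& pd_index, uint64_t& pdp_index)
{
    page_index = (vaddr >> 12) & 0x1ff; // selects the 4 KiB page within the PT
    pt_index   = (vaddr >> 21) & 0x1ff; // selects the PT within the PD
    pd_index   = (vaddr >> 30) & 0x1ff; // selects the PD within the PDPT
    pdp_index  = (vaddr >> 39) & 0x1ff; // selects the PDPT within the PML4
}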
@@ -83,31 +83,31 @@ PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t virtualAddress)
          i++) // Walk through the page map level 4, page directory pointer, and page directory to find the page table.
     {
         pde = &pt->entries[indexes[i]];
-        if (!pde->Present) return nullptr;
-        else if (pde->LargerPages)
+        if (!pde->present) return nullptr;
+        else if (pde->larger_pages)
             return pde;
-        else { pt = (PageTable*)((uint64_t)pde->Address << 12); }
+        else { pt = (PageTable*)pde->get_address(); }
     }

     pde = &pt->entries[page_index]; // PT
-    if (!pde->Present) return nullptr;
+    if (!pde->present) return nullptr;
     return pde;
 }

-PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t virtualAddress)
+PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vaddr)
 {
     uint64_t page_index, pt_index, pd_index, pdp_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;

-    decompose_vaddr(virtualAddress, page_index, pt_index, pd_index, pdp_index);
+    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);

     auto pde_create_if_not_present = [&]() {
         pt = (PageTable*)PMM::request_page();
         ASSERT(!(PMM_DID_FAIL(pt)));
         memset(pt, 0, PAGE_SIZE);
         pde->set_address((uint64_t)pt);
-        pde->Present = true;
+        pde->present = true;
     };

     uint64_t indexes[3] = {pdp_index, pd_index, pt_index};
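Reviewer-style observation, not part of the commit: pt = (PageTable*)pde->get_address() treats a physical address as a directly dereferenceable pointer, which only works while page tables are reachable at their physical addresses (identity mapping or a zero offset). A kernel that maps physical memory at a higher-half offset would translate first, roughly as in this hypothetical helper:

PageTable* next_level(PageDirectoryEntry* pde, uint64_t physical_map_base /* hypothetical */)
{
    // With physical_map_base == 0 this degenerates to the cast used in the walk above.
    return (PageTable*)(physical_map_base + pde->get_address());
}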
@@ -115,71 +115,71 @@ PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t virt
     for (int i = 0; i < 3; i++)
     {
         pde = &pt->entries[indexes[i]];
-        if (!pde->Present) { pde_create_if_not_present(); }
-        else if (pde->LargerPages)
+        if (!pde->present) { pde_create_if_not_present(); }
+        else if (pde->larger_pages)
             return pde;
-        else { pt = (PageTable*)((uint64_t)pde->Address << 12); }
+        else { pt = (PageTable*)pde->get_address(); }
     }

     pde = &pt->entries[page_index];
-    if (!pde->Present) { pde->Present = true; }
+    if (!pde->present) { pde->present = true; }
     return pde;
 }

-void VMM::propagate_read_write(PageTable* root, uint64_t virtualAddress)
+void VMM::propagate_read_write(PageTable* root, uint64_t vaddr)
 {
     uint64_t page_index, pt_index, pd_index, pdp_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;

-    decompose_vaddr(virtualAddress, page_index, pt_index, pd_index, pdp_index);
+    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);

     uint64_t indexes[3] = {pdp_index, pd_index, pt_index};

     for (int i = 0; i < 3; i++)
     {
         pde = &pt->entries[indexes[i]];
-        if (!pde->Present) return;
+        if (!pde->present) return;
         else
         {
-            pde->ReadWrite = true;
-            if (pde->LargerPages) return;
-            pt = (PageTable*)((uint64_t)pde->Address << 12);
+            pde->read_write = true;
+            if (pde->larger_pages) return;
+            pt = (PageTable*)pde->get_address();
         }
     }

     pde = &pt->entries[page_index];
-    if (!pde->Present) return;
+    if (!pde->present) return;
     else
-        pde->ReadWrite = true;
+        pde->read_write = true;
 }

-void VMM::propagate_user(PageTable* root, uint64_t virtualAddress)
+void VMM::propagate_user(PageTable* root, uint64_t vaddr)
 {
     uint64_t page_index, pt_index, pd_index, pdp_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;

-    decompose_vaddr(virtualAddress, page_index, pt_index, pd_index, pdp_index);
+    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);

     uint64_t indexes[3] = {pdp_index, pd_index, pt_index};

     for (int i = 0; i < 3; i++)
     {
         pde = &pt->entries[indexes[i]];
-        if (!pde->Present) return;
+        if (!pde->present) return;
         else
         {
-            pde->UserSuper = true;
-            if (pde->LargerPages) return;
-            pt = (PageTable*)((uint64_t)pde->Address << 12);
+            pde->user = true;
+            if (pde->larger_pages) return;
+            pt = (PageTable*)pde->get_address();
         }
     }

     pde = &pt->entries[page_index];
-    if (!pde->Present) return;
+    if (!pde->present) return;
     else
-        pde->UserSuper = true;
+        pde->user = true;
 }

 void VMM::flush_tlb(uint64_t addr)
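flush_tlb() is declared and called throughout this diff, but its body lies outside the hunks shown. A typical single-page invalidation on x86-64 looks like the following; this is an assumption about the implementation, not code taken from the commit.

void VMM::flush_tlb(uint64_t addr)
{
    // Invalidate only the TLB entry covering this virtual address.
    asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
}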
@@ -24,7 +24,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
 {
     kdbgln("sys_mmap: %ld pages at address %p, %s", size / PAGE_SIZE, address,
            real_flags & MAP_READ_WRITE ? "rw" : "ro");
-    if (VMM::getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used.
+    if (VMM::get_physical((uint64_t)address) != (uint64_t)-1) // Address is already used.
     {
         kdbgln("attempt to mmap an already mapped address");
         context->rax = MAP_FAIL(ENOMEM);
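Side note: get_physical() returns UINT64_MAX, i.e. (uint64_t)-1, for unmapped addresses, so the check above could also be expressed through a small hypothetical helper (not in the commit):

static inline bool is_mapped(uint64_t vaddr)
{
    return VMM::get_physical(vaddr) != UINT64_MAX; // UINT64_MAX == (uint64_t)-1
}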
@@ -78,7 +78,7 @@ void sys_munmap(Context* context, void* address, size_t size)
         context->rax = -EINVAL;
         return;
     }
-    uint64_t flags = VMM::getFlags((uint64_t)address);
+    uint64_t flags = VMM::get_flags((uint64_t)address);
     if (!(flags & MAP_USER))
     {
         kdbgln("munmap failed: attempted to unmap a kernel page");