Compare commits
10 Commits
cf160d1260...8daffa876c
Author | SHA1 | Date
---|---|---
 | 8daffa876c |
 | 28469497e9 |
 | d3cb642e5f |
 | 0ee9bd7290 |
 | eaea4603c6 |
 | 4021cb3ac0 |
 | ad9c7af0bf |
 | 950f4ef608 |
 | 525d567af6 |
 | c9ebe89899 |
@@ -3,7 +3,8 @@ MOON_SRC := $(MOON_DIR)/src
 MOON_OBJ := $(MOON_DIR)/lib
 MOON_BIN := $(MOON_DIR)/bin
 
-CFLAGS := -pedantic -Wall -Wextra -Werror -Wfloat-equal -Wdisabled-optimization -Wformat=2 -Winit-self -Wmissing-include-dirs -Wswitch-default -Wcast-qual -Wundef -Wcast-align -Wwrite-strings -Wlogical-op -Wredundant-decls -Wshadow -Wconversion -Os -ffreestanding -fstack-protector-all -fno-omit-frame-pointer -mno-red-zone -mno-mmx -mno-sse -mno-sse2 -fshort-wchar -mcmodel=kernel -I$(MOON_DIR)/include -isystem $(MOON_DIR)/include/std
+CFLAGS ?= -Os
+CFLAGS := ${CFLAGS} -pedantic -Wall -Wextra -Werror -Wfloat-equal -Wdisabled-optimization -Wformat=2 -Winit-self -Wmissing-include-dirs -Wswitch-default -Wcast-qual -Wundef -Wcast-align -Wwrite-strings -Wlogical-op -Wredundant-decls -Wshadow -Wconversion -ffreestanding -fstack-protector-all -fno-omit-frame-pointer -mno-red-zone -mno-mmx -mno-sse -mno-sse2 -fshort-wchar -mcmodel=kernel -I$(MOON_DIR)/include -isystem $(MOON_DIR)/include/std
 CXXFLAGS := -fno-rtti -fno-exceptions -Wsign-promo -Wstrict-null-sentinel -Wctor-dtor-privacy
 ASMFLAGS := -felf64
 LDFLAGS := -T$(MOON_DIR)/moon.ld -nostdlib -lgcc -Wl,--build-id=none -z max-page-size=0x1000 -mno-red-zone -mcmodel=kernel
@@ -1,3 +1,5 @@
+#define MODULE "kheap"
+
 #include "memory/KernelHeap.h"
 #include "assert.h"
 
@@ -68,7 +70,7 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
 
 void KernelHeap::free_virtual_page(uint64_t address)
 {
-    ASSERT(address >= ALLOC_BASE && address < ALLOC_END);
+    if (address < ALLOC_BASE || address >= ALLOC_END) return;
     uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
     bitmap_set(index, false);
     if (start_index > index) start_index = index;
@@ -76,7 +78,7 @@ void KernelHeap::free_virtual_page(uint64_t address)
 
 void KernelHeap::free_virtual_pages(uint64_t address, uint64_t count)
 {
-    ASSERT(address >= ALLOC_BASE && address < ALLOC_END);
+    if (address < ALLOC_BASE || address >= ALLOC_END) return;
     uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
     for (uint64_t i = 0; i < count; i++) { bitmap_set(index + i, false); }
    if (start_index > index) start_index = index;
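
Aside (not part of the diff): the two kheap hunks above swap a hard ASSERT for a range check, so freeing an address outside the kernel-heap window becomes a silent no-op instead of a kernel panic. A minimal standalone C++ sketch of that guard pattern, using illustrative ALLOC_BASE/ALLOC_END/PAGE_SIZE values rather than the kernel's real constants:

    #include <cstdint>
    #include <cstdio>

    // Illustrative window only; the kernel's real constants live in the kheap code.
    constexpr uint64_t ALLOC_BASE = 0xffff'ffff'c000'0000;
    constexpr uint64_t ALLOC_END  = 0xffff'ffff'd000'0000;
    constexpr uint64_t PAGE_SIZE  = 4096;

    // Mirrors the early-return behaviour the diff introduces: an out-of-range
    // address is ignored instead of tripping an assertion.
    bool free_virtual_page(uint64_t address)
    {
        if (address < ALLOC_BASE || address >= ALLOC_END) return false;
        uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
        std::printf("would clear bitmap bit %llu\n", (unsigned long long)index);
        return true;
    }

    int main()
    {
        free_virtual_page(0x1000);              // outside the window: silent no-op
        free_virtual_page(ALLOC_BASE + 0x1000); // inside: frees page index 1
    }

The bool return is only for the demo; the kernel versions stay void and simply return.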
@@ -158,13 +158,13 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
         // smaller range, so this might not be fatal.
         {
 #ifdef MM_DEBUG
-            kwarnln("OOM while allocating page %ld of memory. this might be recoverable...");
+            kwarnln("OOM while allocating page %ld of memory. this might be recoverable...", i);
 #endif
             return 0;
         }
         kernelVMM.map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
 #ifdef MM_DEBUG
-        kdbgln("allocating virtual %lx, physical %p", virtualAddress + (i * PAGE_SIZE), physicalAddress);
+        kdbgln("allocating virtual %lx, physical %p", addr + (i * PAGE_SIZE), physicalAddress);
 #endif
     }
     return (void*)addr;
@@ -36,7 +36,17 @@ namespace Paging
         {
             return; // Already unmapped
         }
-        else { PDP = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages)
+            {
+                PDE.Present = false;
+                PDE.LargerPages = false;
+                PML4->entries[PDP_i] = PDE;
+                goto invalidate;
+            }
+            PDP = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
 
         PDE = PDP->entries[PD_i];
         PageTable* PD;
@@ -44,7 +54,17 @@ namespace Paging
         {
             return; // Already unmapped
         }
-        else { PD = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages)
+            {
+                PDE.Present = false;
+                PDE.LargerPages = false;
+                PDP->entries[PD_i] = PDE;
+                goto invalidate;
+            }
+            PD = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
 
         PDE = PD->entries[PT_i];
         PageTable* PT;
@@ -52,11 +72,23 @@ namespace Paging
         {
             return; // Already unmapped
         }
-        else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages)
+            {
+                PDE.LargerPages = false;
+                PDE.Present = false;
+                PD->entries[PT_i] = PDE;
+                goto invalidate;
+            }
+            PT = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
 
         PDE = PT->entries[P_i];
         PDE.Present = false;
         PT->entries[P_i] = PDE;
+    invalidate:
+        asm volatile("invlpg (%0)" : : "r"(virtualAddress) : "memory");
     }
 
     uint64_t VirtualMemoryManager::getPhysical(uint64_t virtualAddress)
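
Aside (not part of the diff): the unmap hunks above add a LargerPages branch at each level of the walk, so a huge-page mapping is removed by clearing Present and LargerPages on the directory entry itself and jumping to a shared invlpg at the end, instead of descending into a page table that does not exist. A standalone sketch of that idea for a single level, with a simplified entry type whose field names merely echo the diff:

    #include <cstdint>

    // Simplified page-table entry, for illustration only.
    struct Entry {
        bool Present = false;
        bool LargerPages = false; // set on an entry that maps a huge page directly
        uint64_t Address = 0;     // physical frame number (physical address >> 12)
    };
    struct PageTable { Entry entries[512]; };

    static inline void invalidate(uint64_t virtualAddress)
    {
        asm volatile("invlpg (%0)" : : "r"(virtualAddress) : "memory");
    }

    // Tear down one level of the walk. If the entry maps a huge page, clearing it
    // here removes the whole region at once; otherwise the caller descends into
    // the child table exactly as the diff does.
    void unmap_at_level(PageTable* table, uint64_t index, uint64_t virtualAddress)
    {
        Entry e = table->entries[index];
        if (!e.Present) return; // already unmapped
        if (e.LargerPages)
        {
            e.Present = false;
            e.LargerPages = false;
            table->entries[index] = e;
            invalidate(virtualAddress); // flush the stale translation
            return;
        }
        // ...otherwise continue the walk with (PageTable*)(e.Address << 12).
    }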
@@ -78,7 +110,11 @@ namespace Paging
         {
             return UINT64_MAX; // Not mapped
         }
-        else { PDP = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages) return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
+            PDP = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
 
         PDE = PDP->entries[PD_i];
         PageTable* PD;
@@ -86,7 +122,11 @@ namespace Paging
         {
             return UINT64_MAX; // Not mapped
         }
-        else { PD = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages) return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
+            PD = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
 
         PDE = PD->entries[PT_i];
         PageTable* PT;
@@ -94,13 +134,18 @@ namespace Paging
         {
             return UINT64_MAX; // Not mapped
         }
-        else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages) return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
+            PT = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
 
         PDE = PT->entries[P_i];
-        return PDE.Address << 12;
+        if (!PDE.Present) return UINT64_MAX;
+        return PDE.Address << 12 | (virtualAddress & PAGE_SIZE);
     }
 
-    uint64_t VirtualMemoryManager::getFlags(uint64_t virtualAddress)
+    uint64_t VirtualMemoryManager::getFlags(uint64_t virtualAddress) // FIXME: Add support for larger pages to getFlags.
     {
         virtualAddress >>= 12;
         uint64_t P_i = virtualAddress & 0x1ff;
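
Aside (not part of the diff): getPhysical now returns as soon as it meets a LargerPages entry and reports UINT64_MAX for a non-present final entry, instead of assuming the walk always reaches a 4 KiB page table. For reference, the indices used at each level come from slicing the virtual address into 9-bit groups above the 12-bit page offset, as the context lines (virtualAddress >>= 12; ... & 0x1ff) do. A standalone sketch of that decomposition, with descriptive names that do not exactly match the diff's locals:

    #include <cstdint>
    #include <cstdio>

    // Indices into the four paging levels plus the in-page offset.
    struct PageIndices { uint64_t PML4_i, PDP_i, PD_i, PT_i, offset; };

    PageIndices decompose(uint64_t virtualAddress)
    {
        PageIndices idx;
        idx.offset = virtualAddress & 0xfff; // 12-bit offset inside a 4 KiB page
        uint64_t v = virtualAddress >> 12;
        idx.PT_i   = v & 0x1ff; v >>= 9;     // bits 12..20
        idx.PD_i   = v & 0x1ff; v >>= 9;     // bits 21..29
        idx.PDP_i  = v & 0x1ff; v >>= 9;     // bits 30..38
        idx.PML4_i = v & 0x1ff;              // bits 39..47
        return idx;
    }

    int main()
    {
        PageIndices i = decompose(0xffffffff80201123);
        std::printf("PML4=%llu PDP=%llu PD=%llu PT=%llu offset=0x%llx\n",
                    (unsigned long long)i.PML4_i, (unsigned long long)i.PDP_i,
                    (unsigned long long)i.PD_i, (unsigned long long)i.PT_i,
                    (unsigned long long)i.offset);
    }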
@@ -171,7 +216,24 @@ namespace Paging
             if (flags & User) PDE.UserSuper = true;
             PML4->entries[PDP_i] = PDE;
         }
-        else { PDP = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages)
+            {
+                unmap(virtualAddress);
+                PDE.LargerPages = false;
+                PML4->entries[PDP_i] = PDE;
+                PDP = (PageTable*)PMM::request_page();
+                ASSERT(!(PMM_DID_FAIL(PDP)));
+                memset(PDP, 0, PAGE_SIZE);
+                PDE.set_address((uint64_t)PDP);
+                PDE.Present = true;
+                PDE.ReadWrite = true;
+                if (flags & User) PDE.UserSuper = true;
+                return map(virtualAddress, physicalAddress, flags);
+            }
+            PDP = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
         if ((flags & User) && !PDE.UserSuper)
         {
             PDE.UserSuper = true;
@@ -191,7 +253,24 @@ namespace Paging
             if (flags & User) PDE.UserSuper = true;
             PDP->entries[PD_i] = PDE;
         }
-        else { PD = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages)
+            {
+                unmap(virtualAddress);
+                PDE.LargerPages = false;
+                PDP->entries[PD_i] = PDE;
+                PD = (PageTable*)PMM::request_page();
+                ASSERT(!(PMM_DID_FAIL(PD)));
+                memset(PD, 0, PAGE_SIZE);
+                PDE.set_address((uint64_t)PD);
+                PDE.Present = true;
+                PDE.ReadWrite = true;
+                if (flags & User) PDE.UserSuper = true;
+                return map(virtualAddress, physicalAddress, flags);
+            }
+            PD = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
         if ((flags & User) && !PDE.UserSuper)
         {
             PDE.UserSuper = true;
@@ -211,7 +290,23 @@ namespace Paging
             if (flags & User) PDE.UserSuper = true;
             PD->entries[PT_i] = PDE;
         }
-        else { PT = (PageTable*)((uint64_t)PDE.Address << 12); }
+        else
+        {
+            if (PDE.LargerPages)
+            {
+                unmap(virtualAddress);
+                PDE.LargerPages = false;
+                PT = (PageTable*)PMM::request_page();
+                ASSERT(!(PMM_DID_FAIL(PT)));
+                memset(PT, 0, PAGE_SIZE);
+                PDE.set_address((uint64_t)PT);
+                PDE.Present = true;
+                PDE.ReadWrite = true;
+                if (flags & User) PDE.UserSuper = true;
+                PD->entries[PT_i] = PDE;
+            }
+            PT = (PageTable*)((uint64_t)PDE.Address << 12);
+        }
         if ((flags & User) && !PDE.UserSuper)
         {
             PDE.UserSuper = true;
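
Aside (not part of the diff): the map() hunks above handle the case where the walk hits an entry that currently maps a huge page: the old mapping is unmapped, LargerPages is cleared, a fresh zeroed table is allocated from the PMM, and at the upper levels map() is re-entered so the walk continues through the new table. A condensed standalone sketch of that demote step at one level, with simplified stand-in types and a hypothetical alloc_zeroed_table() in place of PMM::request_page() plus memset:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct Entry {
        bool Present = false, ReadWrite = false, UserSuper = false, LargerPages = false;
        uint64_t Address = 0; // physical frame number (physical address >> 12)
        void set_address(uint64_t a) { Address = a >> 12; }
    };
    struct PageTable { Entry entries[512]; };

    constexpr uint64_t PAGE_SIZE = 4096;

    // Hypothetical helper standing in for PMM::request_page() + memset in the diff.
    PageTable* alloc_zeroed_table()
    {
        void* p = std::aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        std::memset(p, 0, PAGE_SIZE);
        return static_cast<PageTable*>(p);
    }

    // If the entry currently maps a huge page, demote it: drop the huge mapping and
    // point the entry at a fresh, empty next-level table so a 4 KiB mapping can be
    // installed. (The kernel version also unmap()s the old huge mapping first and
    // then re-enters map() to finish the walk through the new table.)
    PageTable* demote_if_huge(PageTable* table, uint64_t index, bool user)
    {
        Entry e = table->entries[index];
        if (!e.LargerPages) return reinterpret_cast<PageTable*>(e.Address << 12);
        e.LargerPages = false;
        PageTable* child = alloc_zeroed_table();
        e.set_address(reinterpret_cast<uint64_t>(child));
        e.Present = true;
        e.ReadWrite = true;
        if (user) e.UserSuper = true;
        table->entries[index] = e; // publish the demoted entry
        return child;
    }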
@@ -156,12 +156,13 @@ void Scheduler::reap_task(Task* task)
     kinfoln("reaping task %ld, exited with code %ld", exiting_task->id, exiting_task->exit_status);
     if (exiting_task->allocated_stack)
         MemoryManager::release_pages((void*)exiting_task->allocated_stack, TASK_PAGES_IN_STACK);
-    if (exiting_task->image)
+    if (exiting_task->image) // FIXME: Also free pages the task has mmap-ed but not munmap-ed.
     {
         for (uint64_t i = 0; i < exiting_task->image->section_count; i++)
         {
             ELFSection& section = exiting_task->image->sections[i];
             kdbgln("Task was using region %lx, which used %ld pages", section.base, section.pages);
+            MemoryManager::release_pages((void*)section.base, section.pages);
         }
         kfree(exiting_task->image);
     }
tools/build-debug.sh  0  Normal file → Executable file
tools/gdb.sh  0  Normal file → Executable file