diff --git a/kernel/src/main.cpp b/kernel/src/main.cpp
index 6947c075..05768089 100644
--- a/kernel/src/main.cpp
+++ b/kernel/src/main.cpp
@@ -1,10 +1,12 @@
 #include "Log.h"
 #include "arch/CPU.h"
+#include "arch/MMU.h"
 #include "arch/Serial.h"
 #include "arch/Timer.h"
 #include "boot/Init.h"
 #include "config.h"
 #include "memory/Heap.h"
+#include "memory/KernelVM.h"
 #include "memory/MemoryManager.h"
 #include "thread/Scheduler.h"
 #include
@@ -28,6 +30,7 @@ void heap_thread()
 {
     CPU::disable_interrupts();
     dump_heap_usage();
+    kdbgln("uses %lu vm pages", KernelVM::used() / ARCH_PAGE_SIZE);
 
     while (true) kernel_sleep(UINT64_MAX);
 }
diff --git a/kernel/src/memory/Heap.cpp b/kernel/src/memory/Heap.cpp
index 5cf176e8..faea101d 100644
--- a/kernel/src/memory/Heap.cpp
+++ b/kernel/src/memory/Heap.cpp
@@ -42,17 +42,13 @@ static DoublyLinkedList heap;
 
 static Result allocate_pages(usize count)
 {
-    u64 virt = TRY(KernelVM::alloc_several_pages(count));
-    auto vm_guard = make_scope_guard([&] { KernelVM::free_several_pages(virt, count).value(); });
-    void* const ptr = (void*)TRY(MemoryManager::alloc_at(virt, count, MMU::ReadWrite | MMU::NoExecute));
-    vm_guard.deactivate();
+    void* const ptr = (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
     return (HeapBlock*)ptr;
 }
 
 static Result release_pages(void* ptr, usize count)
 {
-    TRY(KernelVM::free_several_pages((u64)ptr, count));
-    return MemoryManager::unmap_owned((u64)ptr, count);
+    return MemoryManager::unmap_owned_and_free_vm((u64)ptr, count);
 }
 
 // If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index dffbd6cd..8106a353 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -6,6 +6,7 @@
 #include "memory/MemoryMap.h"
 #include
 #include
+#include
 #include
 #include
 #include
@@ -171,6 +172,9 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
         CHECK_PAGE_ALIGNED(phys);
 
+        // Let's clean up after ourselves if we fail.
+        auto guard = make_scope_guard([=] { unmap_weak(virt, count); });
+
         while (count--)
         {
             TRY(MMU::map(virt, phys, flags));
@@ -178,6 +182,8 @@
             phys += ARCH_PAGE_SIZE;
         }
 
+        guard.deactivate();
+
         return {};
     }
 
@@ -187,6 +193,8 @@
 
         u64 start = virt;
 
+        auto guard = make_scope_guard([=] { unmap_owned(start, count); });
+
         while (count--)
         {
             u64 frame = TRY(alloc_frame());
@@ -194,6 +202,32 @@
             virt += ARCH_PAGE_SIZE;
         }
 
+        guard.deactivate();
+
+        return start;
+    }
+
+    Result alloc_for_kernel(usize count, int flags)
+    {
+        u64 start = TRY(KernelVM::alloc_several_pages(count));
+
+        auto guard = make_scope_guard([=] {
+            KernelVM::free_several_pages(start, count);
+            // unmap_owned will fail as soon as we reach the end of the mapped range. That's fine, exactly what we want.
+            unmap_owned(start, count);
+        });
+
+        u64 virt = start;
+
+        while (count--)
+        {
+            u64 frame = TRY(alloc_frame());
+            TRY(MMU::map(virt, frame, flags));
+            virt += ARCH_PAGE_SIZE;
+        }
+
+        guard.deactivate();
+
         return start;
     }
 
@@ -211,6 +245,15 @@
         return {};
     }
 
+    Result unmap_owned_and_free_vm(u64 virt, usize count)
+    {
+        CHECK_PAGE_ALIGNED(virt);
+
+        KernelVM::free_several_pages(virt, count);
+
+        return unmap_owned(virt, count);
+    }
+
     Result unmap_weak(u64 virt, usize count)
     {
         CHECK_PAGE_ALIGNED(virt);
diff --git a/kernel/src/memory/MemoryManager.h b/kernel/src/memory/MemoryManager.h
index 5ef62177..cf3ed2ce 100644
--- a/kernel/src/memory/MemoryManager.h
+++ b/kernel/src/memory/MemoryManager.h
@@ -23,8 +23,10 @@ namespace MemoryManager
     Result map_frames_at(u64 virt, u64 phys, usize count, int flags);
 
     Result alloc_at(u64 virt, usize count, int flags);
+    Result alloc_for_kernel(usize count, int flags);
 
     Result unmap_owned(u64 virt, usize count);
+    Result unmap_owned_and_free_vm(u64 virt, usize count);
     Result unmap_weak(u64 virt, usize count);
 
     usize free();
diff --git a/kernel/src/thread/Scheduler.cpp b/kernel/src/thread/Scheduler.cpp
index 03e2be9c..e6267ea2 100644
--- a/kernel/src/thread/Scheduler.cpp
+++ b/kernel/src/thread/Scheduler.cpp
@@ -24,8 +24,7 @@ namespace Scheduler
         g_idle.ticks_left = 1;
 
         // Map some stack for the idle task
-        u64 idle_stack_vm = KernelVM::alloc_one_page().release_value();
-        MemoryManager::alloc_at(idle_stack_vm, 1, MMU::NoExecute | MMU::ReadWrite).release_value();
+        u64 idle_stack_vm = MemoryManager::alloc_for_kernel(1, MMU::NoExecute | MMU::ReadWrite).release_value();
 
         Stack idle_stack{idle_stack_vm, ARCH_PAGE_SIZE};
         g_idle.set_sp(idle_stack.top());
@@ -48,15 +47,11 @@
     Result new_kernel_thread_impl(Thread* thread)
     {
         // If anything fails, make sure to clean up.
-        auto thread_guard = make_scope_guard([&] { delete thread; });
+        auto guard = make_scope_guard([&] { delete thread; });
 
-        u64 thread_stack_vm = TRY(KernelVM::alloc_several_pages(4));
-        auto vm_guard = make_scope_guard([&] { KernelVM::free_several_pages(thread_stack_vm, 4).value(); });
+        u64 thread_stack_vm = TRY(MemoryManager::alloc_for_kernel(4, MMU::NoExecute | MMU::ReadWrite));
 
-        TRY(MemoryManager::alloc_at(thread_stack_vm, 4, MMU::NoExecute | MMU::ReadWrite));
-
-        thread_guard.deactivate();
-        vm_guard.deactivate();
+        guard.deactivate();
 
         Stack thread_stack{thread_stack_vm, ARCH_PAGE_SIZE * 4};
         thread->set_sp(thread_stack.top());
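
Note (not part of the patch): the intent of the new helpers is visible in the Heap.cpp and Scheduler.cpp hunks above, where a KernelVM allocation plus alloc_at/unmap_owned pair collapses into a single call. Below is a minimal sketch of a hypothetical caller, reusing the TRY, make_scope_guard and MMU flag names already present in this diff; the helper name alloc_scratch_buffer and the Result<u64> template argument (stripped from the declarations above) are assumptions for illustration only.

    // Hypothetical helper, not part of this patch: allocate a 4-page kernel buffer
    // and make sure every early-exit path releases everything it acquired.
    static Result<u64> alloc_scratch_buffer()
    {
        u64 buffer = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute));

        // One call now tears down the frames, the page mappings and the KernelVM reservation.
        auto guard = make_scope_guard([=] { MemoryManager::unmap_owned_and_free_vm(buffer, 4); });

        // ... TRY(...) any further setup that might fail ...

        guard.deactivate();
        return buffer;
    }

Compared to the old KernelVM::alloc_several_pages + MemoryManager::alloc_at pairing, callers only have one resource to track and one cleanup call to make.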