From 60520dff4cb55568f392bd2da519d0e14fcbab93 Mon Sep 17 00:00:00 2001
From: apio
Date: Mon, 19 Dec 2022 11:55:53 +0100
Subject: [PATCH] Make MemoryManager's scope guards more robust

---
 kernel/src/memory/MemoryManager.cpp | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index b23c8f17..33935d26 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -182,14 +182,17 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
         CHECK_PAGE_ALIGNED(phys);
 
-        // Let's clean up after ourselves if we fail.
-        auto guard = make_scope_guard([=] { unmap_weak(virt, count); });
+        usize pages_mapped = 0;
 
-        while (count--)
+        // Let's clean up after ourselves if we fail.
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak(virt, pages_mapped); });
+
+        while (pages_mapped < count)
         {
             TRY(MMU::map(virt, phys, flags));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
 
         guard.deactivate();
@@ -202,14 +205,16 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
 
         u64 start = virt;
+        usize pages_mapped = 0;
 
-        auto guard = make_scope_guard([=] { unmap_owned(start, count); });
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });
 
-        while (count--)
+        while (pages_mapped < count)
        {
             u64 frame = TRY(alloc_frame());
             TRY(MMU::map(virt, frame, flags));
             virt += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
 
         guard.deactivate();
@@ -220,20 +225,21 @@ namespace MemoryManager
     Result<u64> alloc_for_kernel(usize count, int flags)
     {
         u64 start = TRY(KernelVM::alloc_several_pages(count));
+        usize pages_mapped = 0;
 
-        auto guard = make_scope_guard([=] {
-            KernelVM::free_several_pages(start, count);
-            // unmap_owned will fail as soon as we reach the end of the mapped range. That's fine, exactly what we want.
-            unmap_owned(start, count);
+        auto guard = make_scope_guard([=, &pages_mapped] {
+            KernelVM::free_several_pages(start, pages_mapped);
+            unmap_owned(start, pages_mapped);
         });
 
         u64 virt = start;
 
-        while (count--)
+        while (pages_mapped < count)
         {
             u64 frame = TRY(alloc_frame());
             TRY(MMU::map(virt, frame, flags));
             virt += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
 
         guard.deactivate();
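
Note (not part of the patch): the fix works because each guard's lambda now captures pages_mapped by reference while copying everything else, so when a TRY() failure unwinds the function, the cleanup sees exactly how many pages were actually committed rather than the full requested count. The patch does not show make_scope_guard itself; below is a minimal sketch of the kind of RAII utility it is assumed to be. Only the names ScopeGuard and make_scope_guard come from the patch; m_active, m_callback, and the use of <utility>/std::move are illustrative, and a freestanding kernel would typically substitute its own move helper:

    #include <utility> // for std::move; a kernel tree would use its own equivalent

    // Runs the stored callback on scope exit unless deactivate() was called first.
    template <typename Callback> class ScopeGuard
    {
      public:
        ScopeGuard(Callback callback) : m_callback(std::move(callback))
        {
        }

        // On unwind (early return from TRY), run the cleanup if still armed.
        ~ScopeGuard()
        {
            if (m_active) m_callback();
        }

        // Success path: keep the work done, skip the cleanup.
        void deactivate()
        {
            m_active = false;
        }

      private:
        bool m_active { true };
        Callback m_callback;
    };

    template <typename Callback> ScopeGuard<Callback> make_scope_guard(Callback callback)
    {
        return ScopeGuard<Callback>(std::move(callback));
    }

With a guard of this shape, a failing MMU::map or alloc_frame simply propagates its error out of the function; the guard's destructor then frees or unmaps only the pages_mapped pages that succeeded before the failure, which is why the old comment about unmap_owned failing past the end of the mapped range could be dropped.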