Improve cleanup on MemoryManager failure + add methods that use KernelVM
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
parent 814672c771
commit cedcfa9c63
@@ -1,10 +1,12 @@
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
#include "arch/Serial.h"
#include "arch/Timer.h"
#include "boot/Init.h"
#include "config.h"
#include "memory/Heap.h"
#include "memory/KernelVM.h"
#include "memory/MemoryManager.h"
#include "thread/Scheduler.h"
#include <luna/Result.h>
@@ -28,6 +30,7 @@ void heap_thread()
{
    CPU::disable_interrupts();
    dump_heap_usage();
    kdbgln("uses %lu vm pages", KernelVM::used() / ARCH_PAGE_SIZE);
    while (true) kernel_sleep(UINT64_MAX);
}
@@ -42,17 +42,13 @@ static DoublyLinkedList<HeapBlock> heap;

static Result<HeapBlock*> allocate_pages(usize count)
{
    u64 virt = TRY(KernelVM::alloc_several_pages(count));
    auto vm_guard = make_scope_guard([&] { KernelVM::free_several_pages(virt, count).value(); });
    void* const ptr = (void*)TRY(MemoryManager::alloc_at(virt, count, MMU::ReadWrite | MMU::NoExecute));
    vm_guard.deactivate();
    void* const ptr = (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
    return (HeapBlock*)ptr;
}

static Result<void> release_pages(void* ptr, usize count)
{
    TRY(KernelVM::free_several_pages((u64)ptr, count));
    return MemoryManager::unmap_owned((u64)ptr, count);
    return MemoryManager::unmap_owned_and_free_vm((u64)ptr, count);
}

// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
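
After this change, the heap's page helpers reduce to single MemoryManager calls, and the old vm_guard is no longer needed here because the KernelVM bookkeeping now happens inside MemoryManager. The resulting functions, as best they can be reconstructed from the paired old/new lines above, are:

static Result<HeapBlock*> allocate_pages(usize count)
{
    // alloc_for_kernel reserves kernel VM, maps fresh frames, and cleans up after itself on failure.
    void* const ptr = (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
    return (HeapBlock*)ptr;
}

static Result<void> release_pages(void* ptr, usize count)
{
    // Unmaps the pages, frees their frames, and releases the kernel VM range in one call.
    return MemoryManager::unmap_owned_and_free_vm((u64)ptr, count);
}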
@@ -6,6 +6,7 @@
#include "memory/MemoryMap.h"
#include <luna/Alignment.h>
#include <luna/Bitmap.h>
#include <luna/ScopeGuard.h>
#include <luna/String.h>
#include <luna/SystemError.h>
#include <luna/Types.h>
@@ -171,6 +172,9 @@ namespace MemoryManager
        CHECK_PAGE_ALIGNED(virt);
        CHECK_PAGE_ALIGNED(phys);

        // Let's clean up after ourselves if we fail.
        auto guard = make_scope_guard([=] { unmap_weak(virt, count); });

        while (count--)
        {
            TRY(MMU::map(virt, phys, flags));
@@ -178,6 +182,8 @@ namespace MemoryManager
            phys += ARCH_PAGE_SIZE;
        }

        guard.deactivate();

        return {};
    }
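
The cleanup here relies on make_scope_guard from <luna/ScopeGuard.h>, which this diff includes but does not show. A minimal sketch of the behaviour the call sites assume (an RAII object that runs a callback on scope exit unless deactivate() is called first) could look like the following; this is a hypothetical sketch, the actual luna implementation may differ:

#include <utility> // the kernel presumably has its own move(); std::move keeps this sketch self-contained

// Hypothetical sketch, not the actual <luna/ScopeGuard.h>.
template <typename Callback> class ScopeGuard
{
  public:
    explicit ScopeGuard(Callback callback) : m_callback(std::move(callback))
    {
    }

    // Run the cleanup callback unless the guard was deactivated.
    ~ScopeGuard()
    {
        if (m_active) m_callback();
    }

    // Called on the success path so the cleanup is skipped.
    void deactivate()
    {
        m_active = false;
    }

  private:
    Callback m_callback;
    bool m_active{true};
};

template <typename Callback> ScopeGuard<Callback> make_scope_guard(Callback callback)
{
    return ScopeGuard<Callback>(std::move(callback));
}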
@@ -187,6 +193,8 @@ namespace MemoryManager

        u64 start = virt;

        auto guard = make_scope_guard([=] { unmap_owned(start, count); });

        while (count--)
        {
            u64 frame = TRY(alloc_frame());
@@ -194,6 +202,32 @@ namespace MemoryManager
            virt += ARCH_PAGE_SIZE;
        }

        guard.deactivate();

        return start;
    }

    Result<u64> alloc_for_kernel(usize count, int flags)
    {
        u64 start = TRY(KernelVM::alloc_several_pages(count));

        auto guard = make_scope_guard([=] {
            KernelVM::free_several_pages(start, count);
            // unmap_owned will fail as soon as we reach the end of the mapped range. That's fine, exactly what we want.
            unmap_owned(start, count);
        });

        u64 virt = start;

        while (count--)
        {
            u64 frame = TRY(alloc_frame());
            TRY(MMU::map(virt, frame, flags));
            virt += ARCH_PAGE_SIZE;
        }

        guard.deactivate();

        return start;
    }
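
The guard in alloc_for_kernel calls unmap_owned on a range that may be only partially mapped when an allocation fails midway. unmap_owned itself is not shown in this diff; the comment suggests it unmaps and frees frames page by page and returns an error once it hits a page that was never mapped, which is exactly what the error path wants. A rough sketch of that assumed behaviour follows; MMU::unmap returning the old frame and free_frame are assumptions for illustration only, the real functions may differ:

    // Hypothetical sketch of unmap_owned's assumed behaviour, not the real implementation.
    Result<void> unmap_owned(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            // Fails as soon as we reach a page that was never mapped,
            // so calling this on a partially mapped range is safe.
            u64 frame = TRY(MMU::unmap(virt));
            TRY(free_frame(frame));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }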
@@ -211,6 +245,15 @@ namespace MemoryManager
        return {};
    }

    Result<void> unmap_owned_and_free_vm(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        KernelVM::free_several_pages(virt, count);

        return unmap_owned(virt, count);
    }

    Result<void> unmap_weak(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);
@@ -23,8 +23,10 @@ namespace MemoryManager
    Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags);

    Result<u64> alloc_at(u64 virt, usize count, int flags);
    Result<u64> alloc_for_kernel(usize count, int flags);

    Result<void> unmap_owned(u64 virt, usize count);
    Result<void> unmap_owned_and_free_vm(u64 virt, usize count);
    Result<void> unmap_weak(u64 virt, usize count);

    usize free();
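
With alloc_for_kernel and unmap_owned_and_free_vm declared here, kernel code no longer has to pair KernelVM and MemoryManager calls by hand. A hypothetical caller (the function name is illustrative, not part of this commit) might do:

    // Hypothetical example: allocate and later release a 4-page kernel scratch buffer.
    Result<void> with_scratch_buffer()
    {
        u64 scratch = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute));

        // If anything below fails, release both the mappings and the kernel VM range.
        auto guard = make_scope_guard([=] { MemoryManager::unmap_owned_and_free_vm(scratch, 4); });

        // ... use the buffer ...

        guard.deactivate();
        return MemoryManager::unmap_owned_and_free_vm(scratch, 4);
    }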
@@ -24,8 +24,7 @@ namespace Scheduler
        g_idle.ticks_left = 1;

        // Map some stack for the idle task
        u64 idle_stack_vm = KernelVM::alloc_one_page().release_value();
        MemoryManager::alloc_at(idle_stack_vm, 1, MMU::NoExecute | MMU::ReadWrite).release_value();
        u64 idle_stack_vm = MemoryManager::alloc_for_kernel(1, MMU::NoExecute | MMU::ReadWrite).release_value();

        Stack idle_stack{idle_stack_vm, ARCH_PAGE_SIZE};
        g_idle.set_sp(idle_stack.top());
@@ -48,15 +47,11 @@ namespace Scheduler
    Result<void> new_kernel_thread_impl(Thread* thread)
    {
        // If anything fails, make sure to clean up.
        auto thread_guard = make_scope_guard([&] { delete thread; });
        auto guard = make_scope_guard([&] { delete thread; });

        u64 thread_stack_vm = TRY(KernelVM::alloc_several_pages(4));
        auto vm_guard = make_scope_guard([&] { KernelVM::free_several_pages(thread_stack_vm, 4).value(); });
        u64 thread_stack_vm = TRY(MemoryManager::alloc_for_kernel(4, MMU::NoExecute | MMU::ReadWrite));

        TRY(MemoryManager::alloc_at(thread_stack_vm, 4, MMU::NoExecute | MMU::ReadWrite));

        thread_guard.deactivate();
        vm_guard.deactivate();
        guard.deactivate();

        Stack thread_stack{thread_stack_vm, ARCH_PAGE_SIZE * 4};
        thread->set_sp(thread_stack.top());
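
For reference, the thread-creation path after this commit collapses to a single allocation protected by one guard. Reconstructed from the paired old/new lines above (the rest of the function lies outside this hunk):

    Result<void> new_kernel_thread_impl(Thread* thread)
    {
        // If anything fails, make sure to clean up.
        auto guard = make_scope_guard([&] { delete thread; });

        // One call now reserves kernel VM and maps the 4 stack pages; it cleans up itself on failure.
        u64 thread_stack_vm = TRY(MemoryManager::alloc_for_kernel(4, MMU::NoExecute | MMU::ReadWrite));

        guard.deactivate();

        Stack thread_stack{thread_stack_vm, ARCH_PAGE_SIZE * 4};
        thread->set_sp(thread_stack.top());

        // ...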