Make MemoryManager's scope guards more robust

apio 2022-12-19 11:55:53 +01:00
parent 0bdbffe0ca
commit 60520dff4c
Signed by: apio
GPG Key ID: B8A7D06E42258954
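
The diff below changes three mapping routines so that their failure-cleanup guards roll back only the pages that were actually mapped (tracked in pages_mapped and captured by reference) instead of the full requested count. For context, make_scope_guard builds a small RAII object that runs a cleanup callback when it goes out of scope, unless deactivate() is called on the success path. The following is only a minimal sketch of that general shape, written against that assumption; the project's real scope-guard helper may differ in naming and details.

#include <utility> // std::move

// Minimal sketch of a make_scope_guard-style helper (illustrative, not the
// repository's actual implementation).
template <typename Callback> class ScopeGuard
{
  public:
    explicit ScopeGuard(Callback callback) : m_callback(std::move(callback))
    {
    }

    ~ScopeGuard()
    {
        // Run the cleanup on scope exit (including early returns), unless the
        // success path cancelled it.
        if (m_active) m_callback();
    }

    void deactivate()
    {
        m_active = false;
    }

  private:
    Callback m_callback;
    bool m_active { true };
};

template <typename Callback> ScopeGuard<Callback> make_scope_guard(Callback callback)
{
    return ScopeGuard<Callback>(std::move(callback));
}

With that shape, auto guard = make_scope_guard([&] { /* undo */ }); registers the rollback up front, and guard.deactivate() at the end of the happy path turns it into a no-op.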

@@ -182,14 +182,17 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
         CHECK_PAGE_ALIGNED(phys);
-        // Let's clean up after ourselves if we fail.
-        auto guard = make_scope_guard([=] { unmap_weak(virt, count); });
-        while (count--)
+        usize pages_mapped = 0;
+        // Let's clean up after ourselves if we fail.
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak(virt, pages_mapped); });
+        while (pages_mapped < count)
         {
             TRY(MMU::map(virt, phys, flags));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
         guard.deactivate();
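
A note on the capture list above (editorial, not part of the commit): [=] alone would copy pages_mapped into the lambda at the moment the guard is created, when it is still 0, so the cleanup would unmap nothing; [=, &pages_mapped] captures it by reference, so when the guard fires it sees how many pages the loop actually managed to map. A tiny standalone illustration of that difference in plain C++:

#include <cstdio>

int main()
{
    int progress = 0;

    auto by_value = [=] { return progress; };     // snapshots progress (0) right now
    auto by_reference = [&] { return progress; }; // reads progress when invoked

    progress = 3;

    std::printf("by value: %d, by reference: %d\n", by_value(), by_reference()); // prints 0 and 3
    return 0;
}
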
@@ -202,14 +205,16 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
         u64 start = virt;
-        auto guard = make_scope_guard([=] { unmap_owned(start, count); });
-        while (count--)
+        usize pages_mapped = 0;
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });
+        while (pages_mapped < count)
         {
             u64 frame = TRY(alloc_frame());
             TRY(MMU::map(virt, frame, flags));
             virt += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
         guard.deactivate();
@@ -220,20 +225,21 @@ namespace MemoryManager
     Result<u64> alloc_for_kernel(usize count, int flags)
     {
         u64 start = TRY(KernelVM::alloc_several_pages(count));
-        auto guard = make_scope_guard([=] {
-            KernelVM::free_several_pages(start, count);
-            // unmap_owned will fail as soon as we reach the end of the mapped range. That's fine, exactly what we want.
-            unmap_owned(start, count);
+        usize pages_mapped = 0;
+        auto guard = make_scope_guard([=, &pages_mapped] {
+            KernelVM::free_several_pages(start, pages_mapped);
+            unmap_owned(start, pages_mapped);
         });
         u64 virt = start;
-        while (count--)
+        while (pages_mapped < count)
         {
             u64 frame = TRY(alloc_frame());
             TRY(MMU::map(virt, frame, flags));
             virt += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
         guard.deactivate();
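
What actually triggers these guards (editorial note): each TRY propagates a failed Result by returning from the enclosing function immediately, and that early return destroys the still-active guard, whose callback then unmaps and frees exactly pages_mapped pages. Below is a rough, hypothetical sketch of how such a TRY macro is commonly built on a Result type using the GCC/Clang statement-expression extension; the member names (has_error, release_value, release_error) and the simplified error type are assumptions, not this project's actual API.

#include <cstdio>

struct Error
{
    int code;
};

// Greatly simplified stand-in for a kernel Result<T>.
template <typename T> struct Result
{
    bool ok;
    T value;
    Error error;

    bool has_error() const { return !ok; }
    T release_value() { return value; }
    Error release_error() { return error; }
};

// On success, the statement expression evaluates to the unwrapped value; on
// failure, it returns the error from the *enclosing* function, which is the
// early exit that lets an active scope guard run its cleanup.
#define TRY(expr)                                                 \
    ({                                                            \
        auto _result = (expr);                                    \
        if (_result.has_error()) return _result.release_error();  \
        _result.release_value();                                  \
    })

static Result<int> parse_positive(int raw)
{
    if (raw < 0) return Result<int> { false, 0, Error { -1 } };
    return Result<int> { true, raw, Error { 0 } };
}

static Error use(int raw)
{
    int value = TRY(parse_positive(raw)); // early-returns here when parsing fails
    std::printf("got %d\n", value);
    return Error { 0 };
}

int main()
{
    use(7);  // prints "got 7"
    use(-1); // returns the error before printing anything
    return 0;
}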