Make MemoryManager's scope guards more robust
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
parent 0bdbffe0ca
commit 60520dff4c
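
For context: the guards in this diff rely on a deactivatable scope guard. Below is a minimal sketch of what such a type can look like, assuming only the make_scope_guard()/deactivate() interface visible in the diff; the kernel's actual implementation may differ.

// Minimal sketch of a deactivatable scope guard (illustrative only; the
// kernel's real type may differ). It runs the stored callback when the
// enclosing scope exits, unless deactivate() was called first.
#include <utility>

template <typename Callback> class ScopeGuard
{
  public:
    ScopeGuard(Callback callback) : m_callback(std::move(callback))
    {
    }

    // Success path: call off the cleanup.
    void deactivate()
    {
        m_active = false;
    }

    ~ScopeGuard()
    {
        if (m_active) m_callback();
    }

  private:
    Callback m_callback;
    bool m_active { true };
};

template <typename Callback> ScopeGuard<Callback> make_scope_guard(Callback callback)
{
    return ScopeGuard<Callback> { std::move(callback) };
}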
@@ -182,14 +182,17 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
         CHECK_PAGE_ALIGNED(phys);
 
-        auto guard = make_scope_guard([=] { unmap_weak(virt, count); });
+        // Let's clean up after ourselves if we fail.
+        usize pages_mapped = 0;
+
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak(virt, pages_mapped); });
 
-        while (count--)
+        while (pages_mapped < count)
         {
             TRY(MMU::map(virt, phys, flags));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
 
         guard.deactivate();
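
The [=, &pages_mapped] capture is what makes the new guard robust: the old guard captured everything by value, so on failure it tried to unmap the full count, including pages that were never mapped; the new guard's cleanup must observe how far the loop actually got when it eventually runs, not a snapshot taken at guard creation. A toy userspace demonstration of the capture difference (hypothetical, not kernel code):

#include <cstdio>

int main()
{
    int pages_mapped = 0;

    // By-value capture snapshots pages_mapped at creation time (here, 0).
    auto cleanup_by_value = [=] { std::printf("by value: unmap %d pages\n", pages_mapped); };
    // By-reference capture sees the counter's value at cleanup time.
    auto cleanup_by_ref = [&] { std::printf("by reference: unmap %d pages\n", pages_mapped); };

    // Simulate a mapping loop that fails after 3 of 5 pages.
    for (int i = 0; i < 5; i++)
    {
        if (i == 3) break; // simulated MMU::map() failure
        pages_mapped++;
    }

    cleanup_by_value(); // prints 0: a stale snapshot
    cleanup_by_ref();   // prints 3: exactly the pages that were mapped
}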
@@ -202,14 +205,16 @@ namespace MemoryManager
         CHECK_PAGE_ALIGNED(virt);
 
         u64 start = virt;
+        usize pages_mapped = 0;
 
-        auto guard = make_scope_guard([=] { unmap_owned(start, count); });
+        auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });
 
-        while (count--)
+        while (pages_mapped < count)
         {
             u64 frame = TRY(alloc_frame());
             TRY(MMU::map(virt, frame, flags));
             virt += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
 
         guard.deactivate();
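
The early return that triggers these guards comes from TRY(): when alloc_frame() or MMU::map() fails, TRY() returns the error from the enclosing function, and the guard's destructor performs the rollback on the way out. A rough sketch of such a macro, assuming a Result type with has_error()/release_error()/release_value() in the style the code suggests; the kernel's real macro may differ:

// Illustrative TRY(): unwrap a Result, or propagate its error by returning
// early. Relies on the GNU statement-expression extension (GCC/Clang).
#define TRY(expr)                                                 \
    ({                                                            \
        auto _result = (expr);                                    \
        if (_result.has_error()) return _result.release_error();  \
        _result.release_value();                                  \
    })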
@@ -220,20 +225,21 @@ namespace MemoryManager
     Result<u64> alloc_for_kernel(usize count, int flags)
     {
         u64 start = TRY(KernelVM::alloc_several_pages(count));
+        usize pages_mapped = 0;
 
-        auto guard = make_scope_guard([=] {
-            KernelVM::free_several_pages(start, count);
-            // unmap_owned will fail as soon as we reach the end of the mapped range. That's fine, exactly what we want.
-            unmap_owned(start, count);
+        auto guard = make_scope_guard([=, &pages_mapped] {
+            KernelVM::free_several_pages(start, pages_mapped);
+            unmap_owned(start, pages_mapped);
         });
 
         u64 virt = start;
 
-        while (count--)
+        while (pages_mapped < count)
         {
             u64 frame = TRY(alloc_frame());
             TRY(MMU::map(virt, frame, flags));
             virt += ARCH_PAGE_SIZE;
+            pages_mapped++;
         }
 
         guard.deactivate();
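
From a caller's point of view, the net effect is that a partial failure never leaks out: the guard releases the VM range and unwinds exactly the pages_mapped frames that made it in before the error propagates. A hypothetical call site (everything except alloc_for_kernel's signature is illustrative, not part of the kernel's API):

// Hypothetical caller; names other than alloc_for_kernel are illustrative.
Result<u64> allocate_scratch_buffer()
{
    constexpr usize pages = 4;
    constexpr int flags = 0; // illustrative; real flag bits are defined elsewhere

    // If any frame allocation or mapping fails partway through, the scope
    // guard inside alloc_for_kernel() has already rolled everything back
    // before TRY() propagates the error to us.
    u64 buffer = TRY(MemoryManager::alloc_for_kernel(pages, flags));
    return buffer;
}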
|