#include "memory/KernelVM.h" #include "arch/MMU.h" #include #include static const u64 KERNEL_VM_RANGE_START = 0xffffffffc0000000; static LockedValue g_kernelvm_bitmap; static u8 bitmap_memory[4096 * 7]; static const usize KERNEL_VM_RANGE_SIZE = (sizeof(bitmap_memory) * 8) * ARCH_PAGE_SIZE; static const u64 KERNEL_VM_RANGE_END = KERNEL_VM_RANGE_SIZE + KERNEL_VM_RANGE_START; static_assert(KERNEL_VM_RANGE_END == 0xfffffffff8000000); static Atomic g_used_vm; namespace KernelVM { void init() { auto kernelvm_bitmap = g_kernelvm_bitmap.lock(); kernelvm_bitmap->initialize(bitmap_memory, sizeof(bitmap_memory)); kernelvm_bitmap->clear(false); } Result alloc_one_page() { auto kernelvm_bitmap = g_kernelvm_bitmap.lock(); usize index; bool ok = kernelvm_bitmap->find_and_toggle(false).try_set_value(index); if (!ok) return err(ENOMEM); g_used_vm += ARCH_PAGE_SIZE; return KERNEL_VM_RANGE_START + (index * ARCH_PAGE_SIZE); } bool find_several_pages_impl(usize count, u64& start_index) { u64 first_free_index = 0; u64 free_contiguous_pages = 0; auto kernelvm_bitmap = g_kernelvm_bitmap.lock(); for (u64 index = 0; index < kernelvm_bitmap->size(); index++) { if (kernelvm_bitmap->get(index)) { free_contiguous_pages = 0; continue; } // At this point, we have a free page. if (!free_contiguous_pages) first_free_index = index; free_contiguous_pages++; // Found enough contiguous free pages!! if (free_contiguous_pages == count) { start_index = first_free_index; return true; } } return false; } Result alloc_several_pages(const usize count) { u64 start_index; if (find_several_pages_impl(count, start_index)) { g_kernelvm_bitmap.lock()->clear_region(start_index, count, true); g_used_vm += ARCH_PAGE_SIZE * count; return KERNEL_VM_RANGE_START + (start_index * ARCH_PAGE_SIZE); } return err(ENOMEM); } Result free_one_page(u64 address) { if (address < KERNEL_VM_RANGE_START) return err(EFAULT); u64 index = (address - KERNEL_VM_RANGE_START) / ARCH_PAGE_SIZE; auto kernelvm_bitmap = g_kernelvm_bitmap.lock(); if (index >= kernelvm_bitmap->size()) return err(EFAULT); kernelvm_bitmap->set(index, false); g_used_vm -= ARCH_PAGE_SIZE; return {}; } Result free_several_pages(u64 address, usize count) { if (address < KERNEL_VM_RANGE_START) return err(EFAULT); if (address + (count * ARCH_PAGE_SIZE) >= KERNEL_VM_RANGE_END) return err(EFAULT); u64 index = (address - KERNEL_VM_RANGE_START) / ARCH_PAGE_SIZE; g_kernelvm_bitmap.lock()->clear_region(index, count, false); g_used_vm -= ARCH_PAGE_SIZE * count; return {}; } usize used() { return g_used_vm; } usize free() { return KERNEL_VM_RANGE_SIZE - g_used_vm; } }