#include "memory/MemoryManager.h" #include "arch/MMU.h" #include "memory/KernelVM.h" #include "memory/MemoryMap.h" #include "thread/Spinlock.h" #include #include #include #include #include extern const u8 start_of_kernel_rodata[1]; extern const u8 end_of_kernel_rodata[1]; extern const u8 start_of_kernel_data[1]; extern const u8 end_of_kernel_data[1]; static Atomic free_mem; static Atomic used_mem; static Atomic reserved_mem; static Atomic start_index; static LockedValue g_frame_bitmap; #define CHECK_PAGE_ALIGNED(address) expect(is_aligned(address), "Address is not page-aligned") static usize get_physical_address_space_size() { MemoryMapIterator iter; const MemoryMapEntry entry = iter.highest(); return entry.address() + entry.size(); // This is the address at the end of the last (highest) entry, thus the whole // address space that was passed to us. } namespace MemoryManager { Result protect_kernel_sections() { const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata); const usize rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE); TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute)); const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data); const usize data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE); TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite)); return {}; } void init_physical_frame_allocator() { MemoryMapIterator iter; MemoryMapEntry entry; const auto largest_free_entry = iter.largest_free(); expect(largest_free_entry.is_free(), "We were given a largest free memory region that isn't even free!"); // The entire physical address space. May contain inexistent memory holes, thus differs from total_mem which // only counts existent memory. Our bitmap needs to have space for all of the physical address space, since // usable addresses will be scattered across it. const usize physical_address_space_size = get_physical_address_space_size(); // We store our frame bitmap at the beginning of the largest free memory block. char* const frame_bitmap_addr = (char*)largest_free_entry.ptr(); const usize frame_bitmap_size = get_blocks_from_size(physical_address_space_size / ARCH_PAGE_SIZE, 8UL); // This should never happen, unless memory is very fragmented. Usually there is always a very big block of // usable memory and then some tiny blocks around it. expect(frame_bitmap_size < largest_free_entry.size(), "No single memory region is enough to hold the frame bitmap"); { auto frame_bitmap = g_frame_bitmap.lock(); frame_bitmap->initialize(frame_bitmap_addr, frame_bitmap_size); frame_bitmap->clear(true); // Set all pages to used/reserved by default, then clear out the free ones iter.rewind(); while (iter.next().try_set_value(entry)) { const u64 index = entry.address() / ARCH_PAGE_SIZE; const usize pages = entry.size() / ARCH_PAGE_SIZE; if (!entry.is_free()) { reserved_mem += entry.size(); } else { free_mem += entry.size(); frame_bitmap->clear_region(index, pages, false); } } } // Make sure that the physical frames used by the bitmap aren't handed out to anyone else. lock_frames(largest_free_entry.address(), get_blocks_from_size(frame_bitmap_size, ARCH_PAGE_SIZE)); } void init() { init_physical_frame_allocator(); KernelVM::init(); MMU::setup_initial_page_directory(); // NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory // there's no point in continuing. 
        auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;

        auto virtual_bitmap_base =
            KernelVM::alloc_several_pages(bitmap_pages)
                .expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");

        u64 phys = (u64)g_frame_bitmap.lock()->location();
        map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
            .expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");

        // Point the bitmap at its new virtual mapping; it is the same underlying physical memory.
        auto frame_bitmap = g_frame_bitmap.lock();
        frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
    }

    void lock_frame(u64 frame)
    {
        const u64 index = frame / ARCH_PAGE_SIZE;
        auto frame_bitmap = g_frame_bitmap.lock();
        if (frame_bitmap->get(index)) return;
        frame_bitmap->set(index, true);
        used_mem += ARCH_PAGE_SIZE;
        free_mem -= ARCH_PAGE_SIZE;
    }

    void lock_frames(u64 frames, usize count)
    {
        for (usize index = 0; index < count; index++) { lock_frame(frames + (index * ARCH_PAGE_SIZE)); }
    }

    Result<u64> alloc_frame()
    {
        auto frame_bitmap = g_frame_bitmap.lock();

        for (u64 index = start_index; index < frame_bitmap->size(); index++)
        {
            if (frame_bitmap->get(index)) continue;
            frame_bitmap->set(index, true);
            start_index = index + 1;
            free_mem -= ARCH_PAGE_SIZE;
            used_mem += ARCH_PAGE_SIZE;
            return index * ARCH_PAGE_SIZE;
        }

        return err(ENOMEM);
    }

    Result<void> free_frame(u64 frame)
    {
        const u64 index = frame / ARCH_PAGE_SIZE;
        auto frame_bitmap = g_frame_bitmap.lock();
        if (index >= frame_bitmap->size()) return err(EFAULT);
        if (!frame_bitmap->get(index)) return err(EFAULT);
        frame_bitmap->set(index, false);
        used_mem -= ARCH_PAGE_SIZE;
        free_mem += ARCH_PAGE_SIZE;
        if (start_index > index) start_index = index;
        return {};
    }

    Result<void> remap(u64 address, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(address);

        while (count--)
        {
            TRY(MMU::remap(address, flags));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);
        CHECK_PAGE_ALIGNED(phys);

        usize pages_mapped = 0;

        // Let's clean up after ourselves if we fail.
        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak(virt, pages_mapped); });

        while (pages_mapped < count)
        {
            TRY(MMU::map(virt, phys, flags));
            virt += ARCH_PAGE_SIZE;
            phys += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return {};
    }

    Result<u64> alloc_at(u64 virt, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);

        u64 start = virt;
        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });

        while (pages_mapped < count)
        {
            u64 frame = TRY(alloc_frame());
            TRY(MMU::map(virt, frame, flags));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }

    Result<u64> alloc_for_kernel(usize count, int flags)
    {
        u64 start = TRY(KernelVM::alloc_several_pages(count));
        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] {
            KernelVM::free_several_pages(start, pages_mapped);
            unmap_owned(start, pages_mapped);
        });

        u64 virt = start;
        while (pages_mapped < count)
        {
            u64 frame = TRY(alloc_frame());
            TRY(MMU::map(virt, frame, flags));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }

    Result<u64> get_kernel_mapping_for_frames(u64 phys, usize count, int flags)
    {
        u64 start = TRY(KernelVM::alloc_several_pages(count));
        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] {
            KernelVM::free_several_pages(start, pages_mapped);
            unmap_weak(start, pages_mapped);
        });

        u64 virt = start;
        while (pages_mapped < count)
        {
            TRY(MMU::map(virt, phys, flags));
            virt += ARCH_PAGE_SIZE;
            phys += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }

    Result<void> unmap_owned(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            u64 frame = TRY(MMU::unmap(virt));
            TRY(free_frame(frame));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> unmap_owned_and_free_vm(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        KernelVM::free_several_pages(virt, count);

        return unmap_owned(virt, count);
    }

    Result<void> unmap_weak(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            TRY(MMU::unmap(virt));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> remap_unaligned(u64 address, usize count, int flags)
    {
        if (!is_aligned(address)) count++;
        address = align_down(address);

        while (count--)
        {
            TRY(MMU::remap(address, flags));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }

    bool validate_readable_page(u64 address)
    {
        auto rc = MMU::get_flags(address);
        if (rc.has_error()) return false;
        return true;
    }

    bool validate_writable_page(u64 address)
    {
        auto rc = MMU::get_flags(address);
        if (rc.has_error()) return false;
        if (rc.value() & MMU::ReadWrite) return true;
        return false;
    }

    usize free()
    {
        return free_mem;
    }

    usize used()
    {
        return used_mem;
    }

    usize reserved()
    {
        return reserved_mem;
    }

    usize total()
    {
        return free_mem + used_mem + reserved_mem;
    }
}