Luna/kernel/src/memory/MemoryManager.cpp

#include "memory/MemoryManager.h"
#include "Log.h"
#include "arch/CPU.h"
#include "arch/MMU.h"
#include "memory/KernelVM.h"
#include "memory/MemoryMap.h"
#include <luna/Alignment.h>
#include <luna/Bitmap.h>
#include <luna/CString.h>
#include <luna/ScopeGuard.h>
#include <luna/SystemError.h>
#include <luna/Types.h>
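
// Section boundary markers for the kernel image (normally defined by the kernel's linker script). Their
// addresses delimit the read-only data and read-write data sections that protect_kernel_sections() remaps.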
extern const u8 start_of_kernel_rodata[1];
extern const u8 end_of_kernel_rodata[1];
extern const u8 start_of_kernel_data[1];
extern const u8 end_of_kernel_data[1];
static usize free_mem = 0;
static usize used_mem = 0;
static usize reserved_mem = 0;
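
// First-fit cursor for alloc_frame(): the search starts at this bit index, and free_frame() moves it back
// whenever an earlier frame is released, so we don't rescan the fully-used low part of the bitmap every time.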
static u64 start_index = 0;
static Bitmap g_frame_bitmap;
#define CHECK_PAGE_ALIGNED(address) expect(is_aligned<ARCH_PAGE_SIZE>(address), "Address is not page-aligned")
static usize get_physical_address_space_size()
{
MemoryMapIterator iter;
const MemoryMapEntry entry = iter.highest();
return entry.address() + entry.size(); // This is the address just past the end of the last (highest) entry,
// and thus the size of the whole physical address space that was passed to us.
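// Illustrative example (made-up entry, not taken from a real memory map): if the highest entry starts at
// 0xbffe'0000 and is 0x2'0000 bytes long, the physical address space spans [0, 0xc000'0000), i.e. 3 GiB,
// even if there are unusable holes below that address.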
}
namespace MemoryManager
{
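// Tighten protections on the kernel image itself: the read-only data section becomes non-executable (and,
// since ReadWrite is not passed, presumably non-writable), while the data section stays writable but loses
// execute permission.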
Result<void> protect_kernel_sections()
{
const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
const usize rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE);
TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
const usize data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE);
TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
return {};
}
void init_physical_frame_allocator()
{
MemoryMapIterator iter;
MemoryMapEntry entry;
const auto largest_free_entry = iter.largest_free();
expect(largest_free_entry.is_free(), "We were given a largest free memory region that isn't even free!");
// The entire physical address space. It may contain holes where no memory exists, so it differs from
// total(), which only counts memory that is actually present. Our bitmap needs one bit for every frame of
// the physical address space, since usable addresses are scattered across it.
const usize physical_address_space_size = get_physical_address_space_size();
// We store our frame bitmap at the beginning of the largest free memory block.
char* const frame_bitmap_addr = (char*)largest_free_entry.ptr();
const usize frame_bitmap_size = get_blocks_from_size(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);
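// One bit per page frame, packed into bytes. Worked example (assuming 4 KiB pages, purely illustrative):
// a 4 GiB physical address space has 4 GiB / 4 KiB = 1048576 frames, so the bitmap needs
// 1048576 / 8 = 131072 bytes (128 KiB), which comfortably fits inside a typical large free region.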
// This should never happen unless memory is extremely fragmented: usually there is one very large block of
// usable memory with only a few tiny blocks around it.
expect(frame_bitmap_size < largest_free_entry.size(),
"No single memory region is enough to hold the frame bitmap");
g_frame_bitmap.initialize(frame_bitmap_addr, frame_bitmap_size);
g_frame_bitmap.clear(true); // Set all pages to used/reserved by default, then clear out the free ones
iter.rewind();
while (iter.next().try_set_value(entry))
{
const u64 index = entry.address() / ARCH_PAGE_SIZE;
const usize pages = entry.size() / ARCH_PAGE_SIZE;
if (!entry.is_free()) { reserved_mem += entry.size(); }
else
{
free_mem += entry.size();
g_frame_bitmap.clear_region(index, pages, false);
}
}
// Make sure that the physical frames used by the bitmap aren't handed out to anyone else.
lock_frames(largest_free_entry.address(), get_blocks_from_size(frame_bitmap_size, ARCH_PAGE_SIZE));
}
void init()
{
init_physical_frame_allocator();
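
// At this point the frame bitmap still lives at a physical address (reachable through whatever mappings the
// bootloader left us); the rest of init() allocates kernel virtual memory for it and re-points the bitmap
// there once the VM allocator and the initial page directory are set up.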
KernelVM::init();
MMU::setup_initial_page_directory();
// NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
// there's no point in continuing.
auto bitmap_pages = get_blocks_from_size(g_frame_bitmap.size_in_bytes(), ARCH_PAGE_SIZE); // round up so a partially-filled final page is still mapped
auto virtual_bitmap_base =
KernelVM::alloc_several_pages(bitmap_pages)
.expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");
map_frames_at(virtual_bitmap_base, (u64)g_frame_bitmap.location(), bitmap_pages,
MMU::ReadWrite | MMU::NoExecute)
.expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");
g_frame_bitmap.initialize((void*)virtual_bitmap_base, g_frame_bitmap.size_in_bytes());
}
void lock_frame(u64 frame)
{
const u64 index = frame / ARCH_PAGE_SIZE;
if (g_frame_bitmap.get(index)) return;
g_frame_bitmap.set(index, true);
used_mem += ARCH_PAGE_SIZE;
free_mem -= ARCH_PAGE_SIZE;
}
void lock_frames(u64 frames, usize count)
{
for (usize index = 0; index < count; index++) { lock_frame(frames + (index * ARCH_PAGE_SIZE)); }
}
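// alloc_frame() hands out a single raw physical frame; callers are expected to map it somewhere before use.
// Sketch of the usual pattern (hypothetical virtual address; see alloc_at()/alloc_for_kernel() below):
//     u64 frame = TRY(alloc_frame());
//     TRY(MMU::map(some_virt_address, frame, MMU::ReadWrite | MMU::NoExecute));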
Result<u64> alloc_frame()
{
for (u64 index = start_index; index < g_frame_bitmap.size(); index++)
{
if (g_frame_bitmap.get(index)) continue;
g_frame_bitmap.set(index, true);
start_index = index + 1;
free_mem -= ARCH_PAGE_SIZE;
used_mem += ARCH_PAGE_SIZE;
return index * ARCH_PAGE_SIZE;
}
return err(ENOMEM);
}
Result<void> free_frame(u64 frame)
{
const u64 index = frame / ARCH_PAGE_SIZE;
if (index >= g_frame_bitmap.size()) return err(EFAULT);
if (!g_frame_bitmap.get(index)) return err(EFAULT);
g_frame_bitmap.set(index, false);
used_mem -= ARCH_PAGE_SIZE;
free_mem += ARCH_PAGE_SIZE;
if (start_index > index) start_index = index;
return {};
}
Result<void> remap(u64 address, usize count, int flags)
{
CHECK_PAGE_ALIGNED(address);
while (count--)
{
TRY(MMU::remap(address, flags));
address += ARCH_PAGE_SIZE;
}
return {};
}
Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags)
{
CHECK_PAGE_ALIGNED(virt);
CHECK_PAGE_ALIGNED(phys);
// Let's clean up after ourselves if we fail.
auto guard = make_scope_guard([=] { unmap_weak(virt, count); });
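// The lambda captures virt and count by value *before* the loop below advances them, so a failed mapping
// rolls back from the original base; unmap_weak() simply stops once it reaches a page that was never mapped
// (see the similar note in alloc_for_kernel() below).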
while (count--)
{
TRY(MMU::map(virt, phys, flags));
virt += ARCH_PAGE_SIZE;
phys += ARCH_PAGE_SIZE;
}
guard.deactivate();
return {};
}
Result<u64> alloc_at(u64 virt, usize count, int flags)
{
CHECK_PAGE_ALIGNED(virt);
u64 start = virt;
auto guard = make_scope_guard([=] { unmap_owned(start, count); });
while (count--)
{
u64 frame = TRY(alloc_frame());
TRY(MMU::map(virt, frame, flags));
virt += ARCH_PAGE_SIZE;
}
guard.deactivate();
return start;
}
Result<u64> alloc_for_kernel(usize count, int flags)
{
u64 start = TRY(KernelVM::alloc_several_pages(count));
auto guard = make_scope_guard([=] {
KernelVM::free_several_pages(start, count);
// unmap_owned will fail as soon as we reach the end of the mapped range. That's fine, exactly what we want.
unmap_owned(start, count);
});
u64 virt = start;
while (count--)
{
u64 frame = TRY(alloc_frame());
TRY(MMU::map(virt, frame, flags));
virt += ARCH_PAGE_SIZE;
}
guard.deactivate();
return start;
}
Result<void> unmap_owned(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
while (count--)
{
u64 frame = TRY(MMU::unmap(virt));
TRY(free_frame(frame));
virt += ARCH_PAGE_SIZE;
}
return {};
}
Result<void> unmap_owned_and_free_vm(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
KernelVM::free_several_pages(virt, count);
return unmap_owned(virt, count);
}
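// Usage sketch tying alloc_for_kernel() and unmap_owned_and_free_vm() together (illustrative only):
//     u64 buf = TRY(alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute)); // 4 fresh kernel pages
//     /* ... use the buffer ... */
//     TRY(unmap_owned_and_free_vm(buf, 4)); // give back both the frames and the virtual range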
Result<void> unmap_weak(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
while (count--)
{
TRY(MMU::unmap(virt));
virt += ARCH_PAGE_SIZE;
}
return {};
}
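// Example (illustrative, assuming 4 KiB pages): remap_unaligned(0x1234, 1, flags) describes a one-page span
// that actually straddles two pages, so count is bumped to 2 and the address is aligned down to 0x1000,
// remapping both the 0x1000 and 0x2000 pages.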
Result<void> remap_unaligned(u64 address, usize count, int flags)
{
if (!is_aligned<ARCH_PAGE_SIZE>(address)) count++;
address = align_down<ARCH_PAGE_SIZE>(address);
while (count--)
{
TRY(MMU::remap(address, flags));
address += ARCH_PAGE_SIZE;
}
return {};
}
bool validate_readable_page(u64 address)
{
auto rc = MMU::get_flags(address);
if (rc.has_error()) return false;
return true;
}
bool validate_writable_page(u64 address)
{
auto rc = MMU::get_flags(address);
if (rc.has_error()) return false;
if (rc.value() & MMU::ReadWrite) return true;
return false;
}
usize free()
{
return free_mem;
}
usize used()
{
return used_mem;
}
usize reserved()
{
return reserved_mem;
}
usize total()
{
return free_mem + used_mem + reserved_mem;
}
}