// Luna/kernel/src/memory/MemoryManager.cpp

#include "memory/MemoryManager.h"
#include "arch/MMU.h"
#include "memory/KernelVM.h"
#include "memory/MemoryMap.h"
#include "thread/Spinlock.h"
#include <luna/Alignment.h>
#include <luna/Bitmap.h>
#include <luna/ScopeGuard.h>
#include <luna/SystemError.h>
#include <luna/Types.h>

extern const u8 start_of_kernel_rodata[1];
extern const u8 end_of_kernel_rodata[1];
extern const u8 start_of_kernel_data[1];
extern const u8 end_of_kernel_data[1];

// Running totals of physical memory, in bytes.
static Atomic<usize> free_mem;
static Atomic<usize> used_mem;
static Atomic<usize> reserved_mem;

// First bitmap index that may contain a free frame; all bits below it are known to be set.
static Atomic<u64> start_index;

// One bit per physical frame: set means used or reserved, clear means free.
static LockedValue<Bitmap> g_frame_bitmap;

#define CHECK_PAGE_ALIGNED(address) expect(is_aligned<ARCH_PAGE_SIZE>(address), "Address is not page-aligned")

// Returns the address at the end of the last (highest) memory map entry; that is, the size of the whole
// physical address space that was passed to us.
static usize get_physical_address_space_size()
{
MemoryMapIterator iter;
const MemoryMapEntry entry = iter.highest();

return entry.address() + entry.size();
}

namespace MemoryManager
{
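// Apply stricter MMU flags to the kernel's own sections: rodata becomes read-only and non-executable, and the
// data section becomes writable but non-executable.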
Result<void> protect_kernel_sections()
{
const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
const usize rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE);
TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
const usize data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE);
TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
return {};
}
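
// Set up the physical frame allocator: place a bitmap covering the whole physical address space at the start of
// the largest free memory region, mark the free regions in it, and lock the frames the bitmap itself occupies.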
void init_physical_frame_allocator()
{
MemoryMapIterator iter;
MemoryMapEntry entry;
const auto largest_free_entry = iter.largest_free();
expect(largest_free_entry.is_free(), "We were given a largest free memory region that isn't even free!");
// The entire physical address space. It may contain nonexistent memory holes, so it differs from the total
// amount of memory that actually exists. Our bitmap needs to have space for all of the physical address space,
// since usable addresses will be scattered across it.
const usize physical_address_space_size = get_physical_address_space_size();
// We store our frame bitmap at the beginning of the largest free memory block.
char* const frame_bitmap_addr = (char*)largest_free_entry.ptr();
// One bit per page frame, rounded up to a whole number of bytes (8 bits per byte).
const usize frame_bitmap_size = get_blocks_from_size(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);
// This should never happen unless memory is very fragmented. Usually there is one very big block of usable
// memory, with some tiny blocks around it.
expect(frame_bitmap_size < largest_free_entry.size(),
"No single memory region is enough to hold the frame bitmap");
{
auto frame_bitmap = g_frame_bitmap.lock();
frame_bitmap->initialize(frame_bitmap_addr, frame_bitmap_size);
frame_bitmap->clear(true); // Set all pages to used/reserved by default, then clear out the free ones
iter.rewind();
while (iter.next().try_set_value(entry))
{
const u64 index = entry.address() / ARCH_PAGE_SIZE;
const usize pages = entry.size() / ARCH_PAGE_SIZE;
if (!entry.is_free()) { reserved_mem += entry.size(); }
else
{
free_mem += entry.size();
frame_bitmap->clear_region(index, pages, false);
}
}
}
// Make sure that the physical frames used by the bitmap aren't handed out to anyone else.
lock_frames(largest_free_entry.address(), get_blocks_from_size(frame_bitmap_size, ARCH_PAGE_SIZE));
}
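
// Initialize the memory manager: the physical frame allocator, the kernel VM allocator and the initial page
// directory, then move the frame bitmap into kernel virtual memory.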
void init()
{
init_physical_frame_allocator();

KernelVM::init();

MMU::setup_initial_page_directory();

// NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
// there's no point in continuing.

// Round up: the bitmap's size in bytes is not necessarily a whole number of pages.
auto bitmap_pages = get_blocks_from_size(g_frame_bitmap.lock()->size_in_bytes(), ARCH_PAGE_SIZE);

auto virtual_bitmap_base =
KernelVM::alloc_several_pages(bitmap_pages)
.expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");

u64 phys = (u64)g_frame_bitmap.lock()->location();
map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
.expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");

auto frame_bitmap = g_frame_bitmap.lock();
frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
}
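
// Mark the physical frame containing the given address as used, updating the statistics. Does nothing if the
// frame is already locked.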
void lock_frame(u64 frame)
{
const u64 index = frame / ARCH_PAGE_SIZE;
auto frame_bitmap = g_frame_bitmap.lock();
if (frame_bitmap->get(index)) return;
frame_bitmap->set(index, true);
used_mem += ARCH_PAGE_SIZE;
free_mem -= ARCH_PAGE_SIZE;
}
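
// Lock `count` contiguous physical frames, starting at the given physical address.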
void lock_frames(u64 frames, usize count)
{
for (usize index = 0; index < count; index++) { lock_frame(frames + (index * ARCH_PAGE_SIZE)); }
}
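
// Allocate one physical frame and return its physical address, or ENOMEM if no free frame is left.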
Result<u64> alloc_frame()
{
auto frame_bitmap = g_frame_bitmap.lock();
// Start scanning at start_index; every bit below it is known to be set.
for (u64 index = start_index; index < frame_bitmap->size(); index++)
{
if (frame_bitmap->get(index)) continue;
frame_bitmap->set(index, true);
start_index = index + 1;
free_mem -= ARCH_PAGE_SIZE;
used_mem += ARCH_PAGE_SIZE;
return index * ARCH_PAGE_SIZE;
}
return err(ENOMEM);
}
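
// Return a physical frame to the allocator, failing with EFAULT if the address is outside the bitmap or the
// frame was not allocated.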
Result<void> free_frame(u64 frame)
{
const u64 index = frame / ARCH_PAGE_SIZE;
auto frame_bitmap = g_frame_bitmap.lock();
if (index >= frame_bitmap->size()) return err(EFAULT);
if (!frame_bitmap->get(index)) return err(EFAULT);
frame_bitmap->set(index, false);
used_mem -= ARCH_PAGE_SIZE;
free_mem += ARCH_PAGE_SIZE;
if (start_index > index) start_index = index;
return {};
}
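
// Change the MMU flags of `count` already-mapped pages, starting at a page-aligned virtual address.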
Result<void> remap(u64 address, usize count, int flags)
{
CHECK_PAGE_ALIGNED(address);
while (count--)
{
TRY(MMU::remap(address, flags));
address += ARCH_PAGE_SIZE;
}
return {};
}
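
// Map `count` physical frames at the given virtual address, without taking ownership of the frames. If any page
// fails to map, everything mapped so far is unmapped again.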
Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags)
{
CHECK_PAGE_ALIGNED(virt);
CHECK_PAGE_ALIGNED(phys);
usize pages_mapped = 0;
// Let's clean up after ourselves if we fail.
auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak(virt, pages_mapped); });
while (pages_mapped < count)
{
TRY(MMU::map(virt, phys, flags));
virt += ARCH_PAGE_SIZE;
phys += ARCH_PAGE_SIZE;
pages_mapped++;
}
guard.deactivate();
return {};
}
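
// Allocate `count` physical frames and map them at a fixed virtual address, freeing and unmapping everything
// already done on failure.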
Result<u64> alloc_at(u64 virt, usize count, int flags)
{
CHECK_PAGE_ALIGNED(virt);
u64 start = virt;
usize pages_mapped = 0;
auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });
while (pages_mapped < count)
{
u64 frame = TRY(alloc_frame());
TRY(MMU::map(virt, frame, flags));
virt += ARCH_PAGE_SIZE;
pages_mapped++;
}
guard.deactivate();
return start;
}
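
// Allocate `count` physical frames and map them at a kernel virtual address chosen by KernelVM, returning the
// start of the new mapping.
// Example (illustrative; the flag choice is up to the caller):
// u64 buffer = TRY(alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute));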
Result<u64> alloc_for_kernel(usize count, int flags)
{
u64 start = TRY(KernelVM::alloc_several_pages(count));
usize pages_mapped = 0;
auto guard = make_scope_guard([=, &pages_mapped] {
KernelVM::free_several_pages(start, pages_mapped);
unmap_owned(start, pages_mapped);
});
u64 virt = start;
while (pages_mapped < count)
{
u64 frame = TRY(alloc_frame());
TRY(MMU::map(virt, frame, flags));
virt += ARCH_PAGE_SIZE;
pages_mapped++;
}
guard.deactivate();
return start;
}
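
// Map `count` existing physical frames (not owned by the mapping) into kernel virtual memory, returning the
// chosen virtual address.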
Result<u64> get_kernel_mapping_for_frames(u64 phys, usize count, int flags)
{
u64 start = TRY(KernelVM::alloc_several_pages(count));
usize pages_mapped = 0;
auto guard = make_scope_guard([=, &pages_mapped] {
KernelVM::free_several_pages(start, pages_mapped);
unmap_weak(start, pages_mapped);
});
u64 virt = start;
while (pages_mapped < count)
{
TRY(MMU::map(virt, phys, flags));
virt += ARCH_PAGE_SIZE;
phys += ARCH_PAGE_SIZE;
pages_mapped++;
}
guard.deactivate();
return start;
}
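
// Unmap `count` pages and free the physical frames backing them; for mappings that own their frames.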
Result<void> unmap_owned(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
while (count--)
{
u64 frame = TRY(MMU::unmap(virt));
TRY(free_frame(frame));
virt += ARCH_PAGE_SIZE;
}
return {};
}
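
// Like unmap_owned(), but also returns the virtual address range to the KernelVM allocator.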
Result<void> unmap_owned_and_free_vm(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
KernelVM::free_several_pages(virt, count);
return unmap_owned(virt, count);
}
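
// Unmap `count` pages without freeing the physical frames backing them.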
Result<void> unmap_weak(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
while (count--)
{
TRY(MMU::unmap(virt));
virt += ARCH_PAGE_SIZE;
}
return {};
}
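
// Like unmap_weak(), but also returns the virtual address range to the KernelVM allocator.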
Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
KernelVM::free_several_pages(virt, count);
return unmap_weak(virt, count);
}
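
// Like remap(), but tolerates an unaligned start address by expanding the range to cover whole pages.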
Result<void> remap_unaligned(u64 address, usize count, int flags)
{
if (!is_aligned<ARCH_PAGE_SIZE>(address)) count++;
address = align_down<ARCH_PAGE_SIZE>(address);
while (count--)
{
TRY(MMU::remap(address, flags));
address += ARCH_PAGE_SIZE;
}
return {};
}
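
// Returns true if the page containing the given virtual address is mapped (and can therefore be read).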
bool validate_readable_page(u64 address)
{
auto rc = MMU::get_flags(address);
if (rc.has_error()) return false;
return true;
}
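
// Returns true if the page containing the given virtual address is mapped with the ReadWrite flag.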
bool validate_writable_page(u64 address)
{
auto rc = MMU::get_flags(address);
if (rc.has_error()) return false;
if (rc.value() & MMU::ReadWrite) return true;
return false;
}
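
// Checks that every page of a NUL-terminated userspace string is mapped before the kernel reads it.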
// FIXME: Replace this with some kind of strdup_from_user() function.
bool validate_userspace_string(u64 address)
{
if (!validate_readable_page(address)) return false;
while (*(char*)address != 0)
{
address++;
// Only re-validate when we cross into a new page.
if ((address % ARCH_PAGE_SIZE) == 0)
{
if (!validate_readable_page(address)) return false;
}
}
return true;
}
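
// Copy `size` bytes from kernel memory into userspace, validating each destination page before writing to it.
// Returns false if any destination page is not mapped as writable.
// Example (illustrative; `user_buf` stands for a pointer received from userspace):
// int value = 42;
// if (!copy_to_user(user_buf, &value, sizeof(value))) return err(EFAULT);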
bool copy_to_user(void* user, const void* kernel, usize size)
{
uintptr_t user_ptr = (uintptr_t)user;
uintptr_t user_page = align_down<ARCH_PAGE_SIZE>(user_ptr);
const char* kernel_ptr = (const char*)kernel;
// Userspace pointer not aligned on page boundary
if (user_ptr != user_page)
{
// FIXME: Validate that this page is writable by the user, not just the kernel.
if (!validate_writable_page(user_page)) return false;
}
while (size--)
{
// Crossed a page boundary, gotta check the page tables again before touching any memory!!
if ((user_ptr % ARCH_PAGE_SIZE) == 0)
{
if (!validate_writable_page(user_ptr)) return false;
}
*(char*)user_ptr = *kernel_ptr++;
user_ptr++;
}
return true;
}
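
// Memory statistics, in bytes.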
usize free()
{
return free_mem;
}
usize used()
{
return used_mem;
}
usize reserved()
{
return reserved_mem;
}

usize total()
{
return free_mem + used_mem + reserved_mem;
}
} // namespace MemoryManager