#include "memory/MemoryManager.h"
#include "Log.h"
#include "arch/MMU.h"
#include "memory/KernelVM.h"
#include "memory/MemoryMap.h"
#include <luna/Alignment.h>
#include <luna/Bitmap.h>
#include <luna/ScopeGuard.h>
#include <luna/Spinlock.h>
#include <luna/SystemError.h>
#include <luna/Types.h>
#include <luna/Vector.h>

extern const u8 start_of_kernel_rodata[1];
extern const u8 end_of_kernel_rodata[1];
extern const u8 start_of_kernel_data[1];
extern const u8 end_of_kernel_data[1];

// Memory usage counters, in bytes.
static Atomic<usize> free_mem;
static Atomic<usize> used_mem;
static Atomic<usize> reserved_mem;

// Hint: index of the first bit worth checking when searching the frame bitmap for a free frame.
static Atomic<u64> start_index;

// Bitmap of physical frames; a set bit means the frame is used or reserved.
static LockedValue<Bitmap> g_frame_bitmap;

#define CHECK_PAGE_ALIGNED(address) expect(is_aligned<ARCH_PAGE_SIZE>(address), "Address is not page-aligned")

static usize get_physical_address_space_size()
{
    MemoryMapIterator iter;
    const MemoryMapEntry entry = iter.highest();

    // The end of the last (highest) entry marks the size of the whole physical address space that was passed to us.
    return entry.address() + entry.size();
}

namespace MemoryManager
{
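
    // Remaps the kernel's rodata section with only the NoExecute flag and the data section with
    // NoExecute | ReadWrite, so neither section remains executable.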
    Result<void> protect_kernel_sections()
    {
        const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
        const usize rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE);
        TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));

        const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
        const usize data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE);
        TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));

        return {};
    }
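
    // Sets up the physical frame bitmap allocator using the bootloader-provided memory map.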
    void init_physical_frame_allocator()
    {
        MemoryMapIterator iter;
        MemoryMapEntry entry;

        const auto largest_free_entry = iter.largest_free();

        expect(largest_free_entry.is_free(), "We were given a largest free memory region that isn't even free!");

        // The entire physical address space. May contain nonexistent memory holes, so it differs from total_mem,
        // which only counts memory that actually exists. Our bitmap needs to have space for the whole physical
        // address space, since usable addresses will be scattered across it.
        const usize physical_address_space_size = get_physical_address_space_size();

        // We store our frame bitmap at the beginning of the largest free memory block.
        char* const frame_bitmap_addr = (char*)largest_free_entry.ptr();

        // One bit per frame, rounded up to whole bytes.
        const usize frame_bitmap_size = get_blocks_from_size(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);

        // This should never happen, unless memory is very fragmented. Usually there is always a very big block of
        // usable memory and then some tiny blocks around it.
        expect(frame_bitmap_size < largest_free_entry.size(),
               "No single memory region is enough to hold the frame bitmap");

        {
            auto frame_bitmap = g_frame_bitmap.lock();

            frame_bitmap->initialize(frame_bitmap_addr, frame_bitmap_size);

            frame_bitmap->clear(true); // Set all pages to used/reserved by default, then clear out the free ones

            iter.rewind();
            while (iter.next().try_set_value(entry))
            {
                const u64 index = entry.address() / ARCH_PAGE_SIZE;
                const usize pages = entry.size() / ARCH_PAGE_SIZE;
                if (!entry.is_free()) { reserved_mem += entry.size(); }
                else
                {
                    free_mem += entry.size();
                    frame_bitmap->clear_region(index, pages, false);
                }
            }
        }

        // Make sure that the physical frames used by the bitmap aren't handed out to anyone else.
        lock_frames(largest_free_entry.address(), get_blocks_from_size(frame_bitmap_size, ARCH_PAGE_SIZE));
    }
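
    // Early memory initialization: set up the physical frame allocator and the initial page directory, then
    // re-point the frame bitmap at the virtual address corresponding to its physical location, and finally
    // initialize the kernel's virtual memory allocator.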
    void init()
    {
        init_physical_frame_allocator();

        MMU::setup_initial_page_directory();

        auto frame_bitmap = g_frame_bitmap.lock();
        u64 phys = (u64)frame_bitmap->location();

        auto virtual_bitmap_base = MMU::translate_physical_address(phys);
        frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());

        KernelVM::init();
    }
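
    // Marks a single frame as used in the bitmap (if it isn't already) and updates the usage counters.
    // Expects the caller to have already locked the bitmap.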
    void do_lock_frame(u64 index, Bitmap& bitmap)
    {
        if (bitmap.get(index)) return;
        bitmap.set(index, true);
        used_mem += ARCH_PAGE_SIZE;
        free_mem -= ARCH_PAGE_SIZE;
    }

    void lock_frame(u64 frame)
    {
        const u64 index = frame / ARCH_PAGE_SIZE;
        auto frame_bitmap = g_frame_bitmap.lock();
        do_lock_frame(index, *frame_bitmap);
    }

    void lock_frames(u64 frames, usize count)
    {
        auto frame_bitmap = g_frame_bitmap.lock();
        const u64 frame_index = frames / ARCH_PAGE_SIZE;
        for (usize index = 0; index < count; index++) { do_lock_frame(frame_index + index, *frame_bitmap); }
    }
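
    // Finds a free physical frame, marks it as used, and returns its physical address.
    // Fails with ENOMEM if no free frame is available.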
    Result<u64> alloc_frame()
    {
        auto frame_bitmap = g_frame_bitmap.lock();

        usize index;
        bool ok = frame_bitmap->find_and_toggle(false, start_index).try_set_value(index);
        if (!ok) return err(ENOMEM);

        start_index = index + 1;

        used_mem += ARCH_PAGE_SIZE;
        free_mem -= ARCH_PAGE_SIZE;

        return index * ARCH_PAGE_SIZE;
    }

    Result<u64> alloc_zeroed_frame()
    {
        const u64 frame = TRY(alloc_frame());

        const u64 address = MMU::translate_physical_address(frame);
        memset((void*)address, 0, ARCH_PAGE_SIZE);

        return frame;
    }
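
    // Returns a frame to the allocator. Fails with EFAULT if the address lies outside the bitmap or the frame
    // was not allocated in the first place.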
    Result<void> free_frame(u64 frame)
    {
        const u64 index = frame / ARCH_PAGE_SIZE;

        auto frame_bitmap = g_frame_bitmap.lock();

        if (index >= frame_bitmap->size()) return err(EFAULT);
        if (!frame_bitmap->get(index)) return err(EFAULT);

        frame_bitmap->set(index, false);

        used_mem -= ARCH_PAGE_SIZE;
        free_mem += ARCH_PAGE_SIZE;

        if (start_index > index) start_index = index;
        return {};
    }

    Result<void> free_frames(u64 address, usize count)
    {
        while (count--)
        {
            TRY(free_frame(address));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> remap(u64 address, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(address);

        while (count--)
        {
            TRY(MMU::remap(address, flags));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }
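
    // Maps existing physical frames at a fixed virtual address without taking ownership of them; on failure,
    // any partial mappings are undone but the frames themselves are left alone.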
    Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);
        CHECK_PAGE_ALIGNED(phys);

        usize pages_mapped = 0;

        // Let's clean up after ourselves if we fail.
        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak(virt, pages_mapped); });

        while (pages_mapped < count)
        {
            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
            virt += ARCH_PAGE_SIZE;
            phys += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return {};
    }
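
    // copy_region() makes newpd share oldpd's physical frames for the given range, while copy_region_data()
    // allocates fresh frames for newpd and copies the page contents over.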
    Result<void> copy_region(u64 virt, usize count, PageDirectory* oldpd, PageDirectory* newpd)
    {
        CHECK_PAGE_ALIGNED(virt);

        usize pages_mapped = 0;

        // Let's clean up after ourselves if we fail.
        auto guard = make_scope_guard(
            [=, &pages_mapped] { kwarnln("copy_region failed, sorry! cannot reclaim already copied pages"); });

        while (pages_mapped < count)
        {
            u64 phys = TRY(MMU::get_physical(virt, oldpd));
            int flags = TRY(MMU::get_flags(virt, oldpd));
            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No, newpd));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return {};
    }

    Result<void> copy_region_data(u64 virt, usize count, PageDirectory* oldpd, PageDirectory* newpd)
    {
        CHECK_PAGE_ALIGNED(virt);

        usize pages_mapped = 0;

        // Let's clean up after ourselves if we fail.
        auto guard = make_scope_guard(
            [=, &pages_mapped] { kwarnln("copy_region_data failed, sorry! cannot reclaim already copied pages"); });

        while (pages_mapped < count)
        {
            u64 frame = TRY(alloc_frame());
            u64 phys = TRY(MMU::get_physical(virt, oldpd));
            int flags = TRY(MMU::get_flags(virt, oldpd));
            memcpy((void*)MMU::translate_physical_address(frame), (void*)MMU::translate_physical_address(phys),
                   ARCH_PAGE_SIZE);
            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No, newpd));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return {};
    }

    Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);
        CHECK_PAGE_ALIGNED(phys);

        usize pages_mapped = 0;

        // Let's clean up after ourselves if we fail.
        auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak_huge(virt, pages_mapped); });

        while (pages_mapped < count)
        {
            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::Yes));
            virt += ARCH_HUGE_PAGE_SIZE;
            phys += ARCH_HUGE_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return {};
    }
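
    // alloc_at() and alloc_at_zeroed() allocate fresh physical frames (optionally zeroed) and map them at a
    // fixed virtual address; on failure, already-mapped pages are unmapped and their frames freed.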
    Result<u64> alloc_at(u64 virt, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);

        u64 start = virt;
        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });

        while (pages_mapped < count)
        {
            const u64 frame = TRY(alloc_frame());
            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }

    Result<u64> alloc_at_zeroed(u64 virt, usize count, int flags)
    {
        CHECK_PAGE_ALIGNED(virt);

        u64 start = virt;
        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] { unmap_owned(start, pages_mapped); });

        while (pages_mapped < count)
        {
            const u64 frame = TRY(alloc_zeroed_frame());
            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }
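
    // alloc_for_kernel() carves out kernel virtual address space and backs it with freshly allocated frames,
    // while get_kernel_mapping_for_frames() only creates a kernel-space view of frames owned elsewhere.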
    Result<u64> alloc_for_kernel(usize count, int flags)
    {
        const u64 start = TRY(KernelVM::alloc_several_pages(count));
        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] {
            KernelVM::free_several_pages(start, pages_mapped);
            unmap_owned(start, pages_mapped);
        });

        u64 virt = start;

        while (pages_mapped < count)
        {
            const u64 frame = TRY(alloc_frame());
            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
            virt += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }

    Result<u64> get_kernel_mapping_for_frames(u64 phys, usize count, int flags)
    {
        const u64 start = TRY(KernelVM::alloc_several_pages(count));

        usize pages_mapped = 0;

        auto guard = make_scope_guard([=, &pages_mapped] {
            KernelVM::free_several_pages(start, pages_mapped);
            unmap_weak(start, pages_mapped);
        });

        u64 virt = start;

        while (pages_mapped < count)
        {
            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
            virt += ARCH_PAGE_SIZE;
            phys += ARCH_PAGE_SIZE;
            pages_mapped++;
        }

        guard.deactivate();

        return start;
    }
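
    // The unmap_owned* functions release the underlying physical frames along with the mappings; the
    // unmap_weak* variants only remove the mappings and leave the frames to their owner.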
    Result<void> unmap_owned(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            const u64 frame = TRY(MMU::unmap(virt));
            TRY(free_frame(frame));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> unmap_owned_if_possible(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            const auto frame = MMU::unmap(virt);
            if (frame.has_value()) TRY(free_frame(frame.value()));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> unmap_owned_and_free_vm(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        KernelVM::free_several_pages(virt, count);

        return unmap_owned(virt, count);
    }

    Result<void> unmap_weak(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            TRY(MMU::unmap(virt));
            virt += ARCH_PAGE_SIZE;
        }

        return {};
    }

    Result<void> unmap_weak_huge(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        while (count--)
        {
            TRY(MMU::unmap(virt));
            virt += ARCH_HUGE_PAGE_SIZE;
        }

        return {};
    }

    Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
    {
        CHECK_PAGE_ALIGNED(virt);

        KernelVM::free_several_pages(virt, count);

        return unmap_weak(virt, count);
    }

    Result<void> remap_unaligned(u64 address, usize count, int flags)
    {
        if (!is_aligned<ARCH_PAGE_SIZE>(address)) count++;
        address = align_down<ARCH_PAGE_SIZE>(address);

        while (count--)
        {
            TRY(MMU::remap(address, flags));
            address += ARCH_PAGE_SIZE;
        }

        return {};
    }
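
    // These helpers check whether a virtual address is mapped at all (default access) or mapped with at least
    // one of the requested flags.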
    bool validate_page_default_access(u64 address)
    {
        auto rc = MMU::get_flags(address);
        if (rc.has_error()) return false;
        return true;
    }

    bool validate_page_access(u64 address, int flags)
    {
        auto rc = MMU::get_flags(address);
        if (rc.has_error()) return false;
        if (rc.value() & flags) return true;
        return false;
    }
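
    // Copies a NUL-terminated string from user memory into a kernel String, validating each user page before
    // reading from it.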
    // FIXME: Make this more efficient.
    Result<String> strdup_from_user(u64 address)
    {
        if (!validate_page_access(address, MMU::User)) return err(EFAULT);

        Vector<char> result;

        while (*(char*)address != 0)
        {
            TRY(result.try_append(*(char*)address));
            address++;
            if ((address % ARCH_PAGE_SIZE) == 0)
            {
                if (!validate_page_access(address, MMU::User)) return err(EFAULT);
            }
        }

        TRY(result.try_append(0)); // null terminator

        return String { result.release_data() };
    }
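
    // Validates that an entire buffer, which may span several pages, is accessible with the given flags
    // (or simply mapped at all when no flags are given).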
    bool validate_access(const void* mem, usize size, int flags)
    {
        uintptr_t address = (uintptr_t)mem;
        uintptr_t page = align_down<ARCH_PAGE_SIZE>(address);

        uintptr_t diff = address - page;

        usize pages = get_blocks_from_size(size + diff, ARCH_PAGE_SIZE);

        while (pages--)
        {
            if (flags > 0)
            {
                if (!validate_page_access(page, flags)) return false;
            }
            else
            {
                if (!validate_page_default_access(page)) return false;
            }
            page += ARCH_PAGE_SIZE;
        }

        return true;
    }

    // FIXME: Use memcpy() in both copy_to_user and copy_from_user().

    bool copy_to_user(void* user, const void* kernel, usize size)
    {
        uintptr_t user_ptr = (uintptr_t)user;
        uintptr_t user_page = align_down<ARCH_PAGE_SIZE>(user_ptr);

        const u8* kernel_ptr = (const u8*)kernel;

        // Userspace pointer not aligned on page boundary
        if (user_ptr != user_page)
        {
            if (!validate_page_access(user_page, MMU::ReadWrite | MMU::User)) return false;
        }

        while (size--)
        {
            // Crossed a page boundary, gotta check the page tables again before touching any memory!!
            if ((user_ptr % ARCH_PAGE_SIZE) == 0)
            {
                if (!validate_page_access(user_ptr, MMU::ReadWrite | MMU::User)) return false;
            }

            *(u8*)user_ptr = *kernel_ptr++;
            user_ptr++;
        }

        return true;
    }

    bool copy_from_user(const void* user, void* kernel, usize size)
    {
        uintptr_t user_ptr = (uintptr_t)user;
        uintptr_t user_page = align_down<ARCH_PAGE_SIZE>(user_ptr);

        u8* kernel_ptr = (u8*)kernel;

        // Userspace pointer not aligned on page boundary
        if (user_ptr != user_page)
        {
            if (!validate_page_access(user_page, MMU::User)) return false;
        }

        while (size--)
        {
            // Crossed a page boundary, gotta check the page tables again before touching any memory!!
            if ((user_ptr % ARCH_PAGE_SIZE) == 0)
            {
                if (!validate_page_access(user_ptr, MMU::User)) return false;
            }

            *kernel_ptr++ = *(const u8*)user_ptr;
            user_ptr++;
        }

        return true;
    }
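
    // Memory statistics, in bytes.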
    usize free()
    {
        return free_mem;
    }

    usize used()
    {
        return used_mem;
    }

    usize reserved()
    {
        return reserved_mem;
    }

    usize total()
    {
        return free_mem + used_mem + reserved_mem;
    }
}