// Luna/kernel/src/memory/AddressSpace.cpp
// Userspace virtual address space management: region bookkeeping, mmap-style
// allocation, and page-directory lifetime.
#include "memory/AddressSpace.h"
#include "arch/MMU.h"
#include "memory/Heap.h"
#include "memory/MemoryManager.h"
#include "memory/SharedMemory.h"
#include <bits/mmap-flags.h>
#include <luna/CString.h>
#include <luna/ScopeGuard.h>
// Bounds of the userspace virtual memory range managed by AddressSpace.
// Page 0 is excluded (reserved by the null region, see create_null_region()).
// 0x0000800000000000 is 2^47 — presumably the start of the x86-64
// non-canonical hole, i.e. the end of user-addressable space.
static constexpr u64 VM_START = ARCH_PAGE_SIZE;
static constexpr u64 VM_END = 0x0000800000000000;
// Create a fresh userspace address space: a new page directory, a guard
// region over page 0, and one free region covering the rest of userspace VM.
Result<OwnedPtr<AddressSpace>> AddressSpace::try_create()
{
    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());

    // Create the page directory before the regions: if a later step fails,
    // ~AddressSpace() switches to m_directory while tearing down regions, so
    // it must already be valid. (Previously the directory was created last,
    // so a region-allocation failure could destruct with m_directory unset.)
    ptr->m_directory = TRY(MMU::create_page_directory_for_userspace());

    TRY(ptr->create_null_region());
    TRY(ptr->create_default_region());

    return move(ptr);
}
// Reserve the first page of the address space so nothing can ever be mapped
// at address 0 — a null-pointer dereference must always fault.
Result<void> AddressSpace::create_null_region()
{
    auto* region = TRY(make<VMRegion>());
    region->start = 0;
    region->end = VM_START;
    region->count = 1;
    region->used = true;
    // Persistent regions are rejected by set_region(), so userspace can
    // neither unmap nor remap this page.
    region->persistent = true;
    m_regions.append(region);
    return {};
}
// Seed the allocator with a single free region spanning all of userspace VM
// above the null guard page: [VM_START, VM_END).
Result<void> AddressSpace::create_default_region()
{
    auto* free_region = TRY(make<VMRegion>());
    free_region->start = VM_START;
    free_region->end = VM_END;
    free_region->used = false;
    // Total userspace pages, minus the one reserved by the null region.
    free_region->count = (VM_END / ARCH_PAGE_SIZE) - 1;
    m_regions.append(free_region);
    return {};
}
// Duplicate this address space (fork-style): clone the page directory, deep-
// copy the region list, share pages for MAP_SHARED regions and copy page
// contents for private ones.
Result<OwnedPtr<AddressSpace>> AddressSpace::clone()
{
    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
    ptr->m_directory = TRY(MMU::clone_userspace_page_directory(m_directory));
    for (const auto* region : m_regions)
    {
        auto* new_region = TRY(make<VMRegion>());
        // Byte-copy every field of the region. NOTE(review): if VMRegion
        // carries intrusive list links, those are copied too — this assumes
        // append() below re-links them. Confirm against the list type.
        memcpy(new_region, region, sizeof(*region));
        ptr->m_regions.append(new_region);
        // Only regions that are mapped with some protection have pages to
        // transfer; prot == 0 (PROT_NONE) regions have nothing mapped.
        if (new_region->used && new_region->prot != 0 && new_region->flags & MAP_SHARED)
        {
            // Shared mapping: point the clone at the same physical pages and
            // take another reference on the shared-memory object.
            TRY(MemoryManager::copy_region(new_region->start, new_region->count, m_directory, ptr->m_directory));
            auto* shm = g_shared_memory_map.try_get_ref(new_region->shmid);
            if (shm) shm->refs++;
        }
        else if (new_region->used && new_region->prot != 0)
        {
            // Private mapping: duplicate the page contents into the clone.
            TRY(MemoryManager::copy_region_data(new_region->start, new_region->count, m_directory, ptr->m_directory));
        }
    }
    return move(ptr);
}
// Default-construct an empty address space. m_directory is explicitly nulled:
// both ~AddressSpace() and operator=(AddressSpace&&) test it against null, so
// it must never be left indeterminate (e.g. when try_create() fails early).
// NOTE(review): harmless but redundant if the header already has an in-class
// initializer for m_directory.
AddressSpace::AddressSpace() : m_directory(nullptr)
{
}
// Move-assignment: drop everything we currently own, then steal the other
// address space's regions and page directory, leaving it empty but valid.
AddressSpace& AddressSpace::operator=(AddressSpace&& other)
{
    if (this == &other) return *this;

    // Release our own resources first.
    m_regions.consume([](VMRegion* region) { delete region; });
    if (m_directory) MMU::delete_userspace_page_directory(m_directory);

    // Take ownership of the other space's resources...
    m_directory = other.m_directory;
    m_regions = other.m_regions;

    // ...and leave it safely destructible.
    other.m_directory = nullptr;
    other.m_regions.reset();

    return *this;
}
// Find `count` free pages, mark them used with the given mmap attributes, and
// return the chosen base address. The list is searched backwards so the
// highest-addressed fit wins, keeping low addresses free for fixed mappings.
// Returns ENOMEM when no free region is large enough.
Result<u64> AddressSpace::alloc_region(usize count, int prot, int flags, off_t offset, u64 shmid, bool persistent)
{
    // Stamp a region with the attributes of this allocation.
    auto update_region = [=](VMRegion* region) {
        region->used = true;
        region->persistent = persistent;
        region->prot = prot;
        region->flags = flags;
        region->offset = offset;
        region->shmid = shmid;
    };

    for (auto* region = m_regions.expect_last(); region; region = m_regions.previous(region).value_or(nullptr))
    {
        if (!region->used)
        {
            if (region->count < count) continue;
            if (region->count == count)
            {
                // Exact fit: claim the whole free region.
                update_region(region);
                u64 address = region->start;
                // Merge before returning; grab the address first since the
                // region object may be coalesced away.
                try_merge_region_with_neighbors(region);
                return address;
            }
            // Larger than needed: carve the allocation out of the top end so
            // the remaining free space stays at lower addresses.
            u64 boundary = region->end - (count * ARCH_PAGE_SIZE);
            auto* new_region = TRY(split_region(region, boundary));
            update_region(new_region);
            try_merge_region_with_neighbors(new_region);
            return boundary;
        }
    }
    return err(ENOMEM);
}
// Mark the page range [address, address + count pages) as used (MAP_FIXED
// style mapping) or free (munmap), splitting and merging regions as needed.
// Returns false when the request touches a persistent region or, when
// mapping, an already-used one; EINVAL when address is outside userspace.
Result<bool> AddressSpace::set_region(u64 address, usize count, bool used, int prot, int flags, off_t offset, u64 shmid,
                                      bool persistent)
{
    if (address >= VM_END) return err(EINVAL);
    u64 end = address + (count * ARCH_PAGE_SIZE);

    auto update_region = [=](VMRegion* region) {
        // When freeing, drop any shared-memory reference before the region's
        // attributes are overwritten below.
        if (!used) region->cleanup_shared();
        region->used = used;
        region->persistent = persistent;
        region->prot = prot;
        region->flags = flags;
        region->offset = offset;
        region->shmid = shmid;
    };

    for (auto* region : m_regions)
    {
        if (region->end < address) continue;   // entirely below the range
        if (region->start > end) return false; // past the range: nothing left
        if (region->persistent) return false;  // the null region is untouchable
        if (region->used == used)
        {
            if (used) return false; // refuse to map over an existing mapping
            continue;               // freeing already-free space is a no-op
        }
        if (region->start >= address && region->end <= end)
        {
            // Region lies fully inside the requested range: flip it whole.
            update_region(region);
            if (region->start == address && region->end == end)
            {
                try_merge_region_with_neighbors(region);
                return true;
            }
            // NOTE(review): merging may delete `region` (coalesced into its
            // predecessor) while the range-for keeps iterating from it —
            // verify the list iterator survives this.
            try_merge_region_with_neighbors(region);
            continue;
        }
        if (region->end > end && region->start < address)
        {
            // Requested range strictly inside this region: split off both
            // ends and update only the middle piece.
            auto* middle_region = TRY(split_region(region, address));
            TRY(split_region(middle_region, end));
            update_region(middle_region);
            return true;
        }
        if (region->start < address)
        {
            // Region straddles the start of the range: split, update the
            // upper part, and keep going unless it also ends the range.
            bool finished = region->end == end;
            auto* split = TRY(split_region(region, address));
            update_region(split);
            try_merge_region_with_neighbors(split);
            if (!finished) continue;
            return true;
        }
        if (region->end > end)
        {
            // Region straddles the end of the range: split and update the
            // lower part; the range is now fully covered.
            TRY(split_region(region, end));
            update_region(region);
            try_merge_region_with_neighbors(region);
            return true;
        }
    }
    return true;
}
// Flush every MAP_SHARED region intersecting [address, address + count pages)
// back to its backing file.
Result<void> AddressSpace::sync_regions(u64 address, usize count)
{
    if (address >= VM_END) return err(EINVAL);

    const u64 end = address + (count * ARCH_PAGE_SIZE);
    for (auto* region : m_regions)
    {
        if (region->end < address) continue; // below the range
        if (region->start > end) break;      // past the range: done
        region->sync_shared();
    }
    return {};
}
// Fold region `b` into its left neighbor `a` and free it. Callers guarantee
// the two are adjacent in the list, so a->end == b->start.
void AddressSpace::merge_contiguous_regions(VMRegion* a, VMRegion* b)
{
    a->count += b->count;
    a->end = b->end;
    m_regions.remove(b);
    delete b;
}
// Coalesce `region` with its previous and/or next neighbor when every
// attribute relevant to merging matches.
// NOTE(review): `offset` is not compared, so two used file-backed regions
// with different file offsets could merge — confirm this is intentional.
void AddressSpace::try_merge_region_with_neighbors(VMRegion* region)
{
    auto equals = [](VMRegion* a, VMRegion* b) {
        return a->used == b->used && a->persistent == b->persistent && a->prot == b->prot &&
               a->flags == b->flags && a->shmid == b->shmid;
    };

    // Merge into the left neighbor first; `region` then refers to the
    // surviving (left) region for the check against the right neighbor.
    auto prev = m_regions.previous(region);
    if (prev.has_value() && equals(*prev, region))
    {
        merge_contiguous_regions(*prev, region);
        region = *prev;
    }

    auto next = m_regions.next(region);
    if (next.has_value() && equals(*next, region)) merge_contiguous_regions(region, *next);
}
// Split `parent` at `boundary`, returning the new upper region covering
// [boundary, parent->end). The parent keeps [parent->start, boundary); the
// new region inherits all of the parent's mapping attributes.
Result<VMRegion*> AddressSpace::split_region(VMRegion* parent, u64 boundary)
{
    auto* region = TRY(make<VMRegion>());
    region->start = boundary;
    region->end = parent->end;
    region->count = (region->end - region->start) / ARCH_PAGE_SIZE;
    region->used = parent->used;
    region->persistent = parent->persistent;
    region->prot = parent->prot;
    region->flags = parent->flags;
    region->shmid = parent->shmid;
    // Fix: the upper half of a file-backed mapping starts further into the
    // file. Previously `offset` was never set on the new region, so after a
    // partial unmap/remap of a MAP_SHARED region, sync_shared() would write
    // the upper half back to the wrong file offset.
    region->offset = parent->offset + (off_t)(boundary - parent->start);
    m_regions.add_after(parent, region);
    parent->end = boundary;
    parent->count -= region->count;
    return region;
}
// Tear down the address space: release shared-memory references, free the
// region list, and delete the page directory.
AddressSpace::~AddressSpace()
{
    // cleanup_shared() calls MMU::unmap(), which operates on the *current*
    // page directory, so temporarily switch to this space's directory and
    // restore the caller's afterwards.
    // NOTE(review): assumes m_directory is valid here (or that
    // switch_page_directory tolerates null for a partially-constructed
    // space) — confirm.
    auto* directory = MMU::get_page_directory();
    MMU::switch_page_directory(this->m_directory);
    m_regions.consume([this](VMRegion* region) {
        region->cleanup_shared();
        delete region;
    });
    MMU::switch_page_directory(directory);
    if (m_directory) MMU::delete_userspace_page_directory(m_directory);
}
// Release this region's reference to its shared-memory object, if it has one:
// unmap the region's pages, and destroy the object when the last reference
// is dropped.
void VMRegion::cleanup_shared()
{
    if (!used || !(flags & MAP_SHARED)) return;

    SharedMemory* shmem = g_shared_memory_map.try_get_ref(shmid);
    if (!shmem) return;

    // Unmap from the currently active page directory (the caller is expected
    // to have switched to the owning address space's directory).
    for (u64 addr = start; addr < end; addr += ARCH_PAGE_SIZE) MMU::unmap(addr);

    if (--shmem->refs == 0)
    {
        shmem->free();
        g_shared_memory_map.try_remove(shmid);
    }
}
// Write a writable MAP_SHARED region's contents back to its backing inode.
// Best-effort: read-only shared regions are skipped (they cannot have dirtied
// the pages) and the write's result is deliberately ignored.
void VMRegion::sync_shared()
{
    if (!used || !(flags & MAP_SHARED) || !(prot & PROT_WRITE)) return;

    SharedMemory* shmem = g_shared_memory_map.try_get_ref(shmid);
    if (!shmem) return;

    shmem->inode->write((const u8*)start, offset, count * ARCH_PAGE_SIZE);
}