#include "memory/SharedMemory.h"
|
|
#include "memory/MemoryManager.h"
|
|
#include <bits/mmap-flags.h>
|
|
#include <luna/ScopeGuard.h>
|
|
|
|
HashMap<u64, SharedMemory> g_shared_memory_map;
|
|
Atomic<u64> g_next_shmem_id;
|
|
|
|
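
// Allocates `count` physical frames for a new shared memory object, copying initial
// data from `mem` page by page if it is non-null (otherwise zero-filling), then
// registers the object in g_shared_memory_map and returns its ID.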
Result<u64> SharedMemory::create(u8* mem, off_t offset, usize count)
|
|
{
|
|
SharedMemory shmem;
|
|
shmem.offset = offset;
|
|
|
|
auto guard = make_scope_guard([&shmem] {
|
|
for (u64 frame : shmem.frames) { MemoryManager::free_frame(frame); }
|
|
});
|
|
|
|
while (count--)
|
|
{
|
|
u64 frame = TRY(MemoryManager::alloc_frame());
|
|
TRY(shmem.frames.try_append(frame));
|
|
if (mem)
|
|
{
|
|
memcpy((void*)MMU::translate_physical_address(frame), mem, ARCH_PAGE_SIZE);
|
|
mem += ARCH_PAGE_SIZE;
|
|
}
|
|
else { memset((void*)MMU::translate_physical_address(frame), 0, ARCH_PAGE_SIZE); }
|
|
}
|
|
|
|
const u64 id = g_next_shmem_id++;
|
|
|
|
check(TRY(g_shared_memory_map.try_set(id, move(shmem))));
|
|
|
|
guard.deactivate();
|
|
|
|
return id;
|
|
}
|
|
|
|
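
// Appends `count` newly allocated frames to the end of the region,
// optionally copying their initial contents from `mem`.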
Result<void> SharedMemory::grow_forward(u8* mem, usize count)
|
|
{
|
|
u64 old_count = frames.size();
|
|
|
|
auto guard = make_scope_guard([old_count, this] {
|
|
while (old_count < this->frames.size()) { MemoryManager::free_frame(*this->frames.try_pop()); }
|
|
});
|
|
|
|
while (count--)
|
|
{
|
|
u64 frame = TRY(MemoryManager::alloc_frame());
|
|
TRY(frames.try_append(frame));
|
|
if (mem)
|
|
{
|
|
memcpy((void*)MMU::translate_physical_address(frame), mem, ARCH_PAGE_SIZE);
|
|
mem += ARCH_PAGE_SIZE;
|
|
}
|
|
}
|
|
|
|
guard.deactivate();
|
|
|
|
return {};
|
|
}
|
|
|
|
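
// Prepends `count` newly allocated frames to the front of the region and moves
// `offset` back by the corresponding number of bytes.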
Result<void> SharedMemory::grow_backward(u8* mem, usize count)
|
|
{
|
|
Vector<u64> new_frames;
|
|
|
|
const usize bytes = count * ARCH_PAGE_SIZE;
|
|
|
|
auto guard = make_scope_guard([&new_frames, count] {
|
|
for (u64 i = 0; i < count && i < new_frames.size(); i++) { MemoryManager::free_frame(new_frames[i]); }
|
|
});
|
|
|
|
while (count--)
|
|
{
|
|
u64 frame = TRY(MemoryManager::alloc_frame());
|
|
TRY(new_frames.try_append(frame));
|
|
if (mem)
|
|
{
|
|
memcpy((void*)MMU::translate_physical_address(frame), mem, ARCH_PAGE_SIZE);
|
|
mem += ARCH_PAGE_SIZE;
|
|
}
|
|
}
|
|
|
|
for (u64 frame : frames) { TRY(new_frames.try_append(frame)); }
|
|
|
|
frames = move(new_frames);
|
|
offset -= bytes;
|
|
|
|
guard.deactivate();
|
|
|
|
return {};
|
|
}
|
|
|
|
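
// Maps `count` pages of this region, starting at byte offset `_offset`, to
// consecutive virtual pages beginning at `virt`.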
Result<void> SharedMemory::map(u64 virt, int flags, off_t _offset, usize count)
|
|
{
|
|
usize off = _offset / ARCH_PAGE_SIZE;
|
|
if (off + count > frames.size()) return err(EINVAL);
|
|
|
|
for (usize i = off; i < count; i++)
|
|
{
|
|
TRY(MMU::map(virt, frames[i], flags, MMU::UseHugePages::No));
|
|
virt += ARCH_PAGE_SIZE;
|
|
}
|
|
|
|
return {};
|
|
}
|
|
|
|
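
// Writes the region's contents back to its backing inode or device (if any, and if the
// mapping is writable), then releases all physical frames.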
void SharedMemory::free()
{
    // If this region is backed by an inode or device and was mapped writable,
    // write its contents back before releasing the frames.
    if ((inode || device) && (prot & PROT_WRITE))
    {
        for (u64 i = 0; i < frames.size(); i++)
        {
            if (inode)
                inode->write((u8*)MMU::translate_physical_address(frames[i]), offset + (i * ARCH_PAGE_SIZE),
                             ARCH_PAGE_SIZE);
            if (device)
                device->write((u8*)MMU::translate_physical_address(frames[i]), offset + (i * ARCH_PAGE_SIZE),
                              ARCH_PAGE_SIZE);
        }
    }

    for (u64 frame : frames) { MemoryManager::free_frame(frame); }
}