kernel: Rename UserVM to AddressSpace
All checks were successful
continuous-integration/drone/push Build is passing
This is a more appropriate name now that it does more than just allocate virtual memory. To be fair, the old name was a bit awkward anyway; it should have been UserVMAllocator, I guess.
parent 5e564e9ae3
commit c599251d2a
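For a quick sense of what the renamed class does, here is a minimal sketch of how kernel code drives it after this commit, pieced together only from the calls that appear in the diff below (try_create, alloc_region, test_and_alloc_region, free_region, page_directory). The helper name, the fixed address and the page counts are made up for illustration and are not part of the commit:

// Illustrative sketch only; demo_scratch_region(), 0x200000 and the page counts are hypothetical.
static Result<void> demo_scratch_region(AddressSpace* space)
{
    // Ask the address space to pick any free 4-page range of user VM.
    u64 address = TRY(space->alloc_region(4));

    // Or claim a specific range (hypothetical address), failing if any page in
    // it is already in use; sys_mmap and the ELF loader below do exactly this.
    if (!TRY(space->test_and_alloc_region(0x200000, 4))) return err(ENOMEM);

    // Hand the first range back. free_region() reports whether anything was
    // actually reserved there; sys_munmap below uses that to silently succeed
    // when the range was never mapped.
    bool was_mapped = TRY(space->free_region(address, 4));
    (void)was_mapped; // a real caller might act on this

    return {};
}

The syscall and ELF loader hunks further down show the real call sites, reached through Thread::address_space (formerly Thread::vm_allocator).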
@@ -12,7 +12,7 @@ set(SOURCES
     src/memory/MemoryManager.cpp
     src/memory/Heap.cpp
     src/memory/KernelVM.cpp
-    src/memory/UserVM.cpp
+    src/memory/AddressSpace.cpp
     src/memory/MemoryMap.cpp
     src/boot/Init.cpp
     src/arch/Serial.cpp
@@ -1,4 +1,4 @@
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include "Log.h"
 #include "arch/MMU.h"
 #include "memory/Heap.h"
@@ -8,9 +8,9 @@
 static constexpr u64 VM_START = ARCH_PAGE_SIZE;
 static constexpr u64 VM_END = 0x0000800000000000;
 
-Result<OwnedPtr<UserVM>> UserVM::try_create()
+Result<OwnedPtr<AddressSpace>> AddressSpace::try_create()
 {
-    OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>());
+    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
 
     TRY(ptr->create_null_region());
     TRY(ptr->create_default_region());
@@ -20,7 +20,7 @@ Result<OwnedPtr<UserVM>> UserVM::try_create()
     return move(ptr);
 }
 
-Result<void> UserVM::create_null_region()
+Result<void> AddressSpace::create_null_region()
 {
     // Create a small region at the start of the address space to prevent anyone from mapping page 0.
     auto* region = TRY(make<VMRegion>());
@@ -33,7 +33,7 @@ Result<void> UserVM::create_null_region()
     return {};
 }
 
-Result<void> UserVM::create_default_region()
+Result<void> AddressSpace::create_default_region()
 {
     // Create a free region covering the rest of the address space.
     auto* region = TRY(make<VMRegion>());
@@ -45,9 +45,9 @@ Result<void> UserVM::create_default_region()
     return {};
 }
 
-Result<OwnedPtr<UserVM>> UserVM::clone()
+Result<OwnedPtr<AddressSpace>> AddressSpace::clone()
 {
-    OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>());
+    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
 
     for (const auto* region : m_regions)
     {
@@ -61,11 +61,11 @@ Result<OwnedPtr<UserVM>> UserVM::clone()
     return move(ptr);
 }
 
-UserVM::UserVM()
+AddressSpace::AddressSpace()
 {
 }
 
-UserVM& UserVM::operator=(UserVM&& other)
+AddressSpace& AddressSpace::operator=(AddressSpace&& other)
 {
     if (&other == this) return *this;
 
@@ -78,7 +78,7 @@ UserVM& UserVM::operator=(UserVM&& other)
     return *this;
 }
 
-Result<u64> UserVM::alloc_region(usize count, bool persistent)
+Result<u64> AddressSpace::alloc_region(usize count, bool persistent)
 {
     for (auto* region = m_regions.expect_last(); region; region = m_regions.previous(region).value_or(nullptr))
     {
@@ -108,7 +108,7 @@ Result<u64> UserVM::alloc_region(usize count, bool persistent)
     return err(ENOMEM);
 }
 
-Result<bool> UserVM::set_region(u64 address, usize count, bool used, bool persistent)
+Result<bool> AddressSpace::set_region(u64 address, usize count, bool used, bool persistent)
 {
     if (address >= VM_END) return err(EINVAL);
 
@@ -171,7 +171,7 @@ Result<bool> UserVM::set_region(u64 address, usize count, bool used, bool persis
     return true;
 }
 
-void UserVM::merge_contiguous_regions(VMRegion* a, VMRegion* b)
+void AddressSpace::merge_contiguous_regions(VMRegion* a, VMRegion* b)
 {
     a->end = b->end;
     a->count += b->count;
@@ -179,7 +179,7 @@ void UserVM::merge_contiguous_regions(VMRegion* a, VMRegion* b)
     delete b;
 }
 
-void UserVM::try_merge_region_with_neighbors(VMRegion* region)
+void AddressSpace::try_merge_region_with_neighbors(VMRegion* region)
 {
     auto prev = m_regions.previous(region);
     if (prev.has_value() && (*prev)->used == region->used && (*prev)->persistent == region->persistent)
@@ -195,7 +195,7 @@ void UserVM::try_merge_region_with_neighbors(VMRegion* region)
     }
 }
 
-Result<VMRegion*> UserVM::split_region(VMRegion* parent, u64 boundary)
+Result<VMRegion*> AddressSpace::split_region(VMRegion* parent, u64 boundary)
 {
     auto* region = TRY(make<VMRegion>());
 
@@ -212,7 +212,7 @@ Result<VMRegion*> UserVM::split_region(VMRegion* parent, u64 boundary)
     return region;
 }
 
-UserVM::~UserVM()
+AddressSpace::~AddressSpace()
 {
     m_regions.consume([](VMRegion* region) { delete region; });
     MMU::delete_userspace_page_directory(m_directory);
@@ -14,13 +14,13 @@ class VMRegion : LinkedListNode<VMRegion>
     bool persistent { false };
 };
 
-class UserVM
+class AddressSpace
 {
   public:
-    UserVM();
-    ~UserVM();
+    AddressSpace();
+    ~AddressSpace();
 
-    UserVM& operator=(UserVM&& other);
+    AddressSpace& operator=(AddressSpace&& other);
 
     Result<u64> alloc_region(usize count, bool persistent = false);
 
@@ -34,9 +34,9 @@ class UserVM
         return set_region(address, count, false, false);
     }
 
-    static Result<OwnedPtr<UserVM>> try_create();
+    static Result<OwnedPtr<AddressSpace>> try_create();
 
-    Result<OwnedPtr<UserVM>> clone();
+    Result<OwnedPtr<AddressSpace>> clone();
 
     PageDirectory* page_directory() const
     {
@@ -50,6 +50,7 @@ class UserVM
     void try_merge_region_with_neighbors(VMRegion* region);
     void merge_contiguous_regions(VMRegion* a, VMRegion* b);
     Result<VMRegion*> split_region(VMRegion* parent, u64 boundary);
 
     LinkedList<VMRegion> m_regions;
     PageDirectory* m_directory;
 };
@@ -37,12 +37,12 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
     Thread* current = Scheduler::current();
 
     u64 address;
-    if (!addr) address = TRY(current->vm_allocator->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
+    if (!addr) address = TRY(current->address_space->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
     else
     {
         // FIXME: We should be more flexible if MAP_FIXED was not specified.
         address = align_down<ARCH_PAGE_SIZE>((u64)addr);
-        if (!TRY(current->vm_allocator->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
+        if (!TRY(current->address_space->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
             return err(ENOMEM);
     }
 
@@ -69,7 +69,7 @@ Result<u64> sys_munmap(Registers*, SyscallArgs args)
 
     Thread* current = Scheduler::current();
 
-    bool ok = TRY(current->vm_allocator->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
+    bool ok = TRY(current->address_space->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
 
     // POSIX says munmap should silently do nothing if the memory was not already mapped.
     if (!ok) return 0;
@@ -25,7 +25,7 @@ static bool can_write_segment(u32 flags)
 
 namespace ELFLoader
 {
-    Result<ELFData> load(SharedPtr<VFS::Inode> inode, UserVM* vm)
+    Result<ELFData> load(SharedPtr<VFS::Inode> inode, AddressSpace* space)
     {
         Elf64_Ehdr elf_header;
         usize nread = TRY(inode->read((u8*)&elf_header, 0, sizeof elf_header));
@@ -102,7 +102,7 @@ namespace ELFLoader
         if (can_write_segment(program_header.p_flags)) flags |= MMU::ReadWrite;
         if (can_execute_segment(program_header.p_flags)) flags &= ~MMU::NoExecute;
 
-        if (!TRY(vm->test_and_alloc_region(
+        if (!TRY(space->test_and_alloc_region(
                 base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), true)))
             return err(ENOMEM);
 
@@ -1,6 +1,6 @@
 #pragma once
 #include "fs/VFS.h"
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include <luna/Types.h>
 
 #define ELFMAG "\177ELF"
@@ -54,5 +54,5 @@ struct ELFData
 
 namespace ELFLoader
 {
-    Result<ELFData> load(SharedPtr<VFS::Inode> inode, UserVM* vm);
+    Result<ELFData> load(SharedPtr<VFS::Inode> inode, AddressSpace* space);
 };
@@ -2,7 +2,7 @@
 
 #include "arch/MMU.h"
 #include "fs/VFS.h"
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
 #include <luna/Result.h>
@@ -70,7 +70,7 @@ struct Thread : public LinkedListNode<Thread>
     Stack stack;
     Stack kernel_stack;
 
-    OwnedPtr<UserVM> vm_allocator;
+    OwnedPtr<AddressSpace> address_space;
     Option<FileDescriptor> fd_table[FD_MAX] = {};
 
     Result<int> allocate_fd(int min);
@@ -99,7 +99,7 @@ struct Thread : public LinkedListNode<Thread>
 
     PageDirectory* self_directory() const
    {
-        return vm_allocator->page_directory();
+        return address_space->page_directory();
     }
 
     PageDirectory* active_directory { nullptr };
@@ -7,11 +7,11 @@
 static constexpr usize DEFAULT_USER_STACK_PAGES = 6;
 static constexpr usize DEFAULT_USER_STACK_SIZE = DEFAULT_USER_STACK_PAGES * ARCH_PAGE_SIZE;
 
-static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack, UserVM* vm)
+static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack, AddressSpace* space)
 {
     const u64 THREAD_STACK_BASE = 0x10000;
 
-    if (!TRY(vm->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM);
+    if (!TRY(space->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM);
 
     TRY(MemoryManager::alloc_at_zeroed(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES,
                                        MMU::ReadWrite | MMU::NoExecute | MMU::User));
@@ -32,26 +32,26 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::try_load_from_elf(SharedPtr<VFS::Inod
 {
     auto image = TRY(make_owned<ThreadImage>());
 
-    auto vm_allocator = TRY(UserVM::try_create());
+    auto address_space = TRY(AddressSpace::try_create());
 
     auto old_directory = MMU::get_page_directory();
 
-    MMU::switch_page_directory(vm_allocator->page_directory());
+    MMU::switch_page_directory(address_space->page_directory());
 
     auto guard = make_scope_guard([=] { MMU::switch_page_directory(old_directory); });
 
-    const ELFData data = TRY(ELFLoader::load(inode, vm_allocator.ptr()));
+    const ELFData data = TRY(ELFLoader::load(inode, address_space.ptr()));
 
     Stack user_stack;
     Stack kernel_stack;
-    TRY(create_stacks(user_stack, kernel_stack, vm_allocator.ptr()));
+    TRY(create_stacks(user_stack, kernel_stack, address_space.ptr()));
 
     guard.deactivate();
 
     image->m_kernel_stack = kernel_stack;
     image->m_user_stack = user_stack;
     image->m_loaded_image_data = data;
-    image->m_vm_allocator = move(vm_allocator);
+    image->m_address_space = move(address_space);
     image->m_sp = user_stack.top();
 
     return image;
@@ -61,7 +61,7 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::clone_from_thread(Thread* parent)
 {
     auto image = TRY(make_owned<ThreadImage>());
 
-    auto vm_allocator = TRY(parent->vm_allocator->clone());
+    auto address_space = TRY(parent->address_space->clone());
 
     const ELFData data = { .entry = parent->ip() };
 
@@ -71,7 +71,7 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::clone_from_thread(Thread* parent)
     image->m_kernel_stack = kernel_stack;
     image->m_user_stack = parent->stack;
     image->m_loaded_image_data = data;
-    image->m_vm_allocator = move(vm_allocator);
+    image->m_address_space = move(address_space);
     image->m_sp = parent->sp();
 
     return image;
@@ -115,7 +115,7 @@ void ThreadImage::apply(Thread* thread)
     thread->stack = m_user_stack;
     thread->set_sp(align_down<16>(m_sp));
 
-    thread->active_directory = m_vm_allocator->page_directory();
+    thread->active_directory = m_address_space->page_directory();
 
-    thread->vm_allocator = move(m_vm_allocator);
+    thread->address_space = move(m_address_space);
 }
@@ -4,7 +4,7 @@
 #include "arch/CPU.h"
 #include "arch/MMU.h"
 #include "fs/VFS.h"
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include "thread/Thread.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
@@ -28,7 +28,7 @@ class ThreadImage
     void apply(Thread* thread);
 
   private:
-    OwnedPtr<UserVM> m_vm_allocator;
+    OwnedPtr<AddressSpace> m_address_space;
     Stack m_user_stack;
     Stack m_kernel_stack;
     ELFData m_loaded_image_data;