Compare commits


No commits in common. "c599251d2afeeedb65c69acb45e37f5987abef8c" and "5c9503ac71002f8bd4dd42da1cc6f171355e1779" have entirely different histories.

11 changed files with 62 additions and 79 deletions
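The refactor itself: the kernel's AddressSpace class becomes UserVM and is narrowed to pure address-range bookkeeping, while ownership of the hardware page directory moves to the thread layer (a plain Thread::self_directory field plus a ThreadImage::m_directory member, freed explicitly by execve, the scheduler's reap path, and ThreadImage's failure guard). A rough sketch of the ownership shift, with stub types, simplified from the hunks below rather than copied from the kernel:

```cpp
// Stub types so the sketch stands alone; the real ones live in luna/ and the kernel tree.
struct PageDirectory;
template <typename T> struct LinkedList { /* ... */ };
template <typename T> struct OwnedPtr { T* ptr = nullptr; };
struct VMRegion { unsigned long start, end, count; bool used, persistent; };

// Before: the allocator also owned the hardware page directory.
struct AddressSpace
{
    LinkedList<VMRegion> m_regions;
    PageDirectory* m_directory; // created, cloned and deleted by AddressSpace itself
};

// After: UserVM only tracks which address ranges are used or free...
struct UserVM
{
    LinkedList<VMRegion> m_regions;
};

// ...and the directory becomes a plain field on the thread.
struct Thread
{
    OwnedPtr<UserVM> vm_allocator; // was: OwnedPtr<AddressSpace> address_space
    PageDirectory* self_directory; // was: a self_directory() accessor
};
```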

View File

@@ -12,7 +12,7 @@ set(SOURCES
     src/memory/MemoryManager.cpp
     src/memory/Heap.cpp
     src/memory/KernelVM.cpp
-    src/memory/AddressSpace.cpp
+    src/memory/UserVM.cpp
     src/memory/MemoryMap.cpp
     src/boot/Init.cpp
     src/arch/Serial.cpp

View File

@@ -1,4 +1,4 @@
-#include "memory/AddressSpace.h"
+#include "memory/UserVM.h"
 #include "Log.h"
 #include "arch/MMU.h"
 #include "memory/Heap.h"
@@ -8,19 +8,17 @@
 static constexpr u64 VM_START = ARCH_PAGE_SIZE;
 static constexpr u64 VM_END = 0x0000800000000000;
 
-Result<OwnedPtr<AddressSpace>> AddressSpace::try_create()
+Result<OwnedPtr<UserVM>> UserVM::try_create()
 {
-    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
+    OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>());
     TRY(ptr->create_null_region());
     TRY(ptr->create_default_region());
-    ptr->m_directory = TRY(MMU::create_page_directory_for_userspace());
     return move(ptr);
 }
 
-Result<void> AddressSpace::create_null_region()
+Result<void> UserVM::create_null_region()
 {
     // Create a small region at the start of the address space to prevent anyone from mapping page 0.
     auto* region = TRY(make<VMRegion>());
@@ -33,7 +31,7 @@ Result<void> AddressSpace::create_null_region()
     return {};
 }
 
-Result<void> AddressSpace::create_default_region()
+Result<void> UserVM::create_default_region()
 {
     // Create a free region covering the rest of the address space.
     auto* region = TRY(make<VMRegion>());
@@ -45,9 +43,9 @@ Result<void> AddressSpace::create_default_region()
     return {};
 }
 
-Result<OwnedPtr<AddressSpace>> AddressSpace::clone()
+Result<OwnedPtr<UserVM>> UserVM::clone()
 {
-    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
+    OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>());
 
     for (const auto* region : m_regions)
     {
@@ -56,29 +54,14 @@ Result<OwnedPtr<AddressSpace>> AddressSpace::clone()
         ptr->m_regions.append(new_region);
     }
 
-    ptr->m_directory = TRY(MMU::clone_userspace_page_directory(m_directory));
-
     return move(ptr);
 }
 
-AddressSpace::AddressSpace()
+UserVM::UserVM()
 {
 }
 
-AddressSpace& AddressSpace::operator=(AddressSpace&& other)
-{
-    if (&other == this) return *this;
-    m_regions.consume([](VMRegion* region) { delete region; });
-    MMU::delete_userspace_page_directory(m_directory);
-    m_regions = other.m_regions;
-    m_directory = other.m_directory;
-    return *this;
-}
-
-Result<u64> AddressSpace::alloc_region(usize count, bool persistent)
+Result<u64> UserVM::alloc_region(usize count, bool persistent)
 {
     for (auto* region = m_regions.expect_last(); region; region = m_regions.previous(region).value_or(nullptr))
     {
@@ -108,7 +91,7 @@ Result<u64> AddressSpace::alloc_region(usize count, bool persistent)
     return err(ENOMEM);
 }
 
-Result<bool> AddressSpace::set_region(u64 address, usize count, bool used, bool persistent)
+Result<bool> UserVM::set_region(u64 address, usize count, bool used, bool persistent)
 {
     if (address >= VM_END) return err(EINVAL);
@@ -171,7 +154,7 @@ Result<bool> AddressSpace::set_region(u64 address, usize count, bool used, bool
     return true;
 }
 
-void AddressSpace::merge_contiguous_regions(VMRegion* a, VMRegion* b)
+void UserVM::merge_contiguous_regions(VMRegion* a, VMRegion* b)
 {
     a->end = b->end;
     a->count += b->count;
@@ -179,7 +162,7 @@ void AddressSpace::merge_contiguous_regions(VMRegion* a, VMRegion* b)
     delete b;
 }
 
-void AddressSpace::try_merge_region_with_neighbors(VMRegion* region)
+void UserVM::try_merge_region_with_neighbors(VMRegion* region)
 {
     auto prev = m_regions.previous(region);
     if (prev.has_value() && (*prev)->used == region->used && (*prev)->persistent == region->persistent)
@@ -195,7 +178,7 @@ void AddressSpace::try_merge_region_with_neighbors(VMRegion* region)
     }
 }
 
-Result<VMRegion*> AddressSpace::split_region(VMRegion* parent, u64 boundary)
+Result<VMRegion*> UserVM::split_region(VMRegion* parent, u64 boundary)
 {
     auto* region = TRY(make<VMRegion>());
@@ -212,8 +195,7 @@ Result<VMRegion*> AddressSpace::split_region(VMRegion* parent, u64 boundary)
     return region;
 }
 
-AddressSpace::~AddressSpace()
+UserVM::~UserVM()
 {
     m_regions.consume([](VMRegion* region) { delete region; });
-    MMU::delete_userspace_page_directory(m_directory);
 }
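For readers skimming the hunks above: UserVM keeps a linked list of VMRegion ranges and satisfies allocations by splitting free regions, then re-merging contiguous neighbors on free (merge_contiguous_regions / try_merge_region_with_neighbors). A minimal, self-contained sketch of that bookkeeping, using std::list in place of Luna's intrusive LinkedList and scanning front-to-back where Luna's alloc_region scans from the end; the used/persistent flags and error handling are simplified:

```cpp
#include <cstdint>
#include <list>
#include <optional>

struct Region
{
    uint64_t start, end; // [start, end), measured in pages
    bool used = false;
};

class RegionAllocator
{
  public:
    RegionAllocator(uint64_t start, uint64_t end) { m_regions.push_back({ start, end }); }

    // Find a free region with enough pages; split off the front and mark it used.
    std::optional<uint64_t> alloc(uint64_t count)
    {
        for (auto it = m_regions.begin(); it != m_regions.end(); ++it)
        {
            if (it->used || it->end - it->start < count) continue;
            if (it->end - it->start > count)
            {
                // Split: insert the used front part, shrink the free remainder.
                m_regions.insert(it, { it->start, it->start + count, true });
                it->start += count;
                return std::prev(it)->start;
            }
            it->used = true; // exact fit, no split needed
            return it->start;
        }
        return std::nullopt; // the kernel version returns err(ENOMEM)
    }

    // Mark a region free again and merge it with free neighbors.
    void free(uint64_t start)
    {
        for (auto it = m_regions.begin(); it != m_regions.end(); ++it)
        {
            if (it->start != start || !it->used) continue;
            it->used = false;
            if (auto next = std::next(it); next != m_regions.end() && !next->used)
            {
                it->end = next->end;
                m_regions.erase(next);
            }
            if (it != m_regions.begin() && !std::prev(it)->used)
            {
                std::prev(it)->end = it->end;
                m_regions.erase(it);
            }
            return;
        }
    }

  private:
    std::list<Region> m_regions; // kept sorted by address
};
```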

View File

@@ -1,5 +1,4 @@
 #pragma once
-#include "arch/MMU.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
 #include <luna/Result.h>
@@ -14,13 +13,11 @@ class VMRegion : LinkedListNode<VMRegion>
     bool persistent { false };
 };
 
-class AddressSpace
+class UserVM
 {
   public:
-    AddressSpace();
-    ~AddressSpace();
-    AddressSpace& operator=(AddressSpace&& other);
+    UserVM();
+    ~UserVM();
 
     Result<u64> alloc_region(usize count, bool persistent = false);
@@ -34,14 +31,9 @@ class AddressSpace
         return set_region(address, count, false, false);
     }
 
-    static Result<OwnedPtr<AddressSpace>> try_create();
+    static Result<OwnedPtr<UserVM>> try_create();
 
-    Result<OwnedPtr<AddressSpace>> clone();
-
-    PageDirectory* page_directory() const
-    {
-        return m_directory;
-    }
+    Result<OwnedPtr<UserVM>> clone();
 
   private:
     Result<bool> set_region(u64 address, usize count, bool used, bool persistent);
@@ -50,7 +42,5 @@ class AddressSpace
     void try_merge_region_with_neighbors(VMRegion* region);
     void merge_contiguous_regions(VMRegion* a, VMRegion* b);
     Result<VMRegion*> split_region(VMRegion* parent, u64 boundary);
 
     LinkedList<VMRegion> m_regions;
-    PageDirectory* m_directory;
 };

View File

@@ -70,7 +70,7 @@ Result<u64> sys_execve(Registers* regs, SyscallArgs args)
     kdbgln("exec: attempting to replace current image with %s", path.chars());
 #endif
 
-    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory()); });
+    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory); });
 
     auto image = TRY(ThreadImage::try_load_from_elf(inode));
@@ -99,6 +99,8 @@ Result<u64> sys_execve(Registers* regs, SyscallArgs args)
         }
     }
 
+    MMU::delete_userspace_page_directory(current->self_directory);
+
     if (VFS::is_setuid(inode)) current->auth.euid = current->auth.suid = inode->uid();
     if (VFS::is_setgid(inode)) current->auth.egid = current->auth.sgid = inode->gid();
@@ -106,7 +108,7 @@ Result<u64> sys_execve(Registers* regs, SyscallArgs args)
     image->apply(current);
 
-    MMU::switch_page_directory(current->self_directory());
+    MMU::switch_page_directory(current->self_directory);
 
     current->set_arguments(user_argc, user_argv, user_envc, user_envp);
@@ -121,7 +123,7 @@ Result<u64> sys_fork(Registers* regs, SyscallArgs)
 {
     auto current = Scheduler::current();
 
-    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory()); });
+    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory); });
 
     memcpy(&current->regs, regs, sizeof(*regs));
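Both sys_execve and sys_fork lean on make_scope_guard to put a sane page directory back in place if anything fails partway through. Luna's exact implementation isn't shown in this diff; the guard.deactivate() call in the ThreadImage hunks below suggests the classic deactivatable RAII guard, roughly:

```cpp
#include <utility>

// Runs the callback when the guard goes out of scope, unless deactivated first.
template <typename Callback> class ScopeGuard
{
  public:
    explicit ScopeGuard(Callback callback) : m_callback(std::move(callback)) {}
    ~ScopeGuard()
    {
        if (m_active) m_callback();
    }
    void deactivate() { m_active = false; }

  private:
    Callback m_callback;
    bool m_active { true };
};

template <typename Callback> ScopeGuard<Callback> make_scope_guard(Callback callback)
{
    return ScopeGuard<Callback>(std::move(callback));
}
```

Any TRY() that fails after the guard is armed unwinds through it, switching the CPU back to the calling thread's directory instead of leaving a half-built one active.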

View File

@@ -37,12 +37,12 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
     Thread* current = Scheduler::current();
 
     u64 address;
-    if (!addr) address = TRY(current->address_space->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
+    if (!addr) address = TRY(current->vm_allocator->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
     else
     {
         // FIXME: We should be more flexible if MAP_FIXED was not specified.
         address = align_down<ARCH_PAGE_SIZE>((u64)addr);
-        if (!TRY(current->address_space->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
+        if (!TRY(current->vm_allocator->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
             return err(ENOMEM);
     }
@@ -69,7 +69,7 @@ Result<u64> sys_munmap(Registers*, SyscallArgs args)
     Thread* current = Scheduler::current();
 
-    bool ok = TRY(current->address_space->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
+    bool ok = TRY(current->vm_allocator->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
 
     // POSIX says munmap should silently do nothing if the memory was not already mapped.
     if (!ok) return 0;
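sys_mmap and sys_munmap convert byte counts into whole pages before talking to the allocator. Their helpers aren't part of this diff; assuming the semantics their names and call sites imply (round a size up to a block count, round an address down to an alignment boundary), the arithmetic is:

```cpp
#include <cstddef>
#include <cstdint>
using u64 = uint64_t;
using usize = size_t;

// Blocks needed to cover `size` bytes (rounds up).
constexpr usize get_blocks_from_size(usize size, usize block_size)
{
    return (size + block_size - 1) / block_size;
}

// Previous multiple of Alignment (Alignment must be a power of two).
template <u64 Alignment> constexpr u64 align_down(u64 value)
{
    return value & ~(Alignment - 1);
}

static_assert(get_blocks_from_size(1, 4096) == 1);
static_assert(get_blocks_from_size(4096, 4096) == 1);
static_assert(get_blocks_from_size(4097, 4096) == 2);
static_assert(align_down<4096>(0x1234) == 0x1000);
```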

View File

@@ -25,7 +25,7 @@ static bool can_write_segment(u32 flags)
 namespace ELFLoader
 {
-    Result<ELFData> load(SharedPtr<VFS::Inode> inode, AddressSpace* space)
+    Result<ELFData> load(SharedPtr<VFS::Inode> inode, UserVM* vm)
     {
         Elf64_Ehdr elf_header;
         usize nread = TRY(inode->read((u8*)&elf_header, 0, sizeof elf_header));
@@ -102,7 +102,7 @@
                 if (can_write_segment(program_header.p_flags)) flags |= MMU::ReadWrite;
                 if (can_execute_segment(program_header.p_flags)) flags &= ~MMU::NoExecute;
 
-                if (!TRY(space->test_and_alloc_region(
+                if (!TRY(vm->test_and_alloc_region(
                         base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), true)))
                     return err(ENOMEM);
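Context for the hunk above: before reserving a region, the loader derives page protections from each program header's p_flags via can_write_segment/can_execute_segment. A sketch of that mapping using the standard ELF flag bits (PF_X = 1, PF_W = 2) and hypothetical stand-ins for Luna's MMU flag values:

```cpp
#include <cstdint>

// Standard ELF program-header flag bits.
constexpr uint32_t PF_X = 1, PF_W = 2;

// Illustrative stand-ins for MMU::ReadWrite / MMU::NoExecute.
constexpr uint32_t ReadWrite = 1 << 0;
constexpr uint32_t NoExecute = 1 << 1;

uint32_t flags_for_segment(uint32_t p_flags)
{
    uint32_t flags = NoExecute;              // start locked down
    if (p_flags & PF_W) flags |= ReadWrite;  // can_write_segment()
    if (p_flags & PF_X) flags &= ~NoExecute; // can_execute_segment()
    return flags;
}
```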

View File

@@ -1,6 +1,6 @@
 #pragma once
 #include "fs/VFS.h"
-#include "memory/AddressSpace.h"
+#include "memory/UserVM.h"
 #include <luna/Types.h>
 
 #define ELFMAG "\177ELF"
@@ -54,5 +54,5 @@ struct ELFData
 namespace ELFLoader
 {
-    Result<ELFData> load(SharedPtr<VFS::Inode> inode, AddressSpace* space);
+    Result<ELFData> load(SharedPtr<VFS::Inode> inode, UserVM* vm);
 };

View File

@@ -191,6 +191,8 @@ namespace Scheduler
             }
         }
 
+        if (!thread->is_kernel) MMU::delete_userspace_page_directory(thread->self_directory);
+
         delete thread;
 
         CPU::enable_interrupts();
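With ~UserVM no longer freeing the directory, each userspace page directory now has one explicit owner per phase of its life: ThreadImage's failure guard deletes a half-initialized one, sys_execve deletes the old one when replacing an image, and the scheduler (above) deletes it when reaping a non-kernel thread. Kernel threads are skipped, since they never receive a userspace directory of their own.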

View File

@@ -2,7 +2,7 @@
 #include "arch/MMU.h"
 #include "fs/VFS.h"
-#include "memory/AddressSpace.h"
+#include "memory/UserVM.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
 #include <luna/Result.h>
@@ -70,7 +70,7 @@ struct Thread : public LinkedListNode<Thread>
     Stack stack;
     Stack kernel_stack;
 
-    OwnedPtr<AddressSpace> address_space;
+    OwnedPtr<UserVM> vm_allocator;
 
     Option<FileDescriptor> fd_table[FD_MAX] = {};
 
     Result<int> allocate_fd(int min);
@@ -97,11 +97,7 @@ struct Thread : public LinkedListNode<Thread>
     Thread* parent { nullptr };
 
     Option<pid_t> child_being_waited_for = {};
 
-    PageDirectory* self_directory() const
-    {
-        return address_space->page_directory();
-    }
+    PageDirectory* self_directory;
 
     PageDirectory* active_directory { nullptr };
 
     [[noreturn]] void exit_and_signal_parent(u8 status);

View File

@@ -7,11 +7,11 @@
 static constexpr usize DEFAULT_USER_STACK_PAGES = 6;
 static constexpr usize DEFAULT_USER_STACK_SIZE = DEFAULT_USER_STACK_PAGES * ARCH_PAGE_SIZE;
 
-static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack, AddressSpace* space)
+static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack, UserVM* vm)
 {
     const u64 THREAD_STACK_BASE = 0x10000;
 
-    if (!TRY(space->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM);
+    if (!TRY(vm->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM);
 
     TRY(MemoryManager::alloc_at_zeroed(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES,
                                        MMU::ReadWrite | MMU::NoExecute | MMU::User));
@@ -32,26 +32,32 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::try_load_from_elf(SharedPtr<VFS::Inod
 {
     auto image = TRY(make_owned<ThreadImage>());
 
-    auto address_space = TRY(AddressSpace::try_create());
+    auto vm_allocator = TRY(UserVM::try_create());
 
     auto old_directory = MMU::get_page_directory();
-    MMU::switch_page_directory(address_space->page_directory());
+    auto new_directory = TRY(MMU::create_page_directory_for_userspace());
 
-    auto guard = make_scope_guard([=] { MMU::switch_page_directory(old_directory); });
+    MMU::switch_page_directory(new_directory);
 
-    const ELFData data = TRY(ELFLoader::load(inode, address_space.ptr()));
+    auto guard = make_scope_guard([=] {
+        MMU::delete_userspace_page_directory(new_directory);
+        MMU::switch_page_directory(old_directory);
+    });
+
+    const ELFData data = TRY(ELFLoader::load(inode, vm_allocator.ptr()));
 
     Stack user_stack;
     Stack kernel_stack;
-    TRY(create_stacks(user_stack, kernel_stack, address_space.ptr()));
+    TRY(create_stacks(user_stack, kernel_stack, vm_allocator.ptr()));
 
     guard.deactivate();
 
+    image->m_directory = new_directory;
     image->m_kernel_stack = kernel_stack;
     image->m_user_stack = user_stack;
     image->m_loaded_image_data = data;
-    image->m_address_space = move(address_space);
+    image->m_vm_allocator = move(vm_allocator);
     image->m_sp = user_stack.top();
 
     return image;
@@ -61,17 +67,20 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::clone_from_thread(Thread* parent)
 {
     auto image = TRY(make_owned<ThreadImage>());
 
-    auto address_space = TRY(parent->address_space->clone());
+    auto vm_allocator = TRY(parent->vm_allocator->clone());
+    auto new_directory = TRY(MMU::clone_userspace_page_directory(parent->self_directory));
 
     const ELFData data = { .entry = parent->ip() };
 
     const u64 kernel_stack_base = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute));
     Stack kernel_stack { kernel_stack_base, 4 * ARCH_PAGE_SIZE };
 
+    image->m_directory = new_directory;
     image->m_kernel_stack = kernel_stack;
     image->m_user_stack = parent->stack;
     image->m_loaded_image_data = data;
-    image->m_address_space = move(address_space);
+    image->m_vm_allocator = move(vm_allocator);
     image->m_sp = parent->sp();
 
     return image;
@@ -115,7 +124,8 @@ void ThreadImage::apply(Thread* thread)
     thread->stack = m_user_stack;
     thread->set_sp(align_down<16>(m_sp));
 
-    thread->active_directory = m_address_space->page_directory();
+    thread->self_directory = m_directory;
+    thread->active_directory = m_directory;
 
-    thread->address_space = move(m_address_space);
+    thread->vm_allocator = move(m_vm_allocator);
 }
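Note the new shape of try_load_from_elf above: directory creation is now transactional. The guard both deletes the freshly created directory and switches back to the old one, so any TRY() that fails between MMU::create_page_directory_for_userspace() and guard.deactivate() cleans up after itself; only on success does apply() hand the directory to the thread, pointing both self_directory and active_directory at the same value.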

View File

@@ -4,7 +4,7 @@
 #include "arch/CPU.h"
 #include "arch/MMU.h"
 #include "fs/VFS.h"
-#include "memory/AddressSpace.h"
+#include "memory/UserVM.h"
 #include "thread/Thread.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
@@ -28,7 +28,8 @@ class ThreadImage
     void apply(Thread* thread);
 
   private:
-    OwnedPtr<AddressSpace> m_address_space;
+    OwnedPtr<UserVM> m_vm_allocator;
+    PageDirectory* m_directory { nullptr };
     Stack m_user_stack;
     Stack m_kernel_stack;
     ELFData m_loaded_image_data;