Compare commits

...

2 Commits

Author SHA1 Message Date
c599251d2a
kernel: Rename UserVM to AddressSpace
This is a more appropriate name now that it does more stuff than allocate virtual memory.

To be fair, the name was a bit awkward anyway. Should have been UserVMAllocator I guess.
2023-07-09 20:38:04 +02:00
5e564e9ae3
kernel: Move Thread::self_directory to UserVM
Since this is only used by user threads, UserVM is a convenient/related place to store the PageDirectory in a RAII manner.
2023-07-09 20:32:42 +02:00
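The reasoning in the second commit message (keep the PageDirectory inside the address-space object so it is created, cloned and freed together with it) is RAII in miniature. The sketch below is not the kernel's code: it uses standalone stand-ins (a bare PageDirectory struct and hypothetical fake_create_page_directory/fake_delete_page_directory helpers in place of MMU::create_page_directory_for_userspace() and MMU::delete_userspace_page_directory(), and std::unique_ptr instead of the kernel's Result/OwnedPtr types). It only illustrates the ownership pattern the diffs below implement.

#include <cstdio>
#include <memory>

// Hypothetical stand-in for the kernel's PageDirectory type.
struct PageDirectory
{
    int id;
};

// Hypothetical stand-ins for MMU::create_page_directory_for_userspace()
// and MMU::delete_userspace_page_directory().
static PageDirectory* fake_create_page_directory(int id)
{
    std::printf("created page directory %d\n", id);
    return new PageDirectory { id };
}

static void fake_delete_page_directory(PageDirectory* dir)
{
    std::printf("deleted page directory %d\n", dir->id);
    delete dir;
}

// The RAII idea: the address space owns its page directory for its whole lifetime.
class AddressSpace
{
  public:
    static std::unique_ptr<AddressSpace> try_create(int id)
    {
        auto ptr = std::unique_ptr<AddressSpace>(new AddressSpace);
        ptr->m_directory = fake_create_page_directory(id);
        return ptr;
    }

    PageDirectory* page_directory() const { return m_directory; }

    ~AddressSpace()
    {
        // Released exactly once, when the owner (e.g. a thread) drops the address space.
        if (m_directory) fake_delete_page_directory(m_directory);
    }

  private:
    AddressSpace() = default;

    PageDirectory* m_directory { nullptr };
};

int main()
{
    auto space = AddressSpace::try_create(1);
    std::printf("using page directory %d\n", space->page_directory()->id);
    return 0; // ~AddressSpace() runs here and frees the directory with it.
}

With this arrangement a thread no longer has to remember to delete the page directory itself, which is exactly what the exec and Scheduler hunks below remove.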
11 changed files with 79 additions and 62 deletions

View File

@@ -12,7 +12,7 @@ set(SOURCES
     src/memory/MemoryManager.cpp
     src/memory/Heap.cpp
     src/memory/KernelVM.cpp
-    src/memory/UserVM.cpp
+    src/memory/AddressSpace.cpp
     src/memory/MemoryMap.cpp
     src/boot/Init.cpp
     src/arch/Serial.cpp

View File

@@ -1,4 +1,4 @@
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include "Log.h"
 #include "arch/MMU.h"
 #include "memory/Heap.h"
@@ -8,17 +8,19 @@
 static constexpr u64 VM_START = ARCH_PAGE_SIZE;
 static constexpr u64 VM_END = 0x0000800000000000;
 
-Result<OwnedPtr<UserVM>> UserVM::try_create()
+Result<OwnedPtr<AddressSpace>> AddressSpace::try_create()
 {
-    OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>());
+    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
 
     TRY(ptr->create_null_region());
     TRY(ptr->create_default_region());
 
+    ptr->m_directory = TRY(MMU::create_page_directory_for_userspace());
+
     return move(ptr);
 }
 
-Result<void> UserVM::create_null_region()
+Result<void> AddressSpace::create_null_region()
 {
     // Create a small region at the start of the address space to prevent anyone from mapping page 0.
     auto* region = TRY(make<VMRegion>());
@@ -31,7 +33,7 @@ Result<void> UserVM::create_null_region()
     return {};
 }
 
-Result<void> UserVM::create_default_region()
+Result<void> AddressSpace::create_default_region()
 {
     // Create a free region covering the rest of the address space.
     auto* region = TRY(make<VMRegion>());
@@ -43,9 +45,9 @@ Result<void> UserVM::create_default_region()
     return {};
 }
 
-Result<OwnedPtr<UserVM>> UserVM::clone()
+Result<OwnedPtr<AddressSpace>> AddressSpace::clone()
 {
-    OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>());
+    OwnedPtr<AddressSpace> ptr = TRY(make_owned<AddressSpace>());
 
     for (const auto* region : m_regions)
     {
@@ -54,14 +56,29 @@ Result<OwnedPtr<UserVM>> UserVM::clone()
         ptr->m_regions.append(new_region);
     }
 
+    ptr->m_directory = TRY(MMU::clone_userspace_page_directory(m_directory));
+
     return move(ptr);
 }
 
-UserVM::UserVM()
+AddressSpace::AddressSpace()
 {
 }
 
-Result<u64> UserVM::alloc_region(usize count, bool persistent)
+AddressSpace& AddressSpace::operator=(AddressSpace&& other)
+{
+    if (&other == this) return *this;
+
+    m_regions.consume([](VMRegion* region) { delete region; });
+    MMU::delete_userspace_page_directory(m_directory);
+
+    m_regions = other.m_regions;
+    m_directory = other.m_directory;
+
+    return *this;
+}
+
+Result<u64> AddressSpace::alloc_region(usize count, bool persistent)
 {
     for (auto* region = m_regions.expect_last(); region; region = m_regions.previous(region).value_or(nullptr))
     {
@@ -91,7 +108,7 @@ Result<u64> UserVM::alloc_region(usize count, bool persistent)
     return err(ENOMEM);
 }
 
-Result<bool> UserVM::set_region(u64 address, usize count, bool used, bool persistent)
+Result<bool> AddressSpace::set_region(u64 address, usize count, bool used, bool persistent)
 {
     if (address >= VM_END) return err(EINVAL);
@@ -154,7 +171,7 @@ Result<bool> UserVM::set_region(u64 address, usize count, bool used, bool persistent)
     return true;
 }
 
-void UserVM::merge_contiguous_regions(VMRegion* a, VMRegion* b)
+void AddressSpace::merge_contiguous_regions(VMRegion* a, VMRegion* b)
 {
     a->end = b->end;
     a->count += b->count;
@@ -162,7 +179,7 @@ void UserVM::merge_contiguous_regions(VMRegion* a, VMRegion* b)
     delete b;
 }
 
-void UserVM::try_merge_region_with_neighbors(VMRegion* region)
+void AddressSpace::try_merge_region_with_neighbors(VMRegion* region)
 {
     auto prev = m_regions.previous(region);
     if (prev.has_value() && (*prev)->used == region->used && (*prev)->persistent == region->persistent)
@@ -178,7 +195,7 @@ void UserVM::try_merge_region_with_neighbors(VMRegion* region)
     }
 }
 
-Result<VMRegion*> UserVM::split_region(VMRegion* parent, u64 boundary)
+Result<VMRegion*> AddressSpace::split_region(VMRegion* parent, u64 boundary)
 {
     auto* region = TRY(make<VMRegion>());
@@ -195,7 +212,8 @@ Result<VMRegion*> UserVM::split_region(VMRegion* parent, u64 boundary)
     return region;
 }
 
-UserVM::~UserVM()
+AddressSpace::~AddressSpace()
 {
     m_regions.consume([](VMRegion* region) { delete region; });
+    MMU::delete_userspace_page_directory(m_directory);
 }

View File

@@ -1,4 +1,5 @@
 #pragma once
+#include "arch/MMU.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
 #include <luna/Result.h>
@@ -13,11 +14,13 @@ class VMRegion : LinkedListNode<VMRegion>
     bool persistent { false };
 };
 
-class UserVM
+class AddressSpace
 {
   public:
-    UserVM();
-    ~UserVM();
+    AddressSpace();
+    ~AddressSpace();
+
+    AddressSpace& operator=(AddressSpace&& other);
 
     Result<u64> alloc_region(usize count, bool persistent = false);
@@ -31,9 +34,14 @@ class UserVM
         return set_region(address, count, false, false);
     }
 
-    static Result<OwnedPtr<UserVM>> try_create();
-    Result<OwnedPtr<UserVM>> clone();
+    static Result<OwnedPtr<AddressSpace>> try_create();
+    Result<OwnedPtr<AddressSpace>> clone();
+
+    PageDirectory* page_directory() const
+    {
+        return m_directory;
+    }
 
   private:
     Result<bool> set_region(u64 address, usize count, bool used, bool persistent);
@@ -42,5 +50,7 @@ class UserVM
     void try_merge_region_with_neighbors(VMRegion* region);
     void merge_contiguous_regions(VMRegion* a, VMRegion* b);
     Result<VMRegion*> split_region(VMRegion* parent, u64 boundary);
 
     LinkedList<VMRegion> m_regions;
+    PageDirectory* m_directory;
 };

View File

@@ -70,7 +70,7 @@ Result<u64> sys_execve(Registers* regs, SyscallArgs args)
     kdbgln("exec: attempting to replace current image with %s", path.chars());
 #endif
 
-    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory); });
+    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory()); });
 
     auto image = TRY(ThreadImage::try_load_from_elf(inode));
@@ -99,8 +99,6 @@ Result<u64> sys_execve(Registers* regs, SyscallArgs args)
         }
     }
 
-    MMU::delete_userspace_page_directory(current->self_directory);
-
     if (VFS::is_setuid(inode)) current->auth.euid = current->auth.suid = inode->uid();
     if (VFS::is_setgid(inode)) current->auth.egid = current->auth.sgid = inode->gid();
@@ -108,7 +106,7 @@ Result<u64> sys_execve(Registers* regs, SyscallArgs args)
     image->apply(current);
 
-    MMU::switch_page_directory(current->self_directory);
+    MMU::switch_page_directory(current->self_directory());
 
     current->set_arguments(user_argc, user_argv, user_envc, user_envp);
@@ -123,7 +121,7 @@ Result<u64> sys_fork(Registers* regs, SyscallArgs)
 {
     auto current = Scheduler::current();
 
-    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory); });
+    auto guard = make_scope_guard([current] { MMU::switch_page_directory(current->self_directory()); });
 
     memcpy(&current->regs, regs, sizeof(*regs));

View File

@@ -37,12 +37,12 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
     Thread* current = Scheduler::current();
 
     u64 address;
-    if (!addr) address = TRY(current->vm_allocator->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
+    if (!addr) address = TRY(current->address_space->alloc_region(get_blocks_from_size(len, ARCH_PAGE_SIZE)));
     else
     {
         // FIXME: We should be more flexible if MAP_FIXED was not specified.
         address = align_down<ARCH_PAGE_SIZE>((u64)addr);
-        if (!TRY(current->vm_allocator->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
+        if (!TRY(current->address_space->test_and_alloc_region(address, get_blocks_from_size(len, ARCH_PAGE_SIZE))))
             return err(ENOMEM);
     }
@@ -69,7 +69,7 @@ Result<u64> sys_munmap(Registers*, SyscallArgs args)
     Thread* current = Scheduler::current();
 
-    bool ok = TRY(current->vm_allocator->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
+    bool ok = TRY(current->address_space->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
 
     // POSIX says munmap should silently do nothing if the memory was not already mapped.
     if (!ok) return 0;

View File

@@ -25,7 +25,7 @@ static bool can_write_segment(u32 flags)
 namespace ELFLoader
 {
-    Result<ELFData> load(SharedPtr<VFS::Inode> inode, UserVM* vm)
+    Result<ELFData> load(SharedPtr<VFS::Inode> inode, AddressSpace* space)
     {
         Elf64_Ehdr elf_header;
 
         usize nread = TRY(inode->read((u8*)&elf_header, 0, sizeof elf_header));
@@ -102,7 +102,7 @@ namespace ELFLoader
             if (can_write_segment(program_header.p_flags)) flags |= MMU::ReadWrite;
             if (can_execute_segment(program_header.p_flags)) flags &= ~MMU::NoExecute;
 
-            if (!TRY(vm->test_and_alloc_region(
+            if (!TRY(space->test_and_alloc_region(
                     base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), true)))
                 return err(ENOMEM);

View File

@@ -1,6 +1,6 @@
 #pragma once
 #include "fs/VFS.h"
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include <luna/Types.h>
 
 #define ELFMAG "\177ELF"
@@ -54,5 +54,5 @@ struct ELFData
 namespace ELFLoader
 {
-    Result<ELFData> load(SharedPtr<VFS::Inode> inode, UserVM* vm);
+    Result<ELFData> load(SharedPtr<VFS::Inode> inode, AddressSpace* space);
 };

View File

@@ -191,8 +191,6 @@ namespace Scheduler
             }
         }
 
-        if (!thread->is_kernel) MMU::delete_userspace_page_directory(thread->self_directory);
-
         delete thread;
 
         CPU::enable_interrupts();

View File

@@ -2,7 +2,7 @@
 #include "arch/MMU.h"
 #include "fs/VFS.h"
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
 #include <luna/Result.h>
@@ -70,7 +70,7 @@ struct Thread : public LinkedListNode<Thread>
     Stack stack;
     Stack kernel_stack;
 
-    OwnedPtr<UserVM> vm_allocator;
+    OwnedPtr<AddressSpace> address_space;
 
     Option<FileDescriptor> fd_table[FD_MAX] = {};
     Result<int> allocate_fd(int min);
@@ -97,7 +97,11 @@ struct Thread : public LinkedListNode<Thread>
     Thread* parent { nullptr };
     Option<pid_t> child_being_waited_for = {};
 
-    PageDirectory* self_directory;
+    PageDirectory* self_directory() const
+    {
+        return address_space->page_directory();
+    }
+
     PageDirectory* active_directory { nullptr };
 
     [[noreturn]] void exit_and_signal_parent(u8 status);

View File

@@ -7,11 +7,11 @@
 static constexpr usize DEFAULT_USER_STACK_PAGES = 6;
 static constexpr usize DEFAULT_USER_STACK_SIZE = DEFAULT_USER_STACK_PAGES * ARCH_PAGE_SIZE;
 
-static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack, UserVM* vm)
+static Result<void> create_stacks(Stack& user_stack, Stack& kernel_stack, AddressSpace* space)
 {
     const u64 THREAD_STACK_BASE = 0x10000;
 
-    if (!TRY(vm->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM);
+    if (!TRY(space->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM);
 
     TRY(MemoryManager::alloc_at_zeroed(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES,
                                        MMU::ReadWrite | MMU::NoExecute | MMU::User));
@@ -32,32 +32,26 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::try_load_from_elf(SharedPtr<VFS::Inod
 {
     auto image = TRY(make_owned<ThreadImage>());
 
-    auto vm_allocator = TRY(UserVM::try_create());
+    auto address_space = TRY(AddressSpace::try_create());
 
     auto old_directory = MMU::get_page_directory();
-    auto new_directory = TRY(MMU::create_page_directory_for_userspace());
-    MMU::switch_page_directory(new_directory);
-
-    auto guard = make_scope_guard([=] {
-        MMU::delete_userspace_page_directory(new_directory);
-        MMU::switch_page_directory(old_directory);
-    });
-
-    const ELFData data = TRY(ELFLoader::load(inode, vm_allocator.ptr()));
+    MMU::switch_page_directory(address_space->page_directory());
+
+    auto guard = make_scope_guard([=] { MMU::switch_page_directory(old_directory); });
+
+    const ELFData data = TRY(ELFLoader::load(inode, address_space.ptr()));
 
     Stack user_stack;
     Stack kernel_stack;
-    TRY(create_stacks(user_stack, kernel_stack, vm_allocator.ptr()));
+    TRY(create_stacks(user_stack, kernel_stack, address_space.ptr()));
 
     guard.deactivate();
 
-    image->m_directory = new_directory;
     image->m_kernel_stack = kernel_stack;
     image->m_user_stack = user_stack;
     image->m_loaded_image_data = data;
-    image->m_vm_allocator = move(vm_allocator);
+    image->m_address_space = move(address_space);
     image->m_sp = user_stack.top();
 
     return image;
@@ -67,20 +61,17 @@ Result<OwnedPtr<ThreadImage>> ThreadImage::clone_from_thread(Thread* parent)
 {
     auto image = TRY(make_owned<ThreadImage>());
 
-    auto vm_allocator = TRY(parent->vm_allocator->clone());
-    auto new_directory = TRY(MMU::clone_userspace_page_directory(parent->self_directory));
+    auto address_space = TRY(parent->address_space->clone());
 
    const ELFData data = { .entry = parent->ip() };
 
     const u64 kernel_stack_base = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute));
     Stack kernel_stack { kernel_stack_base, 4 * ARCH_PAGE_SIZE };
 
-    image->m_directory = new_directory;
     image->m_kernel_stack = kernel_stack;
     image->m_user_stack = parent->stack;
     image->m_loaded_image_data = data;
-    image->m_vm_allocator = move(vm_allocator);
+    image->m_address_space = move(address_space);
     image->m_sp = parent->sp();
 
     return image;
@@ -124,8 +115,7 @@ void ThreadImage::apply(Thread* thread)
     thread->stack = m_user_stack;
     thread->set_sp(align_down<16>(m_sp));
 
-    thread->self_directory = m_directory;
-    thread->active_directory = m_directory;
+    thread->active_directory = m_address_space->page_directory();
 
-    thread->vm_allocator = move(m_vm_allocator);
+    thread->address_space = move(m_address_space);
 }

View File

@@ -4,7 +4,7 @@
 #include "arch/CPU.h"
 #include "arch/MMU.h"
 #include "fs/VFS.h"
-#include "memory/UserVM.h"
+#include "memory/AddressSpace.h"
 #include "thread/Thread.h"
 #include <luna/LinkedList.h>
 #include <luna/OwnedPtr.h>
@@ -28,8 +28,7 @@ class ThreadImage
     void apply(Thread* thread);
 
   private:
-    OwnedPtr<UserVM> m_vm_allocator;
-    PageDirectory* m_directory { nullptr };
+    OwnedPtr<AddressSpace> m_address_space;
     Stack m_user_stack;
     Stack m_kernel_stack;
    ELFData m_loaded_image_data;