#include "thread/ThreadImage.h" #include "memory/MemoryManager.h" #include "thread/Thread.h" #include #include static constexpr usize DEFAULT_USER_STACK_PAGES = 6; static constexpr usize DEFAULT_USER_STACK_SIZE = DEFAULT_USER_STACK_PAGES * ARCH_PAGE_SIZE; static Result create_stacks(Stack& user_stack, Stack& kernel_stack, UserVM* vm) { const u64 THREAD_STACK_BASE = 0x10000; if (!TRY(vm->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, true))) return err(ENOMEM); TRY(MemoryManager::alloc_at_zeroed(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, MMU::ReadWrite | MMU::NoExecute | MMU::User)); auto guard = make_scope_guard([&] { MemoryManager::unmap_owned(THREAD_STACK_BASE, 4); }); const u64 kernel_stack_base = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute)); guard.deactivate(); user_stack = { THREAD_STACK_BASE, DEFAULT_USER_STACK_SIZE }; kernel_stack = { kernel_stack_base, 4 * ARCH_PAGE_SIZE }; return {}; } Result> ThreadImage::try_load_from_elf(SharedPtr inode) { auto image = TRY(make_owned()); auto vm_allocator = TRY(UserVM::try_create()); auto old_directory = MMU::get_page_directory(); MMU::switch_page_directory(vm_allocator->page_directory()); auto guard = make_scope_guard([=] { MMU::switch_page_directory(old_directory); }); const ELFData data = TRY(ELFLoader::load(inode, vm_allocator.ptr())); Stack user_stack; Stack kernel_stack; TRY(create_stacks(user_stack, kernel_stack, vm_allocator.ptr())); guard.deactivate(); image->m_kernel_stack = kernel_stack; image->m_user_stack = user_stack; image->m_loaded_image_data = data; image->m_vm_allocator = move(vm_allocator); image->m_sp = user_stack.top(); return image; } Result> ThreadImage::clone_from_thread(Thread* parent) { auto image = TRY(make_owned()); auto vm_allocator = TRY(parent->vm_allocator->clone()); const ELFData data = { .entry = parent->ip() }; const u64 kernel_stack_base = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute)); Stack kernel_stack { kernel_stack_base, 4 * ARCH_PAGE_SIZE }; image->m_kernel_stack = kernel_stack; image->m_user_stack = parent->stack; image->m_loaded_image_data = data; image->m_vm_allocator = move(vm_allocator); image->m_sp = parent->sp(); return image; } Result ThreadImage::push_mem_on_stack(const u8* mem, usize size) { if ((m_sp - size) < m_user_stack.bottom()) return err(E2BIG); if (!MemoryManager::validate_user_write((void*)(m_sp - size), size)) return err(EFAULT); m_sp -= size; memcpy((void*)m_sp, mem, size); return m_sp; } Result ThreadImage::push_string_vector_on_stack(const Vector& vec) { Vector user_vec; for (const auto& item : vec) { // Copy each individual string and retrieve a userspace pointer to said copy u64 addr = TRY(push_mem_on_stack((const u8*)item.chars(), item.length() + 1)); TRY(user_vec.try_append(addr)); } TRY(user_vec.try_append((u64) nullptr)); // Copy the actual vector of userspace pointers to the stack return TRY(push_mem_on_stack((u8*)user_vec.data(), user_vec.size() * sizeof(u64))); } void ThreadImage::apply(Thread* thread) { thread->init_regs_user(); thread->set_ip(m_loaded_image_data.entry); thread->kernel_stack = m_kernel_stack; thread->stack = m_user_stack; thread->set_sp(align_down<16>(m_sp)); thread->active_directory = m_vm_allocator->page_directory(); thread->vm_allocator = move(m_vm_allocator); }