#include "thread/Thread.h" #include "Log.h" #include "arch/CPU.h" #include "memory/MemoryManager.h" #include "thread/Scheduler.h" #include #include #include #include #include #include #include #include static Atomic g_next_id; LinkedList g_threads; LinkedList g_processes; void Thread::init() { g_next_id = 2; } Result new_thread() { Thread* const thread = TRY(make()); thread->tid = g_next_id++; return thread; } pid_t next_thread_id() { return g_next_id.load(); } Result Process::allocate_fd(int min, FileDescriptor& descriptor) { if (min < 0 || min >= FD_MAX) return err(EINVAL); auto table = fd_table.lock(); for (int i = min; i < FD_MAX; i++) { if (!(*table)[i].has_value()) { (*table)[i] = descriptor; return i; } } return err(EMFILE); } Result Process::resolve_fd(int fd) { if (fd < 0 || fd >= FD_MAX) return err(EBADF); auto table = fd_table.lock(); Option& maybe_descriptor = (*table)[fd]; if (!maybe_descriptor.has_value()) return err(EBADF); return maybe_descriptor.value_ptr(); } Credentials Process::credentials() { auto credentials = auth.lock(); return *credentials; } Result> Process::copy_groups() { auto groups = extra_groups.lock(); return groups->shallow_copy(); } Result Process::allocate_timerid() { ScopedMutexLock lock(posix_timer_mutex); for (int i = 0; i < MAX_POSIX_TIMERS; i++) { if (!posix_timers[i].has_value()) { posix_timers[i] = Timer {}; return i; } } return err(EMFILE); } Result Process::resolve_timerid(int _tid) { if (_tid < 0 || _tid >= MAX_POSIX_TIMERS) return err(EBADF); Option& maybe_timer = posix_timers[_tid]; if (!maybe_timer.has_value()) return err(EINVAL); return maybe_timer.value_ptr(); } Result> Process::resolve_atfile(int dirfd, const String& path, bool allow_empty_path, bool follow_last_symlink, SharedPtr* parent_inode) { if (parent_inode) *parent_inode = this->current_directory; if (PathParser::is_absolute(path.view())) return VFS::resolve_path(path.chars(), this, {}, follow_last_symlink); if (dirfd == AT_FDCWD) return VFS::resolve_path(path.chars(), this, this->current_directory, follow_last_symlink); auto descriptor = TRY(resolve_fd(dirfd)); if (parent_inode) *parent_inode = descriptor->inode(); if (path.is_empty() && allow_empty_path) return descriptor->inode(); return VFS::resolve_path(path.chars(), this, descriptor->inode(), follow_last_symlink); } [[noreturn]] void Process::exit(int _status) { check(this == Process::current()); // Process::exit() should only be called by the process itself. #ifndef MOON_ENABLE_TESTING_FEATURES if (id == 1) fail("the init process exited"); #else if (id == 1) CPU::magic_exit(_status); #endif Scheduler::for_each_thread(this, [](Thread* thread) { thread->quit(); return true; }); Scheduler::signal_reap_thread(); thread_count = 0; status = _status; Scheduler::for_each_child(this, [](Process* child) { child->parent = Scheduler::init_process(); return true; }); if (is_session_leader()) { kinfoln("process %d is exiting as a session leader, sending signals to session", id); // FIXME: Send SIGHUP only to the foreground process group if the session has a controlling terminal. 
        Scheduler::for_each_in_session(sid, [this](Process* p) {
            if (p == this) return true;
            p->sid = 0;
            p->controlling_terminal = {};
            p->send_signal(SIGHUP);
            kinfoln("reparenting and sending SIGHUP to %d", p->id);
            return true;
        });
    }

    if (parent)
    {
        Scheduler::for_each_thread(parent, [&](Thread* t) {
            if (t->state == ThreadState::Waiting)
            {
                // Wake the parent thread if it is waiting either for any child (-1) or specifically for this process.
                pid_t expected = -1;
                if (t->child_being_waited_for.compare_exchange_strong(expected, id))
                {
                    t->wake_up();
                    return false;
                }
                expected = id;
                if (t->child_being_waited_for.compare_exchange_strong(expected, id))
                {
                    t->wake_up();
                    return false;
                }
            }
            return true;
        });

        parent->send_signal(SIGCHLD);
    }

    kernel_yield();
    unreachable();
}

void Thread::quit()
{
    state = ThreadState::Dying;
}

void Thread::exit(bool yield)
{
    quit();

    process->thread_count--;
    if (process->thread_count == 0) { process->exit(0); }

    if (yield) kernel_yield();
}

enum class DefaultSignalAction
{
    Ignore,
    Terminate,
    Stop,
};

// FIXME: Implement coredumps for some signals.
static constexpr DefaultSignalAction default_actions[] = {
    DefaultSignalAction::Terminate, // SIGHUP
    DefaultSignalAction::Terminate, // SIGINT
    DefaultSignalAction::Terminate, // SIGQUIT (dump core)
    DefaultSignalAction::Terminate, // SIGILL (dump core)
    DefaultSignalAction::Terminate, // SIGTRAP (dump core)
    DefaultSignalAction::Terminate, // SIGABRT (dump core)
    DefaultSignalAction::Ignore,    // SIGCHLD
    DefaultSignalAction::Terminate, // SIGFPE (dump core)
    DefaultSignalAction::Terminate, // SIGKILL
    DefaultSignalAction::Stop,      // SIGSTOP
    DefaultSignalAction::Terminate, // SIGSEGV (dump core)
    DefaultSignalAction::Ignore,    // SIGCONT (Handled separately)
    DefaultSignalAction::Terminate, // SIGPIPE
    DefaultSignalAction::Terminate, // SIGALRM
    DefaultSignalAction::Terminate, // SIGTERM
    DefaultSignalAction::Terminate, // SIGTTIN
    DefaultSignalAction::Terminate, // SIGTTOU
    DefaultSignalAction::Ignore,    // SIGWINCH
};

void Thread::process_pending_signals(Registers* current_regs)
{
    interrupted = false;

    for (int i = 0; i < NSIG; i++)
    {
        int signo = i + 1;

        // Blocked signals stay pending; SIGKILL and SIGSTOP cannot be blocked.
        if (signo != SIGKILL && signo != SIGSTOP && signal_mask.get(i)) continue;

        if (pending_signals.get(i))
        {
            pending_signals.set(i, false);

            auto handler = signal_handlers[i];

            if (signo != SIGKILL && signo != SIGSTOP && handler.sa_handler == SIG_IGN) return;

            if (handler.sa_handler == SIG_DFL || signo == SIGKILL || signo == SIGSTOP)
            {
            default_signal:
                if (process->id == 1)
                {
                    kwarnln("signal: init got a signal it has no handler for, ignoring");
                    return;
                }

                auto action = default_actions[i];
                switch (action)
                {
                case DefaultSignalAction::Ignore: return;
                case DefaultSignalAction::Terminate:
                    kwarnln("Terminating thread %d with signal %d", tid, signo);
                    CPU::print_stack_trace_at(current_regs);
                    process->exit(signo | _SIGBIT);
                    unreachable();
                case DefaultSignalAction::Stop: stop();
                default: return;
                }
            }

            // If we fail to deliver the signal (usually because there's not enough space on the stack), execute the
            // default action. FIXME: Should this be changed?
            if (!deliver_signal(signo, current_regs)) goto default_signal;

            return;
        }
    }
}

int Thread::pending_signal_count()
{
    int result = 0;
    for (int i = 0; i < NSIG; i++)
    {
        if (pending_signals.get(i)) { result++; }
    }

    return result;
}

int Thread::pending_signal()
{
    for (int i = 0; i < NSIG; i++)
    {
        if (pending_signals.get(i)) { return i + 1; }
    }

    return 0;
}

bool Thread::will_ignore_pending_signal()
{
    for (int i = 0; i < NSIG; i++)
    {
        if (pending_signals.get(i))
        {
            int signo = i + 1;
            if (signo == SIGKILL || signo == SIGSTOP) return false;
            if (signal_mask.get(i)) continue;

            auto handler = signal_handlers[i];
            if (handler.sa_handler == SIG_IGN) return true;
            if (handler.sa_handler == SIG_DFL && default_actions[i] == DefaultSignalAction::Ignore) return true;

            return false;
        }
    }

    return false;
}

void Process::send_signal(int signo)
{
    Scheduler::for_each_thread(this, [signo](Thread* t) {
        t->send_signal(signo);
        return false;
    });
}

void Thread::send_signal(int signo)
{
    if (is_kernel) return;
    if (state == ThreadState::Exited || state == ThreadState::Dying) return;

    if (state == ThreadState::Stopped && signo == SIGCONT)
    {
        wake_up();
        return;
    }

    check(signo > 0 && signo <= NSIG);
    pending_signals.set(signo - 1, true);

    if (state == ThreadState::Waiting || state == ThreadState::Sleeping || is_in_kernel(&regs))
    {
        if (state == ThreadState::Stopped && signo != SIGKILL) return;
        interrupted = true;
        wake_up();
    }
}

static constexpr usize MAX_STACK_SIZE = 8 * 1024 * 1024; // 8 MB

bool Thread::check_stack_on_exception(u64 stack_pointer)
{
    if (stack_pointer < stack.bottom() || stack_pointer >= stack.top())
        kwarnln("Abnormal stack (Stack pointer outside the normal range, %.16lx-%.16lx)", stack.bottom(), stack.top());

    // Check whether the stack pointer is within 8 pages of the bottom of the stack
    u64 threshold = stack.bottom() - (ARCH_PAGE_SIZE * 8);

    if (stack_pointer >= threshold && stack_pointer < stack.bottom())
    {
        kwarnln("Likely stack overflow (CPU exception a few pages below the stack)");

        // Try to grow the stack
        usize stack_space_remaining = MAX_STACK_SIZE - stack.bytes();
        if (!stack_space_remaining)
        {
            kwarnln("Failed to grow stack: this thread already used up all its stack space");
            return false;
        }

        usize exceeded_bytes = align_up<ARCH_PAGE_SIZE>(stack.bottom() - stack_pointer);
        if (exceeded_bytes > stack_space_remaining)
        {
            kwarnln("Failed to grow stack: this thread needs more space than the one it has remaining (%zu bytes out "
                    "of %zu remaining)",
                    exceeded_bytes, stack_space_remaining);
            return false;
        }

        auto address_space = process->address_space.lock();

        // If we can, we'll add 2 more pages of buffer space, otherwise we use whatever we can.
        usize bytes_to_grow = min(stack_space_remaining, exceeded_bytes + 2 * ARCH_PAGE_SIZE);

        auto maybe_base = (*address_space)->grow_region(stack.bottom(), bytes_to_grow / ARCH_PAGE_SIZE, true);
        if (maybe_base.has_error())
        {
            kwarnln("Failed to grow stack: could not allocate virtual memory space (%s)", maybe_base.error_string());
            return false;
        }

        u64 base = maybe_base.release_value();

        auto result = MemoryManager::alloc_at_zeroed(base, bytes_to_grow / ARCH_PAGE_SIZE,
                                                     MMU::ReadWrite | MMU::NoExecute | MMU::User);
        if (result.has_error())
        {
            (*address_space)->free_region(base, bytes_to_grow / ARCH_PAGE_SIZE);
            kwarnln("Failed to grow stack: could not allocate physical pages (%s)", result.error_string());
            return false;
        }

        kinfoln("Stack expanded from %lx (%zu bytes) to %lx (%zu bytes)", stack.bottom(), stack.bytes(), base,
                stack.bytes() + bytes_to_grow);

        stack = Stack { base, stack.bytes() + bytes_to_grow };

        return true;
    }

    return false;
}

void Thread::stop()
{
    state = ThreadState::Stopped;
    kernel_yield();
}

Process* Process::current()
{
    return Scheduler::current()->process;
}