From 4a654bf0933e6fab1b5d0ff685aa11ffe71597b9 Mon Sep 17 00:00:00 2001
From: apio
Date: Thu, 17 Aug 2023 20:14:33 +0200
Subject: [PATCH] kernel: Handle OOMs better and without deadlocking

Use a separate task to do it.

Also fix a bug where the init thread would get no kernel stack ever since
5f698b477.
---
 kernel/src/fs/StorageCache.cpp      |  7 ++++++-
 kernel/src/main.cpp                 | 14 ++++++++++++++
 kernel/src/memory/MemoryManager.cpp |  6 +++---
 kernel/src/thread/Scheduler.cpp     | 25 +++++++++++++++++++++++--
 kernel/src/thread/Scheduler.h       |  3 +++
 kernel/src/thread/Thread.h          |  2 ++
 libluna/include/luna/HashMap.h      | 10 ++++++++++
 libluna/include/luna/HashTable.h    | 19 ++++++++++++++++---
 8 files changed, 77 insertions(+), 9 deletions(-)

diff --git a/kernel/src/fs/StorageCache.cpp b/kernel/src/fs/StorageCache.cpp
index 95e0b012..9d1bb404 100644
--- a/kernel/src/fs/StorageCache.cpp
+++ b/kernel/src/fs/StorageCache.cpp
@@ -1,5 +1,6 @@
 #include "fs/StorageCache.h"
 #include "Log.h"
+#include
 #include
 
 static LinkedList<StorageCache> g_storage_caches;
@@ -23,9 +24,13 @@ Result StorageCache::fetch_entry(u64 block)
 
 void StorageCache::clear()
 {
-    ScopedKMutexLock<100> lock(m_mutex);
+    m_mutex.lock();
+    kdbgln("cache: clearing %lu entries, out of %lu buckets", m_cache_entries.size(), m_cache_entries.capacity());
 
     m_cache_entries.clear();
+    kdbgln("cache: done");
+
+    m_mutex.unlock();
 }
 
 StorageCache::StorageCache()
diff --git a/kernel/src/main.cpp b/kernel/src/main.cpp
index db4943f8..84703bfe 100644
--- a/kernel/src/main.cpp
+++ b/kernel/src/main.cpp
@@ -31,6 +31,16 @@ void reap_thread()
     }
 }
 
+void oom_thread()
+{
+    while (true)
+    {
+        kernel_wait_for_event();
+        // OOM! Do everything we can to recover memory.
+        StorageCache::clear_caches();
+    }
+}
+
 [[noreturn]] void init()
 {
     {
@@ -58,6 +68,10 @@ void reap_thread()
                               "Failed to create the process reaper kernel thread");
     Scheduler::set_reap_thread(reap);
 
+    auto oom = mark_critical(Scheduler::new_kernel_thread(oom_thread, "[oom]"),
+                             "Failed to create the out-of-memory kernel thread");
+    Scheduler::set_oom_thread(oom);
+
 #ifdef ARCH_X86_64
     ATA::Controller::scan();
 #endif
diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index d4ae70b1..4507f9fb 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -146,9 +146,9 @@ namespace MemoryManager
         bool ok = frame_bitmap->find_and_toggle(false, start_index).try_set_value(index);
         if (!ok)
         {
-            kwarnln("OOM alert! Trying to free caches...");
-            StorageCache::clear_caches();
-            if (!frame_bitmap->find_and_toggle(false, start_index).try_set_value(index)) return err(ENOMEM);
+            kwarnln("OOM alert! Scheduling the OOM thread...");
+            Scheduler::signal_oom_thread();
+            return err(ENOMEM);
         }
 
         start_index = index + 1;
diff --git a/kernel/src/thread/Scheduler.cpp b/kernel/src/thread/Scheduler.cpp
index c74fd68c..cbab9a91 100644
--- a/kernel/src/thread/Scheduler.cpp
+++ b/kernel/src/thread/Scheduler.cpp
@@ -13,6 +13,7 @@
 static Thread g_idle;
 static Thread* g_current = nullptr;
 static Thread* g_init = nullptr;
 static Thread* g_reap = nullptr;
+static Thread* g_oom = nullptr;
 
 static const usize TICKS_PER_TIMESLICE = 20;
@@ -70,6 +71,17 @@ namespace Scheduler
         if (g_reap) g_reap->wake_up();
     }
 
+    void set_oom_thread(Thread* thread)
+    {
+        g_oom = thread;
+        g_oom->unrestricted_task = true;
+    }
+
+    void signal_oom_thread()
+    {
+        if (g_oom) g_oom->wake_up();
+    }
+
     Result<Thread*> new_kernel_thread_impl(Thread* thread, const char* name)
     {
         // If anything fails, make sure to clean up.
@@ -154,8 +166,13 @@
         u64 argv = TRY(image->push_string_vector_on_stack(args));
         u64 envp = TRY(image->push_string_vector_on_stack(env));
 
+        const u64 kernel_stack_base = TRY(MemoryManager::alloc_for_kernel(4, MMU::ReadWrite | MMU::NoExecute));
+        Stack kernel_stack { kernel_stack_base, 4 * ARCH_PAGE_SIZE };
+
         guard.deactivate();
 
+        thread->kernel_stack = kernel_stack;
+
         image->apply(thread);
 
         thread->set_arguments(args.size(), argv, env.size(), envp);
@@ -254,8 +271,12 @@ namespace Scheduler
             new_thread->ticks_left = 1; // The idle task only runs for 1 tick so we can check for new runnable tasks
                                         // as fast as possible.
         }
-        else
-            new_thread->ticks_left = TICKS_PER_TIMESLICE;
+        else if (new_thread->unrestricted_task)
+        {
+            check(new_thread->is_kernel);
+            new_thread->ticks_left = -1;
+        }
+        else { new_thread->ticks_left = TICKS_PER_TIMESLICE; }
     }
 
     void switch_task(Registers* regs)
diff --git a/kernel/src/thread/Scheduler.h b/kernel/src/thread/Scheduler.h
index 7c846ee5..0d357825 100644
--- a/kernel/src/thread/Scheduler.h
+++ b/kernel/src/thread/Scheduler.h
@@ -13,6 +13,9 @@ namespace Scheduler
     void set_reap_thread(Thread*);
     void signal_reap_thread();
 
+    void set_oom_thread(Thread*);
+    void signal_oom_thread();
+
     Result<Thread*> new_kernel_thread(u64 address, const char* name);
     Result<Thread*> new_kernel_thread(void (*func)(void), const char* name);
     Result<Thread*> new_kernel_thread(void (*func)(void*), void* arg, const char* name);
diff --git a/kernel/src/thread/Thread.h b/kernel/src/thread/Thread.h
index ef4f4252..585f02b2 100644
--- a/kernel/src/thread/Thread.h
+++ b/kernel/src/thread/Thread.h
@@ -109,6 +109,8 @@ struct Thread : public LinkedListNode<Thread>
     sigset_t pending_signals { 0 };
     bool interrupted { false };
 
+    bool unrestricted_task { false };
+
     FPData fp_data;
 
     ThreadState state = ThreadState::Runnable;
diff --git a/libluna/include/luna/HashMap.h b/libluna/include/luna/HashMap.h
index e88a1790..6a360d5f 100644
--- a/libluna/include/luna/HashMap.h
+++ b/libluna/include/luna/HashMap.h
@@ -49,6 +49,16 @@ template <typename K, typename V> struct HashMap
         return m_table.try_remove(HashPair<K, V> { key, {} });
     }
 
+    usize capacity() const
+    {
+        return m_table.capacity();
+    }
+
+    usize size() const
+    {
+        return m_table.size();
+    }
+
     void clear()
     {
         m_table.clear();
diff --git a/libluna/include/luna/HashTable.h b/libluna/include/luna/HashTable.h
index ddb724c7..1a366800 100644
--- a/libluna/include/luna/HashTable.h
+++ b/libluna/include/luna/HashTable.h
@@ -101,10 +101,23 @@ template <typename T> class HashTable
 
     void clear()
    {
-        for (usize i = 0; i < m_capacity; i++) m_buckets[i].~Option();
+        if (m_capacity)
+        {
+            for (usize i = 0; i < m_capacity; i++) m_buckets[i].~Option();
 
-        free_impl(m_buckets);
-        m_capacity = m_size = 0;
+            free_impl(m_buckets);
+            m_capacity = m_size = 0;
+        }
+    }
+
+    usize capacity() const
+    {
+        return m_capacity;
+    }
+
+    usize size() const
+    {
+        return m_size;
     }
 
     ~HashTable()
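
Note: the core of the deadlock fix is that the frame allocator no longer reclaims
memory inline. On an exhausted bitmap it now only wakes the dedicated [oom] kernel
thread and fails fast with ENOMEM, so StorageCache::clear_caches() always runs on a
thread that holds no allocator locks; the failed allocation itself is not retried, and
reclaim only helps subsequent attempts. Below is a minimal user-space C++ sketch of
that wake-and-return pattern, with std::condition_variable standing in for
kernel_wait_for_event()/Thread::wake_up(); the names oom_worker, request_reclaim and
g_oom_pending are illustrative, not kernel APIs.

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex g_oom_lock;
std::condition_variable g_oom_event;
bool g_oom_pending = false;

// Dedicated reclaimer: sleeps until signaled, then clears caches while
// holding no allocator lock, so reclaim can never deadlock an allocation.
void oom_worker()
{
    while (true)
    {
        std::unique_lock<std::mutex> lock(g_oom_lock);
        g_oom_event.wait(lock, [] { return g_oom_pending; });
        g_oom_pending = false;
        lock.unlock();

        // The kernel calls StorageCache::clear_caches() here.
        std::puts("oom: clearing caches");
    }
}

// Failing allocation path: wake the worker and fail fast with ENOMEM
// instead of reclaiming inline under an unknown set of held locks.
void request_reclaim()
{
    {
        std::lock_guard<std::mutex> lock(g_oom_lock);
        g_oom_pending = true;
    }
    g_oom_event.notify_one();
}

int main()
{
    std::thread worker(oom_worker);
    request_reclaim(); // simulate a failed frame allocation
    std::this_thread::sleep_for(std::chrono::milliseconds(100)); // let the worker run
    worker.detach(); // the worker loops forever; process exit ends it
    return 0;
}

The scheduler half of the patch complements this: the [oom] thread is flagged
unrestricted_task, so the scheduler gives it ticks_left = -1 instead of a normal
timeslice (with check(new_thread->is_kernel) asserting that only kernel threads may
run unrestricted), letting the reclaim finish without being preempted partway through.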