kernel: Try to grow the stack on stack overflows, up to a maximum of 8 MB

All checks were successful — Build and test / build (push): successful in 1m49s

This helps keep GCC happy when compiling slightly complex programs :)
This commit is contained in:
parent
e0ed4be0db
commit
de6f5c38d8
@ -14,6 +14,7 @@
|
||||
#include "video/TextConsole.h"
|
||||
#include <bits/signal.h>
|
||||
#include <cpuid.h>
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/CString.h>
|
||||
#include <luna/CircularQueue.h>
|
||||
#include <luna/Result.h>
|
||||
@ -56,6 +57,8 @@ void FPData::restore()
|
||||
asm volatile("fxrstor (%0)" : : "r"(m_data));
|
||||
}
|
||||
|
||||
static constexpr usize MAX_STACK_SIZE = 8 * 1024 * 1024; // 8 MB
|
||||
|
||||
// Interrupt handling
|
||||
|
||||
#define FIXME_UNHANDLED_INTERRUPT(name) \
|
||||
@ -75,14 +78,61 @@ void decode_page_fault_error_code(u64 code)
|
||||
(code & PF_RESERVED) ? " | Reserved bits set" : "", (code & PF_NX_VIOLATION) ? " | NX violation" : "");
|
||||
}
|
||||
|
||||
static void check_stack(Thread* current, Registers* regs)
|
||||
static bool check_stack(Thread* current, Registers* regs)
|
||||
{
|
||||
if (regs->rsp < current->stack.bottom() || regs->rsp >= current->stack.top())
|
||||
kerrorln("Abnormal stack (RSP outside the normal range, %.16lx-%.16lx)", current->stack.bottom(),
|
||||
current->stack.top());
|
||||
|
||||
if (regs->rsp >= (current->stack.bottom() - ARCH_PAGE_SIZE) && regs->rsp < current->stack.bottom())
|
||||
kerrorln("Likely stack overflow (CPU exception inside guard page)");
|
||||
if (regs->rsp >= (current->stack.bottom() - (ARCH_PAGE_SIZE * 8)) && regs->rsp < current->stack.bottom())
|
||||
{
|
||||
kerrorln("Likely stack overflow (CPU exception a few pages below the stack)");
|
||||
// Try to grow the stack
|
||||
// FIXME: This should be extracted into an architecture-independent file.
|
||||
usize stack_space_remaining = MAX_STACK_SIZE - current->stack.bytes();
|
||||
if (!stack_space_remaining)
|
||||
{
|
||||
kwarnln("Failed to grow stack: this thread already used up all its stack space");
|
||||
return false;
|
||||
}
|
||||
|
||||
usize exceeded_bytes = align_up<ARCH_PAGE_SIZE>(current->stack.bottom() - regs->rsp);
|
||||
if (exceeded_bytes > stack_space_remaining)
|
||||
{
|
||||
kwarnln("Failed to grow stack: this thread needs more space than the one it has remaining (%zu bytes out "
|
||||
"of %zu remaining)",
|
||||
exceeded_bytes, stack_space_remaining);
|
||||
return false;
|
||||
}
|
||||
|
||||
// If we can, we'll add 2 more pages of buffer space, otherwise we use whatever we can.
|
||||
usize bytes_to_grow = min(stack_space_remaining, exceeded_bytes + 2 * ARCH_PAGE_SIZE);
|
||||
auto maybe_base =
|
||||
current->address_space->grow_region(current->stack.bottom(), bytes_to_grow / ARCH_PAGE_SIZE, true);
|
||||
if (maybe_base.has_error())
|
||||
{
|
||||
kwarnln("Failed to grow stack: could not allocate virtual memory space (%s)", maybe_base.error_string());
|
||||
return false;
|
||||
}
|
||||
|
||||
u64 base = maybe_base.release_value();
|
||||
auto result = MemoryManager::alloc_at_zeroed(base, bytes_to_grow / ARCH_PAGE_SIZE,
|
||||
MMU::ReadWrite | MMU::NoExecute | MMU::User);
|
||||
if (result.has_error())
|
||||
{
|
||||
current->address_space->free_region(base, bytes_to_grow / ARCH_PAGE_SIZE);
|
||||
kwarnln("Failed to grow stack: could not allocate physical pages (%s)", result.error_string());
|
||||
return false;
|
||||
}
|
||||
|
||||
kinfoln("Stack expanded from %lx (%zu bytes) to %lx (%zu bytes)", current->stack.bottom(),
|
||||
current->stack.bytes(), base, current->stack.bytes() + bytes_to_grow);
|
||||
|
||||
current->stack = Stack { base, current->stack.bytes() + bytes_to_grow };
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void handle_cpu_exception(int signo, const char* err, Registers* regs)
|
||||
@ -98,7 +148,7 @@ void handle_cpu_exception(int signo, const char* err, Registers* regs)
|
||||
if (!is_in_kernel(regs))
|
||||
{
|
||||
auto* current = Scheduler::current();
|
||||
check_stack(current, regs);
|
||||
if (check_stack(current, regs)) return;
|
||||
|
||||
current->send_signal(signo);
|
||||
current->process_pending_signals(regs);
|
||||
|
@ -105,6 +105,44 @@ Result<u64> AddressSpace::alloc_region(usize count, int prot, int flags, off_t o
|
||||
region->shmid = shmid;
|
||||
};
|
||||
|
||||
for (auto* region = m_regions.expect_first(); region; region = m_regions.next(region).value_or(nullptr))
|
||||
{
|
||||
if (!region->used)
|
||||
{
|
||||
if (region->count < count) continue;
|
||||
if (region->count == count)
|
||||
{
|
||||
update_region(region);
|
||||
u64 address = region->start;
|
||||
try_merge_region_with_neighbors(region);
|
||||
return address;
|
||||
}
|
||||
|
||||
u64 boundary = region->end - (count * ARCH_PAGE_SIZE);
|
||||
|
||||
auto* new_region = TRY(split_region(region, boundary));
|
||||
update_region(new_region);
|
||||
try_merge_region_with_neighbors(new_region);
|
||||
|
||||
return boundary;
|
||||
}
|
||||
}
|
||||
|
||||
return err(ENOMEM);
|
||||
}
|
||||
|
||||
Result<u64> AddressSpace::alloc_region_near_end(usize count, int prot, int flags, off_t offset, u64 shmid,
|
||||
bool persistent)
|
||||
{
|
||||
auto update_region = [=](VMRegion* region) {
|
||||
region->used = true;
|
||||
region->persistent = persistent;
|
||||
region->prot = prot;
|
||||
region->flags = flags;
|
||||
region->offset = offset;
|
||||
region->shmid = shmid;
|
||||
};
|
||||
|
||||
for (auto* region = m_regions.expect_last(); region; region = m_regions.previous(region).value_or(nullptr))
|
||||
{
|
||||
if (!region->used)
|
||||
@ -131,6 +169,42 @@ Result<u64> AddressSpace::alloc_region(usize count, int prot, int flags, off_t o
|
||||
return err(ENOMEM);
|
||||
}
|
||||
|
||||
// Grows the VM region that starts at `address` by `count` pages, stealing the
// pages from the adjacent free region (the previous one when `backwards` is
// true, the next one otherwise).
// Returns the (possibly new) start address of the grown region, or:
//   EINVAL - `address` is outside the user VM range.
//   ENOMEM - no region starts at `address`, or the neighbor is missing,
//            in use, persistent, or too small to donate `count` pages.
Result<u64> AddressSpace::grow_region(u64 address, usize count, bool backwards)
{
    if (address >= VM_END) return err(EINVAL);

    for (auto* region : m_regions)
    {
        if (region->start != address) continue;

        auto* neighbor =
            backwards ? m_regions.previous(region).value_or(nullptr) : m_regions.next(region).value_or(nullptr);

        // The region may sit at either end of the list, in which case there is
        // no neighbor to take pages from. (Without this check, the dereference
        // below would be a null-pointer dereference.)
        if (!neighbor) return err(ENOMEM);

        // We can only take pages from a free, non-persistent neighbor.
        if (neighbor->persistent || neighbor->used) return err(ENOMEM);

        if (neighbor->count < count) return err(ENOMEM);

        // Shrink the neighbor by `count` pages on the side facing us.
        neighbor->count -= count;
        if (backwards) neighbor->end -= count * ARCH_PAGE_SIZE;
        else
            neighbor->start += count * ARCH_PAGE_SIZE;
        if (neighbor->count == 0)
        {
            m_regions.remove(neighbor);
            delete neighbor;
        }

        // Extend our region into the space the neighbor gave up.
        region->count += count;
        if (backwards) region->start -= count * ARCH_PAGE_SIZE;
        else
            region->end += count * ARCH_PAGE_SIZE;

        return region->start;
    }

    return err(ENOMEM);
}
|
||||
|
||||
Result<bool> AddressSpace::set_region(u64 address, usize count, bool used, int prot, int flags, off_t offset, u64 shmid,
|
||||
bool persistent)
|
||||
{
|
||||
|
@ -32,6 +32,11 @@ class AddressSpace
|
||||
|
||||
Result<u64> alloc_region(usize count, int prot, int flags, off_t offset, u64 shmid = 0, bool persistent = false);
|
||||
|
||||
Result<u64> alloc_region_near_end(usize count, int prot, int flags, off_t offset, u64 shmid = 0,
|
||||
bool persistent = false);
|
||||
|
||||
Result<u64> grow_region(u64 address, usize count, bool backwards);
|
||||
|
||||
Result<bool> test_and_alloc_region(u64 address, usize count, int prot, int flags, off_t offset, u64 shmid = 0,
|
||||
bool persistent = false)
|
||||
{
|
||||
|
@ -7,23 +7,15 @@
|
||||
|
||||
static constexpr usize DEFAULT_USER_STACK_PAGES = 6;
|
||||
static constexpr usize DEFAULT_USER_STACK_SIZE = DEFAULT_USER_STACK_PAGES * ARCH_PAGE_SIZE;
|
||||
static constexpr u64 THREAD_STACK_BASE = 0x10000;
|
||||
|
||||
static Result<void> create_user_stack(Stack& user_stack, AddressSpace* space)
|
||||
{
|
||||
if (!TRY(space->test_and_alloc_region(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, 0, true)))
|
||||
return err(ENOMEM);
|
||||
auto base = TRY(space->alloc_region_near_end(DEFAULT_USER_STACK_PAGES, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, 0, true));
|
||||
|
||||
// Stack overflow guard page, remains unmapped.
|
||||
if (!TRY(space->test_and_alloc_region(THREAD_STACK_BASE - ARCH_PAGE_SIZE, 1, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
|
||||
0, 0, true)))
|
||||
return err(ENOMEM);
|
||||
TRY(MemoryManager::alloc_at_zeroed(base, DEFAULT_USER_STACK_PAGES, MMU::ReadWrite | MMU::NoExecute | MMU::User));
|
||||
|
||||
TRY(MemoryManager::alloc_at_zeroed(THREAD_STACK_BASE, DEFAULT_USER_STACK_PAGES,
|
||||
MMU::ReadWrite | MMU::NoExecute | MMU::User));
|
||||
|
||||
user_stack = { THREAD_STACK_BASE, DEFAULT_USER_STACK_SIZE };
|
||||
user_stack = { base, DEFAULT_USER_STACK_SIZE };
|
||||
|
||||
return {};
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user