kernel, luna: Port non-VFS changes over from pull request #22
All checks were successful
continuous-integration/drone/push Build is passing
OwnedPtr, SharedPtr: Add operator bool
Option, Result: Make try_move_value() non-const since it modifies the Option
kernel: Switch to a stack we control for the main task as soon as we leave early boot
Heap: Fix GPF caused when making many small allocations
Heap: Avoid accessing a block after it's potentially deleted
luna: Skip UBSAN.cpp in CMakeLists as that's not implemented yet
luna: Use spinlocks in the heap implementation
kernel, luna: Move Spinlock.h to luna
Option: Use __builtin_launder to ensure that the compiler doesn't label this as UB
SharedPtr: Implement make_shared using adopt_shared
SharedPtr: Delete ptr on failure in all adopt_shared* functions
This commit is contained in:
parent
89958fbc74
commit
794567327f

@@ -18,7 +18,6 @@ set(SOURCES
     src/arch/Serial.cpp
     src/arch/Timer.cpp
     src/arch/PCI.cpp
-    src/thread/Spinlock.cpp
     src/thread/Thread.cpp
     src/thread/Scheduler.cpp
     src/sys/Syscall.cpp

@@ -2,10 +2,10 @@
 #include "arch/CPU.h"
 #include "arch/Serial.h"
 #include "arch/Timer.h"
-#include "thread/Spinlock.h"
 #include "video/TextConsole.h"
 #include <luna/Format.h>
 #include <luna/SourceLocation.h>
+#include <luna/Spinlock.h>
 
 static bool g_debug_enabled = true;
 static bool g_serial_enabled = true;

@@ -26,5 +26,7 @@ namespace CPU
     void get_stack_trace_at(Registers* regs, void (*callback)(u64, void*), void* arg);
     void print_stack_trace_at(Registers* regs);
 
+    [[noreturn]] void bootstrap_switch_stack(u64 stack, void* function);
+
     void pause();
 }

@@ -295,6 +295,15 @@ namespace CPU
                       &frame_index);
     }
 
+    [[noreturn]] void bootstrap_switch_stack(u64 stack, void* function)
+    {
+        asm volatile("mov %0, %%rsp\n"
+                     "jmp *%1"
+                     :
+                     : "r"(stack), "r"(function));
+        __builtin_unreachable();
+    }
+
     void pause()
     {
         asm volatile("pause");

@@ -87,11 +87,19 @@ Result<void> init()
     return {};
 }
 
-extern "C" [[noreturn]] void _start()
+[[noreturn]] void init_wrapper()
 {
-    Init::check_magic();
-    Init::early_init();
     auto rc = init();
     if (rc.has_error()) kerrorln("Runtime error: %s", rc.error_string());
     CPU::idle_loop();
 }
+
+// FIXME: Add a guard page to make sure the stack doesn't end up in random kernel memory. Also reclaim this memory after
+// leaving the init task.
+extern "C" [[noreturn]] void _start()
+{
+    Init::check_magic();
+    Init::early_init();
+    Stack stack { MemoryManager::alloc_for_kernel(8, MMU::ReadWrite | MMU::NoExecute).value(), 8 * ARCH_PAGE_SIZE };
+    CPU::bootstrap_switch_stack(stack.top(), (void*)init_wrapper);
+}

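For reference, the new _start leans on a Stack helper whose definition isn't part of this diff. A minimal sketch of the interface it would need, assuming it simply records a base address and a size (member names here are illustrative, not the kernel's actual ones):

    // Hypothetical sketch of a stack descriptor whose top() yields the initial stack pointer.
    // On x86_64 the stack grows downward, so the usable top is base + size.
    struct Stack
    {
        u64 m_base;    // lowest address of the mapped region
        usize m_bytes; // region size in bytes (8 * ARCH_PAGE_SIZE above)

        u64 top() const { return m_base + m_bytes; }
    };
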
@@ -1,7 +1,7 @@
 #include "memory/KernelVM.h"
 #include "arch/MMU.h"
-#include "thread/Spinlock.h"
 #include <luna/Bitmap.h>
+#include <luna/Spinlock.h>
 
 static const u64 KERNEL_VM_RANGE_START = 0xffffffffc0000000;

@@ -2,10 +2,10 @@
 #include "arch/MMU.h"
 #include "memory/KernelVM.h"
 #include "memory/MemoryMap.h"
-#include "thread/Spinlock.h"
 #include <luna/Alignment.h>
 #include <luna/Bitmap.h>
 #include <luna/ScopeGuard.h>
+#include <luna/Spinlock.h>
 #include <luna/SystemError.h>
 #include <luna/Types.h>

@@ -16,6 +16,7 @@ set(FREESTANDING_SOURCES
     src/TarStream.cpp
     src/DebugLog.cpp
    src/Heap.cpp
+    src/Spinlock.cpp
 )
 
 set(SOURCES

@@ -101,7 +101,7 @@ template <typename T> class Option
         return true;
     }
 
-    bool try_move_value(T& ref) const
+    bool try_move_value(T& ref)
     {
         if (!has_value()) return false;
         m_has_value = false;

@@ -138,26 +138,16 @@ template <typename T> class Option
   private:
    struct Storage
    {
-        u8 buffer[sizeof(T)];
-
-        T* fetch_ptr()
-        {
-            return (T*)buffer;
-        }
+        alignas(T) u8 buffer[sizeof(T)];
 
        T& fetch_reference()
        {
-            return *fetch_ptr();
-        }
-
-        const T* fetch_ptr() const
-        {
-            return (const T*)buffer;
+            return *__builtin_launder(reinterpret_cast<T*>(&buffer));
        }
 
        const T& fetch_reference() const
        {
-            return *fetch_ptr();
+            return *__builtin_launder(reinterpret_cast<const T*>(&buffer));
        }
 
        void store_reference(const T& ref)

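The two edits above fix separate problems: alignas(T) makes the raw byte buffer suitably aligned for T, and __builtin_launder tells the compiler that the resulting pointer refers to the object later constructed in that storage, so reusing the buffer isn't treated as undefined behaviour. A standalone illustration of the same pattern (generic C++, not luna code):

    #include <new>

    template <typename T> struct RawStorage
    {
        alignas(T) unsigned char buffer[sizeof(T)];

        void construct(const T& value)
        {
            new (buffer) T(value); // placement-new into the aligned buffer
        }

        T& get()
        {
            // Without launder, the compiler may assume this cast still refers to the
            // original buffer object rather than the T constructed inside it.
            return *__builtin_launder(reinterpret_cast<T*>(&buffer));
        }

        void destroy() { get().~T(); }
    };
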
@@ -59,6 +59,11 @@ template <typename T> class OwnedPtr
         return *m_ptr;
     }
 
+    operator bool() const
+    {
+        return m_ptr != nullptr;
+    }
+
     template <typename Type> friend Result<SharedPtr<Type>> adopt_shared_from_owned(OwnedPtr<Type>&&);
 
   private:

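With the conversion operator in place, an OwnedPtr (and SharedPtr, below) can be tested directly in a condition. An illustrative call site; the Thread instantiation is just a stand-in:

    // Sketch only: the condition is true when the pointer currently owns an object.
    void maybe_resume(const OwnedPtr<Thread>& thread)
    {
        if (thread) { /* safe to dereference */ }
    }
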
@@ -110,7 +110,7 @@ template <typename T> class Result
         return m_value.try_set_value(ref);
     }
 
-    bool try_move_value(T& ref) const
+    bool try_move_value(T& ref)
     {
         return m_value.try_move_value(ref);
     }

@@ -99,31 +99,34 @@ template <typename T> class SharedPtr
         return *m_ptr;
     }
 
+    operator bool() const
+    {
+        return m_ptr != nullptr;
+    }
+
   private:
     T* m_ptr;
     RefCount* m_ref_count;
 };
 
-template <typename T, class... Args> Result<SharedPtr<T>> make_shared(Args... args)
+// NOTE: ptr is deleted if any of the adopt_shared* functions fail to construct a SharedPtr.
+template <typename T> Result<SharedPtr<T>> adopt_shared(T* ptr)
 {
     using RefCount = __detail::RefCount;
 
-    RefCount* const ref_count = TRY(make<RefCount>());
-    auto guard = make_scope_guard([&] { delete ref_count; });
+    auto guard = make_scope_guard([ptr] { delete ptr; });
+
+    RefCount* const ref_count = TRY(make<RefCount>());
 
-    T* const ptr = TRY(make<T>(args...));
     guard.deactivate();
 
     return SharedPtr<T> { ptr, ref_count };
 }
 
-template <typename T> Result<SharedPtr<T>> adopt_shared(T* ptr)
+template <typename T, class... Args> Result<SharedPtr<T>> make_shared(Args... args)
 {
-    using RefCount = __detail::RefCount;
-
-    RefCount* const ref_count = TRY(make<RefCount>());
-
-    return SharedPtr<T> { ptr, ref_count };
+    T* raw_ptr = TRY(make<T>(args...));
+    return adopt_shared(raw_ptr);
 }
 
 template <typename T> Result<SharedPtr<T>> adopt_shared_if_nonnull(T* ptr)

@@ -138,13 +141,7 @@ template <typename T> Result<SharedPtr<T>> adopt_shared_from_owned(OwnedPtr<T>&&
     T* ptr = other.m_ptr;
     other.m_ptr = nullptr;
 
-    // FIXME: Should the pointee magically vanish on failure? Or go back into the OwnedPtr, even though it's been
-    // moved...
-    auto guard = make_scope_guard([&] { delete ptr; });
-
     const SharedPtr<T> shared_ptr = TRY(adopt_shared(ptr));
 
-    guard.deactivate();
-
     return shared_ptr;
 }

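Taken together, these changes give every creation path a single failure policy: adopt_shared owns the cleanup (it deletes ptr if allocating the RefCount fails), and make_shared plus adopt_shared_from_owned simply delegate to it. A hedged usage sketch; the Thread type is an illustrative stand-in:

    Result<SharedPtr<Thread>> spawn_thread()
    {
        // If adopt_shared cannot allocate the RefCount, the freshly made Thread is
        // deleted before the error propagates, so nothing leaks on the error path.
        return make_shared<Thread>();
    }
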
luna/include/luna/Spinlock.h (new file, 130 lines)
@@ -0,0 +1,130 @@
#pragma once
#include <luna/Atomic.h>
#include <luna/Option.h>

class Spinlock
{
  public:
    void lock();
    void unlock();

    bool try_lock();

    bool is_locked() const
    {
        return m_lock.load() != 0;
    }

  private:
    Atomic<int> m_lock { 0 };
};

class ScopeLock
{
  public:
    ScopeLock(Spinlock& lock);
    ~ScopeLock();

    ScopeLock(const ScopeLock&) = delete;
    ScopeLock(ScopeLock&&) = delete;

    Spinlock& take_over()
    {
        m_taken_over = true;
        return m_lock;
    }

  private:
    Spinlock& m_lock;
    bool m_taken_over { false };
};

class SafeScopeLock
{
  public:
    SafeScopeLock(Spinlock& lock);
    ~SafeScopeLock();

    SafeScopeLock(const SafeScopeLock&) = delete;
    SafeScopeLock(SafeScopeLock&&) = delete;

    bool did_succeed() const
    {
        return m_success;
    }

  private:
    Spinlock& m_lock;
    bool m_success { false };
};

template <typename T> class LockedValue
{
    struct LockedValueGuard
    {
        LockedValueGuard(LockedValue& value_ref) : m_value_ref(&value_ref)
        {
        }

        LockedValueGuard(const LockedValueGuard& other) = delete;
        LockedValueGuard(LockedValueGuard&& other)
        {
            m_value_ref = other.m_value_ref;
            other.m_value_ref = nullptr;
        }

        ~LockedValueGuard()
        {
            if (m_value_ref) m_value_ref->m_lock.unlock();
        }

        T& ref()
        {
            expect(m_value_ref, "LockedValueGuard::ref() called on a moved LockedValueGuard");
            return m_value_ref->m_value;
        }

        void set(const T& other)
        {
            ref() = other;
        }

        T* operator->()
        {
            return &ref();
        }

        T& operator*()
        {
            return ref();
        }

      private:
        LockedValue* m_value_ref;
    };

  public:
    LockedValue() : m_value()
    {
    }

    LockedValue(T value) : m_value(value)
    {
    }

    LockedValueGuard lock()
    {
        m_lock.lock();
        return { *this };
    }

    Option<LockedValueGuard> try_lock()
    {
        if (m_lock.try_lock()) { return { *this }; }
        return {};
    }

  private:
    T m_value;
    Spinlock m_lock;
};

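A brief usage sketch of the new primitives; the guarded data and call sites are invented for illustration:

    static LockedValue<int> g_counter;
    static Spinlock g_console_lock;

    void bump()
    {
        // lock() returns a guard; the spinlock is released when the guard goes out of scope.
        auto guard = g_counter.lock();
        *guard += 1;
    }

    void with_console()
    {
        ScopeLock lock(g_console_lock);
        // ... critical section ...
        // lock.take_over() would hand the raw Spinlock back to the caller so the
        // destructor skips the unlock; Heap.cpp's realloc_impl uses this below.
    }
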
@@ -6,6 +6,7 @@
 #include <luna/LinkedList.h>
 #include <luna/SafeArithmetic.h>
 #include <luna/ScopeGuard.h>
+#include <luna/Spinlock.h>
 #include <luna/SystemError.h>
 
 #ifdef USE_FREESTANDING

@@ -45,9 +46,10 @@ static_assert(sizeof(HeapBlock) == 48UL);
 static const isize HEAP_BLOCK_SIZE = 48;
 
 static LinkedList<HeapBlock> heap;
+static Spinlock g_heap_lock;
 
-// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
-// pages.
+// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount
+// of pages.
 static usize get_pages_for_allocation(usize bytes)
 {
     usize pages = get_blocks_from_size(bytes, PAGE_SIZE);

@@ -97,7 +99,7 @@ static Option<HeapBlock*> split(HeapBlock* block, usize size)
     const usize old_size =
         block->full_size; // Save the old value of this variable since we are going to use it after modifying it
 
-    if (available < (size + sizeof(HeapBlock)))
+    if (available <= (size + sizeof(HeapBlock)))
         return {}; // This block hasn't got enough free space to hold the requested size.
 
     const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));

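The stricter comparison rejects the exact-fit boundary case. With the 48-byte HeapBlock header from the static_assert above, the arithmetic looks like this (the free-space and request sizes are illustrative):

    // available = 64, size = 16, sizeof(HeapBlock) = 48
    // old: 64 <  64 is false -> split() proceeded with zero bytes of slack left over
    // new: 64 <= 64 is true  -> split() returns {} and the allocator looks elsewhere instead
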
@@ -128,6 +130,8 @@ static Result<void> combine_forward(HeapBlock* block)
     heap.remove(next);
     next->magic = BLOCK_DEAD;
 
+    block->full_size += next->full_size + sizeof(HeapBlock);
+
     if (next->status & BLOCK_END_MEM)
     {
         if (next->status & BLOCK_START_MEM)

@@ -140,8 +144,6 @@ static Result<void> combine_forward(HeapBlock* block)
         block->status |= BLOCK_END_MEM;
     }
 
-    block->full_size += next->full_size + sizeof(HeapBlock);
-
     return {};
 }

@@ -157,6 +159,8 @@ static Result<HeapBlock*> combine_backward(HeapBlock* block)
     heap.remove(block);
     block->magic = BLOCK_DEAD;
 
+    last->full_size += block->full_size + sizeof(HeapBlock);
+
     if (block->status & BLOCK_END_MEM)
     {
         if (block->status & BLOCK_START_MEM)

@@ -169,8 +173,6 @@ static Result<HeapBlock*> combine_backward(HeapBlock* block)
         last->status |= BLOCK_END_MEM;
     }
 
-    last->full_size += block->full_size + sizeof(HeapBlock);
-
     return last;
 }

@@ -178,6 +180,8 @@ Result<void*> malloc_impl(usize size, bool should_scrub)
 {
     if (!size) return (void*)BLOCK_MAGIC;
 
+    ScopeLock lock(g_heap_lock);
+
     size = align_up<16>(size);
 
     Option<HeapBlock*> block = heap.first();

@@ -231,6 +235,8 @@ Result<void> free_impl(void* ptr)
     if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
     if (!ptr) return {};
 
+    ScopeLock lock(g_heap_lock);
+
     HeapBlock* block = get_heap_block_for_pointer(ptr);
 
     if (block->magic != BLOCK_MAGIC)

@@ -286,6 +292,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
         return (void*)BLOCK_MAGIC;
     }
 
+    ScopeLock lock(g_heap_lock);
+
     HeapBlock* const block = get_heap_block_for_pointer(ptr);
 
     if (block->magic != BLOCK_MAGIC)

@@ -327,6 +335,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
 
     usize old_size = block->req_size;
 
+    lock.take_over().unlock();
+
     void* const new_ptr = TRY(malloc_impl(size, false));
     memcpy(new_ptr, ptr, old_size > size ? size : old_size);
     TRY(free_impl(ptr));

luna/src/Spinlock.cpp (new file, 58 lines)
@@ -0,0 +1,58 @@
#include <luna/DebugLog.h>
#include <luna/Spinlock.h>

#ifdef ARCH_X86_64
#define pause() asm volatile("pause")
#else
#error "Unsupported architecture"
#endif

void Spinlock::lock()
{
    int expected = 0;
    while (!m_lock.compare_exchange_strong(expected, 1))
    {
        expected = 0;
        pause();
    }
}

bool Spinlock::try_lock()
{
    int expected = 0;
    return m_lock.compare_exchange_strong(expected, 1);
}

void Spinlock::unlock()
{
    int expected = 1;
    if (!m_lock.compare_exchange_strong(expected, 0))
    {
        dbgln("Spinlock::unlock() called on an unlocked lock with value %d", expected);
    }
}

ScopeLock::ScopeLock(Spinlock& lock) : m_lock(lock)
{
    m_lock.lock();
}

ScopeLock::~ScopeLock()
{
    if (!m_taken_over) m_lock.unlock();
}

const u32 RETRIES = 5000000;

SafeScopeLock::SafeScopeLock(Spinlock& lock) : m_lock(lock)
{
    u32 tries_left = RETRIES;
    while (!lock.try_lock() && --tries_left) { pause(); }

    if (tries_left) m_success = true;
}

SafeScopeLock::~SafeScopeLock()
{
    if (m_success) m_lock.unlock();
}

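Note that lock() resets expected on every iteration because compare_exchange_strong writes the value it actually observed back into its argument on failure. A usage sketch for SafeScopeLock, which spins a bounded number of times instead of hanging forever (the call site is invented for illustration):

    bool try_print_panic_message(Spinlock& console_lock)
    {
        SafeScopeLock lock(console_lock);
        if (!lock.did_succeed()) return false; // still contended after ~5,000,000 spins; give up
        // ... use the console; the destructor unlocks only if the lock was acquired ...
        return true;
    }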