Compare commits

..

3 Commits

Author SHA1 Message Date
38bd3d3eef
luna: Use spinlocks in the heap implementation
Some checks failed
continuous-integration/drone/pr Build is failing
2023-02-14 20:07:30 +01:00
4e88a7482c
kernel, luna: Move Spinlock.h to luna 2023-02-14 20:05:00 +01:00
6eff41e7af
Option: Use __builtin_launder to ensure that the compiler doesn't label this as UB 2023-02-14 20:03:29 +01:00
9 changed files with 39 additions and 42 deletions

View File

@@ -18,7 +18,6 @@ set(SOURCES
src/arch/Serial.cpp
src/arch/Timer.cpp
src/arch/PCI.cpp
src/thread/Spinlock.cpp
src/thread/Thread.cpp
src/thread/Scheduler.cpp
src/sys/Syscall.cpp

View File

@@ -2,10 +2,10 @@
#include "arch/CPU.h"
#include "arch/Serial.h"
#include "arch/Timer.h"
#include "thread/Spinlock.h"
#include "video/TextConsole.h"
#include <luna/Format.h>
#include <luna/SourceLocation.h>
#include <luna/Spinlock.h>
static bool g_debug_enabled = true;
static bool g_serial_enabled = true;

View File

@@ -1,7 +1,7 @@
#include "memory/KernelVM.h"
#include "arch/MMU.h"
#include "thread/Spinlock.h"
#include <luna/Bitmap.h>
#include <luna/Spinlock.h>
static const u64 KERNEL_VM_RANGE_START = 0xffffffffc0000000;

View File

@@ -2,10 +2,10 @@
#include "arch/MMU.h"
#include "memory/KernelVM.h"
#include "memory/MemoryMap.h"
#include "thread/Spinlock.h"
#include <luna/Alignment.h>
#include <luna/Bitmap.h>
#include <luna/ScopeGuard.h>
#include <luna/Spinlock.h>
#include <luna/SystemError.h>
#include <luna/Types.h>

View File

@@ -16,6 +16,8 @@ set(FREESTANDING_SOURCES
src/TarStream.cpp
src/DebugLog.cpp
src/Heap.cpp
src/Spinlock.cpp
src/UBSAN.cpp
)
set(SOURCES

View File

@@ -138,26 +138,16 @@ template <typename T> class Option
private:
struct Storage
{
u8 buffer[sizeof(T)];
T* fetch_ptr()
{
return (T*)buffer;
}
alignas(T) u8 buffer[sizeof(T)];
T& fetch_reference()
{
return *fetch_ptr();
}
const T* fetch_ptr() const
{
return (const T*)buffer;
return *__builtin_launder(reinterpret_cast<T*>(&buffer));
}
const T& fetch_reference() const
{
return *fetch_ptr();
return *__builtin_launder(reinterpret_cast<const T*>(&buffer));
}
void store_reference(const T& ref)

View File

@@ -1,6 +1,4 @@
#pragma once
#include "Log.h"
#include "arch/CPU.h"
#include <luna/Atomic.h>
#include <luna/Option.h>
@@ -30,8 +28,15 @@ class ScopeLock
ScopeLock(const ScopeLock&) = delete;
ScopeLock(ScopeLock&&) = delete;
Spinlock& take_over()
{
m_taken_over = true;
return m_lock;
}
private:
Spinlock& m_lock;
bool m_taken_over { false };
};
class SafeScopeLock
@@ -107,25 +112,11 @@ template <typename T> class LockedValue
{
}
#ifndef LOCKED_VALUE_DEBUG
LockedValueGuard lock()
{
m_lock.lock();
return { *this };
}
#else
LockedValueGuard lock()
{
if (m_lock.try_lock()) { return { *this }; }
kwarnln("Spinning on a locked LockedValue. This might lead to a deadlock...");
CPU::print_stack_trace();
m_lock.lock();
return { *this };
}
#endif
Option<LockedValueGuard> try_lock()
{

View File

@@ -6,6 +6,7 @@
#include <luna/LinkedList.h>
#include <luna/SafeArithmetic.h>
#include <luna/ScopeGuard.h>
#include <luna/Spinlock.h>
#include <luna/SystemError.h>
#ifdef USE_FREESTANDING
@@ -45,9 +46,10 @@ static_assert(sizeof(HeapBlock) == 48UL);
static const isize HEAP_BLOCK_SIZE = 48;
static LinkedList<HeapBlock> heap;
static Spinlock g_heap_lock;
// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
// pages.
// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount
// of pages.
static usize get_pages_for_allocation(usize bytes)
{
usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
@@ -178,6 +180,8 @@ Result<void*> malloc_impl(usize size, bool should_scrub)
{
if (!size) return (void*)BLOCK_MAGIC;
ScopeLock lock(g_heap_lock);
size = align_up<16>(size);
Option<HeapBlock*> block = heap.first();
@@ -231,6 +235,8 @@ Result<void> free_impl(void* ptr)
if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
if (!ptr) return {};
ScopeLock lock(g_heap_lock);
HeapBlock* block = get_heap_block_for_pointer(ptr);
if (block->magic != BLOCK_MAGIC)
@@ -286,6 +292,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
return (void*)BLOCK_MAGIC;
}
ScopeLock lock(g_heap_lock);
HeapBlock* const block = get_heap_block_for_pointer(ptr);
if (block->magic != BLOCK_MAGIC)
@@ -327,6 +335,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
usize old_size = block->req_size;
lock.take_over().unlock();
void* const new_ptr = TRY(malloc_impl(size, false));
memcpy(new_ptr, ptr, old_size > size ? size : old_size);
TRY(free_impl(ptr));

View File

@@ -1,6 +1,11 @@
#include "thread/Spinlock.h"
#include "Log.h"
#include "arch/CPU.h"
#include <luna/DebugLog.h>
#include <luna/Spinlock.h>
#ifdef ARCH_X86_64
#define pause() asm volatile("pause")
#else
#error "Unsupported architecture"
#endif
void Spinlock::lock()
{
@@ -8,7 +13,7 @@ void Spinlock::lock()
while (!m_lock.compare_exchange_strong(expected, 1))
{
expected = 0;
CPU::pause();
pause();
}
}
@@ -23,7 +28,7 @@ void Spinlock::unlock()
int expected = 1;
if (!m_lock.compare_exchange_strong(expected, 0))
{
kwarnln("Spinlock::unlock() called on an unlocked lock with value %d", expected);
dbgln("Spinlock::unlock() called on an unlocked lock with value %d", expected);
}
}
@@ -34,7 +39,7 @@ ScopeLock::ScopeLock(Spinlock& lock) : m_lock(lock)
ScopeLock::~ScopeLock()
{
m_lock.unlock();
if (!m_taken_over) m_lock.unlock();
}
const u32 RETRIES = 5000000;
@@ -42,7 +47,7 @@ const u32 RETRIES = 5000000;
SafeScopeLock::SafeScopeLock(Spinlock& lock) : m_lock(lock)
{
u32 tries_left = RETRIES;
while (!lock.try_lock() && --tries_left) { CPU::pause(); }
while (!lock.try_lock() && --tries_left) { pause(); }
if (tries_left) m_success = true;
}