luna: Use spinlocks in the heap implementation
Some checks failed
continuous-integration/drone/pr Build is failing
This commit is contained in:
parent 4e88a7482c
commit 38bd3d3eef
@@ -6,6 +6,7 @@
 #include <luna/LinkedList.h>
 #include <luna/SafeArithmetic.h>
 #include <luna/ScopeGuard.h>
+#include <luna/Spinlock.h>
 #include <luna/SystemError.h>

 #ifdef USE_FREESTANDING
@@ -45,9 +46,10 @@ static_assert(sizeof(HeapBlock) == 48UL);
 static const isize HEAP_BLOCK_SIZE = 48;

 static LinkedList<HeapBlock> heap;
+static Spinlock g_heap_lock;

-// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
-// pages.
+// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount
+// of pages.
 static usize get_pages_for_allocation(usize bytes)
 {
     usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
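The new <luna/Spinlock.h> header itself is not part of this diff, so the exact interface is not shown. Judging purely from how the lock is used in this commit (a Spinlock guarded by an RAII ScopeLock, plus a take_over() call in realloc_impl further down), it presumably looks roughly like the following sketch; the names and semantics are inferred from usage here, not taken from the actual luna header.

// Minimal sketch, assuming an interface like the one this commit relies on;
// the real <luna/Spinlock.h> may differ.
#include <atomic>

class Spinlock
{
  public:
    void lock()
    {
        // Spin until the flag flips from clear to set.
        while (m_flag.test_and_set(std::memory_order_acquire)) {}
    }

    void unlock() { m_flag.clear(std::memory_order_release); }

  private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

class ScopeLock
{
  public:
    explicit ScopeLock(Spinlock& lock) : m_lock(&lock) { m_lock->lock(); }

    // Unlocks on every exit path, unless ownership was taken over.
    ~ScopeLock()
    {
        if (m_lock) m_lock->unlock();
    }

    // Disarm the destructor and hand the still-held Spinlock back to the
    // caller, who becomes responsible for unlocking it.
    Spinlock& take_over()
    {
        Spinlock* lock = m_lock;
        m_lock = nullptr;
        return *lock;
    }

  private:
    Spinlock* m_lock;
};

A single global spinlock is a natural fit for a heap that is also built freestanding (see the #ifdef USE_FREESTANDING above): it needs no scheduler support, at the cost of busy-waiting whenever another CPU currently holds g_heap_lock.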
@@ -178,6 +180,8 @@ Result<void*> malloc_impl(usize size, bool should_scrub)
 {
     if (!size) return (void*)BLOCK_MAGIC;

+    ScopeLock lock(g_heap_lock);
+
     size = align_up<16>(size);

     Option<HeapBlock*> block = heap.first();
@@ -231,6 +235,8 @@ Result<void> free_impl(void* ptr)
     if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
     if (!ptr) return {};

+    ScopeLock lock(g_heap_lock);
+
     HeapBlock* block = get_heap_block_for_pointer(ptr);

     if (block->magic != BLOCK_MAGIC)
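One property of the RAII guard worth spelling out: free_impl (and realloc_impl below) can return early through the block->magic != BLOCK_MAGIC error path, and the ScopeLock destructor releases g_heap_lock on that path too, so no manual unlock is needed before each return. A tiny illustration, reusing the sketched types above:

// Illustration only: the guard releases the lock on every exit path,
// including early error returns like the corrupted-block check above.
static Spinlock g_lock;
static int g_counter = 0;

static bool add_if_positive(int value)
{
    ScopeLock lock(g_lock);

    if (value <= 0) return false; // destructor unlocks g_lock here

    g_counter += value;
    return true; // ...and here
}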
@@ -286,6 +292,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
         return (void*)BLOCK_MAGIC;
     }

+    ScopeLock lock(g_heap_lock);
+
     HeapBlock* const block = get_heap_block_for_pointer(ptr);

     if (block->magic != BLOCK_MAGIC)
@@ -327,6 +335,8 @@ Result<void*> realloc_impl(void* ptr, usize size)

     usize old_size = block->req_size;

+    lock.take_over().unlock();
+
     void* const new_ptr = TRY(malloc_impl(size, false));
     memcpy(new_ptr, ptr, old_size > size ? size : old_size);
     TRY(free_impl(ptr));
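The subtle part of the change is this last hunk. realloc_impl's slow path falls back to malloc_impl + memcpy + free_impl, and both of those functions now acquire g_heap_lock themselves; assuming the spinlock is not reentrant, keeping it held here would deadlock on the second acquisition. lock.take_over().unlock() drops the lock early and disarms the guard so it is not unlocked a second time when lock goes out of scope. The pattern, again with the sketched types:

// Sketch of the hand-off in realloc_impl: release a non-reentrant lock
// before calling helpers that acquire it again.
static Spinlock g_lock;

static void helper()
{
    ScopeLock lock(g_lock); // would spin forever if the caller still held g_lock
    // ... touch shared state ...
}

static void caller()
{
    ScopeLock lock(g_lock);
    // ... read whatever must be read under the lock ...

    lock.take_over().unlock(); // release early, disarm the guard

    helper(); // safe: g_lock is free again
}

This does open a window where other threads can modify the heap between the unlock and the re-allocation, which is presumably fine here because the caller still owns the old block and old_size was already read under the lock.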