libluna: Allow callers to optimize heap allocations by telling us they won't resize the returned memory

strdup() now does this. If someone resizes strdup()-ed memory anyway, nothing bad will happen; they will just take a small performance hit the first time.

But I think realloc-ing strdup()-ed memory is rare enough that this trade-off is worth it.
apio 2023-04-27 17:36:25 +02:00
parent f0fc3ec7ca
commit b1e400d795
Signed by: apio (GPG Key ID: B8A7D06E42258954)

3 changed files with 31 additions and 15 deletions
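For illustration, here is what the new knob looks like from a caller's side. This is a sketch, not code from the commit; it assumes libluna's usual Result/TRY plumbing and mirrors the strdup() change below:

// A hypothetical caller cloning a fixed-size buffer that will never be resized.
// Passing may_realloc = false tells the allocator it can hand back a
// tightly-sized block instead of reserving slack for in-place growth.
static Result<char*> clone_string(const char* str)
{
    const usize len = strlen(str);
    char* dest = (char*)TRY(calloc_impl(len + 1, 1, /*may_realloc=*/false));
    memcpy(dest, str, len); // the +1 byte stays zeroed, terminating the copy
    return dest;
}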


@@ -22,9 +22,9 @@ void operator delete(void* ptr, usize size, std::align_val_t alignment) noexcept
 extern Result<void*> allocate_pages_impl(usize count);
 extern Result<void> release_pages_impl(void* address, usize count);
 
-Result<void*> malloc_impl(usize size, bool should_scrub = true);
-Result<void*> calloc_impl(usize nmemb, usize size);
-Result<void*> realloc_impl(void* ptr, usize size);
+Result<void*> malloc_impl(usize size, bool may_realloc = true, bool should_scrub = true);
+Result<void*> calloc_impl(usize nmemb, usize size, bool may_realloc = true);
+Result<void*> realloc_impl(void* ptr, usize size, bool may_realloc_again = true);
 
 Result<void> free_impl(void* ptr);
 void dump_heap_usage();
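Since the new parameters default to true, every existing call site keeps compiling with the old growth-friendly behavior; only callers that opt in change anything. A quick illustration (these call sites are made up for the example):

auto a = malloc_impl(128);                        // unchanged: may_realloc defaults to true
auto b = calloc_impl(4, 32);                      // unchanged: same default
auto c = malloc_impl(128, /*may_realloc=*/false); // opted in: the block is sized tightly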


@@ -87,7 +87,7 @@ extern "C"
    char* strdup(const char* str)
    {
        const usize len = strlen(str);
-       char* dest = (char*)calloc_impl(len + 1, 1).value_or(nullptr);
+       char* dest = (char*)calloc_impl(len + 1, 1, false).value_or(nullptr);
        if (!dest) return nullptr;
        memcpy(dest, str, len);
        return dest;
@@ -99,7 +99,7 @@ extern "C"
    char* strndup(const char* str, usize max)
    {
        const usize len = strnlen(str, max);
-       char* dest = (char*)calloc_impl(len + 1, 1).value_or(nullptr);
+       char* dest = (char*)calloc_impl(len + 1, 1, false).value_or(nullptr);
        if (!dest) return nullptr;
        memcpy(dest, str, len);
        return dest;


@@ -79,6 +79,8 @@ static void* get_pointer_from_heap_block(HeapBlock* block)
     return (void*)offset_ptr(block, HEAP_BLOCK_SIZE);
 }
 
+// Used when the caller tells us this block may be realloc-ed. In this case, we split the available space roughly
+// equally between both blocks.
 static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
 {
     usize available = space_available(block);
@@ -93,7 +95,20 @@ static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
     return available + block->req_size;
 }
 
-static Option<HeapBlock*> split(HeapBlock* block, usize size)
+// Used when the caller tells us this block will not be realloc-ed. In this case, we make the new block as small as
+// possible.
+static usize get_small_offset_to_split_at(HeapBlock* block, usize min)
+{
+    usize available = space_available(block);
+
+    available -= min; // reserve only min size for the new block.
+
+    available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary
+
+    return available + block->req_size;
+}
+
+static Option<HeapBlock*> split(HeapBlock* block, usize size, bool may_realloc)
 {
     const usize available = space_available(block); // How much space can we steal from this block?
     const usize old_size =
@@ -102,7 +117,8 @@ static Option<HeapBlock*> split(HeapBlock* block, usize size)
     if (available <= (size + sizeof(HeapBlock)))
         return {}; // This block hasn't got enough free space to hold the requested size.
 
-    const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
+    const usize offset = may_realloc ? get_fair_offset_to_split_at(block, size + sizeof(HeapBlock))
+                                     : get_small_offset_to_split_at(block, size + sizeof(HeapBlock));
 
     block->full_size = offset; // shrink the old block to fit this offset
     HeapBlock* const new_block = offset_ptr(block, offset + sizeof(HeapBlock));
@@ -178,7 +194,7 @@ static Result<HeapBlock*> combine_backward(HeapBlock* block)
     return last;
 }
 
-Result<void*> malloc_impl(usize size, bool should_scrub)
+Result<void*> malloc_impl(usize size, bool may_realloc, bool should_scrub)
 {
     if (!size) return (void*)BLOCK_MAGIC;
@@ -200,7 +216,7 @@ Result<void*> malloc_impl(usize size, bool should_scrub)
            }
            break; // We found a free block that's big enough!!
        }
-       auto rc = split(current, size);
+       auto rc = split(current, size, may_realloc);
        if (rc.has_value())
        {
            block = rc.value(); // We managed to get a free block from a larger used block!!
@@ -294,10 +310,10 @@ Result<void> free_impl(void* ptr)
     return {};
 }
 
-Result<void*> realloc_impl(void* ptr, usize size)
+Result<void*> realloc_impl(void* ptr, usize size, bool may_realloc_again)
 {
-    if (!ptr) return malloc_impl(size);
-    if (ptr == (void*)BLOCK_MAGIC) return malloc_impl(size);
+    if (!ptr) return malloc_impl(size, may_realloc_again);
+    if (ptr == (void*)BLOCK_MAGIC) return malloc_impl(size, may_realloc_again);
 
     if (!size)
     {
         TRY(free_impl(ptr));
@@ -349,7 +365,7 @@ Result<void*> realloc_impl(void* ptr, usize size)
     lock.take_over().unlock();
 
-    void* const new_ptr = TRY(malloc_impl(size, false));
+    void* const new_ptr = TRY(malloc_impl(size, may_realloc_again, false));
     memcpy(new_ptr, ptr, old_size > size ? size : old_size);
     TRY(free_impl(ptr));
@@ -358,10 +374,10 @@ Result<void*> realloc_impl(void* ptr, usize size)
     return new_ptr;
 }
 
-Result<void*> calloc_impl(usize nmemb, usize size)
+Result<void*> calloc_impl(usize nmemb, usize size, bool may_realloc)
 {
     const usize realsize = TRY(safe_mul(nmemb, size));
-    void* const ptr = TRY(malloc_impl(realsize, false));
+    void* const ptr = TRY(malloc_impl(realsize, may_realloc, false));
     return memset(ptr, 0, realsize);
 }
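To make the two split strategies concrete, here is a small self-contained sketch of the arithmetic. The Block layout, the HEADER constant, and the exact halving in the fair strategy are simplifying assumptions (only part of get_fair_offset_to_split_at is visible in this diff); the point is the difference in how much slack each strategy leaves the new block.

#include <cstdio>
#include <cstddef>

using usize = std::size_t;

// Simplified stand-in for the real HeapBlock bookkeeping (assumed layout).
struct Block
{
    usize req_size;  // bytes the owner actually requested
    usize full_size; // bytes of data space the block really owns
};

constexpr usize HEADER = 48; // assumed sizeof(HeapBlock)

template <usize N> static usize align_down(usize value) { return value & ~(N - 1); }

static usize space_available(const Block& b) { return b.full_size - b.req_size; }

// Mirrors get_small_offset_to_split_at(): give the new block only `min` bytes,
// leaving every remaining byte of slack with the old block.
static usize small_offset(const Block& b, usize min)
{
    usize available = space_available(b);
    available -= min;
    available = align_down<16>(available);
    return available + b.req_size;
}

// Approximates get_fair_offset_to_split_at(): split the remaining slack
// roughly in half between the old and the new block (assumed behavior).
static usize fair_offset(const Block& b, usize min)
{
    usize available = space_available(b);
    available -= min;
    available -= available / 2;
    available = align_down<16>(available);
    return available + b.req_size;
}

int main()
{
    const Block b { 32, 512 };  // a used block with 480 bytes of slack
    const usize request = 64;   // the new allocation we want to carve out
    const usize min = request + HEADER;

    // small: offset 400 -> the new block's data area is 512 - 400 - 48 = 64 bytes,
    // exactly the request; no growth room the caller promised never to use.
    std::printf("small split offset: %zu\n", small_offset(b, min));

    // fair: offset 208 -> the new block gets 512 - 208 - 48 = 256 bytes,
    // leaving ~192 bytes of headroom for a later in-place realloc.
    std::printf("fair split offset:  %zu\n", fair_offset(b, min));
}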