apio
0edabd6d87
*facepalm* This was causing page faults when the heap had two blocks and the first (oldest) one was freed first.
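For context, a minimal repro sketch of the scenario described above (the test function and sizes are hypothetical, not part of the commit): two allocations big enough that each gets its own page range, with the oldest one freed first.

// Hypothetical repro (sketch): each allocation is too large to be carved out
// of the other's page range, so the heap ends up with two separate ranges.
static Result<void> repro_two_ranges()
{
    void* a = TRY(kmalloc(3 * ARCH_PAGE_SIZE)); // oldest block, own page range
    void* b = TRY(kmalloc(3 * ARCH_PAGE_SIZE)); // second, separate page range
    TRY(kfree(a)); // releases the whole first range; presumably the stale
                   // heap_start/list links that kfree() now fixes up kept
                   // pointing into the unmapped range...
    TRY(kfree(b)); // ...so the next heap traversal page-faulted.
    return {};
}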
400 lines
11 KiB
C++
#include "memory/Heap.h"
|
|
#include "Log.h"
|
|
#include "arch/MMU.h"
|
|
#include "arch/Serial.h"
|
|
#include "memory/MemoryManager.h"
|
|
#include <luna/Alignment.h>
|
|
#include <luna/String.h>
|
|
#include <luna/SystemError.h>
|
|
|
|
static constexpr int BLOCK_USED = 1 << 0;
|
|
static constexpr int BLOCK_START_MEM = 1 << 1;
|
|
static constexpr int BLOCK_END_MEM = 1 << 2;
|
|
|
|
static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
|
|
static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
|
|
|
|
static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
|
|
|
|
struct HeapBlock
|
|
{
|
|
usize req_size;
|
|
usize full_size;
|
|
int status;
|
|
HeapBlock* next;
|
|
HeapBlock* last;
|
|
usize magic;
|
|
};
|
|
|
|
static_assert(sizeof(HeapBlock) == 48UL);
|
|
|
|
static HeapBlock* heap_start = nullptr;
|
|
static HeapBlock* heap_end = nullptr;
|
|
|
|
static usize start_addr = 0xffffffff80000000;
|
|
|
|
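// Rough picture of one mapped page range (sketch, added for orientation; sizes
// not to scale):
//
//   [HeapBlock][payload.....][HeapBlock][payload..........]
//    ^ first header carries BLOCK_START_MEM; the last one carries BLOCK_END_MEM.
//
// full_size counts payload bytes only, and every header sits exactly
// sizeof(HeapBlock) == 48 bytes before the pointer handed out to callers
// (see the static_assert above and the offset_ptr helpers below).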
// FIXME: Keep track of virtual address space usage. For now, since the address
// space is so huge, we can just start at a fairly large address and assume
// we'll never run into anything, but this will probably bite us in the future.
static Result<HeapBlock*> allocate_pages(usize count)
{
    void* const ptr = (void*)TRY(MemoryManager::alloc_at(start_addr, count, MMU::ReadWrite | MMU::NoExecute));
    if (ptr) start_addr += (count * ARCH_PAGE_SIZE);
    return (HeapBlock*)ptr;
}

static Result<void> release_pages(void* ptr, usize count)
{
    return MemoryManager::unmap_owned((u64)ptr, count);
}

// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount
// of pages.
static usize get_pages_for_allocation(usize bytes)
{
    usize pages = get_blocks_from_size(bytes, ARCH_PAGE_SIZE);
    if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
    return pages;
}
static bool is_block_free(HeapBlock* block)
{
    return !(block->status & BLOCK_USED);
}

static usize space_available(HeapBlock* block)
{
    expect(!is_block_free(block), "Attempting to split a free block");
    return block->full_size - block->req_size;
}

static HeapBlock* get_heap_block_for_pointer(void* ptr)
{
    return (HeapBlock*)offset_ptr(ptr, -48);
}

static void* get_pointer_from_heap_block(HeapBlock* block)
{
    return (void*)offset_ptr(block, 48);
}
static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
{
    usize available = space_available(block);

    available -= min; // reserve at least min size for the new block.

    available -= (available / 2); // reserve half of the rest for the new block, while still leaving another half for
                                  // the old one.

    check(is_aligned(available, 16UL)); // If necessary, we can just align it. This is more of a sanity check than a
                                        // requirement.

    return available + block->req_size;
}
static Result<HeapBlock*> split(HeapBlock* block, usize size)
{
    const usize available = space_available(block); // How much space can we steal from this block?
    const usize old_size = block->full_size; // Save the old value of this variable, since we are going to use it
                                             // after modifying it.

    if (available < (size + sizeof(HeapBlock))) return err(0); // This error is not propagated.

    const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
    block->full_size = offset; // shrink the old block to fit this offset

    HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));

    new_block->magic = BLOCK_MAGIC;
    new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
    new_block->full_size = old_size - (offset + sizeof(HeapBlock));
    new_block->next = block->next;
    new_block->last = block;

    block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in this memory range
    block->next = new_block;

    return new_block;
}
static Result<void> combine_forward(HeapBlock* block)
{
    HeapBlock* const next = block->next;
    if (next == heap_end) heap_end = block;
    next->magic = BLOCK_DEAD;

    block->next = block->next->next;
    if (block->next) block->next->last = block;

    if (next->status & BLOCK_END_MEM)
    {
        if (next->status & BLOCK_START_MEM)
        {
            TRY(release_pages(next, get_blocks_from_size(next->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
            return {};
        }
        else
            block->status |= BLOCK_END_MEM;
    }

    block->full_size += next->full_size + sizeof(HeapBlock);

    return {};
}
static Result<HeapBlock*> combine_backward(HeapBlock* block)
{
    HeapBlock* const last = block->last;
    if (block == heap_end) heap_end = last;
    block->magic = BLOCK_DEAD;

    last->next = block->next;
    if (last->next) last->next->last = last;

    if (block->status & BLOCK_END_MEM)
    {
        if (block->status & BLOCK_START_MEM)
        {
            TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
            return last;
        }
        else
            last->status |= BLOCK_END_MEM;
    }

    last->full_size += block->full_size + sizeof(HeapBlock);

    return last;
}
Result<void*> kmalloc(usize size)
{
    if (!size) return (void*)BLOCK_MAGIC;

    size = align_up(size, 16UL);

    if (!heap_start)
    {
        const usize pages = get_pages_for_allocation(size);
        HeapBlock* const block = TRY(allocate_pages(pages));

        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
        block->magic = BLOCK_MAGIC;
        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
        block->next = block->last = nullptr;
        heap_start = block;

        check(!heap_end);

        heap_end = heap_start;
    }

    HeapBlock* block = heap_start;
    while (block)
    {
        // Trying to find a free block...
        if (is_block_free(block))
        {
            if (block->full_size < size)
            {
                block = block->next; // Let's not try to split this block, it's not big enough
                continue;
            }
            break; // We found a free block that's big enough!!
        }
        auto rc = split(block, size);
        if (rc.has_value())
        {
            block = rc.release_value(); // We managed to get a free block from a larger used block!!
            break;
        }
        block = block->next;
    }

    if (!block) // No free blocks, let's allocate a new one
    {
        usize pages = get_pages_for_allocation(size);
        block = TRY(allocate_pages(pages));

        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
        block->magic = BLOCK_MAGIC;
        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
        block->next = nullptr;
        block->last = heap_end;
        heap_end->next = block;

        heap_end = block;
    }

    block->req_size = size;
    block->status |= BLOCK_USED;

    return get_pointer_from_heap_block(block);
}
Result<void> kfree(void* ptr)
{
    if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
    if (!ptr) return {};

    HeapBlock* block = get_heap_block_for_pointer(ptr);

    if (block->magic != BLOCK_MAGIC)
    {
        if (block->magic == BLOCK_DEAD)
        {
            kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
        }
        else
            kerrorln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);

        return err(EFAULT);
    }

    if (is_block_free(block))
    {
        kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
        return err(EFAULT);
    }
    else
        block->status &= ~BLOCK_USED;

    if (block->next && is_block_free(block->next))
    {
        // The next block is also free, thus we can merge!
        TRY(combine_forward(block));
    }

    if (block->last && is_block_free(block->last))
    {
        // The last block is also free, thus we can merge!
        block = TRY(combine_backward(block));
    }

    if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
    {
        // This block spans an entire page range and could not be merged with a
        // neighbor, so we return its pages. Unlink it first (updating heap_start
        // and heap_end as needed) so nothing keeps pointing into the unmapped range.
        if (block == heap_start) heap_start = block->next;
        if (block == heap_end) heap_end = block->last;
        if (block->last) block->last->next = block->next;
        if (block->next) block->next->last = block->last;
        TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
    }

    return {};
}
Result<void*> krealloc(void* ptr, usize size)
{
    if (!ptr) return kmalloc(size);
    if (ptr == (void*)BLOCK_MAGIC) return kmalloc(size);
    if (!size)
    {
        TRY(kfree(ptr));
        return (void*)BLOCK_MAGIC;
    }

    HeapBlock* const block = get_heap_block_for_pointer(ptr);

    if (block->magic != BLOCK_MAGIC)
    {
        if (block->magic == BLOCK_DEAD)
        {
            kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
        }
        else
            kerrorln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);

        return err(EFAULT);
    }

    size = align_up(size, 16UL);

    if (is_block_free(block))
    {
        kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
        return err(EFAULT);
    }

    if (block->full_size >= size)
    {
        // This block is already large enough!
        block->req_size = size;
        return ptr;
    }

    void* const new_ptr = TRY(kmalloc(size));
    memcpy(new_ptr, ptr, block->req_size > size ? size : block->req_size);
    TRY(kfree(ptr));

    return new_ptr;
}
Result<void*> kcalloc(usize nmemb, usize size)
{
    // FIXME: Check for overflows.
    const usize realsize = nmemb * size;
    void* const ptr = TRY(kmalloc(realsize));
    return memset(ptr, 0, realsize);
}
void dump_heap_usage()
{
    kdbgln("-- Dumping usage stats for kernel heap:");
    if (!heap_start)
    {
        kdbgln("- Heap is not currently being used");
        return;
    }
    usize alloc_total = 0;
    usize alloc_used = 0;
    HeapBlock* block = heap_start;
    while (block)
    {
        if (is_block_free(block))
        {
            kdbgln("- Available block, of size %zu", block->full_size);
            alloc_total += block->full_size + sizeof(HeapBlock);
        }
        else
        {
            kdbgln("- Used block, of size %zu, of which %zu bytes are being used", block->full_size, block->req_size);
            alloc_total += block->full_size + sizeof(HeapBlock);
            alloc_used += block->req_size;
        }
        block = block->next;
    }

    kdbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
    kdbgln("-- Heap memory in use by the kernel: %zu bytes", alloc_used);
}
void* operator new(usize size) noexcept
{
    return kmalloc(size).value_or(nullptr);
}

void* operator new[](usize size) noexcept
{
    return kmalloc(size).value_or(nullptr);
}

void operator delete(void* p) noexcept
{
    kfree(p);
}

void operator delete[](void* p) noexcept
{
    kfree(p);
}

void operator delete(void* p, usize) noexcept
{
    kfree(p);
}

void operator delete[](void* p, usize) noexcept
{
    kfree(p);
}
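For reference, a minimal usage sketch of the API this file implements (the caller function `heap_smoke_test` is hypothetical, not part of the tree):

// Hypothetical kernel-side smoke test for the allocator above.
static Result<void> heap_smoke_test()
{
    void* a = TRY(kmalloc(64));    // sizes are rounded up to a 16-byte multiple
    a = TRY(krealloc(a, 256));     // grows in place if the block is big enough,
                                   // otherwise allocates, copies, and frees
    void* b = TRY(kcalloc(8, 32)); // zero-initialized allocation
    dump_heap_usage();             // logs per-block stats to the debug console
    TRY(kfree(a));
    TRY(kfree(b));
    return {};
}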