// Luna/kernel/src/memory/Heap.cpp


#include "memory/Heap.h"
2022-11-30 16:16:36 +00:00
#include "Log.h"
#include "arch/MMU.h"
#include "arch/Serial.h"
#include "memory/KernelVM.h"
#include "memory/MemoryManager.h"
#include <luna/Alignment.h>
#include <luna/LinkedList.h>
2022-12-06 14:44:21 +00:00
#include <luna/SafeArithmetic.h>
2022-12-08 14:01:04 +00:00
#include <luna/ScopeGuard.h>
#include <luna/String.h>
#include <luna/SystemError.h>

static constexpr int BLOCK_USED = 1 << 0;
static constexpr int BLOCK_START_MEM = 1 << 1;
static constexpr int BLOCK_END_MEM = 1 << 2;

static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;

static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;

struct HeapBlock : DoublyLinkedListNode<HeapBlock>
{
    usize req_size;  // The size the caller originally requested.
    usize full_size; // The full capacity of the block's usable area.
    int status;      // Bitmask of the BLOCK_* flags above.
    usize magic;     // BLOCK_MAGIC while valid, BLOCK_DEAD once freed.
};
static_assert(sizeof(HeapBlock) == 48UL);
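
// Layout sketch: every allocation is a 48-byte HeapBlock header followed directly by
// the caller-visible memory, so the heap is a chain of
//
//   | HeapBlock | payload (full_size bytes) | HeapBlock | payload | ...
//
// full_size is the capacity of the payload area, and req_size (<= full_size) is what
// the caller actually asked for.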

static const isize HEAP_BLOCK_SIZE = 48;

static DoublyLinkedList<HeapBlock> heap;

static Result<HeapBlock*> allocate_pages(usize count)
{
    u64 virt = TRY(KernelVM::alloc_several_pages(count));

    // If mapping physical memory fails below, give the virtual address range back to KernelVM.
    auto vm_guard = make_scope_guard([&] { KernelVM::free_several_pages(virt, count).value(); });

    void* const ptr = (void*)TRY(MemoryManager::alloc_at(virt, count, MMU::ReadWrite | MMU::NoExecute));

    vm_guard.deactivate();

    return (HeapBlock*)ptr;
}

static Result<void> release_pages(void* ptr, usize count)
{
    TRY(KernelVM::free_several_pages((u64)ptr, count));
    return MemoryManager::unmap_owned((u64)ptr, count);
}

// If we're allocating a large amount of memory, map enough pages for it; otherwise,
// just use the default minimum number of pages.
static usize get_pages_for_allocation(usize bytes)
{
    usize pages = get_blocks_from_size(bytes, ARCH_PAGE_SIZE);
    if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
    return pages;
}
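
// Illustrative example (assuming 4 KiB pages, a hypothetical value for ARCH_PAGE_SIZE):
// a 100-byte request rounds up to 1 page and is then clamped to the 4-page minimum,
// while a 20000-byte request needs ceil(20000 / 4096) = 5 pages and is used as-is.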

static bool is_block_free(HeapBlock* block)
{
    return !(block->status & BLOCK_USED);
}

static usize space_available(HeapBlock* block)
{
    expect(!is_block_free(block), "Attempting to split a free block");
    return block->full_size - block->req_size;
}

// The heap block header is stored directly before the memory it manages.
static HeapBlock* get_heap_block_for_pointer(void* ptr)
{
    return (HeapBlock*)offset_ptr(ptr, -HEAP_BLOCK_SIZE);
}

static void* get_pointer_from_heap_block(HeapBlock* block)
{
    return (void*)offset_ptr(block, HEAP_BLOCK_SIZE);
}

static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
{
    usize available = space_available(block);

    available -= min; // Reserve at least min bytes for the new block.

    // Reserve half of the rest for the new block, while still leaving the other half for the old one.
    available -= (available / 2);

    available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary.

    return available + block->req_size;
}
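
// Worked example (illustrative numbers, not from the original sources): take a used
// block with full_size = 1024 and req_size = 256, being split off for a new block of
// min = 176 bytes (128 requested + 48 for the header). available starts at 768, drops
// to 592 after reserving min, to 296 after halving, and aligns down to 288. The
// returned offset is 288 + 256 = 544, so the old block keeps 544 bytes of capacity
// and the remainder goes to the new block.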

static Option<HeapBlock*> split(HeapBlock* block, usize size)
{
    const usize available = space_available(block); // How much space can we steal from this block?
    const usize old_size = block->full_size; // Save the old value of this variable, since we are going to modify it.

    if (available < (size + sizeof(HeapBlock)))
        return {}; // This block hasn't got enough free space to hold the requested size.

    const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
    block->full_size = offset; // Shrink the old block to fit this offset.

    HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));
    new_block->magic = BLOCK_MAGIC;
    new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
    new_block->full_size = old_size - (offset + sizeof(HeapBlock));
    heap.append_after(block, new_block);

    block->status &= ~BLOCK_END_MEM; // This block is no longer the last block in its memory range.

    return new_block;
}
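
// Sketch of the layout before and after split(), using the illustrative numbers from
// the worked example above:
//
//   before: | header | <-------------- full_size = 1024 -------------->
//   after:  | header | full_size = 544 | new header (48) | full_size = 432 |
//
// The new block's capacity is old_size - (offset + sizeof(HeapBlock)) = 1024 - 592 = 432,
// which is always at least the min passed to get_fair_offset_to_split_at().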

static Result<void> combine_forward(HeapBlock* block)
{
    // The caller needs to ensure there is a next block.
    HeapBlock* const next = heap.next(block).value();
    heap.remove(next);
    next->magic = BLOCK_DEAD;

    if (next->status & BLOCK_END_MEM)
    {
        if (next->status & BLOCK_START_MEM)
        {
            // The next block was an entire memory range on its own; release it instead of merging.
            TRY(release_pages(next, get_blocks_from_size(next->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
            return {};
        }
        else
            block->status |= BLOCK_END_MEM;
    }

    block->full_size += next->full_size + sizeof(HeapBlock);

    return {};
}

static Result<HeapBlock*> combine_backward(HeapBlock* block)
{
    // The caller needs to ensure there is a previous block.
    HeapBlock* const last = heap.previous(block).value();
    heap.remove(block);
    block->magic = BLOCK_DEAD;

    if (block->status & BLOCK_END_MEM)
    {
        if (block->status & BLOCK_START_MEM)
        {
            // This block was an entire memory range on its own; release it instead of merging.
            TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
            return last;
        }
        else
            last->status |= BLOCK_END_MEM;
    }

    last->full_size += block->full_size + sizeof(HeapBlock);

    return last;
}
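
// Both combiners leave the list consistent: combine_forward() absorbs the next block
// into this one, while combine_backward() absorbs this block into the previous one and
// returns the surviving block. In either direction, a block carrying both
// BLOCK_START_MEM and BLOCK_END_MEM is an entire page range with nothing else in it,
// so its pages are handed back to the VM instead of being merged.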

Result<void*> kmalloc(usize size)
{
    if (!size) return (void*)BLOCK_MAGIC; // kmalloc(0) returns a sentinel value which kfree() recognizes.

    size = align_up<16>(size);

    if (!heap.first().has_value())
    {
        const usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
        HeapBlock* const block = TRY(allocate_pages(pages));

        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
        block->magic = BLOCK_MAGIC;
        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
        heap.append(block);
    }

    HeapBlock* block = heap.expect_first();
    while (block)
    {
        // Trying to find a free block...
        if (is_block_free(block))
        {
            if (block->full_size < size)
            {
                block = heap.next(block).value_or(nullptr);
                continue;
            }
            break; // We found a free block that's big enough!!
        }
        auto rc = split(block, size);
        if (rc.has_value())
        {
            block = rc.value(); // We managed to get a free block from a larger used block!!
            break;
        }
        block = heap.next(block).value_or(nullptr);
    }

    if (!block) // No free blocks, let's allocate a new one
    {
        usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
        block = TRY(allocate_pages(pages));

        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
        block->magic = BLOCK_MAGIC;
        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
        heap.append(block);
    }

    block->req_size = size;
    block->status |= BLOCK_USED;

    return get_pointer_from_heap_block(block);
}
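
// Minimal usage sketch (hypothetical kernel-side caller, assuming the TRY()
// propagation used throughout this file):
//
//     Result<void> example()
//     {
//         u8* buffer = (u8*)TRY(kmalloc(256));
//         memset(buffer, 0, 256);
//         TRY(kfree(buffer));
//         return {};
//     }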

Result<void> kfree(void* ptr)
{
    if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to kmalloc(0).
    if (!ptr) return {};

    HeapBlock* block = get_heap_block_for_pointer(ptr);
    if (block->magic != BLOCK_MAGIC)
    {
        if (block->magic == BLOCK_DEAD)
        {
            kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
        }
        else
            kerrorln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
        return err(EFAULT);
    }

    if (is_block_free(block))
    {
        kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
        return err(EFAULT);
    }
    else
        block->status &= ~BLOCK_USED;

    auto maybe_next = heap.next(block);
    if (maybe_next.has_value() && is_block_free(maybe_next.value()))
    {
        // The next block is also free, thus we can merge!
        TRY(combine_forward(block));
    }

    auto maybe_last = heap.previous(block);
    if (maybe_last.has_value() && is_block_free(maybe_last.value()))
    {
        // The last block is also free, thus we can merge!
        block = TRY(combine_backward(block));
    }

    if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
    {
        heap.remove(block);
        TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
    }

    return {};
}
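
// Illustrative trace: freeing the middle one of three blocks whose neighbors are both
// already free first clears BLOCK_USED, then combine_forward() absorbs the right
// neighbor, then combine_backward() folds everything into the left neighbor. If the
// surviving block now carries both BLOCK_START_MEM and BLOCK_END_MEM, its whole page
// range is empty and gets returned to the VM.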

Result<void*> krealloc(void* ptr, usize size)
{
    if (!ptr) return kmalloc(size);
    if (ptr == (void*)BLOCK_MAGIC) return kmalloc(size);

    if (!size)
    {
        TRY(kfree(ptr));
        return (void*)BLOCK_MAGIC;
    }

    HeapBlock* const block = get_heap_block_for_pointer(ptr);
    if (block->magic != BLOCK_MAGIC)
    {
        if (block->magic == BLOCK_DEAD)
        {
            kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
        }
        else
            kerrorln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
        return err(EFAULT);
    }

    size = align_up<16>(size);

    if (is_block_free(block))
    {
        kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
        return err(EFAULT);
    }

    if (block->full_size >= size)
    {
        // This block is already large enough!
        block->req_size = size;
        return ptr;
    }

    void* const new_ptr = TRY(kmalloc(size));
    memcpy(new_ptr, ptr, block->req_size > size ? size : block->req_size);
    TRY(kfree(ptr));

    return new_ptr;
}
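
// Behavior sketch: krealloc() resizes in place whenever the block's existing capacity
// already covers the rounded-up size (only req_size changes); otherwise it falls back
// to kmalloc() + memcpy() + kfree(), so pointers obtained earlier must be considered
// invalid after a growing krealloc().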

Result<void*> kcalloc(usize nmemb, usize size)
{
    const usize realsize = TRY(safe_mul(nmemb, size));
    void* const ptr = TRY(kmalloc(realsize));
    return memset(ptr, 0, realsize);
}
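
// safe_mul() guards the nmemb * size multiplication, so a request whose product
// overflows usize fails cleanly with an error instead of wrapping around and
// allocating a too-small buffer (the classic calloc overflow bug).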

void dump_heap_usage()
{
    kdbgln("-- Dumping usage stats for kernel heap:");
    if (!heap.count())
    {
        kdbgln("- Heap is not currently being used");
        return;
    }

    usize alloc_total = 0;
    usize alloc_used = 0;

    HeapBlock* block = heap.expect_first();
    while (block)
    {
        if (is_block_free(block))
        {
            kdbgln("- Available block, of size %zu", block->full_size);
            alloc_total += block->full_size + sizeof(HeapBlock);
        }
        else
        {
            kdbgln("- Used block, of size %zu, of which %zu bytes are being used", block->full_size, block->req_size);
            alloc_total += block->full_size + sizeof(HeapBlock);
            alloc_used += block->req_size;
        }
        block = heap.next(block).value_or(nullptr);
    }

    kdbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
    kdbgln("-- Heap memory in use by the kernel: %zu bytes", alloc_used);
}

void* operator new(usize size) noexcept
{
    return kmalloc(size).value_or(nullptr);
}

void* operator new[](usize size) noexcept
{
    return kmalloc(size).value_or(nullptr);
}

void operator delete(void* p) noexcept
{
    kfree(p);
}

void operator delete[](void* p) noexcept
{
    kfree(p);
}

void operator delete(void* p, usize) noexcept
{
    kfree(p);
}

void operator delete[](void* p, usize) noexcept
{
    kfree(p);
}
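
// These overloads route all kernel-side new/delete through kmalloc()/kfree(). Since
// the noexcept operator new returns nullptr on failure instead of throwing, callers
// must check the result, e.g. (hypothetical caller; Thread is an assumed type):
//
//     Thread* thread = new Thread;
//     if (!thread) return err(ENOMEM);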