kernel, luna, libc: Move the heap code to a common zone, to be used by both kernel and userspace

This commit is contained in:
parent d864bda751
commit 5fb2ff09c7

@@ -1,422 +1,14 @@
 #include "memory/Heap.h"
-#include "Log.h"
 #include "arch/MMU.h"
-#include "arch/Serial.h"
-#include "memory/KernelVM.h"
 #include "memory/MemoryManager.h"
-#include <luna/Alignment.h>
-#include <luna/Alloc.h>
-#include <luna/CString.h>
-#include <luna/LinkedList.h>
-#include <luna/SafeArithmetic.h>
-#include <luna/ScopeGuard.h>
-#include <luna/SystemError.h>
+#include <luna/Heap.h>
 
-namespace std
+Result<void*> allocate_pages_impl(usize count)
 {
-    const nothrow_t nothrow;
+    return (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
 }
 
-static constexpr int BLOCK_USED = 1 << 0;
-static constexpr int BLOCK_START_MEM = 1 << 1;
-static constexpr int BLOCK_END_MEM = 1 << 2;
-
-static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
-static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
-
-static constexpr u8 KMALLOC_SCRUB_BYTE = 0xac;
-static constexpr u8 KFREE_SCRUB_BYTE = 0xde;
-
-static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
-
-struct HeapBlock : LinkedListNode<HeapBlock>
+Result<void> release_pages_impl(void* address, usize count)
 {
-    usize req_size;
-    usize full_size;
-    int status;
-    usize magic;
-};
-
-static_assert(sizeof(HeapBlock) == 48UL);
-
-static const isize HEAP_BLOCK_SIZE = 48;
-
-static LinkedList<HeapBlock> heap;
-
-static Result<HeapBlock*> allocate_pages(usize count)
-{
-    void* const ptr = (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
-    return (HeapBlock*)ptr;
-}
-
-static Result<void> release_pages(void* ptr, usize count)
-{
-    return MemoryManager::unmap_owned_and_free_vm((u64)ptr, count);
-}
-
-// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
-// pages.
-static usize get_pages_for_allocation(usize bytes)
-{
-    usize pages = get_blocks_from_size(bytes, ARCH_PAGE_SIZE);
-    if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
-    return pages;
-}
-
-static bool is_block_free(HeapBlock* block)
-{
-    return !(block->status & BLOCK_USED);
-}
-
-static usize space_available(HeapBlock* block)
-{
-    expect(!is_block_free(block), "Attempting to split a free block");
-    return block->full_size - block->req_size;
-}
-
-// The heap block is stored right behind a memory block.
-static HeapBlock* get_heap_block_for_pointer(void* ptr)
-{
-    return (HeapBlock*)offset_ptr(ptr, -HEAP_BLOCK_SIZE);
-}
-
-static void* get_pointer_from_heap_block(HeapBlock* block)
-{
-    return (void*)offset_ptr(block, HEAP_BLOCK_SIZE);
-}
-
-static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
-{
-    usize available = space_available(block);
-
-    available -= min; // reserve at least min size for the new block.
-
-    available -= (available /
-                  2); // reserve half of the rest for the new block, while still leaving another half for the old one.
-
-    available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary
-
-    return available + block->req_size;
-}
-
-static Option<HeapBlock*> split(HeapBlock* block, usize size)
-{
-    const usize available = space_available(block); // How much space can we steal from this block?
-    const usize old_size =
-        block->full_size; // Save the old value of this variable since we are going to use it after modifying it
-
-    if (available < (size + sizeof(HeapBlock)))
-        return {}; // This block hasn't got enough free space to hold the requested size.
-
-    const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
-    block->full_size = offset; // shrink the old block to fit this offset
-
-    HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));
-
-    new_block->magic = BLOCK_MAGIC;
-    new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
-    new_block->full_size = old_size - (offset + sizeof(HeapBlock));
-    heap.add_after(block, new_block);
-
-    block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in its memory range
-
-    return new_block;
-}
-
-static Result<void> combine_forward(HeapBlock* block)
-{
-    // This block ends a memory range, cannot be combined with blocks outside its range.
-    if (block->status & BLOCK_END_MEM) return {};
-
-    // The caller needs to ensure there is a next block.
-    HeapBlock* const next = heap.next(block).value();
-    // This block starts a memory range, cannot be combined with blocks outside its range.
-    if (next->status & BLOCK_START_MEM) return {};
-
-    heap.remove(next);
-    next->magic = BLOCK_DEAD;
-
-    if (next->status & BLOCK_END_MEM)
-    {
-        if (next->status & BLOCK_START_MEM)
-        {
-            const usize pages = get_blocks_from_size(next->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE);
-            TRY(release_pages(next, pages));
-            return {};
-        }
-        else
-            block->status |= BLOCK_END_MEM;
-    }
-
-    block->full_size += next->full_size + sizeof(HeapBlock);
-
-    return {};
-}
-
-static Result<HeapBlock*> combine_backward(HeapBlock* block)
-{
-    // This block starts a memory range, cannot be combined with blocks outside its range.
-    if (block->status & BLOCK_START_MEM) return block;
-
-    // The caller needs to ensure there is a last block.
-    HeapBlock* const last = heap.previous(block).value();
-    // This block ends a memory range, cannot be combined with blocks outside its range.
-    if (last->status & BLOCK_END_MEM) return block;
-
-    heap.remove(block);
-    block->magic = BLOCK_DEAD;
-
-    if (block->status & BLOCK_END_MEM)
-    {
-        if (block->status & BLOCK_START_MEM)
-        {
-            const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE);
-            TRY(release_pages(block, pages));
-            return last;
-        }
-        else
-            last->status |= BLOCK_END_MEM;
-    }
-
-    last->full_size += block->full_size + sizeof(HeapBlock);
-
-    return last;
-}
-
-Result<void*> kmalloc(usize size, bool should_scrub)
-{
-    if (!size) return (void*)BLOCK_MAGIC;
-
-    size = align_up<16>(size);
-
-    if (heap.count() == 0)
-    {
-        const usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
-        HeapBlock* const block = TRY(allocate_pages(pages));
-
-        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
-        block->magic = BLOCK_MAGIC;
-        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
-        heap.append(block);
-    }
-
-    Option<HeapBlock*> block = heap.first();
-    while (block.has_value())
-    {
-        HeapBlock* const current = block.value();
-        // Trying to find a free block...
-        if (is_block_free(current))
-        {
-            if (current->full_size < size)
-            {
-                block = heap.next(current);
-                continue;
-            }
-            break; // We found a free block that's big enough!!
-        }
-        auto rc = split(current, size);
-        if (rc.has_value())
-        {
-            block = rc.value(); // We managed to get a free block from a larger used block!!
-            break;
-        }
-        block = heap.next(current);
-    }
-
-    if (!block.has_value()) // No free blocks, let's allocate a new one
-    {
-        usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
-        HeapBlock* const current = TRY(allocate_pages(pages));
-
-        current->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
-        current->magic = BLOCK_MAGIC;
-        current->status = BLOCK_START_MEM | BLOCK_END_MEM;
-        heap.append(current);
-
-        block = current;
-    }
-
-    HeapBlock* const current = block.value();
-
-    current->req_size = size;
-    current->status |= BLOCK_USED;
-
-    if (should_scrub) { memset(get_pointer_from_heap_block(current), KMALLOC_SCRUB_BYTE, size); }
-
-    return get_pointer_from_heap_block(current);
-}
-
-Result<void> kfree(void* ptr)
-{
-    if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
-    if (!ptr) return {};
-
-    HeapBlock* block = get_heap_block_for_pointer(ptr);
-
-    if (block->magic != BLOCK_MAGIC)
-    {
-        if (block->magic == BLOCK_DEAD)
-        {
-            kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
-        }
-        else
-            kerrorln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
-
-        return err(EFAULT);
-    }
-
-    if (is_block_free(block))
-    {
-        kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
-        return err(EFAULT);
-    }
-    else
-        block->status &= ~BLOCK_USED;
-
-    memset(ptr, KFREE_SCRUB_BYTE, block->req_size);
-
-    auto maybe_next = heap.next(block);
-    if (maybe_next.has_value() && is_block_free(maybe_next.value()))
-    {
-        // The next block is also free, thus we can merge!
-        TRY(combine_forward(block));
-    }
-
-    auto maybe_last = heap.previous(block);
-    if (maybe_last.has_value() && is_block_free(maybe_last.value()))
-    {
-        // The last block is also free, thus we can merge!
-        block = TRY(combine_backward(block));
-    }
-
-    if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
-    {
-        heap.remove(block);
-        const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE);
-        TRY(release_pages(block, pages));
-    }
-
-    return {};
-}
-
-Result<void*> krealloc(void* ptr, usize size)
-{
-    if (!ptr) return kmalloc(size);
-    if (ptr == (void*)BLOCK_MAGIC) return kmalloc(size);
-    if (!size)
-    {
-        TRY(kfree(ptr));
-        return (void*)BLOCK_MAGIC;
-    }
-
-    HeapBlock* const block = get_heap_block_for_pointer(ptr);
-
-    if (block->magic != BLOCK_MAGIC)
-    {
-        if (block->magic == BLOCK_DEAD)
-        {
-            kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
-        }
-        else
-            kerrorln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
-
-        return err(EFAULT);
-    }
-
-    size = align_up<16>(size);
-
-    if (is_block_free(block))
-    {
-        kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
-        return err(EFAULT);
-    }
-
-    if (block->full_size >= size)
-    {
-        // This block is already large enough!
-        // FIXME: Scrub this if necessary.
-        block->req_size = size;
-        return ptr;
-    }
-
-    usize old_size = block->req_size;
-
-    void* const new_ptr = TRY(kmalloc(size, false));
-    memcpy(new_ptr, ptr, old_size > size ? size : old_size);
-    TRY(kfree(ptr));
-
-    if (old_size < size) { memset(offset_ptr(new_ptr, old_size), KMALLOC_SCRUB_BYTE, size - old_size); }
-
-    return new_ptr;
-}
-
-Result<void*> kcalloc(usize nmemb, usize size)
-{
-    const usize realsize = TRY(safe_mul(nmemb, size));
-    void* const ptr = TRY(kmalloc(realsize, false));
-    return memset(ptr, 0, realsize);
-}
-
-void dump_heap_usage()
-{
-    kdbgln("-- Dumping usage stats for kernel heap:");
-    if (!heap.count())
-    {
-        kdbgln("- Heap is not currently being used");
-        return;
-    }
-    usize alloc_total = 0;
-    usize alloc_used = 0;
-    auto block = heap.first();
-    while (block.has_value())
-    {
-        HeapBlock* current = block.value();
-        if (is_block_free(current))
-        {
-            kdbgln("- Available block (%p), of size %zu (%s%s)", (void*)current, current->full_size,
-                   current->status & BLOCK_START_MEM ? "b" : "-", current->status & BLOCK_END_MEM ? "e" : "-");
-            alloc_total += current->full_size + sizeof(HeapBlock);
-        }
-        else
-        {
-            kdbgln("- Used block (%p), of size %zu, of which %zu bytes are being used (%s%s)", (void*)current,
-                   current->full_size, current->req_size, current->status & BLOCK_START_MEM ? "b" : "-",
-                   current->status & BLOCK_END_MEM ? "e" : "-");
-            alloc_total += current->full_size + sizeof(HeapBlock);
-            alloc_used += current->req_size;
-        }
-        block = heap.next(current);
-    }
-
-    kdbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
-    kdbgln("-- Heap memory in use by the kernel: %zu bytes", alloc_used);
-}
-
-void* operator new(usize size, const std::nothrow_t&) noexcept
-{
-    return kmalloc(size).value_or(nullptr);
-}
-
-void* operator new[](usize size, const std::nothrow_t&) noexcept
-{
-    return kmalloc(size).value_or(nullptr);
-}
-
-void operator delete(void* p) noexcept
-{
-    kfree(p);
-}
-
-void operator delete[](void* p) noexcept
-{
-    kfree(p);
-}
-
-void operator delete(void* p, usize) noexcept
-{
-    kfree(p);
-}
-
-void operator delete[](void* p, usize) noexcept
-{
-    kfree(p);
+    return MemoryManager::unmap_owned_and_free_vm((u64)address, count);
 }
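
The kernel now provides only the two page-level hooks consumed by the shared allocator added below in luna/src/Heap.cpp. Any environment can adopt the heap by supplying these two functions; as a hypothetical sketch (not part of this commit), a hosted test build could back them with POSIX mmap/munmap:

    #include <luna/Heap.h>
    #include <sys/mman.h>

    Result<void*> allocate_pages_impl(usize count)
    {
        // Reserve count pages of anonymous read-write memory.
        void* ptr = mmap(nullptr, count * 4096UL, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) return err(ENOMEM);
        return ptr;
    }

    Result<void> release_pages_impl(void* address, usize count)
    {
        // Hand the pages back to the host OS.
        if (munmap(address, count * 4096UL) < 0) return err(EINVAL);
        return {};
    }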

@@ -1,10 +1,7 @@
 #pragma once
-#include <luna/PlacementNew.h>
-#include <luna/Result.h>
+#include <luna/Heap.h>
 
-Result<void*> kmalloc(usize size, bool should_scrub = true);
-Result<void*> kcalloc(usize nmemb, usize size);
-Result<void*> krealloc(void* ptr, usize size);
-Result<void> kfree(void* ptr);
-
-void dump_heap_usage();
+#define kmalloc malloc_impl
+#define kcalloc calloc_impl
+#define krealloc realloc_impl
+#define kfree free_impl
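
With these #defines, kernel call sites keep their familiar kmalloc/kfree spelling while resolving to the shared implementations declared in <luna/Heap.h>; for example (illustrative only), TRY(kmalloc(sizeof(Thread))) now expands to TRY(malloc_impl(sizeof(Thread))), so no kernel caller needs to change.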

@@ -4,7 +4,7 @@
 #include <bits/mmap-flags.h>
 #include <sys/types.h>
 
-#define PAGE_SIZE 4096
+#define PAGE_SIZE 4096UL
 
 #define MAP_FAILED (void*)-1
 
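
The UL suffix matters once PAGE_SIZE is multiplied by another integer: with plain 4096, an expression whose other operand is a plain int is evaluated in int arithmetic and can overflow, while 4096UL promotes the whole expression to unsigned long on an LP64 target. An illustrative example:

    int npages = 1 << 20;             // a million pages, i.e. 4 GiB
    usize bytes = npages * PAGE_SIZE; // int overflow with 4096, correct with 4096UL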

@@ -1,4 +1,6 @@
 #define _LUNA_SYSTEM_ERROR_EXTENSIONS
 #include <bits/errno-return.h>
+#include <luna/Heap.h>
+#include <sys/mman.h>
 #include <sys/syscall.h>
 #include <unistd.h>
@@ -17,3 +19,17 @@ extern "C"
         __errno_return(rc, int);
     }
 }
+
+Result<void*> allocate_pages_impl(usize count)
+{
+    long rc = syscall(SYS_allocate_memory, count * PAGE_SIZE, PROT_READ | PROT_WRITE);
+    if (rc < 0) { return err((int)-rc); }
+    return (void*)rc;
+}
+
+Result<void> release_pages_impl(void* address, usize count)
+{
+    long rc = syscall(SYS_deallocate_memory, address, count * PAGE_SIZE);
+    if (rc < 0) { return err((int)-rc); }
+    return {};
+}
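
These syscall wrappers give userspace the same two page-level hooks the kernel implements above, so luna/src/Heap.cpp links into libc unchanged. Presumably libc's malloc family then forwards to the *_impl functions from <luna/Heap.h>; a hypothetical sketch of such a wrapper (libc's malloc source is not part of this diff, and the has_error()/error() accessors are assumed here, the diff itself only uses has_value()/value()):

    #include <errno.h>
    #include <luna/Heap.h>

    extern "C" void* malloc(size_t size)
    {
        auto rc = malloc_impl(size);
        if (rc.has_error()) // assumed Result API
        {
            errno = rc.error();
            return nullptr;
        }
        return rc.value();
    }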

@@ -16,6 +16,7 @@ set(FREESTANDING_SOURCES
     src/Utf8.cpp
     src/TarStream.cpp
     src/DebugLog.cpp
+    src/Heap.cpp
 )
 
 set(SOURCES
@@ -34,6 +35,7 @@ target_compile_options(luna-freestanding PRIVATE -fno-asynchronous-unwind-tables
 target_compile_options(luna-freestanding PRIVATE -nostdlib -mcmodel=kernel)
 
 target_include_directories(luna-freestanding PUBLIC include/)
+target_include_directories(luna-freestanding PRIVATE ${LUNA_ROOT}/kernel/src)
 set_target_properties(luna-freestanding PROPERTIES CXX_STANDARD 20)
 
 add_library(luna ${SOURCES})
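
The extra PRIVATE include path lets the freestanding build of src/Heap.cpp find "arch/MMU.h" (see the USE_FREESTANDING branch below) without making kernel headers part of luna's public interface.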

luna/include/luna/Heap.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+#pragma once
+#include <luna/Result.h>
+
+extern Result<void*> allocate_pages_impl(usize count);
+extern Result<void> release_pages_impl(void* address, usize count);
+
+Result<void*> malloc_impl(usize size, bool should_scrub = true);
+Result<void*> calloc_impl(usize nmemb, usize size);
+Result<void*> realloc_impl(void* ptr, usize size);
+Result<void> free_impl(void* ptr);
+
+void dump_heap_usage();

luna/src/Heap.cpp (new file, 411 lines)
@@ -0,0 +1,411 @@
+#include <luna/Alignment.h>
+#include <luna/Alloc.h>
+#include <luna/CString.h>
+#include <luna/DebugLog.h>
+#include <luna/Heap.h>
+#include <luna/LinkedList.h>
+#include <luna/SafeArithmetic.h>
+#include <luna/ScopeGuard.h>
+#include <luna/SystemError.h>
+
+#ifdef USE_FREESTANDING
+#include "arch/MMU.h"
+#define PAGE_SIZE ARCH_PAGE_SIZE
+#else
+#include <sys/mman.h>
+#endif
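+
+// Page size selection: the freestanding (kernel) build takes PAGE_SIZE from
+// the architecture headers, the hosted build from <sys/mman.h>.
+// (USE_FREESTANDING is presumably defined for the luna-freestanding target;
+// the define itself is not visible in this diff.)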
+
+namespace std
+{
+    const nothrow_t nothrow;
+}
+
+static constexpr int BLOCK_USED = 1 << 0;
+static constexpr int BLOCK_START_MEM = 1 << 1;
+static constexpr int BLOCK_END_MEM = 1 << 2;
+
+static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
+static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
+
+static constexpr u8 KMALLOC_SCRUB_BYTE = 0xac;
+static constexpr u8 KFREE_SCRUB_BYTE = 0xde;
+
+static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
+
+struct HeapBlock : LinkedListNode<HeapBlock>
+{
+    usize req_size;
+    usize full_size;
+    int status;
+    usize magic;
+};
+
+static_assert(sizeof(HeapBlock) == 48UL);
+
+static const isize HEAP_BLOCK_SIZE = 48;
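+
+// Header size check: req_size (8) + full_size (8) + status (4, padded to 8)
+// + magic (8) account for 32 bytes, so the LinkedListNode<HeapBlock> base
+// presumably contributes the remaining 16 (two pointers) to reach the
+// asserted 48.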
+
+static LinkedList<HeapBlock> heap;
+
+// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
+// pages.
+static usize get_pages_for_allocation(usize bytes)
+{
+    usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
+    if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
+    return pages;
+}
+
+static bool is_block_free(HeapBlock* block)
+{
+    return !(block->status & BLOCK_USED);
+}
+
+static usize space_available(HeapBlock* block)
+{
+    expect(!is_block_free(block), "Attempting to split a free block");
+    return block->full_size - block->req_size;
+}
+
+// The heap block is stored right behind a memory block.
+static HeapBlock* get_heap_block_for_pointer(void* ptr)
+{
+    return (HeapBlock*)offset_ptr(ptr, -HEAP_BLOCK_SIZE);
+}
+
+static void* get_pointer_from_heap_block(HeapBlock* block)
+{
+    return (void*)offset_ptr(block, HEAP_BLOCK_SIZE);
+}
+
+static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
+{
+    usize available = space_available(block);
+
+    available -= min; // reserve at least min size for the new block.
+
+    available -= (available /
+                  2); // reserve half of the rest for the new block, while still leaving another half for the old one.
+
+    available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary
+
+    return available + block->req_size;
+}
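+
+// Worked example with illustrative numbers (not from the commit): for a used
+// block with full_size = 1024 and req_size = 256, space_available() is 768.
+// With min = 160 (a 112-byte request plus the 48-byte header), 768 - 160 = 608;
+// half of that (304) is held back for the old block, and 304 is already
+// 16-byte aligned, so the offset returned is 304 + 256 = 560: the old block
+// keeps 560 bytes, and the new block gets 1024 - (560 + 48) = 416 usable bytes.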
+
+static Option<HeapBlock*> split(HeapBlock* block, usize size)
+{
+    const usize available = space_available(block); // How much space can we steal from this block?
+    const usize old_size =
+        block->full_size; // Save the old value of this variable since we are going to use it after modifying it
+
+    if (available < (size + sizeof(HeapBlock)))
+        return {}; // This block hasn't got enough free space to hold the requested size.
+
+    const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
+    block->full_size = offset; // shrink the old block to fit this offset
+
+    HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));
+
+    new_block->magic = BLOCK_MAGIC;
+    new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
+    new_block->full_size = old_size - (offset + sizeof(HeapBlock));
+    heap.add_after(block, new_block);
+
+    block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in its memory range
+
+    return new_block;
+}
+
+static Result<void> combine_forward(HeapBlock* block)
+{
+    // This block ends a memory range, cannot be combined with blocks outside its range.
+    if (block->status & BLOCK_END_MEM) return {};
+
+    // The caller needs to ensure there is a next block.
+    HeapBlock* const next = heap.next(block).value();
+    // This block starts a memory range, cannot be combined with blocks outside its range.
+    if (next->status & BLOCK_START_MEM) return {};
+
+    heap.remove(next);
+    next->magic = BLOCK_DEAD;
+
+    if (next->status & BLOCK_END_MEM)
+    {
+        if (next->status & BLOCK_START_MEM)
+        {
+            const usize pages = get_blocks_from_size(next->full_size + sizeof(HeapBlock), PAGE_SIZE);
+            TRY(release_pages_impl(next, pages));
+            return {};
+        }
+        else
+            block->status |= BLOCK_END_MEM;
+    }
+
+    block->full_size += next->full_size + sizeof(HeapBlock);
+
+    return {};
+}
+
+static Result<HeapBlock*> combine_backward(HeapBlock* block)
+{
+    // This block starts a memory range, cannot be combined with blocks outside its range.
+    if (block->status & BLOCK_START_MEM) return block;
+
+    // The caller needs to ensure there is a last block.
+    HeapBlock* const last = heap.previous(block).value();
+    // This block ends a memory range, cannot be combined with blocks outside its range.
+    if (last->status & BLOCK_END_MEM) return block;
+
+    heap.remove(block);
+    block->magic = BLOCK_DEAD;
+
+    if (block->status & BLOCK_END_MEM)
+    {
+        if (block->status & BLOCK_START_MEM)
+        {
+            const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
+            TRY(release_pages_impl(block, pages));
+            return last;
+        }
+        else
+            last->status |= BLOCK_END_MEM;
+    }
+
+    last->full_size += block->full_size + sizeof(HeapBlock);
+
+    return last;
+}
+
+Result<void*> malloc_impl(usize size, bool should_scrub)
+{
+    if (!size) return (void*)BLOCK_MAGIC;
+
+    size = align_up<16>(size);
+
+    if (heap.count() == 0)
+    {
+        const usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
+        HeapBlock* const block = (HeapBlock*)TRY(allocate_pages_impl(pages));
+
+        block->full_size = (pages * PAGE_SIZE) - sizeof(HeapBlock);
+        block->magic = BLOCK_MAGIC;
+        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
+        heap.append(block);
+    }
+
+    Option<HeapBlock*> block = heap.first();
+    while (block.has_value())
+    {
+        HeapBlock* const current = block.value();
+        // Trying to find a free block...
+        if (is_block_free(current))
+        {
+            if (current->full_size < size)
+            {
+                block = heap.next(current);
+                continue;
+            }
+            break; // We found a free block that's big enough!!
+        }
+        auto rc = split(current, size);
+        if (rc.has_value())
+        {
+            block = rc.value(); // We managed to get a free block from a larger used block!!
+            break;
+        }
+        block = heap.next(current);
+    }
+
+    if (!block.has_value()) // No free blocks, let's allocate a new one
+    {
+        usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
+        HeapBlock* const current = (HeapBlock*)TRY(allocate_pages_impl(pages));
+
+        current->full_size = (pages * PAGE_SIZE) - sizeof(HeapBlock);
+        current->magic = BLOCK_MAGIC;
+        current->status = BLOCK_START_MEM | BLOCK_END_MEM;
+        heap.append(current);
+
+        block = current;
+    }
+
+    HeapBlock* const current = block.value();
+
+    current->req_size = size;
+    current->status |= BLOCK_USED;
+
+    if (should_scrub) { memset(get_pointer_from_heap_block(current), KMALLOC_SCRUB_BYTE, size); }
+
+    return get_pointer_from_heap_block(current);
+}
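+
+// Usage sketch (illustrative, not part of the commit): callers unwrap the
+// Result with TRY, e.g. u8* buf = (u8*)TRY(malloc_impl(256)). With
+// should_scrub left at its default of true, the returned bytes arrive
+// pre-filled with the 0xac scrub pattern rather than zeroed.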
+
+Result<void> free_impl(void* ptr)
+{
+    if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
+    if (!ptr) return {};
+
+    HeapBlock* block = get_heap_block_for_pointer(ptr);
+
+    if (block->magic != BLOCK_MAGIC)
+    {
+        if (block->magic == BLOCK_DEAD) { dbgln("ERROR: Attempt to free memory at %p, which was already freed", ptr); }
+        else
+            dbgln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
+
+        return err(EFAULT);
+    }
+
+    if (is_block_free(block))
+    {
+        dbgln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
+        return err(EFAULT);
+    }
+    else
+        block->status &= ~BLOCK_USED;
+
+    memset(ptr, KFREE_SCRUB_BYTE, block->req_size);
+
+    auto maybe_next = heap.next(block);
+    if (maybe_next.has_value() && is_block_free(maybe_next.value()))
+    {
+        // The next block is also free, thus we can merge!
+        TRY(combine_forward(block));
+    }
+
+    auto maybe_last = heap.previous(block);
+    if (maybe_last.has_value() && is_block_free(maybe_last.value()))
+    {
+        // The last block is also free, thus we can merge!
+        block = TRY(combine_backward(block));
+    }
+
+    if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
+    {
+        heap.remove(block);
+        const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
+        TRY(release_pages_impl(block, pages));
+    }
+
+    return {};
+}
+
+Result<void*> realloc_impl(void* ptr, usize size)
+{
+    if (!ptr) return malloc_impl(size);
+    if (ptr == (void*)BLOCK_MAGIC) return malloc_impl(size);
+    if (!size)
+    {
+        TRY(free_impl(ptr));
+        return (void*)BLOCK_MAGIC;
+    }
+
+    HeapBlock* const block = get_heap_block_for_pointer(ptr);
+
+    if (block->magic != BLOCK_MAGIC)
+    {
+        if (block->magic == BLOCK_DEAD)
+        {
+            dbgln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
+        }
+        else
+            dbgln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
+
+        return err(EFAULT);
+    }
+
+    size = align_up<16>(size);
+
+    if (is_block_free(block))
+    {
+        dbgln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
+        return err(EFAULT);
+    }
+
+    if (block->full_size >= size)
+    {
+        // This block is already large enough!
+        // FIXME: Scrub this if necessary.
+        block->req_size = size;
+        return ptr;
+    }
+
+    usize old_size = block->req_size;
+
+    void* const new_ptr = TRY(malloc_impl(size, false));
+    memcpy(new_ptr, ptr, old_size > size ? size : old_size);
+    TRY(free_impl(ptr));
+
+    if (old_size < size) { memset(offset_ptr(new_ptr, old_size), KMALLOC_SCRUB_BYTE, size - old_size); }
+
+    return new_ptr;
+}
+
+Result<void*> calloc_impl(usize nmemb, usize size)
+{
+    const usize realsize = TRY(safe_mul(nmemb, size));
+    void* const ptr = TRY(malloc_impl(realsize, false));
+    return memset(ptr, 0, realsize);
+}
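+
+// safe_mul() makes the nmemb * size computation overflow-checked: for example
+// (illustrative), calloc_impl(1UL << 33, 1UL << 33) fails with an error on a
+// 64-bit target instead of wrapping around to a tiny allocation.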
+
+void dump_heap_usage()
+{
+    dbgln("-- Dumping usage stats for heap:");
+    if (!heap.count())
+    {
+        dbgln("- Heap is not currently being used");
+        return;
+    }
+    usize alloc_total = 0;
+    usize alloc_used = 0;
+    auto block = heap.first();
+    while (block.has_value())
+    {
+        HeapBlock* current = block.value();
+        if (is_block_free(current))
+        {
+            dbgln("- Available block (%p), of size %zu (%s%s)", (void*)current, current->full_size,
+                  current->status & BLOCK_START_MEM ? "b" : "-", current->status & BLOCK_END_MEM ? "e" : "-");
+            alloc_total += current->full_size + sizeof(HeapBlock);
+        }
+        else
+        {
+            dbgln("- Used block (%p), of size %zu, of which %zu bytes are being used (%s%s)", (void*)current,
+                  current->full_size, current->req_size, current->status & BLOCK_START_MEM ? "b" : "-",
+                  current->status & BLOCK_END_MEM ? "e" : "-");
+            alloc_total += current->full_size + sizeof(HeapBlock);
+            alloc_used += current->req_size;
+        }
+        block = heap.next(current);
+    }
+
+    dbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
+    dbgln("-- Heap memory in use: %zu bytes", alloc_used);
+}
+
+void* operator new(usize size, const std::nothrow_t&) noexcept
+{
+    return malloc_impl(size).value_or(nullptr);
+}
+
+void* operator new[](usize size, const std::nothrow_t&) noexcept
+{
+    return malloc_impl(size).value_or(nullptr);
+}
+
+void operator delete(void* p) noexcept
+{
+    free_impl(p);
+}
+
+void operator delete[](void* p) noexcept
+{
+    free_impl(p);
+}
+
+void operator delete(void* p, usize) noexcept
+{
+    free_impl(p);
+}
+
+void operator delete[](void* p, usize) noexcept
+{
+    free_impl(p);
+}
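
These operators let C++ code on either side of the kernel/userspace boundary allocate through the shared heap. For example (illustrative), auto* p = new (std::nothrow) u8[64]; goes through the nothrow operator new[] above, and value_or(nullptr) turns an allocation failure into the null return the nothrow forms promise, since malloc_impl reports errors through Result rather than by throwing.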