Compare commits


3 Commits

SHA1 Message Date

caf6d1563c  Kernel: Add a heap memory allocator  2022-11-20 15:15:26 +01:00
    This is the first time I've actually dropped liballoc in favor of writing my own implementation.
    Until now, malloc() and friends looked so complicated that I preferred to let a nice external library do the job.
    But I've decided to try writing my own allocator, and now we have heap memory without any 3rd-party code!

3815f9aa9f  Introduce an offset_ptr function to avoid quirky C pointer arithmetic  2022-11-20 15:12:18 +01:00

cb88630d86  Bugfix: Make alloc_at return the start of the virtual memory range instead of the end  2022-11-20 15:11:53 +01:00
6 changed files with 361 additions and 2 deletions

kernel/CMakeLists.txt

@@ -2,6 +2,7 @@ set(SOURCES
src/main.cpp
src/video/Framebuffer.cpp
src/memory/MemoryManager.cpp
+src/memory/Heap.cpp
src/boot/Init.cpp
src/arch/Serial.cpp
src/arch/Timer.cpp

kernel/src/main.cpp

@@ -3,6 +3,7 @@
#include "arch/Serial.h"
#include "arch/Timer.h"
#include "boot/Init.h"
#include "memory/Heap.h"
#include "memory/MemoryManager.h"
#include "video/Framebuffer.h"
@@ -101,6 +102,16 @@ extern "C" [[noreturn]] void _start()
usize start = 0;
+int* mem = (int*)kmalloc(sizeof(int)).release_value();
+*(volatile int*)mem = 6;
+Serial::printf("Read %d from memory\n", *mem);
+mem = (int*)krealloc(mem, 60).release_value();
+Serial::printf("Resized to %p\n", (void*)mem);
+kfree(mem);
while (1)
{
while ((Timer::ticks_ms() - start) < 20) { CPU::wait_for_interrupt(); }
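
The lines added to _start are a quick smoke test of the new allocator: allocate an int, write it and read it back over serial, grow the allocation with krealloc, and finally kfree it.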

kernel/src/memory/Heap.cpp (new file, 331 lines)

@@ -0,0 +1,331 @@
#include "memory/Heap.h"
#include "arch/MMU.h"
#include "arch/Serial.h"
#include "memory/MemoryManager.h"
#include <Alignment.h>
#include <String.h>
static constexpr int BLOCK_USED = 1 << 0;
static constexpr int BLOCK_START_MEM = 1 << 1;
static constexpr int BLOCK_END_MEM = 1 << 2;
static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo "malloc\!" | hexdump -C
static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
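// Each allocation is preceded by a HeapBlock header; together the headers form a doubly-linked list spanning one or
// more mapped page ranges. BLOCK_START_MEM and BLOCK_END_MEM mark the first and last block of a contiguous range, so
// that a fully merged free range can be unmapped and returned to the MemoryManager.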
struct HeapBlock
{
usize req_size;
usize full_size;
int status;
HeapBlock* next;
HeapBlock* last;
usize magic;
};
static_assert(sizeof(HeapBlock) == 48UL);
static HeapBlock* heap_start = nullptr;
static HeapBlock* heap_end = nullptr;
static usize start_addr = 0xffffffff80000000;
// FIXME: Keep track of virtual address space usage. For now, since the address space is so huge, we can just start
// at a fairly large address and assume we'll never run into anything, but this will probably bite us in the future.
static Result<HeapBlock*> allocate_pages(usize count)
{
void* ptr = (void*)TRY(MemoryManager::alloc_at(start_addr, count, MMU::ReadWrite | MMU::NoExecute));
if (ptr) start_addr += (count * ARCH_PAGE_SIZE);
return (HeapBlock*)ptr;
}
static Result<void> release_pages(void* ptr, usize count)
{
return MemoryManager::unmap_owned((u64)ptr, count);
}
// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
// pages.
static usize get_pages_for_allocation(usize bytes)
{
usize pages = get_blocks_from_size(bytes + sizeof(HeapBlock), ARCH_PAGE_SIZE); // account for the block header
if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
return pages;
}
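// Example: kmalloc(60) needs ceil((60 + 48) / 4096) = 1 page, which is bumped up to the 4-page minimum; the
// resulting block has (4 * 4096) - 48 = 16336 usable bytes.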
static bool is_block_free(HeapBlock* block)
{
return !(block->status & BLOCK_USED);
}
static usize space_available(HeapBlock* block)
{
check(!is_block_free(block));
return block->full_size - block->req_size;
}
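// The 48-byte offset used below is sizeof(HeapBlock), as guaranteed by the static_assert above: a block's user
// pointer sits immediately after its header.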
static HeapBlock* get_heap_block_for_pointer(void* ptr)
{
return (HeapBlock*)offset_ptr(ptr, -48);
}
static void* get_pointer_from_heap_block(HeapBlock* block)
{
return (void*)offset_ptr(block, 48);
}
static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
{
usize available = space_available(block);
available -= min; // reserve at least min size for the new block.
available -= (available / 2); // reserve half of the rest for the new block, leaving the other half for the old one.
return available + block->req_size;
}
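// Example: splitting a block with req_size = 16 and full_size = 496 with min = 64: available = 480, minus min = 416,
// minus half of the rest = 208, so the offset is 208 + 16 = 224. The old block keeps 224 bytes of capacity and the
// new block gets 496 - (224 + 48) = 224 bytes.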
static Result<HeapBlock*> split(HeapBlock* block, usize size)
{
usize available = space_available(block); // How much space can we steal from this block?
usize old_size = block->full_size; // Save the old size, since we'll need it after shrinking the block below
if (available < (size + sizeof(HeapBlock))) return err;
usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
block->full_size = offset; // shrink the old block to fit this offset
HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));
new_block->magic = BLOCK_MAGIC;
new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
new_block->full_size = old_size - (offset + sizeof(HeapBlock));
new_block->next = block->next;
new_block->last = block;
block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in this memory range
block->next = new_block;
return new_block;
}
static Result<void> combine_forward(HeapBlock* block)
{
HeapBlock* next = block->next;
if (next == heap_end) heap_end = block;
next->magic = BLOCK_DEAD;
block->next = block->next->next;
if (block->next) block->next->last = block;
if (next->status & BLOCK_END_MEM)
{
if (next->status & BLOCK_START_MEM)
{
TRY(release_pages(next, get_blocks_from_size(next->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
return {};
}
else
block->status |= BLOCK_END_MEM;
}
block->full_size += next->full_size + sizeof(HeapBlock);
return {};
}
static Result<HeapBlock*> combine_backward(HeapBlock* block)
{
HeapBlock* last = block->last;
if (block == heap_end) heap_end = last;
block->magic = BLOCK_DEAD;
last->next = block->next;
if (last->next) last->next->last = last;
if (block->status & BLOCK_END_MEM)
{
if (block->status & BLOCK_START_MEM)
{
TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
return last;
}
else
last->status |= BLOCK_END_MEM;
}
last->full_size += block->full_size + sizeof(HeapBlock);
return last;
}
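// Allocation strategy: walk the block list and take the first free block that is large enough; for used blocks, try
// to carve a new free block out of their spare capacity (split). If the walk finds nothing, map a fresh page range.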
Result<void*> kmalloc(usize size)
{
if (!size) return (void*)BLOCK_MAGIC; // kmalloc(0) returns a sentinel value that kfree and krealloc recognize
size = align_up(size, 16UL);
if (!heap_start)
{
usize pages = get_pages_for_allocation(size);
auto* block = TRY(allocate_pages(pages));
block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
block->magic = BLOCK_MAGIC;
block->status = BLOCK_START_MEM | BLOCK_END_MEM;
block->next = block->last = nullptr;
heap_start = block;
if (!heap_end) heap_end = heap_start;
}
HeapBlock* block = heap_start;
while (block)
{
// Trying to find a free block...
if (is_block_free(block))
{
if (block->full_size < size)
{
block = block->next; // Let's not try to split this block, it's not big enough
continue;
}
break; // We found a free block that's big enough!!
}
auto rc = split(block, size);
if (rc.has_value())
{
block = rc.release_value(); // We managed to get a free block from a larger used block!!
break;
}
block = block->next;
}
if (!block) // No free blocks, let's allocate a new one
{
usize pages = get_pages_for_allocation(size);
block = TRY(allocate_pages(pages));
block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
block->magic = BLOCK_MAGIC;
block->status = BLOCK_START_MEM | BLOCK_END_MEM;
block->next = nullptr;
block->last = heap_end;
heap_end->next = block; // link the new block into the list so later searches and merges can reach it
heap_end = block;
}
block->req_size = size;
block->status |= BLOCK_USED;
return get_pointer_from_heap_block(block);
}
Result<void> kfree(void* ptr)
{
if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
if (!ptr) return {};
HeapBlock* block = get_heap_block_for_pointer(ptr);
if (block->magic != BLOCK_MAGIC)
{
if (block->magic == BLOCK_DEAD)
{
Serial::printf("ERROR: Attempt to free memory at %p, which was already freed\n", ptr);
}
else
Serial::printf("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc\n", ptr);
return err;
}
if (is_block_free(block))
{
Serial::printf("ERROR: Attempt to free memory at %p, which was already freed\n", ptr);
return err;
}
else
block->status &= ~BLOCK_USED;
if (block->next && is_block_free(block->next))
{
// The next block is also free, thus we can merge!
TRY(combine_forward(block));
}
if (block->last && is_block_free(block->last))
{
// The last block is also free, thus we can merge!
block = TRY(combine_backward(block));
}
if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
{
if (block == heap_start) heap_start = block->next;
if (block == heap_end) heap_end = block->last;
if (block->last) block->last->next = block->next;
if (block->next) block->next->last = block->last;
TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
}
return {};
}
Result<void*> krealloc(void* ptr, usize size)
{
if (!ptr) return kmalloc(size);
if (ptr == (void*)BLOCK_MAGIC) return kmalloc(size);
if (!size)
{
TRY(kfree(ptr));
return (void*)BLOCK_MAGIC;
}
HeapBlock* block = get_heap_block_for_pointer(ptr);
if (block->magic != BLOCK_MAGIC)
{
if (block->magic == BLOCK_DEAD)
{
Serial::printf("ERROR: Attempt to realloc memory at %p, which was already freed\n", ptr);
}
else
Serial::printf("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc\n", ptr);
return err;
}
size = align_up(size, 16UL);
if (is_block_free(block))
{
Serial::printf("ERROR: Attempt to realloc memory at %p, which was already freed\n", ptr);
return err;
}
if (block->full_size >= size)
{
// This block is already large enough!
block->req_size = size;
return ptr;
}
void* new_ptr = TRY(kmalloc(size));
memcpy(new_ptr, ptr, block->req_size > size ? size : block->req_size);
TRY(kfree(ptr));
return new_ptr;
}
Result<void*> kcalloc(usize nmemb, usize size)
{
// FIXME: Check for overflows.
usize realsize = nmemb * size;
void* ptr = TRY(kmalloc(realsize));
return memset(ptr, 0, realsize);
}
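
One way the overflow FIXME above could be handled, sketched under the assumption that the kernel is built with GCC or
Clang (whose __builtin_mul_overflow is usable in freestanding code):

Result<void*> kcalloc(usize nmemb, usize size)
{
    usize realsize;
    if (__builtin_mul_overflow(nmemb, size, &realsize)) return err; // nmemb * size would wrap around
    void* ptr = TRY(kmalloc(realsize));
    return memset(ptr, 0, realsize);
}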

kernel/src/memory/Heap.h (new file, 7 lines)

@@ -0,0 +1,7 @@
#pragma once
#include <Result.h>
Result<void*> kmalloc(usize size);
Result<void*> kcalloc(usize nmemb, usize size);
Result<void*> krealloc(void* ptr, usize size);
Result<void> kfree(void* ptr);
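
Since every entry point returns a Result, callers that themselves return a Result can propagate allocation failure
with TRY instead of calling release_value(); a minimal sketch (make_counter is a hypothetical helper, not part of
this change):

Result<int*> make_counter()
{
    int* counter = (int*)TRY(kmalloc(sizeof(int))); // on failure, the error propagates to our caller
    *counter = 0;
    return counter;
}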

kernel/src/memory/MemoryManager.cpp

@@ -190,6 +190,8 @@ namespace MemoryManager
{
CHECK_PAGE_ALIGNED(virt);
+u64 start = virt;
while (count--)
{
u64 frame = TRY(alloc_frame());
@@ -197,7 +199,7 @@
virt += ARCH_PAGE_SIZE;
}
-return virt;
+return start;
}
Result<void> unmap_owned(u64 virt, usize count)
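
This fix matters to the new heap: allocate_pages in Heap.cpp treats the value returned by alloc_at as the first
HeapBlock of the mapped range, so returning the end would have placed the header one page past the mapping.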

Alignment.h

@@ -36,4 +36,11 @@ template <typename T> constexpr T get_blocks_from_size(T value, T block_size)
static_assert(get_blocks_from_size(40960, 4096) == 10);
static_assert(get_blocks_from_size(194, 64) == 4);
static_assert(get_blocks_from_size(2, 32) == 1);
static_assert(get_blocks_from_size(0, 256) == 0);
+// Offset a pointer by exactly <offset> bytes, no matter the type. Useful to avoid the quirks that come from C pointer
+// arithmetic.
+template <typename T, typename Offset> constexpr T* offset_ptr(T* ptr, Offset offset)
+{
+return (T*)((char*)ptr + offset);
+}
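
For context on the quirks being avoided: C pointer arithmetic scales by the pointee type, which is easy to trip over
when a byte offset is intended. An illustrative comparison (not part of the change):

int* p = some_array;
int* a = p + 1;            // advances by sizeof(int) bytes, i.e. 4
int* b = offset_ptr(p, 1); // advances by exactly 1 byte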