#include "memory/Heap.h" #include "Log.h" #include "arch/MMU.h" #include "arch/Serial.h" #include "memory/KernelVM.h" #include "memory/MemoryManager.h" #include #include #include #include #include #include #include namespace std { const std::nothrow_t nothrow; } static constexpr int BLOCK_USED = 1 << 0; static constexpr int BLOCK_START_MEM = 1 << 1; static constexpr int BLOCK_END_MEM = 1 << 2; static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline) static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead; static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4; struct HeapBlock : DoublyLinkedListNode { usize req_size; usize full_size; int status; usize magic; }; static_assert(sizeof(HeapBlock) == 48UL); static const isize HEAP_BLOCK_SIZE = 48; static DoublyLinkedList heap; static Result allocate_pages(usize count) { void* const ptr = (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute)); return (HeapBlock*)ptr; } static Result release_pages(void* ptr, usize count) { return MemoryManager::unmap_owned_and_free_vm((u64)ptr, count); } // If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of // pages. static usize get_pages_for_allocation(usize bytes) { usize pages = get_blocks_from_size(bytes, ARCH_PAGE_SIZE); if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION; return pages; } static bool is_block_free(HeapBlock* block) { return !(block->status & BLOCK_USED); } static usize space_available(HeapBlock* block) { expect(!is_block_free(block), "Attempting to split a free block"); return block->full_size - block->req_size; } // The heap block is stored right behind a memory block. static HeapBlock* get_heap_block_for_pointer(void* ptr) { return (HeapBlock*)offset_ptr(ptr, -HEAP_BLOCK_SIZE); } static void* get_pointer_from_heap_block(HeapBlock* block) { return (void*)offset_ptr(block, HEAP_BLOCK_SIZE); } static usize get_fair_offset_to_split_at(HeapBlock* block, usize min) { usize available = space_available(block); available -= min; // reserve at least min size for the new block. available -= (available / 2); // reserve half of the rest for the new block, while still leaving another half for the old one. available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary return available + block->req_size; } static Option split(HeapBlock* block, usize size) { const usize available = space_available(block); // How much space can we steal from this block? const usize old_size = block->full_size; // Save the old value of this variable since we are going to use it after modifying it if (available < (size + sizeof(HeapBlock))) return {}; // This block hasn't got enough free space to hold the requested size. const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock)); block->full_size = offset; // shrink the old block to fit this offset HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock)); new_block->magic = BLOCK_MAGIC; new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0; new_block->full_size = old_size - (offset + sizeof(HeapBlock)); heap.append_after(block, new_block); block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in its memory range return new_block; } static Result combine_forward(HeapBlock* block) { // The caller needs to ensure there is a next block. 
static Result<void> combine_forward(HeapBlock* block)
{
    // The caller needs to ensure there is a next block.
    HeapBlock* const next = heap.next(block).value();
    heap.remove(next);
    next->magic = BLOCK_DEAD;

    if (next->status & BLOCK_END_MEM)
    {
        if (next->status & BLOCK_START_MEM)
        {
            // The next block spans its own memory range; release its pages instead of merging it.
            TRY(release_pages(next, get_blocks_from_size(next->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
            return {};
        }
        else
            block->status |= BLOCK_END_MEM;
    }

    block->full_size += next->full_size + sizeof(HeapBlock);
    return {};
}

static Result<HeapBlock*> combine_backward(HeapBlock* block)
{
    // The caller needs to ensure there is a last block.
    HeapBlock* const last = heap.previous(block).value();
    heap.remove(block);
    block->magic = BLOCK_DEAD;

    if (block->status & BLOCK_END_MEM)
    {
        if (block->status & BLOCK_START_MEM)
        {
            // This block spans its own memory range; release its pages instead of merging it.
            TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
            return last;
        }
        else
            last->status |= BLOCK_END_MEM;
    }

    last->full_size += block->full_size + sizeof(HeapBlock);
    return last;
}

Result<void*> kmalloc(usize size)
{
    if (!size) return (void*)BLOCK_MAGIC;

    size = align_up<16>(size);

    if (!heap.first().has_value())
    {
        const usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
        HeapBlock* const block = TRY(allocate_pages(pages));

        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
        block->magic = BLOCK_MAGIC;
        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
        heap.append(block);
    }

    HeapBlock* block = heap.expect_first();
    while (block)
    {
        // Trying to find a free block...
        if (is_block_free(block))
        {
            if (block->full_size < size)
            {
                block = heap.next(block).value_or(nullptr);
                continue;
            }
            break; // We found a free block that's big enough!!
        }
        auto rc = split(block, size);
        if (rc.has_value())
        {
            block = rc.value(); // We managed to get a free block from a larger used block!!
            break;
        }
        block = heap.next(block).value_or(nullptr);
    }

    if (!block) // No free blocks, let's allocate a new one
    {
        usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
        block = TRY(allocate_pages(pages));

        block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
        block->magic = BLOCK_MAGIC;
        block->status = BLOCK_START_MEM | BLOCK_END_MEM;
        heap.append(block);
    }

    block->req_size = size;
    block->status |= BLOCK_USED;

    return get_pointer_from_heap_block(block);
}
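// A minimal usage sketch for kmalloc()/kfree() (hypothetical call site, not part of this file): callers propagate
// allocation failure with TRY() instead of checking for a null pointer:
//
//   Result<void> make_buffer()
//   {
//       u8* const buf = (u8*)TRY(kmalloc(512)); // The size is rounded up to a multiple of 16 internally.
//       buf[0] = 42;
//       TRY(kfree(buf));
//       return {};
//   }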
Result<void> kfree(void* ptr)
{
    if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
    if (!ptr) return {};

    HeapBlock* block = get_heap_block_for_pointer(ptr);
    if (block->magic != BLOCK_MAGIC)
    {
        if (block->magic == BLOCK_DEAD)
        {
            kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
        }
        else
            kerrorln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
        return err(EFAULT);
    }

    if (is_block_free(block))
    {
        kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
        return err(EFAULT);
    }
    else
        block->status &= ~BLOCK_USED;

    auto maybe_next = heap.next(block);
    if (maybe_next.has_value() && is_block_free(maybe_next.value()))
    {
        // The next block is also free, thus we can merge!
        TRY(combine_forward(block));
    }

    auto maybe_last = heap.previous(block);
    if (maybe_last.has_value() && is_block_free(maybe_last.value()))
    {
        // The last block is also free, thus we can merge!
        block = TRY(combine_backward(block));
    }

    if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
    {
        // This block now spans its entire memory range; release the underlying pages back to the system.
        heap.remove(block);
        TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
    }

    return {};
}

Result<void*> krealloc(void* ptr, usize size)
{
    if (!ptr) return kmalloc(size);
    if (ptr == (void*)BLOCK_MAGIC) return kmalloc(size);
    if (!size)
    {
        TRY(kfree(ptr));
        return (void*)BLOCK_MAGIC;
    }

    HeapBlock* const block = get_heap_block_for_pointer(ptr);
    if (block->magic != BLOCK_MAGIC)
    {
        if (block->magic == BLOCK_DEAD)
        {
            kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
        }
        else
            kerrorln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
        return err(EFAULT);
    }

    size = align_up<16>(size);

    if (is_block_free(block))
    {
        kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
        return err(EFAULT);
    }

    if (block->full_size >= size)
    {
        // This block is already large enough!
        block->req_size = size;
        return ptr;
    }

    void* const new_ptr = TRY(kmalloc(size));
    memcpy(new_ptr, ptr, block->req_size > size ? size : block->req_size);
    TRY(kfree(ptr));

    return new_ptr;
}

Result<void*> kcalloc(usize nmemb, usize size)
{
    const usize realsize = TRY(safe_mul(nmemb, size));
    void* const ptr = TRY(kmalloc(realsize));
    return memset(ptr, 0, realsize);
}

void dump_heap_usage()
{
    kdbgln("-- Dumping usage stats for kernel heap:");
    if (!heap.count())
    {
        kdbgln("- Heap is not currently being used");
        return;
    }

    usize alloc_total = 0;
    usize alloc_used = 0;

    HeapBlock* block = heap.expect_first();
    while (block)
    {
        if (is_block_free(block))
        {
            kdbgln("- Available block, of size %zu", block->full_size);
            alloc_total += block->full_size + sizeof(HeapBlock);
        }
        else
        {
            kdbgln("- Used block, of size %zu, of which %zu bytes are being used", block->full_size, block->req_size);
            alloc_total += block->full_size + sizeof(HeapBlock);
            alloc_used += block->req_size;
        }
        block = heap.next(block).value_or(nullptr);
    }

    kdbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
    kdbgln("-- Heap memory in use by the kernel: %zu bytes", alloc_used);
}

void* operator new(usize size, const std::nothrow_t&) noexcept
{
    return kmalloc(size).value_or(nullptr);
}

void* operator new[](usize size, const std::nothrow_t&) noexcept
{
    return kmalloc(size).value_or(nullptr);
}

void operator delete(void* p) noexcept
{
    kfree(p);
}

void operator delete[](void* p) noexcept
{
    kfree(p);
}

void operator delete(void* p, usize) noexcept
{
    kfree(p);
}

void operator delete[](void* p, usize) noexcept
{
    kfree(p);
}
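// A minimal usage sketch for the nothrow allocators above (hypothetical call site, not part of this file). Only the
// nothrow forms of operator new are defined here, so kernel code allocates like this and checks for nullptr on
// failure:
//
//   char* const buffer = new (std::nothrow) char[ARCH_PAGE_SIZE];
//   if (!buffer) return err(ENOMEM); // ENOMEM assumed to be available alongside EFAULT.
//   delete[] buffer;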