Compare commits

...

7 Commits

Author SHA1 Message Date
b5c6ae253d
Make LinkedList a lot better
2022-12-06 18:41:35 +01:00
09e447d9d2
Heap: Use LinkedList instead of doing things manually 2022-12-06 18:28:04 +01:00
d8f75f1d3c
LinkedList: Add an append_after() method
Can be used to append an item after another one instead of at the end of the list.
With doubly linked lists, this is extremely easy to achieve (O(1)); see the short sketch after the commit list below.
2022-12-06 18:25:08 +01:00
146da13e43
LinkedList: Make sure to explicitly mark the first node's next and last nodes as nullptr 2022-12-06 18:23:19 +01:00
07e6ebd3cc
LinkedList: Fix nonnull_or_error 2022-12-06 18:22:45 +01:00
2734353a5d
Heap: Just align it on a 16-byte boundary if it's not aligned 2022-12-06 18:21:19 +01:00
cccf89dd16
Heap: Remove outdated FIXME 2022-12-06 18:20:18 +01:00
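
Aside: the append_after() commit above relies on a basic property of doubly linked lists: inserting right after a known node never has to walk the list. Below is a minimal generic sketch of the idea; the names are illustrative and intentionally simpler than the project's DoublyLinkedList class.

    // Hypothetical, stripped-down node type used only for this illustration.
    struct Node
    {
        Node* next;
        Node* last;
    };

    // Insert new_node immediately after base: O(1), just four pointer updates.
    void append_after(Node* base, Node* new_node)
    {
        new_node->next = base->next;
        new_node->last = base;
        if (base->next) base->next->last = new_node;
        base->next = new_node;
    }

A singly linked list can also insert after a known node in O(1); the doubly linked variant additionally keeps the backwards (last) pointers consistent so the list can still be walked in reverse.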
2 changed files with 95 additions and 75 deletions

View File

@@ -5,6 +5,7 @@
#include "memory/KernelVM.h"
#include "memory/MemoryManager.h"
#include <luna/Alignment.h>
+#include <luna/LinkedList.h>
#include <luna/SafeArithmetic.h>
#include <luna/String.h>
#include <luna/SystemError.h>
@@ -18,25 +19,19 @@ static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
-struct HeapBlock
+struct HeapBlock : DoublyLinkedListNode<HeapBlock>
{
usize req_size;
usize full_size;
int status;
-HeapBlock* next;
-HeapBlock* last;
usize magic;
};
static_assert(sizeof(HeapBlock) == 48UL);
-static HeapBlock* heap_start = nullptr;
-static HeapBlock* heap_end = nullptr;
+static DoublyLinkedList<HeapBlock> heap;
-static Result<HeapBlock*> allocate_pages(
-usize count) // FIXME: Keep track of virtual address space usage. For now, since the address
-// space is so huge, we can just start at a fairly large address and assume
-// we'll never run into anything, but this will probably bite us in the future.
+static Result<HeapBlock*> allocate_pages(usize count)
{
u64 virt = TRY(KernelVM::alloc_several_pages(count));
void* const ptr = (void*)TRY(MemoryManager::alloc_at(virt, count, MMU::ReadWrite | MMU::NoExecute));
@@ -88,8 +83,7 @@ static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
available -= (available /
2); // reserve half of the rest for the new block, while still leaving another half for the old one.
-check(is_aligned(available,
-16UL)); // If necessary, we can just align it. This is more of a sanity check than a requirement.
+available = align_down(available, 16UL); // Everything has to be aligned on a 16-byte boundary
return available + block->req_size;
}
@@ -100,7 +94,8 @@ static Result<HeapBlock*> split(HeapBlock* block, usize size)
const usize old_size =
block->full_size; // Save the old value of this variable since we are going to use it after modifying it
-if (available < (size + sizeof(HeapBlock))) return err(0); // This error is not propagated.
+if (available < (size + sizeof(HeapBlock)))
+return err(ENONE); // This block hasn't got enough free space to hold the requested size.
const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
block->full_size = offset; // shrink the old block to fit this offset
@@ -110,24 +105,20 @@ static Result<HeapBlock*> split(HeapBlock* block, usize size)
new_block->magic = BLOCK_MAGIC;
new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
new_block->full_size = old_size - (offset + sizeof(HeapBlock));
-new_block->next = block->next;
-new_block->last = block;
+heap.append_after(block, new_block);
-block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in this memory range
-block->next = new_block;
+block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in its memory range
return new_block;
}
static Result<void> combine_forward(HeapBlock* block)
{
-HeapBlock* const next = block->next;
-if (next == heap_end) heap_end = block;
+// The caller needs to ensure there is a next block.
+HeapBlock* const next = heap.next(block).value();
+heap.remove(next);
next->magic = BLOCK_DEAD;
-block->next = block->next->next;
-if (block->next) block->next->last = block;
if (next->status & BLOCK_END_MEM)
{
if (next->status & BLOCK_START_MEM)
@@ -146,13 +137,11 @@ static Result<void> combine_forward(HeapBlock* block)
static Result<HeapBlock*> combine_backward(HeapBlock* block)
{
-HeapBlock* const last = block->last;
-if (block == heap_end) heap_end = last;
+// The caller needs to ensure there is a last block.
+HeapBlock* const last = heap.previous(block).value();
+heap.remove(block);
block->magic = BLOCK_DEAD;
-last->next = block->next;
-if (last->next) last->next->last = last;
if (block->status & BLOCK_END_MEM)
{
if (block->status & BLOCK_START_MEM)
@@ -175,7 +164,7 @@ Result<void*> kmalloc(usize size)
size = align_up(size, 16UL);
-if (!heap_start)
+if (!heap.first().has_value())
{
const usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
HeapBlock* const block = TRY(allocate_pages(pages));
@@ -183,15 +172,10 @@ Result<void*> kmalloc(usize size)
block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
block->magic = BLOCK_MAGIC;
block->status = BLOCK_START_MEM | BLOCK_END_MEM;
-block->next = block->last = nullptr;
-heap_start = block;
-check(!heap_end);
-heap_end = heap_start;
+heap.append(block);
}
-HeapBlock* block = heap_start;
+HeapBlock* block = heap.first().value();
while (block)
{
// Trying to find a free block...
@@ -199,7 +183,7 @@ Result<void*> kmalloc(usize size)
{
if (block->full_size < size)
{
-block = block->next; // Let's not try to split this block, it's not big enough
+block = heap.next(block).value_or(nullptr);
continue;
}
break; // We found a free block that's big enough!!
@@ -207,10 +191,10 @@ Result<void*> kmalloc(usize size)
auto rc = split(block, size);
if (rc.has_value())
{
-block = rc.release_value(); // We managed to get a free block from a larger used block!!
+block = rc.value(); // We managed to get a free block from a larger used block!!
break;
}
-block = block->next;
+block = heap.next(block).value_or(nullptr);
}
if (!block) // No free blocks, let's allocate a new one
@@ -221,11 +205,7 @@ Result<void*> kmalloc(usize size)
block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
block->magic = BLOCK_MAGIC;
block->status = BLOCK_START_MEM | BLOCK_END_MEM;
-block->next = nullptr;
-block->last = heap_end;
-heap_end->next = block;
-heap_end = block;
+heap.append(block);
}
block->req_size = size;
@@ -261,13 +241,15 @@ Result<void> kfree(void* ptr)
else
block->status &= ~BLOCK_USED;
-if (block->next && is_block_free(block->next))
+auto maybe_next = heap.next(block);
+if (maybe_next.has_value() && is_block_free(maybe_next.value()))
{
// The next block is also free, thus we can merge!
TRY(combine_forward(block));
}
-if (block->last && is_block_free(block->last))
+auto maybe_last = heap.previous(block);
+if (maybe_last.has_value() && is_block_free(maybe_last.value()))
{
// The last block is also free, thus we can merge!
block = TRY(combine_backward(block));
@@ -275,10 +257,7 @@ Result<void> kfree(void* ptr)
if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
{
-if (block == heap_start) heap_start = block->next;
-if (block == heap_end) heap_end = block->last;
-if (block->last) block->last->next = block->next;
-if (block->next) block->next->last = block->last;
+heap.remove(block);
TRY(release_pages(block, get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE)));
}
@@ -341,14 +320,14 @@ Result<void*> kcalloc(usize nmemb, usize size)
void dump_heap_usage()
{
kdbgln("-- Dumping usage stats for kernel heap:");
-if (!heap_start)
+if (!heap.count())
{
kdbgln("- Heap is not currently being used");
return;
}
usize alloc_total = 0;
usize alloc_used = 0;
-HeapBlock* block = heap_start;
+HeapBlock* block = heap.first().value();
while (block)
{
if (is_block_free(block))
@@ -362,7 +341,7 @@ void dump_heap_usage()
alloc_total += block->full_size + sizeof(HeapBlock);
alloc_used += block->req_size;
}
-block = block->next;
+block = heap.next(block).value_or(nullptr);
}
kdbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);

View File

@@ -3,17 +3,40 @@
template <typename T> inline Result<T*> nonnull_or_error(T* ptr)
{
-return ptr == nullptr ? err(ENONE) : ptr;
+if (ptr == nullptr) return err(ENONE);
+else
+return ptr;
}
template <typename T> class DoublyLinkedList;
template <typename T> class DoublyLinkedListNode
{
+using SelfType = DoublyLinkedListNode<T>;
private:
-DoublyLinkedListNode<T>* m_next_node;
-DoublyLinkedListNode<T>* m_last_node;
+SelfType* m_next_node;
+SelfType* m_last_node;
+void set_next(SelfType* next)
+{
+m_next_node = next;
+}
+void set_last(SelfType* last)
+{
+m_last_node = last;
+}
+SelfType* get_next()
+{
+return m_next_node;
+}
+SelfType* get_last()
+{
+return m_last_node;
+}
void detach_from_list()
{
@@ -21,7 +44,7 @@ template <typename T> class DoublyLinkedListNode
m_last_node->m_next_node = m_next_node;
}
-void add_to_list(DoublyLinkedListNode<T>* end_node)
+void add_to_list(SelfType* end_node)
{
end_node->m_next_node = this;
this->m_last_node = end_node;
@@ -32,23 +55,44 @@
template <typename T> class DoublyLinkedList
{
+using Node = DoublyLinkedListNode<T>;
public:
void append(T* ptr)
{
-DoublyLinkedListNode<T>* node = (DoublyLinkedListNode<T>*)ptr;
+Node* const node = extract_node(ptr);
if (!m_start_node) m_start_node = node;
if (m_end_node) node->add_to_list(m_end_node);
+else
+{
+node->set_next(nullptr);
+node->set_last(nullptr);
+}
m_end_node = node;
m_count++;
}
+void append_after(T* base, T* ptr)
+{
+Node* const new_node = extract_node(ptr);
+Node* const base_node = extract_node(base);
+if (m_end_node == base_node) m_end_node = new_node;
+new_node->set_next(base_node->get_next());
+base_node->set_next(new_node);
+new_node->set_last(base_node);
+m_count++;
+}
T* remove(T* ptr)
{
-DoublyLinkedListNode<T>* node = (DoublyLinkedListNode<T>*)ptr;
+Node* const node = extract_node(ptr);
-if (node == m_end_node) m_end_node = node->m_last_node;
-if (node == m_start_node) m_start_node = node->m_next_node;
+if (node == m_end_node) m_end_node = node->get_last();
+if (node == m_start_node) m_start_node = node->get_next();
node->detach_from_list();
@@ -69,40 +113,32 @@ template <typename T> class DoublyLinkedList
Result<T*> next(T* item)
{
-return nonnull_or_error(((DoublyLinkedListNode<T>*)item)->m_next_node);
+return nonnull_or_error((T*)extract_node(item)->get_next());
}
Result<T*> previous(T* item)
{
-return nonnull_or_error(((DoublyLinkedListNode<T>*)item)->m_last_node);
+return nonnull_or_error((T*)extract_node(item)->get_last());
}
template <typename Callback> void for_each(Callback callback)
{
-for (DoublyLinkedListNode<T>* node = m_start_node; node; node = node->m_next_node) { callback((T*)node); }
+for (Node* node = m_start_node; node; node = node->get_next()) { callback((T*)node); }
}
template <typename Callback> void for_each_reversed(Callback callback)
{
-for (DoublyLinkedListNode<T>* node = m_end_node; node; node = node->m_last_node) { callback((T*)node); }
+for (Node* node = m_end_node; node; node = node->get_last()) { callback((T*)node); }
}
template <typename Callback> void for_each_after(T* start, Callback callback)
{
-for (DoublyLinkedListNode<T>* node = ((DoublyLinkedListNode<T>*)start)->m_next_node; node;
-node = node->m_next_node)
-{
-callback((T*)node);
-}
+for (Node* node = extract_node(start)->m_next_node; node; node = node->get_next()) { callback((T*)node); }
}
template <typename Callback> void for_each_before(T* end, Callback callback)
{
-for (DoublyLinkedListNode<T>* node = ((DoublyLinkedListNode<T>*)end)->m_last_node; node;
-node = node->m_last_node)
-{
-callback((T*)node);
-}
+for (Node* node = extract_node(end)->m_last_node; node; node = node->get_last()) { callback((T*)node); }
}
usize count()
@@ -111,8 +147,13 @@ template <typename T> class DoublyLinkedList
}
private:
-DoublyLinkedListNode<T>* m_start_node = nullptr;
-DoublyLinkedListNode<T>* m_end_node = nullptr;
+Node* m_start_node = nullptr;
+Node* m_end_node = nullptr;
+Node* extract_node(T* item)
+{
+return (Node*)item;
+}
usize m_count = 0;
};
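
To make the intent of the new class concrete, here is a small usage sketch of the intrusive list as it looks after these commits. The Job type, the g_jobs variable and example() are made up for illustration; the list API (append, append_after, remove, for_each, count) is the one shown in the diff above.

    #include <luna/LinkedList.h>

    // Hypothetical element type: the list is intrusive, so the stored type
    // derives from DoublyLinkedListNode<T> and the list itself never allocates.
    struct Job : DoublyLinkedListNode<Job>
    {
        int id;
    };

    static DoublyLinkedList<Job> g_jobs;
    static Job g_first, g_second;

    void example()
    {
        g_first.id = 1;
        g_second.id = 2;

        g_jobs.append(&g_first);                  // list: first
        g_jobs.append_after(&g_first, &g_second); // list: first, second (O(1), no traversal)

        g_jobs.for_each([](Job* job) {
            // visits jobs in list order; job->id is 1, then 2
        });

        g_jobs.remove(&g_first); // unlinks only; the caller still owns the storage
    }

Because the node pointers live inside the element itself, appending, removing and iterating need no extra bookkeeping, which is exactly what the heap code above exploits by making HeapBlock derive from DoublyLinkedListNode<HeapBlock>.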