Compare commits
10 Commits: 586ca19b62 ... 2d2db300b0

SHA1
2d2db300b0
5fb2ff09c7
d864bda751
139c0b5eb1
7462b764d8
09dc8bd522
59db656f25
16a62552db
445aeed80d
9454b65682
apps/app.c (14 changed lines)
@@ -1,11 +1,9 @@
-#include <bits/mmap-flags.h>
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <sys/syscall.h>
+#include <sys/mman.h>
 #include <time.h>
-#include <unistd.h>

 void bye()
 {
@@ -24,17 +22,11 @@ int main()

 console_write("\n", 1);

-long rc = syscall(SYS_allocate_memory, 4096, PROT_READ | PROT_WRITE);
+char* address = (char*)malloc(1);
-if (rc < 0)
-{
-printf("allocate_memory: %s\n", strerror(-rc));
-return 1;
-}
-char* address = (char*)rc;
 printf("address: %p\n", address);
 printf("memory at address: %c\n", *address);
 *address = 'e';
 printf("memory at address: %c\n", *address);

-syscall(SYS_deallocate_memory, address);
+free(address);
 }
@@ -3,11 +3,13 @@
 set(SOURCES
 src/main.cpp
 src/Log.cpp
+src/cxxabi.cpp
 src/video/Framebuffer.cpp
 src/video/TextConsole.cpp
 src/memory/MemoryManager.cpp
 src/memory/Heap.cpp
 src/memory/KernelVM.cpp
+src/memory/UserVM.cpp
 src/memory/MemoryMap.cpp
 src/boot/Init.cpp
 src/arch/Serial.cpp
kernel/src/cxxabi.cpp (new file, 52 lines)
@@ -0,0 +1,52 @@
+typedef void* (*cxa_atexit_func_t)(void*);
+
+struct cxa_atexit_entry
+{
+cxa_atexit_func_t function;
+void* argument;
+void* dso_handle;
+};
+
+#define CXA_ATEXIT_MAX 64
+
+int cxa_atexit_entry_count = 0;
+
+cxa_atexit_entry cxa_atexit_entries[CXA_ATEXIT_MAX];
+
+__attribute__((visibility("hidden"))) void* __dso_handle = 0;
+
+extern "C"
+{
+int __cxa_atexit(cxa_atexit_func_t func, void* arg, void* dso)
+{
+if (cxa_atexit_entry_count >= CXA_ATEXIT_MAX) return -1;
+cxa_atexit_entries[cxa_atexit_entry_count].function = func;
+cxa_atexit_entries[cxa_atexit_entry_count].argument = arg;
+cxa_atexit_entries[cxa_atexit_entry_count].dso_handle = dso;
+cxa_atexit_entry_count++;
+return 0;
+}
+
+void __cxa_finalize(void* f)
+{
+int i = cxa_atexit_entry_count;
+if (!f)
+{
+while (i--)
+{
+if (cxa_atexit_entries[i].function) { cxa_atexit_entries[i].function(cxa_atexit_entries[i].argument); }
+}
+}
+else
+{
+while (i--)
+{
+if (cxa_atexit_entries[i].function == (cxa_atexit_func_t)f)
+{
+cxa_atexit_entries[i].function(cxa_atexit_entries[i].argument);
+cxa_atexit_entries[i].function = 0;
+}
+}
+}
+}
+}
@@ -1,422 +1,14 @@
 #include "memory/Heap.h"
-#include "Log.h"
 #include "arch/MMU.h"
-#include "arch/Serial.h"
-#include "memory/KernelVM.h"
 #include "memory/MemoryManager.h"
-#include <luna/Alignment.h>
+#include <luna/Heap.h>
-#include <luna/Alloc.h>
-#include <luna/CString.h>
-#include <luna/LinkedList.h>
-#include <luna/SafeArithmetic.h>
-#include <luna/ScopeGuard.h>
-#include <luna/SystemError.h>

-namespace std
+Result<void*> allocate_pages_impl(usize count)
 {
-const nothrow_t nothrow;
+return (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
 }

-static constexpr int BLOCK_USED = 1 << 0;
+Result<void> release_pages_impl(void* address, usize count)
-static constexpr int BLOCK_START_MEM = 1 << 1;
-static constexpr int BLOCK_END_MEM = 1 << 2;
-
-static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
-static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
-
-static constexpr u8 KMALLOC_SCRUB_BYTE = 0xac;
-static constexpr u8 KFREE_SCRUB_BYTE = 0xde;
-
-static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
-
-struct HeapBlock : LinkedListNode<HeapBlock>
 {
-usize req_size;
+return MemoryManager::unmap_owned_and_free_vm((u64)address, count);
-usize full_size;
-int status;
-usize magic;
-};
-
-static_assert(sizeof(HeapBlock) == 48UL);
-
-static const isize HEAP_BLOCK_SIZE = 48;
-
-static LinkedList<HeapBlock> heap;
-
-static Result<HeapBlock*> allocate_pages(usize count)
-{
-void* const ptr = (void*)TRY(MemoryManager::alloc_for_kernel(count, MMU::ReadWrite | MMU::NoExecute));
-return (HeapBlock*)ptr;
-}
-
-static Result<void> release_pages(void* ptr, usize count)
-{
-return MemoryManager::unmap_owned_and_free_vm((u64)ptr, count);
-}
-
-// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
-// pages.
-static usize get_pages_for_allocation(usize bytes)
-{
-usize pages = get_blocks_from_size(bytes, ARCH_PAGE_SIZE);
-if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
-return pages;
-}
-
-static bool is_block_free(HeapBlock* block)
-{
-return !(block->status & BLOCK_USED);
-}
-
-static usize space_available(HeapBlock* block)
-{
-expect(!is_block_free(block), "Attempting to split a free block");
-return block->full_size - block->req_size;
-}
-
-// The heap block is stored right behind a memory block.
-static HeapBlock* get_heap_block_for_pointer(void* ptr)
-{
-return (HeapBlock*)offset_ptr(ptr, -HEAP_BLOCK_SIZE);
-}
-
-static void* get_pointer_from_heap_block(HeapBlock* block)
-{
-return (void*)offset_ptr(block, HEAP_BLOCK_SIZE);
-}
-
-static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
-{
-usize available = space_available(block);
-
-available -= min; // reserve at least min size for the new block.
-
-available -= (available /
-2); // reserve half of the rest for the new block, while still leaving another half for the old one.
-
-available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary
-
-return available + block->req_size;
-}
-
-static Option<HeapBlock*> split(HeapBlock* block, usize size)
-{
-const usize available = space_available(block); // How much space can we steal from this block?
-const usize old_size =
-block->full_size; // Save the old value of this variable since we are going to use it after modifying it
-
-if (available < (size + sizeof(HeapBlock)))
-return {}; // This block hasn't got enough free space to hold the requested size.
-
-const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
-block->full_size = offset; // shrink the old block to fit this offset
-
-HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));
-
-new_block->magic = BLOCK_MAGIC;
-new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
-new_block->full_size = old_size - (offset + sizeof(HeapBlock));
-heap.add_after(block, new_block);
-
-block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in its memory range
-
-return new_block;
-}
-
-static Result<void> combine_forward(HeapBlock* block)
-{
-// This block ends a memory range, cannot be combined with blocks outside its range.
-if (block->status & BLOCK_END_MEM) return {};
-
-// The caller needs to ensure there is a next block.
-HeapBlock* const next = heap.next(block).value();
-// This block starts a memory range, cannot be combined with blocks outside its range.
-if (next->status & BLOCK_START_MEM) return {};
-
-heap.remove(next);
-next->magic = BLOCK_DEAD;
-
-if (next->status & BLOCK_END_MEM)
-{
-if (next->status & BLOCK_START_MEM)
-{
-const usize pages = get_blocks_from_size(next->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE);
-TRY(release_pages(next, pages));
-return {};
-}
-else
-block->status |= BLOCK_END_MEM;
-}
-
-block->full_size += next->full_size + sizeof(HeapBlock);
-
-return {};
-}
-
-static Result<HeapBlock*> combine_backward(HeapBlock* block)
-{
-// This block starts a memory range, cannot be combined with blocks outside its range.
-if (block->status & BLOCK_START_MEM) return block;
-
-// The caller needs to ensure there is a last block.
-HeapBlock* const last = heap.previous(block).value();
-// This block ends a memory range, cannot be combined with blocks outside its range.
-if (last->status & BLOCK_END_MEM) return block;
-heap.remove(block);
-block->magic = BLOCK_DEAD;
-
-if (block->status & BLOCK_END_MEM)
-{
-if (block->status & BLOCK_START_MEM)
-{
-const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE);
-TRY(release_pages(block, pages));
-return last;
-}
-else
-last->status |= BLOCK_END_MEM;
-}
-
-last->full_size += block->full_size + sizeof(HeapBlock);
-
-return last;
-}
-
-Result<void*> kmalloc(usize size, bool should_scrub)
-{
-if (!size) return (void*)BLOCK_MAGIC;
-
-size = align_up<16>(size);
-
-if (heap.count() == 0)
-{
-const usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
-HeapBlock* const block = TRY(allocate_pages(pages));
-
-block->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
-block->magic = BLOCK_MAGIC;
-block->status = BLOCK_START_MEM | BLOCK_END_MEM;
-heap.append(block);
-}
-
-Option<HeapBlock*> block = heap.first();
-while (block.has_value())
-{
-HeapBlock* const current = block.value();
-// Trying to find a free block...
-if (is_block_free(current))
-{
-if (current->full_size < size)
-{
-block = heap.next(current);
-continue;
-}
-break; // We found a free block that's big enough!!
-}
-auto rc = split(current, size);
-if (rc.has_value())
-{
-block = rc.value(); // We managed to get a free block from a larger used block!!
-break;
-}
-block = heap.next(current);
-}
-
-if (!block.has_value()) // No free blocks, let's allocate a new one
-{
-usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
-HeapBlock* const current = TRY(allocate_pages(pages));
-
-current->full_size = (pages * ARCH_PAGE_SIZE) - sizeof(HeapBlock);
-current->magic = BLOCK_MAGIC;
-current->status = BLOCK_START_MEM | BLOCK_END_MEM;
-heap.append(current);
-
-block = current;
-}
-
-HeapBlock* const current = block.value();
-
-current->req_size = size;
-current->status |= BLOCK_USED;
-
-if (should_scrub) { memset(get_pointer_from_heap_block(current), KMALLOC_SCRUB_BYTE, size); }
-
-return get_pointer_from_heap_block(current);
-}
-
-Result<void> kfree(void* ptr)
-{
-if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
-if (!ptr) return {};
-
-HeapBlock* block = get_heap_block_for_pointer(ptr);
-
-if (block->magic != BLOCK_MAGIC)
-{
-if (block->magic == BLOCK_DEAD)
-{
-kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
-}
-else
-kerrorln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
-
-return err(EFAULT);
-}
-
-if (is_block_free(block))
-{
-kerrorln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
-return err(EFAULT);
-}
-else
-block->status &= ~BLOCK_USED;
-
-memset(ptr, KFREE_SCRUB_BYTE, block->req_size);
-
-auto maybe_next = heap.next(block);
-if (maybe_next.has_value() && is_block_free(maybe_next.value()))
-{
-// The next block is also free, thus we can merge!
-TRY(combine_forward(block));
-}
-
-auto maybe_last = heap.previous(block);
-if (maybe_last.has_value() && is_block_free(maybe_last.value()))
-{
-// The last block is also free, thus we can merge!
-block = TRY(combine_backward(block));
-}
-
-if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
-{
-heap.remove(block);
-const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), ARCH_PAGE_SIZE);
-TRY(release_pages(block, pages));
-}
-
-return {};
-}
-
-Result<void*> krealloc(void* ptr, usize size)
-{
-if (!ptr) return kmalloc(size);
-if (ptr == (void*)BLOCK_MAGIC) return kmalloc(size);
-if (!size)
-{
-TRY(kfree(ptr));
-return (void*)BLOCK_MAGIC;
-}
-
-HeapBlock* const block = get_heap_block_for_pointer(ptr);
-
-if (block->magic != BLOCK_MAGIC)
-{
-if (block->magic == BLOCK_DEAD)
-{
-kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
-}
-else
-kerrorln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
-
-return err(EFAULT);
-}
-
-size = align_up<16>(size);
-
-if (is_block_free(block))
-{
-kerrorln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
-return err(EFAULT);
-}
-
-if (block->full_size >= size)
-{
-// This block is already large enough!
-// FIXME: Scrub this if necessary.
-block->req_size = size;
-return ptr;
-}
-
-usize old_size = block->req_size;
-
-void* const new_ptr = TRY(kmalloc(size, false));
-memcpy(new_ptr, ptr, old_size > size ? size : old_size);
-TRY(kfree(ptr));
-
-if (old_size < size) { memset(offset_ptr(new_ptr, old_size), KMALLOC_SCRUB_BYTE, size - old_size); }
-
-return new_ptr;
-}
-
-Result<void*> kcalloc(usize nmemb, usize size)
-{
-const usize realsize = TRY(safe_mul(nmemb, size));
-void* const ptr = TRY(kmalloc(realsize, false));
-return memset(ptr, 0, realsize);
-}
-
-void dump_heap_usage()
-{
-kdbgln("-- Dumping usage stats for kernel heap:");
-if (!heap.count())
-{
-kdbgln("- Heap is not currently being used");
-return;
-}
-usize alloc_total = 0;
-usize alloc_used = 0;
-auto block = heap.first();
-while (block.has_value())
-{
-HeapBlock* current = block.value();
-if (is_block_free(current))
-{
-kdbgln("- Available block (%p), of size %zu (%s%s)", (void*)current, current->full_size,
-current->status & BLOCK_START_MEM ? "b" : "-", current->status & BLOCK_END_MEM ? "e" : "-");
-alloc_total += current->full_size + sizeof(HeapBlock);
-}
-else
-{
-kdbgln("- Used block (%p), of size %zu, of which %zu bytes are being used (%s%s)", (void*)current,
-current->full_size, current->req_size, current->status & BLOCK_START_MEM ? "b" : "-",
-current->status & BLOCK_END_MEM ? "e" : "-");
-alloc_total += current->full_size + sizeof(HeapBlock);
-alloc_used += current->req_size;
-}
-block = heap.next(current);
-}
-
-kdbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
-kdbgln("-- Heap memory in use by the kernel: %zu bytes", alloc_used);
-}
-
-void* operator new(usize size, const std::nothrow_t&) noexcept
-{
-return kmalloc(size).value_or(nullptr);
-}
-
-void* operator new[](usize size, const std::nothrow_t&) noexcept
-{
-return kmalloc(size).value_or(nullptr);
-}
-
-void operator delete(void* p) noexcept
-{
-kfree(p);
-}
-
-void operator delete[](void* p) noexcept
-{
-kfree(p);
-}
-
-void operator delete(void* p, usize) noexcept
-{
-kfree(p);
-}
-
-void operator delete[](void* p, usize) noexcept
-{
-kfree(p);
 }
@@ -1,10 +1,7 @@
 #pragma once
-#include <luna/PlacementNew.h>
+#include <luna/Heap.h>
-#include <luna/Result.h>

-Result<void*> kmalloc(usize size, bool should_scrub = true);
+#define kmalloc malloc_impl
-Result<void*> kcalloc(usize nmemb, usize size);
+#define kcalloc calloc_impl
-Result<void*> krealloc(void* ptr, usize size);
+#define krealloc realloc_impl
-Result<void> kfree(void* ptr);
+#define kfree free_impl

-void dump_heap_usage();
kernel/src/memory/UserVM.cpp (new file, 115 lines)
@@ -0,0 +1,115 @@
+#include "memory/UserVM.h"
+#include "Log.h"
+#include "arch/MMU.h"
+#include "memory/Heap.h"
+#include <luna/ScopeGuard.h>
+
+static constexpr u64 VM_BASE = 0x10000000;
+
+static constexpr usize INITIAL_VM_SIZE = 80;
+static constexpr usize MAX_VM_SIZE = 1024 * 1024 * 16;
+
+Result<OwnedPtr<UserVM>> UserVM::try_create()
+{
+void* const base = TRY(kmalloc(INITIAL_VM_SIZE));
+
+auto guard = make_scope_guard([&] { kfree(base); });
+
+OwnedPtr<UserVM> ptr = TRY(make_owned<UserVM>(base, INITIAL_VM_SIZE));
+
+guard.deactivate();
+
+return move(ptr);
+}
+
+UserVM::UserVM(void* base, usize size)
+{
+kdbgln("user vm created with base=%p, size=%zu", base, size);
+m_bitmap.initialize(base, size);
+m_bitmap.clear(false);
+}
+
+Result<bool> UserVM::try_expand(usize size)
+{
+if (m_bitmap.size_in_bytes() == MAX_VM_SIZE) { return false; }
+
+usize new_size = m_bitmap.size_in_bytes() + size;
+
+if (new_size > MAX_VM_SIZE) new_size = MAX_VM_SIZE;
+
+usize old_size = m_bitmap.size_in_bytes();
+
+void* const base = TRY(krealloc(m_bitmap.location(), new_size));
+
+m_bitmap.initialize(base, new_size);
+m_bitmap.clear_region(old_size * 8, (new_size - old_size) * 8, false);
+
+kdbgln("user vm expanded to base=%p, size=%zu", base, new_size);
+
+return true;
+}
+
+Result<u64> UserVM::alloc_one_page()
+{
+u64 index;
+const auto maybe_index = m_bitmap.find_and_toggle(false);
+if (!maybe_index.has_value())
+{
+bool success = TRY(try_expand());
+if (!success) return err(ENOMEM);
+index = TRY(Result<u64>::from_option(m_bitmap.find_and_toggle(false), ENOMEM));
+}
+else
+index = maybe_index.value();
+
+return VM_BASE + index * ARCH_PAGE_SIZE;
+}
+
+Result<u64> UserVM::alloc_several_pages(usize count)
+{
+u64 index;
+const auto maybe_index = m_bitmap.find_and_toggle_region(false, count);
+if (!maybe_index.has_value())
+{
+bool success = TRY(try_expand((count / 8) + INITIAL_VM_SIZE));
+if (!success) return err(ENOMEM);
+index = TRY(Result<u64>::from_option(m_bitmap.find_and_toggle_region(false, count), ENOMEM));
+}
+else
+index = maybe_index.value();
+
+return VM_BASE + index * ARCH_PAGE_SIZE;
+}
+
+Result<void> UserVM::free_one_page(u64 address)
+{
+if (address < VM_BASE) return err(EINVAL);
+const u64 index = (address - VM_BASE) / ARCH_PAGE_SIZE;
+if (index > (MAX_VM_SIZE * 8)) return err(EINVAL);
+
+if (!m_bitmap.get(index)) return err(EFAULT);
+
+m_bitmap.set(index, false);
+
+return {};
+}
+
+Result<void> UserVM::free_several_pages(u64 address, usize count)
+{
+if (address < VM_BASE) return err(EINVAL);
+const u64 index = (address - VM_BASE) / ARCH_PAGE_SIZE;
+if ((index + count) > (MAX_VM_SIZE * 8)) return err(EINVAL);
+
+// FIXME: Is it necessary to check all pages?
+if (!m_bitmap.get(index)) return err(EFAULT);
+
+m_bitmap.clear_region(index, count, false);
+
+return {};
+}
+
+UserVM::~UserVM()
+{
+kdbgln("user vm destroyed: base=%p, size=%zu", m_bitmap.location(), m_bitmap.size_in_bytes());
+kfree(m_bitmap.location());
+}
kernel/src/memory/UserVM.h (new file, 23 lines)
@@ -0,0 +1,23 @@
+#pragma once
+#include <luna/Bitmap.h>
+#include <luna/OwnedPtr.h>
+#include <luna/Result.h>
+
+class UserVM
+{
+public:
+UserVM(void* base, usize size);
+~UserVM();
+
+Result<u64> alloc_one_page();
+Result<u64> alloc_several_pages(usize count);
+
+Result<void> free_one_page(u64 address);
+Result<void> free_several_pages(u64 address, usize count);
+
+static Result<OwnedPtr<UserVM>> try_create();
+
+private:
+Result<bool> try_expand(usize size = 160);
+Bitmap m_bitmap;
+};
@@ -13,48 +13,41 @@ Result<u64> sys_allocate_memory(Registers*, SyscallArgs args)
 usize size = (usize)args[0];
 int flags = (int)args[1];

-if (size != ARCH_PAGE_SIZE) return err(EINVAL);
 if (flags < 0) return err(EINVAL);

 if (size == 0) return 0;

+size = align_up<ARCH_PAGE_SIZE>(size);
+
 Thread* current = Scheduler::current();
-if (!current->heap_bitmap.initialized())
-{
-void* bitmap_location = (void*)TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite));
-current->heap_bitmap.initialize(bitmap_location, ARCH_PAGE_SIZE);
-current->heap_bitmap.clear(false);
-}
-
-u64 index = TRY(Result<u64>::from_option(current->heap_bitmap.find_and_toggle(false), ENOMEM));
+u64 address = TRY(current->vm_allocator->alloc_several_pages(size / ARCH_PAGE_SIZE));

-u64 address = USERSPACE_HEAP_BASE + (index * ARCH_PAGE_SIZE);
-
 int mmu_flags = MMU::User | MMU::NoExecute;
 if (flags & PROT_WRITE) mmu_flags |= MMU::ReadWrite;
 if (flags & PROT_EXEC) mmu_flags &= ~MMU::NoExecute;
+if (flags == PROT_NONE) mmu_flags = MMU::NoExecute;

-kdbgln("allocate_memory: allocating memory at %#lx", address);
+kdbgln("allocate_memory: allocating memory at %#lx, size=%zu", address, size);

-return MemoryManager::alloc_at(address, 1, mmu_flags);
+return MemoryManager::alloc_at(address, size / ARCH_PAGE_SIZE, mmu_flags);
 }

 Result<u64> sys_deallocate_memory(Registers*, SyscallArgs args)
 {
 u64 address = (u64)args[0];
+usize size = (usize)args[1];
+
+if (size == 0) return 0;
+
+size = align_up<ARCH_PAGE_SIZE>(size);
+
 Thread* current = Scheduler::current();
-if (!current->heap_bitmap.initialized()) return err(EFAULT);

-u64 index = (address - USERSPACE_HEAP_BASE) / ARCH_PAGE_SIZE;
+TRY(current->vm_allocator->free_several_pages(address, size / ARCH_PAGE_SIZE));

-if (!current->heap_bitmap.get(index)) return err(EFAULT);
+kdbgln("deallocate_memory: deallocating memory at %#lx, size=%zu", address, size);

-current->heap_bitmap.set(index, false);
+TRY(MemoryManager::unmap_owned(address, size / ARCH_PAGE_SIZE));

-kdbgln("deallocate_memory: deallocating memory at %#lx", address);
-
-TRY(MemoryManager::unmap_owned(address, 1));
-
 return { 0 };
 }
@@ -126,6 +126,8 @@ namespace Scheduler

 auto guard = make_scope_guard([&] { delete thread; });

+thread->vm_allocator = TRY(UserVM::try_create());
+
 PageDirectory* const directory = TRY(MMU::create_page_directory_for_userspace());

 auto directory_guard = make_scope_guard([&] {
@@ -178,11 +180,6 @@ namespace Scheduler

 if (!thread->is_kernel) MMU::delete_userspace_page_directory(thread->directory);

-if (thread->heap_bitmap.initialized())
-MemoryManager::unmap_owned_and_free_vm(
-(u64)thread->heap_bitmap.location(),
-get_blocks_from_size(thread->heap_bitmap.size_in_bytes(), ARCH_PAGE_SIZE));
-
 delete thread;
 }

@@ -1,8 +1,9 @@
 #pragma once

 #include "arch/MMU.h"
-#include <luna/Bitmap.h>
+#include "memory/UserVM.h"
 #include <luna/LinkedList.h>
+#include <luna/OwnedPtr.h>
 #include <luna/Result.h>
 #include <luna/Stack.h>

@@ -36,7 +37,7 @@ struct Thread : public LinkedListNode<Thread>
 Stack stack;
 Stack kernel_stack;

-Bitmap heap_bitmap;
+OwnedPtr<UserVM> vm_allocator;

 ThreadState state = ThreadState::Runnable;

@@ -10,6 +10,7 @@ set(SOURCES
 src/atexit.cpp
 src/ctype.cpp
 src/time.cpp
+src/sys/mman.cpp
 )

 if(${ARCH} STREQUAL "x86_64")
@@ -52,9 +52,16 @@ extern "C"
 /* Return the result of dividing two long long integers, including the remainder. */
 lldiv_t lldiv(long long, long long);

-void* malloc(size_t);
+/* Allocate heap memory. */
-void* calloc(size_t, size_t);
+void* malloc(size_t size);
-void* realloc(void*, size_t);
+
+/* Allocate zero-initialized heap memory. */
+void* calloc(size_t nmemb, size_t size);
+
+/* Resize allocated heap memory. */
+void* realloc(void* ptr, size_t size);
+
+/* Free heap memory. */
 void free(void*);

 /* Abort the program without performing any normal cleanup. */
libc/include/sys/mman.h (new file, 23 lines)
@@ -0,0 +1,23 @@
+#ifndef _LUNA_MMAN_H
+#define _LUNA_MMAN_H
+
+#include <bits/mmap-flags.h>
+#include <sys/types.h>
+
+#define PAGE_SIZE 4096UL
+
+#define MAP_FAILED (void*)-1
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+void* allocate_memory(size_t size, int flags);
+int deallocate_memory(void* address, size_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
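
A minimal usage sketch for the two new libc entry points declared above, calling them directly rather than going through malloc/free as the updated apps/app.c does. It assumes, based only on the MAP_FAILED definition in this header, that allocate_memory reports failure by returning MAP_FAILED and setting errno, and that deallocate_memory returns a negative value on error; the exact error convention is implemented by __errno_return and is not spelled out here.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* Ask the kernel for one page of readable and writable memory. */
        char* page = (char*)allocate_memory(PAGE_SIZE, PROT_READ | PROT_WRITE);
        if (page == MAP_FAILED) /* assumed failure value, see MAP_FAILED above */
        {
            printf("allocate_memory: %s\n", strerror(errno));
            return 1;
        }

        page[0] = 'e'; /* the returned region is mapped and directly usable */
        printf("first byte: %c\n", page[0]);

        /* Release the region; the size should match what was requested. */
        if (deallocate_memory(page, PAGE_SIZE) < 0) return 1;
        return 0;
    }
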
@@ -71,3 +71,9 @@ extern "C"
 return 0;
 }
 }
+
+void debug_log_impl(const char* format, va_list ap)
+{
+pure_cstyle_format(
+format, [](char c, void*) { console_write(&c, 1); }, nullptr, ap);
+}
@@ -1,4 +1,7 @@
+#define _LUNA_SYSTEM_ERROR_EXTENSIONS
+#include <errno.h>
 #include <limits.h>
+#include <luna/Heap.h>
 #include <luna/NumberParsing.h>
 #include <luna/Utf8.h>
 #include <stdlib.h>
@@ -118,4 +121,42 @@ extern "C"

 return code_points;
 }
+
+void* malloc(size_t size)
+{
+auto rc = malloc_impl(size);
+if (rc.has_error())
+{
+errno = rc.error();
+return nullptr;
+}
+return rc.value();
+}
+
+void* calloc(size_t nmemb, size_t size)
+{
+auto rc = calloc_impl(nmemb, size);
+if (rc.has_error())
+{
+errno = rc.error();
+return nullptr;
+}
+return rc.value();
+}
+
+void* realloc(void* ptr, size_t size)
+{
+auto rc = realloc_impl(ptr, size);
+if (rc.has_error())
+{
+errno = rc.error();
+return nullptr;
+}
+return rc.value();
+}
+
+void free(void* ptr)
+{
+free_impl(ptr);
+}
 }
libc/src/sys/mman.cpp (new file, 35 lines)
@@ -0,0 +1,35 @@
+#define _LUNA_SYSTEM_ERROR_EXTENSIONS
+#include <bits/errno-return.h>
+#include <luna/Heap.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+extern "C"
+{
+void* allocate_memory(size_t size, int flags)
+{
+long rc = syscall(SYS_allocate_memory, size, flags);
+__errno_return(rc, void*);
+}
+
+int deallocate_memory(void* address, size_t size)
+{
+long rc = syscall(SYS_deallocate_memory, address, size);
+__errno_return(rc, int);
+}
+}
+
+Result<void*> allocate_pages_impl(usize count)
+{
+long rc = syscall(SYS_allocate_memory, count * PAGE_SIZE, PROT_READ | PROT_WRITE);
+if (rc < 0) { return err((int)-rc); }
+return (void*)rc;
+}
+
+Result<void> release_pages_impl(void* address, usize count)
+{
+long rc = syscall(SYS_deallocate_memory, address, count * PAGE_SIZE);
+if (rc < 0) { return err((int)-rc); }
+return {};
+}
@@ -11,11 +11,11 @@ set(FREESTANDING_SOURCES
 src/SystemError.cpp
 src/Bitmap.cpp
 src/Stack.cpp
-src/Alloc.cpp
 src/OwnedStringView.cpp
 src/Utf8.cpp
 src/TarStream.cpp
 src/DebugLog.cpp
+src/Heap.cpp
 )

 set(SOURCES
@@ -34,6 +34,7 @@ target_compile_options(luna-freestanding PRIVATE -fno-asynchronous-unwind-tables
 target_compile_options(luna-freestanding PRIVATE -nostdlib -mcmodel=kernel)

 target_include_directories(luna-freestanding PUBLIC include/)
+target_include_directories(luna-freestanding PRIVATE ${LUNA_ROOT}/kernel/src)
 set_target_properties(luna-freestanding PROPERTIES CXX_STANDARD 20)

 add_library(luna ${SOURCES})
@@ -1,38 +1,19 @@
 #pragma once
+#include <luna/Heap.h>
+#include <luna/PlacementNew.h>
 #include <luna/Result.h>

-namespace std
-{
-struct nothrow_t
-{
-explicit nothrow_t() = default;
-};
-
-extern const nothrow_t nothrow;
-
-enum class align_val_t : usize
-{
-};
-};
-
-[[nodiscard]] void* raw_malloc(usize);
-void raw_free(void*);
-
-void* operator new(usize size, const std::nothrow_t&) noexcept;
-void* operator new[](usize size, const std::nothrow_t&) noexcept;
-void operator delete(void* ptr, usize size, std::align_val_t alignment) noexcept;
-
 template <typename T, class... Args> [[nodiscard]] Result<T*> make(Args... args)
 {
-T* const result = new (std::nothrow) T(args...);
+T* const result = (T*)TRY(malloc_impl(sizeof(T)));
-if (!result) return err(ENOMEM);
+new (result) T(args...);
 return result;
 }

 template <typename T> [[nodiscard]] Result<T*> make_array(usize count)
 {
-T* const result = new (std::nothrow) T[count];
+T* const result = (T*)TRY(calloc_impl(count, sizeof(T)));
-if (!result) return err(ENOMEM);
+new (result) T[count];
 return result;
 }

@@ -39,6 +39,10 @@ class Bitmap

 Option<usize> find_and_toggle(bool value, usize begin = 0);

+Option<usize> find_region(bool value, usize count, usize begin = 0) const;
+
+Option<usize> find_and_toggle_region(bool value, usize count, usize begin = 0);
+
 void clear(bool value);
 void clear_region(usize start, usize bits, bool value);

luna/include/luna/Heap.h (new file, 30 lines)
@@ -0,0 +1,30 @@
+#pragma once
+#include <luna/Result.h>
+
+namespace std
+{
+struct nothrow_t
+{
+explicit nothrow_t() = default;
+};
+
+extern const nothrow_t nothrow;
+
+enum class align_val_t : usize
+{
+};
+};
+
+void* operator new(usize size, const std::nothrow_t&) noexcept;
+void* operator new[](usize size, const std::nothrow_t&) noexcept;
+void operator delete(void* ptr, usize size, std::align_val_t alignment) noexcept;
+
+extern Result<void*> allocate_pages_impl(usize count);
+extern Result<void> release_pages_impl(void* address, usize count);
+
+Result<void*> malloc_impl(usize size, bool should_scrub = true);
+Result<void*> calloc_impl(usize nmemb, usize size);
+Result<void*> realloc_impl(void* ptr, usize size);
+Result<void> free_impl(void* ptr);
+
+void dump_heap_usage();
@@ -7,6 +7,11 @@ template <typename T> class SharedPtr
 template <typename T> class OwnedPtr
 {
 public:
+OwnedPtr()
+{
+m_ptr = nullptr;
+}
+
 OwnedPtr(T* ptr)
 {
 m_ptr = ptr;
@@ -25,6 +30,20 @@ template <typename T> class OwnedPtr
 other.m_ptr = nullptr;
 }

+OwnedPtr<T>& operator=(const OwnedPtr<T>& other) = delete;
+
+OwnedPtr<T>& operator=(OwnedPtr<T>&& other)
+{
+if (&other == this) return *this;
+
+if (m_ptr) delete m_ptr;
+
+m_ptr = other.m_ptr;
+other.m_ptr = nullptr;
+
+return *this;
+}
+
 T* ptr() const
 {
 return m_ptr;
@@ -30,6 +30,12 @@ template <typename T> class SharedPtr
 using RefCount = __detail::RefCount;

 public:
+SharedPtr()
+{
+m_ptr = nullptr;
+m_ref_count = nullptr;
+}
+
 SharedPtr(T* ptr, RefCount* ref_count) : m_ptr(ptr), m_ref_count(ref_count)
 {
 }
@@ -18,7 +18,7 @@ class Utf8StringDecoder
 // The caller must ensure that 'buf' is at least code_points() + a NULL wide.
 Result<void> decode(wchar_t* buf) const;

-Result<void> decode(wchar_t* buf, size_t max) const;
+Result<void> decode(wchar_t* buf, usize max) const;

 private:
 const char* m_str;
@@ -40,7 +40,7 @@ class Utf8StringEncoder
 // The caller must ensure that 'buf' is at least byte_length() + a NULL wide.
 Result<void> encode(char* buf) const;

-Result<void> encode(char* buf, size_t max) const;
+Result<void> encode(char* buf, usize max) const;

 private:
 const wchar_t* m_str;
@@ -1,38 +0,0 @@
-#include <luna/Alloc.h>
-
-#ifndef USE_FREESTANDING
-#include <stdlib.h>
-#endif
-
-[[nodiscard]] void* raw_malloc(usize size)
-{
-#ifdef USE_FREESTANDING
-char* const rc = new (std::nothrow) char[size];
-return (void*)rc;
-#else
-// return malloc(size);
-(void)size;
-return NULL;
-#endif
-}
-
-void raw_free(void* ptr)
-{
-#ifdef USE_FREESTANDING
-char* const arr = (char*)ptr;
-delete[] arr;
-#else
-// return free(ptr);
-(void)ptr;
-#endif
-}
-
-void operator delete(void* ptr, usize size, std::align_val_t) noexcept
-{
-#ifdef USE_FREESTANDING
-operator delete(ptr, size);
-#else
-(void)ptr;
-(void)size;
-#endif
-}
@@ -132,3 +132,38 @@ Option<usize> Bitmap::find_and_toggle(bool value, usize begin)
 set(index, !value);
 return index;
 }
+
+Option<usize> Bitmap::find_region(bool value, usize count, usize begin) const
+{
+// FIXME: Optimize this using bit and byte manipulation.
+u64 region_bits_found = 0;
+u64 region_start = 0;
+
+for (u64 index = begin; index < m_size_in_bytes * 8; index++)
+{
+if (get(index) != value)
+{
+region_bits_found = 0;
+continue;
+}
+
+if (region_bits_found == 0)
+{
+region_start = index;
+region_bits_found++;
+}
+else
+region_bits_found++;
+
+if (region_bits_found == count) return region_start;
+}
+
+return {};
+}
+
+Option<usize> Bitmap::find_and_toggle_region(bool value, usize count, usize begin)
+{
+usize index = TRY(find_region(value, count, begin));
+clear_region(index, count, !value);
+return index;
+}
@@ -68,7 +68,7 @@ extern "C"
 {
 const usize len = strlen(str);

-char* dest = (char*)raw_malloc(len + 1);
+char* dest = (char*)malloc_impl(len + 1).value_or(nullptr);
 if (!dest) return nullptr;

 memcpy(dest, str, len + 1);
luna/src/Heap.cpp (new file, 400 lines)
@@ -0,0 +1,400 @@
+#include <luna/Alignment.h>
+#include <luna/Alloc.h>
+#include <luna/CString.h>
+#include <luna/DebugLog.h>
+#include <luna/Heap.h>
+#include <luna/LinkedList.h>
+#include <luna/SafeArithmetic.h>
+#include <luna/ScopeGuard.h>
+#include <luna/SystemError.h>
+
+#ifdef USE_FREESTANDING
+#include "arch/MMU.h"
+#define PAGE_SIZE ARCH_PAGE_SIZE
+#else
+#include <sys/mman.h>
+#endif
+
+namespace std
+{
+const nothrow_t nothrow;
+}
+
+static constexpr int BLOCK_USED = 1 << 0;
+static constexpr int BLOCK_START_MEM = 1 << 1;
+static constexpr int BLOCK_END_MEM = 1 << 2;
+
+static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
+static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
+
+static constexpr u8 KMALLOC_SCRUB_BYTE = 0xac;
+static constexpr u8 KFREE_SCRUB_BYTE = 0xde;
+
+static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
+
+struct HeapBlock : LinkedListNode<HeapBlock>
+{
+usize req_size;
+usize full_size;
+int status;
+usize magic;
+};
+
+static_assert(sizeof(HeapBlock) == 48UL);
+
+static const isize HEAP_BLOCK_SIZE = 48;
+
+static LinkedList<HeapBlock> heap;
+
+// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of
+// pages.
+static usize get_pages_for_allocation(usize bytes)
+{
+usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
+if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
+return pages;
+}
+
+static bool is_block_free(HeapBlock* block)
+{
+return !(block->status & BLOCK_USED);
+}
+
+static usize space_available(HeapBlock* block)
+{
+expect(!is_block_free(block), "Attempting to split a free block");
+return block->full_size - block->req_size;
+}
+
+// The heap block is stored right behind a memory block.
+static HeapBlock* get_heap_block_for_pointer(void* ptr)
+{
+return (HeapBlock*)offset_ptr(ptr, -HEAP_BLOCK_SIZE);
+}
+
+static void* get_pointer_from_heap_block(HeapBlock* block)
+{
+return (void*)offset_ptr(block, HEAP_BLOCK_SIZE);
+}
+
+static usize get_fair_offset_to_split_at(HeapBlock* block, usize min)
+{
+usize available = space_available(block);
+
+available -= min; // reserve at least min size for the new block.
+
+available -= (available /
+2); // reserve half of the rest for the new block, while still leaving another half for the old one.
+
+available = align_down<16>(available); // Everything has to be aligned on a 16-byte boundary
+
+return available + block->req_size;
+}
+
+static Option<HeapBlock*> split(HeapBlock* block, usize size)
+{
+const usize available = space_available(block); // How much space can we steal from this block?
+const usize old_size =
+block->full_size; // Save the old value of this variable since we are going to use it after modifying it
+
+if (available < (size + sizeof(HeapBlock)))
+return {}; // This block hasn't got enough free space to hold the requested size.
+
+const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
+block->full_size = offset; // shrink the old block to fit this offset
+
+HeapBlock* new_block = offset_ptr(block, offset + sizeof(HeapBlock));
+
+new_block->magic = BLOCK_MAGIC;
+new_block->status = (block->status & BLOCK_END_MEM) ? BLOCK_END_MEM : 0;
+new_block->full_size = old_size - (offset + sizeof(HeapBlock));
+heap.add_after(block, new_block);
+
+block->status &= ~BLOCK_END_MEM; // this block is no longer the last block in its memory range
+
+return new_block;
+}
+
+static Result<void> combine_forward(HeapBlock* block)
+{
+// This block ends a memory range, cannot be combined with blocks outside its range.
+if (block->status & BLOCK_END_MEM) return {};
+
+// The caller needs to ensure there is a next block.
+HeapBlock* const next = heap.next(block).value();
+// This block starts a memory range, cannot be combined with blocks outside its range.
+if (next->status & BLOCK_START_MEM) return {};
+
+heap.remove(next);
+next->magic = BLOCK_DEAD;
+
+if (next->status & BLOCK_END_MEM)
+{
+if (next->status & BLOCK_START_MEM)
+{
+const usize pages = get_blocks_from_size(next->full_size + sizeof(HeapBlock), PAGE_SIZE);
+TRY(release_pages_impl(next, pages));
+return {};
+}
+else
+block->status |= BLOCK_END_MEM;
+}
+
+block->full_size += next->full_size + sizeof(HeapBlock);
+
+return {};
+}
+
+static Result<HeapBlock*> combine_backward(HeapBlock* block)
+{
+// This block starts a memory range, cannot be combined with blocks outside its range.
+if (block->status & BLOCK_START_MEM) return block;
+
+// The caller needs to ensure there is a last block.
+HeapBlock* const last = heap.previous(block).value();
+// This block ends a memory range, cannot be combined with blocks outside its range.
+if (last->status & BLOCK_END_MEM) return block;
+heap.remove(block);
+block->magic = BLOCK_DEAD;
+
+if (block->status & BLOCK_END_MEM)
+{
+if (block->status & BLOCK_START_MEM)
+{
+const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
+TRY(release_pages_impl(block, pages));
+return last;
+}
+else
+last->status |= BLOCK_END_MEM;
+}
+
+last->full_size += block->full_size + sizeof(HeapBlock);
+
+return last;
+}
+
+Result<void*> malloc_impl(usize size, bool should_scrub)
+{
+if (!size) return (void*)BLOCK_MAGIC;
+
+size = align_up<16>(size);
+
+Option<HeapBlock*> block = heap.first();
+while (block.has_value())
+{
+HeapBlock* const current = block.value();
+// Trying to find a free block...
+if (is_block_free(current))
+{
+if (current->full_size < size)
+{
+block = heap.next(current);
+continue;
+}
+break; // We found a free block that's big enough!!
+}
+auto rc = split(current, size);
+if (rc.has_value())
+{
+block = rc.value(); // We managed to get a free block from a larger used block!!
+break;
+}
+block = heap.next(current);
+}
+
+if (!block.has_value()) // No free blocks, let's allocate a new one
+{
+usize pages = get_pages_for_allocation(size + sizeof(HeapBlock));
+HeapBlock* const current = (HeapBlock*)TRY(allocate_pages_impl(pages));
+
+current->full_size = (pages * PAGE_SIZE) - sizeof(HeapBlock);
+current->magic = BLOCK_MAGIC;
+current->status = BLOCK_START_MEM | BLOCK_END_MEM;
+heap.append(current);
+
+block = current;
+}
+
+HeapBlock* const current = block.value();
+
+current->req_size = size;
+current->status |= BLOCK_USED;
+
+if (should_scrub) { memset(get_pointer_from_heap_block(current), KMALLOC_SCRUB_BYTE, size); }
+
+return get_pointer_from_heap_block(current);
+}
+
+Result<void> free_impl(void* ptr)
+{
+if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
+if (!ptr) return {};
+
+HeapBlock* block = get_heap_block_for_pointer(ptr);
+
+if (block->magic != BLOCK_MAGIC)
+{
+if (block->magic == BLOCK_DEAD) { dbgln("ERROR: Attempt to free memory at %p, which was already freed", ptr); }
+else
+dbgln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
+
+return err(EFAULT);
+}
+
+if (is_block_free(block))
+{
+dbgln("ERROR: Attempt to free memory at %p, which was already freed", ptr);
+return err(EFAULT);
+}
+else
+block->status &= ~BLOCK_USED;
+
+memset(ptr, KFREE_SCRUB_BYTE, block->req_size);
+
+auto maybe_next = heap.next(block);
+if (maybe_next.has_value() && is_block_free(maybe_next.value()))
+{
+// The next block is also free, thus we can merge!
+TRY(combine_forward(block));
+}
+
+auto maybe_last = heap.previous(block);
+if (maybe_last.has_value() && is_block_free(maybe_last.value()))
+{
+// The last block is also free, thus we can merge!
+block = TRY(combine_backward(block));
+}
+
+if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
+{
+heap.remove(block);
+const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
+TRY(release_pages_impl(block, pages));
+}
+
+return {};
+}
+
+Result<void*> realloc_impl(void* ptr, usize size)
+{
+if (!ptr) return malloc_impl(size);
+if (ptr == (void*)BLOCK_MAGIC) return malloc_impl(size);
+if (!size)
+{
+TRY(free_impl(ptr));
+return (void*)BLOCK_MAGIC;
+}
+
+HeapBlock* const block = get_heap_block_for_pointer(ptr);
+
+if (block->magic != BLOCK_MAGIC)
+{
+if (block->magic == BLOCK_DEAD)
+{
+dbgln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
+}
+else
+dbgln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
+
+return err(EFAULT);
+}
+
+size = align_up<16>(size);
+
+if (is_block_free(block))
+{
+dbgln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
+return err(EFAULT);
+}
+
+if (block->full_size >= size)
+{
+// This block is already large enough!
+// FIXME: Scrub this if necessary.
+block->req_size = size;
+return ptr;
+}
+
+usize old_size = block->req_size;
+
+void* const new_ptr = TRY(malloc_impl(size, false));
+memcpy(new_ptr, ptr, old_size > size ? size : old_size);
+TRY(free_impl(ptr));
+
+if (old_size < size) { memset(offset_ptr(new_ptr, old_size), KMALLOC_SCRUB_BYTE, size - old_size); }
+
+return new_ptr;
+}
+
+Result<void*> calloc_impl(usize nmemb, usize size)
+{
+const usize realsize = TRY(safe_mul(nmemb, size));
+void* const ptr = TRY(malloc_impl(realsize, false));
+return memset(ptr, 0, realsize);
+}
+
+void dump_heap_usage()
+{
+dbgln("-- Dumping usage stats for heap:");
+if (!heap.count())
+{
+dbgln("- Heap is not currently being used");
+return;
+}
+usize alloc_total = 0;
+usize alloc_used = 0;
+auto block = heap.first();
+while (block.has_value())
+{
+HeapBlock* current = block.value();
+if (is_block_free(current))
+{
+dbgln("- Available block (%p), of size %zu (%s%s)", (void*)current, current->full_size,
+current->status & BLOCK_START_MEM ? "b" : "-", current->status & BLOCK_END_MEM ? "e" : "-");
+alloc_total += current->full_size + sizeof(HeapBlock);
+}
+else
+{
+dbgln("- Used block (%p), of size %zu, of which %zu bytes are being used (%s%s)", (void*)current,
+current->full_size, current->req_size, current->status & BLOCK_START_MEM ? "b" : "-",
+current->status & BLOCK_END_MEM ? "e" : "-");
+alloc_total += current->full_size + sizeof(HeapBlock);
+alloc_used += current->req_size;
+}
+block = heap.next(current);
+}
+
+dbgln("-- Total memory allocated for heap: %zu bytes", alloc_total);
+dbgln("-- Heap memory in use: %zu bytes", alloc_used);
+}
+
+void* operator new(usize size, const std::nothrow_t&) noexcept
+{
+return malloc_impl(size).value_or(nullptr);
+}
+
+void* operator new[](usize size, const std::nothrow_t&) noexcept
+{
+return malloc_impl(size).value_or(nullptr);
+}
+
+void operator delete(void* p) noexcept
+{
+free_impl(p);
+}
+
+void operator delete[](void* p) noexcept
+{
+free_impl(p);
+}
+
+void operator delete(void* p, usize) noexcept
+{
+free_impl(p);
+}
+
+void operator delete[](void* p, usize) noexcept
+{
+free_impl(p);
+}
@@ -23,7 +23,7 @@ OwnedStringView::OwnedStringView(char* c_str)

 OwnedStringView::~OwnedStringView()
 {
-if (m_string) raw_free(m_string);
+if (m_string) free_impl(m_string);
 }

 Result<OwnedStringView> OwnedStringView::clone() const
@@ -146,7 +146,7 @@ Result<usize> Utf8StringDecoder::code_points() const
 return len;
 }

-Result<void> Utf8StringDecoder::decode(wchar_t* buf, size_t max) const
+Result<void> Utf8StringDecoder::decode(wchar_t* buf, usize max) const
 {
 const char* it = m_str;

@@ -165,7 +165,7 @@ Result<void> Utf8StringDecoder::decode(wchar_t* buf, size_t max) const

 Result<void> Utf8StringDecoder::decode(wchar_t* buf) const
 {
-return decode(buf, (size_t)-1);
+return decode(buf, (usize)-1);
 }

 Utf8StringEncoder::Utf8StringEncoder(const wchar_t* str) : m_str(str), m_code_points(wcslen(str))
@@ -186,7 +186,7 @@ Result<usize> Utf8StringEncoder::byte_length() const
 return len;
 }

-Result<void> Utf8StringEncoder::encode(char* buf, size_t max) const
+Result<void> Utf8StringEncoder::encode(char* buf, usize max) const
 {
 const wchar_t* it = m_str;

@@ -206,7 +206,7 @@ Result<void> Utf8StringEncoder::encode(char* buf, size_t max) const

 Result<void> Utf8StringEncoder::encode(char* buf) const
 {
-return encode(buf, (size_t)-1);
+return encode(buf, (usize)-1);
 }

 Utf8StateDecoder::Utf8StateDecoder() : m_state_len(0), m_state_index(0)