Compare commits

...

3 Commits

Author SHA1 Message Date
1d92784608
luna, kernel: Replace some uses of char by truly 1-byte wide types
2023-01-16 19:52:34 +01:00
329e8ab182
luna/Heap: Scrub reallocations properly
2023-01-16 19:50:35 +01:00
1b807a4e06
arch/Timer: Make sure ARCH_TIMER_FREQ is a power of two
(avoid division and modulo, division is slow)
Fortunately, GCC will optimize divisions by powers of two to simple bitwise shifts :)
2023-01-16 19:43:05 +01:00
5 changed files with 28 additions and 16 deletions
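For context on the first commit: plain char is one byte wide, but its signedness is implementation-defined, whereas a fixed-width unsigned byte type behaves the same everywhere when raw memory is copied or scrubbed. A minimal sketch, assuming luna's u8 is an alias for uint8_t (equivalent to the u8 from <luna/Types.h>):

#include <cstdint>
using u8 = std::uint8_t; // assumption: matches luna's u8

char c = static_cast<char>(0xde); // may read back as -34 or as 222, depending on the target
u8 b = 0xde;                      // always 222
static_assert(sizeof(u8) == 1, "same size as char, but with a guaranteed unsigned representation");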

View File

@@ -2,8 +2,10 @@
#include "Log.h"
#include "arch/Serial.h"
#include "boot/bootboot.h"
+#include <luna/TypeTraits.h>
-// FIXME: Storing these values as unsigned integers doesn't allow for pre-epoch times.
+// NOTE: Storing these values as unsigned integers doesn't allow for pre-epoch times.
+// We are in 2023 anyway, not sure why anybody would want to set their computer's time to 1945.
static u64 timer_ticks = 0;
static u64 boot_timestamp;
@@ -134,8 +136,9 @@ namespace Timer
}
}
+static_assert(IsPowerOfTwo<usize, ARCH_TIMER_FREQ>, "ARCH_TIMER_FREQ must be a power of two");
bool should_invoke_scheduler()
{
// FIXME: Modulo is SLOW. We're calling this every tick.
return (timer_ticks % ARCH_TIMER_FREQ) == 0;
}
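The assertion added above backs up the reasoning in the third commit message: once ARCH_TIMER_FREQ is a power of two, GCC can lower the modulo in should_invoke_scheduler() to a bit mask (and a division to a shift). A hypothetical sketch of such a trait and of the identity it protects; the real IsPowerOfTwo lives in luna/TypeTraits.h and is not shown in this diff:

// Hypothetical stand-in for luna's IsPowerOfTwo, using the classic bit trick.
template <typename T, T Value>
inline constexpr bool IsPowerOfTwo = (Value != 0) && ((Value & (Value - 1)) == 0);

static_assert(IsPowerOfTwo<unsigned, 4> && !IsPowerOfTwo<unsigned, 5>);
static_assert((1234u % 4u) == (1234u & 3u));  // modulo by a power of two is a mask
static_assert((1234u / 4u) == (1234u >> 2u)); // division by a power of two is a shift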

View File

@@ -1,4 +1,4 @@
#pragma once
#include <luna/Types.h>
-const usize ARCH_TIMER_FREQ = 5;
+const usize ARCH_TIMER_FREQ = 4;

View File

@@ -433,7 +433,7 @@ namespace MemoryManager
uintptr_t user_ptr = (uintptr_t)user;
uintptr_t user_page = align_down<ARCH_PAGE_SIZE>(user_ptr);
-const char* kernel_ptr = (const char*)kernel;
+const u8* kernel_ptr = (const u8*)kernel;
// Userspace pointer not aligned on page boundary
if (user_ptr != user_page)
@@ -449,7 +449,7 @@
if (!validate_user_writable_page(user_ptr)) return false;
}
-*(char*)user_ptr = *kernel_ptr++;
+*(u8*)user_ptr = *kernel_ptr++;
user_ptr++;
}
@@ -461,7 +461,7 @@
uintptr_t user_ptr = (uintptr_t)user;
uintptr_t user_page = align_down<ARCH_PAGE_SIZE>(user_ptr);
-char* kernel_ptr = (char*)kernel;
+u8* kernel_ptr = (u8*)kernel;
// Userspace pointer not aligned on page boundary
if (user_ptr != user_page)
@@ -477,7 +477,7 @@
if (!validate_user_readable_page(user_ptr)) return false;
}
-*kernel_ptr++ = *(const char*)user_ptr;
+*kernel_ptr++ = *(const u8*)user_ptr;
user_ptr++;
}

View File

@@ -49,5 +49,5 @@ static_assert(get_blocks_from_size(0, 256) == 0);
// arithmetic.
template <typename T, typename Offset> constexpr T* offset_ptr(T* ptr, Offset offset)
{
-return (T*)((char*)ptr + offset);
+return (T*)((u8*)ptr + offset);
}
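A brief usage sketch of the helper above (hypothetical values, assuming it sits next to the template): the cast through a byte pointer makes the offset count in bytes rather than in units of sizeof(T):

struct Header { int a; int b; };
Header pair[2] = {};
Header* second = offset_ptr(&pair[0], sizeof(Header)); // equals &pair[1]; plain pointer arithmetic (&pair[0] + sizeof(Header)) would instead step over sizeof(Header) whole Header objects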

View File

@@ -27,8 +27,8 @@ static constexpr int BLOCK_END_MEM = 1 << 2;
static constexpr usize BLOCK_MAGIC = 0x6d616c6c6f63210a; // echo 'malloc!' | hexdump -C (includes a newline)
static constexpr usize BLOCK_DEAD = 0xdeaddeaddeaddead;
-static constexpr u8 KMALLOC_SCRUB_BYTE = 0xac;
-static constexpr u8 KFREE_SCRUB_BYTE = 0xde;
+static constexpr u8 MALLOC_SCRUB_BYTE = 0xac;
+static constexpr u8 FREE_SCRUB_BYTE = 0xde;
static constexpr usize MINIMUM_PAGES_PER_ALLOCATION = 4;
@@ -221,7 +221,7 @@ Result<void*> malloc_impl(usize size, bool should_scrub)
current->req_size = size;
current->status |= BLOCK_USED;
-if (should_scrub) { memset(get_pointer_from_heap_block(current), KMALLOC_SCRUB_BYTE, size); }
+if (should_scrub) { memset(get_pointer_from_heap_block(current), MALLOC_SCRUB_BYTE, size); }
return get_pointer_from_heap_block(current);
}
@@ -237,7 +237,7 @@ Result<void> free_impl(void* ptr)
{
if (block->magic == BLOCK_DEAD) { dbgln("ERROR: Attempt to free memory at %p, which was already freed", ptr); }
else
-dbgln("ERROR: Attempt to free memory at %p, which wasn't allocated with kmalloc", ptr);
+dbgln("ERROR: Attempt to free memory at %p, which wasn't allocated with malloc", ptr);
return err(EFAULT);
}
@@ -250,7 +250,7 @@ Result<void> free_impl(void* ptr)
else
block->status &= ~BLOCK_USED;
-memset(ptr, KFREE_SCRUB_BYTE, block->req_size);
+memset(ptr, FREE_SCRUB_BYTE, block->req_size);
auto maybe_next = heap.next(block);
if (maybe_next.has_value() && is_block_free(maybe_next.value()))
@@ -295,7 +295,7 @@ Result<void*> realloc_impl(void* ptr, usize size)
dbgln("ERROR: Attempt to realloc memory at %p, which was already freed", ptr);
}
else
-dbgln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with kmalloc", ptr);
+dbgln("ERROR: Attempt to realloc memory at %p, which wasn't allocated with malloc", ptr);
return err(EFAULT);
}
@@ -311,7 +311,16 @@ Result<void*> realloc_impl(void* ptr, usize size)
if (block->full_size >= size)
{
// This block is already large enough!
-// FIXME: Scrub this if necessary.
+if (size > block->req_size)
+{
+// If the new size is larger, scrub the newly allocated space.
+memset(offset_ptr(ptr, block->req_size), MALLOC_SCRUB_BYTE, size - block->req_size);
+}
+else if (size < block->req_size)
+{
+// If the new size is smaller, scrub the removed space as if it was freed.
+memset(offset_ptr(ptr, size), FREE_SCRUB_BYTE, block->req_size - size);
+}
block->req_size = size;
return ptr;
}
@@ -322,7 +331,7 @@ Result<void*> realloc_impl(void* ptr, usize size)
memcpy(new_ptr, ptr, old_size > size ? size : old_size);
TRY(free_impl(ptr));
-if (old_size < size) { memset(offset_ptr(new_ptr, old_size), KMALLOC_SCRUB_BYTE, size - old_size); }
+if (old_size < size) { memset(offset_ptr(new_ptr, old_size), MALLOC_SCRUB_BYTE, size - old_size); }
return new_ptr;
}
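Taken together, the scrubbing rule of the second commit can be summed up in a small standalone helper (illustrative only, not part of the diff; it assumes data points at the usable block payload and reuses memset, offset_ptr and the scrub constants from this file): whether the block is resized in place or moved, bytes past the old requested size end up as MALLOC_SCRUB_BYTE and bytes that are no longer owned end up as FREE_SCRUB_BYTE.

// Illustrative: when a block's requested size changes, only the delta gets rewritten.
static void scrub_resized_block(void* data, usize old_size, usize new_size)
{
    if (new_size > old_size)
        memset(offset_ptr(data, old_size), MALLOC_SCRUB_BYTE, new_size - old_size); // grown tail looks freshly allocated
    else if (new_size < old_size)
        memset(offset_ptr(data, new_size), FREE_SCRUB_BYTE, old_size - new_size);   // dropped tail looks freed
}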