libluna: Move get_blocks_from_size to a new header and call it ceil_div instead
All checks were successful
continuous-integration/drone/push Build is passing
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
parent
917203ef11
commit
b63a8ff245
@ -7,6 +7,7 @@
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/Alloc.h>
|
||||
#include <luna/CString.h>
|
||||
#include <luna/Common.h>
|
||||
#include <luna/ScopeGuard.h>
|
||||
|
||||
static bool can_execute_segment(u32 flags)
|
||||
@ -113,14 +114,14 @@ Result<u64> ELFLoader::load(AddressSpace* space)
|
||||
if (can_write_segment(program_header.p_flags)) prot |= PROT_WRITE;
|
||||
if (can_execute_segment(program_header.p_flags)) prot |= PROT_EXEC;
|
||||
|
||||
if (!TRY(space->test_and_alloc_region(
|
||||
base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), prot,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, 0, true)))
|
||||
if (!TRY(space->test_and_alloc_region(base_vaddr,
|
||||
ceil_div(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), prot,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, 0, true)))
|
||||
return err(ENOMEM);
|
||||
|
||||
// Allocate physical memory for the segment
|
||||
TRY(MemoryManager::alloc_at(
|
||||
base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), flags));
|
||||
TRY(MemoryManager::alloc_at(base_vaddr, ceil_div(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE),
|
||||
flags));
|
||||
|
||||
// Zero out unused memory (before the start of the segment)
|
||||
memset((void*)base_vaddr, 0, vaddr_diff);
|
||||
|
@ -6,7 +6,7 @@
|
||||
#include "memory/MemoryManager.h"
|
||||
#include "thread/Thread.h"
|
||||
#include <bits/modes.h>
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/Common.h>
|
||||
|
||||
TarStream g_initrd;
|
||||
extern const BOOTBOOT bootboot;
|
||||
@ -47,7 +47,7 @@ Result<void> InitRD::populate_vfs()
|
||||
}
|
||||
|
||||
// Now we don't need the original initrd anymore
|
||||
MemoryManager::free_frames(bootboot.initrd_ptr, get_blocks_from_size(bootboot.initrd_size, ARCH_PAGE_SIZE));
|
||||
MemoryManager::free_frames(bootboot.initrd_ptr, ceil_div(bootboot.initrd_size, ARCH_PAGE_SIZE));
|
||||
|
||||
return {};
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
#include "fs/ext2/FileSystem.h"
|
||||
#include "fs/ext2/Inode.h"
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/Common.h>
|
||||
|
||||
static VFS::InodeType vfs_type_from_ext2_type(mode_t mode)
|
||||
{
|
||||
@ -114,7 +114,7 @@ namespace Ext2
|
||||
fs->m_host_device = host_device;
|
||||
|
||||
fs->m_block_size = 1024 << fs->m_superblock.log_block_size;
|
||||
fs->m_block_groups = get_blocks_from_size(fs->m_superblock.nr_blocks, fs->m_superblock.blocks_per_block_group);
|
||||
fs->m_block_groups = ceil_div(fs->m_superblock.nr_blocks, fs->m_superblock.blocks_per_block_group);
|
||||
|
||||
#ifdef EXT2_DEBUG
|
||||
kdbgln("ext2: Mounting new Ext2 file system, block size=%lu, blocks=%u, inodes=%u, block group=(%u blocks, %u "
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include "memory/MemoryMap.h"
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/Bitmap.h>
|
||||
#include <luna/Common.h>
|
||||
#include <luna/ScopeGuard.h>
|
||||
#include <luna/Spinlock.h>
|
||||
#include <luna/SystemError.h>
|
||||
@ -40,11 +41,11 @@ namespace MemoryManager
|
||||
Result<void> protect_kernel_sections()
|
||||
{
|
||||
const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
|
||||
const usize rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE);
|
||||
const usize rodata_pages = ceil_div(rodata_size, ARCH_PAGE_SIZE);
|
||||
TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
|
||||
|
||||
const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
|
||||
const usize data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE);
|
||||
const usize data_pages = ceil_div(data_size, ARCH_PAGE_SIZE);
|
||||
TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
|
||||
|
||||
return {};
|
||||
@ -67,7 +68,7 @@ namespace MemoryManager
|
||||
// We store our frame bitmap at the beginning of the largest free memory block.
|
||||
char* const frame_bitmap_addr = (char*)largest_free_entry.ptr();
|
||||
|
||||
const usize frame_bitmap_size = get_blocks_from_size(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);
|
||||
const usize frame_bitmap_size = ceil_div(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);
|
||||
|
||||
// This should never happen, unless memory is very fragmented. Usually there is always a very big block of
|
||||
// usable memory and then some tiny blocks around it.
|
||||
@ -96,7 +97,7 @@ namespace MemoryManager
|
||||
}
|
||||
|
||||
// Make sure that the physical frames used by the bitmap aren't handed out to anyone else.
|
||||
lock_frames(largest_free_entry.address(), get_blocks_from_size(frame_bitmap_size, ARCH_PAGE_SIZE));
|
||||
lock_frames(largest_free_entry.address(), ceil_div(frame_bitmap_size, ARCH_PAGE_SIZE));
|
||||
}
|
||||
|
||||
void init()
|
||||
@ -526,7 +527,7 @@ namespace MemoryManager
|
||||
|
||||
uintptr_t diff = address - page;
|
||||
|
||||
usize pages = get_blocks_from_size(size + diff, ARCH_PAGE_SIZE);
|
||||
usize pages = ceil_div(size + diff, ARCH_PAGE_SIZE);
|
||||
|
||||
while (pages--)
|
||||
{
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <bits/mmap.h>
|
||||
#include <bits/open-flags.h>
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/Common.h>
|
||||
|
||||
constexpr uintptr_t USERSPACE_HEAP_BASE = 0x3000000;
|
||||
|
||||
@ -32,7 +33,7 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
|
||||
|
||||
if (!is_aligned<ARCH_PAGE_SIZE>(params.offset)) return err(EINVAL);
|
||||
|
||||
params.len = align_up<ARCH_PAGE_SIZE>(params.len);
|
||||
const usize pages = ceil_div(params.len, ARCH_PAGE_SIZE);
|
||||
|
||||
SharedMemory* shmem = nullptr;
|
||||
u64 shmid = 0;
|
||||
@ -41,12 +42,12 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
|
||||
if (!description)
|
||||
{
|
||||
params.offset = 0;
|
||||
shmid = TRY(SharedMemory::create(nullptr, 0, params.len / ARCH_PAGE_SIZE));
|
||||
shmid = TRY(SharedMemory::create(nullptr, 0, pages));
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((params.prot & PROT_WRITE) && !(description->flags & O_WRONLY)) return err(EACCES);
|
||||
shmid = TRY(description->inode->query_shared_memory(params.offset, params.len / ARCH_PAGE_SIZE));
|
||||
shmid = TRY(description->inode->query_shared_memory(params.offset, pages));
|
||||
}
|
||||
shmem = g_shared_memory_map.try_get_ref(shmid);
|
||||
shmem->refs++;
|
||||
@ -55,15 +56,13 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
|
||||
|
||||
u64 address;
|
||||
if (!params.addr)
|
||||
address = TRY(current->address_space->alloc_region(get_blocks_from_size(params.len, ARCH_PAGE_SIZE),
|
||||
params.prot, params.flags, params.offset, shmid));
|
||||
address = TRY(current->address_space->alloc_region(pages, params.prot, params.flags, params.offset, shmid));
|
||||
else
|
||||
{
|
||||
// FIXME: We should be more flexible if MAP_FIXED was not specified.
|
||||
address = align_down<ARCH_PAGE_SIZE>((u64)params.addr);
|
||||
if (!TRY(current->address_space->test_and_alloc_region(address,
|
||||
get_blocks_from_size(params.len, ARCH_PAGE_SIZE),
|
||||
params.prot, params.flags, params.offset, shmid)))
|
||||
if (!TRY(current->address_space->test_and_alloc_region(address, pages, params.prot, params.flags, params.offset,
|
||||
shmid)))
|
||||
return err(ENOMEM);
|
||||
}
|
||||
|
||||
@ -76,10 +75,10 @@ Result<u64> sys_mmap(Registers*, SyscallArgs args)
|
||||
kdbgln("mmap: mapping memory at %#lx, size=%zu", address, params.len);
|
||||
#endif
|
||||
|
||||
if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, params.len / ARCH_PAGE_SIZE)); }
|
||||
if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, pages)); }
|
||||
else
|
||||
{
|
||||
TRY(MemoryManager::alloc_at_zeroed(address, get_blocks_from_size(params.len, ARCH_PAGE_SIZE), mmu_flags));
|
||||
TRY(MemoryManager::alloc_at_zeroed(address, pages, mmu_flags));
|
||||
if (description) { TRY(description->inode->read((u8*)address, params.offset, params.len)); }
|
||||
}
|
||||
|
||||
@ -92,11 +91,11 @@ Result<u64> sys_munmap(Registers*, SyscallArgs args)
|
||||
usize size = (usize)args[1];
|
||||
|
||||
if (size == 0) return err(EINVAL);
|
||||
if (!is_aligned<ARCH_PAGE_SIZE>(size)) return err(EINVAL);
|
||||
if (!is_aligned<ARCH_PAGE_SIZE>(address)) return err(EINVAL);
|
||||
|
||||
Thread* current = Scheduler::current();
|
||||
|
||||
bool ok = TRY(current->address_space->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
|
||||
bool ok = TRY(current->address_space->free_region(address, ceil_div(size, ARCH_PAGE_SIZE)));
|
||||
|
||||
// POSIX says munmap should silently do nothing if the memory was not already mapped.
|
||||
if (!ok) return 0;
|
||||
@ -105,7 +104,7 @@ Result<u64> sys_munmap(Registers*, SyscallArgs args)
|
||||
kdbgln("munmap: unmapping memory at %#lx, size=%zu", address, size);
|
||||
#endif
|
||||
|
||||
TRY(MemoryManager::unmap_owned_if_possible(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
|
||||
TRY(MemoryManager::unmap_owned_if_possible(address, ceil_div(size, ARCH_PAGE_SIZE)));
|
||||
|
||||
return { 0 };
|
||||
}
|
||||
@ -120,7 +119,7 @@ Result<u64> sys_msync(Registers*, SyscallArgs args)
|
||||
|
||||
Thread* current = Scheduler::current();
|
||||
|
||||
TRY(current->address_space->sync_regions(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
|
||||
TRY(current->address_space->sync_regions(address, ceil_div(size, ARCH_PAGE_SIZE)));
|
||||
|
||||
return { 0 };
|
||||
}
|
||||
|
@ -30,16 +30,6 @@ static_assert(align_up<512>(598ul) == 1024ul);
|
||||
static_assert(align_up<64>(194ul) == 256ul);
|
||||
static_assert(align_up<32>(64ul) == 64ul);
|
||||
|
||||
// Number of fixed-size blocks needed to hold `value` units: any nonzero
// remainder counts as one extra (partially filled) block.
template <typename T> constexpr T get_blocks_from_size(T value, T block_size)
{
    // Bias the dividend by (block_size - 1) so truncating division rounds up.
    const T biased = value + (block_size - 1);
    return biased / block_size;
}
|
||||
|
||||
static_assert(get_blocks_from_size(40960, 4096) == 10);
|
||||
static_assert(get_blocks_from_size(194, 64) == 4);
|
||||
static_assert(get_blocks_from_size(2, 32) == 1);
|
||||
static_assert(get_blocks_from_size(0, 256) == 0);
|
||||
|
||||
// Offset a pointer by exactly <offset> bytes, no matter the type. Useful to avoid the quirks that come from C pointer
|
||||
// arithmetic.
|
||||
template <typename T, typename Offset> constexpr inline T* offset_ptr(T* ptr, Offset offset)
|
||||
|
23
libluna/include/luna/Common.h
Normal file
23
libluna/include/luna/Common.h
Normal file
@ -0,0 +1,23 @@
|
||||
/**
|
||||
* @file Common.h
|
||||
* @author apio (cloudapio.eu)
|
||||
* @brief Common utility functions.
|
||||
*
|
||||
* @copyright Copyright (c) 2023, the Luna authors.
|
||||
*
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
/**
|
||||
 * @brief Divide a by b, rounding the quotient up to the nearest integer (ceiling division).
|
||||
*
|
||||
* @tparam T The type of a and b.
|
||||
* @param a The dividend.
|
||||
* @param b The divisor.
|
||||
 * @return constexpr T The quotient of a divided by b, rounded up.
|
||||
*/
|
||||
// Ceiling division: divide a by b, rounding any nonzero remainder up.
// NOTE: the bias (a + b - 1) can overflow when a is near the type's maximum.
template <typename T> inline constexpr T ceil_div(T a, T b)
{
    const T biased = a + (b - 1);
    return biased / b;
}
|
@ -1,6 +1,7 @@
|
||||
#include <luna/Alignment.h>
|
||||
#include <luna/Alloc.h>
|
||||
#include <luna/CString.h>
|
||||
#include <luna/Common.h>
|
||||
#include <luna/DebugLog.h>
|
||||
#include <luna/Heap.h>
|
||||
#include <luna/LinkedList.h>
|
||||
@ -52,7 +53,7 @@ static Spinlock g_heap_lock;
|
||||
// of pages.
|
||||
static usize get_pages_for_allocation(usize bytes)
|
||||
{
|
||||
usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
|
||||
usize pages = ceil_div(bytes, PAGE_SIZE);
|
||||
if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
|
||||
return pages;
|
||||
}
|
||||
@ -154,7 +155,7 @@ static Result<void> combine_forward(HeapBlock* block)
|
||||
{
|
||||
if (next->status & BLOCK_START_MEM)
|
||||
{
|
||||
const usize pages = get_blocks_from_size(next->full_size + sizeof(HeapBlock), PAGE_SIZE);
|
||||
const usize pages = ceil_div(next->full_size + sizeof(HeapBlock), PAGE_SIZE);
|
||||
TRY(release_pages_impl(next, pages));
|
||||
return {};
|
||||
}
|
||||
@ -183,7 +184,7 @@ static Result<HeapBlock*> combine_backward(HeapBlock* block)
|
||||
{
|
||||
if (block->status & BLOCK_START_MEM)
|
||||
{
|
||||
const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
|
||||
const usize pages = ceil_div(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
|
||||
TRY(release_pages_impl(block, pages));
|
||||
return last;
|
||||
}
|
||||
@ -303,7 +304,7 @@ Result<void> free_impl(void* ptr)
|
||||
if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
|
||||
{
|
||||
heap.remove(block);
|
||||
const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
|
||||
const usize pages = ceil_div(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
|
||||
TRY(release_pages_impl(block, pages));
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user