From b63a8ff24591c12b997d8e36970cbcf6eab07211 Mon Sep 17 00:00:00 2001
From: apio
Date: Tue, 8 Aug 2023 11:58:33 +0200
Subject: [PATCH] libluna: Move get_blocks_from_size to a new header and call it ceil_div instead

---
 kernel/src/binfmt/ELF.cpp           | 11 ++++++-----
 kernel/src/fs/InitRD.cpp            |  4 ++--
 kernel/src/fs/ext2/FileSystem.cpp   |  4 ++--
 kernel/src/memory/MemoryManager.cpp | 11 ++++++-----
 kernel/src/sys/mmap.cpp             | 27 +++++++++++++--------------
 libluna/include/luna/Alignment.h    | 10 ----------
 libluna/include/luna/Common.h       | 23 +++++++++++++++++++++++
 libluna/src/Heap.cpp                |  9 +++++----
 8 files changed, 57 insertions(+), 42 deletions(-)
 create mode 100644 libluna/include/luna/Common.h

diff --git a/kernel/src/binfmt/ELF.cpp b/kernel/src/binfmt/ELF.cpp
index d5c0dc34..4ce2df14 100644
--- a/kernel/src/binfmt/ELF.cpp
+++ b/kernel/src/binfmt/ELF.cpp
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include <luna/Common.h>
 #include
 
 static bool can_execute_segment(u32 flags)
@@ -113,14 +114,14 @@ Result ELFLoader::load(AddressSpace* space)
             if (can_write_segment(program_header.p_flags)) prot |= PROT_WRITE;
             if (can_execute_segment(program_header.p_flags)) prot |= PROT_EXEC;
 
-            if (!TRY(space->test_and_alloc_region(
-                base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), prot,
-                MAP_ANONYMOUS | MAP_PRIVATE, 0, true)))
+            if (!TRY(space->test_and_alloc_region(base_vaddr,
+                                                  ceil_div(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), prot,
+                                                  MAP_ANONYMOUS | MAP_PRIVATE, 0, true)))
                 return err(ENOMEM);
 
             // Allocate physical memory for the segment
-            TRY(MemoryManager::alloc_at(
-                base_vaddr, get_blocks_from_size(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE), flags));
+            TRY(MemoryManager::alloc_at(base_vaddr, ceil_div(program_header.p_memsz + vaddr_diff, ARCH_PAGE_SIZE),
+                                        flags));
 
             // Zero out unused memory (before the start of the segment)
             memset((void*)base_vaddr, 0, vaddr_diff);
diff --git a/kernel/src/fs/InitRD.cpp b/kernel/src/fs/InitRD.cpp
index a083198b..026ad603 100644
--- a/kernel/src/fs/InitRD.cpp
+++ b/kernel/src/fs/InitRD.cpp
@@ -6,7 +6,7 @@
 #include "memory/MemoryManager.h"
 #include "thread/Thread.h"
 #include
-#include
+#include <luna/Common.h>
 
 TarStream g_initrd;
 extern const BOOTBOOT bootboot;
@@ -47,7 +47,7 @@ Result InitRD::populate_vfs()
     }
 
     // Now we don't need the original initrd anymore
-    MemoryManager::free_frames(bootboot.initrd_ptr, get_blocks_from_size(bootboot.initrd_size, ARCH_PAGE_SIZE));
+    MemoryManager::free_frames(bootboot.initrd_ptr, ceil_div(bootboot.initrd_size, ARCH_PAGE_SIZE));
 
     return {};
 }
diff --git a/kernel/src/fs/ext2/FileSystem.cpp b/kernel/src/fs/ext2/FileSystem.cpp
index 6d8b936a..60701756 100644
--- a/kernel/src/fs/ext2/FileSystem.cpp
+++ b/kernel/src/fs/ext2/FileSystem.cpp
@@ -1,6 +1,6 @@
 #include "fs/ext2/FileSystem.h"
 #include "fs/ext2/Inode.h"
-#include
+#include <luna/Common.h>
 
 static VFS::InodeType vfs_type_from_ext2_type(mode_t mode)
 {
@@ -114,7 +114,7 @@ namespace Ext2
         fs->m_host_device = host_device;
 
         fs->m_block_size = 1024 << fs->m_superblock.log_block_size;
-        fs->m_block_groups = get_blocks_from_size(fs->m_superblock.nr_blocks, fs->m_superblock.blocks_per_block_group);
+        fs->m_block_groups = ceil_div(fs->m_superblock.nr_blocks, fs->m_superblock.blocks_per_block_group);
 
 #ifdef EXT2_DEBUG
         kdbgln("ext2: Mounting new Ext2 file system, block size=%lu, blocks=%u, inodes=%u, block group=(%u blocks, %u "
diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index abbdec06..4f925bcd 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -5,6 +5,7 @@
 #include "memory/MemoryMap.h"
 #include
 #include
+#include <luna/Common.h>
 #include
 #include
 #include
@@ -40,11 +41,11 @@ namespace MemoryManager
     Result protect_kernel_sections()
     {
         const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
-        const usize rodata_pages = get_blocks_from_size(rodata_size, ARCH_PAGE_SIZE);
+        const usize rodata_pages = ceil_div(rodata_size, ARCH_PAGE_SIZE);
         TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
 
         const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
-        const usize data_pages = get_blocks_from_size(data_size, ARCH_PAGE_SIZE);
+        const usize data_pages = ceil_div(data_size, ARCH_PAGE_SIZE);
         TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
 
         return {};
@@ -67,7 +68,7 @@ namespace MemoryManager
 
         // We store our frame bitmap at the beginning of the largest free memory block.
         char* const frame_bitmap_addr = (char*)largest_free_entry.ptr();
-        const usize frame_bitmap_size = get_blocks_from_size(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);
+        const usize frame_bitmap_size = ceil_div(physical_address_space_size / ARCH_PAGE_SIZE, 8UL);
 
         // This should never happen, unless memory is very fragmented. Usually there is always a very big block of
         // usable memory and then some tiny blocks around it.
@@ -96,7 +97,7 @@ namespace MemoryManager
         }
 
         // Make sure that the physical frames used by the bitmap aren't handed out to anyone else.
-        lock_frames(largest_free_entry.address(), get_blocks_from_size(frame_bitmap_size, ARCH_PAGE_SIZE));
+        lock_frames(largest_free_entry.address(), ceil_div(frame_bitmap_size, ARCH_PAGE_SIZE));
     }
 
     void init()
@@ -526,7 +527,7 @@ namespace MemoryManager
 
         uintptr_t diff = address - page;
 
-        usize pages = get_blocks_from_size(size + diff, ARCH_PAGE_SIZE);
+        usize pages = ceil_div(size + diff, ARCH_PAGE_SIZE);
 
         while (pages--)
         {
diff --git a/kernel/src/sys/mmap.cpp b/kernel/src/sys/mmap.cpp
index 80ea465d..9d9f2566 100644
--- a/kernel/src/sys/mmap.cpp
+++ b/kernel/src/sys/mmap.cpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <luna/Common.h>
 
 constexpr uintptr_t USERSPACE_HEAP_BASE = 0x3000000;
 
@@ -32,7 +33,7 @@ Result sys_mmap(Registers*, SyscallArgs args)
 
     if (!is_aligned(params.offset)) return err(EINVAL);
 
-    params.len = align_up(params.len);
+    const usize pages = ceil_div(params.len, ARCH_PAGE_SIZE);
 
     SharedMemory* shmem = nullptr;
     u64 shmid = 0;
@@ -41,12 +42,12 @@ Result sys_mmap(Registers*, SyscallArgs args)
         if (!description)
         {
             params.offset = 0;
-            shmid = TRY(SharedMemory::create(nullptr, 0, params.len / ARCH_PAGE_SIZE));
+            shmid = TRY(SharedMemory::create(nullptr, 0, pages));
         }
         else
         {
            if ((params.prot & PROT_WRITE) && !(description->flags & O_WRONLY)) return err(EACCES);
-            shmid = TRY(description->inode->query_shared_memory(params.offset, params.len / ARCH_PAGE_SIZE));
+            shmid = TRY(description->inode->query_shared_memory(params.offset, pages));
         }
         shmem = g_shared_memory_map.try_get_ref(shmid);
         shmem->refs++;
@@ -55,15 +56,13 @@ Result sys_mmap(Registers*, SyscallArgs args)
 
     u64 address;
     if (!params.addr)
-        address = TRY(current->address_space->alloc_region(get_blocks_from_size(params.len, ARCH_PAGE_SIZE),
-                                                           params.prot, params.flags, params.offset, shmid));
+        address = TRY(current->address_space->alloc_region(pages, params.prot, params.flags, params.offset, shmid));
    else
    {
        // FIXME: We should be more flexible if MAP_FIXED was not specified.
        address = align_down((u64)params.addr);
-        if (!TRY(current->address_space->test_and_alloc_region(address,
-                                                               get_blocks_from_size(params.len, ARCH_PAGE_SIZE),
-                                                               params.prot, params.flags, params.offset, shmid)))
+        if (!TRY(current->address_space->test_and_alloc_region(address, pages, params.prot, params.flags,
+                                                               params.offset, shmid)))
             return err(ENOMEM);
     }
 
@@ -76,10 +75,10 @@ Result sys_mmap(Registers*, SyscallArgs args)
     kdbgln("mmap: mapping memory at %#lx, size=%zu", address, params.len);
 #endif
 
-    if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, params.len / ARCH_PAGE_SIZE)); }
+    if (shmem) { TRY(shmem->map(address, mmu_flags, params.offset, pages)); }
     else
     {
-        TRY(MemoryManager::alloc_at_zeroed(address, get_blocks_from_size(params.len, ARCH_PAGE_SIZE), mmu_flags));
+        TRY(MemoryManager::alloc_at_zeroed(address, pages, mmu_flags));
         if (description) { TRY(description->inode->read((u8*)address, params.offset, params.len)); }
     }
 
@@ -92,11 +91,11 @@ Result sys_munmap(Registers*, SyscallArgs args)
     usize size = (usize)args[1];
 
     if (size == 0) return err(EINVAL);
-    if (!is_aligned(size)) return err(EINVAL);
+    if (!is_aligned(address)) return err(EINVAL);
 
     Thread* current = Scheduler::current();
 
-    bool ok = TRY(current->address_space->free_region(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
+    bool ok = TRY(current->address_space->free_region(address, ceil_div(size, ARCH_PAGE_SIZE)));
 
     // POSIX says munmap should silently do nothing if the memory was not already mapped.
     if (!ok) return 0;
 
@@ -105,7 +104,7 @@ Result sys_munmap(Registers*, SyscallArgs args)
     kdbgln("munmap: unmapping memory at %#lx, size=%zu", address, size);
 #endif
 
-    TRY(MemoryManager::unmap_owned_if_possible(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
+    TRY(MemoryManager::unmap_owned_if_possible(address, ceil_div(size, ARCH_PAGE_SIZE)));
 
     return { 0 };
 }
@@ -120,7 +119,7 @@ Result sys_msync(Registers*, SyscallArgs args)
 
     Thread* current = Scheduler::current();
 
-    TRY(current->address_space->sync_regions(address, get_blocks_from_size(size, ARCH_PAGE_SIZE)));
+    TRY(current->address_space->sync_regions(address, ceil_div(size, ARCH_PAGE_SIZE)));
 
     return { 0 };
 }
diff --git a/libluna/include/luna/Alignment.h b/libluna/include/luna/Alignment.h
index 2a34f1f3..1f7a65d8 100644
--- a/libluna/include/luna/Alignment.h
+++ b/libluna/include/luna/Alignment.h
@@ -30,16 +30,6 @@ static_assert(align_up<512>(598ul) == 1024ul);
 static_assert(align_up<64>(194ul) == 256ul);
 static_assert(align_up<32>(64ul) == 64ul);
 
-template <typename T> constexpr T get_blocks_from_size(T value, T block_size)
-{
-    return (value + (block_size - 1)) / block_size;
-}
-
-static_assert(get_blocks_from_size(40960, 4096) == 10);
-static_assert(get_blocks_from_size(194, 64) == 4);
-static_assert(get_blocks_from_size(2, 32) == 1);
-static_assert(get_blocks_from_size(0, 256) == 0);
-
 // Offset a pointer by exactly bytes, no matter the type. Useful to avoid the quirks that come from C pointer
 // arithmetic.
 template constexpr inline T* offset_ptr(T* ptr, Offset offset)
diff --git a/libluna/include/luna/Common.h b/libluna/include/luna/Common.h
new file mode 100644
index 00000000..6ed20e0a
--- /dev/null
+++ b/libluna/include/luna/Common.h
@@ -0,0 +1,23 @@
+/**
+ * @file Common.h
+ * @author apio (cloudapio.eu)
+ * @brief Common utility functions.
+ *
+ * @copyright Copyright (c) 2023, the Luna authors.
+ *
+ */
+
+#pragma once
+
+/**
+ * @brief Divide a by b, rounding the quotient up to the next integer.
+ *
+ * @tparam T The type of a and b.
+ * @param a The dividend.
+ * @param b The divisor.
+ * @return constexpr T The result of the operation.
+ */
+template <typename T> inline constexpr T ceil_div(T a, T b)
+{
+    return (a + (b - 1)) / b;
+}
diff --git a/libluna/src/Heap.cpp b/libluna/src/Heap.cpp
index 74133f0a..dbd34a1c 100644
--- a/libluna/src/Heap.cpp
+++ b/libluna/src/Heap.cpp
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include <luna/Common.h>
 #include
 #include
 #include
@@ -52,7 +53,7 @@ static Spinlock g_heap_lock;
 // of pages.
 static usize get_pages_for_allocation(usize bytes)
 {
-    usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
+    usize pages = ceil_div(bytes, PAGE_SIZE);
     if (pages < MINIMUM_PAGES_PER_ALLOCATION) pages = MINIMUM_PAGES_PER_ALLOCATION;
     return pages;
 }
@@ -154,7 +155,7 @@ static Result combine_forward(HeapBlock* block)
     {
         if (next->status & BLOCK_START_MEM)
         {
-            const usize pages = get_blocks_from_size(next->full_size + sizeof(HeapBlock), PAGE_SIZE);
+            const usize pages = ceil_div(next->full_size + sizeof(HeapBlock), PAGE_SIZE);
             TRY(release_pages_impl(next, pages));
             return {};
         }
@@ -183,7 +184,7 @@ static Result combine_backward(HeapBlock* block)
     {
         if (block->status & BLOCK_START_MEM)
         {
-            const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
+            const usize pages = ceil_div(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
             TRY(release_pages_impl(block, pages));
             return last;
         }
@@ -303,7 +304,7 @@ Result free_impl(void* ptr)
     if ((block->status & BLOCK_START_MEM) && (block->status & BLOCK_END_MEM))
     {
         heap.remove(block);
-        const usize pages = get_blocks_from_size(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
+        const usize pages = ceil_div(block->full_size + sizeof(HeapBlock), PAGE_SIZE);
         TRY(release_pages_impl(block, pages));
     }
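
A quick way to sanity-check the rename is to compile the equivalent of the static_asserts this patch drops from Alignment.h against the new helper. The snippet below is an illustrative sketch, not part of the patch; it only assumes the luna/Common.h header introduced above and reuses the same example values.

// Illustrative sketch: mirrors the static_asserts removed from Alignment.h,
// now exercised through ceil_div from the new header.
#include <luna/Common.h>

static_assert(ceil_div(40960, 4096) == 10); // exact multiple of the block size: no rounding
static_assert(ceil_div(194, 64) == 4);      // 194 / 64 = 3.03..., rounded up to 4 blocks
static_assert(ceil_div(2, 32) == 1);        // a partial block still needs one whole block
static_assert(ceil_div(0, 256) == 0);       // zero bytes need zero blocks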