Define PAGE_SIZE as 4096 and use it everywhere instead of using 4096 as a magic number

apio 2022-10-08 14:52:28 +02:00
parent 1235ce8b32
commit abcf1b6118
12 changed files with 67 additions and 51 deletions
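The kernel-side headers introduce the constant with a guarded fallback definition, so several headers (and the userspace header below, which defines it directly) can be included together without redefinition errors. A minimal sketch of the idiom; bytes_for is a hypothetical use site, not code from this commit:

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096 // x86_64 pages at this paging level are 4 KiB
#endif

// Arithmetic now reads in page units instead of repeating the literal:
static inline uint64_t bytes_for(uint64_t pages) { return pages * PAGE_SIZE; } // hypothetical helper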

View File

@@ -1,6 +1,10 @@
#pragma once
#include <stdint.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
#define MAP_READ_WRITE 1 << 0
#define MAP_USER 1 << 1
#define MAP_EXEC 1 << 2

View File

@@ -1,6 +1,10 @@
#pragma once
#include <stdint.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
namespace Paging
{
struct PageDirectoryEntry

View File

@@ -9,17 +9,17 @@
extern BOOTBOOT bootboot;
ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt()
ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt() // FIXME: Refactor this ugly code.
{
static void* cache = nullptr;
if (cache) return (SDTHeader*)cache;
kdbgln("First time accessing the RSDT/XSDT, mapping it into memory");
void* physical = (void*)bootboot.arch.x86_64.acpi_ptr;
uint64_t offset = (uint64_t)physical % 4096;
uint64_t offset = (uint64_t)physical % PAGE_SIZE;
kdbgln("RSDT/XSDT physical address: %lx", (uint64_t)physical);
cache = MemoryManager::get_unaligned_mapping(physical);
uint64_t numPages = 1;
while ((offset + ((SDTHeader*)cache)->Length) > (numPages * 4096))
while ((offset + ((SDTHeader*)cache)->Length) > (numPages * PAGE_SIZE))
{
kwarnln("RSDT/XSDT extends beyond the mapped page, mapping one more page");
MemoryManager::release_unaligned_mappings(cache, numPages);
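The remap loop above grows the mapping one page at a time until offset + Length fits; its final page count equals a ceiling division that could be computed up front. A sketch of the equivalence, using the same offset and Length as above (illustrative only, not a change made by this commit):

// Pages spanned by `length` bytes that start `offset` bytes into their first page:
uint64_t pages_spanned(uint64_t offset, uint64_t length)
{
    return (offset + length + PAGE_SIZE - 1) / PAGE_SIZE; // ceil((offset + length) / PAGE_SIZE)
}
// e.g. offset = 0x234, length = 0x2000 -> 3 pages, the same count the loop reaches.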

View File

@@ -84,7 +84,8 @@ void GDT::load()
gdtr.offset = (uint64_t)&internal_gdt;
gdtr.size = sizeof(InternalGDT);
memset(&main_tss, 0, sizeof(TSS));
main_tss.rsp[0] = (uint64_t)MemoryManager::get_pages(4) + (4096 * 4) - 8; // allocate 16KB for the syscall stack
main_tss.rsp[0] =
(uint64_t)MemoryManager::get_pages(4) + (PAGE_SIZE * 4) - 8; // allocate 16KB for the syscall stack
main_tss.iomap_base = sizeof(TSS);
set_base(&internal_gdt.tss, (uint64_t)&main_tss & 0xffffffff);
internal_gdt.tss2.base_high = (uint32_t)(((uint64_t)&main_tss >> 32) & 0xffffffff);
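The rsp computation above works in page units: four pages give a 16 KiB region, the stack top is one byte past its end, and the initial rsp sits 8 bytes lower so the first stack slot still lies inside the allocation. Expanded with a hypothetical base address:

uint64_t base = (uint64_t)MemoryManager::get_pages(4); // say 0xffffffff90000000 (illustrative)
uint64_t top = base + PAGE_SIZE * 4;                   // base + 0x4000, one past the 16 KiB region
main_tss.rsp[0] = top - 8;                             // 0xffffffff90003ff8: last 8-byte slot in the region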

View File

@@ -114,7 +114,8 @@ void InitRD::for_each(void (*callback)(File& f))
void InitRD::init()
{
initrd_base = MemoryManager::get_unaligned_mappings((void*)bootboot.initrd_ptr, bootboot.initrd_size / 4096 + 1);
initrd_base =
MemoryManager::get_unaligned_mappings((void*)bootboot.initrd_ptr, bootboot.initrd_size / PAGE_SIZE + 1);
kdbgln("physical base at %lx, size %lx, mapped to %lx", bootboot.initrd_ptr, bootboot.initrd_size,
(uint64_t)initrd_base);
initrd_initialized = true;

View File

@@ -1,6 +1,10 @@
#include "memory/KernelHeap.h"
#include "assert.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
static uint8_t page_bitmap[2048];
#define ALLOC_BASE 0xfffffffff8000000
@@ -28,7 +32,7 @@ uint64_t KernelHeap::request_virtual_page()
if (bitmap_read(index)) continue;
bitmap_set(index, true);
start_index = index + 1;
return ALLOC_BASE + (index * 4096);
return ALLOC_BASE + (index * PAGE_SIZE);
}
return 0;
@@ -55,7 +59,7 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
if (contiguous == count)
{
for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
return ALLOC_BASE + (contiguous_start * 4096);
return ALLOC_BASE + (contiguous_start * PAGE_SIZE);
}
}
@@ -65,7 +69,7 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
void KernelHeap::free_virtual_page(uint64_t address)
{
ASSERT(address >= ALLOC_BASE && address < ALLOC_END);
uint64_t index = (address - ALLOC_BASE) / 4096;
uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
bitmap_set(index, false);
if (start_index > index) start_index = index;
}
@@ -73,7 +77,7 @@ void KernelHeap::free_virtual_page(uint64_t address)
void KernelHeap::free_virtual_pages(uint64_t address, uint64_t count)
{
ASSERT(address >= ALLOC_BASE && address < ALLOC_END);
uint64_t index = (address - ALLOC_BASE) / 4096;
uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
for (uint64_t i = 0; i < count; i++) { bitmap_set(index + i, false); }
if (start_index > index) start_index = index;
}
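These hunks rely on bitmap_read and bitmap_set over the 2048-byte page_bitmap, which tracks 16384 pages (64 MiB of kernel heap above ALLOC_BASE); their bodies are not part of this diff. A plausible one-bit-per-page sketch, an assumption about the implementation rather than code from the commit:

static bool bitmap_read(uint64_t index)
{
    return page_bitmap[index / 8] & (1 << (index % 8)); // assumed layout: bit i%8 of byte i/8
}

static void bitmap_set(uint64_t index, bool value)
{
    if (value) page_bitmap[index / 8] |= (uint8_t)(1 << (index % 8));
    else page_bitmap[index / 8] &= (uint8_t)~(1 << (index % 8));
}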

View File

@@ -32,7 +32,7 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags)
void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
{
uint64_t offset = (uint64_t)physicalAddress % 4096;
uint64_t offset = (uint64_t)physicalAddress % PAGE_SIZE;
uint64_t virtualAddress = KernelHeap::request_virtual_page();
if (!virtualAddress)
{
@@ -49,7 +49,7 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
{
if (!count) return 0;
if (count == 1) return get_unaligned_mapping(physicalAddress, flags);
uint64_t offset = (uint64_t)physicalAddress % 4096;
uint64_t offset = (uint64_t)physicalAddress % PAGE_SIZE;
uint64_t virtualAddress = KernelHeap::request_virtual_pages(count);
if (!virtualAddress)
{
@@ -62,14 +62,14 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
}
for (uint64_t i = 0; i < count; i++)
{
kernelVMM.map(virtualAddress + (i * 4096), ((uint64_t)physicalAddress - offset) + (i * 4096), flags);
kernelVMM.map(virtualAddress + (i * PAGE_SIZE), ((uint64_t)physicalAddress - offset) + (i * PAGE_SIZE), flags);
}
return (void*)(virtualAddress + offset);
}
void MemoryManager::release_unaligned_mapping(void* mapping)
{
uint64_t offset = (uint64_t)mapping % 4096;
uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
kernelVMM.unmap((uint64_t)mapping - offset);
KernelHeap::free_virtual_page((uint64_t)mapping - offset);
}
@@ -78,9 +78,9 @@ void MemoryManager::release_unaligned_mappings(void* mapping, uint64_t count)
{
if (!count) return;
if (count == 1) return release_unaligned_mapping(mapping);
uint64_t offset = (uint64_t)mapping % 4096;
uint64_t offset = (uint64_t)mapping % PAGE_SIZE;
KernelHeap::free_virtual_pages((uint64_t)mapping - offset, count);
for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * 4096)); }
for (uint64_t i = 0; i < count; i++) { kernelVMM.unmap(((uint64_t)mapping - offset) + (i * PAGE_SIZE)); }
}
void MemoryManager::release_mapping(void* mapping)
@@ -162,9 +162,9 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
#endif
return 0;
}
kernelVMM.map(addr + (i * 4096), (uint64_t)physicalAddress, flags);
kernelVMM.map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
#ifdef MM_DEBUG
kdbgln("allocating virtual %lx, physical %lx", virtualAddress + (i * 4096), (uint64_t)physicalAddress);
kdbgln("allocating virtual %lx, physical %lx", virtualAddress + (i * PAGE_SIZE), (uint64_t)physicalAddress);
#endif
}
return (void*)addr;
@@ -179,7 +179,7 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
#endif
for (uint64_t i = 0; i < count; i++)
{
void* page = (void*)((uint64_t)pages + (i * 4096));
void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE));
uint64_t physicalAddress = kernelVMM.getPhysical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX);
kernelVMM.unmap((uint64_t)page);
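get_unaligned_mapping and release_unaligned_mapping are symmetric around the same modulo: mapping aligns the physical address down and adds the offset back into the returned pointer, and releasing recovers that offset from the pointer because the underlying virtual page is aligned. A worked example with an illustrative physical address:

uint64_t phys = 0x7e2a34;           // hypothetical, not page-aligned
uint64_t offset = phys % PAGE_SIZE; // 0xa34
uint64_t page = phys - offset;      // 0x7e2000 is what actually gets mapped
// The caller receives virt + 0xa34; release_unaligned_mapping computes
// (virt + 0xa34) % PAGE_SIZE == 0xa34 and unmaps virt, the aligned base.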

View File

@@ -50,24 +50,24 @@ void PMM::init()
bitmap_addr = (char*)biggest_chunk;
virtual_bitmap_addr =
bitmap_addr; // FIXME: map this to a virtual address (ideally in the kernel heap between -128M and -64M)
ASSERT((total_mem / 4096 / 8) < biggest_chunk_size);
bitmap_size = total_mem / 4096 / 8 + 1;
ASSERT((total_mem / PAGE_SIZE / 8) < biggest_chunk_size);
bitmap_size = total_mem / PAGE_SIZE / 8 + 1;
memset(bitmap_addr, 0xFF, bitmap_size);
ptr = &bootboot.mmap;
for (uint64_t i = 0; i < mmap_entries; i++)
{
uint64_t index = MMapEnt_Ptr(ptr) / 4096;
uint64_t index = MMapEnt_Ptr(ptr) / PAGE_SIZE;
if (!MMapEnt_IsFree(ptr)) { reserved_mem += MMapEnt_Size(ptr); }
else
{
free_mem += MMapEnt_Size(ptr);
for (uint64_t j = 0; j < (MMapEnt_Size(ptr) / 4096); j++) { bitmap_set(index + j, false); }
for (uint64_t j = 0; j < (MMapEnt_Size(ptr) / PAGE_SIZE); j++) { bitmap_set(index + j, false); }
}
ptr++;
}
lock_pages(bitmap_addr, bitmap_size / 4096 + 1);
lock_pages(bitmap_addr, bitmap_size / PAGE_SIZE + 1);
}
static bool bitmap_read(uint64_t index)
@@ -90,9 +90,9 @@ void* PMM::request_page()
if (bitmap_read(index)) continue;
bitmap_set(index, true);
start_index = index + 1;
free_mem -= 4096;
used_mem += 4096;
return (void*)(index * 4096);
free_mem -= PAGE_SIZE;
used_mem += PAGE_SIZE;
return (void*)(index * PAGE_SIZE);
}
return PMM_FAILED;
@@ -119,9 +119,9 @@ void* PMM::request_pages(uint64_t count)
if (contiguous == count)
{
for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
free_mem -= (count * 4096);
used_mem += (count * 4096);
return (void*)(contiguous_start * 4096);
free_mem -= (count * PAGE_SIZE);
used_mem += (count * PAGE_SIZE);
return (void*)(contiguous_start * PAGE_SIZE);
}
}
@@ -130,11 +130,11 @@ void* PMM::request_pages(uint64_t count)
void PMM::free_page(void* address)
{
uint64_t index = (uint64_t)address / 4096;
uint64_t index = (uint64_t)address / PAGE_SIZE;
if (!bitmap_read(index)) return;
bitmap_set(index, false);
used_mem -= 4096;
free_mem += 4096;
used_mem -= PAGE_SIZE;
free_mem += PAGE_SIZE;
if (start_index > index) start_index = index;
}
@@ -145,11 +145,11 @@ void PMM::free_pages(void* address, uint64_t count)
void PMM::lock_page(void* address)
{
uint64_t index = ((uint64_t)address) / 4096;
uint64_t index = ((uint64_t)address) / PAGE_SIZE;
if (bitmap_read(index)) return;
bitmap_set(index, true);
used_mem += 4096;
free_mem -= 4096;
used_mem += PAGE_SIZE;
free_mem -= PAGE_SIZE;
}
void PMM::lock_pages(void* address, uint64_t count)
@@ -179,6 +179,6 @@ uint64_t PMM::get_bitmap_size()
void PMM::map_bitmap_to_virtual()
{
virtual_bitmap_addr =
(char*)MemoryManager::get_unaligned_mappings(bitmap_addr, Utilities::get_blocks_from_size(4096, bitmap_size));
virtual_bitmap_addr = (char*)MemoryManager::get_unaligned_mappings(
bitmap_addr, Utilities::get_blocks_from_size(PAGE_SIZE, bitmap_size));
}

View File

@@ -11,7 +11,7 @@
void sys_mmap(Context* context, void* address, size_t size, int flags)
{
if (size < 4096)
if (size < PAGE_SIZE)
{
kdbgln("sys_mmap: size too small");
context->rax = MAP_FAIL(EINVAL);
@@ -21,7 +21,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
if (flags & MAP_READ_WRITE) real_flags |= MAP_READ_WRITE;
if (address)
{
kdbgln("sys_mmap: %ld pages at address %lx, %s", size / 4096, (uint64_t)address,
kdbgln("sys_mmap: %ld pages at address %lx, %s", size / PAGE_SIZE, (uint64_t)address,
real_flags & MAP_READ_WRITE ? "rw" : "ro");
if (kernelVMM.getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used.
{
@@ -29,8 +29,8 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
context->rax = MAP_FAIL(ENOMEM);
return;
}
uint64_t offset = (uint64_t)address % 4096;
void* result = MemoryManager::get_pages_at((uint64_t)address - offset, size / 4096, real_flags);
uint64_t offset = (uint64_t)address % PAGE_SIZE;
void* result = MemoryManager::get_pages_at((uint64_t)address - offset, size / PAGE_SIZE, real_flags);
if (result)
{
kdbgln("mmap succeeded: %lx", (uint64_t)result);
@@ -44,8 +44,8 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
return;
}
}
kdbgln("sys_mmap: %ld pages at any address, %s", size / 4096, real_flags & MAP_READ_WRITE ? "rw" : "ro");
void* result = MemoryManager::get_pages(size / 4096, real_flags);
kdbgln("sys_mmap: %ld pages at any address, %s", size / PAGE_SIZE, real_flags & MAP_READ_WRITE ? "rw" : "ro");
void* result = MemoryManager::get_pages(size / PAGE_SIZE, real_flags);
if (result)
{
kdbgln("mmap succeeded: %lx", (uint64_t)result);
@@ -63,7 +63,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
void sys_munmap(Context* context, void* address, size_t size)
{
kdbgln("sys_munmap: attempting to unmap %lx", (uint64_t)address);
if (size < 4096)
if (size < PAGE_SIZE)
{
kdbgln("munmap failed: size is too small");
context->rax = -EINVAL;
@@ -82,8 +82,8 @@ void sys_munmap(Context* context, void* address, size_t size)
context->rax = -EINVAL;
return;
}
uint64_t offset = (uint64_t)address % 4096;
MemoryManager::release_pages((void*)((uint64_t)address - offset), size / 4096);
uint64_t offset = (uint64_t)address % PAGE_SIZE;
MemoryManager::release_pages((void*)((uint64_t)address - offset), size / PAGE_SIZE);
kdbgln("munmap succeeded");
context->rax = 0;
return;
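Note that both paths divide size by PAGE_SIZE with truncating integer division, so a length that is not an exact multiple of a page loses its tail (the earlier size < PAGE_SIZE check only rejects requests below one page). A worked example of the arithmetic as written, an observation rather than a change in this commit:

size_t size = 5000;              // passes the size < PAGE_SIZE check
size_t pages = size / PAGE_SIZE; // 1: only the first 4096 bytes are mapped or unmapped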

View File

@@ -64,7 +64,7 @@ void Scheduler::add_kernel_task(void (*task)(void))
new_task->regs.rip = (uint64_t)task;
new_task->allocated_stack =
(uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK); // 16 KB is enough for everyone, right?
new_task->regs.rsp = new_task->allocated_stack + (4096 * TASK_PAGES_IN_STACK) - sizeof(uintptr_t);
new_task->regs.rsp = new_task->allocated_stack + (PAGE_SIZE * TASK_PAGES_IN_STACK) - sizeof(uintptr_t);
new_task->regs.cs = 0x08;
new_task->regs.ss = 0x10;
new_task->regs.ds = 0x10;
@@ -92,7 +92,7 @@ void Scheduler::add_user_task(void* task)
new_task->regs.rip = (uint64_t)task;
new_task->allocated_stack = (uint64_t)MemoryManager::get_pages(
TASK_PAGES_IN_STACK, MAP_READ_WRITE | MAP_USER); // 16 KB is enough for everyone, right?
new_task->regs.rsp = new_task->allocated_stack + (4096 * TASK_PAGES_IN_STACK) - sizeof(uintptr_t);
new_task->regs.rsp = new_task->allocated_stack + (PAGE_SIZE * TASK_PAGES_IN_STACK) - sizeof(uintptr_t);
new_task->regs.cs = 0x18 | 0x03;
new_task->regs.ss = 0x20 | 0x03;
new_task->regs.ds = 0x20 | 0x03;
@@ -133,7 +133,7 @@ void Scheduler::load_user_task(const char* filename)
}
new_task->allocated_stack = (uint64_t)MemoryManager::get_pages(
TASK_PAGES_IN_STACK, MAP_READ_WRITE | MAP_USER); // 16 KB is enough for everyone, right?
new_task->regs.rsp = new_task->allocated_stack + (4096 * TASK_PAGES_IN_STACK) - sizeof(uintptr_t);
new_task->regs.rsp = new_task->allocated_stack + (PAGE_SIZE * TASK_PAGES_IN_STACK) - sizeof(uintptr_t);
new_task->regs.cs = 0x18 | 0x03;
new_task->regs.ss = 0x20 | 0x03;
new_task->regs.ds = 0x20 | 0x03;

View File

@@ -9,6 +9,8 @@ typedef unsigned long off_t;
#define PROT_READ_WRITE 1
#define PAGE_SIZE 4096
#ifdef __cplusplus
extern "C"
{

View File

@@ -13,12 +13,12 @@ int liballoc_unlock()
void* liballoc_alloc(size_t size)
{
void* result = mmap(NULL, size * 4096, PROT_READ_WRITE, 0, 0, 0);
void* result = mmap(NULL, size * PAGE_SIZE, PROT_READ_WRITE, 0, 0, 0);
if (result == MAP_FAILED) return 0;
return (void*)result;
}
int liballoc_free(void* address, size_t size)
{
return munmap(address, size * 4096);
return munmap(address, size * PAGE_SIZE);
}
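liballoc's porting hooks measure size in pages rather than bytes, which is why both hooks scale by PAGE_SIZE before calling mmap/munmap. A usage sketch; the page count is illustrative:

void* block = liballoc_alloc(4); // 4 pages = 4 * PAGE_SIZE = 16384 bytes
// ... carve allocations out of the block ...
liballoc_free(block, 4);         // the same page count must be passed back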