Merge branch printf_pointers into main

Reviewed-on: #9
apio 2022-10-08 16:27:37 +00:00
commit 4b74c14f1b
6 changed files with 16 additions and 17 deletions
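The change applied across these files is to print pointer values with the %p conversion instead of casting them to an integer wide enough for %lx. As background, here is a minimal standalone sketch in plain C (mirroring the pattern in the hunks below, not code taken verbatim from this repository); printf's %p is defined to take a void*, so pointer types other than char* and void* get an explicit cast:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char* variable = malloc(200);

    /* Old pattern: shoehorn the pointer into an integer and print it with %lx. */
    printf("Allocated variable at address %lx\n", (unsigned long int)variable);

    /* New pattern: %p is the conversion meant for pointers; it expects void*,
       so other pointer types should be cast to void* explicitly. */
    printf("Allocated variable at address %p\n", (void*)variable);

    free(variable);
    return 0;
}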

View File

@@ -28,7 +28,7 @@ int main()
 {
     char* variable = malloc(200);
     *variable = 3;
-    printf("Allocated variable at address %lx\n", (unsigned long int)variable);
+    printf("Allocated variable at address %p\n", variable);
     free(variable);
 }

View File

@@ -12,7 +12,7 @@ int main()
     sleep(1);
     void* allocated = malloc(CHUNK);
     do {
-        printf("Allocating 4 MB of memory... %lx\n", (unsigned long)allocated);
+        printf("Allocating 4 MB of memory... %p\n", allocated);
         sleep(1);
     } while ((allocated = malloc(CHUNK)));
     perror("malloc");

View File

@@ -17,7 +17,7 @@ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt()
     kdbgln("First time accessing the RSDT/XSDT, mapping it into memory");
     void* physical = (void*)bootboot.arch.x86_64.acpi_ptr;
-    kdbgln("RSDT/XSDT physical address: %lx", (uint64_t)physical);
+    kdbgln("RSDT/XSDT physical address: %p", physical);
     SDTHeader* rsdt = (SDTHeader*)MemoryManager::get_unaligned_mapping(physical);
@@ -30,7 +30,7 @@ ACPI::SDTHeader* ACPI::get_rsdt_or_xsdt()
         rsdt = (SDTHeader*)MemoryManager::get_unaligned_mappings(cache, rsdt_pages);
     }
-    kdbgln("Mapped RSDT/XSDT to virtual address %lx, uses %ld pages", (uint64_t)rsdt, rsdt_pages);
+    kdbgln("Mapped RSDT/XSDT to virtual address %p, uses %ld pages", (void*)rsdt, rsdt_pages);
     cache = rsdt;
     return rsdt;
 }
@@ -58,8 +58,8 @@ void* ACPI::find_table(ACPI::SDTHeader* root_sdt, const char* signature)
 {
     bool isXSDT = is_xsdt();
     uint64_t entries = (root_sdt->Length - sizeof(SDTHeader)) / (isXSDT ? 8 : 4);
-    kdbgln("Searching for table %s in the %s at %lx (table contains %ld entries)", signature, isXSDT ? "XSDT" : "RSDT",
-           (uint64_t)root_sdt, entries);
+    kdbgln("Searching for table %s in the %s at %p (table contains %ld entries)", signature, isXSDT ? "XSDT" : "RSDT",
+           (void*)root_sdt, entries);
     for (uint64_t i = 0; i < entries; i++)
     {
@@ -81,9 +81,9 @@ void* ACPI::find_table(ACPI::SDTHeader* root_sdt, const char* signature)
             kwarnln("Entry %ld in the %s points to null", i, isXSDT ? "XSDT" : "RSDT");
            continue;
         }
-        kdbgln("Physical address of entry: %lx", (uint64_t)h);
+        kdbgln("Physical address of entry: %p", (void*)h);
         SDTHeader* realHeader = (SDTHeader*)MemoryManager::get_unaligned_mapping(h);
-        kdbgln("Mapped entry to virtual address %lx", (uint64_t)realHeader);
+        kdbgln("Mapped entry to virtual address %p", (void*)realHeader);
         if (!validate_sdt_header(realHeader))
         {
             kwarnln("Header of entry %ld is not valid, skipping this entry", i);

View File

@@ -116,7 +116,6 @@ void InitRD::init()
     {
        initrd_base =
            MemoryManager::get_unaligned_mappings((void*)bootboot.initrd_ptr, bootboot.initrd_size / PAGE_SIZE + 1);
-        kdbgln("physical base at %lx, size %lx, mapped to %lx", bootboot.initrd_ptr, bootboot.initrd_size,
-               (uint64_t)initrd_base);
+        kdbgln("physical base at %lx, size %lx, mapped to %p", bootboot.initrd_ptr, bootboot.initrd_size, initrd_base);
        initrd_initialized = true;
     }

View File

@@ -148,7 +148,7 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
     if (!count) return 0;
     if (count == 1) return get_page_at(addr, flags);
 #ifdef MM_DEBUG
-    kdbgln("allocating several pages (%ld), at address %ld", count, addr);
+    kdbgln("allocating several pages (%ld), at address %lx", count, addr);
 #endif
     for (uint64_t i = 0; i < count; i++)
     {
@@ -164,7 +164,7 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
         }
         kernelVMM.map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
 #ifdef MM_DEBUG
-        kdbgln("allocating virtual %lx, physical %lx", virtualAddress + (i * PAGE_SIZE), (uint64_t)physicalAddress);
+        kdbgln("allocating virtual %lx, physical %p", virtualAddress + (i * PAGE_SIZE), physicalAddress);
 #endif
     }
     return (void*)addr;
@@ -184,7 +184,7 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
         ASSERT(physicalAddress != UINT64_MAX);
         kernelVMM.unmap((uint64_t)page);
 #ifdef MM_DEBUG
-        kdbgln("releasing virtual %lx, physical %lx", (uint64_t)page, physicalAddress);
+        kdbgln("releasing virtual %p, physical %lx", page, physicalAddress);
 #endif
         PMM::free_page((void*)physicalAddress);
     }
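The %ld to %lx change in get_pages_at above is the complementary case: addr and physicalAddress are uint64_t values that hold addresses rather than pointers, so they keep an integer conversion but switch to hexadecimal. As a hedged aside (assuming an LP64 target where unsigned long is 64-bit, which holds for this x86_64 kernel), %lx matches uint64_t there; PRIx64 from <inttypes.h> is the spelling that avoids even that assumption:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0xffffffff80000000ull; /* an address stored as an integer */
    int value = 42;
    int* ptr = &value;                     /* an actual pointer */

    /* Integer that happens to hold an address: print it in hex.
       PRIx64 sidesteps any assumption about the width of long. */
    printf("addr = %" PRIx64 "\n", addr);

    /* Real pointer: let %p format it, with a cast to void*. */
    printf("ptr  = %p\n", (void*)ptr);
    return 0;
}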

View File

@@ -21,7 +21,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
     if (flags & MAP_READ_WRITE) real_flags |= MAP_READ_WRITE;
     if (address)
     {
-        kdbgln("sys_mmap: %ld pages at address %lx, %s", size / PAGE_SIZE, (uint64_t)address,
+        kdbgln("sys_mmap: %ld pages at address %p, %s", size / PAGE_SIZE, address,
                real_flags & MAP_READ_WRITE ? "rw" : "ro");
         if (kernelVMM.getPhysical((uint64_t)address) != (uint64_t)-1) // Address is already used.
         {
@@ -33,7 +33,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
         void* result = MemoryManager::get_pages_at((uint64_t)address - offset, size / PAGE_SIZE, real_flags);
         if (result)
         {
-            kdbgln("mmap succeeded: %lx", (uint64_t)result);
+            kdbgln("mmap succeeded: %p", result);
             context->rax = (uint64_t)result;
             return;
         }
@@ -48,7 +48,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
     void* result = MemoryManager::get_pages(size / PAGE_SIZE, real_flags);
     if (result)
     {
-        kdbgln("mmap succeeded: %lx", (uint64_t)result);
+        kdbgln("mmap succeeded: %p", result);
         context->rax = (uint64_t)result;
         return;
     }
@@ -62,7 +62,7 @@ void sys_mmap(Context* context, void* address, size_t size, int flags)
 void sys_munmap(Context* context, void* address, size_t size)
 {
-    kdbgln("sys_munmap: attempting to unmap %lx", (uint64_t)address);
+    kdbgln("sys_munmap: attempting to unmap %p", address);
     if (size < PAGE_SIZE)
     {
         kdbgln("munmap failed: size is too small");