Compare commits
No commits in common. "27448611b39edeb401434648828f287314066cbf" and "de167c3c67f7f65292e1f98cc66ed9a91051cf3c" have entirely different histories.
27448611b3 ... de167c3c67
@@ -8,8 +8,4 @@ namespace KernelHeap
 
     void free_virtual_page(uint64_t address);
    void free_virtual_pages(uint64_t address, uint64_t count);
-
-    void clear();
-
-    void dump_usage();
 }
@@ -1,35 +1,21 @@
 #pragma once
 
-#ifndef MODULE
-#define MODULE "mem"
-#endif
-
-#include "log/Log.h"
 #include "memory/MemoryManager.h"
 #include "memory/VMM.h"
 #include "misc/utils.h"
 
 char* strdup_from_user(const char* user_string);
 
-// FIXME: Map the physical addresses into kernel address space. Right now, something overwrites KernelHeap and crashes
-// it, so that's not really possible. But it should be done in the future.
-
 template <typename T, unsigned long S = sizeof(T), typename V> T* user_address_to_typed_pointer(V address)
 {
     uint64_t phys = VMM::get_physical((uint64_t)address);
-    if (phys == (uint64_t)-1)
-    {
-        kinfoln("warning: user pointer is not mapped in its address space");
-        return nullptr;
-    }
-    // return (T*)MemoryManager::get_unaligned_mappings((void*)phys, Utilities::get_blocks_from_size(PAGE_SIZE, S),
-    //                                                  MAP_READ_WRITE);
-    return (T*)phys;
+    if (phys == (uint64_t)-1) return nullptr;
+    return (T*)MemoryManager::get_unaligned_mappings((void*)phys, Utilities::get_blocks_from_size(PAGE_SIZE, S),
+                                                     MAP_READ_WRITE);
 }
 
-template <typename T, unsigned long S = sizeof(T)> void free_user_typed_pointer(T*)
+template <typename T, unsigned long S = sizeof(T)> void free_user_typed_pointer(T* ptr)
 {
-    // MemoryManager::release_unaligned_mappings(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, S));
+    MemoryManager::release_unaligned_mappings(ptr, Utilities::get_blocks_from_size(PAGE_SIZE, S));
 }
 
 template <typename T> T* obtain_user_ref(T* user_ptr)
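A quick illustration of how the pair above is meant to be used after this change: the helper now maps the user structure into the kernel's address space instead of handing back the raw physical address, and the free helper actually releases that temporary mapping. The sketch below assumes the kernel environment and the declarations in this header; UserBuffer and handle_request are made-up names for illustration only.

struct UserBuffer
{
    uint64_t size;
    char data[256];
};

static bool handle_request(uint64_t user_address)
{
    // Resolve the user pointer and map it into kernel space (nullptr if it is not mapped).
    UserBuffer* buf = user_address_to_typed_pointer<UserBuffer>(user_address);
    if (!buf) return false;

    // ... validate buf->size and work with buf->data here ...

    // Release the temporary kernel-side mapping created above.
    free_user_typed_pointer(buf);
    return true;
}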
@@ -375,10 +375,8 @@ void InitRD::init()
         (void*)bootboot.initrd_ptr, Utilities::get_blocks_from_size(PAGE_SIZE, bootboot.initrd_size));
     kdbgln("physical base at %lx, size %lx, mapped to %p", bootboot.initrd_ptr, bootboot.initrd_size, initrd_base);
     kdbgln("total blocks: %ld", get_total_blocks());
-    void* leak = kmalloc(4); // leak some memory so that kmalloc doesn't continually allocate and free pages
     initrd_initialize_root();
     initrd_scan();
     VFS::mount_root(&initrd_root);
     initrd_initialized = true;
-    kfree(leak);
 }
@@ -2,8 +2,6 @@
 
 #include "memory/KernelHeap.h"
 #include "assert.h"
-#include "log/Log.h"
-#include "std/string.h"
 
 #ifndef PAGE_SIZE
 #define PAGE_SIZE 4096
@@ -11,13 +9,10 @@
 
 static uint8_t page_bitmap[2048];
 
-static int64_t kheap_free = sizeof(page_bitmap) * 8 * PAGE_SIZE;
-static int64_t kheap_used = 0;
-
 #define ALLOC_BASE 0xfffffffff8000000
 #define ALLOC_END 0xfffffffffc000000
 
-// static uint64_t start_index = 0;
+static uint64_t start_index = 0;
 
 static bool bitmap_read(uint64_t index)
 {
@@ -32,28 +27,13 @@ static void bitmap_set(uint64_t index, bool value)
     if (value) { page_bitmap[byteIndex] |= bitIndexer; }
 }
 
-void KernelHeap::clear()
-{
-    memset(page_bitmap, 0, sizeof(page_bitmap));
-    kinfoln("page bitmap located at %p", (void*)page_bitmap);
-}
-
 uint64_t KernelHeap::request_virtual_page()
 {
-    for (uint64_t index = 0; index < sizeof(page_bitmap) * 8; index++)
+    for (uint64_t index = start_index; index < sizeof(page_bitmap) * 8; index++)
     {
         if (bitmap_read(index)) continue;
         bitmap_set(index, true);
-        // start_index = index + 1;
-#ifdef KHEAP_DEBUG
-        kinfoln("allocating one page for caller %p, returning %lx", __builtin_return_address(0),
-                ALLOC_BASE + (index * PAGE_SIZE));
-#endif
-        kheap_free -= PAGE_SIZE;
-        kheap_used += PAGE_SIZE;
-#ifdef KHEAP_DEBUG
-        dump_usage();
-#endif
+        start_index = index + 1;
         return ALLOC_BASE + (index * PAGE_SIZE);
     }
 
@@ -64,7 +44,7 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
 {
     uint64_t contiguous = 0;
     uint64_t contiguous_start = 0;
-    for (uint64_t index = 0; index < sizeof(page_bitmap) * 8; index++)
+    for (uint64_t index = start_index; index < sizeof(page_bitmap) * 8; index++)
     {
         if (bitmap_read(index))
         {
@@ -81,15 +61,6 @@ uint64_t KernelHeap::request_virtual_pages(uint64_t count)
         if (contiguous == count)
         {
             for (uint64_t i = 0; i < count; i++) bitmap_set(contiguous_start + i, true);
-#ifdef KHEAP_DEBUG
-            kinfoln("allocating %lu pages for caller %p, returning %lx", count, __builtin_return_address(0),
-                    ALLOC_BASE + (contiguous_start * PAGE_SIZE));
-#endif
-            kheap_free -= (count * PAGE_SIZE);
-            kheap_used += (count * PAGE_SIZE);
-#ifdef KHEAP_DEBUG
-            dump_usage();
-#endif
             return ALLOC_BASE + (contiguous_start * PAGE_SIZE);
         }
     }
@@ -102,15 +73,7 @@ void KernelHeap::free_virtual_page(uint64_t address)
     if (address < ALLOC_BASE || address >= ALLOC_END) return;
     uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
     bitmap_set(index, false);
-#ifdef KHEAP_DEBUG
-    kinfoln("releasing one page for caller %p, %lx", __builtin_return_address(0), address);
-#endif
-    kheap_free += PAGE_SIZE;
-    kheap_used -= PAGE_SIZE;
-#ifdef KHEAP_DEBUG
-    dump_usage();
-#endif
-    // if (start_index > index) start_index = index;
+    if (start_index > index) start_index = index;
 }
 
 void KernelHeap::free_virtual_pages(uint64_t address, uint64_t count)
@@ -118,19 +81,5 @@ void KernelHeap::free_virtual_pages(uint64_t address, uint64_t count)
     if (address < ALLOC_BASE || address >= ALLOC_END) return;
     uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
     for (uint64_t i = 0; i < count; i++) { bitmap_set(index + i, false); }
-#ifdef KHEAP_DEBUG
-    kinfoln("releasing %lu pages for caller %p, %lx", count, __builtin_return_address(0), address);
-#endif
-    kheap_free += (count * PAGE_SIZE);
-    kheap_used -= (count * PAGE_SIZE);
-#ifdef KHEAP_DEBUG
-    dump_usage();
-#endif
-    // if (start_index > index) start_index = index;
-}
-
-void KernelHeap::dump_usage()
-{
-    kinfoln("Used: %ld KB", kheap_used / 1024);
-    kinfoln("Free: %ld KB", kheap_free / 1024);
+    if (start_index > index) start_index = index;
 }
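The KernelHeap hunks above replace the per-allocation accounting and debug logging with a cached start_index: the allocation scan begins at the first index that might still be free, and freeing a lower index pulls the cursor back. Below is a standalone, user-space sketch of that first-fit bitmap scheme; it is not Luna's actual kernel code, and the bit order, sizes and the demo in main are illustrative.

#include <cstdint>
#include <cstdio>

static constexpr uint64_t PAGE_SIZE = 4096;
static constexpr uint64_t ALLOC_BASE = 0xfffffffff8000000ULL;

static uint8_t page_bitmap[2048]; // 2048 * 8 bits => 16384 pages => 64 MiB of virtual space
static uint64_t start_index = 0;  // first index that might still be free

static bool bitmap_read(uint64_t index)
{
    return page_bitmap[index / 8] & (0b10000000 >> (index % 8));
}

static void bitmap_set(uint64_t index, bool value)
{
    uint8_t mask = 0b10000000 >> (index % 8);
    page_bitmap[index / 8] &= (uint8_t)~mask;
    if (value) page_bitmap[index / 8] |= mask;
}

static uint64_t request_virtual_page()
{
    for (uint64_t index = start_index; index < sizeof(page_bitmap) * 8; index++)
    {
        if (bitmap_read(index)) continue;
        bitmap_set(index, true);
        start_index = index + 1; // the next scan can begin right after this page
        return ALLOC_BASE + (index * PAGE_SIZE);
    }
    return 0; // out of virtual pages in the managed range
}

static void free_virtual_page(uint64_t address)
{
    uint64_t index = (address - ALLOC_BASE) / PAGE_SIZE;
    bitmap_set(index, false);
    if (start_index > index) start_index = index; // let the scan reuse this slot first
}

int main()
{
    uint64_t a = request_virtual_page();
    uint64_t b = request_virtual_page();
    std::printf("a=%llx b=%llx\n", (unsigned long long)a, (unsigned long long)b);
    free_virtual_page(a);
    std::printf("after freeing a, next page=%llx (a is handed out again)\n",
                (unsigned long long)request_virtual_page());
}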
@@ -11,7 +11,6 @@
 
 void MemoryManager::init()
 {
-    KernelHeap::clear();
     PMM::init();
     VMM::init();
     PMM::map_bitmap_to_virtual();
@@ -25,7 +24,6 @@ void* MemoryManager::get_mapping(void* physicalAddress, int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
-        KernelHeap::dump_usage();
         return 0;
     }
     VMM::map(virtualAddress, (uint64_t)physicalAddress, flags);
@@ -41,7 +39,6 @@ void* MemoryManager::get_unaligned_mapping(void* physicalAddress, int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
-        KernelHeap::dump_usage();
         return 0;
     }
     VMM::map(virtualAddress, (uint64_t)physicalAddress - offset, flags);
@@ -61,7 +58,6 @@ void* MemoryManager::get_unaligned_mappings(void* physicalAddress, uint64_t coun
                 "-64M)",
                 count);
 #endif
-        KernelHeap::dump_usage();
         return 0;
     }
     for (uint64_t i = 0; i < count; i++)
@@ -101,7 +97,6 @@ void* MemoryManager::get_page(int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
-        KernelHeap::dump_usage();
         return 0;
     }
     return get_page_at(virtualAddress, flags);
@@ -142,7 +137,6 @@ void* MemoryManager::get_pages(uint64_t count, int flags)
 #ifdef MM_DEBUG
         kwarnln("No kernel heap space (virtual address space from -128M to -64M) left");
 #endif
-        KernelHeap::dump_usage();
         return 0; // Out of virtual address in the kernel heap range (-128M to -64M). This should be difficult to
                   // achieve...
     }
@@ -169,6 +163,9 @@ void* MemoryManager::get_pages_at(uint64_t addr, uint64_t count, int flags)
             return 0;
         }
         VMM::map(addr + (i * PAGE_SIZE), (uint64_t)physicalAddress, flags);
+#ifdef MM_DEBUG
+        kdbgln("allocating virtual %lx, physical %p", addr + (i * PAGE_SIZE), physicalAddress);
+#endif
     }
     return (void*)addr;
 }
@@ -186,6 +183,9 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
         uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
         ASSERT(physicalAddress != UINT64_MAX);
         VMM::unmap((uint64_t)page);
+#ifdef MM_DEBUG
+        kdbgln("releasing virtual %p, physical %lx", page, physicalAddress);
+#endif
         PMM::free_page((void*)physicalAddress);
     }
     KernelHeap::free_virtual_pages((uint64_t)pages, count);
@@ -2,7 +2,6 @@
 
 #include "memory/PMM.h"
 #include "bootboot.h"
-#include "log/Log.h"
 #include "memory/Memory.h"
 #include "memory/MemoryManager.h"
 #include "misc/utils.h"
@@ -131,11 +130,7 @@ void* PMM::request_pages(uint64_t count)
 void PMM::free_page(void* address)
 {
     uint64_t index = (uint64_t)address / PAGE_SIZE;
-    if (index > (bitmap_size * 8))
-    {
-        kinfoln("attempt to free out-of-range address %p", address);
-        return;
-    }
+    if (index > (bitmap_size * 8)) return;
     if (!bitmap_read(index)) return;
     bitmap_set(index, false);
     used_mem -= PAGE_SIZE;
@@ -482,6 +482,8 @@ void sys_waitpid(Context* context, long pid, int* wstatus,
     }
     if (wstatus)
     {
+        VMM::switch_to_user_address_space(sched_current_task->address_space);
+        VMM::enter_syscall_context();
         int* kwstatus = obtain_user_ref(wstatus);
         if (kwstatus)
         {
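A hedged note on the two added calls: obtain_user_ref has to translate a pointer that only exists in the calling task's page tables, so that address space is made active (and syscall context entered) before the translation. A commented restatement of the pattern follows; the written value and the surrounding error handling are illustrative rather than taken from this diff.

if (wstatus)
{
    // Activate the waiting task's page tables so its wstatus pointer can be resolved.
    VMM::switch_to_user_address_space(sched_current_task->address_space);
    VMM::enter_syscall_context();

    int* kwstatus = obtain_user_ref(wstatus); // kernel-usable reference to the user pointer
    if (kwstatus)
    {
        *kwstatus = status; // 'status' stands in for the exit status collected earlier
    }
}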
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-source $(dirname $0)/env.sh
-
-cd $LUNA_ROOT
-
-qemu-system-x86_64 -cdrom Luna.iso -smp 1 -m 256M -serial stdio -enable-kvm $@
@@ -7,4 +7,4 @@ cd $LUNA_ROOT
 
 tools/build-iso.sh
 
-tools/fast-run.sh
+qemu-system-x86_64 -cdrom Luna.iso -smp 1 -m 256M -serial stdio -enable-kvm $@