Compare commits

...

18 Commits

SHA1 Message Date
e11280ad3f Merge branch 'address-spaces' 2022-10-14 18:20:42 +02:00
1c3377fc98 Prepare for cloning address spaces, not there yet 2022-10-14 18:17:57 +02:00
e43777bd31 Apparently, it just works now. 2022-10-14 18:00:33 +02:00
97a8a4a4a2 Solve rebase 2022-10-14 17:39:24 +02:00
81f56083c5 Almost there! 2022-10-14 17:38:29 +02:00
cdb73836b0 Some more multiple address space stuff
This page-faults. This is because the memory where the ELF should be is all zeroes, which the CPU tries to interpret.
2022-10-14 17:37:51 +02:00
bb7887a29d Add basic address space infrastructure 2022-10-14 17:34:29 +02:00
177282d79c Use the more appropriate size_t 2022-10-14 17:33:06 +02:00
5abd8814e3 Kernel: Continue moving utilities to a separate subdirectory 2022-10-14 17:33:06 +02:00
e21b608af4 Utilities: Start moving utilities into specific headers in a utils/ subdirectory 2022-10-14 17:31:47 +02:00
9b3c7816a3 Scheduler: pop the interrupt state when returning early from load_user_task() 2022-10-14 16:54:52 +02:00
26211bd49f It (almost) works now
The only thing doing weird stuff is exec(), so that's commented out and throws ENOSYS right now.

But we have two user tasks running in parallel, isolated from each other!
2022-10-14 16:46:00 +02:00
0c7c249935 exec is still doing some weird stuff, totally corrupting the page tables 2022-10-13 22:20:24 +02:00
5d41b4b113 Almost there...
exec() is not working yet. But the rest are!!
2022-10-13 22:13:04 +02:00
24272c57ef Almost there! 2022-10-13 21:55:51 +02:00
83982a24e2 add a comment 2022-10-13 21:21:02 +02:00
ee712432bd Some more multiple address space stuff
This page-faults. This is because the memory where the ELF should be is all zeroes, which the CPU tries to interpret.
2022-10-13 21:14:39 +02:00
229b06c63b Add basic address space infrastructure 2022-10-13 19:19:51 +02:00
18 changed files with 379 additions and 60 deletions

View File

@ -1,8 +1,11 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
int main()
{
sleep(6);
FILE* syms = fopen("/sys/moon.sym", "r");
if (!syms)
{

View File

@ -0,0 +1,27 @@
#pragma once
#include "memory/Paging.h"
struct AddressSpace
{
static AddressSpace create();
void destroy();
void detach();
AddressSpace clone();
PageTable* get_pml4()
{
return m_pml4;
}
bool is_cloned()
{
return m_cloned;
}
private:
PageTable* m_pml4;
bool m_cloned;
};
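
For reference, a minimal usage sketch of the new AddressSpace API (not part of the diff), based only on the declarations above and the VMM/Scheduler call sites that appear later in this compare; the example_* helper is hypothetical.

#include "memory/AddressSpace.h"
#include "memory/VMM.h"

// Hypothetical helper: give a task its own address space, make it active while
// its program is loaded, then return to the kernel's page tables and clean up.
static void example_spawn_with_address_space()
{
    AddressSpace space = AddressSpace::create(); // fresh PML4 with the kernel directory installed
    VMM::switch_to_user_address_space(space);    // bookkeeping only; CR3 is untouched until...
    VMM::apply_address_space();                  // ...the current PML4 is written to CR3
    // ... map pages / load an ELF image into `space` here ...
    VMM::switch_back_to_kernel_address_space();
    VMM::apply_address_space();
    space.destroy();                             // walks the tables and frees every owned page
}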

View File

@ -29,4 +29,6 @@ namespace MemoryManager
void release_page(void* page);
void release_pages(void* pages, uint64_t count);
void protect(void* page, uint64_t count, int flags);
}

View File

@ -1,4 +1,5 @@
#pragma once
#include "memory/AddressSpace.h"
#include "memory/Paging.h"
enum Flags
@ -11,6 +12,17 @@ namespace VMM
{
void init(); // Fetch page table from cr3
void switch_to_user_address_space(AddressSpace& space);
void switch_to_previous_user_address_space();
void switch_back_to_kernel_address_space();
void enter_syscall_context();
void exit_syscall_context();
void apply_address_space();
bool is_using_kernel_address_space();
void map(uint64_t vaddr, uint64_t paddr, int flags);
void remap(uint64_t vaddr, int flags);
void unmap(uint64_t vaddr);
@ -27,4 +39,7 @@ namespace VMM
void decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_index, uint64_t& pd_index,
uint64_t& pdp_index);
uint64_t recompose_vaddr(uint64_t page_index, uint64_t pt_index, uint64_t pd_index, uint64_t pdp_index);
void install_kernel_page_directory_into_address_space(AddressSpace& space);
};
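
For reference, a sketch (not part of the diff) of how these new hooks compose, mirroring the Syscall::entry() and Scheduler call sites added later in this compare; the wrapper name is hypothetical.

#include "memory/VMM.h"

// Hypothetical wrapper mirroring how Syscall::entry() uses these hooks: CR3 is
// pointed at the kernel PML4 for the duration of `work`, while the VMM keeps
// targeting the interrupted task's page tables, and CR3 is switched back to
// that task's address space on the way out.
static void example_run_in_syscall_context(void (*work)())
{
    VMM::enter_syscall_context();
    work();
    VMM::exit_syscall_context();
}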

View File

@ -20,6 +20,8 @@
namespace Syscall
{
void entry(Context* context);
char* strdup_from_user(const char* user_string);
}
void sys_exit(Context* context, int status);

View File

@ -10,7 +10,8 @@ namespace Scheduler
void exit(int status);
void sleep(unsigned long ms);
void add_kernel_task(void (*task)(void));
Task* create_user_task();
void load_user_task(const char* filename);

View File

@ -1,6 +1,7 @@
#pragma once
#include "fs/FileDescriptor.h"
#include "interrupts/Context.h"
#include "memory/AddressSpace.h"
#include "sys/elf/Image.h"
#define TASK_MAX_FDS 8
@ -43,6 +44,8 @@ struct Task
ELFImage* image = nullptr;
Descriptor files[TASK_MAX_FDS];
AddressSpace address_space;
};
void set_context_from_task(Task& task, Context* ctx);

View File

@ -51,6 +51,8 @@ extern "C" void common_handler(Context* context)
StackTracer tracer(context->rbp);
tracer.trace_with_ip(context->rip);
hang(); // FIXME: Remove this when multiple address spaces are working.
Scheduler::task_misbehave(context, -3);
}
}

View File

@ -16,6 +16,7 @@
#include "io/PIC.h"
#include "io/Serial.h"
#include "log/Log.h"
#include "memory/AddressSpace.h"
#include "memory/Memory.h"
#include "memory/MemoryManager.h"
#include "memory/MemoryMap.h"
@ -56,12 +57,6 @@ extern "C" void _start()
kinfoln("Loaded IDT");
PIC::remap();
PIC::enable_master(0b11111100); // enable keyboard and PIT
PIC::enable_slave(0b11111111);
kinfoln("Prepared PIC");
PIT::initialize(1000); // 1000 times per second
kinfoln("Prepared PIT");
@ -79,6 +74,7 @@ extern "C" void _start()
});
Scheduler::load_user_task("/bin/init");
Scheduler::load_user_task("/bin/sym");
kinfoln("Prepared scheduler tasks");
@ -87,7 +83,9 @@ extern "C" void _start()
Init::finish_kernel_boot();
Interrupts::enable(); // Task switching commences here
PIC::remap();
PIC::enable_master(0b11111100); // enable keyboard and PIT
PIC::enable_slave(0b11111111);
kinfoln("Interrupts enabled");

View File

@ -0,0 +1,93 @@
#define MODULE "vmm"
#include "memory/AddressSpace.h"
#include "log/Log.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
AddressSpace AddressSpace::create()
{
AddressSpace result;
result.m_pml4 = (PageTable*)PMM::request_page();
VMM::install_kernel_page_directory_into_address_space(result);
return result;
}
void AddressSpace::destroy()
{
if (m_cloned)
{
kdbgln("Will not destroy a cloned address space, I don't own it");
return;
}
uint64_t pages_freed = 0;
for (int i = 0; i < 512; i++)
{
PageDirectoryEntry& pdp_pde = m_pml4->entries[i];
if (!pdp_pde.present) continue;
if (pdp_pde.larger_pages)
{
pages_freed++;
PMM::free_page((void*)pdp_pde.get_address());
continue;
}
PageTable* pdp = (PageTable*)pdp_pde.get_address();
for (int j = 0; j < 511; j++) // skip the last page directory, it's the kernel one
{
PageDirectoryEntry& pd_pde = pdp->entries[j];
if (!pd_pde.present) continue;
if (pd_pde.larger_pages)
{
pages_freed++;
PMM::free_page((void*)pd_pde.get_address());
continue;
}
PageTable* pd = (PageTable*)pd_pde.get_address();
for (int k = 0; k < 512; k++)
{
PageDirectoryEntry& pt_pde = pd->entries[k];
if (!pt_pde.present) continue;
if (pt_pde.larger_pages)
{
pages_freed++;
PMM::free_page((void*)pt_pde.get_address());
continue;
}
PageTable* pt = (PageTable*)pt_pde.get_address();
for (int l = 0; l < 512; l++)
{
PageDirectoryEntry& pde = pt->entries[l];
if (!pde.present) continue;
pages_freed++;
PMM::free_page((void*)pde.get_address());
}
pages_freed++;
PMM::free_page(pt);
}
pages_freed++;
PMM::free_page(pd);
}
pages_freed++;
PMM::free_page(pdp);
}
pages_freed++;
PMM::free_page(m_pml4);
kdbgln("Reclaimed %ld pages from address space!", pages_freed);
}
void AddressSpace::detach()
{
if (!m_cloned) return;
m_pml4 = (PageTable*)PMM::request_page();
VMM::install_kernel_page_directory_into_address_space(*this);
m_cloned = false;
}
AddressSpace AddressSpace::clone()
{
AddressSpace result;
result.m_pml4 = m_pml4;
result.m_cloned = true;
return result;
}
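
For reference, a sketch (not part of the diff) of the clone()/detach() contract implemented above; cloning is still described as work in progress in the commit messages, and the helper below is hypothetical.

#include "memory/AddressSpace.h"

// Hypothetical illustration: a clone shares the parent's PML4 and refuses to
// destroy it; detach() swaps in a freshly allocated PML4 with the kernel
// directory installed, after which destroy() is safe again.
static void example_clone_semantics(AddressSpace& parent)
{
    AddressSpace child = parent.clone(); // child.get_pml4() == parent.get_pml4(), is_cloned() == true
    child.destroy();                     // no-op: a cloned space does not own its tables
    child.detach();                      // child now owns its own PML4, is_cloned() == false
    child.destroy();                     // frees only the pages the child owns
}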

View File

@ -189,4 +189,9 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
PMM::free_page((void*)physicalAddress);
}
KernelHeap::free_virtual_pages((uint64_t)pages, count);
}
void MemoryManager::protect(void* page, uint64_t count, int flags)
{
for (uint64_t i = 0; i < count; i++) { VMM::remap((uint64_t)page + (i * PAGE_SIZE), flags); }
}
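
For reference, a sketch (not part of the diff) of how protect() is meant to be used, mirroring the ELFLoader change later in this compare; the helper and the exact MAP_* flag values are assumptions.

#include "memory/MemoryManager.h"

// Hypothetical use of protect(): fill a buffer while its pages are
// kernel-writable, then re-apply the flags the final mapping should have.
static void example_load_then_protect(void* segment, uint64_t pages)
{
    // ... copy/zero the segment contents while it is mapped MAP_READ_WRITE ...
    MemoryManager::protect(segment, pages, MAP_USER); // now user-visible, no longer writable
}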

View File

@ -9,29 +9,88 @@
#include "utils/Addresses.h"
#include "utils/Registers.h"
// FIXME: There is a lot of duplicate code in this file. This should probably be refactored.
static PageTable* kernel_pml4;
static PageTable* current_pml4;
static AddressSpace* user_address_space;
void VMM::switch_back_to_kernel_address_space()
{
if (current_pml4 != kernel_pml4) { current_pml4 = kernel_pml4; }
}
void VMM::switch_to_user_address_space(AddressSpace& space)
{
user_address_space = &space;
current_pml4 = user_address_space->get_pml4();
}
void VMM::switch_to_previous_user_address_space()
{
current_pml4 = user_address_space->get_pml4();
}
void VMM::enter_syscall_context()
{
if (current_pml4 != kernel_pml4)
{
current_pml4 = kernel_pml4;
apply_address_space();
switch_to_previous_user_address_space();
}
}
void VMM::exit_syscall_context()
{
if (current_pml4 != user_address_space->get_pml4()) { switch_to_previous_user_address_space(); }
apply_address_space();
}
void VMM::apply_address_space()
{
write_cr3(current_pml4);
}
bool VMM::is_using_kernel_address_space()
{
return current_pml4 == kernel_pml4;
}
void VMM::init()
{
kernel_pml4 = (PageTable*)read_cr3();
current_pml4 = kernel_pml4;
}
void VMM::unmap(uint64_t vaddr)
{
vaddr = round_down_to_nearest_page(vaddr);
PageDirectoryEntry* pde = find_pde(current_pml4, vaddr);
if (!pde) return; // Already unmapped
memset(pde, 0, sizeof(PageDirectoryEntry));
flush_tlb(vaddr);
}
void VMM::remap(uint64_t vaddr, int flags)
{
vaddr = round_down_to_nearest_page(vaddr);
PageDirectoryEntry* pde = find_pde(current_pml4, vaddr);
if (!pde) return; // Not mapped
if (flags & User) propagate_user(current_pml4, vaddr);
else
pde->user = false;
if (flags & ReadWrite) propagate_read_write(current_pml4, vaddr);
else
pde->read_write = false;
flush_tlb(vaddr);
}
uint64_t VMM::get_physical(uint64_t vaddr)
{
PageDirectoryEntry* pde = find_pde(current_pml4, round_down_to_nearest_page(vaddr));
if (!pde) return UINT64_MAX; // Not mapped
return pde->get_address() | (vaddr % PAGE_SIZE);
@ -39,7 +98,7 @@ uint64_t VMM::get_physical(uint64_t vaddr)
uint64_t VMM::get_flags(uint64_t vaddr)
{
PageDirectoryEntry* pde = find_pde(current_pml4, round_down_to_nearest_page(vaddr));
if (!pde) return 0; // Not mapped
uint64_t flags = 0;
@ -51,35 +110,38 @@ uint64_t VMM::get_flags(uint64_t vaddr)
void VMM::map(uint64_t vaddr, uint64_t paddr, int flags)
{
vaddr = round_down_to_nearest_page(vaddr);
PageDirectoryEntry* pde = find_pde(current_pml4, vaddr);
bool will_flush_tlb = true;
if (!pde)
{
pde = create_pde_if_not_exists(current_pml4, vaddr);
will_flush_tlb = false;
}
else if (pde->larger_pages)
{
unmap(vaddr);
pde = create_pde_if_not_exists(current_pml4, vaddr);
}
pde->set_address(round_down_to_nearest_page(paddr));
if (flags & User) propagate_user(current_pml4, vaddr);
else
pde->user = false;
if (flags & ReadWrite) propagate_read_write(current_pml4, vaddr);
else
pde->read_write = false;
if (will_flush_tlb) flush_tlb(vaddr);
}
PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t vaddr)
{
uint64_t page_index;
PageDirectoryEntry* pde;
PageTable* pt = root;
uint64_t indexes[3];
decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
for (int i = 0; i < 3;
i++) // Walk through the page map level 4, page directory pointer, and page directory to find the page table.
@ -98,11 +160,13 @@ PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t vaddr)
PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vaddr)
{
uint64_t page_index;
PageDirectoryEntry* pde;
PageTable* pt = root;
uint64_t indexes[3];
decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
auto pde_create_if_not_present = [&]() {
pt = (PageTable*)PMM::request_page();
@ -112,8 +176,6 @@ PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vadd
pde->present = true;
};
for (int i = 0; i < 3; i++)
{
pde = &pt->entries[indexes[i]];
@ -130,13 +192,13 @@ PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vadd
void VMM::propagate_read_write(PageTable* root, uint64_t vaddr)
{
uint64_t page_index;
PageDirectoryEntry* pde;
PageTable* pt = root;
uint64_t indexes[3];
decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
for (int i = 0; i < 3; i++)
{
@ -158,13 +220,13 @@ void VMM::propagate_read_write(PageTable* root, uint64_t vaddr)
void VMM::propagate_user(PageTable* root, uint64_t vaddr)
{
uint64_t page_index;
PageDirectoryEntry* pde;
PageTable* pt = root;
uint64_t indexes[3];
decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
for (int i = 0; i < 3; i++)
{
@ -200,4 +262,30 @@ void VMM::decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_ind
pd_index = vaddr & 0x1ff;
vaddr >>= 9;
pdp_index = vaddr & 0x1ff;
}
uint64_t VMM::recompose_vaddr(uint64_t page_index, uint64_t pt_index, uint64_t pd_index, uint64_t pdp_index)
{
return pdp_index << 39 | pd_index << 30 | pt_index << 21 | page_index << 12;
}
void VMM::install_kernel_page_directory_into_address_space(AddressSpace& space)
{
PageTable* space_pml4 = space.get_pml4();
PageTable* kernel_last_pdp = (PageTable*)kernel_pml4->entries[511].get_address();
PageTable* kernel_last_pd = (PageTable*)kernel_last_pdp->entries[511].get_address();
PageTable* space_last_pdp = (PageTable*)PMM::request_page();
PageDirectoryEntry& space_last_pdp_pde = space_pml4->entries[511];
space_last_pdp_pde.present = true;
space_last_pdp_pde.read_write = true;
space_last_pdp_pde.set_address((uint64_t)space_last_pdp);
PageDirectoryEntry& space_last_pd_pde = space_last_pdp->entries[511];
space_last_pd_pde.present = true;
space_last_pd_pde.read_write = true;
space_last_pd_pde.set_address((uint64_t)kernel_last_pd);
}
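
For reference, a worked example (not part of the diff) of the virtual-address split that decompose_vaddr()/recompose_vaddr() implement; the helper below is hypothetical.

#include <stdint.h>

#include "memory/VMM.h"

// Hypothetical worked example of the 9+9+9+9-bit index split used above, on top
// of the 12-bit page offset. For vaddr 0x00007F8012345000:
//   pdp_index (slot in the PML4) = 0x0ff, pd_index = 0x000,
//   pt_index = 0x091, page_index = 0x145, offset = 0x000.
static void example_vaddr_round_trip()
{
    uint64_t page_index, pt_index, pd_index, pdp_index;
    VMM::decompose_vaddr(0x00007F8012345000, page_index, pt_index, pd_index, pdp_index);
    // recompose_vaddr() rebuilds the page base; the 12-bit offset is not preserved.
    uint64_t base = VMM::recompose_vaddr(page_index, pt_index, pd_index, pdp_index);
    (void)base; // base == 0x00007F8012345000 here, since the offset was zero
}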

View File

@ -8,6 +8,7 @@
#include "interrupts/Interrupts.h"
#include "io/IO.h"
#include "log/Log.h"
#include "memory/VMM.h"
#include "misc/hang.h"
#include "std/string.h"
@ -85,6 +86,11 @@ static void try_idt_triple_fault()
[[noreturn]] void reboot()
{
Interrupts::disable();
if (!VMM::is_using_kernel_address_space())
{
VMM::switch_back_to_kernel_address_space();
VMM::apply_address_space();
}
kinfoln("Attempting reboot using ACPI");
try_acpi_reboot();
kinfoln("Attempting reboot using keyboard RESET pulsing");

View File

@ -1,11 +1,14 @@
#include "sys/Syscall.h"
#include "errno.h"
#include "io/Serial.h"
#include "memory/VMM.h"
#include "std/string.h"
#include "thread/Scheduler.h"
void Syscall::entry(Context* context)
{
asm volatile("cli");
VMM::enter_syscall_context();
switch (context->rax)
{
case SYS_exit: sys_exit(context, (int)context->rdi); break;
@ -24,4 +27,12 @@ void Syscall::entry(Context* context)
case SYS_exec: sys_exec(context, (const char*)context->rdi); break;
default: context->rax = -ENOSYS; break;
}
VMM::exit_syscall_context();
}
char* Syscall::strdup_from_user(const char* user_string)
{
uint64_t phys = VMM::get_physical((uint64_t)user_string);
if (phys == (uint64_t)-1) { return nullptr; }
return strdup((const char*)phys);
}
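
For reference, a sketch (not part of the diff) of the strdup_from_user() pattern as used by sys_open() and sys_exec() in this compare; the handler below is hypothetical.

#include "errno.h"
#include "interrupts/Context.h"
#include "std/stdlib.h" // assumed to declare kfree()
#include "sys/Syscall.h"

// Hypothetical handler: copy the user-supplied string into kernel memory first,
// fail with EFAULT if the pointer does not resolve, and free the copy on every path.
static void example_sys_handler(Context* context, const char* user_path)
{
    char* kpath = Syscall::strdup_from_user(user_path);
    if (!kpath)
    {
        context->rax = -EFAULT;
        return;
    }
    // ... operate on kpath instead of the raw user pointer ...
    kfree(kpath);
    context->rax = 0;
}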

View File

@ -8,6 +8,7 @@
#include "log/Log.h"
#include "memory/Memory.h"
#include "memory/MemoryManager.h"
#include "memory/VMM.h"
#include "misc/utils.h"
#include "std/stdlib.h"
#include "std/string.h"
@ -67,7 +68,9 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
{
Elf64_Ehdr elf_ehdr;
ASSERT(VFS::read(node, 0, sizeof(elf_ehdr), (char*)&elf_ehdr) >= 0);
ASSERT(strncmp((const char*)elf_ehdr.e_ident, ELFMAG, SELFMAG) ==
0); // If you haven't checked the ELF executable with check_elf_image() first, then an assertion fail is your
// fault =D
ASSERT(elf_ehdr.e_ident[EI_CLASS] == ELFCLASS64);
ASSERT(elf_ehdr.e_ident[EI_DATA] == ELFDATA2LSB);
ASSERT(elf_ehdr.e_type == ET_EXEC);
@ -86,13 +89,24 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
kdbgln("Loading loadable segment at address %lx, file size %ld, mem size %ld, permissions %s", phdr.p_vaddr,
phdr.p_filesz, phdr.p_memsz, format_permissions(phdr.p_flags));
ASSERT(phdr.p_vaddr);
uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, (phdr.p_vaddr % PAGE_SIZE) + phdr.p_memsz);
void* buffer = (void*)((uint64_t)MemoryManager::get_pages_at(round_down_to_nearest_page(phdr.p_vaddr),
pages, MAP_READ_WRITE) +
(phdr.p_vaddr % PAGE_SIZE));
if (VMM::is_using_kernel_address_space()) { VMM::switch_to_previous_user_address_space(); }
VMM::apply_address_space();
VFS::read(node, phdr.p_offset, phdr.p_filesz, (char*)buffer);
memset((void*)((uint64_t)buffer + phdr.p_filesz), 0, phdr.p_memsz - phdr.p_filesz);
VMM::switch_back_to_kernel_address_space();
VMM::apply_address_space();
VMM::switch_to_previous_user_address_space();
MemoryManager::protect(buffer, pages, phdr.p_flags & 2 ? MAP_READ_WRITE | MAP_USER : MAP_USER);
image = (ELFImage*)krealloc(image, (sizeof(ELFImage) - sizeof(ELFSection)) +
(image->section_count + 1) * sizeof(ELFSection));
ELFSection& section = image->sections[image->section_count];

View File

@ -5,30 +5,38 @@
#include "interrupts/Interrupts.h"
#include "memory/MemoryManager.h"
#include "memory/PMM.h"
#include "memory/VMM.h"
#include "std/stdlib.h"
#include "std/string.h"
#include "sys/Syscall.h"
#include "sys/elf/ELFLoader.h"
#include "thread/Scheduler.h"
void sys_exec(Context* context, const char* pathname)
{
/*context->rax = -ENOSYS; // FIXME: Make exec() work under separate address spaces.
return;*/
char* kpathname = Syscall::strdup_from_user(pathname);
if (!kpathname)
{
context->rax = -EFAULT;
return;
}
kinfoln("exec(): executing %s", pathname);
kinfoln("exec(): executing %s", kpathname);
VFS::Node* program = VFS::resolve_path(pathname);
VFS::Node* program = VFS::resolve_path(kpathname);
if (!program)
{
kfree(kpathname);
context->rax = -ENOENT;
return;
}
if (program->type == VFS_DIRECTORY)
{
kfree(kpathname);
context->rax = -EISDIR;
return;
}
@ -36,6 +44,7 @@ void sys_exec(Context* context, const char* pathname)
long memusage;
if ((memusage = ELFLoader::check_elf_image(program)) < 0)
{
kfree(kpathname);
context->rax = -ENOEXEC;
return;
}
@ -43,12 +52,16 @@ void sys_exec(Context* context, const char* pathname)
uint64_t allocated_stack = (uint64_t)MemoryManager::get_pages(TASK_PAGES_IN_STACK, MAP_READ_WRITE | MAP_USER);
if (!allocated_stack)
{
kfree(kpathname);
context->rax = -ENOMEM;
return;
}
uint64_t allocated_stack_phys = VMM::get_physical(allocated_stack);
if ((uint64_t)memusage > PMM::get_free())
{
kfree(kpathname);
MemoryManager::release_pages((void*)allocated_stack, TASK_PAGES_IN_STACK);
context->rax = -ENOMEM;
return;
@ -62,19 +75,22 @@ void sys_exec(Context* context, const char* pathname)
// At this point, pretty much nothing can fail.
ELFLoader::release_elf_image(Scheduler::current_task()->image);
ELFImage* image = ELFLoader::load_elf_from_vfs(program);
ASSERT(image); // If check_elf_image succeeded, load_elf_from_vfs MUST succeed, unless something has gone terribly
// wrong.
MemoryManager::release_pages((void*)task->allocated_stack, TASK_PAGES_IN_STACK);
task->allocated_stack = allocated_stack;
for (uint64_t i = 0; i < TASK_PAGES_IN_STACK; i++)
{
VMM::map(allocated_stack + (i * PAGE_SIZE), allocated_stack_phys + (i * PAGE_SIZE), MAP_READ_WRITE | MAP_USER);
}
Scheduler::reset_task(task, image);
set_context_from_task(*task, context);
kfree(kpathname);
return;
}

View File

@ -4,7 +4,10 @@
#include "interrupts/Context.h"
#include "io/Serial.h"
#include "log/Log.h"
#include "memory/VMM.h"
#include "render/TextRenderer.h"
#include "std/stdlib.h"
#include "sys/Syscall.h"
#include "thread/Scheduler.h"
#include "thread/Task.h"
@ -79,7 +82,7 @@ void sys_write(Context* context, int fd, size_t size, const char* addr)
context->rax = -EBADF;
return;
}
ssize_t result = current_task->files[fd].write(size, (const char*)VMM::get_physical((uint64_t)addr));
context->rax = (size_t)result;
return;
}
@ -99,9 +102,17 @@ void sys_open(Context* context, const char* filename, int flags)
return;
}
char* kernel_filename = Syscall::strdup_from_user(filename);
if (!kernel_filename)
{
context->rax = -EFAULT;
return;
}
VFS::Node* node = VFS::resolve_path(kernel_filename);
if (!node)
{
kfree(kernel_filename);
context->rax = -ENOENT;
return;
}
@ -110,16 +121,18 @@ void sys_open(Context* context, const char* filename, int flags)
bool can_write = (flags & OPEN_WRITE) > 0;
if (!can_read && !can_write)
{
kfree(kernel_filename);
context->rax = -EINVAL;
return;
}
kdbgln("open(): opening %s %s, allocated file descriptor %d", filename,
kdbgln("open(): opening %s %s, allocated file descriptor %d", kernel_filename,
(can_read && can_write) ? "rw"
: can_read ? "r-"
: "-w",
fd);
kfree(kernel_filename);
current_task->files[fd].open(node, can_read, can_write);
context->rax = fd;
return;
@ -143,7 +156,7 @@ void sys_read(Context* context, int fd, size_t size, char* buffer)
context->rax = -EBADF;
return;
}
ssize_t result = current_task->files[fd].read(size, (char*)VMM::get_physical((uint64_t)buffer));
context->rax = (size_t)result;
return;
}

View File

@ -87,21 +87,16 @@ void Scheduler::add_kernel_task(void (*task)(void))
new_task->id, new_task->regs.rsp, task_num);
}
Task* Scheduler::create_user_task()
{
Task* new_task = new Task;
ASSERT(new_task);
memset(&new_task->regs, 0, sizeof(Context));
new_task->user_task = true;
new_task->id = free_tid++;
new_task->allocated_stack = (uint64_t)MemoryManager::get_pages(
TASK_PAGES_IN_STACK, MAP_READ_WRITE | MAP_USER); // 16 KB is enough for everyone, right?
new_task->regs.rsp = get_top_of_stack(new_task->allocated_stack, TASK_PAGES_IN_STACK);
new_task->regs.cs = 0x18 | 0x03;
new_task->regs.ss = 0x20 | 0x03;
new_task->regs.ds = 0x20 | 0x03;
new_task->regs.rflags = (1 << 21) | (1 << 9); // enable interrupts
new_task->task_sleep = 0;
new_task->task_time = 0;
new_task->cpu_time = 0;
@ -110,10 +105,7 @@ void Scheduler::add_user_task(void* task)
base_task->prev_task = new_task;
new_task->next_task = base_task;
end_task = new_task;
new_task->state = new_task->Running;
task_num++;
kinfoln("Adding user task: starts at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
new_task->id, new_task->regs.rsp, task_num);
}
void Scheduler::load_user_task(const char* filename)
@ -130,6 +122,8 @@ void Scheduler::load_user_task(const char* filename)
ASSERT(new_task);
memset(&new_task->regs, 0, sizeof(Context));
new_task->id = free_tid++;
new_task->address_space = AddressSpace::create();
VMM::switch_to_user_address_space(new_task->address_space);
ELFImage* image = ELFLoader::load_elf_from_filesystem(
filename); // FIXME: TOCTOU? Right now, impossible, since interrupts are disabled and SMP is not a thing. But in
// the future, it might be possible.
@ -156,6 +150,7 @@ void Scheduler::load_user_task(const char* filename)
task_num++;
kinfoln("Adding user task: loaded at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
new_task->id, new_task->regs.rsp, task_num);
VMM::switch_back_to_kernel_address_space();
Interrupts::pop();
}
@ -182,12 +177,27 @@ void Scheduler::reap_task(Task* task)
task_num--;
Task* exiting_task = task;
ASSERT(task->id != 0); // WHY IN THE WORLD WOULD WE BE REAPING THE IDLE TASK?
if (exiting_task->is_user_task())
{
VMM::switch_back_to_kernel_address_space();
VMM::apply_address_space();
VMM::switch_to_user_address_space(exiting_task->address_space);
}
kinfoln("reaping task %ld, exited with code %ld", exiting_task->id, exiting_task->exit_status);
if (exiting_task->allocated_stack)
MemoryManager::release_pages((void*)exiting_task->allocated_stack, TASK_PAGES_IN_STACK);
if (exiting_task->image) // FIXME: Also free pages the task has mmap-ed but not munmap-ed.
{
// ELFLoader::release_elf_image(exiting_task->image);
kfree(exiting_task->image);
}
if (exiting_task->is_user_task())
{
VMM::switch_back_to_kernel_address_space();
VMM::apply_address_space();
Interrupts::push_and_enable();
exiting_task->address_space.destroy();
Interrupts::pop();
}
for (int i = 0; i < TASK_MAX_FDS; i++) { exiting_task->files[i].close(); }
delete exiting_task;
@ -316,7 +326,17 @@ void Scheduler::task_yield(Context* context)
{
task_save_floating(*original_task);
}
if (sched_current_task->is_user_task())
{
VMM::switch_to_user_address_space(sched_current_task->address_space);
VMM::apply_address_space();
task_restore_floating(*sched_current_task);
}
else if (!was_idle && original_task->is_user_task() && !sched_current_task->is_user_task())
{
VMM::switch_back_to_kernel_address_space();
VMM::apply_address_space();
}
}
sched_current_task->task_time = 20;
set_context_from_task(*sched_current_task, context);