Some more multiple address space stuff
This page-faults: the memory where the ELF should be is all zeroes, which the CPU then tries to interpret as code.
commit ee712432bd (parent 229b06c63b)
@@ -29,4 +29,6 @@ namespace MemoryManager
     void release_page(void* page);
     void release_pages(void* pages, uint64_t count);
 
+    void protect(void* page, uint64_t count, int flags);
+
 }
@@ -12,6 +12,13 @@ namespace VMM
 {
     void init(); // Fetch page table from cr3
 
+    void switch_to_user_address_space(AddressSpace& space);
+    void switch_back_to_kernel_address_space();
+
+    void apply_address_space();
+
+    bool is_using_kernel_address_space();
+
     void map(uint64_t vaddr, uint64_t paddr, int flags);
     void remap(uint64_t vaddr, int flags);
     void unmap(uint64_t vaddr);
@@ -28,6 +35,7 @@ namespace VMM
 
     void decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_index, uint64_t& pd_index,
                          uint64_t& pdp_index);
+    uint64_t recompose_vaddr(uint64_t page_index, uint64_t pt_index, uint64_t pd_index, uint64_t pdp_index);
 
     void install_kernel_page_directory_into_address_space(AddressSpace& space);
 };
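
Read as a unit, the new VMM declarations split address-space handling into two steps: switch_to_user_address_space()/switch_back_to_kernel_address_space() pick which PML4 the VMM treats as current, and apply_address_space() makes the hardware follow. A minimal usage sketch (the helper name is hypothetical; the call order mirrors the Scheduler changes further down):

    #include "memory/AddressSpace.h"
    #include "memory/VMM.h"

    static void run_with_address_space(AddressSpace& space)
    {
        VMM::switch_to_user_address_space(space); // later map()/remap() calls walk this PML4
        VMM::apply_address_space();               // point CR3 at it so the CPU does too
        // ... set up or touch user mappings here ...
        VMM::switch_back_to_kernel_address_space();
        VMM::apply_address_space();               // back onto the kernel PML4
    }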
@@ -1,6 +1,7 @@
 #pragma once
 #include "fs/FileDescriptor.h"
 #include "interrupts/Context.h"
+#include "memory/AddressSpace.h"
 #include "sys/elf/Image.h"
 
 #define TASK_MAX_FDS 8
@@ -43,6 +44,8 @@ struct Task
     ELFImage* image = nullptr;
 
     Descriptor files[TASK_MAX_FDS];
+
+    AddressSpace address_space;
 };
 
 void set_context_from_task(Task& task, Context* ctx);
@@ -51,6 +51,8 @@ extern "C" void common_handler(Context* context)
         StackTracer tracer(context->rbp);
         tracer.trace_with_ip(context->rip);
 
+        hang(); // FIXME: Remove this when multiple address spaces are working.
+
         Scheduler::task_misbehave(context, -3);
     }
 }
@@ -57,16 +57,6 @@ extern "C" void _start()
 
     kinfoln("Loaded IDT");
 
-    PIC::remap();
-    PIC::enable_master(0b11111100); // enable keyboard and PIT
-    PIC::enable_slave(0b11111111);
-
-    kinfoln("Prepared PIC");
-
-    PIT::initialize(1000); // 1000 times per second
-
-    kinfoln("Prepared PIT");
-
     Scheduler::init();
 
     kinfoln("Prepared scheduler");
@@ -88,9 +78,15 @@ extern "C" void _start()
 
     Init::finish_kernel_boot();
 
-    AddressSpace vaspace = AddressSpace::create();
+    PIT::initialize(1000); // 1000 times per second
 
-    vaspace.destroy();
+    kinfoln("Prepared PIT");
+
+    PIC::remap();
+    PIC::enable_master(0b11111100); // enable keyboard and PIT
+    PIC::enable_slave(0b11111111);
+
+    kinfoln("Prepared PIC");
 
     Interrupts::enable(); // Task switching commences here
 
@@ -52,7 +52,7 @@ void AddressSpace::destroy()
             for (int l = 0; l < 512; l++)
             {
                 PageDirectoryEntry& pde = pt->entries[l];
-                if (pde.present) continue;
+                if (!pde.present) continue;
                 pages_freed++;
                 PMM::free_page((void*)pde.get_address());
             }
@@ -189,4 +189,9 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
         PMM::free_page((void*)physicalAddress);
     }
     KernelHeap::free_virtual_pages((uint64_t)pages, count);
+}
+
+void MemoryManager::protect(void* page, uint64_t count, int flags)
+{
+    for (uint64_t i = 0; i < count; i++) { VMM::remap((uint64_t)page + (i * PAGE_SIZE), flags); }
 }
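
protect() is just a loop over VMM::remap(), so it only rewrites the flags of pages that are already mapped (remap() returns early otherwise). A short hedged example of tightening permissions after filling a freshly mapped region; the address is illustrative, everything else comes from this diff:

    // Map two writable pages, fill them, then make them user-accessible but read-only.
    void* buf = MemoryManager::get_pages_at(0x400000, 2, MAP_READ_WRITE); // 0x400000 is made up
    memset(buf, 0, 2 * PAGE_SIZE);
    MemoryManager::protect(buf, 2, MAP_USER); // no MAP_READ_WRITE => remap() clears read_write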
@@ -9,6 +9,28 @@
 
 static PageTable* kernel_pml4;
 static PageTable* current_pml4;
+static AddressSpace* user_address_space;
+
+void VMM::switch_back_to_kernel_address_space()
+{
+    if (current_pml4 != kernel_pml4) { current_pml4 = kernel_pml4; }
+}
+
+void VMM::switch_to_user_address_space(AddressSpace& space)
+{
+    user_address_space = &space;
+    current_pml4 = user_address_space->get_pml4();
+}
+
+void VMM::apply_address_space()
+{
+    asm volatile("mov %0, %%cr3" : : "r"(current_pml4));
+}
+
+bool VMM::is_using_kernel_address_space()
+{
+    return current_pml4 == kernel_pml4;
+}
 
 void VMM::init()
 {
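
Note that the switch_* functions above only update current_pml4; nothing touches the hardware until apply_address_space() writes CR3. A tiny sketch of that distinction, given some AddressSpace& space (assuming ASSERT and the VMM header are available where it runs):

    VMM::switch_to_user_address_space(space);      // bookkeeping: current_pml4 = space's PML4
    ASSERT(!VMM::is_using_kernel_address_space()); // the query reflects it immediately...
    VMM::apply_address_space();                    // ...but the CPU only switches here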
@@ -27,6 +49,22 @@ void VMM::unmap(uint64_t vaddr)
     flush_tlb(vaddr);
 }
 
+void VMM::remap(uint64_t vaddr, int flags)
+{
+    vaddr = Utilities::round_down_to_nearest_page(vaddr);
+
+    PageDirectoryEntry* pde = find_pde(current_pml4, vaddr);
+    if (!pde) return; // Not mapped
+
+    if (flags & User) propagate_user(current_pml4, vaddr);
+    else
+        pde->user = false;
+    if (flags & ReadWrite) propagate_read_write(current_pml4, vaddr);
+    else
+        pde->read_write = false;
+    flush_tlb(vaddr);
+}
+
 uint64_t VMM::get_physical(uint64_t vaddr)
 {
     PageDirectoryEntry* pde = find_pde(current_pml4, Utilities::round_down_to_nearest_page(vaddr));
@@ -60,24 +98,27 @@ void VMM::map(uint64_t vaddr, uint64_t paddr, int flags)
     {
         unmap(vaddr);
         pde = create_pde_if_not_exists(current_pml4, vaddr);
-        will_flush_tlb = false;
     }
 
     pde->set_address(Utilities::round_down_to_nearest_page(paddr));
     if (flags & User) propagate_user(current_pml4, vaddr);
+    else
+        pde->user = false;
     if (flags & ReadWrite) propagate_read_write(current_pml4, vaddr);
+    else
+        pde->read_write = false;
     if (will_flush_tlb) flush_tlb(vaddr);
 }
 
 PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t vaddr)
 {
-    uint64_t page_index, pt_index, pd_index, pdp_index;
+    uint64_t page_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;
 
-    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);
+    uint64_t indexes[3];
 
-    uint64_t indexes[3] = {pdp_index, pd_index, pt_index};
+    decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
 
     for (int i = 0; i < 3;
          i++) // Walk through the page map level 4, page directory pointer, and page directory to find the page table.
@@ -96,11 +137,13 @@ PageDirectoryEntry* VMM::find_pde(PageTable* root, uint64_t vaddr)
 
 PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vaddr)
 {
-    uint64_t page_index, pt_index, pd_index, pdp_index;
+    uint64_t page_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;
 
-    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);
+    uint64_t indexes[3];
+
+    decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
 
     auto pde_create_if_not_present = [&]() {
         pt = (PageTable*)PMM::request_page();
@@ -110,8 +153,6 @@ PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vadd
         pde->present = true;
     };
 
-    uint64_t indexes[3] = {pdp_index, pd_index, pt_index};
-
     for (int i = 0; i < 3; i++)
     {
         pde = &pt->entries[indexes[i]];
@@ -128,13 +169,13 @@ PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vadd
 
 void VMM::propagate_read_write(PageTable* root, uint64_t vaddr)
 {
-    uint64_t page_index, pt_index, pd_index, pdp_index;
+    uint64_t page_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;
 
-    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);
+    uint64_t indexes[3];
 
-    uint64_t indexes[3] = {pdp_index, pd_index, pt_index};
+    decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
 
     for (int i = 0; i < 3; i++)
     {
@@ -156,13 +197,13 @@ void VMM::propagate_read_write(PageTable* root, uint64_t vaddr)
 
 void VMM::propagate_user(PageTable* root, uint64_t vaddr)
 {
-    uint64_t page_index, pt_index, pd_index, pdp_index;
+    uint64_t page_index;
     PageDirectoryEntry* pde;
     PageTable* pt = root;
 
-    decompose_vaddr(vaddr, page_index, pt_index, pd_index, pdp_index);
+    uint64_t indexes[3];
 
-    uint64_t indexes[3] = {pdp_index, pd_index, pt_index};
+    decompose_vaddr(vaddr, page_index, indexes[2], indexes[1], indexes[0]);
 
     for (int i = 0; i < 3; i++)
     {
@@ -200,6 +241,11 @@ void VMM::decompose_vaddr(uint64_t vaddr, uint64_t& page_index, uint64_t& pt_ind
     pdp_index = vaddr & 0x1ff;
 }
 
+uint64_t VMM::recompose_vaddr(uint64_t page_index, uint64_t pt_index, uint64_t pd_index, uint64_t pdp_index)
+{
+    return pdp_index << 39 | pd_index << 30 | pt_index << 21 | page_index << 12;
+}
+
 void VMM::install_kernel_page_directory_into_address_space(AddressSpace& space)
 {
     PageTable* space_pml4 = space.get_pml4();
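
recompose_vaddr() is the declared inverse of decompose_vaddr(): each table index is 9 bits wide and the byte offset within a page takes the low 12 bits, so pdp_index ends up selecting the top-level slot (shift 39) and page_index the 4 KiB page (shift 12). A small worked example with illustrative index values:

    // (0 << 39) | (0 << 30) | (3 << 21) | (1 << 12) == 0x601000
    uint64_t vaddr = VMM::recompose_vaddr(/*page_index*/ 1, /*pt_index*/ 3, /*pd_index*/ 0, /*pdp_index*/ 0);
    // vaddr == 0x601000; feeding it back through decompose_vaddr() should return 1, 3, 0, 0.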
@@ -8,6 +8,7 @@
 #include "log/Log.h"
 #include "memory/Memory.h"
 #include "memory/MemoryManager.h"
+#include "memory/VMM.h"
 #include "misc/utils.h"
 #include "std/stdlib.h"
 #include "std/string.h"
@@ -85,13 +86,17 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
         kdbgln("Loading loadable segment at address %lx, file size %ld, mem size %ld, permissions %s", phdr.p_vaddr,
                phdr.p_filesz, phdr.p_memsz, format_permissions(phdr.p_flags));
         ASSERT(phdr.p_vaddr);
 
         uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, (phdr.p_vaddr % PAGE_SIZE) + phdr.p_memsz);
         void* buffer = (void*)((uint64_t)MemoryManager::get_pages_at(
-                           Utilities::round_down_to_nearest_page(phdr.p_vaddr), pages,
-                           phdr.p_flags & 2 ? MAP_READ_WRITE | MAP_USER : MAP_USER) +
+                           Utilities::round_down_to_nearest_page(phdr.p_vaddr), pages, MAP_READ_WRITE) +
                        (phdr.p_vaddr % PAGE_SIZE));
 
         VFS::read(node, phdr.p_offset, phdr.p_filesz, (char*)buffer);
         memset((void*)((uint64_t)buffer + phdr.p_filesz), 0, phdr.p_memsz - phdr.p_filesz);
 
+        MemoryManager::protect(buffer, pages, phdr.p_flags & 2 ? MAP_READ_WRITE | MAP_USER : MAP_USER);
+
         image = (ELFImage*)krealloc(image, (sizeof(ELFImage) - sizeof(ELFSection)) +
                                                (image->section_count + 1) * sizeof(ELFSection));
         ELFSection& section = image->sections[image->section_count];
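
Since the commit message says the memory where the ELF should be ends up all zeroes, a cheap way to make that visible is to dump the first few bytes of the segment right after the read and memset above (a hypothetical debugging aid, not part of this commit; it reuses the %lx formatting kdbgln already accepts here):

        const uint8_t* bytes = (const uint8_t*)buffer;
        kdbgln("segment at %lx begins with %lx %lx %lx %lx", phdr.p_vaddr, (uint64_t)bytes[0], (uint64_t)bytes[1],
               (uint64_t)bytes[2], (uint64_t)bytes[3]);
        // All zeroes here would mean the pages backing p_vaddr never received the file contents.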
@@ -127,6 +127,8 @@ void Scheduler::load_user_task(const char* filename)
     ASSERT(new_task);
     memset(&new_task->regs, 0, sizeof(Context));
     new_task->id = free_tid++;
+    new_task->address_space = AddressSpace::create();
+    VMM::switch_to_user_address_space(new_task->address_space);
     ELFImage* image = ELFLoader::load_elf_from_filesystem(
         filename); // FIXME: TOCTOU? Right now, impossible, since interrupts are disabled and SMP is not a thing. But in
                    // the future, it might be possible.
@@ -153,6 +155,7 @@ void Scheduler::load_user_task(const char* filename)
     task_num++;
     kinfoln("Adding user task: loaded at %lx, tid %ld, stack at %lx, total tasks: %ld", new_task->regs.rip,
             new_task->id, new_task->regs.rsp, task_num);
+    VMM::switch_back_to_kernel_address_space();
     Interrupts::pop();
 }
 
@@ -179,12 +182,21 @@ void Scheduler::reap_task(Task* task)
     task_num--;
     Task* exiting_task = task;
     ASSERT(task->id != 0); // WHY IN THE WORLD WOULD WE BE REAPING THE IDLE TASK?
+    if (exiting_task->is_user_task()) { VMM::switch_to_user_address_space(exiting_task->address_space); }
     kinfoln("reaping task %ld, exited with code %ld", exiting_task->id, exiting_task->exit_status);
     if (exiting_task->allocated_stack)
         MemoryManager::release_pages((void*)exiting_task->allocated_stack, TASK_PAGES_IN_STACK);
     if (exiting_task->image) // FIXME: Also free pages the task has mmap-ed but not munmap-ed.
     {
-        ELFLoader::release_elf_image(exiting_task->image);
+        // ELFLoader::release_elf_image(exiting_task->image);
+        kfree(exiting_task->image);
+    }
+    if (exiting_task->is_user_task())
+    {
+        VMM::switch_back_to_kernel_address_space();
+        Interrupts::push_and_enable();
+        exiting_task->address_space.destroy();
+        Interrupts::pop();
     }
     for (int i = 0; i < TASK_MAX_FDS; i++) { exiting_task->files[i].close(); }
     delete exiting_task;
@@ -313,7 +325,17 @@ void Scheduler::task_yield(Context* context)
         {
             task_save_floating(*original_task);
         }
-        if (sched_current_task->is_user_task()) { task_restore_floating(*sched_current_task); }
+        if (sched_current_task->is_user_task())
+        {
+            VMM::switch_to_user_address_space(sched_current_task->address_space);
+            VMM::apply_address_space();
+            task_restore_floating(*sched_current_task);
+        }
+        else if (!was_idle && original_task->is_user_task() && !sched_current_task->is_user_task())
+        {
+            VMM::switch_back_to_kernel_address_space();
+            VMM::apply_address_space();
+        }
     }
     sched_current_task->task_time = 20;
     set_context_from_task(*sched_current_task, context);
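
task_yield() now pairs every switch_* call with apply_address_space(), and only skips the CR3 reload when one kernel task follows another. A simplified, hypothetical helper expressing the same decision (the real code above also special-cases was_idle and handles floating-point state):

    static void apply_address_space_for(Task& incoming, bool outgoing_was_user)
    {
        if (incoming.is_user_task())
            VMM::switch_to_user_address_space(incoming.address_space);
        else if (outgoing_was_user)
            VMM::switch_back_to_kernel_address_space();
        else
            return; // kernel -> kernel: current_pml4 is already correct, no reload needed
        VMM::apply_address_space();
    }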