Kernel: Add validate_user_write() and validate_user_read()

Not at copy_from_user and copy_to_user yet, but an improvement over blindly using physical memory.
apio 2022-11-04 22:46:48 +01:00
parent cbc2e76082
commit ffcaac0ca3
4 changed files with 76 additions and 9 deletions
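
Aside (not part of the commit): the eventual copy_from_user mentioned above could be built directly on these validators. A minimal sketch, assuming the kernel may dereference the physical addresses returned by VMM::get_physical() directly, as the syscall code below already does:

// Hypothetical sketch: a copy_from_user() built on validate_user_read().
// It walks the user buffer page by page, so buffers spanning physically
// non-contiguous pages are copied correctly.
bool copy_from_user(void* dest, const void* user_src, size_t size)
{
    uintptr_t src = (uintptr_t)user_src;
    if (!validate_user_read(src, size)) return false;
    char* out = (char*)dest;
    while (size)
    {
        uint64_t phys = VMM::get_physical(src);
        if (phys == (uint64_t)-1) return false; // Should not happen after validation.
        size_t chunk = PAGE_SIZE - (src % PAGE_SIZE); // Bytes left in this page.
        if (chunk > size) chunk = size;
        memcpy(out, (const void*)phys, chunk);
        out += chunk;
        src += chunk;
        size -= chunk;
    }
    return true;
}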

View File

@@ -8,8 +8,13 @@
#include "memory/MemoryManager.h"
#include "memory/VMM.h"
#include "misc/utils.h"
+#include <stddef.h>
char* strdup_from_user(const char* user_string);
+bool validate_user_readable_page(uintptr_t address);
+bool validate_user_writable_page(uintptr_t address);
+bool validate_user_read(uintptr_t address, size_t size);
+bool validate_user_write(uintptr_t address, size_t size);
// FIXME: Map the physical addresses into kernel address space. Right now, something overwrites KernelHeap and crashes
// it, so that's not really possible. But it should be done in the future.

View File

@@ -1,5 +1,7 @@
#include "sys/UserMemory.h"
#include "memory/Memory.h"
#include "std/string.h"
#include "utils/Addresses.h"
char* strdup_from_user(
const char* user_string) // FIXME: This function is a little hacky. Use the obtain_user_ref and similar functions.
@@ -7,4 +9,58 @@ char* strdup_from_user(
uint64_t phys = VMM::get_physical((uint64_t)user_string);
if (phys == (uint64_t)-1) { return nullptr; }
return strdup((const char*)phys);
}
+
+bool validate_user_readable_page(uintptr_t address)
+{
+    if (Memory::is_kernel_address(address)) return false;
+    auto rc = VMM::get_flags(address);
+    if (rc == (uint64_t)-1) return false;
+    if (rc & MAP_USER) return true;
+    return false;
+}
+
+bool validate_user_writable_page(uintptr_t address)
+{
+    if (Memory::is_kernel_address(address)) return false;
+    auto rc = VMM::get_flags(address);
+    if (rc == (uint64_t)-1) return false;
+    // Both flags must be set: a plain '&' with the OR-ed mask would also
+    // accept pages that are user-accessible but read-only.
+    if ((rc & (MAP_USER | MAP_READ_WRITE)) == (MAP_USER | MAP_READ_WRITE)) return true;
+    return false;
+}
+
+bool validate_user_read(uintptr_t address, size_t size)
+{
+    auto aligned = round_down_to_nearest_page(address);
+    if (aligned != address) // Otherwise, we already do this check below.
+    {
+        if (!validate_user_readable_page(aligned)) return false;
+    }
+    while (size--)
+    {
+        if (address % PAGE_SIZE == 0)
+        {
+            if (!validate_user_readable_page(address)) return false;
+        }
+        address++;
+    }
+    return true;
+}
+
+bool validate_user_write(uintptr_t address, size_t size)
+{
+    auto aligned = round_down_to_nearest_page(address);
+    if (aligned != address) // Otherwise, we already do this check below.
+    {
+        if (!validate_user_writable_page(aligned)) return false;
+    }
+    while (size--)
+    {
+        if (address % PAGE_SIZE == 0)
+        {
+            if (!validate_user_writable_page(address)) return false;
+        }
+        address++;
+    }
+    return true;
+}
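
Side note: since permission bits only change at page granularity, the byte-by-byte walk above can be replaced by a page-granular one that performs the same checks in O(pages) instead of O(bytes). A sketch, not part of this commit, assuming address + size does not overflow:

// Equivalent page-granular check; illustration only.
bool validate_user_read(uintptr_t address, size_t size)
{
    if (size == 0) return true;
    uintptr_t end = address + size; // Assumed not to overflow.
    for (uintptr_t page = round_down_to_nearest_page(address); page < end; page += PAGE_SIZE)
    {
        if (!validate_user_readable_page(page)) return false;
    }
    return true;
}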

View File

@@ -131,15 +131,17 @@ void sys_execv(Context* context, const char* pathname, char** argv)
if (kargv) kfree(kargv);
};
// FIXME: This is a bit messy.
do {
-if (Memory::is_kernel_address((uintptr_t)argv))
+if (!validate_user_read((uintptr_t)argv, sizeof(char*)))
{
free_kernel_argv_copy();
context->rax = -EFAULT;
return;
}
-arg = (char**)VMM::get_physical((uint64_t)argv);
-if (arg == (char**)UINT64_MAX)
+arg = obtain_user_ref(argv);
+if (!arg) // This should never happen, since it was already checked just before.
{
free_kernel_argv_copy();
context->rax = -EFAULT;
@@ -148,6 +150,7 @@ void sys_execv(Context* context, const char* pathname, char** argv)
kargv = (char**)krealloc(kargv, (kargc + 1) * sizeof(char*));
if (!kargv)
{
+release_user_ref(arg);
free_kernel_argv_copy();
context->rax = -ENOMEM;
return;
@@ -157,6 +160,7 @@ void sys_execv(Context* context, const char* pathname, char** argv)
char* kcopy = strdup_from_user(*arg);
if (!kcopy)
{
+release_user_ref(arg);
free_kernel_argv_copy();
context->rax = -ENOMEM;
return;
@@ -168,6 +172,7 @@ void sys_execv(Context* context, const char* pathname, char** argv)
kargv[kargc] = nullptr;
break;
}
+release_user_ref(arg);
kargc++;
argv++;
} while (arg != nullptr);
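
obtain_user_ref() and release_user_ref() are not shown in this diff; from their call sites they appear to validate a user pointer and hand back one the kernel can dereference, which must then be released (exact signatures assumed). The pattern in isolation:

// Illustration only; obtain_user_ref()/release_user_ref() semantics are
// inferred from the call sites in sys_execv above.
int read_user_slot(char** user_slot, char*& out)
{
    char** ref = obtain_user_ref(user_slot); // Validate and translate the pointer.
    if (!ref) return -EFAULT;                // The slot was not accessible.
    out = *ref;                              // Read through the kernel-side ref.
    release_user_ref(ref);                   // Pair every obtain with a release.
    return 0;
}

Note how each early return in the loop above calls release_user_ref() first; leaking the ref on an error path would be an easy bug to introduce.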
@@ -216,8 +221,8 @@ void sys_execv(Context* context, const char* pathname, char** argv)
task->address_space.clear();
task->allocated_stack = (uint64_t)MemoryManager::get_pages_at(
0x100000, TASK_PAGES_IN_STACK,
-MAP_USER | MAP_READ_WRITE | MAP_AS_OWNED_BY_TASK); // If we had enough space for the old stack, there should be enough space for the
-// new stack.
+MAP_USER | MAP_READ_WRITE | MAP_AS_OWNED_BY_TASK); // If we had enough space for the old stack, there should be
+// enough space for the new stack.
ELFImage* image = ELFLoader::load_elf_from_vfs(program);
ensure(image); // If check_elf_image succeeded, load_elf_from_vfs MUST succeed, unless something has gone terribly

View File

@@ -138,7 +138,7 @@ void sys_seek(Context* context, int fd, long offset, int whence)
void sys_write(Context* context, int fd, size_t size, const char* addr)
{
-if (!addr)
+if (!validate_user_read((uintptr_t)addr, size))
{
context->rax = -EFAULT;
return;
@@ -156,7 +156,8 @@ void sys_write(Context* context, int fd, size_t size, const char* addr)
return;
}
ssize_t result = file->write(
-size, (const char*)VMM::get_physical((uint64_t)addr)); // FIXME: Handle big buffers and invalid addresses.
+size, (const char*)VMM::get_physical((uint64_t)addr)); // FIXME: Handle buffers bigger than one page, which may
+// not be contiguous in physical memory.
context->rax = (size_t)result;
return;
}
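
A possible shape for the fix that FIXME asks for, assuming a kmalloc()/kfree()-style kernel allocator (krealloc() already appears in this commit) and the copy_from_user() sketched earlier. The tail of sys_write would copy through a kernel bounce buffer, so the user buffer may span non-contiguous physical pages:

// Sketch only: bounce-buffer variant of the write path above.
char* kbuf = (char*)kmalloc(size); // FIXME: chunk very large sizes instead of one big allocation.
if (!kbuf)
{
    context->rax = -ENOMEM;
    return;
}
if (!copy_from_user(kbuf, addr, size))
{
    kfree(kbuf);
    context->rax = -EFAULT;
    return;
}
ssize_t result = file->write(size, kbuf);
kfree(kbuf);
context->rax = (size_t)result;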
@@ -269,7 +270,7 @@ void sys_open(Context* context, const char* filename, int flags, mode_t) // FIXM
void sys_read(Context* context, int fd, size_t size, char* buffer)
{
-if (!buffer)
+if (!validate_user_write((uintptr_t)buffer, size))
{
context->rax = -EFAULT;
return;
@@ -302,7 +303,7 @@ void sys_read(Context* context, int fd, size_t size, char* buffer)
return Scheduler::task_yield(context);
}
ssize_t result =
-file->read(size, (char*)VMM::get_physical((uint64_t)buffer)); // FIXME: Handle errors, and big buffers which may
+file->read(size, (char*)VMM::get_physical((uint64_t)buffer)); // FIXME: Handle big buffers which may
// not be across continuous physical pages.
context->rax = (size_t)result;
return;
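
sys_read has the mirror-image FIXME; the copy_to_user mentioned in the commit message would address it. A matching sketch using validate_user_write(), under the same assumptions as the copy_from_user() sketch above:

// Hypothetical copy_to_user(): copy kernel data into a validated, writable
// user buffer, page by page.
bool copy_to_user(void* user_dest, const void* src, size_t size)
{
    uintptr_t dest = (uintptr_t)user_dest;
    if (!validate_user_write(dest, size)) return false;
    const char* in = (const char*)src;
    while (size)
    {
        uint64_t phys = VMM::get_physical(dest);
        if (phys == (uint64_t)-1) return false; // Should not happen after validation.
        size_t chunk = PAGE_SIZE - (dest % PAGE_SIZE); // Bytes left in this page.
        if (chunk > size) chunk = size;
        memcpy((void*)phys, in, chunk);
        in += chunk;
        dest += chunk;
        size -= chunk;
    }
    return true;
}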