Kernel: Rename ASSERT() to ensure()

Unlike the standard assert(), this macro is not stripped out of release builds, so it shouldn't be named like assert.
This commit is contained in:
apio 2022-11-02 19:38:15 +01:00
parent e5cf32c7b3
commit c604c074a1
13 changed files with 41 additions and 43 deletions

View File

@ -1,6 +1,4 @@
#pragma once #pragma once
#include "panic/Panic.h" #include "panic/Panic.h"
#define ASSERT(expr) (bool)(expr) || panic("Assertion failed: " #expr) #define ensure(expr) (bool)(expr) || panic("Check failed: " #expr)
#define TODO(message) panic("TODO: " message)

View File

@ -73,7 +73,7 @@ static void set_base(GDTEntry* entry, uint32_t base)
static void set_limit(GDTEntry* entry, uint32_t limit) static void set_limit(GDTEntry* entry, uint32_t limit)
{ {
ASSERT(limit <= 0xFFFFF); ensure(limit <= 0xFFFFF);
entry->limit0 = limit & 0xFFFF; entry->limit0 = limit & 0xFFFF;
entry->limit1_flags = (entry->limit1_flags & 0xF0) | ((limit >> 16) & 0xF); entry->limit1_flags = (entry->limit1_flags & 0xF0) | ((limit >> 16) & 0xF);
} }

View File

@ -15,7 +15,7 @@
extern "C" void common_handler(Context* context) extern "C" void common_handler(Context* context)
{ {
ASSERT(Interrupts::is_in_handler()); ensure(Interrupts::is_in_handler());
if (context->number >= 0x20 && context->number < 0x30) if (context->number >= 0x20 && context->number < 0x30)
{ {
IRQ::interrupt_handler(context); IRQ::interrupt_handler(context);

View File

@ -37,8 +37,8 @@ uint64_t IDTEntry::get_offset()
void IDT::add_handler(short interrupt_number, void* handler, uint8_t type_attr) void IDT::add_handler(short interrupt_number, void* handler, uint8_t type_attr)
{ {
ASSERT(handler != nullptr); ensure(handler != nullptr);
ASSERT(interrupt_number < 256); ensure(interrupt_number < 256);
IDTEntry* entry_for_handler = &entries[interrupt_number]; IDTEntry* entry_for_handler = &entries[interrupt_number];
entry_for_handler->selector = 0x08; entry_for_handler->selector = 0x08;
entry_for_handler->type_attr = type_attr; entry_for_handler->type_attr = type_attr;

View File

@ -52,9 +52,9 @@ extern "C" void _start()
kinfoln("Prepared scheduler"); kinfoln("Prepared scheduler");
#ifdef RUN_TEST_AS_INIT #ifdef RUN_TEST_AS_INIT
ASSERT(Scheduler::load_user_task(STRINGIZE_VALUE_OF(RUN_TEST_AS_INIT)) > 0); ensure(Scheduler::load_user_task(STRINGIZE_VALUE_OF(RUN_TEST_AS_INIT)) > 0);
#else #else
ASSERT(Scheduler::load_user_task("/bin/init") > 0); ensure(Scheduler::load_user_task("/bin/init") > 0);
#endif #endif
Scheduler::add_kernel_task("[reaper]", []() { Scheduler::add_kernel_task("[reaper]", []() {
@ -67,7 +67,7 @@ extern "C" void _start()
kinfoln("Prepared scheduler tasks"); kinfoln("Prepared scheduler tasks");
ASSERT(VFS::mkdir("/dev") == 0); ensure(VFS::mkdir("/dev") == 0);
VFS::mount("/dev", DeviceFS::get()); VFS::mount("/dev", DeviceFS::get());
Init::finish_kernel_boot(); Init::finish_kernel_boot();

View File

@ -136,7 +136,7 @@ void* MemoryManager::get_page_at(uint64_t addr, int flags)
void MemoryManager::release_page(void* page) void MemoryManager::release_page(void* page)
{ {
uint64_t physicalAddress = VMM::get_physical((uint64_t)page); uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space... ensure(physicalAddress != UINT64_MAX); // this address is not mapped in the virtual address space...
VMM::unmap((uint64_t)page); VMM::unmap((uint64_t)page);
PMM::free_page((void*)physicalAddress); PMM::free_page((void*)physicalAddress);
} }
@ -196,7 +196,7 @@ void MemoryManager::release_pages(void* pages, uint64_t count)
{ {
void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE)); void* page = (void*)((uint64_t)pages + (i * PAGE_SIZE));
uint64_t physicalAddress = VMM::get_physical((uint64_t)page); uint64_t physicalAddress = VMM::get_physical((uint64_t)page);
ASSERT(physicalAddress != UINT64_MAX); ensure(physicalAddress != UINT64_MAX);
VMM::unmap((uint64_t)page); VMM::unmap((uint64_t)page);
PMM::free_page((void*)physicalAddress); PMM::free_page((void*)physicalAddress);
} }

View File

@ -50,7 +50,7 @@ void PMM::init()
bitmap_addr = (char*)biggest_chunk; bitmap_addr = (char*)biggest_chunk;
virtual_bitmap_addr = bitmap_addr; virtual_bitmap_addr = bitmap_addr;
ASSERT((total_mem / PAGE_SIZE / 8) < biggest_chunk_size); ensure((total_mem / PAGE_SIZE / 8) < biggest_chunk_size);
bitmap_size = total_mem / PAGE_SIZE / 8 + 1; bitmap_size = total_mem / PAGE_SIZE / 8 + 1;
memset(bitmap_addr, 0xFF, bitmap_size); memset(bitmap_addr, 0xFF, bitmap_size);

View File

@ -186,7 +186,7 @@ PageDirectoryEntry* VMM::create_pde_if_not_exists(PageTable* root, uint64_t vadd
auto pde_create_if_not_present = [&]() { auto pde_create_if_not_present = [&]() {
pt = (PageTable*)PMM::request_page(); pt = (PageTable*)PMM::request_page();
ASSERT(!(PMM_DID_FAIL(pt))); ensure(!(PMM_DID_FAIL(pt)));
memset(pt, 0, PAGE_SIZE); memset(pt, 0, PAGE_SIZE);
pde->set_address((uint64_t)pt); pde->set_address((uint64_t)pt);
pde->present = true; pde->present = true;

View File

@ -47,7 +47,7 @@ uint64_t Mersenne::get()
{ {
if (index >= STATE_SIZE) if (index >= STATE_SIZE)
{ {
ASSERT(index == STATE_SIZE && "Mersenne generator was never seeded"); ensure(index == STATE_SIZE && "Mersenne generator was never seeded");
twist(); twist();
} }

View File

@ -67,15 +67,15 @@ long ELFLoader::check_elf_image_from_filesystem(const char* filename)
ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node) ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
{ {
Elf64_Ehdr elf_ehdr; Elf64_Ehdr elf_ehdr;
ASSERT(VFS::read(node, 0, sizeof(elf_ehdr), (char*)&elf_ehdr) >= 0); ensure(VFS::read(node, 0, sizeof(elf_ehdr), (char*)&elf_ehdr) >= 0);
ASSERT(strncmp((const char*)elf_ehdr.e_ident, ELFMAG, SELFMAG) == ensure(strncmp((const char*)elf_ehdr.e_ident, ELFMAG, SELFMAG) ==
0); // If you haven't checked the ELF executable with check_elf_image() first, then an assertion fail is your 0); // If you haven't checked the ELF executable with check_elf_image() first, then an assertion fail is your
// fault =D // fault =D
ASSERT(elf_ehdr.e_ident[EI_CLASS] == ELFCLASS64); ensure(elf_ehdr.e_ident[EI_CLASS] == ELFCLASS64);
ASSERT(elf_ehdr.e_ident[EI_DATA] == ELFDATA2LSB); ensure(elf_ehdr.e_ident[EI_DATA] == ELFDATA2LSB);
ASSERT(elf_ehdr.e_type == ET_EXEC); ensure(elf_ehdr.e_type == ET_EXEC);
ASSERT(elf_ehdr.e_machine == EM_MACH); ensure(elf_ehdr.e_machine == EM_MACH);
ASSERT(elf_ehdr.e_phnum != 0); ensure(elf_ehdr.e_phnum != 0);
ELFImage* image = (ELFImage*)kmalloc(sizeof(ELFImage) - sizeof(ELFSection)); ELFImage* image = (ELFImage*)kmalloc(sizeof(ELFImage) - sizeof(ELFSection));
memset(image, 0, sizeof(ELFImage) - sizeof(ELFSection)); memset(image, 0, sizeof(ELFImage) - sizeof(ELFSection));
image->entry = elf_ehdr.e_entry; image->entry = elf_ehdr.e_entry;
@ -88,7 +88,7 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
{ {
kdbgln("Loading loadable segment at address %lx, file size %ld, mem size %ld, permissions %s", phdr.p_vaddr, kdbgln("Loading loadable segment at address %lx, file size %ld, mem size %ld, permissions %s", phdr.p_vaddr,
phdr.p_filesz, phdr.p_memsz, format_permissions(phdr.p_flags)); phdr.p_filesz, phdr.p_memsz, format_permissions(phdr.p_flags));
ASSERT(phdr.p_vaddr); ensure(phdr.p_vaddr);
uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, (phdr.p_vaddr % PAGE_SIZE) + phdr.p_memsz); uint64_t pages = Utilities::get_blocks_from_size(PAGE_SIZE, (phdr.p_vaddr % PAGE_SIZE) + phdr.p_memsz);
void* buffer = (void*)((uint64_t)MemoryManager::get_pages_at(round_down_to_nearest_page(phdr.p_vaddr), void* buffer = (void*)((uint64_t)MemoryManager::get_pages_at(round_down_to_nearest_page(phdr.p_vaddr),
@ -120,7 +120,7 @@ ELFImage* ELFLoader::load_elf_from_vfs(VFS::Node* node)
} }
else { kdbgln("skipping non-loadable segment"); } else { kdbgln("skipping non-loadable segment"); }
} }
ASSERT(image->section_count); ensure(image->section_count);
return image; return image;
} }

View File

@ -67,7 +67,7 @@ void push_on_user_stack(uint64_t* rsp, char* value,
{ {
(*rsp) -= size; (*rsp) -= size;
char* kvalue = (char*)VMM::get_physical(*rsp); char* kvalue = (char*)VMM::get_physical(*rsp);
ASSERT(kvalue != (char*)UINT64_MAX); ensure(kvalue != (char*)UINT64_MAX);
memcpy(kvalue, value, size); memcpy(kvalue, value, size);
} }
@ -202,10 +202,10 @@ void sys_execv(Context* context, const char* pathname, char** argv)
} }
Interrupts::disable(); Interrupts::disable();
ASSERT(!Interrupts::are_enabled()); // This part is pretty sensitive. ensure(!Interrupts::are_enabled()); // This part is pretty sensitive.
Task* task = Scheduler::current_task(); Task* task = Scheduler::current_task();
ASSERT(task); ensure(task);
// At this point, pretty much nothing can fail. // At this point, pretty much nothing can fail.
@ -220,7 +220,7 @@ void sys_execv(Context* context, const char* pathname, char** argv)
// new stack. // new stack.
ELFImage* image = ELFLoader::load_elf_from_vfs(program); ELFImage* image = ELFLoader::load_elf_from_vfs(program);
ASSERT(image); // If check_elf_image succeeded, load_elf_from_vfs MUST succeed, unless something has gone terribly ensure(image); // If check_elf_image succeeded, load_elf_from_vfs MUST succeed, unless something has gone terribly
// wrong. // wrong.
if (VFS::is_setuid(program)) task->uid = program->uid; if (VFS::is_setuid(program)) task->uid = program->uid;

View File

@ -72,7 +72,7 @@ void Scheduler::append_task(Task* task)
{ {
if (!base_task) if (!base_task)
{ {
ASSERT(!end_task); ensure(!end_task);
base_task = task; base_task = task;
end_task = base_task; end_task = base_task;
task->next_task = task; task->next_task = task;
@ -112,7 +112,7 @@ void Scheduler::init()
void Scheduler::add_kernel_task(const char* taskname, void (*task)(void)) void Scheduler::add_kernel_task(const char* taskname, void (*task)(void))
{ {
Task* new_task = new Task; Task* new_task = new Task;
ASSERT(new_task); ensure(new_task);
new_task->user_task = false; new_task->user_task = false;
new_task->id = free_pid++; new_task->id = free_pid++;
new_task->ppid = 0; new_task->ppid = 0;
@ -159,7 +159,7 @@ long Scheduler::load_user_task(const char* filename)
kinfoln("Loading user task: %s", filename); kinfoln("Loading user task: %s", filename);
Interrupts::push_and_disable(); Interrupts::push_and_disable();
Task* new_task = new Task; Task* new_task = new Task;
ASSERT(new_task); ensure(new_task);
memset(&new_task->regs, 0, sizeof(Context)); memset(&new_task->regs, 0, sizeof(Context));
new_task->id = free_pid++; new_task->id = free_pid++;
new_task->ppid = 0; new_task->ppid = 0;
@ -191,7 +191,7 @@ long Scheduler::load_user_task(const char* filename)
return -ENOMEM; return -ENOMEM;
} }
ELFImage* image = ELFLoader::load_elf_from_filesystem(filename); ELFImage* image = ELFLoader::load_elf_from_filesystem(filename);
ASSERT(image); ensure(image);
new_task->user_task = true; new_task->user_task = true;
new_task->regs.rip = image->entry; new_task->regs.rip = image->entry;
new_task->image = image; new_task->image = image;
@ -247,10 +247,10 @@ void Scheduler::reset_task(Task* task, ELFImage* new_image)
void Scheduler::reap_task(Task* task) void Scheduler::reap_task(Task* task)
{ {
ASSERT(!Interrupts::is_in_handler()); ensure(!Interrupts::is_in_handler());
task_num--; task_num--;
Task* exiting_task = task; Task* exiting_task = task;
ASSERT(task->id != 0); // WHY IN THE WORLD WOULD WE BE REAPING THE IDLE TASK? ensure(task->id != 0); // WHY IN THE WORLD WOULD WE BE REAPING THE IDLE TASK?
if (exiting_task->is_user_task()) if (exiting_task->is_user_task())
{ {
VMM::switch_back_to_kernel_address_space(); VMM::switch_back_to_kernel_address_space();
@ -302,7 +302,7 @@ void sched_common_exit(Context* context, int64_t status)
void Scheduler::task_exit(Context* context, int64_t status) void Scheduler::task_exit(Context* context, int64_t status)
{ {
ASSERT(Interrupts::is_in_handler()); ensure(Interrupts::is_in_handler());
kdbgln("exit: task %ld finished running, used %ld ms of cpu time", sched_current_task->id, kdbgln("exit: task %ld finished running, used %ld ms of cpu time", sched_current_task->id,
sched_current_task->cpu_time); sched_current_task->cpu_time);
sched_common_exit(context, status); sched_common_exit(context, status);
@ -310,7 +310,7 @@ void Scheduler::task_exit(Context* context, int64_t status)
void Scheduler::task_misbehave(Context* context, int64_t status) void Scheduler::task_misbehave(Context* context, int64_t status)
{ {
ASSERT(Interrupts::is_in_handler()); ensure(Interrupts::is_in_handler());
kdbgln("exit: task %ld misbehaved, used %ld ms of cpu time", sched_current_task->id, sched_current_task->cpu_time); kdbgln("exit: task %ld misbehaved, used %ld ms of cpu time", sched_current_task->id, sched_current_task->cpu_time);
sched_common_exit(context, status); sched_common_exit(context, status);
} }
@ -318,7 +318,7 @@ void Scheduler::task_misbehave(Context* context, int64_t status)
void Scheduler::reap_tasks() void Scheduler::reap_tasks()
{ {
Interrupts::disable(); Interrupts::disable();
ASSERT(!Interrupts::is_in_handler()); ensure(!Interrupts::is_in_handler());
Task* reap_base = nullptr; Task* reap_base = nullptr;
Task* reap_end = nullptr; Task* reap_end = nullptr;
Task* task = base_task; Task* task = base_task;
@ -381,7 +381,7 @@ static void sched_decrement_sleep_times()
void Scheduler::task_tick(Context* context) void Scheduler::task_tick(Context* context)
{ {
ASSERT(Interrupts::is_in_handler()); ensure(Interrupts::is_in_handler());
Interrupts::disable(); Interrupts::disable();
sched_decrement_sleep_times(); sched_decrement_sleep_times();
sched_current_task->task_time -= frequency; sched_current_task->task_time -= frequency;
@ -397,7 +397,7 @@ void Scheduler::task_tick(Context* context)
void Scheduler::task_yield(Context* context) void Scheduler::task_yield(Context* context)
{ {
ASSERT(Interrupts::is_in_handler()); ensure(Interrupts::is_in_handler());
Interrupts::disable(); Interrupts::disable();
sched_current_task->save_context(context); sched_current_task->save_context(context);
bool was_idle = false; bool was_idle = false;
@ -593,7 +593,7 @@ bool Task::is_wait_still_blocking()
else else
{ {
child = Scheduler::find_by_pid(blocking_wait_info.pid); child = Scheduler::find_by_pid(blocking_wait_info.pid);
ASSERT(child); // since sys_waitpid should have validated this child, and the only way for it to disappear from ensure(child); // since sys_waitpid should have validated this child, and the only way for it to disappear from
// the process list is for someone to wait for it, this should be pretty safe. // the process list is for someone to wait for it, this should be pretty safe.
if (child->state != child->Dying) return true; if (child->state != child->Dying) return true;
else else
@ -603,10 +603,10 @@ bool Task::is_wait_still_blocking()
void Task::resume_wait() void Task::resume_wait()
{ {
ASSERT(blocking_wait_info.pid != -1); // is_wait_still_blocking should have chosen a child for us if the user ensure(blocking_wait_info.pid != -1); // is_wait_still_blocking should have chosen a child for us if the user
// process told us to wait for any child. // process told us to wait for any child.
Task* child = Scheduler::find_by_pid(blocking_wait_info.pid); Task* child = Scheduler::find_by_pid(blocking_wait_info.pid);
ASSERT(child); // This should also already have been validated. ensure(child); // This should also already have been validated.
if (blocking_wait_info.wstatus) if (blocking_wait_info.wstatus)
{ {

View File

@ -93,7 +93,7 @@ void Task::resume()
case BlockReason::Reading: resume_read(); break; case BlockReason::Reading: resume_read(); break;
case BlockReason::Waiting: resume_wait(); break; case BlockReason::Waiting: resume_wait(); break;
default: ASSERT(false); default: ensure(false);
} }
VMM::apply_address_space(); VMM::apply_address_space();
block_reason = BlockReason::None; block_reason = BlockReason::None;