Start working on a VFS implementation #22

Closed
apio wants to merge 44 commits from oop-vfs into main
75 changed files with 853 additions and 235 deletions

View File

@ -28,7 +28,7 @@ set(CMAKE_FIND_ROOT_PATH ${LUNA_ROOT}/toolchain/${LUNA_ARCH}-luna)
message(STATUS "Configuring Luna for ${LUNA_ARCH}") message(STATUS "Configuring Luna for ${LUNA_ARCH}")
add_subdirectory(luna) add_subdirectory(libluna)
add_subdirectory(libc) add_subdirectory(libc)
add_subdirectory(kernel) add_subdirectory(kernel)
add_subdirectory(apps) add_subdirectory(apps)

View File

@ -8,7 +8,7 @@ A simple kernel and userspace for desktop computers, written mostly in C++ and C
- [System call](kernel/src/sys/) interface and [C Library](libc/), aiming to be mostly POSIX-compatible. - [System call](kernel/src/sys/) interface and [C Library](libc/), aiming to be mostly POSIX-compatible.
- Designed to be [portable](kernel/src/arch), no need to be restricted to x86_64. - Designed to be [portable](kernel/src/arch), no need to be restricted to x86_64.
- Fully [UTF-8 aware](luna/include/luna/Utf8.h), **everywhere**. - Fully [UTF-8 aware](luna/include/luna/Utf8.h), **everywhere**.
- [Thread](luna/include/luna/Atomic.h) [safety](kernel/src/thread/Spinlock.h). - [Thread](luna/include/luna/Atomic.h) [safety](kernel/src/thread/Spinlock.h) (supposedly).
- Environment-agnostic [utility library](luna/), which can be used in both kernel and userspace. - Environment-agnostic [utility library](luna/), which can be used in both kernel and userspace.
- Return-oriented [error propagation](luna/include/luna/Result.h), inspired by Rust and SerenityOS. - Return-oriented [error propagation](luna/include/luna/Result.h), inspired by Rust and SerenityOS.
- Build system uses [CMake](CMakeLists.txt). - Build system uses [CMake](CMakeLists.txt).
@ -72,7 +72,7 @@ These images do reflect the latest changes on the `main` branch, but are obvious
## Is there third-party software I can use on Luna? ## Is there third-party software I can use on Luna?
Not right now, but hopefully we can start porting some software soon! Not right now, but hopefully we can start porting some software soon! (After the VFS and fork/exec are done, of course. So, in a long time.)
## License ## License
Luna is open-source and free software under the [BSD-2 License](LICENSE). Luna is open-source and free software under the [BSD-2 License](LICENSE).

View File

@ -18,7 +18,6 @@ set(SOURCES
src/arch/Serial.cpp src/arch/Serial.cpp
src/arch/Timer.cpp src/arch/Timer.cpp
src/arch/PCI.cpp src/arch/PCI.cpp
src/thread/Spinlock.cpp
src/thread/Thread.cpp src/thread/Thread.cpp
src/thread/Scheduler.cpp src/thread/Scheduler.cpp
src/sys/Syscall.cpp src/sys/Syscall.cpp
@ -27,6 +26,8 @@ set(SOURCES
src/sys/clock_gettime.cpp src/sys/clock_gettime.cpp
src/sys/allocate_memory.cpp src/sys/allocate_memory.cpp
src/sys/usleep.cpp src/sys/usleep.cpp
src/fs/VFS.cpp
src/fs/tmpfs/FileSystem.cpp
src/InitRD.cpp src/InitRD.cpp
src/ELF.cpp src/ELF.cpp
) )

View File

@ -1 +1,2 @@
target_compile_definitions(moon PRIVATE LOCKED_VALUE_DEBUG) target_compile_definitions(moon PRIVATE LOCKED_VALUE_DEBUG)
target_compile_options(moon PRIVATE -fsanitize=undefined)

View File

@ -1,18 +1,13 @@
#include "InitRD.h" #include "InitRD.h"
#include "arch/MMU.h" #include "arch/MMU.h"
#include "boot/bootboot.h" #include "boot/bootboot.h"
#include "memory/MemoryManager.h"
#include <luna/Alignment.h>
TarStream g_initrd; TarStream g_initrd;
extern const BOOTBOOT bootboot; extern const BOOTBOOT bootboot;
void InitRD::initialize() void InitRD::initialize()
{ {
u64 virtual_initrd_address = u64 virtual_initrd_address = MMU::translate_physical_address(bootboot.initrd_ptr);
MemoryManager::get_kernel_mapping_for_frames(
bootboot.initrd_ptr, get_blocks_from_size(bootboot.initrd_size, ARCH_PAGE_SIZE), MMU::NoExecute)
.expect_value("Unable to map the initial ramdisk into virtual memory");
g_initrd.initialize((void*)virtual_initrd_address, bootboot.initrd_size); g_initrd.initialize((void*)virtual_initrd_address, bootboot.initrd_size);
} }

View File

@ -2,10 +2,10 @@
#include "arch/CPU.h" #include "arch/CPU.h"
#include "arch/Serial.h" #include "arch/Serial.h"
#include "arch/Timer.h" #include "arch/Timer.h"
#include "thread/Spinlock.h"
#include "video/TextConsole.h" #include "video/TextConsole.h"
#include <luna/Format.h> #include <luna/Format.h>
#include <luna/SourceLocation.h> #include <luna/SourceLocation.h>
#include <luna/Spinlock.h>
static bool g_debug_enabled = true; static bool g_debug_enabled = true;
static bool g_serial_enabled = true; static bool g_serial_enabled = true;

View File

@ -26,5 +26,7 @@ namespace CPU
void get_stack_trace_at(Registers* regs, void (*callback)(u64, void*), void* arg); void get_stack_trace_at(Registers* regs, void (*callback)(u64, void*), void* arg);
void print_stack_trace_at(Registers* regs); void print_stack_trace_at(Registers* regs);
[[noreturn]] void bootstrap_switch_stack(u64 stack, void* function);
void pause(); void pause();
} }

View File

@ -7,6 +7,8 @@
#error "Unknown architecture." #error "Unknown architecture."
#endif #endif
constexpr u64 PAGES_PER_HUGE_PAGE = ARCH_HUGE_PAGE_SIZE / ARCH_PAGE_SIZE;
namespace MMU namespace MMU
{ {
enum Flags enum Flags
@ -19,7 +21,15 @@ namespace MMU
CacheDisable = 16, CacheDisable = 16,
}; };
Result<void> map(u64 virt, u64 phys, int flags); enum class UseHugePages
{
No = 0,
Yes = 1
};
u64 translate_physical_address(u64 phys);
Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages);
Result<u64> unmap(u64 virt); Result<u64> unmap(u64 virt);
Result<u64> get_physical(u64 virt); Result<u64> get_physical(u64 virt);
Result<int> get_flags(u64 virt); Result<int> get_flags(u64 virt);

View File

@ -295,6 +295,15 @@ namespace CPU
&frame_index); &frame_index);
} }
[[noreturn]] void bootstrap_switch_stack(u64 stack, void* function)
{
asm volatile("mov %0, %%rsp\n"
"jmp *%1"
:
: "r"(stack), "r"(function));
__builtin_unreachable();
}
void pause() void pause()
{ {
asm volatile("pause"); asm volatile("pause");

View File

@ -1,6 +1,8 @@
#include "arch/MMU.h" #include "arch/MMU.h"
#include "Log.h" #include "Log.h"
#include "memory/MemoryManager.h" #include "memory/MemoryManager.h"
#include "memory/MemoryMap.h"
#include <luna/Alignment.h>
#include <luna/CString.h> #include <luna/CString.h>
#include <luna/Result.h> #include <luna/Result.h>
#include <luna/ScopeGuard.h> #include <luna/ScopeGuard.h>
@ -12,6 +14,10 @@
PageDirectory* g_kernel_directory; PageDirectory* g_kernel_directory;
u64 g_kernel_directory_virt; u64 g_kernel_directory_virt;
// The bootloader maps up to 16GiB of physical memory for us at address 0. Using this bootstrap mapping, we'll map (all)
// physical memory at 0xFFFF800000000000.
u64 g_physical_mapping_base = 0;
void PageTableEntry::set_address(u64 addr) void PageTableEntry::set_address(u64 addr)
{ {
this->address = (addr >> 12); this->address = (addr >> 12);
@ -36,90 +42,14 @@ static bool has_flag(int flags, MMU::Flags flag)
namespace MMU namespace MMU
{ {
template <typename T> T translate_physical(T phys)
constexpr PageDirectory* l4_table()
{ {
constexpr u64 l4 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (rindex << 12); return (T)(g_physical_mapping_base + (u64)phys);
return (PageDirectory*)l4;
} }
constexpr u64 l4_index(u64 addr) u64 translate_physical_address(u64 phys)
{ {
return (addr >> 39) & 0777; return g_physical_mapping_base + phys;
}
PageTableEntry& l4_entry(u64 addr)
{
return l4_table()->entries[l4_index(addr)];
}
constexpr PageDirectory* raw_l3_table(u64 l4)
{
const u64 l3 = sign | (rindex << 39) | (rindex << 30) | (rindex << 21) | (l4 << 12);
return (PageDirectory*)l3;
}
constexpr PageDirectory* l3_table(u64 addr)
{
const u64 l4 = l4_index(addr);
return raw_l3_table(l4);
}
constexpr u64 l3_index(u64 addr)
{
return (addr >> 30) & 0777;
}
PageTableEntry& l3_entry(u64 addr)
{
return l3_table(addr)->entries[l3_index(addr)];
}
constexpr PageDirectory* raw_l2_table(u64 l4, u64 l3)
{
const u64 l2 = sign | (rindex << 39) | (rindex << 30) | (l4 << 21) | (l3 << 12);
return (PageDirectory*)l2;
}
constexpr PageDirectory* l2_table(u64 addr)
{
const u64 l4 = l4_index(addr);
const u64 l3 = l3_index(addr);
return raw_l2_table(l4, l3);
}
constexpr u64 l2_index(u64 addr)
{
return (addr >> 21) & 0777;
}
PageTableEntry& l2_entry(u64 addr)
{
return l2_table(addr)->entries[l2_index(addr)];
}
constexpr PageDirectory* raw_l1_table(u64 l4, u64 l3, u64 l2)
{
const u64 l1 = sign | (rindex << 39) | (l4 << 30) | (l3 << 21) | (l2 << 12);
return (PageDirectory*)l1;
}
constexpr PageDirectory* l1_table(u64 addr)
{
const u64 l4 = l4_index(addr);
const u64 l3 = l3_index(addr);
const u64 l2 = l2_index(addr);
return raw_l1_table(l4, l3, l2);
}
constexpr u64 l1_index(u64 addr)
{
return (addr >> 12) & 0777;
}
PageTableEntry& l1_entry(u64 addr)
{
return l1_table(addr)->entries[l1_index(addr)];
} }
void switch_page_directory(PageDirectory* dir) void switch_page_directory(PageDirectory* dir)
@ -134,6 +64,11 @@ namespace MMU
return value; return value;
} }
PageDirectory* get_virtual_page_directory()
{
return translate_physical(get_page_directory());
}
void flush_all() void flush_all()
{ {
switch_page_directory(get_page_directory()); switch_page_directory(get_page_directory());
@ -144,6 +79,26 @@ namespace MMU
asm volatile("invlpg (%0)" : : "r"(page) : "memory"); asm volatile("invlpg (%0)" : : "r"(page) : "memory");
} }
constexpr u64 l4_index(u64 addr)
{
return (addr >> 39) & 0777;
}
constexpr u64 l3_index(u64 addr)
{
return (addr >> 30) & 0777;
}
constexpr u64 l2_index(u64 addr)
{
return (addr >> 21) & 0777;
}
constexpr u64 l1_index(u64 addr)
{
return (addr >> 12) & 0777;
}
int arch_flags_to_mmu(const PageTableEntry& entry) int arch_flags_to_mmu(const PageTableEntry& entry)
{ {
int result = Flags::None; int result = Flags::None;
@ -155,17 +110,46 @@ namespace MMU
return result; return result;
} }
PageTableEntry& l4_entry(u64 virt)
{
auto index = l4_index(virt);
return get_virtual_page_directory()->entries[index];
}
PageDirectory& page_table(const PageTableEntry& entry)
{
return *translate_physical((PageDirectory*)entry.get_address());
}
PageTableEntry& l3_entry(const PageTableEntry& entry, u64 virt)
{
auto index = l3_index(virt);
return page_table(entry).entries[index];
}
PageTableEntry& l2_entry(const PageTableEntry& entry, u64 virt)
{
auto index = l2_index(virt);
return page_table(entry).entries[index];
}
PageTableEntry& l1_entry(const PageTableEntry& entry, u64 virt)
{
auto index = l1_index(virt);
return page_table(entry).entries[index];
}
Result<PageTableEntry*> find_entry(u64 virt) Result<PageTableEntry*> find_entry(u64 virt)
{ {
const auto& l4 = l4_entry(virt); const auto& l4 = l4_entry(virt);
if (!l4.present) return err(EFAULT); if (!l4.present) return err(EFAULT);
auto& l3 = l3_entry(virt); auto& l3 = l3_entry(l4, virt);
if (!l3.present) return err(EFAULT); if (!l3.present) return err(EFAULT);
if (l3.larger_pages) return &l3; if (l3.larger_pages) return &l3;
auto& l2 = l2_entry(virt); auto& l2 = l2_entry(l3, virt);
if (!l2.present) return err(EFAULT); if (!l2.present) return err(EFAULT);
if (l2.larger_pages) return &l2; if (l2.larger_pages) return &l2;
return &l1_entry(virt); return &l1_entry(l2, virt);
} }
Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags) Result<PageTableEntry*> apply_cascading_flags(u64 virt, int flags)
@ -174,21 +158,32 @@ namespace MMU
if (!l4.present) return err(EFAULT); if (!l4.present) return err(EFAULT);
if (flags & Flags::ReadWrite) l4.read_write = true; if (flags & Flags::ReadWrite) l4.read_write = true;
if (flags & Flags::User) l4.user = true; if (flags & Flags::User) l4.user = true;
auto& l3 = l3_entry(virt); auto& l3 = l3_entry(l4, virt);
if (!l3.present) return err(EFAULT); if (!l3.present) return err(EFAULT);
if (l3.larger_pages) return &l3; if (l3.larger_pages) return &l3;
if (flags & Flags::ReadWrite) l3.read_write = true; if (flags & Flags::ReadWrite) l3.read_write = true;
if (flags & Flags::User) l3.user = true; if (flags & Flags::User) l3.user = true;
auto& l2 = l2_entry(virt); auto& l2 = l2_entry(l3, virt);
if (!l2.present) return err(EFAULT); if (!l2.present) return err(EFAULT);
if (l2.larger_pages) return &l2; if (l2.larger_pages) return &l2;
if (flags & Flags::ReadWrite) l2.read_write = true; if (flags & Flags::ReadWrite) l2.read_write = true;
if (flags & Flags::User) l2.user = true; if (flags & Flags::User) l2.user = true;
auto& l1 = l1_entry(virt); auto& l1 = l1_entry(l2, virt);
return &l1; return &l1;
} }
Result<void> map(u64 virt, u64 phys, int flags) void set_page_table_entry_properties(PageTableEntry& entry, u64 phys, int flags)
{
entry.present = true;
entry.read_write = has_flag(flags, Flags::ReadWrite);
entry.user = has_flag(flags, Flags::User);
entry.write_through = has_flag(flags, Flags::WriteThrough);
entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
entry.no_execute = has_flag(flags, Flags::NoExecute);
entry.set_address(phys);
}
Result<void> map(u64 virt, u64 phys, int flags, UseHugePages use_huge_pages)
{ {
auto& l4 = l4_entry(virt); auto& l4 = l4_entry(virt);
if (!l4.present) if (!l4.present)
@ -196,46 +191,51 @@ namespace MMU
const u64 addr = TRY(MemoryManager::alloc_frame()); const u64 addr = TRY(MemoryManager::alloc_frame());
l4.present = true; l4.present = true;
l4.set_address(addr); l4.set_address(addr);
memset(l3_table(virt), 0, ARCH_PAGE_SIZE); memset(&page_table(l4), 0, ARCH_PAGE_SIZE);
} }
if (flags & Flags::ReadWrite) l4.read_write = true; if (flags & Flags::ReadWrite) l4.read_write = true;
if (flags & Flags::User) l4.user = true; if (flags & Flags::User) l4.user = true;
auto& l3 = l3_entry(virt); auto& l3 = l3_entry(l4, virt);
if (!l3.present) if (!l3.present)
{ {
const u64 addr = TRY(MemoryManager::alloc_frame()); const u64 addr = TRY(MemoryManager::alloc_frame());
l3.present = true; l3.present = true;
l3.set_address(addr); l3.set_address(addr);
memset(l2_table(virt), 0, ARCH_PAGE_SIZE); memset(&page_table(l3), 0, ARCH_PAGE_SIZE);
} }
if (flags & Flags::ReadWrite) l3.read_write = true; if (flags & Flags::ReadWrite) l3.read_write = true;
if (flags & Flags::User) l3.user = true; if (flags & Flags::User) l3.user = true;
if (l3.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM if (l3.larger_pages) return err(EEXIST);
auto& l2 = l2_entry(virt); auto& l2 = l2_entry(l3, virt);
if (!l2.present) if (!l2.present)
{ {
const u64 addr = TRY(MemoryManager::alloc_frame());
l2.present = true; l2.present = true;
l2.set_address(addr);
memset(l1_table(virt), 0, ARCH_PAGE_SIZE); if (use_huge_pages == UseHugePages::No)
{
const u64 addr = TRY(MemoryManager::alloc_frame());
l2.set_address(addr);
memset(&page_table(l2), 0, ARCH_PAGE_SIZE);
}
} }
if (flags & Flags::ReadWrite) l2.read_write = true; if (flags & Flags::ReadWrite) l2.read_write = true;
if (flags & Flags::User) l2.user = true; if (flags & Flags::User) l2.user = true;
if (l2.larger_pages) return err(EFIXME); // FIXME: Replacing larger pages is not supported ATM if (l2.larger_pages) return err(EEXIST);
else if (use_huge_pages == UseHugePages::Yes)
{
l2.larger_pages = true;
set_page_table_entry_properties(l2, phys, flags);
return {};
}
auto& l1 = l1_entry(virt); auto& l1 = l1_entry(l2, virt);
if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again. if (l1.present) return err(EEXIST); // Please explicitly unmap the page before mapping it again.
l1.present = true; set_page_table_entry_properties(l1, phys, flags);
l1.read_write = has_flag(flags, Flags::ReadWrite);
l1.user = has_flag(flags, Flags::User);
l1.write_through = has_flag(flags, Flags::WriteThrough);
l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
l1.no_execute = has_flag(flags, Flags::NoExecute);
l1.set_address(phys);
return {}; return {};
} }
@ -281,39 +281,39 @@ namespace MMU
PageDirectory* const dir = get_page_directory(); PageDirectory* const dir = get_page_directory();
g_kernel_directory = dir; g_kernel_directory = dir;
const u64 paddr = (u64)dir; const u64 physical_memory_base = 0xFFFF800000000000;
PageTableEntry& recursive_entry = dir->entries[rindex];
recursive_entry.read_write = true;
recursive_entry.present = true;
recursive_entry.set_address(paddr);
flush_all();
g_kernel_directory_virt = MemoryMapIterator iter;
MemoryManager::get_kernel_mapping_for_frames((u64)dir, 1, MMU::ReadWrite | MMU::NoExecute).value(); const MemoryMapEntry highest_entry = iter.highest();
const u64 physical_memory_size = highest_entry.address() + highest_entry.size();
check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
MMU::ReadWrite | MMU::NoExecute);
g_physical_mapping_base = physical_memory_base;
g_kernel_directory_virt = translate_physical((u64)g_kernel_directory);
kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory); kdbgln("MMU init page directory (ring0): virt %#.16lx, phys %p", g_kernel_directory_virt, g_kernel_directory);
} }
Result<PageDirectory*> create_page_directory_for_userspace() Result<PageDirectory*> create_page_directory_for_userspace()
{ {
const u64 directory_virt = TRY(MemoryManager::alloc_for_kernel(1, MMU::ReadWrite | MMU::NoExecute)); const u64 directory_phys = TRY(MemoryManager::alloc_frame());
const u64 directory_phys = MMU::get_physical(directory_virt).value(); const u64 directory_virt = translate_physical(directory_phys);
PageDirectory* const directory = (PageDirectory*)directory_virt; PageDirectory* const directory = (PageDirectory*)directory_virt;
memset(directory, 0, ARCH_PAGE_SIZE); memset(directory, 0, ARCH_PAGE_SIZE);
PageTableEntry& recursive_entry = directory->entries[rindex];
recursive_entry.read_write = true; constexpr auto HALF_PAGE = ARCH_PAGE_SIZE / 2;
recursive_entry.present = true; // Copy the upper part of the page directory (higher half)
recursive_entry.set_address(directory_phys); memcpy(offset_ptr(directory, HALF_PAGE), offset_ptr((PageDirectory*)g_kernel_directory_virt, HALF_PAGE),
HALF_PAGE);
kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys); kdbgln("MMU init page directory (ring3): virt %p, phys %#.16lx", directory, directory_phys);
directory->entries[511] = ((PageDirectory*)g_kernel_directory_virt)->entries[511];
// From now on, we're only going to use the physical address, since accessing the PageDirectory will be dealt
// with using recursive mapping. So let's make sure we don't leak any VM.
MemoryManager::unmap_weak_and_free_vm(directory_virt, 1);
return (PageDirectory*)directory_phys; return (PageDirectory*)directory_phys;
} }
@ -321,16 +321,11 @@ namespace MMU
{ {
check(directory); check(directory);
// Needed in order to access page tables using the recursive mapping system. switch_page_directory(g_kernel_directory);
switch_page_directory(directory);
auto guard = make_scope_guard([&] { auto guard = make_scope_guard([directory] { MemoryManager::free_frame((u64)directory); });
check(g_kernel_directory);
switch_page_directory(g_kernel_directory);
MemoryManager::free_frame((u64)directory);
});
PageDirectory* const table = l4_table(); PageDirectory* const table = translate_physical(directory);
// Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages) // Let's iterate over every top-level entry, skipping the last two entries (recursive mapping and kernel pages)
for (u64 i = 0; i < 510; i++) for (u64 i = 0; i < 510; i++)
@ -338,7 +333,7 @@ namespace MMU
PageTableEntry& l4 = table->entries[i]; PageTableEntry& l4 = table->entries[i];
if (!l4.present) continue; if (!l4.present) continue;
PageDirectory* const pdp = raw_l3_table(i); PageDirectory* const pdp = &page_table(l4);
for (u64 j = 0; j < 512; j++) for (u64 j = 0; j < 512; j++)
{ {
@ -350,7 +345,7 @@ namespace MMU
TRY(MemoryManager::free_frame(l3.get_address())); TRY(MemoryManager::free_frame(l3.get_address()));
} }
PageDirectory* const pd = raw_l2_table(i, j); PageDirectory* const pd = &page_table(l3);
for (u64 k = 0; k < 512; k++) for (u64 k = 0; k < 512; k++)
{ {
@ -362,7 +357,7 @@ namespace MMU
TRY(MemoryManager::free_frame(l2.get_address())); TRY(MemoryManager::free_frame(l2.get_address()));
} }
PageDirectory* const pt = raw_l1_table(i, j, k); PageDirectory* const pt = &page_table(l2);
for (u64 l = 0; l < 512; l++) for (u64 l = 0; l < 512; l++)
{ {

View File

@ -2,9 +2,7 @@
#include <luna/Types.h> #include <luna/Types.h>
const usize ARCH_PAGE_SIZE = 4096; const usize ARCH_PAGE_SIZE = 4096;
const usize ARCH_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // 2 MiB
const u64 rindex = 0776; // recursive index
const u64 sign = 0177777UL << 48; // sign extension
struct [[gnu::packed]] PageTableEntry struct [[gnu::packed]] PageTableEntry
{ {

11
kernel/src/fs/VFS.cpp Normal file
View File

@ -0,0 +1,11 @@
#include "fs/VFS.h"
namespace VFS
{
SharedPtr<FileSystem> root_fs;
Inode& root_inode()
{
return root_fs->root_inode();
}
}

70
kernel/src/fs/VFS.h Normal file
View File

@ -0,0 +1,70 @@
#pragma once
#include <luna/SharedPtr.h>
namespace VFS
{
enum class InodeType
{
RegularFile,
Directory
};
class FileSystem;
class Inode
{
public:
// Directory-specific methods
virtual Result<SharedPtr<Inode>> find(const char* name) const = 0;
virtual Result<SharedPtr<Inode>> create_file(const char* name) = 0;
// Generic methods
virtual FileSystem& fs() const = 0;
virtual ~Inode() = default;
virtual InodeType type() const = 0;
virtual usize inode_number() const = 0;
};
class FileInode : Inode
{
public:
Result<SharedPtr<Inode>> find(const char*) const override
{
return err(ENOTDIR);
}
Result<SharedPtr<Inode>> create_file(const char*) override
{
return err(ENOTDIR);
}
InodeType type() const override
{
return InodeType::RegularFile;
}
virtual ~FileInode() = default;
};
class FileSystem
{
public:
virtual Inode& root_inode() const = 0;
virtual Result<SharedPtr<Inode>> create_file_inode() = 0;
virtual Result<SharedPtr<Inode>> create_dir_inode(SharedPtr<Inode> parent) = 0;
virtual ~FileSystem() = default;
};
extern SharedPtr<FileSystem> root_fs;
Result<Inode*> resolve_path(const char* path);
Inode& root_inode();
}

View File

@ -0,0 +1,71 @@
#include "fs/tmpfs/FileSystem.h"
#include <luna/Alloc.h>
#include <luna/CString.h>
#include <luna/Ignore.h>
namespace TmpFS
{
Result<SharedPtr<VFS::FileSystem>> FileSystem::create()
{
SharedPtr<FileSystem> fs = TRY(adopt_shared(new (std::nothrow) FileSystem()));
SharedPtr<VFS::Inode> root = TRY(fs->create_dir_inode({}));
fs->set_root(root);
return (SharedPtr<VFS::FileSystem>)fs;
}
Result<SharedPtr<VFS::Inode>> FileSystem::create_file_inode()
{
SharedPtr<FileInode> inode = TRY(make_shared<FileInode>());
inode->set_fs(*this, {});
inode->set_inode_number(m_next_inode_number, {});
TRY(m_inodes.try_append(inode));
m_next_inode_number++;
return (SharedPtr<VFS::Inode>)inode;
}
Result<SharedPtr<VFS::Inode>> FileSystem::create_dir_inode(SharedPtr<VFS::Inode> parent)
{
SharedPtr<DirInode> inode = TRY(make_shared<DirInode>());
TRY(inode->add_entry(inode, "."));
TRY(inode->add_entry(parent ? parent : (SharedPtr<VFS::Inode>)inode, ".."));
inode->set_fs(*this, {});
inode->set_inode_number(m_next_inode_number, {});
TRY(m_inodes.try_append(inode));
m_next_inode_number++;
return (SharedPtr<VFS::Inode>)inode;
}
void FileSystem::set_root(SharedPtr<VFS::Inode> root)
{
m_root_inode = root;
}
Result<SharedPtr<VFS::Inode>> DirInode::find(const char* name) const
{
for (const auto& entry : m_entries)
{
if (!strcmp(name, entry.name.chars())) return entry.inode;
}
return err(ENOENT);
}
Result<void> DirInode::add_entry(SharedPtr<VFS::Inode> inode, const char* name)
{
Entry entry { inode, name };
TRY(m_entries.try_append(move(entry)));
return {};
}
Result<SharedPtr<VFS::Inode>> DirInode::create_file(const char* name)
{
auto inode = TRY(m_fs->create_file_inode());
TRY(add_entry(inode, name));
return inode;
}
}

View File

@ -0,0 +1,118 @@
#pragma once
#include "fs/VFS.h"
#include <luna/Atomic.h>
#include <luna/Badge.h>
#include <luna/StaticString.h>
#include <luna/Vector.h>
namespace TmpFS
{
class FileSystem : public VFS::FileSystem
{
public:
VFS::Inode& root_inode() const override
{
return *m_root_inode;
}
Result<SharedPtr<VFS::Inode>> create_file_inode() override;
Result<SharedPtr<VFS::Inode>> create_dir_inode(SharedPtr<VFS::Inode> parent) override;
static Result<SharedPtr<VFS::FileSystem>> create();
virtual ~FileSystem() = default;
private:
FileSystem() = default;
void set_root(SharedPtr<VFS::Inode> root);
SharedPtr<VFS::Inode> m_root_inode;
Vector<SharedPtr<VFS::Inode>> m_inodes;
Atomic<usize> m_next_inode_number { 2 };
};
class FileInode : public VFS::FileInode
{
public:
FileInode() = default;
void set_fs(FileSystem& fs, Badge<FileSystem>)
{
m_fs = &fs;
}
void set_inode_number(usize inum, Badge<FileSystem>)
{
m_inode_number = inum;
}
VFS::FileSystem& fs() const override
{
return *m_fs;
}
usize inode_number() const override
{
return m_inode_number;
}
virtual ~FileInode() = default;
private:
VFS::FileSystem* m_fs;
usize m_inode_number;
};
class DirInode : public VFS::Inode
{
public:
DirInode() = default;
void set_fs(FileSystem& fs, Badge<FileSystem>)
{
m_fs = &fs;
}
void set_inode_number(usize inum, Badge<FileSystem>)
{
m_inode_number = inum;
}
Result<SharedPtr<VFS::Inode>> find(const char* name) const override;
VFS::FileSystem& fs() const override
{
return *m_fs;
}
usize inode_number() const override
{
return m_inode_number;
}
VFS::InodeType type() const override
{
return VFS::InodeType::Directory;
}
Result<SharedPtr<VFS::Inode>> create_file(const char* name) override;
Result<void> add_entry(SharedPtr<VFS::Inode> inode, const char* name);
virtual ~DirInode() = default;
private:
VFS::FileSystem* m_fs;
usize m_inode_number;
struct Entry
{
SharedPtr<VFS::Inode> inode;
StaticString<128> name;
};
Vector<Entry> m_entries;
};
}

View File

@ -7,6 +7,7 @@
#include "arch/Timer.h" #include "arch/Timer.h"
#include "boot/Init.h" #include "boot/Init.h"
#include "config.h" #include "config.h"
#include "fs/tmpfs/FileSystem.h"
#include "memory/Heap.h" #include "memory/Heap.h"
#include "memory/KernelVM.h" #include "memory/KernelVM.h"
#include "memory/MemoryManager.h" #include "memory/MemoryManager.h"
@ -55,6 +56,18 @@ Result<void> init()
Thread::init(); Thread::init();
Scheduler::init(); Scheduler::init();
VFS::root_fs = TRY(TmpFS::FileSystem::create());
VFS::Inode& root_inode = VFS::root_inode();
kinfoln("root inode number: %zu", root_inode.inode_number());
kinfoln("root inode's '.' entry inode number: %zu", TRY(root_inode.find("."))->inode_number());
kinfoln("root inode's '..' entry inode number: %zu", TRY(root_inode.find(".."))->inode_number());
TRY(root_inode.create_file("usr"));
kinfoln("root inode's 'usr' entry inode number: %zu", TRY(root_inode.find("usr"))->inode_number());
TarStream::Entry entry; TarStream::Entry entry;
while (TRY(g_initrd.read_next_entry(entry))) while (TRY(g_initrd.read_next_entry(entry)))
{ {
@ -87,11 +100,34 @@ Result<void> init()
return {}; return {};
} }
extern "C" [[noreturn]] void _start() [[noreturn]] void init_wrapper()
{ {
Init::check_magic();
Init::early_init();
auto rc = init(); auto rc = init();
if (rc.has_error()) kerrorln("Runtime error: %s", rc.error_string()); if (rc.has_error()) kerrorln("Runtime error: %s", rc.error_string());
CPU::idle_loop(); CPU::idle_loop();
} }
static constexpr u64 BOOTSTRAP_STACK_PAGES = 8;
// FIXME: Reclaim this memory as soon as we leave the init task (so as soon as the Scheduler runs a task switch)
static u64 allocate_initial_kernel_stack()
{
u64 address = MemoryManager::alloc_for_kernel(BOOTSTRAP_STACK_PAGES + 1, MMU::ReadWrite | MMU::NoExecute).value();
// First page is a guard page, the rest is stack.
MMU::unmap(address); // Unmap (without deallocating VM) one guard page so that attempts to access it fail with a
// non-present page fault.
kdbgln("stack guard page: %p", (void*)address);
// The actual stack.
Stack stack { address + ARCH_PAGE_SIZE, BOOTSTRAP_STACK_PAGES * ARCH_PAGE_SIZE };
return stack.top();
}
extern "C" [[noreturn]] void _start()
{
Init::check_magic();
Init::early_init();
u64 bootstrap_stack_top = allocate_initial_kernel_stack();
CPU::bootstrap_switch_stack(bootstrap_stack_top, (void*)init_wrapper);
}

View File

@ -1,7 +1,7 @@
#include "memory/KernelVM.h" #include "memory/KernelVM.h"
#include "arch/MMU.h" #include "arch/MMU.h"
#include "thread/Spinlock.h"
#include <luna/Bitmap.h> #include <luna/Bitmap.h>
#include <luna/Spinlock.h>
static const u64 KERNEL_VM_RANGE_START = 0xffffffffc0000000; static const u64 KERNEL_VM_RANGE_START = 0xffffffffc0000000;

View File

@ -2,10 +2,10 @@
#include "arch/MMU.h" #include "arch/MMU.h"
#include "memory/KernelVM.h" #include "memory/KernelVM.h"
#include "memory/MemoryMap.h" #include "memory/MemoryMap.h"
#include "thread/Spinlock.h"
#include <luna/Alignment.h> #include <luna/Alignment.h>
#include <luna/Bitmap.h> #include <luna/Bitmap.h>
#include <luna/ScopeGuard.h> #include <luna/ScopeGuard.h>
#include <luna/Spinlock.h>
#include <luna/SystemError.h> #include <luna/SystemError.h>
#include <luna/Types.h> #include <luna/Types.h>
@ -100,24 +100,16 @@ namespace MemoryManager
void init() void init()
{ {
init_physical_frame_allocator(); init_physical_frame_allocator();
KernelVM::init();
MMU::setup_initial_page_directory(); MMU::setup_initial_page_directory();
// NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
// there's no point in continuing.
auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;
auto virtual_bitmap_base =
KernelVM::alloc_several_pages(bitmap_pages)
.expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");
u64 phys = (u64)g_frame_bitmap.lock()->location();
map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
.expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");
auto frame_bitmap = g_frame_bitmap.lock(); auto frame_bitmap = g_frame_bitmap.lock();
u64 phys = (u64)frame_bitmap->location();
auto virtual_bitmap_base = MMU::translate_physical_address(phys);
frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes()); frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
KernelVM::init();
} }
void do_lock_frame(u64 index, Bitmap& bitmap) void do_lock_frame(u64 index, Bitmap& bitmap)
@ -202,7 +194,7 @@ namespace MemoryManager
while (pages_mapped < count) while (pages_mapped < count)
{ {
TRY(MMU::map(virt, phys, flags)); TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
virt += ARCH_PAGE_SIZE; virt += ARCH_PAGE_SIZE;
phys += ARCH_PAGE_SIZE; phys += ARCH_PAGE_SIZE;
pages_mapped++; pages_mapped++;
@ -213,6 +205,29 @@ namespace MemoryManager
return {}; return {};
} }
Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags)
{
CHECK_PAGE_ALIGNED(virt);
CHECK_PAGE_ALIGNED(phys);
usize pages_mapped = 0;
// Let's clean up after ourselves if we fail.
auto guard = make_scope_guard([=, &pages_mapped] { unmap_weak_huge(virt, pages_mapped); });
while (pages_mapped < count)
{
TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::Yes));
virt += ARCH_HUGE_PAGE_SIZE;
phys += ARCH_HUGE_PAGE_SIZE;
pages_mapped++;
}
guard.deactivate();
return {};
}
Result<u64> alloc_at(u64 virt, usize count, int flags) Result<u64> alloc_at(u64 virt, usize count, int flags)
{ {
CHECK_PAGE_ALIGNED(virt); CHECK_PAGE_ALIGNED(virt);
@ -225,7 +240,7 @@ namespace MemoryManager
while (pages_mapped < count) while (pages_mapped < count)
{ {
const u64 frame = TRY(alloc_frame()); const u64 frame = TRY(alloc_frame());
TRY(MMU::map(virt, frame, flags)); TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
virt += ARCH_PAGE_SIZE; virt += ARCH_PAGE_SIZE;
pages_mapped++; pages_mapped++;
} }
@ -250,7 +265,7 @@ namespace MemoryManager
while (pages_mapped < count) while (pages_mapped < count)
{ {
const u64 frame = TRY(alloc_frame()); const u64 frame = TRY(alloc_frame());
TRY(MMU::map(virt, frame, flags)); TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
virt += ARCH_PAGE_SIZE; virt += ARCH_PAGE_SIZE;
pages_mapped++; pages_mapped++;
} }
@ -275,7 +290,7 @@ namespace MemoryManager
while (pages_mapped < count) while (pages_mapped < count)
{ {
TRY(MMU::map(virt, phys, flags)); TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
virt += ARCH_PAGE_SIZE; virt += ARCH_PAGE_SIZE;
phys += ARCH_PAGE_SIZE; phys += ARCH_PAGE_SIZE;
pages_mapped++; pages_mapped++;
@ -322,6 +337,19 @@ namespace MemoryManager
return {}; return {};
} }
Result<void> unmap_weak_huge(u64 virt, usize count)
{
CHECK_PAGE_ALIGNED(virt);
while (count--)
{
TRY(MMU::unmap(virt));
virt += ARCH_HUGE_PAGE_SIZE;
}
return {};
}
Result<void> unmap_weak_and_free_vm(u64 virt, usize count) Result<void> unmap_weak_and_free_vm(u64 virt, usize count)
{ {
CHECK_PAGE_ALIGNED(virt); CHECK_PAGE_ALIGNED(virt);

View File

@ -53,6 +53,7 @@ namespace MemoryManager
} }
Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags); Result<void> map_frames_at(u64 virt, u64 phys, usize count, int flags);
Result<void> map_huge_frames_at(u64 virt, u64 phys, usize count, int flags);
Result<u64> alloc_at(u64 virt, usize count, int flags); Result<u64> alloc_at(u64 virt, usize count, int flags);
Result<u64> alloc_for_kernel(usize count, int flags); Result<u64> alloc_for_kernel(usize count, int flags);
@ -64,6 +65,8 @@ namespace MemoryManager
Result<void> unmap_weak(u64 virt, usize count); Result<void> unmap_weak(u64 virt, usize count);
Result<void> unmap_weak_and_free_vm(u64 virt, usize count); Result<void> unmap_weak_and_free_vm(u64 virt, usize count);
Result<void> unmap_weak_huge(u64 virt, usize count);
usize free(); usize free();
usize used(); usize used();
usize reserved(); usize reserved();

View File

@ -84,4 +84,5 @@ void debug_log_impl(const char* format, va_list ap)
{ {
pure_cstyle_format( pure_cstyle_format(
format, [](char c, void*) { console_write(&c, 1); }, nullptr, ap); format, [](char c, void*) { console_write(&c, 1); }, nullptr, ap);
console_write("\n", 1);
} }

View File

@ -16,6 +16,8 @@ set(FREESTANDING_SOURCES
src/TarStream.cpp src/TarStream.cpp
src/DebugLog.cpp src/DebugLog.cpp
src/Heap.cpp src/Heap.cpp
src/Spinlock.cpp
src/UBSAN.cpp
) )
set(SOURCES set(SOURCES

View File

@ -22,6 +22,8 @@ extern "C"
// FIXME: Replace this invented function with strlcpy(). // FIXME: Replace this invented function with strlcpy().
void nullcpy(char* dest, const char* src, usize len); void nullcpy(char* dest, const char* src, usize len);
usize strlcpy(char* dest, const char* src, usize len);
[[deprecated]] char* strcpy(char* dst, const char* src); [[deprecated]] char* strcpy(char* dst, const char* src);
[[deprecated]] char* strcat(char* dst, const char* src); [[deprecated]] char* strcat(char* dst, const char* src);

View File

@ -19,6 +19,10 @@
if (!(expr)) [[unlikely]] { __check_failed(location, message); } \ if (!(expr)) [[unlikely]] { __check_failed(location, message); } \
} while (0) } while (0)
// Fail with an error message and location.
#define fail(message) __check_failed(SourceLocation::current(), message)
#define fail_at(location, message) __check_failed(location, message)
// Like assert(), but always enabled. // Like assert(), but always enabled.
#define check(expr) \ #define check(expr) \
do { \ do { \

View File

@ -101,7 +101,7 @@ template <typename T> class Option
return true; return true;
} }
bool try_move_value(T& ref) const bool try_move_value(T& ref)
{ {
if (!has_value()) return false; if (!has_value()) return false;
m_has_value = false; m_has_value = false;
@ -138,26 +138,16 @@ template <typename T> class Option
private: private:
struct Storage struct Storage
{ {
u8 buffer[sizeof(T)]; alignas(T) u8 buffer[sizeof(T)];
T* fetch_ptr()
{
return (T*)buffer;
}
T& fetch_reference() T& fetch_reference()
{ {
return *fetch_ptr(); return *__builtin_launder(reinterpret_cast<T*>(&buffer));
}
const T* fetch_ptr() const
{
return (const T*)buffer;
} }
const T& fetch_reference() const const T& fetch_reference() const
{ {
return *fetch_ptr(); return *__builtin_launder(reinterpret_cast<const T*>(&buffer));
} }
void store_reference(const T& ref) void store_reference(const T& ref)

View File

@ -59,6 +59,11 @@ template <typename T> class OwnedPtr
return *m_ptr; return *m_ptr;
} }
operator bool() const
{
return m_ptr != nullptr;
}
template <typename Type> friend Result<SharedPtr<Type>> adopt_shared_from_owned(OwnedPtr<Type>&&); template <typename Type> friend Result<SharedPtr<Type>> adopt_shared_from_owned(OwnedPtr<Type>&&);
private: private:

View File

@ -110,7 +110,7 @@ template <typename T> class Result
return m_value.try_set_value(ref); return m_value.try_set_value(ref);
} }
bool try_move_value(T& ref) const bool try_move_value(T& ref)
{ {
return m_value.try_move_value(ref); return m_value.try_move_value(ref);
} }

View File

@ -99,31 +99,34 @@ template <typename T> class SharedPtr
return *m_ptr; return *m_ptr;
} }
operator bool() const
{
return m_ptr != nullptr;
}
private: private:
T* m_ptr; T* m_ptr;
RefCount* m_ref_count; RefCount* m_ref_count;
}; };
template <typename T, class... Args> Result<SharedPtr<T>> make_shared(Args... args) // NOTE: ptr is deleted if any of the adopt_shared* functions fail to construct a SharedPtr.
template <typename T> Result<SharedPtr<T>> adopt_shared(T* ptr)
{ {
using RefCount = __detail::RefCount; using RefCount = __detail::RefCount;
RefCount* const ref_count = TRY(make<RefCount>()); auto guard = make_scope_guard([ptr] { delete ptr; });
auto guard = make_scope_guard([&] { delete ref_count; });
RefCount* const ref_count = TRY(make<RefCount>());
T* const ptr = TRY(make<T>(args...));
guard.deactivate(); guard.deactivate();
return SharedPtr<T> { ptr, ref_count }; return SharedPtr<T> { ptr, ref_count };
} }
template <typename T> Result<SharedPtr<T>> adopt_shared(T* ptr) template <typename T, class... Args> Result<SharedPtr<T>> make_shared(Args... args)
{ {
using RefCount = __detail::RefCount; T* raw_ptr = TRY(make<T>(args...));
return adopt_shared(raw_ptr);
RefCount* const ref_count = TRY(make<RefCount>());
return SharedPtr<T> { ptr, ref_count };
} }
template <typename T> Result<SharedPtr<T>> adopt_shared_if_nonnull(T* ptr) template <typename T> Result<SharedPtr<T>> adopt_shared_if_nonnull(T* ptr)
@ -138,13 +141,7 @@ template <typename T> Result<SharedPtr<T>> adopt_shared_from_owned(OwnedPtr<T>&&
T* ptr = other.m_ptr; T* ptr = other.m_ptr;
other.m_ptr = nullptr; other.m_ptr = nullptr;
// FIXME: Should the pointee magically vanish on failure? Or go back into the OwnedPtr, even though it's been
// moved...
auto guard = make_scope_guard([&] { delete ptr; });
const SharedPtr<T> shared_ptr = TRY(adopt_shared(ptr)); const SharedPtr<T> shared_ptr = TRY(adopt_shared(ptr));
guard.deactivate();
return shared_ptr; return shared_ptr;
} }

View File

@ -1,6 +1,4 @@
#pragma once #pragma once
#include "Log.h"
#include "arch/CPU.h"
#include <luna/Atomic.h> #include <luna/Atomic.h>
#include <luna/Option.h> #include <luna/Option.h>
@ -30,8 +28,15 @@ class ScopeLock
ScopeLock(const ScopeLock&) = delete; ScopeLock(const ScopeLock&) = delete;
ScopeLock(ScopeLock&&) = delete; ScopeLock(ScopeLock&&) = delete;
Spinlock& take_over()
{
m_taken_over = true;
return m_lock;
}
private: private:
Spinlock& m_lock; Spinlock& m_lock;
bool m_taken_over { false };
}; };
class SafeScopeLock class SafeScopeLock
@ -107,25 +112,11 @@ template <typename T> class LockedValue
{ {
} }
#ifndef LOCKED_VALUE_DEBUG
LockedValueGuard lock() LockedValueGuard lock()
{ {
m_lock.lock(); m_lock.lock();
return { *this }; return { *this };
} }
#else
LockedValueGuard lock()
{
if (m_lock.try_lock()) { return { *this }; }
kwarnln("Spinning on a locked LockedValue. This might lead to a deadlock...");
CPU::print_stack_trace();
m_lock.lock();
return { *this };
}
#endif
Option<LockedValueGuard> try_lock() Option<LockedValueGuard> try_lock()
{ {

View File

@ -0,0 +1,52 @@
#pragma once
#include <luna/CString.h>
#include <luna/Types.h>
// Fixed-capacity, always null-terminated string buffer holding at most
// Size characters plus the terminator. Longer input is silently truncated.
template <usize Size> class StaticString
{
  public:
    StaticString() = default;

    StaticString(const char* string)
    {
        adopt(string);
    }

    // Copy the given C string into the internal buffer, truncating to Size
    // characters if necessary, and record the stored length.
    void adopt(const char* string)
    {
        // strlcpy returns the full source length, which may exceed what was
        // actually copied; clamp the recorded length to our capacity.
        const usize length = strlcpy(m_buffer, string, sizeof(m_buffer));
        m_length = (length > Size) ? Size : length;
    }

    StaticString<Size>& operator=(const char* string)
    {
        adopt(string);
        return *this;
    }

    template <usize OSize> StaticString<Size>& operator=(const StaticString<OSize>& string)
    {
        // Self-assignment is only possible when the capacities match.
        if constexpr (OSize == Size)
        {
            if (&string == this) return *this;
        }

        adopt(string.chars());
        return *this;
    }

    // Null-terminated view of the stored characters.
    const char* chars() const
    {
        return m_buffer;
    }

    // Number of characters stored (excluding the terminator), <= Size.
    usize length() const
    {
        return m_length;
    }

  private:
    char m_buffer[Size + 1];
    usize m_length { 0 };
};

View File

@ -0,0 +1,74 @@
#pragma once
#include <luna/Types.h>
// Argument structures passed by the compiler's -fsanitize=undefined
// instrumentation to the __ubsan_handle_* callbacks (implemented in
// UBSAN.cpp). Field order and sizes must match the compiler's UBSan ABI
// exactly; do not reorder or resize fields.
namespace UBSAN
{
// Source position (file, line, column) where the undefined behavior occurred.
struct SourceLocation
{
const char* file;
u32 line;
u32 column;
};

// Compiler-emitted descriptor of a type involved in a report. The encoding of
// kind/info is defined by the compiler's UBSan ABI — presumably kind selects
// integer vs. float and info packs width/signedness; verify against the
// compiler runtime if these fields are ever interpreted.
struct TypeDescriptor
{
u16 kind;
u16 info;
char name[]; // Null-terminated human-readable type name (flexible array member).
};

// One struct per handler entry point; see the matching __ubsan_handle_*
// function in UBSAN.cpp for how each is consumed.
namespace UBInfo
{
// __ubsan_handle_type_mismatch (older ABI: alignment stored directly).
struct TypeMismatchInfo
{
SourceLocation location;
TypeDescriptor* type;
usize alignment;
u8 type_check_kind; // Index into g_type_check_kinds in UBSAN.cpp.
};

// __ubsan_handle_type_mismatch_v1 (newer ABI: alignment stored as log2).
struct TypeMismatchInfo_v1
{
SourceLocation location;
TypeDescriptor* type;
u8 log_alignment;
u8 type_check_kind;
};

// __ubsan_handle_{add,sub,mul,negate,divrem}_overflow.
struct OverflowInfo
{
SourceLocation location;
TypeDescriptor* type;
};

// __ubsan_handle_builtin_unreachable.
struct UnreachableInfo
{
SourceLocation location;
};

// __ubsan_handle_out_of_bounds (array index out of range).
struct OutOfBoundsInfo
{
SourceLocation location;
TypeDescriptor* array_type;
TypeDescriptor* index_type;
};

// __ubsan_handle_load_invalid_value.
struct InvalidValueInfo
{
SourceLocation location;
TypeDescriptor* type;
};

// __ubsan_handle_shift_out_of_bounds.
struct ShiftOutOfBoundsInfo
{
SourceLocation location;
TypeDescriptor* lhs_type;
TypeDescriptor* rhs_type;
};

// __ubsan_handle_pointer_overflow.
struct PointerOverflowInfo
{
SourceLocation location;
};
}
}

View File

@ -78,7 +78,7 @@ template <typename T> class Vector
resize(capacity).release_value(); resize(capacity).release_value();
} }
Result<void> try_append(T item) Result<void> try_append(T&& item)
{ {
if (m_capacity == m_size) TRY(resize(m_capacity + 8)); if (m_capacity == m_size) TRY(resize(m_capacity + 8));
@ -89,6 +89,11 @@ template <typename T> class Vector
return {}; return {};
} }
Result<void> try_append(const T& item)
{
return try_append(T(item));
}
Option<T> try_pop() Option<T> try_pop()
{ {
if (m_size == 0) return {}; if (m_size == 0) return {};

View File

@ -125,4 +125,15 @@ extern "C"
if (*str) return const_cast<char*>(str); if (*str) return const_cast<char*>(str);
return NULL; return NULL;
} }
// BSD-style strlcpy: copy src into dest, writing at most len bytes including
// the null terminator. Returns the full length of src regardless of
// truncation, so callers can detect truncation via (return value >= len).
// With len == 0, dest is left untouched (not even a terminator is written).
usize strlcpy(char* dest, const char* src, usize len)
{
    const usize src_len = strlen(src);

    if (len != 0)
    {
        usize copy_len = src_len;
        if (copy_len > len - 1) copy_len = len - 1;

        memcpy(dest, src, copy_len);
        dest[copy_len] = 0;
    }

    return src_len;
}
} }

View File

@ -6,6 +6,7 @@
#include <luna/LinkedList.h> #include <luna/LinkedList.h>
#include <luna/SafeArithmetic.h> #include <luna/SafeArithmetic.h>
#include <luna/ScopeGuard.h> #include <luna/ScopeGuard.h>
#include <luna/Spinlock.h>
#include <luna/SystemError.h> #include <luna/SystemError.h>
#ifdef USE_FREESTANDING #ifdef USE_FREESTANDING
@ -45,9 +46,10 @@ static_assert(sizeof(HeapBlock) == 48UL);
static const isize HEAP_BLOCK_SIZE = 48; static const isize HEAP_BLOCK_SIZE = 48;
static LinkedList<HeapBlock> heap; static LinkedList<HeapBlock> heap;
static Spinlock g_heap_lock;
// If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount of // If we're allocating a large amount of memory, map enough pages for it, but otherwise just use the default amount
// pages. // of pages.
static usize get_pages_for_allocation(usize bytes) static usize get_pages_for_allocation(usize bytes)
{ {
usize pages = get_blocks_from_size(bytes, PAGE_SIZE); usize pages = get_blocks_from_size(bytes, PAGE_SIZE);
@ -97,7 +99,7 @@ static Option<HeapBlock*> split(HeapBlock* block, usize size)
const usize old_size = const usize old_size =
block->full_size; // Save the old value of this variable since we are going to use it after modifying it block->full_size; // Save the old value of this variable since we are going to use it after modifying it
if (available < (size + sizeof(HeapBlock))) if (available <= (size + sizeof(HeapBlock)))
return {}; // This block hasn't got enough free space to hold the requested size. return {}; // This block hasn't got enough free space to hold the requested size.
const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock)); const usize offset = get_fair_offset_to_split_at(block, size + sizeof(HeapBlock));
@ -128,6 +130,8 @@ static Result<void> combine_forward(HeapBlock* block)
heap.remove(next); heap.remove(next);
next->magic = BLOCK_DEAD; next->magic = BLOCK_DEAD;
block->full_size += next->full_size + sizeof(HeapBlock);
if (next->status & BLOCK_END_MEM) if (next->status & BLOCK_END_MEM)
{ {
if (next->status & BLOCK_START_MEM) if (next->status & BLOCK_START_MEM)
@ -140,8 +144,6 @@ static Result<void> combine_forward(HeapBlock* block)
block->status |= BLOCK_END_MEM; block->status |= BLOCK_END_MEM;
} }
block->full_size += next->full_size + sizeof(HeapBlock);
return {}; return {};
} }
@ -157,6 +159,8 @@ static Result<HeapBlock*> combine_backward(HeapBlock* block)
heap.remove(block); heap.remove(block);
block->magic = BLOCK_DEAD; block->magic = BLOCK_DEAD;
last->full_size += block->full_size + sizeof(HeapBlock);
if (block->status & BLOCK_END_MEM) if (block->status & BLOCK_END_MEM)
{ {
if (block->status & BLOCK_START_MEM) if (block->status & BLOCK_START_MEM)
@ -169,8 +173,6 @@ static Result<HeapBlock*> combine_backward(HeapBlock* block)
last->status |= BLOCK_END_MEM; last->status |= BLOCK_END_MEM;
} }
last->full_size += block->full_size + sizeof(HeapBlock);
return last; return last;
} }
@ -178,6 +180,8 @@ Result<void*> malloc_impl(usize size, bool should_scrub)
{ {
if (!size) return (void*)BLOCK_MAGIC; if (!size) return (void*)BLOCK_MAGIC;
ScopeLock lock(g_heap_lock);
size = align_up<16>(size); size = align_up<16>(size);
Option<HeapBlock*> block = heap.first(); Option<HeapBlock*> block = heap.first();
@ -231,6 +235,8 @@ Result<void> free_impl(void* ptr)
if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0) if (ptr == (void*)BLOCK_MAGIC) return {}; // This pointer was returned from a call to malloc(0)
if (!ptr) return {}; if (!ptr) return {};
ScopeLock lock(g_heap_lock);
HeapBlock* block = get_heap_block_for_pointer(ptr); HeapBlock* block = get_heap_block_for_pointer(ptr);
if (block->magic != BLOCK_MAGIC) if (block->magic != BLOCK_MAGIC)
@ -286,6 +292,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
return (void*)BLOCK_MAGIC; return (void*)BLOCK_MAGIC;
} }
ScopeLock lock(g_heap_lock);
HeapBlock* const block = get_heap_block_for_pointer(ptr); HeapBlock* const block = get_heap_block_for_pointer(ptr);
if (block->magic != BLOCK_MAGIC) if (block->magic != BLOCK_MAGIC)
@ -327,6 +335,8 @@ Result<void*> realloc_impl(void* ptr, usize size)
usize old_size = block->req_size; usize old_size = block->req_size;
lock.take_over().unlock();
void* const new_ptr = TRY(malloc_impl(size, false)); void* const new_ptr = TRY(malloc_impl(size, false));
memcpy(new_ptr, ptr, old_size > size ? size : old_size); memcpy(new_ptr, ptr, old_size > size ? size : old_size);
TRY(free_impl(ptr)); TRY(free_impl(ptr));

View File

@ -1,6 +1,11 @@
#include "thread/Spinlock.h" #include <luna/DebugLog.h>
#include "Log.h" #include <luna/Spinlock.h>
#include "arch/CPU.h"
#ifdef ARCH_X86_64
#define pause() asm volatile("pause")
#else
#error "Unsupported architecture"
#endif
void Spinlock::lock() void Spinlock::lock()
{ {
@ -8,7 +13,7 @@ void Spinlock::lock()
while (!m_lock.compare_exchange_strong(expected, 1)) while (!m_lock.compare_exchange_strong(expected, 1))
{ {
expected = 0; expected = 0;
CPU::pause(); pause();
} }
} }
@ -23,7 +28,7 @@ void Spinlock::unlock()
int expected = 1; int expected = 1;
if (!m_lock.compare_exchange_strong(expected, 0)) if (!m_lock.compare_exchange_strong(expected, 0))
{ {
kwarnln("Spinlock::unlock() called on an unlocked lock with value %d", expected); dbgln("Spinlock::unlock() called on an unlocked lock with value %d", expected);
} }
} }
@ -34,7 +39,7 @@ ScopeLock::ScopeLock(Spinlock& lock) : m_lock(lock)
ScopeLock::~ScopeLock() ScopeLock::~ScopeLock()
{ {
m_lock.unlock(); if (!m_taken_over) m_lock.unlock();
} }
const u32 RETRIES = 5000000; const u32 RETRIES = 5000000;
@ -42,7 +47,7 @@ const u32 RETRIES = 5000000;
SafeScopeLock::SafeScopeLock(Spinlock& lock) : m_lock(lock) SafeScopeLock::SafeScopeLock(Spinlock& lock) : m_lock(lock)
{ {
u32 tries_left = RETRIES; u32 tries_left = RETRIES;
while (!lock.try_lock() && --tries_left) { CPU::pause(); } while (!lock.try_lock() && --tries_left) { pause(); }
if (tries_left) m_success = true; if (tries_left) m_success = true;
} }

121
libluna/src/UBSAN.cpp Normal file
View File

@ -0,0 +1,121 @@
#include <luna/Check.h>
#include <luna/DebugLog.h>
#include <luna/SourceLocation.h>
#include <luna/UBSAN.h>
using namespace UBSAN::UBInfo;
// Halt after a UB report has been printed. Defaults to the caller's own
// source location so the panic points at the handler that fired.
[[noreturn]] void ub_panic(SourceLocation caller = SourceLocation::current())
{
    fail_at(caller, "Undefined behavior");
}

// Expand a UBSAN::SourceLocation into arguments for a "%s:%d:%d" format.
#define DISPLAY(loc) loc.file, loc.line, loc.column

extern "C"
{
    // Control flow reached __builtin_unreachable().
    void __ubsan_handle_builtin_unreachable(UnreachableInfo* info)
    {
        const auto& location = info->location;
        dbgln("ubsan: __builtin_unreachable reached at %s:%d:%d", DISPLAY(location));
        ub_panic();
    }

    void __ubsan_handle_pointer_overflow(PointerOverflowInfo* info)
    {
        const auto& location = info->location;
        dbgln("ubsan: pointer overflow occurred at %s:%d:%d", DISPLAY(location));
        ub_panic();
    }

    void __ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsInfo* info)
    {
        const auto& location = info->location;
        dbgln("ubsan: shift out of bounds for type %s at %s:%d:%d", info->lhs_type->name, DISPLAY(location));
        ub_panic();
    }

    void __ubsan_handle_load_invalid_value(InvalidValueInfo* info)
    {
        const auto& location = info->location;
        dbgln("ubsan: load invalid value for type %s at %s:%d:%d", info->type->name, DISPLAY(location));
        ub_panic();
    }

    void __ubsan_handle_out_of_bounds(OutOfBoundsInfo* info, usize index)
    {
        const auto& location = info->location;
        dbgln("ubsan: out of bounds array (of type %s) access (index %zu of type %s) at %s:%d:%d",
              info->array_type->name, index, info->index_type->name, DISPLAY(location));
        ub_panic();
    }

    // Shared implementation for all arithmetic overflow handlers below.
    void ubsan_handle_generic_overflow(OverflowInfo* info, const char* overflow_type)
    {
        const auto& location = info->location;
        dbgln("ubsan: %s overflow (value cannot fit into type %s) at %s:%d:%d", overflow_type, info->type->name,
              DISPLAY(location));
        ub_panic();
    }

// Binary operations receive both operand values, unary ones a single value;
// the values are unused here, so those parameters are unnamed.
#define UBSAN_OVERFLOW_BINARY(operation)                                                                               \
    void __ubsan_handle_##operation##_overflow(OverflowInfo* info, usize, usize)                                       \
    {                                                                                                                  \
        ubsan_handle_generic_overflow(info, #operation);                                                               \
    }

#define UBSAN_OVERFLOW_UNARY(operation)                                                                                \
    void __ubsan_handle_##operation##_overflow(OverflowInfo* info, usize)                                              \
    {                                                                                                                  \
        ubsan_handle_generic_overflow(info, #operation);                                                               \
    }

    UBSAN_OVERFLOW_BINARY(add);
    UBSAN_OVERFLOW_BINARY(sub);
    UBSAN_OVERFLOW_BINARY(mul);
    UBSAN_OVERFLOW_UNARY(negate);
    UBSAN_OVERFLOW_BINARY(divrem);

// True if value is a multiple of alignment (alignment must be a power of two).
// Arguments are fully parenthesized so expressions can be passed safely.
#define is_aligned(value, alignment) (!((value) & ((alignment)-1)))

    // Indexed by TypeMismatchInfo::type_check_kind.
    const char* g_type_check_kinds[] = {
        "load of",
        "store to",
        "reference binding to",
        "member access within",
        "member call on",
        "constructor call on",
        "downcast of",
        "downcast of",
        "upcast of",
        "cast to virtual base of",
    };

    void __ubsan_handle_type_mismatch(TypeMismatchInfo* info, usize pointer)
    {
        const auto& location = info->location;

        if (pointer == 0) { dbgln("ubsan: null pointer access at %s:%d:%d", DISPLAY(location)); }
        // FIX: report an unaligned access when the pointer is NOT aligned to
        // the required boundary; the previous check was inverted and fired on
        // correctly-aligned pointers instead.
        else if (info->alignment != 0 && !is_aligned(pointer, info->alignment))
        {
            dbgln("ubsan: unaligned pointer access (address %p) at %s:%d:%d", (void*)pointer, DISPLAY(location));
        }
        else
        {
            dbgln("ubsan: %s address %p with insufficient space for object of type %s at %s:%d:%d",
                  g_type_check_kinds[info->type_check_kind], (void*)pointer, info->type->name, DISPLAY(location));
        }

        ub_panic();
    }

    // Newer compilers emit the _v1 variant with log2-encoded alignment;
    // decode it and forward to the generic handler above.
    void __ubsan_handle_type_mismatch_v1(TypeMismatchInfo_v1* v1_info, usize pointer)
    {
        TypeMismatchInfo info = {
            .location = v1_info->location,
            .type = v1_info->type,
            .alignment = 1UL << v1_info->log_alignment,
            .type_check_kind = v1_info->type_check_kind,
        };

        __ubsan_handle_type_mismatch(&info, pointer);
    }
}

View File

@ -10,4 +10,4 @@ mkdir -p $LUNA_BASE/usr/include
mkdir -p $LUNA_BASE/usr/include/luna mkdir -p $LUNA_BASE/usr/include/luna
cp --preserve=timestamps -RT libc/include/ $LUNA_BASE/usr/include cp --preserve=timestamps -RT libc/include/ $LUNA_BASE/usr/include
cp --preserve=timestamps -RT luna/include/luna/ $LUNA_BASE/usr/include/luna cp --preserve=timestamps -RT libluna/include/luna/ $LUNA_BASE/usr/include/luna