Compare commits

6 Commits

SHA1 Message Date
df9a13cbfb
KernelVM: Make g_used_vm atomic and g_kernelvm_bitmap a LockedValue 2022-12-17 10:56:24 +01:00
132963070b
MemoryManager: Make some variables atomic and the frame bitmap a LockedValue 2022-12-17 10:56:24 +01:00
f97515bd7e
Kernel: Add Spinlock and LockedValue<T> 2022-12-17 10:55:54 +01:00
6e5d2b5335
Thread: Make g_next_id atomic 2022-12-17 10:50:49 +01:00
14461c6fe8
Atomic: Add operators += and -= 2022-12-17 10:49:19 +01:00
9d6235e109
CPU: Add a pause() method 2022-12-17 10:45:55 +01:00
11 changed files with 199 additions and 43 deletions

View File

@@ -12,6 +12,7 @@ set(SOURCES
     src/boot/Init.cpp
     src/arch/Serial.cpp
     src/arch/Timer.cpp
+    src/thread/Spinlock.cpp
     src/thread/Thread.cpp
     src/thread/Scheduler.cpp
 )

View File

@@ -25,4 +25,6 @@ namespace CPU
     void print_stack_trace();
     void get_stack_trace_at(Registers* regs, void (*callback)(u64, void*), void* arg);
     void print_stack_trace_at(Registers* regs);
+
+    void pause();
 }

View File

@@ -506,6 +506,11 @@ namespace CPU
             },
             &frame_index);
     }
 
+    void pause()
+    {
+        asm volatile("pause");
+    }
 }
 
 // called by kernel_yield

View File

@@ -49,6 +49,7 @@ Result<void> init()
     kinfoln("Used memory: %s", to_dynamic_unit(MemoryManager::used()).release_value().chars());
     kinfoln("Reserved memory: %s", to_dynamic_unit(MemoryManager::reserved()).release_value().chars());
 
+    Thread::init();
     Scheduler::init();
 
     TRY(Scheduler::new_kernel_thread(async_thread));

View File

@@ -1,10 +1,11 @@
 #include "memory/KernelVM.h"
 #include "arch/MMU.h"
+#include "thread/Spinlock.h"
 #include <luna/Bitmap.h>
 
 static const u64 KERNEL_VM_RANGE_START = 0xfffffffff0000000;
 
-static Bitmap g_kernelvm_bitmap;
+static LockedValue<Bitmap> g_kernelvm_bitmap;
 static u8 bitmap_memory[4096];
@@ -14,22 +15,26 @@ static const u64 KERNEL_VM_RANGE_END = KERNEL_VM_RANGE_SIZE + KERNEL_VM_RANGE_START;
 static_assert(KERNEL_VM_RANGE_END == 0xfffffffff8000000);
 
-static usize g_used_vm;
+static Atomic<usize> g_used_vm;
 
 namespace KernelVM
 {
     void init()
     {
-        g_kernelvm_bitmap.initialize(bitmap_memory, sizeof(bitmap_memory));
-        g_kernelvm_bitmap.clear(false);
+        g_kernelvm_bitmap.init();
+
+        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
+
+        kernelvm_bitmap->initialize(bitmap_memory, sizeof(bitmap_memory));
+        kernelvm_bitmap->clear(false);
     }
 
     Result<u64> alloc_one_page()
     {
-        for (u64 index = 0; index < g_kernelvm_bitmap.size(); index++)
+        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
+
+        for (u64 index = 0; index < kernelvm_bitmap->size(); index++)
         {
-            if (g_kernelvm_bitmap.get(index)) continue;
-            g_kernelvm_bitmap.set(index, true);
+            if (kernelvm_bitmap->get(index)) continue;
+            kernelvm_bitmap->set(index, true);
             g_used_vm += ARCH_PAGE_SIZE;
             return KERNEL_VM_RANGE_START + (index * ARCH_PAGE_SIZE);
         }
@@ -41,9 +46,10 @@ namespace KernelVM
     {
         u64 first_free_index = 0;
         u64 free_contiguous_pages = 0;
-        for (u64 index = 0; index < g_kernelvm_bitmap.size(); index++)
+        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
+        for (u64 index = 0; index < kernelvm_bitmap->size(); index++)
         {
-            if (g_kernelvm_bitmap.get(index))
+            if (kernelvm_bitmap->get(index))
             {
                 free_contiguous_pages = 0;
                 continue;
@@ -70,7 +76,7 @@ namespace KernelVM
         u64 start_index;
         if (find_several_pages_impl(count, start_index))
         {
-            g_kernelvm_bitmap.clear_region(start_index, count, true);
+            g_kernelvm_bitmap.lock()->clear_region(start_index, count, true);
             g_used_vm += ARCH_PAGE_SIZE * count;
             return KERNEL_VM_RANGE_START + (start_index * ARCH_PAGE_SIZE);
         }
@@ -84,9 +90,11 @@ namespace KernelVM
         u64 index = (address - KERNEL_VM_RANGE_START) / ARCH_PAGE_SIZE;
 
-        if (index >= g_kernelvm_bitmap.size()) return err(EFAULT);
+        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
+
+        if (index >= kernelvm_bitmap->size()) return err(EFAULT);
 
-        g_kernelvm_bitmap.set(index, false);
+        kernelvm_bitmap->set(index, false);
         g_used_vm -= ARCH_PAGE_SIZE;
 
         return {};
@@ -99,7 +107,7 @@ namespace KernelVM
         u64 index = (address - KERNEL_VM_RANGE_START) / ARCH_PAGE_SIZE;
 
-        g_kernelvm_bitmap.clear_region(index, count, false);
+        g_kernelvm_bitmap.lock()->clear_region(index, count, false);
         g_used_vm -= ARCH_PAGE_SIZE * count;
 
         return {};

View File

@@ -4,6 +4,7 @@
 #include "arch/MMU.h"
 #include "memory/KernelVM.h"
 #include "memory/MemoryMap.h"
+#include "thread/Spinlock.h"
 #include <luna/Alignment.h>
 #include <luna/Bitmap.h>
 #include <luna/CString.h>
@@ -16,13 +17,13 @@ extern const u8 end_of_kernel_rodata[1];
 extern const u8 start_of_kernel_data[1];
 extern const u8 end_of_kernel_data[1];
 
-static usize free_mem = 0;
-static usize used_mem = 0;
-static usize reserved_mem = 0;
-static u64 start_index = 0;
+static Atomic<usize> free_mem;
+static Atomic<usize> used_mem;
+static Atomic<usize> reserved_mem;
+static Atomic<u64> start_index;
 
-static Bitmap g_frame_bitmap;
+static LockedValue<Bitmap> g_frame_bitmap;
 
 #define CHECK_PAGE_ALIGNED(address) expect(is_aligned<ARCH_PAGE_SIZE>(address), "Address is not page-aligned")
@@ -55,6 +56,8 @@ namespace MemoryManager
         MemoryMapIterator iter;
         MemoryMapEntry entry;
 
+        g_frame_bitmap.init();
+
         const auto largest_free_entry = iter.largest_free();
         expect(largest_free_entry.is_free(), "We were given a largest free memory region that isn't even free!");
@@ -74,20 +77,24 @@ namespace MemoryManager
         expect(frame_bitmap_size < largest_free_entry.size(),
                "No single memory region is enough to hold the frame bitmap");
 
-        g_frame_bitmap.initialize(frame_bitmap_addr, frame_bitmap_size);
-        g_frame_bitmap.clear(true); // Set all pages to used/reserved by default, then clear out the free ones
-
-        iter.rewind();
-        while (iter.next().try_set_value(entry))
         {
-            const u64 index = entry.address() / ARCH_PAGE_SIZE;
-            const usize pages = entry.size() / ARCH_PAGE_SIZE;
-            if (!entry.is_free()) { reserved_mem += entry.size(); }
-            else
+            auto frame_bitmap = g_frame_bitmap.lock();
+
+            frame_bitmap->initialize(frame_bitmap_addr, frame_bitmap_size);
+            frame_bitmap->clear(true); // Set all pages to used/reserved by default, then clear out the free ones
+
+            iter.rewind();
+            while (iter.next().try_set_value(entry))
             {
-                free_mem += entry.size();
-                g_frame_bitmap.clear_region(index, pages, false);
+                const u64 index = entry.address() / ARCH_PAGE_SIZE;
+                const usize pages = entry.size() / ARCH_PAGE_SIZE;
+                if (!entry.is_free()) { reserved_mem += entry.size(); }
+                else
+                {
+                    free_mem += entry.size();
+                    frame_bitmap->clear_region(index, pages, false);
+                }
             }
         }
@@ -103,23 +110,27 @@ namespace MemoryManager
         // NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
         // there's no point in continuing.
-        auto bitmap_pages = g_frame_bitmap.size_in_bytes() / ARCH_PAGE_SIZE;
+        auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;
 
         auto virtual_bitmap_base =
             KernelVM::alloc_several_pages(bitmap_pages)
                 .expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");
 
-        map_frames_at(virtual_bitmap_base, (u64)g_frame_bitmap.location(), bitmap_pages,
-                      MMU::ReadWrite | MMU::NoExecute)
+        u64 phys = (u64)g_frame_bitmap.lock()->location();
+
+        map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
             .expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");
 
-        g_frame_bitmap.initialize((void*)virtual_bitmap_base, g_frame_bitmap.size_in_bytes());
+        auto frame_bitmap = g_frame_bitmap.lock();
+
+        frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
     }
 
     void lock_frame(u64 frame)
     {
         const u64 index = frame / ARCH_PAGE_SIZE;
-        if (g_frame_bitmap.get(index)) return;
-        g_frame_bitmap.set(index, true);
+        auto frame_bitmap = g_frame_bitmap.lock();
+        if (frame_bitmap->get(index)) return;
+        frame_bitmap->set(index, true);
         used_mem += ARCH_PAGE_SIZE;
         free_mem -= ARCH_PAGE_SIZE;
     }
@@ -131,10 +142,11 @@ namespace MemoryManager
 
     Result<u64> alloc_frame()
     {
-        for (u64 index = start_index; index < g_frame_bitmap.size(); index++)
+        auto frame_bitmap = g_frame_bitmap.lock();
+
+        for (u64 index = start_index; index < frame_bitmap->size(); index++)
         {
-            if (g_frame_bitmap.get(index)) continue;
-            g_frame_bitmap.set(index, true);
+            if (frame_bitmap->get(index)) continue;
+            frame_bitmap->set(index, true);
             start_index = index + 1;
             free_mem -= ARCH_PAGE_SIZE;
             used_mem += ARCH_PAGE_SIZE;
@@ -147,9 +159,10 @@ namespace MemoryManager
     Result<void> free_frame(u64 frame)
     {
         const u64 index = frame / ARCH_PAGE_SIZE;
-        if (index > g_frame_bitmap.size()) return err(EFAULT);
-        if (!g_frame_bitmap.get(index)) return err(EFAULT);
-        g_frame_bitmap.set(index, false);
+        auto frame_bitmap = g_frame_bitmap.lock();
+        if (index > frame_bitmap->size()) return err(EFAULT);
+        if (!frame_bitmap->get(index)) return err(EFAULT);
+        frame_bitmap->set(index, false);
         used_mem -= ARCH_PAGE_SIZE;
         free_mem += ARCH_PAGE_SIZE;
         if (start_index > index) start_index = index;

View File

@@ -0,0 +1,22 @@
+#include "thread/Spinlock.h"
+#include "Log.h"
+#include "arch/CPU.h"
+
+void Spinlock::lock()
+{
+    int expected = -1;
+    while (!m_lock.compare_exchange_strong(expected, 0))
+    {
+        expected = -1;
+        CPU::pause();
+    }
+}
+
+void Spinlock::unlock()
+{
+    int expected = 0;
+    if (!m_lock.compare_exchange_strong(expected, -1))
+    {
+        kwarnln("Spinlock::unlock() called on an unlocked lock with value %d", expected);
+    }
+}
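
For readers unfamiliar with the convention here: the lock word starts at -1 (unlocked) and compare_exchange_strong swaps it to 0 to acquire, spinning with CPU::pause() until the exchange succeeds; unlock() swaps 0 back to -1. A minimal usage sketch, not part of this change set (g_lock, g_counter and increment_counter are hypothetical):

// Sketch only: guarding a shared counter with the new Spinlock.
static Spinlock g_lock;
static u64 g_counter = 0;

void increment_counter()
{
    g_lock.lock();   // spins (calling CPU::pause()) until the -1 -> 0 exchange succeeds
    g_counter++;     // critical section
    g_lock.unlock(); // 0 -> -1; unlocking an already-unlocked lock logs a warning via kwarnln
}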

View File

@@ -0,0 +1,86 @@
+#pragma once
+#include <luna/Atomic.h>
+
+class Spinlock
+{
+  public:
+    void lock();
+    void unlock();
+
+    bool is_locked() const
+    {
+        return m_lock.load() != -1;
+    }
+
+    // Call this before use if the Spinlock is a global variable.
+    void init()
+    {
+        m_lock = -1;
+    }
+
+  private:
+    Atomic<int> m_lock{-1};
+};
+
+template <typename T> class LockedValue
+{
+    struct LockedValueGuard
+    {
+        LockedValueGuard(LockedValue& value_ref) : m_value_ref(value_ref)
+        {
+        }
+
+        ~LockedValueGuard()
+        {
+            m_value_ref.m_lock.unlock();
+        }
+
+        T& ref()
+        {
+            return m_value_ref.m_value;
+        }
+
+        void set(const T& other)
+        {
+            ref() = other;
+        }
+
+        T* operator->()
+        {
+            return &ref();
+        }
+
+        T& operator*()
+        {
+            return ref();
+        }
+
+      private:
+        LockedValue& m_value_ref;
+    };
+
+  public:
+    LockedValue() : m_value()
+    {
+    }
+
+    LockedValue(T value) : m_value(value)
+    {
+    }
+
+    // Call this before use if the LockedValue is a global variable.
+    void init()
+    {
+        m_lock.init();
+    }
+
+    LockedValueGuard lock()
+    {
+        m_lock.lock();
+        return {*this};
+    }
+
+  private:
+    T m_value;
+    Spinlock m_lock;
+};
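
As a usage note: lock() acquires the spinlock and returns a LockedValueGuard whose destructor releases it, so the protected value is only reachable while the guard is alive. This is the pattern the KernelVM and MemoryManager diffs above rely on. A short sketch under that assumption (g_value and bump() are hypothetical, not part of the diff):

// Sketch only: a global protected by LockedValue. As the comment in the class says,
// call init() before first use when the object is a global variable.
static LockedValue<int> g_value;

int bump()
{
    auto guard = g_value.lock(); // takes the spinlock, returns a LockedValueGuard
    *guard += 1;                 // operator* (and operator->) expose the protected value
    return *guard;
}                                // guard is destroyed here, which unlocks the spinlock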

View File

@@ -1,10 +1,16 @@
 #include "thread/Thread.h"
 #include <luna/Alloc.h>
+#include <luna/Atomic.h>
 
-static u64 g_next_id = 1;
+static Atomic<u64> g_next_id;
 
 DoublyLinkedList<Thread> g_threads;
 
+void Thread::init()
+{
+    g_next_id = 1;
+}
+
 Result<Thread*> new_thread()
 {
     Thread* thread = TRY(make<Thread>());

View File

@@ -47,6 +47,8 @@ struct Thread : public DoublyLinkedListNode<Thread>
     void set_sp(u64 sp);
     u64 sp();
+
+    static void init();
 };
 
 void switch_context(Thread* old_thread, Thread* new_thread, Registers* regs);

View File

@@ -103,6 +103,16 @@ template <typename T> class Atomic
         return fetch_sub(1);
     }
 
+    T operator+=(const T& other)
+    {
+        return fetch_add(other) + other;
+    }
+
+    T operator-=(const T& other)
+    {
+        return fetch_sub(other) - other;
+    }
+
   private:
     T m_value;
 };
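
These compound operators return the updated value, matching built-in integer += and -=; that is what lets call sites such as g_used_vm += ARCH_PAGE_SIZE in the KernelVM and MemoryManager diffs above stay unchanged after the switch to Atomic. A brief sketch, assuming fetch_add/fetch_sub behave like their std::atomic counterparts and return the previous value (used_bytes and grow() are hypothetical):

// Sketch only: compound assignment on Atomic<T>.
Atomic<usize> used_bytes;

usize grow(usize size)
{
    return used_bytes += size; // fetch_add(size) + size, i.e. the value after the addition
}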