Compare commits

df9a13cbfb47fd0b25bfafd4b162ef11ee60966e..a16f357ab9d07df27df5b712392c985ba1b1cdd8

No commits in common. "df9a13cbfb47fd0b25bfafd4b162ef11ee60966e" and "a16f357ab9d07df27df5b712392c985ba1b1cdd8" have entirely different histories.

11 changed files with 43 additions and 199 deletions

CMakeLists.txt

@@ -12,7 +12,6 @@ set(SOURCES
     src/boot/Init.cpp
     src/arch/Serial.cpp
     src/arch/Timer.cpp
-    src/thread/Spinlock.cpp
     src/thread/Thread.cpp
     src/thread/Scheduler.cpp
 )

src/arch/CPU.h

@@ -25,6 +25,4 @@ namespace CPU
     void print_stack_trace();
     void get_stack_trace_at(Registers* regs, void (*callback)(u64, void*), void* arg);
     void print_stack_trace_at(Registers* regs);
-
-    void pause();
 }

src/arch/CPU.cpp

@@ -506,11 +506,6 @@ namespace CPU
             },
             &frame_index);
     }
-
-    void pause()
-    {
-        asm volatile("pause");
-    }
 }

 // called by kernel_yield
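The deleted helper emitted the x86 PAUSE instruction, a hint that tells the CPU it is inside a spin-wait loop (reducing power use and the mis-speculation penalty on loop exit); with the spinlock removed below, it had no remaining callers. A minimal sketch of the same hint, assuming an x86 target (the name cpu_relax is illustrative, not Luna's):

    // Spin-wait hint, equivalent to the removed CPU::pause().
    inline void cpu_relax()
    {
        asm volatile("pause");
    }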

src/boot/Init.cpp

@@ -49,7 +49,6 @@ Result<void> init()
     kinfoln("Used memory: %s", to_dynamic_unit(MemoryManager::used()).release_value().chars());
     kinfoln("Reserved memory: %s", to_dynamic_unit(MemoryManager::reserved()).release_value().chars());

-    Thread::init();
     Scheduler::init();

     TRY(Scheduler::new_kernel_thread(async_thread));

src/memory/KernelVM.cpp

@@ -1,11 +1,10 @@
 #include "memory/KernelVM.h"
 #include "arch/MMU.h"
-#include "thread/Spinlock.h"
 #include <luna/Bitmap.h>

 static const u64 KERNEL_VM_RANGE_START = 0xfffffffff0000000;

-static LockedValue<Bitmap> g_kernelvm_bitmap;
+static Bitmap g_kernelvm_bitmap;
 static u8 bitmap_memory[4096];
@@ -15,26 +14,22 @@ static const u64 KERNEL_VM_RANGE_END = KERNEL_VM_RANGE_SIZE + KERNEL_VM_RANGE_ST
 static_assert(KERNEL_VM_RANGE_END == 0xfffffffff8000000);

-static Atomic<usize> g_used_vm;
+static usize g_used_vm;

 namespace KernelVM
 {
     void init()
     {
-        g_kernelvm_bitmap.init();
-
-        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
-        kernelvm_bitmap->initialize(bitmap_memory, sizeof(bitmap_memory));
-        kernelvm_bitmap->clear(false);
+        g_kernelvm_bitmap.initialize(bitmap_memory, sizeof(bitmap_memory));
+        g_kernelvm_bitmap.clear(false);
     }

     Result<u64> alloc_one_page()
     {
-        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
-        for (u64 index = 0; index < kernelvm_bitmap->size(); index++)
+        for (u64 index = 0; index < g_kernelvm_bitmap.size(); index++)
         {
-            if (kernelvm_bitmap->get(index)) continue;
-            kernelvm_bitmap->set(index, true);
+            if (g_kernelvm_bitmap.get(index)) continue;
+            g_kernelvm_bitmap.set(index, true);
             g_used_vm += ARCH_PAGE_SIZE;
             return KERNEL_VM_RANGE_START + (index * ARCH_PAGE_SIZE);
         }
@@ -46,10 +41,9 @@ namespace KernelVM
     {
         u64 first_free_index = 0;
         u64 free_contiguous_pages = 0;

-        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
-        for (u64 index = 0; index < kernelvm_bitmap->size(); index++)
+        for (u64 index = 0; index < g_kernelvm_bitmap.size(); index++)
         {
-            if (kernelvm_bitmap->get(index))
+            if (g_kernelvm_bitmap.get(index))
             {
                 free_contiguous_pages = 0;
                 continue;
@@ -76,7 +70,7 @@ namespace KernelVM
         u64 start_index;
         if (find_several_pages_impl(count, start_index))
         {
-            g_kernelvm_bitmap.lock()->clear_region(start_index, count, true);
+            g_kernelvm_bitmap.clear_region(start_index, count, true);
             g_used_vm += ARCH_PAGE_SIZE * count;
             return KERNEL_VM_RANGE_START + (start_index * ARCH_PAGE_SIZE);
         }
@@ -90,11 +84,9 @@ namespace KernelVM
         u64 index = (address - KERNEL_VM_RANGE_START) / ARCH_PAGE_SIZE;

-        auto kernelvm_bitmap = g_kernelvm_bitmap.lock();
-        if (index >= kernelvm_bitmap->size()) return err(EFAULT);
-        kernelvm_bitmap->set(index, false);
+        if (index >= g_kernelvm_bitmap.size()) return err(EFAULT);
+        g_kernelvm_bitmap.set(index, false);
         g_used_vm -= ARCH_PAGE_SIZE;

         return {};
@@ -107,7 +99,7 @@ namespace KernelVM
         u64 index = (address - KERNEL_VM_RANGE_START) / ARCH_PAGE_SIZE;

-        g_kernelvm_bitmap.lock()->clear_region(index, count, false);
+        g_kernelvm_bitmap.clear_region(index, count, false);
         g_used_vm -= ARCH_PAGE_SIZE * count;

         return {};
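The constants in this file are mutually consistent: the 4096-byte bitmap_memory buffer tracks 4096 * 8 = 32768 pages, and at 4 KiB per page that is 0x8000000 bytes (128 MiB), exactly the distance between KERNEL_VM_RANGE_START (0xfffffffff0000000) and KERNEL_VM_RANGE_END (0xfffffffff8000000) that the static_assert above pins down. As a standalone check:

    // One bit per 4 KiB page: a 4096-byte bitmap spans exactly the 128 MiB kernel VM range.
    static_assert(4096ULL * 8 * 4096 == 0xfffffffff8000000 - 0xfffffffff0000000);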

src/memory/MemoryManager.cpp

@@ -4,7 +4,6 @@
 #include "arch/MMU.h"
 #include "memory/KernelVM.h"
 #include "memory/MemoryMap.h"
-#include "thread/Spinlock.h"
 #include <luna/Alignment.h>
 #include <luna/Bitmap.h>
 #include <luna/CString.h>
@@ -17,13 +16,13 @@ extern const u8 end_of_kernel_rodata[1];
 extern const u8 start_of_kernel_data[1];
 extern const u8 end_of_kernel_data[1];

-static Atomic<usize> free_mem;
-static Atomic<usize> used_mem;
-static Atomic<usize> reserved_mem;
-static Atomic<u64> start_index;
-static LockedValue<Bitmap> g_frame_bitmap;
+static usize free_mem = 0;
+static usize used_mem = 0;
+static usize reserved_mem = 0;
+static u64 start_index = 0;
+static Bitmap g_frame_bitmap;

 #define CHECK_PAGE_ALIGNED(address) expect(is_aligned<ARCH_PAGE_SIZE>(address), "Address is not page-aligned")
@@ -56,8 +55,6 @@ namespace MemoryManager
         MemoryMapIterator iter;
         MemoryMapEntry entry;

-        g_frame_bitmap.init();
-
         const auto largest_free_entry = iter.largest_free();
         expect(largest_free_entry.is_free(), "We were given a largest free memory region that isn't even free!");
@@ -77,12 +74,9 @@ namespace MemoryManager
         expect(frame_bitmap_size < largest_free_entry.size(),
                "No single memory region is enough to hold the frame bitmap");

-        {
-            auto frame_bitmap = g_frame_bitmap.lock();
-            frame_bitmap->initialize(frame_bitmap_addr, frame_bitmap_size);
-            frame_bitmap->clear(true); // Set all pages to used/reserved by default, then clear out the free ones
+        g_frame_bitmap.initialize(frame_bitmap_addr, frame_bitmap_size);
+        g_frame_bitmap.clear(true); // Set all pages to used/reserved by default, then clear out the free ones

         iter.rewind();
         while (iter.next().try_set_value(entry))
@@ -93,8 +87,7 @@ namespace MemoryManager
             else
             {
                 free_mem += entry.size();
-                frame_bitmap->clear_region(index, pages, false);
-            }
+                g_frame_bitmap.clear_region(index, pages, false);
             }
         }
@@ -110,27 +103,23 @@ namespace MemoryManager
         // NOTE: We force these operations to succeed, because if we can't map the frame bitmap to virtual memory
         // there's no point in continuing.
-        auto bitmap_pages = g_frame_bitmap.lock()->size_in_bytes() / ARCH_PAGE_SIZE;
+        auto bitmap_pages = g_frame_bitmap.size_in_bytes() / ARCH_PAGE_SIZE;

         auto virtual_bitmap_base =
             KernelVM::alloc_several_pages(bitmap_pages)
                 .expect_value("Unable to allocate virtual memory for the physical frame bitmap, cannot continue");

-        u64 phys = (u64)g_frame_bitmap.lock()->location();
-        map_frames_at(virtual_bitmap_base, phys, bitmap_pages, MMU::ReadWrite | MMU::NoExecute)
+        map_frames_at(virtual_bitmap_base, (u64)g_frame_bitmap.location(), bitmap_pages,
+                      MMU::ReadWrite | MMU::NoExecute)
             .expect_value("Unable to map the physical frame bitmap to virtual memory, cannot continue");

-        auto frame_bitmap = g_frame_bitmap.lock();
-        frame_bitmap->initialize((void*)virtual_bitmap_base, frame_bitmap->size_in_bytes());
+        g_frame_bitmap.initialize((void*)virtual_bitmap_base, g_frame_bitmap.size_in_bytes());
     }

     void lock_frame(u64 frame)
     {
         const u64 index = frame / ARCH_PAGE_SIZE;
-        auto frame_bitmap = g_frame_bitmap.lock();
-        if (frame_bitmap->get(index)) return;
-        frame_bitmap->set(index, true);
+        if (g_frame_bitmap.get(index)) return;
+        g_frame_bitmap.set(index, true);
         used_mem += ARCH_PAGE_SIZE;
         free_mem -= ARCH_PAGE_SIZE;
     }
@@ -142,11 +131,10 @@ namespace MemoryManager
     Result<u64> alloc_frame()
     {
-        auto frame_bitmap = g_frame_bitmap.lock();
-        for (u64 index = start_index; index < frame_bitmap->size(); index++)
+        for (u64 index = start_index; index < g_frame_bitmap.size(); index++)
         {
-            if (frame_bitmap->get(index)) continue;
-            frame_bitmap->set(index, true);
+            if (g_frame_bitmap.get(index)) continue;
+            g_frame_bitmap.set(index, true);
             start_index = index + 1;
             free_mem -= ARCH_PAGE_SIZE;
             used_mem += ARCH_PAGE_SIZE;
@@ -159,10 +147,9 @@ namespace MemoryManager
     Result<void> free_frame(u64 frame)
     {
         const u64 index = frame / ARCH_PAGE_SIZE;
-        auto frame_bitmap = g_frame_bitmap.lock();
-        if (index > frame_bitmap->size()) return err(EFAULT);
-        if (!frame_bitmap->get(index)) return err(EFAULT);
-        frame_bitmap->set(index, false);
+        if (index > g_frame_bitmap.size()) return err(EFAULT);
+        if (!g_frame_bitmap.get(index)) return err(EFAULT);
+        g_frame_bitmap.set(index, false);
         used_mem -= ARCH_PAGE_SIZE;
         free_mem += ARCH_PAGE_SIZE;
         if (start_index > index) start_index = index;
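alloc_frame() is a first-fit scan with a moving hint: start_index skips the prefix of the bitmap that is known to be fully allocated, and free_frame() rewinds the hint so a freed frame is found again on the next scan. A self-contained sketch of that pattern, using a plain bool array as a simplified stand-in for Luna's Bitmap (all names here are illustrative):

    #include <cstddef>
    #include <optional>

    // First-fit allocator with a low-water-mark hint, as in alloc_frame()/free_frame().
    struct HintedBitmap
    {
        static constexpr size_t N = 1024;
        bool used[N] = {};
        size_t hint = 0; // everything below this index is known to be used

        std::optional<size_t> alloc()
        {
            for (size_t i = hint; i < N; i++)
            {
                if (used[i]) continue;
                used[i] = true;
                hint = i + 1; // the next scan can start past this slot
                return i;
            }
            return std::nullopt;
        }

        void free(size_t i)
        {
            used[i] = false;
            if (hint > i) hint = i; // let the next scan find the freed slot
        }
    };

The hint keeps repeated allocations from rescanning the already-used prefix on every call, while the rewind in free() preserves first-fit behavior.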

src/thread/Spinlock.cpp (deleted)

@@ -1,22 +0,0 @@
-#include "thread/Spinlock.h"
-#include "Log.h"
-#include "arch/CPU.h"
-
-void Spinlock::lock()
-{
-    int expected = -1;
-    while (!m_lock.compare_exchange_strong(expected, 0))
-    {
-        expected = -1;
-        CPU::pause();
-    }
-}
-
-void Spinlock::unlock()
-{
-    int expected = 0;
-    if (!m_lock.compare_exchange_strong(expected, -1))
-    {
-        kwarnln("Spinlock::unlock() called on an unlocked lock with value %d", expected);
-    }
-}
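The removed lock() spins until it swaps the sentinel value -1 (unlocked) for 0 (locked). One subtlety the loop handles: compare_exchange_strong writes the observed value back into expected on failure, so expected must be reset to -1 before each retry. A sketch of the same loop using std::atomic rather than Luna's Atomic<T>:

    #include <atomic>

    // Same protocol as the removed Spinlock: -1 = unlocked, 0 = held.
    struct TinySpinlock
    {
        std::atomic<int> state{-1};

        void lock()
        {
            int expected = -1;
            while (!state.compare_exchange_strong(expected, 0))
                expected = -1; // a failed exchange overwrote expected; reset it
        }

        void unlock() { state.store(-1); }
    };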

src/thread/Spinlock.h (deleted)

@@ -1,86 +0,0 @@
-#pragma once
-#include <luna/Atomic.h>
-
-class Spinlock
-{
-  public:
-    void lock();
-    void unlock();
-
-    bool is_locked() const
-    {
-        return m_lock.load() != -1;
-    }
-
-    // Call this before use if the Spinlock is a global variable.
-    void init()
-    {
-        m_lock = -1;
-    }
-
-  private:
-    Atomic<int> m_lock{-1};
-};
-
-template <typename T> class LockedValue
-{
-    struct LockedValueGuard
-    {
-        LockedValueGuard(LockedValue& value_ref) : m_value_ref(value_ref)
-        {
-        }
-
-        ~LockedValueGuard()
-        {
-            m_value_ref.m_lock.unlock();
-        }
-
-        T& ref()
-        {
-            return m_value_ref.m_value;
-        }
-
-        void set(const T& other)
-        {
-            ref() = other;
-        }
-
-        T* operator->()
-        {
-            return &ref();
-        }
-
-        T& operator*()
-        {
-            return ref();
-        }
-
-      private:
-        LockedValue& m_value_ref;
-    };
-
-  public:
-    LockedValue() : m_value()
-    {
-    }
-
-    LockedValue(T value) : m_value(value)
-    {
-    }
-
-    // Call this before use if the LockedValue is a global variable.
-    void init()
-    {
-        m_lock.init();
-    }
-
-    LockedValueGuard lock()
-    {
-        m_lock.lock();
-        return {*this};
-    }
-
-  private:
-    T m_value;
-    Spinlock m_lock;
-};
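LockedValue<T> couples a value with its lock: the only way to reach the T is through lock(), which returns a guard that releases the spinlock in its destructor, and the KernelVM and MemoryManager hunks above are exactly the call sites being rewritten away from that guard API. A portable sketch of the same shape over std::mutex (Guarded and its members are illustrative names, not Luna's):

    #include <mutex>

    // The data is private; the only accessor hands back a guard holding the mutex.
    template <typename T> class Guarded
    {
        struct Guard
        {
            std::unique_lock<std::mutex> held;
            T* value;

            T* operator->() { return value; }
            T& operator*() { return *value; }
        };

      public:
        Guard lock() { return {std::unique_lock<std::mutex>(m_mutex), &m_value}; }

      private:
        T m_value{};
        std::mutex m_mutex;
    };

Usage mirrors the old call sites: auto guard = g_bitmap.lock(); guard->set(index, true); with the unlock happening automatically when guard goes out of scope.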

src/thread/Thread.cpp

@@ -1,16 +1,10 @@
 #include "thread/Thread.h"
 #include <luna/Alloc.h>
-#include <luna/Atomic.h>

-static Atomic<u64> g_next_id;
+static u64 g_next_id = 1;

 DoublyLinkedList<Thread> g_threads;

-void Thread::init()
-{
-    g_next_id = 1;
-}
-
 Result<Thread*> new_thread()
 {
     Thread* thread = TRY(make<Thread>());

src/thread/Thread.h

@@ -47,8 +47,6 @@ struct Thread : public DoublyLinkedListNode<Thread>
     void set_sp(u64 sp);
     u64 sp();

-    static void init();
-
 };

 void switch_context(Thread* old_thread, Thread* new_thread, Registers* regs);

luna/Atomic.h

@@ -103,16 +103,6 @@ template <typename T> class Atomic
         return fetch_sub(1);
     }

-    T operator+=(const T& other)
-    {
-        return fetch_add(other) + other;
-    }
-
-    T operator-=(const T& other)
-    {
-        return fetch_sub(other) - other;
-    }
-
   private:
     T m_value;
 };
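The removed compound operators compensate for fetch semantics: fetch_add returns the value from before the addition, so operator+= must add other back to evaluate to the new value, matching the built-in += on integers. The same identity with std::atomic:

    #include <atomic>
    #include <cassert>

    int main()
    {
        std::atomic<int> v{5};
        int old = v.fetch_add(3);    // fetch_add returns the prior value, 5
        assert(old + 3 == v.load()); // old + 3 is the new value, 8
    }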