// Luna/kernel/src/arch/x86_64/CPU.cpp
#include "arch/CPU.h"
2022-11-30 16:16:36 +00:00
#include "Log.h"
2022-11-19 19:01:01 +00:00
#include "arch/Timer.h"
2022-12-07 11:26:09 +00:00
#include "arch/x86_64/CPU.h"
2022-11-19 19:01:01 +00:00
#include "arch/x86_64/IO.h"
2022-12-07 16:39:59 +00:00
#include "memory/MemoryManager.h"
#include "thread/Scheduler.h"
2022-11-15 18:10:32 +00:00
#include <cpuid.h>
#include <luna/CString.h>
2022-12-07 11:26:09 +00:00
#include <luna/Check.h>
#include <luna/Result.h>
#include <luna/SystemError.h>
#include <luna/Types.h>
2022-11-15 18:10:32 +00:00
extern "C" void enable_sse();
extern "C" void enable_write_protect();
extern "C" void enable_nx();
// FIXME: Split this into separate files?
// GDT code and definitions
struct [[gnu::packed]] GDTR
{
    u16 size;
    u64 offset;
};
static_assert(sizeof(GDTR) == 10UL);

struct [[gnu::packed]] GDTEntry
{
    u16 limit0;
    u16 base0;
    u8 base1;
    u8 access;
    u8 limit1_flags;
    u8 base2;
};
static_assert(sizeof(GDTEntry) == 8UL);

struct [[gnu::packed]] HighGDTEntry
{
    u32 base_high;
    u32 reserved;
};
static_assert(sizeof(HighGDTEntry) == 8UL);

struct [[gnu::packed]] TSS
{
    u32 reserved0;
    u64 rsp[3];
    u64 reserved1;
    u64 ist[7];
    u64 reserved2;
    u16 reserved3;
    u16 iomap_base;
};
static_assert(sizeof(TSS) == 104UL);

struct [[gnu::packed]] alignas(4096) GlobalDescriptorTable
{
    GDTEntry null;
    GDTEntry kernel_code;
    GDTEntry kernel_data;
    GDTEntry user_code;
    GDTEntry user_data;
    GDTEntry tss;
    HighGDTEntry tss2;
};

static TSS task_state_segment;
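
// Descriptor encoding notes: the access byte packs P | DPL(2) | S | type(4), so
// 0x9a/0x92 below are ring-0 code/data, 0xfa/0xf2 the ring-3 equivalents, and
// 0xe9 an available 64-bit TSS. The flags nibble is 0xa (4 KiB granularity +
// long mode) for code and 0xc (granularity + 32-bit default) for data; the TSS
// descriptor's base and limit are filled in at runtime by setup_tss().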
static GlobalDescriptorTable gdt = {{0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00},
                                    {0xffff, 0x0000, 0x00, 0x9a, 0xaf, 0x00},
                                    {0xffff, 0x0000, 0x00, 0x92, 0xcf, 0x00},
                                    {0xffff, 0x0000, 0x00, 0xfa, 0xaf, 0x00},
                                    {0xffff, 0x0000, 0x00, 0xf2, 0xcf, 0x00},
                                    {0x0000, 0x0000, 0x00, 0xe9, 0x0f, 0x00},
                                    {0x00000000, 0x00000000}};
extern "C" void load_gdt(GDTR* gdtr);
extern "C" void load_tr(int segment);
static void set_base(GDTEntry* entry, u32 base)
{
entry->base0 = (base & 0xFFFF);
entry->base1 = (base >> 16) & 0xFF;
entry->base2 = (u8)((base >> 24) & 0xFF);
}
static void set_limit(GDTEntry* entry, u32 limit)
{
expect(limit <= 0xFFFFF, "Limit too big for a GDT entry");
entry->limit0 = limit & 0xFFFF;
entry->limit1_flags = (entry->limit1_flags & 0xF0) | ((limit >> 16) & 0xF);
}
static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, u64 addr)
{
set_base(tss1, addr & 0xffffffff);
tss2->base_high = (u32)(addr >> 32);
}
static void setup_tss()
{
memset(&task_state_segment, 0, sizeof(TSS));
task_state_segment.iomap_base = sizeof(TSS);
set_tss_base(&gdt.tss, &gdt.tss2, (u64)&task_state_segment);
set_limit(&gdt.tss, sizeof(TSS) - 1);
}
static void setup_gdt()
{
static GDTR gdtr;
gdtr.offset = (u64)&gdt;
gdtr.size = sizeof(GlobalDescriptorTable);
setup_tss();
load_gdt(&gdtr);
load_tr(0x2b);
}
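
// load_gdt and load_tr are implemented in assembly elsewhere. A minimal sketch
// of what they are assumed to do (the real load_gdt stub likely also reloads
// the segment registers via a far return after the lgdt):
//
//     load_gdt: lgdt (%rdi)
//               ret
//     load_tr:  mov %edi, %eax
//               ltr %ax
//               ret
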
// PIC code
#define PIC1_COMMAND 0x20
#define PIC1_DATA 0x21
#define PIC2_COMMAND 0xA0
#define PIC2_DATA 0xA1
#define PIC_EOI 0x20
#define ICW1_INIT 0x10
#define ICW1_ICW4 0x01
#define ICW4_8086 0x01
#define io_delay() IO::outb(0x80, 0)
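
// The two 8259 PICs power on mapped over vectors 0-15, which collides with the
// CPU exception range, so the ICW1-ICW4 sequence below moves them to vectors
// 0x20 and up. The writes to port 0x80 just burn a microsecond or so, giving
// the ancient PIC time to settle between commands.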
static void remap_pic()
{
    // ICW1: begin initialization, ICW4 will follow
    IO::outb(PIC1_COMMAND, ICW1_INIT | ICW1_ICW4);
    io_delay();
    IO::outb(PIC2_COMMAND, ICW1_INIT | ICW1_ICW4);
    io_delay();
    // ICW2: remap the master PIC to vectors 0x20-0x27 and the slave to 0x28-0x2f
    IO::outb(PIC1_DATA, 0x20);
    io_delay();
    IO::outb(PIC2_DATA, 0x28);
    io_delay();
    // ICW3: tell the master the slave is cascaded on IRQ2, and the slave its cascade identity
    IO::outb(PIC1_DATA, 4);
    io_delay();
    IO::outb(PIC2_DATA, 2);
    io_delay();
    // ICW4: 8086 mode
    IO::outb(PIC1_DATA, ICW4_8086);
    io_delay();
    IO::outb(PIC2_DATA, ICW4_8086);
    io_delay();
    // Mask every IRQ except IRQ0 (the timer)
    IO::outb(PIC1_DATA, 0b11111110);
    io_delay();
    IO::outb(PIC2_DATA, 0b11111111);
}

static void pic_eoi(unsigned char irq)
{
    // IRQs above 7 arrive through the slave PIC, which needs its own EOI; the master always gets one.
    if (irq >= 8) IO::outb(PIC2_COMMAND, PIC_EOI);
    IO::outb(PIC1_COMMAND, PIC_EOI);
}

static void pic_eoi(Registers* regs)
{
    pic_eoi((unsigned char)(regs->error)); // On IRQs, the error code is the IRQ number
}

// IDT code and definitions
struct IDTEntry
{
    u16 offset0;
    u16 selector;
    u8 ist;
    u8 type_attr;
    u16 offset1;
    u32 offset2;
    u32 ignore;

    void set_offset(u64 offset);
    u64 get_offset() const;
};
static_assert(sizeof(IDTEntry) == 16UL);

void IDTEntry::set_offset(u64 offset)
{
    offset0 = (u16)(offset & 0x000000000000ffff);
    offset1 = (u16)((offset & 0x00000000ffff0000) >> 16);
    offset2 = (u32)((offset & 0xffffffff00000000) >> 32);
}

u64 IDTEntry::get_offset() const
{
    u64 offset = 0;
    offset |= (u64)offset0;
    offset |= (u64)offset1 << 16;
    offset |= (u64)offset2 << 32;
    return offset;
}

static IDTEntry idt[256];

#define IDT_TA_InterruptGate 0b10001110
#define IDT_TA_UserInterruptGate 0b11101110
#define IDT_TA_TrapGate 0b10001111
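
// type_attr packs P | DPL(2) | 0 | gate type(4): type 0xE is a 64-bit interrupt
// gate (IF is cleared on entry), 0xF a trap gate (IF is left alone), and the
// "user" variant only raises DPL to 3 so ring 3 can trigger it with `int`.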

struct [[gnu::packed]] IDTR
{
    u16 limit;
    u64 offset;
};
static_assert(sizeof(IDTR) == 10UL);

static void idt_add_handler(short num, void* handler, u8 type_attr)
{
    check(handler != nullptr);
    expect(num >= 0 && num < 256, "IDT can only hold up to 256 entries");
    IDTEntry* const entry_for_handler = &idt[num];
    entry_for_handler->selector = 0x08; // Kernel code segment
    entry_for_handler->type_attr = type_attr;
    entry_for_handler->set_offset((u64)handler);
}

#define INT(x) extern "C" void _isr##x()
#define TRAP(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_TrapGate)
#define IRQ(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_InterruptGate)

INT(0);
INT(1);
INT(2);
INT(3);
INT(4);
INT(5);
INT(6);
INT(7);
INT(8);
INT(10);
INT(11);
INT(12);
INT(13);
INT(14);
INT(16);
INT(17);
INT(18);
INT(19);
INT(20);
INT(21);
INT(32);

static void setup_idt()
{
    memset(idt, 0, sizeof(idt));
    TRAP(0);
    TRAP(1);
    TRAP(2);
    TRAP(3);
    TRAP(4);
    TRAP(5);
    TRAP(6);
    TRAP(7);
    TRAP(8);
    TRAP(10);
    TRAP(11);
    TRAP(12);
    TRAP(13);
    TRAP(14);
    TRAP(16);
    TRAP(17);
    TRAP(18);
    TRAP(19);
    TRAP(20);
    TRAP(21);
    IRQ(32);
    static IDTR idtr;
    idtr.limit = 0x0FFF; // sizeof(idt) - 1: 256 entries * 16 bytes - 1
    idtr.offset = (u64)idt;
    asm("lidt %0" : : "m"(idtr));
}

// Interrupt handling
#define FIXME_UNHANDLED_INTERRUPT(name)                                                                              \
    kerrorln("FIXME(interrupt): %s", name);                                                                          \
    CPU::efficient_halt();

[[noreturn]] void handle_page_fault(Registers* regs)
{
    u64 cr2; // CR2 holds the faulting address
    asm volatile("mov %%cr2, %0" : "=r"(cr2));
    kerrorln("Page fault at RIP %lx while accessing %lx!", regs->rip, cr2);
    CPU::print_stack_trace_at(regs);
    CPU::efficient_halt();
}
extern "C" void handle_x86_exception(Registers* regs)
2022-11-16 16:37:18 +00:00
{
switch (regs->isr)
2022-11-16 16:37:18 +00:00
{
case 0: FIXME_UNHANDLED_INTERRUPT("Division by zero");
case 1: FIXME_UNHANDLED_INTERRUPT("Debug interrupt");
case 2: FIXME_UNHANDLED_INTERRUPT("NMI (Non-maskable interrupt)");
case 3: FIXME_UNHANDLED_INTERRUPT("Breakpoint");
case 4: FIXME_UNHANDLED_INTERRUPT("Overflow");
case 5: FIXME_UNHANDLED_INTERRUPT("Bound range exceeded");
case 6: FIXME_UNHANDLED_INTERRUPT("Invalid opcode");
case 7: FIXME_UNHANDLED_INTERRUPT("Device not available");
case 10: FIXME_UNHANDLED_INTERRUPT("Invalid TSS");
case 11: FIXME_UNHANDLED_INTERRUPT("Segment not present");
case 12: FIXME_UNHANDLED_INTERRUPT("Stack-segment fault");
case 13: FIXME_UNHANDLED_INTERRUPT("General protection fault");
case 14: handle_page_fault(regs);
case 16: FIXME_UNHANDLED_INTERRUPT("x87 floating-point exception");
case 17: FIXME_UNHANDLED_INTERRUPT("Alignment check");
case 19: FIXME_UNHANDLED_INTERRUPT("SIMD floating-point exception");
case 20: FIXME_UNHANDLED_INTERRUPT("Virtualization exception");
case 21: FIXME_UNHANDLED_INTERRUPT("Control-protection exception");
default: FIXME_UNHANDLED_INTERRUPT("Reserved exception or #DF/#MC, which shouldn't call handle_x86_exception");
2022-11-16 16:37:18 +00:00
}
}

// Called from _asm_interrupt_entry
extern "C" void arch_interrupt_entry(Registers* regs)
{
    if (regs->isr < 32) handle_x86_exception(regs);
    else if (regs->isr == 32) // IRQ0: the timer
    {
        Timer::tick();
        if (should_invoke_scheduler()) Scheduler::invoke(regs);
        pic_eoi(regs);
    }
    else
    {
        kwarnln("Unexpected IRQ caught! Halting.");
        CPU::efficient_halt();
    }
}
extern "C" [[noreturn]] void arch_double_fault()
{
2022-11-30 16:16:36 +00:00
kerrorln("ERROR: Catched double fault");
CPU::efficient_halt();
}
extern "C" [[noreturn]] void arch_machine_check()
{
2022-11-30 16:16:36 +00:00
kerrorln("ERROR: Machine check failed");
CPU::efficient_halt();
}

// Generic CPU code
static bool test_nx()
{
    u32 __unused, edx = 0;
    // CPUID leaf 0x80000001: EDX bit 20 is the NX (no-execute) feature flag
    if (!__get_cpuid(0x80000001, &__unused, &__unused, &__unused, &edx)) return false;
    return edx & (1 << 20);
}

namespace CPU
{
    Result<const char*> identify()
    {
        static char brand_string[49];
        u32 buf[4];
        // CPUID leaves 0x80000002-0x80000004 each yield 16 bytes of the 48-byte brand string
        if (!__get_cpuid(0x80000002, &buf[0], &buf[1], &buf[2], &buf[3])) return err(ENOTSUP);
        memcpy(brand_string, buf, 16);
        if (!__get_cpuid(0x80000003, &buf[0], &buf[1], &buf[2], &buf[3])) return err(ENOTSUP);
        memcpy(&brand_string[16], buf, 16);
        if (!__get_cpuid(0x80000004, &buf[0], &buf[1], &buf[2], &buf[3])) return err(ENOTSUP);
        memcpy(&brand_string[32], buf, 16);
        brand_string[48] = 0; // null-terminate it :)
        return brand_string;
    }
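
    // Hypothetical usage, assuming a TRY-style propagation helper from luna/Result.h:
    //     kinfoln("Running on: %s", TRY(identify()));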

    const char* platform_string()
    {
        return "x86_64";
    }

    void platform_init()
    {
        enable_sse();
        enable_write_protect();
        if (test_nx()) enable_nx();
        setup_gdt();
        setup_idt();
    }

    void platform_finish_init()
    {
        remap_pic();
    }

    void enable_interrupts()
    {
        asm volatile("sti");
    }

    void disable_interrupts()
    {
        asm volatile("cli");
    }

    void wait_for_interrupt()
    {
        asm volatile("hlt");
    }

    [[noreturn]] void efficient_halt() // Halt the CPU, using the lowest power possible. On x86-64 we do this using the
                                       // "hlt" instruction, which puts the CPU into a low-power idle state until the
                                       // next interrupt arrives... and we disable interrupts beforehand.
    {
        asm volatile("cli"); // Disable interrupts
    loop:
        asm volatile("hlt"); // Let the cpu rest and pause until the next interrupt arrives... which in this case should
                             // be never (unless an NMI arrives) :)
        goto loop; // Safeguard: if we ever wake up, start our low-power rest again
    }

    [[noreturn]] void idle_loop()
    {
        asm volatile("sti");
    loop:
        asm volatile("hlt");
        goto loop;
    }

    void switch_kernel_stack(u64 top)
    {
        task_state_segment.rsp[0] = top;
    }
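
    // The stack traces below walk the saved-RBP chain, which assumes the kernel
    // is compiled with frame pointers enabled (-fno-omit-frame-pointer): each
    // frame then starts with the caller's RBP followed by the return address.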

    struct StackFrame
    {
        StackFrame* next;
        u64 instruction;
    };

    void get_stack_trace(void (*callback)(u64, void*), void* arg)
    {
        u64 rbp;
        asm volatile("mov %%rbp, %0" : "=r"(rbp));
        StackFrame* current_frame = (StackFrame*)rbp;
        // FIXME: Validate that the frame itself is readable, might span across multiple pages
        while (current_frame && MemoryManager::validate_readable_page((u64)current_frame))
        {
            callback(current_frame->instruction, arg);
            current_frame = current_frame->next;
        }
    }
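
    // Hypothetical caller: collect up to 16 return addresses into a buffer. The
    // callback must be a plain function or capture-less lambda, since only a raw
    // function pointer plus a context argument are passed through.
    //
    //     struct TraceBuffer { u64 ips[16]; int count; };
    //     static TraceBuffer trace = {};
    //     get_stack_trace([](u64 ip, void* arg) {
    //         auto* buf = (TraceBuffer*)arg;
    //         if (buf->count < 16) buf->ips[buf->count++] = ip;
    //     }, &trace);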

    void print_stack_trace()
    {
        u64 rbp;
        int frame_index = 0;
        asm volatile("mov %%rbp, %0" : "=r"(rbp));
        StackFrame* current_frame = (StackFrame*)rbp;
        // FIXME: Validate that the frame itself is readable, might span across multiple pages
        while (current_frame && MemoryManager::validate_readable_page((u64)current_frame))
        {
            kinfoln("#%d at %p", frame_index++, (void*)current_frame->instruction);
            current_frame = current_frame->next;
        }
    }

    void get_stack_trace_at(Registers* regs, void (*callback)(u64, void*), void* arg)
    {
        callback(regs->rip, arg);
        StackFrame* current_frame = (StackFrame*)regs->rbp;
        // FIXME: Validate that the frame itself is readable, might span across multiple pages
        while (current_frame && MemoryManager::validate_readable_page((u64)current_frame))
        {
            callback(current_frame->instruction, arg);
            current_frame = current_frame->next;
        }
    }

    void print_stack_trace_at(Registers* regs)
    {
        int frame_index = 0;
        get_stack_trace_at(
            regs,
            [](u64 instruction, void* arg) {
                int* ptr = (int*)arg;
                kinfoln("#%d at %p", *ptr, (void*)instruction);
                (*ptr)++;
            },
            &frame_index);
    }

    void pause()
    {
        asm volatile("pause");
    }
}

// called by kernel_yield
extern "C" void switch_task(Registers* regs)
{
    Scheduler::switch_task(regs);
}