// Luna/kernel/src/arch/x86_64/CPU.cpp
#include "arch/x86_64/CPU.h"
#include "arch/Serial.h"
#include <String.h>
#include <Types.h>
#include <cpuid.h>
extern "C" void enable_sse();
extern "C" void enable_write_protect();
extern "C" void enable_nx();
// GDT code and definitions
struct GDTR
{
u16 size;
u64 offset;
} __attribute__((packed));
struct GDTEntry
{
u16 limit0;
u16 base0;
u8 base1;
u8 access;
u8 limit1_flags;
u8 base2;
} __attribute__((packed));
struct HighGDTEntry
{
u32 base_high;
u32 reserved;
} __attribute__((packed));
struct TSS
{
u32 reserved0;
u64 rsp[3];
u64 reserved1;
u64 ist[7];
u64 reserved2;
u16 reserved3;
u16 iomap_base;
} __attribute__((packed));
struct GlobalDescriptorTable
{
GDTEntry null;
GDTEntry kernel_code;
GDTEntry kernel_data;
GDTEntry user_code;
GDTEntry user_data;
GDTEntry tss;
HighGDTEntry tss2;
} __attribute__((packed)) __attribute((aligned(4096)));
static TSS task_state_segment;
static GlobalDescriptorTable gdt = {{0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00},
{0xffff, 0x0000, 0x00, 0x9a, 0xaf, 0x00},
{0xffff, 0x0000, 0x00, 0x92, 0xcf, 0x00},
{0xffff, 0x0000, 0x00, 0xfa, 0xaf, 0x00},
{0xffff, 0x0000, 0x00, 0xf2, 0xcf, 0x00},
{0x0000, 0x0000, 0x00, 0xe9, 0x0f, 0x00},
{0x00000000, 0x00000000}};
extern "C" void load_gdt(GDTR* gdtr);
extern "C" void load_tr(int segment);
static void set_base(GDTEntry* entry, u32 base)
{
entry->base0 = (base & 0xFFFF);
entry->base1 = (base >> 16) & 0xFF;
entry->base2 = (u8)((base >> 24) & 0xFF);
}
static void set_limit(GDTEntry* entry, u32 limit)
{
check(limit <= 0xFFFFF);
entry->limit0 = limit & 0xFFFF;
entry->limit1_flags = (entry->limit1_flags & 0xF0) | ((limit >> 16) & 0xF);
}
static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, u64 addr)
{
set_base(tss1, addr & 0xffffffff);
tss2->base_high = (u32)(addr >> 32);
}
static void setup_tss()
{
memset(&task_state_segment, 0, sizeof(TSS));
task_state_segment.iomap_base = sizeof(TSS);
set_tss_base(&gdt.tss, &gdt.tss2, (u64)&task_state_segment);
set_limit(&gdt.tss, sizeof(TSS) - 1);
}
static void setup_gdt()
{
static GDTR gdtr;
gdtr.offset = (u64)&gdt;
gdtr.size = sizeof(GlobalDescriptorTable);
setup_tss();
load_gdt(&gdtr);
load_tr(0x2b);
}
// IDT code and definitions
struct IDTEntry
{
u16 offset0;
u16 selector;
u8 ist;
u8 type_attr;
u16 offset1;
u32 offset2;
u32 ignore;
void set_offset(u64 offset);
u64 get_offset();
};
void IDTEntry::set_offset(u64 offset)
{
offset0 = (u16)(offset & 0x000000000000ffff);
offset1 = (u16)((offset & 0x00000000ffff0000) >> 16);
offset2 = (u32)((offset & 0xffffffff00000000) >> 32);
}
u64 IDTEntry::get_offset()
{
u64 offset = 0;
offset |= (u64)offset0;
offset |= (u64)offset1 << 16;
offset |= (u64)offset2 << 32;
return offset;
}
static IDTEntry idt[256];
#define IDT_TA_InterruptGate 0b10001110
#define IDT_TA_UserInterruptGate 0b11101110
#define IDT_TA_TrapGate 0b10001111
struct IDTR
{
uint16_t limit;
uint64_t offset;
} __attribute__((packed));
static void idt_add_handler(short num, void* handler, u8 type_attr)
{
check(handler != nullptr);
check(num < 256);
IDTEntry* entry_for_handler = &idt[num];
entry_for_handler->selector = 0x08;
entry_for_handler->type_attr = type_attr;
entry_for_handler->set_offset((u64)handler);
}
#define INT(x) extern "C" void _isr##x()
#define TRAP(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_TrapGate)
INT(0);
INT(1);
INT(2);
INT(3);
INT(4);
INT(5);
INT(6);
INT(7);
INT(8);
INT(10);
INT(11);
INT(12);
INT(13);
INT(14);
INT(16);
INT(17);
INT(18);
INT(19);
INT(20);
INT(21);
static void setup_idt()
{
memset(idt, 0, sizeof(idt));
TRAP(0);
TRAP(1);
TRAP(2);
TRAP(3);
TRAP(4);
TRAP(5);
TRAP(6);
TRAP(7);
TRAP(8);
TRAP(10);
TRAP(11);
TRAP(12);
TRAP(13);
TRAP(14);
TRAP(16);
TRAP(17);
TRAP(18);
TRAP(19);
TRAP(20);
TRAP(21);
2022-11-16 16:37:18 +00:00
static IDTR idtr;
idtr.limit = 0x0FFF;
idtr.offset = (u64)idt;
asm("lidt %0" : : "m"(idtr));
}
// Interrupt handling
#define FIXME_UNHANDLED_INTERRUPT(name) \
Serial::println("FIXME(interrupt): " name); \
CPU::efficient_halt();
extern "C" void handle_x86_exception([[maybe_unused]] Registers* regs)
2022-11-16 16:37:18 +00:00
{
switch (regs->isr)
2022-11-16 16:37:18 +00:00
{
case 0: FIXME_UNHANDLED_INTERRUPT("Division by zero");
case 1: FIXME_UNHANDLED_INTERRUPT("Debug interrupt");
case 2: FIXME_UNHANDLED_INTERRUPT("NMI (Non-maskable interrupt)");
case 3: FIXME_UNHANDLED_INTERRUPT("Breakpoint");
case 4: FIXME_UNHANDLED_INTERRUPT("Overflow");
case 5: FIXME_UNHANDLED_INTERRUPT("Bound range exceeded");
case 6: FIXME_UNHANDLED_INTERRUPT("Invalid opcode");
case 7: FIXME_UNHANDLED_INTERRUPT("Device not available");
case 10: FIXME_UNHANDLED_INTERRUPT("Invalid TSS");
case 11: FIXME_UNHANDLED_INTERRUPT("Segment not present");
case 12: FIXME_UNHANDLED_INTERRUPT("Stack-segment fault");
case 13: FIXME_UNHANDLED_INTERRUPT("General protection fault");
case 14: FIXME_UNHANDLED_INTERRUPT("Page fault");
case 16: FIXME_UNHANDLED_INTERRUPT("x87 floating-point exception");
case 17: FIXME_UNHANDLED_INTERRUPT("Alignment check");
case 19: FIXME_UNHANDLED_INTERRUPT("SIMD floating-point exception");
case 20: FIXME_UNHANDLED_INTERRUPT("Virtualization exception");
case 21: FIXME_UNHANDLED_INTERRUPT("Control-protection exception");
default: FIXME_UNHANDLED_INTERRUPT("Reserved exception or #DF/#MC, which shouldn't call handle_x86_exception");
2022-11-16 16:37:18 +00:00
}
}
// Called from _asm_interrupt_entry
extern "C" void arch_interrupt_entry(Registers* regs)
{
if (regs->isr < 32) handle_x86_exception(regs);
2022-11-16 16:37:18 +00:00
else
{
Serial::println("IRQ catched! Halting.");
CPU::efficient_halt();
}
}
extern "C" [[noreturn]] void arch_double_fault()
{
Serial::println("ERROR: Catched double fault");
CPU::efficient_halt();
}
extern "C" [[noreturn]] void arch_machine_check()
{
Serial::println("ERROR: Machine check failed");
CPU::efficient_halt();
}
// Generic CPU code
static bool test_nx()
{
u32 __unused, edx = 0;
if (!__get_cpuid(0x80000001, &__unused, &__unused, &__unused, &edx)) return 0;
return edx & (1 << 20);
}
namespace CPU
{
Result<const char*> identify()
{
static char brand_string[49];
u32 buf[4];
if (!__get_cpuid(0x80000002, &buf[0], &buf[1], &buf[2], &buf[3])) return err;
memcpy(brand_string, buf, 16);
if (!__get_cpuid(0x80000003, &buf[0], &buf[1], &buf[2], &buf[3])) return err;
memcpy(&brand_string[16], buf, 16);
if (!__get_cpuid(0x80000004, &buf[0], &buf[1], &buf[2], &buf[3])) return err;
memcpy(&brand_string[32], buf, 16);
brand_string[48] = 0; // null-terminate it :)
return brand_string;
}
void platform_init()
{
enable_sse();
enable_write_protect();
if (test_nx()) enable_nx();
setup_gdt();
setup_idt();
2022-11-15 18:10:32 +00:00
}
[[noreturn]] void efficient_halt() // Halt the CPU, using the lowest power possible. On x86-64 we do this using the
// "hlt" instruction, which puts the CPU into a low-power idle state until the
// next interrupt arrives... and we disable interrupts beforehand.
{
asm volatile("cli"); // Disable interrupts
loop:
asm volatile("hlt"); // Let the cpu rest and pause until the next interrupt arrives... which in this case should
// be never (unless an NMI arrives) :)
goto loop; // Safeguard: if we ever wake up, start our low-power rest again
}
void switch_kernel_stack(u64 top)
{
task_state_segment.rsp[0] = top;
}
2022-11-15 18:10:32 +00:00
}