#include "arch/CPU.h" #include "arch/x86_64/CPU.h" #include "Log.h" #include "arch/Timer.h" #include "arch/x86_64/IO.h" #include #include #include #include #include #include extern "C" void enable_sse(); extern "C" void enable_write_protect(); extern "C" void enable_nx(); // FIXME: Split this into separate files? // GDT code and definitions struct [[gnu::packed]] GDTR { u16 size; u64 offset; }; static_assert(sizeof(GDTR) == 10UL); struct [[gnu::packed]] GDTEntry { u16 limit0; u16 base0; u8 base1; u8 access; u8 limit1_flags; u8 base2; }; static_assert(sizeof(GDTEntry) == 8UL); struct [[gnu::packed]] HighGDTEntry { u32 base_high; u32 reserved; }; static_assert(sizeof(HighGDTEntry) == 8UL); struct [[gnu::packed]] TSS { u32 reserved0; u64 rsp[3]; u64 reserved1; u64 ist[7]; u64 reserved2; u16 reserved3; u16 iomap_base; }; static_assert(sizeof(TSS) == 104UL); struct [[gnu::packed]] alignas(4096) GlobalDescriptorTable { GDTEntry null; GDTEntry kernel_code; GDTEntry kernel_data; GDTEntry user_code; GDTEntry user_data; GDTEntry tss; HighGDTEntry tss2; }; static TSS task_state_segment; static GlobalDescriptorTable gdt = {{0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00}, {0xffff, 0x0000, 0x00, 0x9a, 0xaf, 0x00}, {0xffff, 0x0000, 0x00, 0x92, 0xcf, 0x00}, {0xffff, 0x0000, 0x00, 0xfa, 0xaf, 0x00}, {0xffff, 0x0000, 0x00, 0xf2, 0xcf, 0x00}, {0x0000, 0x0000, 0x00, 0xe9, 0x0f, 0x00}, {0x00000000, 0x00000000}}; extern "C" void load_gdt(GDTR* gdtr); extern "C" void load_tr(int segment); static void set_base(GDTEntry* entry, u32 base) { entry->base0 = (base & 0xFFFF); entry->base1 = (base >> 16) & 0xFF; entry->base2 = (u8)((base >> 24) & 0xFF); } static void set_limit(GDTEntry* entry, u32 limit) { expect(limit <= 0xFFFFF, "Limit too big for a GDT entry"); entry->limit0 = limit & 0xFFFF; entry->limit1_flags = (entry->limit1_flags & 0xF0) | ((limit >> 16) & 0xF); } static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, u64 addr) { set_base(tss1, addr & 0xffffffff); tss2->base_high = (u32)(addr >> 32); } static void setup_tss() { memset(&task_state_segment, 0, sizeof(TSS)); task_state_segment.iomap_base = sizeof(TSS); set_tss_base(&gdt.tss, &gdt.tss2, (u64)&task_state_segment); set_limit(&gdt.tss, sizeof(TSS) - 1); } static void setup_gdt() { static GDTR gdtr; gdtr.offset = (u64)&gdt; gdtr.size = sizeof(GlobalDescriptorTable); setup_tss(); load_gdt(&gdtr); load_tr(0x2b); } // PIC code #define PIC1_COMMAND 0x20 #define PIC1_DATA 0x21 #define PIC2_COMMAND 0xA0 #define PIC2_DATA 0xA1 #define PIC_EOI 0x20 #define ICW1_INIT 0x10 #define ICW1_ICW4 0x01 #define ICW4_8086 0x01 #define io_delay() IO::outb(0x80, 0) static void remap_pic() { IO::outb(PIC1_COMMAND, ICW1_INIT | ICW1_ICW4); io_delay(); IO::outb(PIC2_COMMAND, ICW1_INIT | ICW1_ICW4); io_delay(); IO::outb(PIC1_DATA, 0x20); io_delay(); IO::outb(PIC2_DATA, 0x28); io_delay(); IO::outb(PIC1_DATA, 4); io_delay(); IO::outb(PIC2_DATA, 2); io_delay(); IO::outb(PIC1_DATA, ICW4_8086); io_delay(); IO::outb(PIC2_DATA, ICW4_8086); io_delay(); IO::outb(PIC1_DATA, 0b11111110); io_delay(); IO::outb(PIC2_DATA, 0b11111111); } static void pic_eoi(unsigned char irq) { if (irq >= 8) IO::outb(PIC2_COMMAND, PIC_EOI); IO::outb(PIC1_COMMAND, PIC_EOI); } static void pic_eoi(Registers* regs) { pic_eoi((unsigned char)(regs->error)); // On IRQs, the error code is the IRQ number } // IDT code and definitions struct IDTEntry { u16 offset0; u16 selector; u8 ist; u8 type_attr; u16 offset1; u32 offset2; u32 ignore; void set_offset(u64 offset); u64 get_offset() const; }; 
static_assert(sizeof(IDTEntry) == 16UL);

void IDTEntry::set_offset(u64 offset)
{
    offset0 = (u16)(offset & 0x000000000000ffff);
    offset1 = (u16)((offset & 0x00000000ffff0000) >> 16);
    offset2 = (u32)((offset & 0xffffffff00000000) >> 32);
}

u64 IDTEntry::get_offset() const
{
    u64 offset = 0;
    offset |= (u64)offset0;
    offset |= (u64)offset1 << 16;
    offset |= (u64)offset2 << 32;
    return offset;
}

static IDTEntry idt[256];

#define IDT_TA_InterruptGate 0b10001110
#define IDT_TA_UserInterruptGate 0b11101110
#define IDT_TA_TrapGate 0b10001111

struct [[gnu::packed]] IDTR
{
    u16 limit;
    u64 offset;
};
static_assert(sizeof(IDTR) == 10UL);

static void idt_add_handler(short num, void* handler, u8 type_attr)
{
    check(handler != nullptr);
    expect(num >= 0 && num < 256, "IDT can only hold up to 256 entries");
    IDTEntry* const entry_for_handler = &idt[num];
    entry_for_handler->selector = 0x08; // The kernel code segment.
    entry_for_handler->type_attr = type_attr;
    entry_for_handler->set_offset((u64)handler);
}

#define INT(x) extern "C" void _isr##x()
#define TRAP(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_TrapGate)
#define IRQ(x) idt_add_handler(x, (void*)_isr##x, IDT_TA_InterruptGate)

INT(0); INT(1); INT(2); INT(3); INT(4); INT(5); INT(6); INT(7);
INT(8); INT(10); INT(11); INT(12); INT(13); INT(14); INT(16); INT(17);
INT(18); INT(19); INT(20); INT(21); INT(32);

static void setup_idt()
{
    memset(idt, 0, sizeof(idt));

    TRAP(0); TRAP(1); TRAP(2); TRAP(3); TRAP(4); TRAP(5); TRAP(6); TRAP(7);
    TRAP(8); TRAP(10); TRAP(11); TRAP(12); TRAP(13); TRAP(14); TRAP(16);
    TRAP(17); TRAP(18); TRAP(19); TRAP(20); TRAP(21);
    IRQ(32); // The timer (IRQ 0, remapped to vector 32 by remap_pic).

    static IDTR idtr;
    idtr.limit = 0x0FFF; // sizeof(idt) - 1
    idtr.offset = (u64)idt;
    asm("lidt %0" : : "m"(idtr));
}

// Interrupt handling

#define FIXME_UNHANDLED_INTERRUPT(name)                                                                                \
    kerrorln("FIXME(interrupt): %s", name);                                                                            \
    CPU::efficient_halt();

[[noreturn]] void handle_page_fault(Registers* regs)
{
    u64 cr2;
    asm volatile("mov %%cr2, %0" : "=r"(cr2)); // CR2 holds the faulting address.
    kerrorln("Page fault at RIP %lx while accessing %lx!", regs->rip, cr2);
    CPU::efficient_halt();
}

extern "C" void handle_x86_exception(Registers* regs)
{
    switch (regs->isr)
    {
        case 0: FIXME_UNHANDLED_INTERRUPT("Division by zero");
        case 1: FIXME_UNHANDLED_INTERRUPT("Debug interrupt");
        case 2: FIXME_UNHANDLED_INTERRUPT("NMI (Non-maskable interrupt)");
        case 3: FIXME_UNHANDLED_INTERRUPT("Breakpoint");
        case 4: FIXME_UNHANDLED_INTERRUPT("Overflow");
        case 5: FIXME_UNHANDLED_INTERRUPT("Bound range exceeded");
        case 6: FIXME_UNHANDLED_INTERRUPT("Invalid opcode");
        case 7: FIXME_UNHANDLED_INTERRUPT("Device not available");
        case 10: FIXME_UNHANDLED_INTERRUPT("Invalid TSS");
        case 11: FIXME_UNHANDLED_INTERRUPT("Segment not present");
        case 12: FIXME_UNHANDLED_INTERRUPT("Stack-segment fault");
        case 13: FIXME_UNHANDLED_INTERRUPT("General protection fault");
        case 14: handle_page_fault(regs);
        case 16: FIXME_UNHANDLED_INTERRUPT("x87 floating-point exception");
        case 17: FIXME_UNHANDLED_INTERRUPT("Alignment check");
        case 19: FIXME_UNHANDLED_INTERRUPT("SIMD floating-point exception");
        case 20: FIXME_UNHANDLED_INTERRUPT("Virtualization exception");
        case 21: FIXME_UNHANDLED_INTERRUPT("Control-protection exception");
        default: FIXME_UNHANDLED_INTERRUPT("Reserved exception or #DF/#MC, which shouldn't call handle_x86_exception");
    }
}

// Called from _asm_interrupt_entry
extern "C" void arch_interrupt_entry(Registers* regs)
{
    if (regs->isr < 32) handle_x86_exception(regs);
    else if (regs->isr == 32)
    {
        Timer::tick();
        pic_eoi(regs);
    }
    else
    {
        kwarnln("Caught unexpected IRQ! Halting.");
        CPU::efficient_halt();
    }
}
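// Double faults and machine checks don't go through handle_x86_exception (see
// the default case above); their ISR stubs are expected to jump straight to
// these dedicated [[noreturn]] handlers, since neither is recoverable.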
Halting."); CPU::efficient_halt(); } } extern "C" [[noreturn]] void arch_double_fault() { kerrorln("ERROR: Catched double fault"); CPU::efficient_halt(); } extern "C" [[noreturn]] void arch_machine_check() { kerrorln("ERROR: Machine check failed"); CPU::efficient_halt(); } // Generic CPU code static bool test_nx() { u32 __unused, edx = 0; if (!__get_cpuid(0x80000001, &__unused, &__unused, &__unused, &edx)) return 0; return edx & (1 << 20); } namespace CPU { Result identify() { static char brand_string[49]; u32 buf[4]; if (!__get_cpuid(0x80000002, &buf[0], &buf[1], &buf[2], &buf[3])) return err(ENOTSUP); memcpy(brand_string, buf, 16); if (!__get_cpuid(0x80000003, &buf[0], &buf[1], &buf[2], &buf[3])) return err(ENOTSUP); memcpy(&brand_string[16], buf, 16); if (!__get_cpuid(0x80000004, &buf[0], &buf[1], &buf[2], &buf[3])) return err(ENOTSUP); memcpy(&brand_string[32], buf, 16); brand_string[48] = 0; // null-terminate it :) return brand_string; } const char* platform_string() { return "x86_64"; } void platform_init() { enable_sse(); enable_write_protect(); if (test_nx()) enable_nx(); setup_gdt(); setup_idt(); } void platform_finish_init() { remap_pic(); } void enable_interrupts() { asm volatile("sti"); } void disable_interrupts() { asm volatile("cli"); } void wait_for_interrupt() { asm volatile("hlt"); } [[noreturn]] void efficient_halt() // Halt the CPU, using the lowest power possible. On x86-64 we do this using the // "hlt" instruction, which puts the CPU into a low-power idle state until the // next interrupt arrives... and we disable interrupts beforehand. { asm volatile("cli"); // Disable interrupts loop: asm volatile("hlt"); // Let the cpu rest and pause until the next interrupt arrives... which in this case should // be never (unless an NMI arrives) :) goto loop; // Safeguard: if we ever wake up, start our low-power rest again } void switch_kernel_stack(u64 top) { task_state_segment.rsp[0] = top; } }