// (stray VCS timestamp artifact, commented out: 2022-11-15 18:10:32 +00:00)
|
|
|
#include "arch/CPU.h"
|
|
|
|
#include <String.h>
|
|
|
|
#include <Types.h>
|
|
|
|
#include <cpuid.h>
|
|
|
|
|
|
|
|
extern "C" void enable_sse();
|
|
|
|
extern "C" void enable_write_protect();
|
// (stray VCS timestamp artifact, commented out: 2022-11-15 19:41:59 +00:00)
|
|
|
extern "C" void enable_nx();
|
|
|
|
|
|
|
|
// GDT code and definitions
|
|
|
|
|
|
|
|
// Value loaded into the GDTR register by "lgdt": the size of the GDT and its
// linear base address. Must be packed so the in-memory layout matches what the
// CPU expects.
struct GDTR
{
    u16 size;   // size/limit field of the GDT (see setup_gdt)
    u64 offset; // linear address of the GDT
} __attribute__((packed));
|
|
|
|
|
|
|
|
// One 8-byte segment descriptor. Base and limit are scattered across several
// fields for historical reasons; the high nibble of limit1_flags holds the
// flags (granularity, size, long-mode bit).
struct GDTEntry
{
    u16 limit0;      // limit bits 0-15
    u16 base0;       // base bits 0-15
    u8 base1;        // base bits 16-23
    u8 access;       // access byte: present bit, DPL, descriptor type
    u8 limit1_flags; // low nibble: limit bits 16-19; high nibble: flags
    u8 base2;        // base bits 24-31
} __attribute__((packed));
|
|
|
|
|
|
|
|
// Upper half of a 16-byte system descriptor (used for the 64-bit TSS entry):
// holds bits 32-63 of the base address (see set_tss_base).
struct HighGDTEntry
{
    u32 base_high; // base address bits 32-63
    u32 reserved;
} __attribute__((packed));
|
|
|
|
|
|
|
|
// 64-bit Task State Segment layout.
struct TSS
{
    u32 reserved0;
    u64 rsp[3];     // stack pointers loaded on a privilege change to rings 0-2
                    // (rsp[0] is updated by CPU::switch_kernel_stack)
    u64 reserved1;
    u64 ist[7];     // interrupt stack table pointers
    u64 reserved2;
    u16 reserved3;
    u16 iomap_base; // offset of the I/O permission bitmap from the TSS base
} __attribute__((packed));
|
|
|
|
|
|
|
|
struct GlobalDescriptorTable
|
|
|
|
{
|
|
|
|
GDTEntry null;
|
|
|
|
GDTEntry kernel_code;
|
|
|
|
GDTEntry kernel_data;
|
|
|
|
GDTEntry user_code;
|
|
|
|
GDTEntry user_data;
|
|
|
|
GDTEntry tss;
|
|
|
|
HighGDTEntry tss2;
|
|
|
|
} __attribute__((packed)) __attribute((aligned(4096)));
|
|
|
|
|
|
|
|
// The statically-allocated TSS referenced by the GDT's TSS descriptor
// (initialized in setup_tss).
static TSS task_state_segment;
|
|
|
|
|
|
|
|
// The GDT itself. Field order per entry: {limit0, base0, base1, access,
// limit1_flags, base2}. Code segments use flags 0xa* (long-mode bit set);
// data segments use 0xc* (32-bit, 4K granularity). The TSS entry's base and
// limit are filled in at runtime by setup_tss.
static GlobalDescriptorTable gdt = {{0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00}, // null
                                    {0xffff, 0x0000, 0x00, 0x9a, 0xaf, 0x00}, // kernel code: present, DPL 0, executable
                                    {0xffff, 0x0000, 0x00, 0x92, 0xcf, 0x00}, // kernel data: present, DPL 0, writable
                                    {0xffff, 0x0000, 0x00, 0xfa, 0xaf, 0x00}, // user code: present, DPL 3, executable
                                    {0xffff, 0x0000, 0x00, 0xf2, 0xcf, 0x00}, // user data: present, DPL 3, writable
                                    {0x0000, 0x0000, 0x00, 0xe9, 0x0f, 0x00}, // TSS: present, DPL 3, 64-bit available TSS
                                    {0x00000000, 0x00000000}};                // TSS high half
|
|
|
|
|
|
|
|
extern "C" void load_gdt(GDTR* gdtr);
|
|
|
|
extern "C" void load_tr(int segment);
|
|
|
|
|
|
|
|
// Store a 32-bit base address into the three split base fields of a
// descriptor (bits 0-15, 16-23, 24-31).
static void set_base(GDTEntry* entry, u32 base)
{
    entry->base0 = (u16)(base & 0xFFFF);
    entry->base1 = (u8)((base >> 16) & 0xFF);
    entry->base2 = (u8)((base >> 24) & 0xFF);
}
|
|
|
|
|
|
|
|
// Store a 20-bit limit into a descriptor: low 16 bits in limit0, high 4 bits
// in the low nibble of limit1_flags (the flags nibble is preserved).
static void set_limit(GDTEntry* entry, u32 limit)
{
    check(limit <= 0xFFFFF); // descriptors only have 20 limit bits

    entry->limit0 = (u16)(limit & 0xFFFF);

    u8 high_bits = (u8)((limit >> 16) & 0xF);
    u8 flags_nibble = (u8)(entry->limit1_flags & 0xF0);
    entry->limit1_flags = (u8)(flags_nibble | high_bits);
}
|
|
|
|
|
|
|
|
// The 64-bit TSS base address is split across two GDT slots: the low 32 bits
// go into a regular descriptor, the high 32 bits into the following
// high-descriptor slot.
static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, u64 addr)
{
    u32 low = (u32)(addr & 0xffffffff);
    u32 high = (u32)(addr >> 32);

    set_base(tss1, low);
    tss2->base_high = high;
}
|
|
|
|
|
|
|
|
static void setup_tss()
|
|
|
|
{
|
|
|
|
memset(&task_state_segment, 0, sizeof(TSS));
|
|
|
|
task_state_segment.iomap_base = sizeof(TSS);
|
|
|
|
set_tss_base(&gdt.tss, &gdt.tss2, (u64)&task_state_segment);
|
|
|
|
set_limit(&gdt.tss, sizeof(TSS) - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void setup_gdt()
|
|
|
|
{
|
|
|
|
static GDTR gdtr;
|
|
|
|
gdtr.offset = (u64)&gdt;
|
|
|
|
gdtr.size = sizeof(GlobalDescriptorTable);
|
|
|
|
setup_tss();
|
|
|
|
load_gdt(&gdtr);
|
|
|
|
load_tr(0x2b);
|
|
|
|
}
|
|
|
|
|
|
|
|
// IDT code and definitions
|
|
|
|
|
|
|
|
// One 16-byte long-mode IDT gate descriptor. The 64-bit handler address is
// split across offset0/offset1/offset2.
struct IDTEntry
{
    u16 offset0;   // handler address bits 0-15
    u16 selector;  // code segment selector to load on entry
    u8 ist;        // interrupt stack table index (0 = none)
    u8 type_attr;  // gate type and attributes (one of the IDT_TA_* values)
    u16 offset1;   // handler address bits 16-31
    u32 offset2;   // handler address bits 32-63
    u32 ignore;    // reserved
    // Split/reassemble the 64-bit handler address across the offset fields.
    void set_offset(u64 offset);
    u64 get_offset();
};
|
|
|
|
|
|
|
|
// Scatter a 64-bit handler address across the entry's three offset fields.
void IDTEntry::set_offset(u64 offset)
{
    offset0 = (u16)offset;         // bits 0-15
    offset1 = (u16)(offset >> 16); // bits 16-31
    offset2 = (u32)(offset >> 32); // bits 32-63
}
|
|
|
|
|
|
|
|
// Reassemble the 64-bit handler address from the three split offset fields.
u64 IDTEntry::get_offset()
{
    return (u64)offset0 | ((u64)offset1 << 16) | ((u64)offset2 << 32);
}
|
|
|
|
|
|
|
|
// The interrupt descriptor table: one gate per interrupt vector.
static IDTEntry idt[256];
|
|
|
|
|
|
|
|
// Gate type/attribute bytes: bit 7 = present, bits 5-6 = DPL, low nibble =
// gate type (0xE interrupt gate, 0xF trap gate).
#define IDT_TA_InterruptGate 0b10001110     // DPL 0 interrupt gate
#define IDT_TA_UserInterruptGate 0b11101110 // DPL 3 interrupt gate (usable from ring 3)
#define IDT_TA_TrapGate 0b10001111          // DPL 0 trap gate
|
|
|
|
|
|
|
|
struct IDTR
|
|
|
|
{
|
|
|
|
uint16_t limit;
|
|
|
|
uint64_t offset;
|
|
|
|
} __attribute__((packed));
|
|
|
|
|
|
|
|
[[maybe_unused]] static void idt_add_handler(short num, void* handler, u8 type_attr)
|
|
|
|
{
|
|
|
|
check(handler != nullptr);
|
|
|
|
check(num < 256);
|
|
|
|
IDTEntry* entry_for_handler = &idt[num];
|
|
|
|
entry_for_handler->selector = 0x08;
|
|
|
|
entry_for_handler->type_attr = type_attr;
|
|
|
|
entry_for_handler->set_offset((u64)handler);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void setup_idt() // FIXME: Add entries to the IDT.
|
|
|
|
{
|
|
|
|
static IDTR idtr;
|
|
|
|
idtr.limit = 0x0FFF;
|
|
|
|
idtr.offset = (u64)idt;
|
|
|
|
asm("lidt %0" : : "m"(idtr));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generic CPU code
|
|
|
|
|
|
|
|
static bool test_nx()
|
|
|
|
{
|
|
|
|
u32 __unused, edx = 0;
|
|
|
|
if (!__get_cpuid(0x80000001, &__unused, &__unused, &__unused, &edx)) return 0;
|
|
|
|
return edx & (1 << 20);
|
|
|
|
}
|
// (stray VCS timestamp artifact, commented out: 2022-11-15 18:10:32 +00:00)
|
|
|
|
|
|
|
namespace CPU
|
|
|
|
{
|
|
|
|
// Query the CPU's brand string via CPUID leaves 0x80000002-0x80000004.
// Returns a pointer to a static, null-terminated 48-character string, or err
// if any of the leaves is unavailable.
Result<const char*> identify()
{
    static char brand_string[49];

    // Each of the three leaves yields 16 bytes of the brand string in
    // EAX/EBX/ECX/EDX.
    u32 regs[4];
    for (int leaf = 0; leaf < 3; leaf++)
    {
        if (!__get_cpuid(0x80000002 + leaf, &regs[0], &regs[1], &regs[2], &regs[3])) return err;
        memcpy(&brand_string[leaf * 16], regs, 16);
    }

    brand_string[48] = 0; // null-terminate it :)

    return brand_string;
}
|
|
|
|
|
|
|
|
void platform_init()
|
|
|
|
{
|
|
|
|
enable_sse();
|
|
|
|
enable_write_protect();
|
2022-11-15 19:41:59 +00:00
|
|
|
if (test_nx()) enable_nx();
|
|
|
|
setup_gdt();
|
|
|
|
setup_idt();
|
2022-11-15 18:10:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Halt the CPU using the lowest power possible. On x86-64 this is done with
// the "hlt" instruction, which idles the CPU in a low-power state until the
// next interrupt — and interrupts are disabled first, so it should sleep
// forever. The loop is a safeguard in case something (e.g. an NMI) wakes us.
[[noreturn]] void efficient_halt()
{
    asm volatile("cli"); // Disable interrupts
    while (true)
    {
        asm volatile("hlt"); // Rest until the next interrupt... which should never arrive
    }
}
|
// (stray VCS timestamp artifact, commented out: 2022-11-15 19:41:59 +00:00)
|
|
|
|
|
|
|
// Set the stack pointer (RSP0 in the TSS) that the CPU switches to when an
// interrupt or trap transitions from user mode to ring 0.
void switch_kernel_stack(u64 top)
{
    task_state_segment.rsp[0] = top;
}
|
// (stray VCS timestamp artifact, commented out: 2022-11-15 18:10:32 +00:00)
|
|
|
}
|