Add GDT and IDT setup and loading + NX if supported

apio 2022-11-15 20:41:59 +01:00
parent c9feb11366
commit bb46cd890b
3 changed files with 210 additions and 0 deletions

View File

@@ -7,4 +7,6 @@ namespace CPU
void platform_init();
[[noreturn]] void efficient_halt();
void switch_kernel_stack(u64 top);
}

View File

@@ -15,3 +15,35 @@ enable_write_protect:
or eax, 0x10000 ; set write-protect (CR0.WP is bit 16)
mov cr0, rax
ret
global enable_nx
enable_nx:
mov rcx, 0xC0000080 ; IA32_EFER
rdmsr
or eax, 1 << 11 ; no-execute enable (NXE)
wrmsr
.end:
ret
global load_gdt
load_gdt:
cli ; no interrupts while the GDT is being swapped
lgdt [rdi]
mov ax, 0x10 ; kernel data selector
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
push 0x08 ; kernel code selector
lea rax, [rel .reload_CS]
push rax
retfq ; far return pops RIP, then CS, reloading CS from the new GDT
.reload_CS:
ret
global load_tr
load_tr:
mov rax, rdi
ltr ax
ret
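
For reference, the magic numbers used by these routines correspond to the following values; this is only an illustrative summary (not code from this commit), and the selector values assume the GDT layout installed by the C++ side below.
// Illustrative constants only; not part of the commit.
constexpr unsigned int IA32_EFER = 0xC0000080; // MSR index placed in rcx before rdmsr/wrmsr
constexpr unsigned int EFER_NXE = 1u << 11;    // no-execute enable bit set by enable_nx
constexpr unsigned short KERNEL_CS = 0x08;     // selector pushed before retfq to reload CS
constexpr unsigned short KERNEL_DS = 0x10;     // selector loaded into ds/es/fs/gs/ss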

View File

@@ -5,6 +5,174 @@
extern "C" void enable_sse();
extern "C" void enable_write_protect();
extern "C" void enable_nx();
// GDT code and definitions
struct GDTR
{
u16 size;
u64 offset;
} __attribute__((packed));
struct GDTEntry
{
u16 limit0;
u16 base0;
u8 base1;
u8 access;
u8 limit1_flags;
u8 base2;
} __attribute__((packed));
struct HighGDTEntry
{
u32 base_high;
u32 reserved;
} __attribute__((packed));
struct TSS
{
u32 reserved0;
u64 rsp[3];
u64 reserved1;
u64 ist[7];
u64 reserved2;
u16 reserved3;
u16 iomap_base;
} __attribute__((packed));
struct GlobalDescriptorTable
{
GDTEntry null;
GDTEntry kernel_code;
GDTEntry kernel_data;
GDTEntry user_code;
GDTEntry user_data;
GDTEntry tss;
HighGDTEntry tss2;
} __attribute__((packed)) __attribute__((aligned(4096)));
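
Since every structure here is consumed directly by the CPU, the packed layouts must match the architectural sizes exactly. A few illustrative sanity checks (derived from the definitions above, not part of the commit):
static_assert(sizeof(GDTEntry) == 8, "legacy-format descriptor is 8 bytes");
static_assert(sizeof(HighGDTEntry) == 8, "upper half of the 16-byte long-mode TSS descriptor");
static_assert(sizeof(TSS) == 104, "64-bit TSS is 104 bytes");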
static TSS task_state_segment;
static GlobalDescriptorTable gdt = {{0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00},
{0xffff, 0x0000, 0x00, 0x9a, 0xaf, 0x00},
{0xffff, 0x0000, 0x00, 0x92, 0xcf, 0x00},
{0xffff, 0x0000, 0x00, 0xfa, 0xaf, 0x00},
{0xffff, 0x0000, 0x00, 0xf2, 0xcf, 0x00},
{0x0000, 0x0000, 0x00, 0xe9, 0x0f, 0x00},
{0x00000000, 0x00000000}};
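
The hardcoded descriptor bytes are easier to read when decomposed. For example, for the kernel code entry (a sketch for illustration only, using the same u8 alias as the surrounding code):
// access 0x9a = present | DPL 0 | code/data segment | executable | readable
constexpr u8 kernel_code_access = 0x80 | 0x10 | 0x08 | 0x02; // == 0x9a
// limit1_flags 0xaf = 4 KiB granularity | long mode (L bit) | limit bits 19:16 = 0xf
constexpr u8 kernel_code_flags = 0x80 | 0x20 | 0x0f; // == 0xaf
static_assert(kernel_code_access == 0x9a && kernel_code_flags == 0xaf, "matches the initializer above");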
extern "C" void load_gdt(GDTR* gdtr);
extern "C" void load_tr(int segment);
static void set_base(GDTEntry* entry, u32 base)
{
entry->base0 = (base & 0xFFFF);
entry->base1 = (base >> 16) & 0xFF;
entry->base2 = (u8)((base >> 24) & 0xFF);
}
static void set_limit(GDTEntry* entry, u32 limit)
{
check(limit <= 0xFFFFF);
entry->limit0 = limit & 0xFFFF;
entry->limit1_flags = (entry->limit1_flags & 0xF0) | ((limit >> 16) & 0xF);
}
static void set_tss_base(GDTEntry* tss1, HighGDTEntry* tss2, u64 addr)
{
set_base(tss1, addr & 0xffffffff);
tss2->base_high = (u32)(addr >> 32);
}
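
In long mode the TSS descriptor spans two GDT slots (16 bytes): the usual 8-byte entry plus HighGDTEntry for base bits 63:32. A small round-trip sketch (illustrative only, not used by the commit):
[[maybe_unused]] static u64 tss_descriptor_base(const GDTEntry& low, const HighGDTEntry& high)
{
return (u64)low.base0 | ((u64)low.base1 << 16) | ((u64)low.base2 << 24) | ((u64)high.base_high << 32);
}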
static void setup_tss()
{
memset(&task_state_segment, 0, sizeof(TSS));
task_state_segment.iomap_base = sizeof(TSS); // base at the end of the TSS => no I/O permission bitmap
set_tss_base(&gdt.tss, &gdt.tss2, (u64)&task_state_segment);
set_limit(&gdt.tss, sizeof(TSS) - 1);
}
static void setup_gdt()
{
static GDTR gdtr;
gdtr.offset = (u64)&gdt;
gdtr.size = sizeof(GlobalDescriptorTable);
setup_tss();
load_gdt(&gdtr);
load_tr(0x2b);
}
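
The selector values used in setup_gdt and in the assembly follow the usual (index << 3) | RPL encoding against the table laid out above; an illustrative check (not part of the commit):
constexpr u16 make_selector(u16 index, u16 rpl) { return (u16)((index << 3) | rpl); }
static_assert(make_selector(1, 0) == 0x08, "kernel code, reloaded into CS by load_gdt");
static_assert(make_selector(2, 0) == 0x10, "kernel data, loaded into ds/es/fs/gs/ss");
static_assert(make_selector(5, 3) == 0x2b, "TSS selector passed to load_tr");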
// IDT code and definitions
struct IDTEntry
{
u16 offset0;
u16 selector;
u8 ist;
u8 type_attr;
u16 offset1;
u32 offset2;
u32 ignore;
void set_offset(u64 offset);
u64 get_offset();
};
void IDTEntry::set_offset(u64 offset)
{
offset0 = (u16)(offset & 0x000000000000ffff);
offset1 = (u16)((offset & 0x00000000ffff0000) >> 16);
offset2 = (u32)((offset & 0xffffffff00000000) >> 32);
}
u64 IDTEntry::get_offset()
{
u64 offset = 0;
offset |= (u64)offset0;
offset |= (u64)offset1 << 16;
offset |= (u64)offset2 << 32;
return offset;
}
static IDTEntry idt[256];
#define IDT_TA_InterruptGate 0b10001110
#define IDT_TA_UserInterruptGate 0b11101110
#define IDT_TA_TrapGate 0b10001111
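
The gate attribute bytes break down as present (bit 7), DPL (bits 6:5) and gate type (bits 3:0); interrupt gates (type 0xE) clear IF on entry, while trap gates (type 0xF) leave interrupts enabled. Illustrative checks against the defines above (not part of the commit):
static_assert(IDT_TA_InterruptGate == (0x80 | 0x0E), "present | DPL 0 | interrupt gate");
static_assert(IDT_TA_UserInterruptGate == (0x80 | 0x60 | 0x0E), "present | DPL 3 | interrupt gate");
static_assert(IDT_TA_TrapGate == (0x80 | 0x0F), "present | DPL 0 | trap gate");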
struct IDTR
{
uint16_t limit;
uint64_t offset;
} __attribute__((packed));
[[maybe_unused]] static void idt_add_handler(short num, void* handler, u8 type_attr)
{
check(handler != nullptr);
check(num < 256);
IDTEntry* entry_for_handler = &idt[num];
entry_for_handler->selector = 0x08;
entry_for_handler->type_attr = type_attr;
entry_for_handler->set_offset((u64)handler);
}
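
Once interrupt stubs exist, registering one would look roughly like this (hypothetical names, shown only as a usage sketch inside some init function):
// extern "C" void page_fault_stub();                                // hypothetical assembly stub
// idt_add_handler(14, (void*)page_fault_stub, IDT_TA_InterruptGate); // vector 14 is #PF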
static void setup_idt() // FIXME: Add entries to the IDT.
{
static IDTR idtr;
idtr.limit = 0x0FFF;
idtr.offset = (u64)idt;
asm("lidt %0" : : "m"(idtr));
}
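
The 0x0FFF limit comes from 256 gates of 16 bytes each, minus one because the limit loaded by lidt is inclusive; illustrative checks (not part of the commit):
static_assert(sizeof(IDTEntry) == 16, "64-bit gate descriptors are 16 bytes");
static_assert(sizeof(idt) - 1 == 0x0FFF, "matches the limit loaded into the IDTR above");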
// Generic CPU code
static bool test_nx()
{
// CPUID extended leaf 0x80000001: EDX bit 20 is the NX (execute-disable) feature flag.
u32 __unused, edx = 0;
if (!__get_cpuid(0x80000001, &__unused, &__unused, &__unused, &edx)) return false;
return edx & (1 << 20);
}
namespace CPU
{
@@ -29,6 +197,9 @@ namespace CPU
{
enable_sse();
enable_write_protect();
if (test_nx()) enable_nx();
setup_gdt();
setup_idt();
}
[[noreturn]] void efficient_halt() // Halt the CPU, using the lowest power possible. On x86-64 we do this using the
@@ -41,4 +212,9 @@ namespace CPU
// be never (unless an NMI arrives) :)
goto loop; // Safeguard: if we ever wake up, start our low-power rest again
}
void switch_kernel_stack(u64 top)
{
task_state_segment.rsp[0] = top; // RSP0: the stack the CPU loads when entering ring 0 from ring 3
}
}
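
switch_kernel_stack updates RSP0 in the TSS, which is the stack the CPU switches to whenever execution moves from ring 3 to ring 0 (interrupts, exceptions, and interrupt-based syscalls). A hypothetical call site in a scheduler (names are placeholders, not part of this commit):
// Before handing the CPU to a user-mode task, point RSP0 at that task's kernel stack:
// CPU::switch_kernel_stack((u64)next_task->kernel_stack_top); // next_task is hypothetical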