kernel/x86_64: Map kernel-side pages as global to avoid TLB flushes

Fixes #34.
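With CR4.PGE enabled, translations whose page-table entry has the global (G) bit set are not evicted from the TLB when CR3 is reloaded, so kernel-side mappings now survive context switches instead of being flushed and re-walked on every address-space switch.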
apio 2024-05-01 18:14:43 +02:00
parent ab70a72434
commit fb52c67f16
Signed by: apio
GPG Key ID: B8A7D06E42258954
6 changed files with 19 additions and 6 deletions

View File

@@ -19,6 +19,7 @@ namespace MMU
     NoExecute = 4,
     WriteThrough = 8,
     CacheDisable = 16,
+    Global = 32,
 };
 enum class UseHugePages
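Since the flags are power-of-two bits, the new member composes with the existing ones as a plain bitmask. A usage sketch (illustrative only; kernel_heap_base is a made-up address, but the call shape matches the MemoryManager hunks below):

    // Sketch, not part of the diff: a kernel-side mapping opting in to Global.
    const u64 virt = kernel_heap_base; // hypothetical kernel-side address
    const u64 phys = TRY(alloc_frame());
    TRY(MMU::map(virt, phys, MMU::ReadWrite | MMU::NoExecute | MMU::Global, MMU::UseHugePages::No));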

View File

@@ -25,6 +25,13 @@ enable_nx:
     wrmsr
     ret
+global enable_global_pages
+enable_global_pages:
+    mov rax, cr4
+    or ax, 1 << 7
+    mov cr4, rax
+    ret
 global load_gdt
 load_gdt:
     cli
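CR4 bit 7 is PGE (Page Global Enable): the G bit in page-table entries is ignored until it is set, and toggling it has the side effect of flushing the TLB. For reference, an equivalent C++ sketch of the new stub (illustrative only; the kernel uses the assembly above):

    // Sketch: read-modify-write CR4 to set PGE, mirroring enable_global_pages.
    static inline void enable_global_pages_sketch()
    {
        unsigned long cr4;
        asm volatile("mov %%cr4, %0" : "=r"(cr4));
        cr4 |= 1ul << 7; // CR4.PGE: honor the G bit in page table entries
        asm volatile("mov %0, %%cr4" : : "r"(cr4) : "memory");
    }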

View File

@@ -22,6 +22,7 @@
 extern "C" void enable_sse();
 extern "C" void enable_write_protect();
+extern "C" void enable_global_pages();
 extern "C" void enable_nx();
 extern void setup_gdt();
@@ -268,6 +269,7 @@ namespace CPU
 void platform_init()
 {
     enable_sse();
+    enable_global_pages();
     // enable_write_protect();
     if (test_nx()) enable_nx();
     else

View File

@@ -107,6 +107,7 @@ namespace MMU
     if (entry.no_execute) result |= Flags::NoExecute;
     if (entry.write_through) result |= Flags::WriteThrough;
     if (entry.cache_disabled) result |= Flags::CacheDisable;
+    if (entry.global) result |= Flags::Global;
     return result;
 }
@@ -181,6 +182,7 @@ namespace MMU
     entry.write_through = has_flag(flags, Flags::WriteThrough);
     entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
     entry.no_execute = has_flag(flags, Flags::NoExecute);
+    entry.global = has_flag(flags, Flags::Global);
     entry.set_address(phys);
 }
@@ -249,6 +251,7 @@ namespace MMU
     l1.write_through = has_flag(flags, Flags::WriteThrough);
     l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
     l1.no_execute = has_flag(flags, Flags::NoExecute);
+    l1.global = has_flag(flags, Flags::Global);
     flush_page(virt);
     return {};
 }
@@ -291,7 +294,7 @@ namespace MMU
     check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
     MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
-                                      MMU::ReadWrite | MMU::NoExecute);
+                                      MMU::ReadWrite | MMU::NoExecute | MMU::Global);
     g_physical_mapping_base = physical_memory_base;
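Two notes on the hunks above: remapping a global page still takes effect immediately, because flush_page (presumably an invlpg wrapper) invalidates the matching TLB entry regardless of the G bit; only CR3 reloads skip global entries. The huge-page direct map gets the same treatment, since a directory entry with the page-size bit set carries its G bit in the same position (bit 8).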

View File

@@ -17,7 +17,7 @@ struct [[gnu::packed]] PageTableEntry
     bool accessed : 1;
     bool ignore0 : 1;
     bool larger_pages : 1;
-    bool ignore1 : 1;
+    bool global : 1;
     u8 available : 3;
     u64 address : 48;
     u8 available2 : 3;
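Counting the single-bit fields from the low end (assuming the usual five x86_64 fields, present through cache-disable, precede the hunk), the repurposed field lands at bit 8 of the entry, which is exactly where the architecture defines the G bit for a 4 KiB PTE. A minimal layout check, assuming the struct above is in scope (sketch, not part of the commit):

    #include <cstdint>
    #include <cstring>
    static_assert(sizeof(PageTableEntry) == 8, "PTE must be exactly 64 bits");
    // Returns true when `global` occupies PTE bit 8, matching the hardware G bit.
    inline bool global_is_bit_8()
    {
        PageTableEntry entry {};
        entry.global = true;
        uint64_t raw = 0;
        std::memcpy(&raw, &entry, sizeof(raw));
        return raw == (uint64_t{1} << 8);
    }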

View File

@@ -44,11 +44,11 @@ namespace MemoryManager
 {
     const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
     const usize rodata_pages = ceil_div(rodata_size, ARCH_PAGE_SIZE);
-    TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
+    TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute | MMU::Global));
     const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
     const usize data_pages = ceil_div(data_size, ARCH_PAGE_SIZE);
-    TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
+    TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite | MMU::Global));
     return {};
 }
@@ -380,7 +380,7 @@ namespace MemoryManager
 while (pages_mapped < count)
 {
     const u64 frame = TRY(alloc_frame());
-    TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
+    TRY(MMU::map(virt, frame, flags | MMU::Global, MMU::UseHugePages::No));
     virt += ARCH_PAGE_SIZE;
     pages_mapped++;
 }
@@ -405,7 +405,7 @@ namespace MemoryManager
 while (pages_mapped < count)
 {
-    TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
+    TRY(MMU::map(virt, phys, flags | MMU::Global, MMU::UseHugePages::No));
     virt += ARCH_PAGE_SIZE;
     phys += ARCH_PAGE_SIZE;
     pages_mapped++;
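Note that only these kernel-side mappers gain MMU::Global; user mappings have to stay non-global, because a global translation survives CR3 reloads and would leak one process's mapping into every other address space.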