diff --git a/kernel/src/arch/MMU.h b/kernel/src/arch/MMU.h
index 2db1230a..2b73d217 100644
--- a/kernel/src/arch/MMU.h
+++ b/kernel/src/arch/MMU.h
@@ -19,6 +19,7 @@ namespace MMU
         NoExecute = 4,
         WriteThrough = 8,
         CacheDisable = 16,
+        Global = 32,
     };
 
     enum class UseHugePages
diff --git a/kernel/src/arch/x86_64/CPU.asm b/kernel/src/arch/x86_64/CPU.asm
index 20f8d5ac..d71842c0 100644
--- a/kernel/src/arch/x86_64/CPU.asm
+++ b/kernel/src/arch/x86_64/CPU.asm
@@ -25,6 +25,13 @@ enable_nx:
     wrmsr
     ret
 
+global enable_global_pages
+enable_global_pages:
+    mov rax, cr4
+    or ax, 1 << 7
+    mov cr4, rax
+    ret
+
 global load_gdt
 load_gdt:
     cli
diff --git a/kernel/src/arch/x86_64/CPU.cpp b/kernel/src/arch/x86_64/CPU.cpp
index 2e2ac01e..afd2d4e8 100644
--- a/kernel/src/arch/x86_64/CPU.cpp
+++ b/kernel/src/arch/x86_64/CPU.cpp
@@ -22,6 +22,7 @@
 
 extern "C" void enable_sse();
 extern "C" void enable_write_protect();
+extern "C" void enable_global_pages();
 extern "C" void enable_nx();
 
 extern void setup_gdt();
@@ -268,6 +269,7 @@ namespace CPU
     void platform_init()
     {
         enable_sse();
+        enable_global_pages();
         // enable_write_protect();
         if (test_nx()) enable_nx();
         else
diff --git a/kernel/src/arch/x86_64/MMU.cpp b/kernel/src/arch/x86_64/MMU.cpp
index 4914eb04..f7cbab51 100644
--- a/kernel/src/arch/x86_64/MMU.cpp
+++ b/kernel/src/arch/x86_64/MMU.cpp
@@ -107,6 +107,7 @@ namespace MMU
         if (entry.no_execute) result |= Flags::NoExecute;
         if (entry.write_through) result |= Flags::WriteThrough;
         if (entry.cache_disabled) result |= Flags::CacheDisable;
+        if (entry.global) result |= Flags::Global;
 
         return result;
     }
@@ -181,6 +182,7 @@ namespace MMU
         entry.write_through = has_flag(flags, Flags::WriteThrough);
         entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
         entry.no_execute = has_flag(flags, Flags::NoExecute);
+        entry.global = has_flag(flags, Flags::Global);
         entry.set_address(phys);
     }
 
@@ -249,6 +251,7 @@ namespace MMU
         l1.write_through = has_flag(flags, Flags::WriteThrough);
         l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
         l1.no_execute = has_flag(flags, Flags::NoExecute);
+        l1.global = has_flag(flags, Flags::Global);
         flush_page(virt);
         return {};
     }
@@ -291,7 +294,7 @@ namespace MMU
         check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
 
         MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
-                                          MMU::ReadWrite | MMU::NoExecute);
+                                          MMU::ReadWrite | MMU::NoExecute | MMU::Global);
 
         g_physical_mapping_base = physical_memory_base;
 
diff --git a/kernel/src/arch/x86_64/MMU.h b/kernel/src/arch/x86_64/MMU.h
index 294956b6..56e7c7e8 100644
--- a/kernel/src/arch/x86_64/MMU.h
+++ b/kernel/src/arch/x86_64/MMU.h
@@ -17,7 +17,7 @@ struct [[gnu::packed]] PageTableEntry
     bool accessed : 1;
     bool ignore0 : 1;
     bool larger_pages : 1;
-    bool ignore1 : 1;
+    bool global : 1;
     u8 available : 3;
     u64 address : 48;
     u8 available2 : 3;
diff --git a/kernel/src/memory/MemoryManager.cpp b/kernel/src/memory/MemoryManager.cpp
index d291c71f..248d1b82 100644
--- a/kernel/src/memory/MemoryManager.cpp
+++ b/kernel/src/memory/MemoryManager.cpp
@@ -44,11 +44,11 @@ namespace MemoryManager
     {
         const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
         const usize rodata_pages = ceil_div(rodata_size, ARCH_PAGE_SIZE);
-        TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
+        TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute | MMU::Global));
 
         const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
         const usize data_pages = ceil_div(data_size, ARCH_PAGE_SIZE);
-        TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
+        TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite | MMU::Global));
 
         return {};
     }
@@ -380,7 +380,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
+            TRY(MMU::map(virt, frame, flags | MMU::Global, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -405,7 +405,7 @@ namespace MemoryManager
 
         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
+            TRY(MMU::map(virt, phys, flags | MMU::Global, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;
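
A side effect worth noting: once CR4.PGE is set, TLB entries for Global pages survive `mov cr3` reloads. That is the point of this patch (kernel mappings stay cached across address-space switches), but it also means a CR3 write can no longer be relied on to flush them. The `invlpg` issued by `flush_page` above still evicts global entries one page at a time; the conventional way to drop all of them at once is to toggle CR4.PGE, since any CR4 write that changes PGE flushes the entire TLB. A minimal sketch in the same NASM style, assuming such a routine were wanted (the `flush_all_including_global` name is hypothetical, not part of this patch):

global flush_all_including_global
flush_all_including_global:
    mov rax, cr4
    and rax, ~(1 << 7) ; clearing CR4.PGE flushes the whole TLB, global entries included
    mov cr4, rax
    or rax, 1 << 7     ; restore CR4.PGE so global translations are enabled again
    mov cr4, rax
    ret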