kernel/x86_64: Map kernel-side pages as global to avoid TLB flushes
Fixes #34.
parent ab70a72434
commit fb52c67f16
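Background for the change: a page-table entry marked global is kept in the TLB across CR3 reloads (i.e. across address-space switches), so kernel-side translations stay warm instead of being refetched after every context switch. This requires CR4.PGE (bit 7), which the new enable_global_pages routine below sets. The flip side is that a plain CR3 reload no longer evicts those entries, so any "flush everything" path must either invlpg each page or toggle CR4.PGE. A minimal sketch of such a helper follows; it is not part of this commit, and the function name and inline-asm wrapper style are assumptions:

// Hypothetical helper, not in this commit: writing CR4 with PGE cleared and
// then restored invalidates the whole TLB, including global entries that a
// CR3 reload would otherwise keep.
static void flush_all_including_global()
{
    u64 cr4;
    asm volatile("mov %%cr4, %0" : "=r"(cr4));
    asm volatile("mov %0, %%cr4" : : "r"(cr4 & ~(1ull << 7)) : "memory"); // clear PGE
    asm volatile("mov %0, %%cr4" : : "r"(cr4) : "memory");                // restore PGE
}

Per-page invalidation through the existing flush_page/invlpg path still works on global entries; only the CR3-reload shortcut is affected.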
@@ -19,6 +19,7 @@ namespace MMU
         NoExecute = 4,
         WriteThrough = 8,
         CacheDisable = 16,
+        Global = 32,
     };

     enum class UseHugePages
@@ -25,6 +25,13 @@ enable_nx:
     wrmsr
     ret

+global enable_global_pages
+enable_global_pages:
+    mov rax, cr4
+    or ax, 1 << 7
+    mov cr4, rax
+    ret
+
 global load_gdt
 load_gdt:
     cli
@@ -22,6 +22,7 @@

 extern "C" void enable_sse();
 extern "C" void enable_write_protect();
+extern "C" void enable_global_pages();
 extern "C" void enable_nx();

 extern void setup_gdt();
@@ -268,6 +269,7 @@ namespace CPU
     void platform_init()
     {
         enable_sse();
+        enable_global_pages();
         // enable_write_protect();
         if (test_nx()) enable_nx();
         else
@@ -107,6 +107,7 @@ namespace MMU
         if (entry.no_execute) result |= Flags::NoExecute;
         if (entry.write_through) result |= Flags::WriteThrough;
         if (entry.cache_disabled) result |= Flags::CacheDisable;
+        if (entry.global) result |= Flags::Global;
         return result;
     }

@@ -181,6 +182,7 @@ namespace MMU
         entry.write_through = has_flag(flags, Flags::WriteThrough);
         entry.cache_disabled = has_flag(flags, Flags::CacheDisable);
         entry.no_execute = has_flag(flags, Flags::NoExecute);
+        entry.global = has_flag(flags, Flags::Global);
         entry.set_address(phys);
     }

@@ -249,6 +251,7 @@ namespace MMU
         l1.write_through = has_flag(flags, Flags::WriteThrough);
         l1.cache_disabled = has_flag(flags, Flags::CacheDisable);
         l1.no_execute = has_flag(flags, Flags::NoExecute);
+        l1.global = has_flag(flags, Flags::Global);
         flush_page(virt);
         return {};
     }
@@ -291,7 +294,7 @@ namespace MMU

         check(physical_memory_size % ARCH_HUGE_PAGE_SIZE == 0);
         MemoryManager::map_huge_frames_at(physical_memory_base, 0, physical_memory_size / ARCH_HUGE_PAGE_SIZE,
-                                          MMU::ReadWrite | MMU::NoExecute);
+                                          MMU::ReadWrite | MMU::NoExecute | MMU::Global);

         g_physical_mapping_base = physical_memory_base;

@@ -17,7 +17,7 @@ struct [[gnu::packed]] PageTableEntry
     bool accessed : 1;
     bool ignore0 : 1;
     bool larger_pages : 1;
-    bool ignore1 : 1;
+    bool global : 1;
     u8 available : 3;
    u64 address : 48;
     u8 available2 : 3;
@@ -44,11 +44,11 @@ namespace MemoryManager
     {
         const usize rodata_size = (usize)(end_of_kernel_rodata - start_of_kernel_rodata);
         const usize rodata_pages = ceil_div(rodata_size, ARCH_PAGE_SIZE);
-        TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute));
+        TRY(remap((u64)start_of_kernel_rodata, rodata_pages, MMU::NoExecute | MMU::Global));

         const usize data_size = (usize)(end_of_kernel_data - start_of_kernel_data);
         const usize data_pages = ceil_div(data_size, ARCH_PAGE_SIZE);
-        TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite));
+        TRY(remap((u64)start_of_kernel_data, data_pages, MMU::NoExecute | MMU::ReadWrite | MMU::Global));

         return {};
     }
@@ -380,7 +380,7 @@ namespace MemoryManager
         while (pages_mapped < count)
         {
             const u64 frame = TRY(alloc_frame());
-            TRY(MMU::map(virt, frame, flags, MMU::UseHugePages::No));
+            TRY(MMU::map(virt, frame, flags | MMU::Global, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             pages_mapped++;
         }
@@ -405,7 +405,7 @@ namespace MemoryManager

         while (pages_mapped < count)
         {
-            TRY(MMU::map(virt, phys, flags, MMU::UseHugePages::No));
+            TRY(MMU::map(virt, phys, flags | MMU::Global, MMU::UseHugePages::No));
             virt += ARCH_PAGE_SIZE;
             phys += ARCH_PAGE_SIZE;
             pages_mapped++;