Kernel Loader: Difference between revisions
| (4 intermediate revisions by one other user not shown) | |||
| Line 183: | Line 183: | ||
// Maps .rodata as R-- | // Maps .rodata as R-- | ||
attribute = 0x60000000000788; | attribute = 0x60000000000788; | ||
// 9.0.0+ | |||
{ | |||
// On 9.0.0+, .rodata is initially RW- to facilitate applying .rel.ro relocations. | |||
attribute = 0x60000000000708; | |||
} | |||
ttbr1_page_table.Map(final_virtual_kernel_base + ro_offset, ro_end_offset - ro_offset, kernel_base + ro_offset, &attribute, &g_InitialPageAllocator); | ttbr1_page_table.Map(final_virtual_kernel_base + ro_offset, ro_end_offset - ro_offset, kernel_base + ro_offset, &attribute, &g_InitialPageAllocator); | ||
| Line 197: | Line 204: | ||
// Applies all R_AARCH64_RELATIVE relocations. | // Applies all R_AARCH64_RELATIVE relocations. | ||
KernelLdr_ApplyRelocations(final_kernel_virtual_base, final_kernel_virtual_base + dynamic_offset); | KernelLdr_ApplyRelocations(final_kernel_virtual_base, final_kernel_virtual_base + dynamic_offset); | ||
// 9.0.0+: Reprotects .rodata as R--. | |||
ttbr1_page_table.ReprotectToReadOnly(final_kernel_virtual_base + ro_offset, ro_end_offset - ro_offset); | |||
// This is standard libc init_array code, but called for the kernel's binary instead of kernelldr's. | // This is standard libc init_array code, but called for the kernel's binary instead of kernelldr's. | ||
| Line 251: | Line 261: | ||
// 9.0.0+: Save X19-X30 + SP, save context struct in TPIDR_EL1. | // 9.0.0+: Save X19-X30 + SP, save context struct in TPIDR_EL1. | ||
KernelLdr_SaveRegistersToTpidrEl1(); | |||
if (implementer == 0x41) { | if (implementer == 0x41) { | ||
| Line 261: | Line 271: | ||
// Architecture is 0xD07 (Cortex-A57). | // Architecture is 0xD07 (Cortex-A57). | ||
cpuactlr_value = 0x1000000; // Non-cacheable load forwarding enabled | cpuactlr_value = 0x1000000; // Non-cacheable load forwarding enabled | ||
cpuectlr_value = 0x1B00000040; // | cpuectlr_value = 0x1B00000040; // Enable the processor to receive instruction cache and TLB maintenance operations broadcast from other processors in the cluster; set the L2 load/store data prefetch distance to 8 requests; set the L2 instruction fetch prefetch distance to 3 requests. | ||
if (hw_variant == 0 || (hw_variant == 1 && hw_revision <= 1)) { | if (hw_variant == 0 || (hw_variant == 1 && hw_revision <= 1)) { | ||
// If supported, disable load-pass DMB. | // If supported, disable load-pass DMB. | ||
| Line 272: | Line 282: | ||
} else if (architecture == 0xD03) { // 9.0.0+ | } else if (architecture == 0xD03) { // 9.0.0+ | ||
// Architecture is 0xD03 (Cortex-A53). | // Architecture is 0xD03 (Cortex-A53). | ||
cpuactlr_value = 0x90CA000; // | cpuactlr_value = 0x90CA000; // Set L1 data prefetch control to allow 5 outstanding prefetches; enable device split throttle; set the number of independent data prefetch streams to 2; disable transient and no-read-allocate hints for loads; set write streaming no-allocate threshold so the 128th consecutive streaming cache line does not allocate in the L1 or L2 cache. | ||
cpuectlr_value = 0x40; // | cpuectlr_value = 0x40; // Enable hardware management of data coherency with other cores in the cluster. | ||
if (hw_variant != 0 || (hw_variant == 0 && hw_revision > 2)) { | if (hw_variant != 0 || (hw_variant == 0 && hw_revision > 2)) { | ||
// | // If supported, enable data cache clean as data cache clean/invalidate. | ||
cpuactlr_value |= 0x100000000000; | cpuactlr_value |= 0x100000000000; | ||
} | } | ||
| Line 414: | Line 424: | ||
* Restores X19-X30 + SP from the memory pointed to by TPIDR_EL1. | * Restores X19-X30 + SP from the memory pointed to by TPIDR_EL1. | ||
* Returns to the saved LR stored in the context save struct. | * Returns to the saved LR stored in the context save struct. | ||
== KernelLdr_SaveRegistersToTpidrEl1 == | |||
This saves X19-X30 + SP to an input pointer, and moves the pointer into TPIDR_EL1. | |||
== KernelLdr_VerifyTpidrEl1 == | |||
This just verifies that TPIDR_EL1 is equal to an input argument, and then clears TPIDR_EL1. | |||
<pre> | |||
// 9.0.0+ | |||
if (TPIDR_EL1 != input_arg) { | |||
while (1) { /* Infinite loop panic */ } | |||
} | |||
TPIDR_EL1 = 0 | |||
</pre> | |||
== KInitialPageAllocator::KInitialPageAllocator == | == KInitialPageAllocator::KInitialPageAllocator == | ||
| Line 484: | Line 510: | ||
This is just standard aarch64 page table code. Walks the page table, verifying that all entries it would map for the given range and size are free. | This is just standard aarch64 page table code. Walks the page table, verifying that all entries it would map for the given range and size are free. | ||
== KInitialPageTable::ReprotectToReadOnly == | |||
This is just standard aarch64 page table code. Walks the page table, reprotecting the read-write pages in the specified region as read-only. | |||
This is probably a compiler-optimized version of a function that does an arbitrary reprotection. | |||
== KInitialPageTable::GetL1Table == | == KInitialPageTable::GetL1Table == | ||