Diffstat (limited to 'arch/arm64/mm/proc.S')
-rw-r--r-- | arch/arm64/mm/proc.S | 455
1 file changed, 268 insertions(+), 187 deletions(-)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index aafed6902411..9d40f3ffd8d2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -9,13 +9,18 @@

 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
 #include <asm/hwcap.h>
-#include <asm/pgtable.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>

 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
@@ -37,25 +42,41 @@
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

 #ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1
+#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
 #else
-#define TCR_KASAN_FLAGS 0
+#define TCR_KASAN_SW_FLAGS 0
 #endif

-/* Default MAIR_EL1 */
+#ifdef CONFIG_KASAN_HW_TAGS
+#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
+#elif defined(CONFIG_ARM64_MTE)
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
+#else
+#define TCR_MTE_FLAGS	0
+#endif
+
+/*
+ * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
+ */
 #define MAIR_EL1_SET							\
 	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
 	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
-	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
 	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
 	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
-	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

 #ifdef CONFIG_CPU_PM
 /**
  * cpu_do_suspend - save CPU registers context
  *
  * x0: virtual address of context pointer
+ *
+ * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
  */
 SYM_FUNC_START(cpu_do_suspend)
 	mrs	x2, tpidr_el0
@@ -68,11 +89,7 @@ SYM_FUNC_START(cpu_do_suspend)
 	mrs	x9, mdscr_el1
 	mrs	x10, oslsr_el1
 	mrs	x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	x12, tpidr_el1
-alternative_else
-	mrs	x12, tpidr_el2
-alternative_endif
+	get_this_cpu_offset x12
 	mrs	x13, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, x5, [x0, #16]
@@ -80,6 +97,11 @@ alternative_endif
 	stp	x8, x9, [x0, #48]
 	stp	x10, x11, [x0, #64]
 	stp	x12, x13, [x0, #80]
+	/*
+	 * Save x18 as it may be used as a platform register, e.g. by shadow
+	 * call stack.
+	 */
+	str	x18, [x0, #96]
 	ret
 SYM_FUNC_END(cpu_do_suspend)

@@ -88,7 +110,6 @@ SYM_FUNC_END(cpu_do_suspend)
  *
  * x0: Address of context pointer
  */
-	.pushsection ".idmap.text", "awx"
 SYM_FUNC_START(cpu_do_resume)
 	ldp	x2, x3, [x0]
 	ldp	x4, x5, [x0, #16]
@@ -96,6 +117,13 @@ SYM_FUNC_START(cpu_do_resume)
 	ldp	x9, x10, [x0, #48]
 	ldp	x11, x12, [x0, #64]
 	ldp	x13, x14, [x0, #80]
+	/*
+	 * Restore x18, as it may be used as a platform register, and clear
+	 * the buffer to minimize the risk of exposure when used for shadow
+	 * call stack.
+	 */
+	ldr	x18, [x0, #96]
+	str	xzr, [x0, #96]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -118,11 +146,7 @@
 	msr	mdscr_el1, x10

 	msr	sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	msr	tpidr_el1, x13
-alternative_else
-	msr	tpidr_el2, x13
-alternative_endif
+	set_this_cpu_offset x13
 	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
@@ -131,49 +155,22 @@ alternative_endif
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+	reset_amuserenr_el0 x0			// Disable AMU access from EL0

 alternative_if ARM64_HAS_RAS_EXTN
 	msr_s	SYS_DISR_EL1, xzr
 alternative_else_nop_endif

+	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
 	isb
 	ret
 SYM_FUNC_END(cpu_do_resume)
-	.popsection
 #endif

-/*
- * cpu_do_switch_mm(pgd_phys, tsk)
- *
- * Set the translation table base pointer to be pgd_phys.
- *
- * - pgd_phys - physical address of new TTB
- */
-SYM_FUNC_START(cpu_do_switch_mm)
-	mrs	x2, ttbr1_el1
-	mmid	x1, x1				// get mm->context.id
-	phys_to_ttbr x3, x0
-
-alternative_if ARM64_HAS_CNP
-	cbz	x1, 1f				// skip CNP for reserved ASID
-	orr	x3, x3, #TTBR_CNP_BIT
-1:
-alternative_else_nop_endif
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	bfi	x3, x1, #48, #16		// set the ASID field in TTBR0
-#endif
-	bfi	x2, x1, #48, #16		// set the ASID
-	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
-	isb
-	msr	ttbr0_el1, x3			// now update TTBR0
-	isb
-	b	post_ttbr_update_workaround	// Back to C code...
-SYM_FUNC_END(cpu_do_switch_mm)
-
-	.pushsection ".idmap.text", "awx"
+	.pushsection ".idmap.text", "a"

 .macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
-	adrp	\tmp1, empty_zero_page
+	adrp	\tmp1, reserved_pg_dir
 	phys_to_ttbr \tmp2, \tmp1
 	offset_ttbr1 \tmp2, \tmp1
 	msr	ttbr1_el1, \tmp2
@@ -189,50 +186,85 @@ SYM_FUNC_END(cpu_do_switch_mm)
  * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
-SYM_FUNC_START(idmap_cpu_replace_ttbr1)
-	save_and_disable_daif flags=x2
-
+SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
 	__idmap_cpu_set_reserved_ttbr1 x1, x3

 	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb

-	restore_daif x2
-
 	ret
 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
+SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
 	.popsection

 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-	.pushsection ".idmap.text", "awx"
-
-	.macro	__idmap_kpti_get_pgtable_ent, type
-	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
-	dmb	sy				// lines are written back before
-	ldr	\type, [cur_\()\type\()p]	// loading the entry
-	tbz	\type, #0, skip_\()\type	// Skip invalid and
-	tbnz	\type, #11, skip_\()\type	// non-global entries
+
+#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
+				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)
+
+	.pushsection ".idmap.text", "a"
+
+	.macro	pte_to_phys, phys, pte
+	and	\phys, \pte, #PTE_ADDR_LOW
+#ifdef CONFIG_ARM64_PA_BITS_52
+	and	\pte, \pte, #PTE_ADDR_HIGH
+	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
+#endif
 	.endm

-	.macro __idmap_kpti_put_pgtable_ent_ng, type
+	.macro	kpti_mk_tbl_ng, type, num_entries
+	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
+.Ldo_\type:
+	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
+	tbz	\type, #0, .Lnext_\type		// Skip invalid and
+	tbnz	\type, #11, .Lnext_\type	// non-global entries
 	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
-	str	\type, [cur_\()\type\()p]	// Update the entry and ensure
-	dmb	sy				// that it is visible to all
-	dc	civac, cur_\()\type\()p		// CPUs.
+	str	\type, [cur_\type\()p, #-8]	// Update the entry
+	.ifnc	\type, pte
+	tbnz	\type, #1, .Lderef_\type
+	.endif
+.Lnext_\type:
+	cmp	cur_\type\()p, end_\type\()p
+	b.ne	.Ldo_\type
 	.endm
+
+	/*
+	 * Dereference the current table entry and map it into the temporary
+	 * fixmap slot associated with the current level.
+	 */
+	.macro	kpti_map_pgtbl, type, level
+	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
+	dsb	nshst
+	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
+	lsr	pte, pte, #12
+	tlbi	vaae1, pte
+	dsb	nsh
+	isb
+
+	phys_to_pte pte, cur_\type\()p
+	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
+	orr	pte, pte, pte_flags
+	str	pte, [temp_pte, #8 * (\level + 2)]
+	dsb	nshst
+	.endm

 /*
- * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
+ *				   unsigned long temp_pte_va)
  *
  * Called exactly once from stop_machine context by each CPU found during boot.
  */
-__idmap_kpti_flag:
-	.long	1
-SYM_FUNC_START(idmap_kpti_install_ng_mappings)
+	.pushsection	".data", "aw", %progbits
+SYM_DATA(__idmap_kpti_flag, .long 1)
+	.popsection
+
+SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
 	cpu		.req	w0
+	temp_pte	.req	x0
 	num_cpus	.req	w1
-	swapper_pa	.req	x2
+	pte_flags	.req	x1
+	temp_pgd_phys	.req	x2
 	swapper_ttb	.req	x3
 	flag_ptr	.req	x4
 	cur_pgdp	.req	x5
@@ -240,20 +272,27 @@ SYM_FUNC_START(idmap_kpti_install_ng_mappings)
 	pgd		.req	x7
 	cur_pudp	.req	x8
 	end_pudp	.req	x9
-	pud		.req	x10
 	cur_pmdp	.req	x11
 	end_pmdp	.req	x12
-	pmd		.req	x13
 	cur_ptep	.req	x14
 	end_ptep	.req	x15
 	pte		.req	x16
+	valid		.req	x17
+	cur_p4dp	.req	x19
+	end_p4dp	.req	x20

+	mov	x5, x3				// preserve temp_pte arg
 	mrs	swapper_ttb, ttbr1_el1
-	restore_ttbr1	swapper_ttb
-	adr	flag_ptr, __idmap_kpti_flag
+	adr_l	flag_ptr, __idmap_kpti_flag

 	cbnz	cpu, __idmap_kpti_secondary

+#if CONFIG_PGTABLE_LEVELS > 4
+	stp	x29, x30, [sp, #-32]!
+	mov	x29, sp
+	stp	x19, x20, [sp, #16]
+#endif
+
 	/* We're the boot CPU. Wait for the others to catch up */
 	sevl
 1:	wfe
@@ -261,108 +300,111 @@ SYM_FUNC_START(idmap_kpti_install_ng_mappings)
 	eor	w17, w17, num_cpus
 	cbnz	w17, 1b

-	/* We need to walk swapper, so turn off the MMU. */
-	pre_disable_mmu_workaround
-	mrs	x17, sctlr_el1
-	bic	x17, x17, #SCTLR_ELx_M
-	msr	sctlr_el1, x17
+	/* Switch to the temporary page tables on this CPU only */
+	__idmap_cpu_set_reserved_ttbr1 x8, x9
+	offset_ttbr1 temp_pgd_phys, x8
+	msr	ttbr1_el1, temp_pgd_phys
 	isb

-	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
-	/* PGD */
-	mov	cur_pgdp, swapper_pa
-	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
-do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
-	tbnz	pgd, #1, walk_puds
-next_pgd:
-	__idmap_kpti_put_pgtable_ent_ng	pgd
-skip_pgd:
-	add	cur_pgdp, cur_pgdp, #8
-	cmp	cur_pgdp, end_pgdp
-	b.ne	do_pgd
-
-	/* Publish the updated tables and nuke all the TLBs */
-	dsb	sy
-	tlbi	vmalle1is
-	dsb	ish
-	isb
+	mov	temp_pte, x5
+	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

-	/* We're done: fire up the MMU again */
-	mrs	x17, sctlr_el1
-	orr	x17, x17, #SCTLR_ELx_M
-	msr	sctlr_el1, x17
-	isb
+	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

+#ifdef CONFIG_ARM64_LPA2
 	/*
-	 * Invalidate the local I-cache so that any instructions fetched
-	 * speculatively from the PoC are discarded, since they may have
-	 * been dynamically patched at the PoU.
+	 * If LPA2 support is configured, but 52-bit virtual addressing is not
+	 * enabled at runtime, we will fall back to one level of paging less,
+	 * and so we have to walk swapper_pg_dir as if we dereferenced its
+	 * address from a PGD level entry, and terminate the PGD level loop
+	 * right after.
 	 */
-	ic	iallu
-	dsb	nsh
+	adrp	pgd, swapper_pg_dir	// walk &swapper_pg_dir at the next level
+	mov	cur_pgdp, end_pgdp	// must be equal to terminate the PGD loop
+alternative_if_not ARM64_HAS_VA52
+	b	.Lderef_pgd		// skip to the next level
+alternative_else_nop_endif
+	/*
+	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
+	 * addressing to be enabled as well. In this case, the shareability
+	 * bits are repurposed as physical address bits, and should not be
+	 * set in pte_flags.
+	 */
+	bic	pte_flags, pte_flags, #PTE_SHARED
+#endif
+
+	/* PGD */
+	adrp		cur_pgdp, swapper_pg_dir
+	kpti_map_pgtbl	pgd, -1
+	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD
+
+	/* Ensure all the updated entries are visible to secondary CPUs */
+	dsb	ishst
+
+	/* We're done: fire up swapper_pg_dir again */
+	__idmap_cpu_set_reserved_ttbr1 x8, x9
+	msr	ttbr1_el1, swapper_ttb
 	isb

 	/* Set the flag to zero to indicate that we're all done */
 	str	wzr, [flag_ptr]
+#if CONFIG_PGTABLE_LEVELS > 4
+	ldp	x19, x20, [sp, #16]
+	ldp	x29, x30, [sp], #32
+#endif
 	ret

+.Lderef_pgd:
+	/* P4D */
+	.if		CONFIG_PGTABLE_LEVELS > 4
+	p4d		.req	x30
+	pte_to_phys	cur_p4dp, pgd
+	kpti_map_pgtbl	p4d, 0
+	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
+	b		.Lnext_pgd
+	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
+	p4d		.req	pgd
+	.set		.Lnext_p4d, .Lnext_pgd
+	.endif
+
+.Lderef_p4d:
 	/* PUD */
-walk_puds:
-	.if CONFIG_PGTABLE_LEVELS > 3
-	pte_to_phys	cur_pudp, pgd
-	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
-do_pud:	__idmap_kpti_get_pgtable_ent	pud
-	tbnz	pud, #1, walk_pmds
-next_pud:
-	__idmap_kpti_put_pgtable_ent_ng	pud
-skip_pud:
-	add	cur_pudp, cur_pudp, 8
-	cmp	cur_pudp, end_pudp
-	b.ne	do_pud
-	b	next_pgd
-	.else /* CONFIG_PGTABLE_LEVELS <= 3 */
-	mov	pud, pgd
-	b	walk_pmds
-next_pud:
-	b	next_pgd
+	.if		CONFIG_PGTABLE_LEVELS > 3
+	pud		.req	x10
+	pte_to_phys	cur_pudp, p4d
+	kpti_map_pgtbl	pud, 1
+	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
+	b		.Lnext_p4d
+	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
+	pud		.req	pgd
+	.set		.Lnext_pud, .Lnext_pgd
 	.endif

+.Lderef_pud:
 	/* PMD */
-walk_pmds:
-	.if CONFIG_PGTABLE_LEVELS > 2
+	.if		CONFIG_PGTABLE_LEVELS > 2
+	pmd		.req	x13
 	pte_to_phys	cur_pmdp, pud
-	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
-do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
-	tbnz	pmd, #1, walk_ptes
-next_pmd:
-	__idmap_kpti_put_pgtable_ent_ng	pmd
-skip_pmd:
-	add	cur_pmdp, cur_pmdp, #8
-	cmp	cur_pmdp, end_pmdp
-	b.ne	do_pmd
-	b	next_pud
-	.else /* CONFIG_PGTABLE_LEVELS <= 2 */
-	mov	pmd, pud
-	b	walk_ptes
-next_pmd:
-	b	next_pud
+	kpti_map_pgtbl	pmd, 2
+	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
+	b		.Lnext_pud
+	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
+	pmd		.req	pgd
+	.set		.Lnext_pmd, .Lnext_pgd
 	.endif

+.Lderef_pmd:
 	/* PTE */
-walk_ptes:
 	pte_to_phys	cur_ptep, pmd
-	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
-do_pte:	__idmap_kpti_get_pgtable_ent	pte
-	__idmap_kpti_put_pgtable_ent_ng	pte
-skip_pte:
-	add	cur_ptep, cur_ptep, #8
-	cmp	cur_ptep, end_ptep
-	b.ne	do_pte
-	b	next_pmd
+	kpti_map_pgtbl	pte, 3
+	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
+	b	.Lnext_pmd

 	.unreq	cpu
+	.unreq	temp_pte
 	.unreq	num_cpus
-	.unreq	swapper_pa
+	.unreq	pte_flags
+	.unreq	temp_pgd_phys
 	.unreq	cur_pgdp
 	.unreq	end_pgdp
 	.unreq	pgd
@@ -375,6 +417,10 @@ skip_pte:
 	.unreq	cur_ptep
 	.unreq	end_ptep
 	.unreq	pte
+	.unreq	valid
+	.unreq	cur_p4dp
+	.unreq	end_p4dp
+	.unreq	p4d

 	/* Secondary CPUs end up here */
 __idmap_kpti_secondary:
@@ -394,7 +440,6 @@ __idmap_kpti_secondary:
 	cbnz	w16, 1b

 	/* All done, act like nothing happened */
-	offset_ttbr1 swapper_ttb, x16
 	msr	ttbr1_el1, swapper_ttb
 	isb
 	ret
@@ -408,53 +453,51 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
 /*
  * __cpu_setup
  *
- * Initialise the processor for turning the MMU on.  Return in x0 the
- * value of the SCTLR_EL1 register.
+ * Initialise the processor for turning the MMU on.
+ *
+ * Output:
+ *	Return in x0 the value of the SCTLR_EL1 register.
  */
-	.pushsection ".idmap.text", "awx"
+	.pushsection ".idmap.text", "a"
 SYM_FUNC_START(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh

-	mov	x0, #3 << 20
-	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
-	msr	mdscr_el1, x0			// access to the DCC from EL0
+	msr	cpacr_el1, xzr			// Reset cpacr_el1
+	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x1			// access to the DCC from EL0
 	isb					// Unmask debug exceptions now,
 	enable_dbg				// since this is per-cpu
-	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
-	/*
-	 * Memory region attributes
-	 */
-	mov_q	x5, MAIR_EL1_SET
-	msr	mair_el1, x5
-	/*
-	 * Prepare SCTLR
-	 */
-	mov_q	x0, SCTLR_EL1_SET
+	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
+	reset_amuserenr_el0 x1			// Disable AMU access from EL0
+
 	/*
-	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
-	 * both user and kernel.
+	 * Default values for VMSA control registers. These will be adjusted
+	 * below depending on detected CPU features.
 	 */
-	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
-	tcr_clear_errata_bits x10, x9, x5
+	mair	.req	x17
+	tcr	.req	x16
+	mov_q	mair, MAIR_EL1_SET
+	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
+		     TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+		     TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
+
+	tcr_clear_errata_bits tcr, x9, x5

 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l	x9, vabits_actual
-	sub	x9, xzr, x9
-	add	x9, x9, #64
-	tcr_set_t1sz	x10, x9
-#else
-	ldr_l	x9, idmap_t0sz
+	mov	x9, #64 - VA_BITS
+alternative_if ARM64_HAS_VA52
+	tcr_set_t1sz	tcr, x9
+#ifdef CONFIG_ARM64_LPA2
+	orr	tcr, tcr, #TCR_DS
+#endif
+alternative_else_nop_endif
 #endif
-	tcr_set_t0sz	x10, x9

 	/*
 	 * Set the IPS bits in TCR_EL1.
 	 */
-	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
+	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
 #ifdef CONFIG_ARM64_HW_AFDBM
 	/*
 	 * Enable hardware update of the Access Flags bit.
@@ -462,11 +505,49 @@ SYM_FUNC_START(__cpu_setup)
 	 * via capabilities.
 	 */
 	mrs	x9, ID_AA64MMFR1_EL1
-	and	x9, x9, #0xf
+	and	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
 	cbz	x9, 1f
-	orr	x10, x10, #TCR_HA		// hardware Access flag update
+	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
 1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
-	msr	tcr_el1, x10
+	msr	mair_el1, mair
+	msr	tcr_el1, tcr
+
+	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
+	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
+	cbz	x1, .Lskip_indirection
+
+	/*
+	 * The PROT_* macros describing the various memory types may resolve to
+	 * C expressions if they include the PTE_MAYBE_* macros, and so they
+	 * can only be used from C code. The PIE_E* constants below are also
+	 * defined in terms of those macros, but will mask out those
+	 * PTE_MAYBE_* constants, whether they are set or not. So #define them
+	 * as 0x0 here so we can evaluate the PIE_E* constants in asm context.
+	 */
+
+#define PTE_MAYBE_NG		0
+#define PTE_MAYBE_SHARED	0
+
+	mov_q	x0, PIE_E0
+	msr	REG_PIRE0_EL1, x0
+	mov_q	x0, PIE_E1
+	msr	REG_PIR_EL1, x0
+
+#undef PTE_MAYBE_NG
+#undef PTE_MAYBE_SHARED
+
+	mov	x0, TCR2_EL1x_PIE
+	msr	REG_TCR2_EL1, x0
+
+.Lskip_indirection:
+
+	/*
+	 * Prepare SCTLR
+	 */
+	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
 	ret					// return to head.S
+
+	.unreq	mair
+	.unreq	tcr
 SYM_FUNC_END(__cpu_setup)
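A note on the MAIR_EL1_SET change above: the Device-GRE and Normal-WT slots are dropped, and MT_NORMAL_TAGGED starts out aliasing plain Normal memory (it is retargeted by mte_cpu_setup() when MTE is present). The self-contained C sketch below shows how MAIR_ATTRIDX packs one 8-bit attribute per memory-type index; the attribute encodings and MT_* index values are assumptions modelled on recent arm64 headers, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed encodings, in the style of the arm64 headers */
#define MAIR_ATTRIDX(attr, idx)	((uint64_t)(attr) << ((idx) * 8))
#define MAIR_ATTR_DEVICE_nGnRnE	0x00ULL
#define MAIR_ATTR_DEVICE_nGnRE	0x04ULL
#define MAIR_ATTR_NORMAL_NC	0x44ULL
#define MAIR_ATTR_NORMAL	0xffULL	/* Normal, write-back, RW-allocate */
#define MT_NORMAL		0
#define MT_NORMAL_TAGGED	1
#define MT_NORMAL_NC		2
#define MT_DEVICE_nGnRnE	3
#define MT_DEVICE_nGnRE		4

int main(void)
{
	/* MT_NORMAL_TAGGED deliberately reuses the plain Normal attribute;
	 * only that one byte changes later if the CPU supports MTE. */
	uint64_t mair = MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |
			MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |
			MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |
			MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |
			MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED);

	printf("MAIR_EL1 = 0x%016llx\n", (unsigned long long)mair);
	return 0;
}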
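The rendezvous that idmap_kpti_install_ng_mappings performs through __idmap_kpti_flag is easier to see in C. This is a sketch of the control flow only, under the assumption that the flag starts at 1 (counting the boot CPU) and num_cpus is the total number of online CPUs; the real code spins with wfe/sevl and ldaxr/stxr loops rather than plain atomics, and kpti_rendezvous is a hypothetical name.

#include <stdatomic.h>

/* Starts at 1 so the boot CPU is already counted (see SYM_DATA above) */
static atomic_int idmap_kpti_flag = 1;

static void kpti_rendezvous(int cpu, int num_cpus)
{
	if (cpu == 0) {
		/* Boot CPU: wait until every secondary has checked in */
		while (atomic_load(&idmap_kpti_flag) != num_cpus)
			;
		/* ... walk swapper via the temp tables, set nG everywhere ... */
		atomic_store(&idmap_kpti_flag, 0);	/* release the others */
	} else {
		/* Secondary: park on the reserved TTBR1, check in, then spin */
		atomic_fetch_add(&idmap_kpti_flag, 1);
		while (atomic_load(&idmap_kpti_flag) != 0)
			;	/* boot CPU stores 0 when the walk is done */
	}
}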
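The new pte_to_phys macro reassembles a physical address that 52-bit PA configurations split across two PTE fields. A C equivalent follows, assuming the pre-LPA2 64K-page layout for the sake of concrete numbers (PA[51:48] lives in pte[15:12], hence a shift of 36); the real PTE_ADDR_* values come from pgtable-hwdef.h and differ by page size.

#include <stdint.h>

/* Assumed field layout: 64K pages with 52-bit PA */
#define PTE_ADDR_LOW		(((1ULL << 32) - 1) << 16)	/* bits 47:16 */
#define PTE_ADDR_HIGH		(0xfULL << 12)			/* bits 15:12 */
#define PTE_ADDR_HIGH_SHIFT	36

static inline uint64_t pte_to_phys(uint64_t pte)
{
	uint64_t phys = pte & PTE_ADDR_LOW;

	/* Fold PA[51:48], stored in pte[15:12], back into place */
	phys |= (pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT;
	return phys;
}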
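Taken together, kpti_mk_tbl_ng and the .Lderef_* labels amount to a depth-first walk that sets nG on every valid, global entry and descends through table entries. Restated as recursive C for clarity; a sketch only: map_table() is a hypothetical stand-in for the fixmap remapping done by kpti_map_pgtbl, and the 4K-page constants are assumed.

#include <stdint.h>

#define PTE_VALID	(1ULL << 0)
#define PTE_TABLE_BIT	(1ULL << 1)
#define PTE_NG		(1ULL << 11)
#define PTE_ADDR_MASK	(((1ULL << 36) - 1) << 12)	/* bits 47:12, 4K pages */

/* Hypothetical helper standing in for kpti_map_pgtbl's fixmap slot dance */
extern uint64_t *map_table(uint64_t phys);

static void mark_table_ng(uint64_t *table, int nentries, int level)
{
	for (int i = 0; i < nentries; i++) {
		uint64_t entry = table[i];

		if (!(entry & PTE_VALID) || (entry & PTE_NG))
			continue;		/* skip invalid and non-global */
		table[i] = entry | PTE_NG;	/* same bit for blocks and pages */

		/* Above the PTE level, bit 1 marks a next-level table */
		if (level < 3 && (entry & PTE_TABLE_BIT))
			mark_table_ng(map_table(entry & PTE_ADDR_MASK),
				      512, level + 1);	/* 512 entries per 4K table */
	}
}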