Diffstat (limited to 'arch/arm64/include/asm/mmu_context.h')
-rw-r--r--  arch/arm64/include/asm/mmu_context.h  153
1 file changed, 87 insertions(+), 66 deletions(-)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 3827ff4040a3..c768d16b81a4 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -14,13 +14,14 @@
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
-#include <asm/pgtable.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
 
@@ -36,59 +37,44 @@ static inline void contextidr_thread_switch(struct task_struct *next)
 }
 
 /*
- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
+ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
  */
-static inline void cpu_set_reserved_ttbr0(void)
+static inline void cpu_set_reserved_ttbr0_nosync(void)
 {
-        unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
+        unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 
         write_sysreg(ttbr, ttbr0_el1);
-        isb();
 }
 
-static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+static inline void cpu_set_reserved_ttbr0(void)
 {
-        BUG_ON(pgd == swapper_pg_dir);
-        cpu_set_reserved_ttbr0();
-        cpu_do_switch_mm(virt_to_phys(pgd),mm);
+        cpu_set_reserved_ttbr0_nosync();
+        isb();
 }
 
-/*
- * TCR.T0SZ value to use when the ID map is active. Usually equals
- * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
- * physical memory, in which case it will be smaller.
- */
-extern u64 idmap_t0sz;
-extern u64 idmap_ptrs_per_pgd;
+void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 
-static inline bool __cpu_uses_extended_idmap(void)
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 {
-        if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
-                return false;
-
-        return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
+        BUG_ON(pgd == swapper_pg_dir);
+        cpu_do_switch_mm(virt_to_phys(pgd),mm);
 }
 
 /*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
+ * TCR.T0SZ value to use when the ID map is active.
  */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
-        return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
+#define idmap_t0sz      TCR_T0SZ(IDMAP_VA_BITS)
 
 /*
- * Set TCR.T0SZ to its default value (based on VA_BITS)
+ * Ensure TCR.T0SZ is set to the provided value.
  */
 static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
-        unsigned long tcr;
+        unsigned long tcr = read_sysreg(tcr_el1);
 
-        if (!__cpu_uses_extended_idmap())
+        if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
                 return;
 
-        tcr = read_sysreg(tcr_el1);
         tcr &= ~TCR_T0SZ_MASK;
         tcr |= t0sz << TCR_T0SZ_OFFSET;
         write_sysreg(tcr, tcr_el1);
@@ -132,35 +118,44 @@ static inline void cpu_install_idmap(void)
 }
 
 /*
- * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
- * avoiding the possibility of conflicting TLB entries being allocated.
+ * Load our new page tables. A strict BBM approach requires that we ensure that
+ * TLBs are free of any entries that may overlap with the global mappings we are
+ * about to install.
+ *
+ * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
+ * services), while for a userspace-driven test_resume cycle it points to
+ * userspace page tables (and we must point it at a zero page ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
 {
-        typedef void (ttbr_replace_func)(phys_addr_t);
-        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
-        ttbr_replace_func *replace_phys;
-
-        /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
-        phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
-
-        if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
-                /*
-                 * cpu_replace_ttbr1() is used when there's a boot CPU
-                 * up (i.e. cpufeature framework is not up yet) and
-                 * latter only when we enable CNP via cpufeature's
-                 * enable() callback.
-                 * Also we rely on the cpu_hwcap bit being set before
-                 * calling the enable() function.
-                 */
-                ttbr1 |= TTBR_CNP_BIT;
-        }
+        cpu_set_reserved_ttbr0();
+        local_flush_tlb_all();
+        __cpu_set_tcr_t0sz(t0sz);
 
-        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+        /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
+        write_sysreg(ttbr0, ttbr0_el1);
+        isb();
+}
+
+void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp);
 
-        cpu_install_idmap();
-        replace_phys(ttbr1);
-        cpu_uninstall_idmap();
+static inline void cpu_enable_swapper_cnp(void)
+{
+        __cpu_replace_ttbr1(lm_alias(swapper_pg_dir), true);
+}
+
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+{
+        /*
+         * Only for early TTBR1 replacement before cpucaps are finalized and
+         * before we've decided whether to use CNP.
+         */
+        WARN_ON(system_capabilities_finalized());
+        __cpu_replace_ttbr1(pgdp, false);
 }
 
 /*
@@ -172,10 +167,16 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
  * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
  * take CPU migration into account.
  */
-#define destroy_context(mm)             do { } while(0)
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+void check_and_switch_context(struct mm_struct *mm);
 
-#define init_new_context(tsk,mm)        ({ atomic64_set(&(mm)->context.id, 0); 0; })
+#define init_new_context(tsk, mm) init_new_context(tsk, mm)
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+        atomic64_set(&mm->context.id, 0);
+        refcount_set(&mm->context.pinned, 0);
+        return 0;
+}
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,9 +188,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
                 return;
 
         if (mm == &init_mm)
-                ttbr = __pa_symbol(empty_zero_page);
+                ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
         else
-                ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+                ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
 
         WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
@@ -200,6 +201,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
+#define enter_lazy_tlb enter_lazy_tlb
 static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
@@ -212,8 +214,6 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 
 static inline void __switch_mm(struct mm_struct *next)
 {
-        unsigned int cpu = smp_processor_id();
-
         /*
          * init_mm.pgd does not contain any user mappings and it is always
          * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -223,7 +223,7 @@ static inline void __switch_mm(struct mm_struct *next)
                 return;
         }
 
-        check_and_switch_context(next, cpu);
+        check_and_switch_context(next);
 }
 
 static inline void
@@ -242,12 +242,33 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
         update_saved_ttbr0(tsk, next);
 }
 
-#define deactivate_mm(tsk,mm)   do { } while (0)
-#define activate_mm(prev,next)  switch_mm(prev, next, current)
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+        if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+                return cpu_possible_mask;
+
+        if (!is_compat_thread(task_thread_info(p)))
+                return cpu_possible_mask;
+
+        return system_32bit_el0_cpumask();
+}
+#define task_cpu_possible_mask task_cpu_possible_mask
 
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
 
+unsigned long arm64_mm_context_get(struct mm_struct *mm);
+void arm64_mm_context_put(struct mm_struct *mm);
+
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+        return -1UL >> 8;
+}
+
+#include <asm-generic/mmu_context.h>
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* !__ASM_MMU_CONTEXT_H */
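
As a side note on the __cpu_set_tcr_t0sz() change above: the helper is now an
unconditional read-modify-write of the TCR_EL1.T0SZ field rather than an
idmap-only fixup. The following standalone userspace sketch (not kernel code;
the constants mirror arch/arm64/include/asm/pgtable-hwdef.h, and the 39-bit
idmap width is only an example) shows the same field arithmetic:

#include <stdio.h>

/* T0SZ is a 6-bit field at bit 0 of TCR_EL1, holding 64 minus the VA width. */
#define TCR_T0SZ_OFFSET         0
#define TCR_T0SZ_MASK           (0x3fUL << TCR_T0SZ_OFFSET)
#define TCR_T0SZ(va_bits)       ((64UL - (va_bits)) << TCR_T0SZ_OFFSET)

static unsigned long set_t0sz(unsigned long tcr, unsigned long t0sz)
{
        /* Skip the update when the field already holds the wanted value. */
        if (((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET) == t0sz)
                return tcr;

        tcr &= ~TCR_T0SZ_MASK;
        tcr |= t0sz << TCR_T0SZ_OFFSET;
        return tcr;
}

int main(void)
{
        unsigned long tcr = TCR_T0SZ(48);       /* 48-bit VAs: T0SZ = 16 */

        tcr = set_t0sz(tcr, TCR_T0SZ(39));      /* shrink to a 39-bit idmap */
        printf("T0SZ field: %lu\n", (tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
        return 0;
}

Because TCR_T0SZ_OFFSET is zero, a TCR_T0SZ(...) value can be compared against
the extracted field directly, which is what the kernel helper relies on.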
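
Similarly, the update_saved_ttbr0() hunk still packs the ASID into bits 63:48
of the saved TTBR0 value, now with the PGD address run through phys_to_ttbr().
A minimal sketch of that packing (userspace C; the address and ASID values are
invented, and the bit rotation phys_to_ttbr() applies for 52-bit PAs is
ignored here):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pgd_phys = 0x40a1d000ULL;      /* hypothetical PGD address */
        uint64_t asid     = 0x42ULL;            /* hypothetical 16-bit ASID */

        /* TTBR0_EL1 layout: table base address in the low bits, ASID in 63:48 */
        uint64_t ttbr = pgd_phys | (asid << 48);

        printf("saved ttbr0: 0x%016llx\n", (unsigned long long)ttbr);
        return 0;
}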
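
Finally, the new mm_untag_mask() returns -1UL >> 8, i.e. a mask that clears
the top byte of a 64-bit address, which is where TBI/MTE pointer tags live.
A quick standalone demonstration (the tagged pointer value is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask   = UINT64_MAX >> 8;              /* 0x00ffffffffffffff */
        uint64_t tagged = 0x5c00aaff12345678ULL;        /* tag 0x5c in 63:56 */

        printf("mask:     0x%016llx\n", (unsigned long long)mask);
        printf("untagged: 0x%016llx\n", (unsigned long long)(tagged & mask));
        return 0;
}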