Diffstat (limited to 'arch/arm64/kernel/smp.c')
-rw-r--r--  arch/arm64/kernel/smp.c  563
1 file changed, 339 insertions(+), 224 deletions(-)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d4ed9a19d8fe..4ced34f62dab 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -30,8 +30,11 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/kexec.h>
+#include <linux/kgdb.h>
#include <linux/kvm_host.h>
+#include <linux/nmi.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -43,8 +46,6 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
@@ -52,7 +53,6 @@
#include <asm/ptrace.h>
#include <asm/virt.h>
-#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
@@ -65,7 +65,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
*/
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
-int cpus_stuck_in_kernel;
+static int cpus_stuck_in_kernel;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -74,10 +74,24 @@ enum ipi_msg_type {
IPI_CPU_CRASH_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP
+ NR_IPI,
+ /*
+ * Any enum >= NR_IPI and < MAX_IPI is special and not traceable
+ * with trace_ipi_*
+ */
+ IPI_CPU_BACKTRACE = NR_IPI,
+ IPI_KGDB_ROUNDUP,
+ MAX_IPI
};
+static int ipi_irq_base __ro_after_init;
+static int nr_ipi __ro_after_init = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+
+static void ipi_setup(int cpu);
+
#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
@@ -93,8 +107,10 @@ static inline int op_cpu_kill(unsigned int cpu)
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- if (cpu_ops[cpu]->cpu_boot)
- return cpu_ops[cpu]->cpu_boot(cpu);
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops->cpu_boot)
+ return ops->cpu_boot(cpu);
return -EOPNOTSUPP;
}
@@ -111,67 +127,58 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* page tables.
*/
secondary_data.task = idle;
- secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
- /*
- * Now bring the CPU into our world.
- */
+ /* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
- if (ret == 0) {
- /*
- * CPU was successfully started, wait for it to come online or
- * time out.
- */
- wait_for_completion_timeout(&cpu_running,
- msecs_to_jiffies(5000));
-
- if (!cpu_online(cpu)) {
- pr_crit("CPU%u: failed to come online\n", cpu);
- ret = -EIO;
- }
- } else {
+ if (ret) {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
return ret;
}
+ /*
+ * CPU was successfully started, wait for it to come online or
+ * time out.
+ */
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(5000));
+ if (cpu_online(cpu))
+ return 0;
+
+ pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
- secondary_data.stack = NULL;
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
- if (ret && status) {
+ if (status == CPU_MMU_OFF)
+ status = READ_ONCE(__early_cpu_boot_status);
- if (status == CPU_MMU_OFF)
- status = READ_ONCE(__early_cpu_boot_status);
-
- switch (status & CPU_BOOT_STATUS_MASK) {
- default:
- pr_err("CPU%u: failed in unknown state : 0x%lx\n",
- cpu, status);
- cpus_stuck_in_kernel++;
- break;
- case CPU_KILL_ME:
- if (!op_cpu_kill(cpu)) {
- pr_crit("CPU%u: died during early boot\n", cpu);
- break;
- }
- pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
- case CPU_STUCK_IN_KERNEL:
- pr_crit("CPU%u: is stuck in kernel\n", cpu);
- if (status & CPU_STUCK_REASON_52_BIT_VA)
- pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
- if (status & CPU_STUCK_REASON_NO_GRAN)
- pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
- cpus_stuck_in_kernel++;
+ switch (status & CPU_BOOT_STATUS_MASK) {
+ default:
+ pr_err("CPU%u: failed in unknown state : 0x%lx\n",
+ cpu, status);
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_KILL_ME:
+ if (!op_cpu_kill(cpu)) {
+ pr_crit("CPU%u: died during early boot\n", cpu);
break;
- case CPU_PANIC_KERNEL:
- panic("CPU%u detected unsupported configuration\n", cpu);
}
+ pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ fallthrough;
+ case CPU_STUCK_IN_KERNEL:
+ pr_crit("CPU%u: is stuck in kernel\n", cpu);
+ if (status & CPU_STUCK_REASON_52_BIT_VA)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+ if (status & CPU_STUCK_REASON_NO_GRAN) {
+ pr_crit("CPU%u: does not support %luK granule\n",
+ cpu, PAGE_SIZE / SZ_1K);
+ }
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_PANIC_KERNEL:
+ panic("CPU%u detected unsupported configuration\n", cpu);
}
- return ret;
+ return -EIO;
}
static void init_gic_priority_masking(void)
@@ -184,6 +191,7 @@ static void init_gic_priority_masking(void)
cpuflags = read_sysreg(daif);
WARN_ON(!(cpuflags & PSR_I_BIT));
+ WARN_ON(!(cpuflags & PSR_F_BIT));
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}
@@ -196,10 +204,8 @@ asmlinkage notrace void secondary_start_kernel(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
- unsigned int cpu;
-
- cpu = task_cpu(current);
- set_my_cpu_offset(per_cpu_offset(cpu));
+ const struct cpu_operations *ops;
+ unsigned int cpu = smp_processor_id();
/*
* All kernel threads share the same mm context; grab a
@@ -217,7 +223,7 @@ asmlinkage notrace void secondary_start_kernel(void)
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
- preempt_disable();
+ rcutree_report_cpu_starting(cpu);
trace_hardirqs_off();
/*
@@ -227,20 +233,23 @@ asmlinkage notrace void secondary_start_kernel(void)
*/
check_local_cpu_capabilities();
- if (cpu_ops[cpu]->cpu_postboot)
- cpu_ops[cpu]->cpu_postboot();
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_postboot)
+ ops->cpu_postboot();
/*
* Log the CPU info before it is marked online and might get read.
*/
cpuinfo_store_cpu();
+ store_cpu_topology(cpu);
/*
* Enable GIC and timers.
*/
notify_cpu_starting(cpu);
- store_cpu_topology(cpu);
+ ipi_setup(cpu);
+
numa_add_cpu(cpu);
/*
@@ -266,19 +275,21 @@ asmlinkage notrace void secondary_start_kernel(void)
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
/*
* If we don't have a cpu_die method, abort before we reach the point
* of no return. CPU0 may not have an cpu_ops, so test for it.
*/
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ if (!ops || !ops->cpu_die)
return -EOPNOTSUPP;
/*
* We may need to abort a hot unplug for some other mechanism-specific
* reason.
*/
- if (cpu_ops[cpu]->cpu_disable)
- return cpu_ops[cpu]->cpu_disable(cpu);
+ if (ops->cpu_disable)
+ return ops->cpu_disable(cpu);
return 0;
}
@@ -303,6 +314,7 @@ int __cpu_disable(void)
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
@@ -314,30 +326,28 @@ int __cpu_disable(void)
static int op_cpu_kill(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
/*
* If we have no means of synchronising with the dying CPU, then assume
* that it is really dead. We can only wait for an arbitrary length of
* time and hope that it's dead, so let's skip the wait and just hope.
*/
- if (!cpu_ops[cpu]->cpu_kill)
+ if (!ops->cpu_kill)
return 0;
- return cpu_ops[cpu]->cpu_kill(cpu);
+ return ops->cpu_kill(cpu);
}
/*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
+ * Called on the thread which is asking for a CPU to be shutdown after the
+ * shutdown completed.
*/
-void __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
int err;
- if (!cpu_wait_death(cpu, 5)) {
- pr_crit("CPU%u: cpu didn't die\n", cpu);
- return;
- }
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
@@ -354,33 +364,44 @@ void __cpu_die(unsigned int cpu)
* Called from the idle thread for the CPU which has been shutdown.
*
*/
-void cpu_die(void)
+void __noreturn cpu_die(void)
{
unsigned int cpu = smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
idle_task_exit();
local_daif_mask();
- /* Tell __cpu_die() that this CPU is now safe to dispose of */
- (void)cpu_report_death();
+ /* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */
+ cpuhp_ap_report_dead();
/*
* Actually shutdown the CPU. This must never fail. The specific hotplug
* mechanism must perform all required cache maintenance to ensure that
* no dirty lines are lost in the process of shutting down the CPU.
*/
- cpu_ops[cpu]->cpu_die(cpu);
+ ops->cpu_die(cpu);
BUG();
}
#endif
+static void __cpu_try_die(int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops && ops->cpu_die)
+ ops->cpu_die(cpu);
+#endif
+}
+
/*
* Kill the calling secondary CPU, early in bringup before it is turned
* online.
*/
-void cpu_die_early(void)
+void __noreturn cpu_die_early(void)
{
int cpu = smp_processor_id();
@@ -388,13 +409,13 @@ void cpu_die_early(void)
/* Mark this CPU absent */
set_cpu_present(cpu, 0);
+ rcutree_report_cpu_dead();
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ update_cpu_boot_status(CPU_KILL_ME);
+ __cpu_try_die(cpu);
+ }
-#ifdef CONFIG_HOTPLUG_CPU
- update_cpu_boot_status(CPU_KILL_ME);
- /* Check if we can park ourselves */
- if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
-#endif
update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
cpu_park_loop();
@@ -409,61 +430,38 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
- if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+ if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
kvm_compute_layout();
+ kvm_apply_hyp_relocations();
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- setup_cpu_features();
hyp_mode_check();
- apply_alternatives_all();
+ setup_system_features();
+ setup_user_features();
mark_linear_text_alias_ro();
}
void __init smp_prepare_boot_cpu(void)
{
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
- cpuinfo_store_boot_cpu();
-
/*
- * We now know enough about the boot CPU to apply the
- * alternatives that cannot wait until interrupt handling
- * and/or scheduling is enabled.
+ * The runtime per-cpu areas have been allocated by
+ * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+ * freed shortly, so we must move over to the runtime per-cpu area.
*/
- apply_boot_alternatives();
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
+ cpuinfo_store_boot_cpu();
+ setup_boot_cpu_features();
/* Conditionally switch to GIC PMR for interrupt masking */
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
-}
-
-static u64 __init of_get_cpu_mpidr(struct device_node *dn)
-{
- const __be32 *cell;
- u64 hwid;
-
- /*
- * A cpu node with missing "reg" property is
- * considered invalid to build a cpu_logical_map
- * entry.
- */
- cell = of_get_property(dn, "reg", NULL);
- if (!cell) {
- pr_err("%pOF: missing reg property\n", dn);
- return INVALID_HWID;
- }
- hwid = of_read_number(cell, of_n_addr_cells(dn));
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK) {
- pr_err("%pOF: invalid reg property\n", dn);
- return INVALID_HWID;
- }
- return hwid;
+ kasan_init_hw_tags();
}
/*
@@ -488,10 +486,13 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
*/
static int __init smp_cpu_setup(int cpu)
{
- if (cpu_read_ops(cpu))
+ const struct cpu_operations *ops;
+
+ if (init_cpu_ops(cpu))
return -ENODEV;
- if (cpu_ops[cpu]->cpu_init(cpu))
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_init(cpu))
return -ENODEV;
set_cpu_possible(cpu, true);
@@ -509,6 +510,7 @@ struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
return &cpu_madt_gicc[cpu];
}
+EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);
/*
* acpi_map_gic_cpu_interface - parse processor MADT entry
@@ -521,7 +523,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
u64 hwid = processor->arm_mpidr;
- if (!(processor->flags & ACPI_MADT_ENABLED)) {
+ if (!acpi_gicc_is_usable(processor)) {
pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
return;
}
@@ -552,7 +554,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
/* map the logical cpu id to cpu MPIDR */
- cpu_logical_map(cpu_count) = hwid;
+ set_cpu_logical_map(cpu_count, hwid);
cpu_madt_gicc[cpu_count] = *processor;
@@ -626,9 +628,9 @@ static void __init of_parse_and_init_cpus(void)
struct device_node *dn;
for_each_of_cpu_node(dn) {
- u64 hwid = of_get_cpu_mpidr(dn);
+ u64 hwid = of_get_cpu_hwid(dn, 0);
- if (hwid == INVALID_HWID)
+ if (hwid & ~MPIDR_HWID_BITMASK)
goto next;
if (is_mpidr_duplicate(cpu_count, hwid)) {
@@ -666,7 +668,7 @@ static void __init of_parse_and_init_cpus(void)
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
- cpu_logical_map(cpu_count) = hwid;
+ set_cpu_logical_map(cpu_count, hwid);
early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
@@ -707,13 +709,14 @@ void __init smp_init_cpus(void)
for (i = 1; i < nr_cpu_ids; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
- cpu_logical_map(i) = INVALID_HWID;
+ set_cpu_logical_map(i, INVALID_HWID);
}
}
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ const struct cpu_operations *ops;
int err;
unsigned int cpu;
unsigned int this_cpu;
@@ -744,10 +747,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!cpu_ops[cpu])
+ ops = get_cpu_ops(cpu);
+ if (!ops)
continue;
- err = cpu_ops[cpu]->cpu_prepare(cpu);
+ err = ops->cpu_prepare(cpu);
if (err)
continue;
@@ -756,31 +760,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- __smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_WAKEUP, "CPU wake-up interrupts"),
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
};
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
- trace_ipi_raise(target, ipi_types[ipinr]);
- __smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
+
+unsigned long irq_err_count;
-void show_ipi_list(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
unsigned int cpu, i;
@@ -788,21 +781,12 @@ void show_ipi_list(struct seq_file *p, int prec)
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
-}
-
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+ return 0;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -815,22 +799,14 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_WAKEUP);
-}
-#endif
-
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
- if (__smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
-static void local_cpu_stop(void)
+static void __noreturn local_cpu_stop(void)
{
set_cpu_online(smp_processor_id(), false);
@@ -844,7 +820,7 @@ static void local_cpu_stop(void)
* that cpu_online_mask gets correctly updated and smp_send_stop() can skip
* CPUs that have already stopped themselves.
*/
-void panic_smp_self_stop(void)
+void __noreturn panic_smp_self_stop(void)
{
local_cpu_stop();
}
@@ -853,7 +829,7 @@ void panic_smp_self_stop(void)
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif
-static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
crash_save_cpu(regs, cpu);
@@ -863,28 +839,57 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
local_irq_disable();
sdei_mask_local_cpu();
-#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
-#endif
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ __cpu_try_die(cpu);
/* just in case */
cpu_park_loop();
+#else
+ BUG();
#endif
}
+static void arm64_backtrace_ipi(cpumask_t *mask)
+{
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ /*
+ * NOTE: though nmi_trigger_cpumask_backtrace() has "nmi_" in the name,
+ * nothing about it truly needs to be implemented using an NMI, it's
+ * just that it's _allowed_ to work with NMIs. If ipi_should_be_nmi()
+ * returned false our backtrace attempt will just use a regular IPI.
+ */
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(void)
+{
+ int this_cpu = raw_smp_processor_id();
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ /* No need to roundup ourselves */
+ if (cpu == this_cpu)
+ continue;
+
+ __ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+ }
+}
+#endif
+
/*
* Main handler for inter-processor interrupts
*/
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_entry(ipi_types[ipinr]);
switch (ipinr) {
case IPI_RESCHEDULE:
@@ -892,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
local_cpu_stop();
- irq_exit();
break;
case IPI_CPU_CRASH_STOP:
if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
- irq_enter();
- ipi_cpu_crash_stop(cpu, regs);
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
unreachable();
}
@@ -914,27 +914,27 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
irq_work_run();
- irq_exit();
break;
#endif
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
- case IPI_WAKEUP:
- WARN_ONCE(!acpi_parking_protocol_valid(cpu),
- "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
- cpu);
+ case IPI_CPU_BACKTRACE:
+ /*
+ * NOTE: in some cases this _won't_ be NMI context. See the
+ * comment in arch_trigger_cpumask_backtrace().
+ */
+ nmi_cpu_backtrace(get_irq_regs());
+ break;
+
+ case IPI_KGDB_ROUNDUP:
+ kgdb_nmicallback(cpu, get_irq_regs());
break;
-#endif
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
@@ -942,15 +942,121 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
- set_irq_regs(old_regs);
+ trace_ipi_exit(ipi_types[ipinr]);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
+{
+ if (!system_uses_irq_prio_masking())
+ return false;
+
+ switch (ipi) {
+ case IPI_CPU_STOP:
+ case IPI_CPU_CRASH_STOP:
+ case IPI_CPU_BACKTRACE:
+ case IPI_KGDB_ROUNDUP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++) {
+ if (ipi_should_be_nmi(i)) {
+ prepare_percpu_nmi(ipi_irq_base + i);
+ enable_percpu_nmi(ipi_irq_base + i, 0);
+ } else {
+ enable_percpu_irq(ipi_irq_base + i, 0);
+ }
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++) {
+ if (ipi_should_be_nmi(i)) {
+ disable_percpu_nmi(ipi_irq_base + i);
+ teardown_percpu_nmi(ipi_irq_base + i);
+ } else {
+ disable_percpu_irq(ipi_irq_base + i);
+ }
+ }
+}
+#endif
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ if (ipi_should_be_nmi(i)) {
+ err = request_percpu_nmi(ipi_base + i, ipi_handler,
+ "IPI", &cpu_number);
+ WARN(err, "Could not request IPI %d as NMI, err=%d\n",
+ i, err);
+ } else {
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &cpu_number);
+ WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
+ i, err);
+ }
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
}
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi(unsigned int cpu)
+{
+ /*
+ * We use a scheduler IPI to wake the CPU as this avoids the need for a
+ * dedicated IPI and we can safely handle spurious scheduler IPIs.
+ */
+ smp_send_reschedule(cpu);
+}
+#endif
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -958,11 +1064,22 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+ unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+ return num_online_cpus() - this_cpu_online;
+}
+
void smp_send_stop(void)
{
unsigned long timeout;
- if (num_online_cpus() > 1) {
+ if (num_other_online_cpus()) {
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
@@ -975,10 +1092,10 @@ void smp_send_stop(void)
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
- while (num_online_cpus() > 1 && timeout--)
+ while (num_other_online_cpus() && timeout--)
udelay(1);
- if (num_online_cpus() > 1)
+ if (num_other_online_cpus())
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
@@ -1001,15 +1118,17 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
- if (num_online_cpus() == 1) {
- sdei_mask_local_cpu();
- return;
- }
+ /*
+ * If this cpu is the only one alive at this point in time, online or
+ * not, there are no stop messages to be sent around, so just back out.
+ */
+ if (num_other_online_cpus() == 0)
+ goto skip_ipi;
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
@@ -1023,7 +1142,9 @@ void crash_smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
+skip_ipi:
sdei_mask_local_cpu();
+ sdei_handler_abort();
}
bool smp_crash_stop_failed(void)
@@ -1032,20 +1153,13 @@ bool smp_crash_stop_failed(void)
}
#endif
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(any_cpu);
- if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+ if (ops && ops->cpu_die)
return true;
#endif
return false;
@@ -1055,5 +1169,6 @@ bool cpus_are_stuck_in_kernel(void)
{
bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
- return !!cpus_stuck_in_kernel || smp_spin_tables;
+ return !!cpus_stuck_in_kernel || smp_spin_tables ||
+ is_protected_kvm_enabled();
}
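
For context, a minimal, hypothetical sketch of the driver side of the new IPI plumbing (this is not the actual drivers/irqchip/irq-gic-v3.c code): an irqchip allocates a block of per-CPU SGIs from its IRQ domain and hands the range to the arch code above via set_smp_ipi_range(). The function name, domain pointer and fwspec layout below are illustrative assumptions modelled on GICv3; only the final set_smp_ipi_range() call is the interface defined in this file.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/numa.h>
#include <asm/smp.h>

/* Hypothetical driver-side hook; names here are illustrative only. */
static void __init example_smp_ipi_init(struct irq_domain *domain)
{
	/* Assumption: a single fwspec cell selects the SGI number. */
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= domain->fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	/* Allocate eight consecutive Linux IRQs backed by SGIs 0..7. */
	base_sgi = irq_domain_alloc_irqs(domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;

	/*
	 * Hand the range to the arch code: set_smp_ipi_range() requests
	 * each IRQ as a per-CPU IRQ or pseudo-NMI, stashes its irq_desc
	 * in ipi_desc[], and enables the boot CPU's IPIs via ipi_setup().
	 */
	set_smp_ipi_range(base_sgi, 8);
}

Passing eight SGIs matches MAX_IPI in the diff above: six traceable IPIs (NR_IPI) plus the untraced IPI_CPU_BACKTRACE and IPI_KGDB_ROUNDUP ones; set_smp_ipi_range() warns if fewer are provided.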