Diffstat (limited to 'arch/arm/kernel/smp.c')
 arch/arm/kernel/smp.c | 67 ++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 44 insertions(+), 23 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b26361355dae..a1e4ff99d1bc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -384,12 +384,12 @@ asmlinkage void secondary_start_kernel(void)
 	if (smp_ops.smp_secondary_init)
 		smp_ops.smp_secondary_init(cpu);
 
+	smp_store_cpu_info(cpu);
+
 	notify_cpu_starting(cpu);
 
 	calibrate_delay();
 
-	smp_store_cpu_info(cpu);
-
 	/*
 	 * OK, now it's safe to let the boot CPU continue. Wait for
 	 * the CPU migration code to notice that the CPU is online
@@ -462,6 +462,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 
 static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+
+static void smp_cross_call_common(const struct cpumask *cpumask,
+				  unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	__smp_cross_call(cpumask, func);
+}
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
@@ -469,6 +481,21 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 	__smp_cross_call = fn;
 }
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call_common(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -483,6 +510,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
+	unsigned int cpu;
+
+	for_each_cpu(cpu, target)
+		per_cpu(pending_ipi, cpu) = true;
+
 	trace_ipi_raise(target, ipi_types[ipinr]);
 	__smp_cross_call(target, ipinr);
 }
@@ -513,21 +545,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -558,7 +575,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 		raw_spin_unlock(&stop_lock);
 	}
 
-	set_cpu_online(cpu, false);
+	set_cpu_active(cpu, false);
 
 	local_fiq_disable();
 	local_irq_disable();
@@ -660,12 +677,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+	per_cpu(pending_ipi, cpu) = false;
+
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	BUG_ON(cpu_is_offline(cpu));
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -676,14 +697,14 @@ void smp_send_stop(void)
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
 	if (!cpumask_empty(&mask))
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
-	while (num_online_cpus() > 1 && timeout--)
+	while (num_active_cpus() > 1 && timeout--)
 		udelay(1);
 
-	if (num_online_cpus() > 1)
+	if (num_active_cpus() > 1)
 		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
@@ -758,7 +779,7 @@ static void raise_nmi(cpumask_t *mask)
 	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
 		nmi_cpu_backtrace(NULL);
 
-	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_all_cpu_backtrace(bool include_self)
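
Taken together, the additions funnel every IPI sender through smp_cross_call_common(), which latches a per-CPU pending_ipi flag before the interrupt is raised, while handle_IPI() clears the flag only after the IPI has been serviced. Anything that polls the flag (say, an idle path deciding whether a core may be powered down) therefore never sees a window in which an in-flight IPI is invisible. Below is a minimal standalone C sketch of that latch pattern, runnable in userspace; NCPUS, the boolean mask, and raise_ipi() are hypothetical stand-ins for the kernel's cpumask and per-CPU machinery, not kernel APIs.

/*
 * Standalone sketch of the per-CPU "pending IPI" latch used in the
 * patch above.  NCPUS, the bool mask, and raise_ipi() are illustrative
 * stand-ins, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

static bool pending_ipi[NCPUS];	/* models DEFINE_PER_CPU(bool, pending_ipi) */

/* Stand-in for __smp_cross_call(): would trigger the hardware IPI. */
static void raise_ipi(unsigned int cpu, unsigned int ipinr)
{
	printf("raise IPI %u on cpu%u\n", ipinr, cpu);
}

/* Mirrors smp_cross_call_common(): latch the flag, then raise the IPI. */
static void cross_call_common(const bool *mask, unsigned int ipinr)
{
	for (unsigned int cpu = 0; cpu < NCPUS; cpu++) {
		if (mask[cpu]) {
			pending_ipi[cpu] = true;	/* set before the IPI fires */
			raise_ipi(cpu, ipinr);
		}
	}
}

/* Mirrors the tail of handle_IPI(): clear the latch after servicing. */
static void handle_ipi(unsigned int cpu)
{
	/* ... dispatch on the IPI number and service it ... */
	pending_ipi[cpu] = false;
}

int main(void)
{
	bool mask[NCPUS] = { false, true, true, false };

	cross_call_common(mask, 3 /* e.g. IPI_CALL_FUNC */);
	handle_ipi(1);	/* cpu1 services its IPI; cpu2 has not run yet */

	printf("cpu1 pending: %d, cpu2 pending: %d\n",
	       pending_ipi[1], pending_ipi[2]);
	return 0;
}

Note the ordering the patch relies on: the flag is set before __smp_cross_call() fires and cleared at the very tail of handle_IPI(), so it conservatively over-covers the IPI's lifetime rather than under-covering it. The switch from the online mask to the active mask in ipi_cpu_stop() and smp_send_stop() is the complementary half: a stopping CPU drops out of the set being waited on via set_cpu_active(cpu, false) even while it is still formally marked online.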