diff options
| author | Neil Leeder <nleeder@codeaurora.org> | 2014-08-29 14:30:57 -0400 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:58:11 -0700 |
| commit | 09e03e5113703a706c4c105f7293080c0ea1aef3 (patch) | |
| tree | 617cfe022da829d163a2e84a71fcee5a84aa3a7f /drivers/perf | |
| parent | 0e4ee435ac293483d156db59cabb14a8445f5979 (diff) | |
Perf: arm64: fix disable of pmu irq during hotplug
PMU irq is disabled when a cpu is hotplugged off
and perf is running. Using cpu_pmu->active_events
to determine if the pmu is running left a window
where it is decremented to 0 in hw_perf_event_destroy,
and then armpmu_release_hardware is called. If a cpu
is hotplugged off in this window it may not disable
its irq. Use a separate flag which is not cleared
until after the irq is released by all online cpus.
The variable needs to be tristate because of the possibility
of a cpu being hotplugged in during this window. In that
case it should not enable its irq when the pmu is being
shut down. Having the GOING_DOWN state allows correct
behavior for cpus both going down and coming up.
Change-Id: I934ba5dec34e681ce8defd7fa7e311b4a2a92c1a
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
[satyap: merge conflict resolution and move changes in
arch/arm64/kernel/perf_event.c to drivers/perf/arm_pmu.c
to align with kernel 4.4]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Diffstat (limited to 'drivers/perf')
| -rw-r--r-- | drivers/perf/arm_pmu.c | 17 |
1 files changed, 13 insertions, 4 deletions
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index bb6f8bdbf01b..d8708fe77317 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -370,6 +370,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
 
@@ -630,6 +632,14 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (!irqs)
+		return;
+
+	/*
+	 * If a cpu comes online during this function, do not enable its irq.
+	 * If a cpu goes offline, it should disable its irq.
+	 */
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
@@ -649,6 +659,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 			free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -766,7 +777,6 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 	struct arm_pmu *cpu_pmu = container_of(b, struct arm_pmu, hotplug_nb);
 	int irq;
 	struct pmu *pmu;
-	int perf_running;
 	unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
 	int ret = NOTIFY_DONE;
@@ -783,13 +793,12 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 	if (!cpumask_test_cpu(cpu, &cpu_pmu->supported_cpus))
 		return NOTIFY_DONE;
 
-	perf_running = atomic_read(&cpu_pmu->active_events);
 	switch (masked_action) {
 	case CPU_DOWN_PREPARE:
 		if (cpu_pmu->save_pm_registers)
 			smp_call_function_single(cpu,
 				cpu_pmu->save_pm_registers, hcpu, 1);
-		if (perf_running) {
+		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
 			if (cpu_has_active_perf(cpu, cpu_pmu))
 				smp_call_function_single(cpu,
 					armpmu_update_counters, cpu_pmu, 1);
@@ -808,7 +817,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 			cpu_pmu->reset(NULL);
 		if (cpu_pmu->restore_pm_registers)
 			cpu_pmu->restore_pm_registers(hcpu);
-		if (perf_running) {
+		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
 			/* Arm the PMU IRQ before appearing. */
 			if (cpu_pmu->plat_device) {
 				irq = cpu_pmu->percpu_irq;
