summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeremy Gebben <jgebben@codeaurora.org>2016-04-26 09:40:26 -0600
committerKyle Yan <kyan@codeaurora.org>2016-04-27 19:05:00 -0700
commit4da4b1181eaac878141a891d530dac00d6fdce4e (patch)
treebdb439531e05c08b3bcd5836902b94d64f269076
parentf5921cfca0db89db6ab7ced7ea46348179dae0e2 (diff)
Revert "Perf: arm64: stop counters when going into hotplug"
This reverts commit 182eeb0c0daf70acd8 ("Perf: arm64: stop counters when going into hotplug"). This change is being reverted so that it can be replaced by equivalent functionality from upstream. CRs-Fixed: 1008368 Change-Id: Ibf007132366486ae70c1d919f32e933744a1721e Signed-off-by: Jeremy Gebben <jgebben@codeaurora.org>
-rw-r--r--arch/arm64/kernel/perf_debug.c1
-rw-r--r--drivers/perf/arm_pmu.c53
-rw-r--r--include/linux/perf_event.h1
3 files changed, 2 insertions, 53 deletions
diff --git a/arch/arm64/kernel/perf_debug.c b/arch/arm64/kernel/perf_debug.c
index 39cf7c9d7b5d..260c71b54382 100644
--- a/arch/arm64/kernel/perf_debug.c
+++ b/arch/arm64/kernel/perf_debug.c
@@ -32,7 +32,6 @@ static char *descriptions =
"11 Perf: arm64: Refine disable/enable in tracecounters\n"
"12 Perf: arm64: fix disable of pmu irq during hotplug\n"
"13 Perf: arm64: restore registers after reset\n"
- "14 Perf: arm64: stop counters when going into hotplug\n"
"15 Perf: arm64: make debug dir handle exportable\n"
"16 Perf: arm64: add perf trace user\n"
"17 Perf: arm64: add support for kryo pmu\n"
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9795842e700a..67b6b5bc3482 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -30,7 +30,6 @@
#include <asm/irq_regs.h>
static DEFINE_PER_CPU(u32, from_idle);
-static DEFINE_PER_CPU(u32, hotplug_down);
static int
armpmu_map_cache_event(const unsigned (*cache_map)
@@ -765,48 +764,6 @@ static void armpmu_update_counters(void *x)
}
}
-static void armpmu_hotplug_enable(void *parm_pmu)
-{
- struct arm_pmu *armpmu = parm_pmu;
- struct pmu *pmu = &(armpmu->pmu);
- struct pmu_hw_events *hw_events = armpmu->hw_events;
- int idx;
-
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
- struct perf_event *event = hw_events->events[idx];
- if (!event)
- continue;
-
- event->state = event->hotplug_save_state;
- pmu->start(event, 0);
- }
- per_cpu(hotplug_down, smp_processor_id()) = 0;
-}
-
-static void armpmu_hotplug_disable(void *parm_pmu)
-{
- struct arm_pmu *armpmu = parm_pmu;
- struct pmu *pmu = &(armpmu->pmu);
- struct pmu_hw_events *hw_events = armpmu->hw_events;
- int idx;
-
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
- struct perf_event *event = hw_events->events[idx];
- if (!event)
- continue;
-
- event->hotplug_save_state = event->state;
- /*
- * Prevent timer tick handler perf callback from enabling
- * this event and potentially generating an interrupt
- * before the CPU goes down.
- */
- event->state = PERF_EVENT_STATE_OFF;
- pmu->stop(event, 0);
- }
- per_cpu(hotplug_down, smp_processor_id()) = 1;
-}
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
@@ -824,7 +781,6 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
int ret = NOTIFY_DONE;
if ((masked_action != CPU_DOWN_PREPARE) &&
- (masked_action != CPU_DOWN_FAILED) &&
(masked_action != CPU_STARTING))
return NOTIFY_DONE;
@@ -845,7 +801,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
if (cpu_has_active_perf(cpu, cpu_pmu))
smp_call_function_single(cpu,
- armpmu_hotplug_disable, cpu_pmu, 1);
+ armpmu_update_counters, cpu_pmu, 1);
/* Disarm the PMU IRQ before disappearing. */
if (cpu_pmu->plat_device) {
irq = cpu_pmu->percpu_irq;
@@ -856,7 +812,6 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
break;
case CPU_STARTING:
- case CPU_DOWN_FAILED:
/* Reset PMU to clear counters for ftrace buffer */
if (cpu_pmu->reset)
cpu_pmu->reset(NULL);
@@ -869,7 +824,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
cpu_pmu_enable_percpu_irq(&irq);
}
if (cpu_has_active_perf(cpu, cpu_pmu)) {
- armpmu_hotplug_enable(cpu_pmu);
+ get_cpu_var(from_idle) = 1;
pmu = &cpu_pmu->pmu;
pmu->pmu_enable(pmu);
}
@@ -890,10 +845,6 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
if (!cpu_pmu)
return NOTIFY_OK;
- /* If the cpu is going down, don't do anything here */
- if (per_cpu(hotplug_down, cpu))
- return NOTIFY_OK;
-
switch (cmd) {
case CPU_PM_ENTER:
if (cpu_pmu->save_pm_registers)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a9cc2b530409..95fd207e63ca 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -471,7 +471,6 @@ struct perf_event {
struct pmu *pmu;
enum perf_event_active_state state;
- enum perf_event_active_state hotplug_save_state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;