author		Neil Leeder <nleeder@codeaurora.org>	2014-10-28 14:44:37 -0400
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:58:09 -0700
commit		86cd54bb1953eaebbd8d466672851c545342ead0 (patch)
tree		7295fe74a73b524f041579328ff53d51d1fb77f3 /kernel
parent		0c718959c8264db200f6e8bbdd417c4cc6cadf38 (diff)
perf: support hotplug
Add support for hotplugged CPU cores. PMUs that set the new
events_across_hotplug flag keep their per-CPU event lists alive when
a CPU goes down: only the software cpu-clock hrtimer is stopped on
CPU_DOWN_PREPARE and restarted on CPU_STARTING. If an event's fd is
closed while its CPU is offline, the removal path brings the CPU back
up and retries, so the context is still torn down gracefully.

Change-Id: I0538ed67f1ad90bbd0510a7ba137cb6d1ad42172
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
[satyap: trivial merge conflict resolution]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
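The mechanism is an opt-in flag on struct pmu: every PMU touched by this
patch sets .events_across_hotplug = 1, telling perf_event_exit_cpu_context()
to leave its per-CPU event lists alive. The flag itself lands in
include/linux/perf_event.h elsewhere in this series (this view is limited
to 'kernel'). A minimal sketch of a driver opting in, with hypothetical
my_pmu_* callbacks standing in for a real implementation:

/*
 * Sketch only: a PMU that keeps its events across CPU hotplug.
 * The my_pmu_* callbacks are hypothetical placeholders;
 * events_across_hotplug is the flag introduced by this series.
 */
#include <linux/perf_event.h>

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* per-CPU PMU */
	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.start		= my_pmu_start,
	.stop		= my_pmu_stop,
	.read		= my_pmu_read,

	/* Do not tear these events down when a CPU goes offline. */
	.events_across_hotplug = 1,
};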
Diffstat (limited to 'kernel')
 kernel/events/core.c          | 113
 kernel/events/hw_breakpoint.c |   2
 2 files changed, 110 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1087bbeb152b..2050f3245ed7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -176,7 +176,11 @@ static struct srcu_struct pmus_srcu;
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
+#ifdef CONFIG_PERF_EVENTS_USERMODE
+int sysctl_perf_event_paranoid __read_mostly = -1;
+#else
int sysctl_perf_event_paranoid __read_mostly = 1;
+#endif
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -1657,7 +1661,30 @@ static int __perf_remove_from_context(void *info)
}
-/*
+#ifdef CONFIG_SMP
+static void perf_retry_remove(struct perf_event *event)
+{
+ int up_ret;
+ /*
+ * CPU was offline. Bring it online so we can
+ * gracefully exit a perf context.
+ */
+ up_ret = cpu_up(event->cpu);
+ if (!up_ret)
+ /* Try the remove call once again. */
+ cpu_function_call(event->cpu, __perf_remove_from_context,
+ event);
+ else
+ pr_err("Failed to bring up CPU: %d, ret: %d\n",
+ event->cpu, up_ret);
+}
+#else
+static void perf_retry_remove(struct perf_event *event)
+{
+}
+#endif
+
+/*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
@@ -1670,7 +1697,8 @@ static int __perf_remove_from_context(void *info)
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+static void __ref perf_remove_from_context(struct perf_event *event,
+ bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1678,6 +1706,7 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
.event = event,
.detach_group = detach_group,
};
+ int ret;
lockdep_assert_held(&ctx->mutex);
@@ -1688,7 +1717,11 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
* already called __perf_remove_from_context from
* perf_event_exit_cpu.
*/
- cpu_function_call(event->cpu, __perf_remove_from_context, &re);
+ ret = cpu_function_call(event->cpu, __perf_remove_from_context,
+ &re);
+ if (ret == -ENXIO)
+ perf_retry_remove(event);
+
return;
}
@@ -3844,6 +3877,15 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
*/
static int perf_release(struct inode *inode, struct file *file)
{
+ struct perf_event *event = file->private_data;
+
+ /*
+ * Event can be in state OFF because of a constraint check.
+ * Change to ACTIVE so that it gets cleaned up correctly.
+ */
+ if ((event->state == PERF_EVENT_STATE_OFF) &&
+ event->attr.constraint_duplicate)
+ event->state = PERF_EVENT_STATE_ACTIVE;
put_event(file->private_data);
return 0;
}
@@ -6920,6 +6962,8 @@ static struct pmu perf_swevent = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
@@ -7041,6 +7085,8 @@ static struct pmu perf_tracepoint = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
static inline void perf_tp_register(void)
@@ -7319,6 +7365,8 @@ static struct pmu perf_cpu_clock = {
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
+
+ .events_across_hotplug = 1,
};
/*
@@ -7400,6 +7448,8 @@ static struct pmu perf_task_clock = {
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
+
+ .events_across_hotplug = 1,
};
static void perf_pmu_nop_void(struct pmu *pmu)
@@ -9298,6 +9348,18 @@ static void __perf_event_exit_context(void *__info)
rcu_read_unlock();
}
+static void __perf_event_stop_swclock(void *__info)
+{
+ struct perf_event_context *ctx = __info;
+ struct perf_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
+ if (event->attr.config == PERF_COUNT_SW_CPU_CLOCK &&
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ cpu_clock_event_stop(event, 0);
+ }
+}
+
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_event_context *ctx;
@@ -9307,20 +9369,56 @@ static void perf_event_exit_cpu_context(int cpu)
idx = srcu_read_lock(&pmus_srcu);
list_for_each_entry_rcu(pmu, &pmus, entry) {
ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
-
mutex_lock(&ctx->mutex);
- smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+ /*
+ * If the PMU supports keeping events across hotplug, do not
+ * detach the event list; keep it alive across CPU hotplug. The
+ * context is torn down via the fd close path once userspace is
+ * done and the target CPU is back online. If a software clock
+ * event is active, stop the hrtimer associated with it here and
+ * restart it when the CPU comes back online.
+ */
+ if (!pmu->events_across_hotplug)
+ smp_call_function_single(cpu, __perf_event_exit_context,
+ ctx, 1);
+ else
+ smp_call_function_single(cpu, __perf_event_stop_swclock,
+ ctx, 1);
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
}
+static void perf_event_start_swclock(int cpu)
+{
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ int idx;
+ struct perf_event *event, *tmp;
+
+ idx = srcu_read_lock(&pmus_srcu);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ if (pmu->events_across_hotplug) {
+ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+ list_for_each_entry_safe(event, tmp, &ctx->event_list,
+ event_entry) {
+ if (event->attr.config ==
+ PERF_COUNT_SW_CPU_CLOCK &&
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ cpu_clock_event_start(event, 0);
+ }
+ }
+ }
+ srcu_read_unlock(&pmus_srcu, idx);
+}
+
static void perf_event_exit_cpu(int cpu)
{
perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
+static inline void perf_event_start_swclock(int cpu) { }
#endif
static int
@@ -9359,6 +9457,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
case CPU_DOWN_PREPARE:
perf_event_exit_cpu(cpu);
break;
+
+ case CPU_STARTING:
+ perf_event_start_swclock(cpu);
+ break;
+
default:
break;
}
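End to end, the core.c changes behave as follows for an opted-in PMU: on
CPU_DOWN_PREPARE only the software cpu-clock hrtimers are stopped
(__perf_event_stop_swclock), on CPU_STARTING they are restarted
(perf_event_start_swclock), and a close() issued while the CPU is still
offline takes the perf_retry_remove() path, which brings the CPU back up
so the context can be detached gracefully. An illustration from userspace
(not part of the patch; standard perf_event_open usage):

/*
 * Illustration only: a CPU-bound software event that, with this
 * patch, survives its target CPU being offlined and onlined.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	/* pid = -1, cpu = 1: count on CPU 1 regardless of task */
	fd = syscall(__NR_perf_event_open, &attr, -1, 1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... CPU 1 may go offline and come back here; the event,
	 * and its hrtimer, resume counting on CPU_STARTING ... */

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("cpu-clock: %lld ns\n", count);

	/* If CPU 1 is still offline, close() retries removal via cpu_up(). */
	close(fd);
	return 0;
}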
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 92ce5f4ccc26..7da5b674d16e 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -614,6 +614,8 @@ static struct pmu perf_breakpoint = {
.start = hw_breakpoint_start,
.stop = hw_breakpoint_stop,
.read = hw_breakpoint_pmu_read,
+
+ .events_across_hotplug = 1,
};
int __init init_hw_breakpoint(void)
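
A side note on the first core.c hunk: with CONFIG_PERF_EVENTS_USERMODE
enabled, the perf_event_paranoid default drops from 1 to -1, removing the
unprivileged-use restrictions described in the comment above it. A
standalone check of the running default (illustration only):

/* Illustration only: print the kernel's perf_event_paranoid setting. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f || fscanf(f, "%d", &level) != 1) {
		perror("perf_event_paranoid");
		return 1;
	}
	fclose(f);

	/* -1: no restrictions (the CONFIG_PERF_EVENTS_USERMODE default),
	 *  1: disallow cpu events for unpriv,
	 *  2: disallow kernel profiling for unpriv */
	printf("perf_event_paranoid = %d\n", level);
	return 0;
}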