summary | refs | log | tree | commit | diff
path: root/kernel
diff options
context:
space:
mode:
authorPatrick Fay <pfay@codeaurora.org>2016-08-11 17:49:20 -0700
committerPatrick Fay <pfay@codeaurora.org>2016-08-13 15:11:24 -0700
commit573979dee2a76e4d7f61cb867c12afbaed7e1eb5 (patch)
treef614d460d5677c1162ae781f71ff79a51e251f13 /kernel
parent1f0f95c5fe9ef3cf90cdd5f4686e3ea1be2a2545 (diff)
perf: Add support for exclude_idle attribute
Use the exclude_idle attribute of the perf events to avoid reading PMUs of idle CPUs. The counter values are updated when the CPU enters idle, and the saved value is returned when the idle CPU is queried for that event, provided the attribute is set in the perf_event.

Change-Id: I61f7a7474856abf67ac6dfd9e531702072e108a5
Signed-off-by: Patrick Fay <pfay@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/events/core.c30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 96100cc046c5..32e2617d654f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -158,6 +158,7 @@ enum event_type_t {
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(bool, is_idle);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -3388,9 +3389,12 @@ static int perf_event_read(struct perf_event *event, bool group)
.group = group,
.ret = 0,
};
- smp_call_function_single(event->oncpu,
- __perf_event_read, &data, 1);
- ret = data.ret;
+ if (!event->attr.exclude_idle ||
+ !per_cpu(is_idle, event->oncpu)) {
+ smp_call_function_single(event->oncpu,
+ __perf_event_read, &data, 1);
+ ret = data.ret;
+ }
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
@@ -9479,6 +9483,25 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action) {
+ case IDLE_START:
+ __this_cpu_write(is_idle, true);
+ break;
+ case IDLE_END:
+ __this_cpu_write(is_idle, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+ .notifier_call = event_idle_notif,
+};
+
void __init perf_event_init(void)
{
int ret;
@@ -9492,6 +9515,7 @@ void __init perf_event_init(void)
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
+ idle_notifier_register(&perf_event_idle_nb);
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();