-rw-r--r--	include/uapi/linux/bpf.h	|  2 +-
-rw-r--r--	kernel/trace/bpf_trace.c	|  9 +++++++--
2 files changed, 8 insertions, 3 deletions
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 5bac307880a6..2d1d7c8887fe 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -586,7 +586,7 @@ enum bpf_func_id {
#define BPF_F_FAST_STACK_CMP (1ULL << 9)
#define BPF_F_REUSE_STACKID (1ULL << 10)

-/* BPF_FUNC_perf_event_output flags. */
+/* flags for BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read. */
#define BPF_F_INDEX_MASK 0xffffffffULL
#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
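
With the helper change below, the lower 32 bits of the flags argument to bpf_perf_event_read() carry an array index, and BPF_F_CURRENT_CPU selects the slot of the CPU the program is running on. A minimal sketch of a tracing program using the extended helper follows; the map name, the attach point and the samples/bpf-style wrappers (SEC(), struct bpf_map_def and the bpf_perf_event_read() stub from bpf_helpers.h) are illustrative assumptions, not part of this patch.

#include <linux/ptrace.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* Perf event array: the loader is expected to open one hardware counter
 * per CPU and store the resulting fds in the corresponding slots. */
struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 128,
};

SEC("kprobe/sys_write")
int read_counter(struct pt_regs *ctx)
{
	/* BPF_F_CURRENT_CPU selects the slot of the executing CPU, so the
	 * program no longer has to pass bpf_get_smp_processor_id() as a
	 * raw index. */
	u64 count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	/* Errors (-EINVAL, -E2BIG, -ENOENT, ...) come back as negative
	 * values cast to u64. */
	if ((long long)count < 0)
		return 0;

	/* use count here, e.g. export it to user space via another map */
	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
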
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 045bde1a64ac..de4af0208f61 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -246,9 +246,15 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
+ unsigned int cpu = smp_processor_id();
+ u64 index = flags & BPF_F_INDEX_MASK;
struct bpf_event_entry *ee;
struct perf_event *event;

+ if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+ return -EINVAL;
+ if (index == BPF_F_CURRENT_CPU)
+ index = cpu;
if (unlikely(index >= array->map.max_entries))
return -E2BIG;

@@ -262,8 +268,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
return -EINVAL;

/* make sure event is local and doesn't have pmu::count */
- if (event->oncpu != smp_processor_id() ||
- event->pmu->count)
+ if (unlikely(event->oncpu != cpu || event->pmu->count))
return -EINVAL;

if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&