author | Daniel Borkmann <daniel@iogearbox.net> | 2016-06-28 12:18:25 +0200
---|---|---
committer | Michael Bestas <mkbestas@lineageos.org> | 2022-04-19 00:51:45 +0300
commit | fc6c6256bb108dde5c7f533d6dda2e9044a7283c |
tree | c9aedd0d34176f2aa7ac9d9d8beb8b39a31a6ca5 |
parent | 73a23d04435e3adbe87bd5c9abc8e696fd7a0357 |
bpf, trace: add BPF_F_CURRENT_CPU flag for bpf_perf_event_read
Follow-up to commit 1e33759c788c ("bpf, trace: add BPF_F_CURRENT_CPU
flag for bpf_perf_event_output") that adds the same functionality to the
bpf_perf_event_read() helper. Splitting the u64 argument into an index
component (lower 32 bits) and a flags component (upper 32 bits) is safe
here as well: maps large enough for a real index to reach into the flag
space are already rejected at map allocation time.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
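
For context beyond the commit itself, below is a minimal sketch of how a tracing program can use the new flag, modeled on the samples/bpf programs of this era. The map name, section name, and max_entries are invented for illustration and are not part of this commit:

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"	/* samples/bpf stubs: SEC(), bpf_perf_event_read() */

/* Hypothetical perf event array; user space opens one hardware counter
 * fd per CPU and stores it at the corresponding index.
 */
struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,	/* >= number of possible CPUs */
};

SEC("kprobe/sys_write")
int on_sys_write(struct pt_regs *ctx)
{
	/* Before this patch the program had to pass an explicit index,
	 * e.g. bpf_get_smp_processor_id(); with BPF_F_CURRENT_CPU the
	 * helper resolves the current CPU itself.
	 */
	u64 count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	/* A negative errno (-EINVAL, -E2BIG, -ENOENT) comes back as a
	 * very large u64, so check the sign after casting.
	 */
	if ((s64)count < 0)
		return 0;
	/* ... consume count, e.g. via bpf_trace_printk() ... */
	return 0;
}

char _license[] SEC("license") = "GPL";

The flag saves a separate bpf_get_smp_processor_id() call and matches the convention already established for bpf_perf_event_output() by the commit referenced above.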
-rw-r--r-- | include/uapi/linux/bpf.h | 2
-rw-r--r-- | kernel/trace/bpf_trace.c | 9
2 files changed, 8 insertions, 3 deletions
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 5bac307880a6..2d1d7c8887fe 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -586,7 +586,7 @@ enum bpf_func_id {
 #define BPF_F_FAST_STACK_CMP	(1ULL << 9)
 #define BPF_F_REUSE_STACKID	(1ULL << 10)
 
-/* BPF_FUNC_perf_event_output flags. */
+/* BPF_FUNC_perf_event_output flags and BPF_FUNC_perf_event_read. */
 #define BPF_F_INDEX_MASK	0xffffffffULL
 #define BPF_F_CURRENT_CPU	BPF_F_INDEX_MASK
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 045bde1a64ac..de4af0208f61 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -246,9 +246,15 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	unsigned int cpu = smp_processor_id();
+	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
 
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+		return -EINVAL;
+	if (index == BPF_F_CURRENT_CPU)
+		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
@@ -262,8 +268,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 		return -EINVAL;
 
 	/* make sure event is local and doesn't have pmu::count */
-	if (event->oncpu != smp_processor_id() ||
-	    event->pmu->count)
+	if (unlikely(event->oncpu != cpu || event->pmu->count))
 		return -EINVAL;
 
 	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
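
To make the flags/index split concrete, here is a small standalone user-space C sketch of the decode logic the bpf_trace.c hunk adds; decode_index() is a hypothetical name that only mirrors the kernel checks shown above:

#include <stdint.h>
#include <stdio.h>

#define BPF_F_INDEX_MASK	0xffffffffULL
#define BPF_F_CURRENT_CPU	BPF_F_INDEX_MASK

/* The low 32 bits of the u64 argument carry the index, the high 32 bits
 * are reserved for future flags, and the all-ones index doubles as the
 * "current CPU" sentinel. This cannot collide with a real index because
 * a perf event array anywhere near 2^32 entries is rejected at map
 * allocation time.
 */
static int decode_index(uint64_t flags, unsigned int cur_cpu, uint64_t *out)
{
	uint64_t index = flags & BPF_F_INDEX_MASK;

	if (flags & ~BPF_F_INDEX_MASK)	/* unknown flag bits set */
		return -1;		/* the kernel returns -EINVAL */
	if (index == BPF_F_CURRENT_CPU)
		index = cur_cpu;
	*out = index;
	return 0;
}

int main(void)
{
	uint64_t idx;

	decode_index(BPF_F_CURRENT_CPU, 3, &idx);
	printf("BPF_F_CURRENT_CPU on cpu 3 -> index %llu\n",
	       (unsigned long long)idx);	/* prints 3 */
	decode_index(7, 3, &idx);
	printf("explicit index 7 -> index %llu\n",
	       (unsigned long long)idx);	/* prints 7 */
	return 0;
}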