author	Yonghong Song <yhs@fb.com>	2017-10-23 23:53:08 -0700
committer	Michael Bestas <mkbestas@lineageos.org>	2022-04-19 00:51:32 +0300
commit	a85b6c28bb287439916716f124450ceb79a1624f (patch)
tree	faa8839e1d97bcd06a42bc681f86df74734d02fb /include/trace/perf.h
parent	3f79f14b62202045de323c754352cee2ac3b1d08 (diff)
BACKPORT: bpf: permit multiple bpf attachments for a single perf event
This patch enables multiple bpf attachments for a single kprobe/uprobe/tracepoint trace event. Each trace_event keeps a list of attached perf events. When an event happens, all attached bpf programs will be executed based on the order of attachment.

A global bpf_event_mutex lock is introduced to protect prog_array attaching and detaching. An alternative would be to introduce a mutex lock in every trace_event_call structure, but that would take a lot of extra memory, so a single global bpf_event_mutex lock is a good compromise.

The bpf prog detachment involves allocation of memory. If the allocation fails, a dummy do-nothing program replaces the to-be-detached program in place.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit e87c6bc3852b981e71c757be20771546ce9f76f3)
Signed-off-by: Connor O'Brien <connoro@google.com>
Bug: 121213201
Bug: 138317270
Test: build & boot cuttlefish; attach 2 progs to 1 tracepoint
Change-Id: I390d8c0146888ddb1aed5a6f6e5dae7ef394ebc9
Signed-off-by: Chatur27 <jasonbright2709@gmail.com>
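For readers of this backport, a minimal C sketch of the allocation-free detach fallback described above. This is not the kernel's verbatim code; the helper name detach_fallback and its signature are made up for illustration (the upstream patch implements this inside the bpf_prog_array helpers in kernel/bpf/core.c).

/*
 * Sketch, not the kernel's verbatim code: detaching a program
 * normally allocates a new, smaller prog_array.  If that allocation
 * fails, the matching entry is overwritten in place with a dummy
 * program whose handler does nothing, so the detach path never has
 * to fail outright.
 */
static void detach_fallback(struct bpf_prog **progs,
			    struct bpf_prog *old_prog,
			    struct bpf_prog *dummy_prog)
{
	for (; *progs; progs++) {
		if (*progs == old_prog) {
			*progs = dummy_prog;	/* do-nothing replacement */
			break;
		}
	}
}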
Diffstat (limited to 'include/trace/perf.h')
-rw-r--r--	include/trace/perf.h	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 88de5c205e86..31b8d276a993 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -34,7 +34,6 @@ perf_trace_##call(void *__data, proto) \
struct trace_event_call *event_call = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
struct trace_event_raw_##call *entry; \
- struct bpf_prog *prog = event_call->prog; \
struct pt_regs *__regs; \
u64 __count = 1; \
struct task_struct *__task = NULL; \
@@ -46,8 +45,9 @@ perf_trace_##call(void *__data, proto) \
__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
head = this_cpu_ptr(event_call->perf_events); \
- if (!prog && __builtin_constant_p(!__task) && !__task && \
- hlist_empty(head)) \
+ if (!bpf_prog_array_valid(event_call) && \
+ __builtin_constant_p(!__task) && !__task && \
+ hlist_empty(head)) \
return; \
\
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
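The bpf_prog_array_valid() check added above is defined elsewhere in this commit, outside the diffstat shown here. Roughly, assuming the upstream definition in include/linux/trace_events.h, it is a cheap lockless hint:

/*
 * Sketch of the helper used above, per the upstream patch: returns
 * true if at least one bpf program appears to be attached.
 * READ_ONCE() suffices because the result is only a heuristic used
 * to bail out early; the attachment list is re-checked under
 * rcu_read_lock() before any program actually runs.
 */
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	return !!READ_ONCE(call->prog_array);
}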