diff options
| author | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2015-11-02 15:08:20 -0800 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:02:38 -0700 |
| commit | 280b8668481d732d7e2602cb719b517f23f329b7 (patch) | |
| tree | d8be09ecdc4301276dc0a583184f55088595aec5 /include/trace/events | |
| parent | f406df7c3588d9a605639e9f72eb894898cfba02 (diff) | |
sched: Optimize scheduler trace events to reduce trace buffer usage
Scheduler ftrace events currently generate a lot of data when turned
on. The excessive log messages often end up overflowing trace buffers
for long-running use cases or crowding out other events. Optimize
scheduler events so that they generate less log spew and are more
manageable. To that end, change the variable type of some event
fields; introduce variants of sched_cpu_load that can be turned
on/off for separate code paths; and remove unused fields from
various events.
Change-Id: I2b313542b39ad5e09a01ad1303b5dfe2c4883b8a
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in rt.c due to
CONFIG_SCHED_QHMP.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'include/trace/events')
| -rw-r--r-- | include/trace/events/sched.h | 50 |
1 file changed, 27 insertions, 23 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 1a215ba026b5..f279fcf33297 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -58,7 +58,7 @@ TRACE_EVENT(sched_kthread_stop_ret, */ TRACE_EVENT(sched_enq_deq_task, - TP_PROTO(struct task_struct *p, int enqueue, unsigned int cpus_allowed), + TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed), TP_ARGS(p, enqueue, cpus_allowed), @@ -67,14 +67,12 @@ TRACE_EVENT(sched_enq_deq_task, __field( pid_t, pid ) __field( int, prio ) __field( int, cpu ) - __field( int, enqueue ) + __field( bool, enqueue ) __field(unsigned int, nr_running ) __field(unsigned long, cpu_load ) __field(unsigned int, rt_nr_running ) __field(unsigned int, cpus_allowed ) #ifdef CONFIG_SCHED_HMP - __field(unsigned int, sum_scaled ) - __field(unsigned int, period ) __field(unsigned int, demand ) #endif ), @@ -90,15 +88,13 @@ TRACE_EVENT(sched_enq_deq_task, __entry->rt_nr_running = task_rq(p)->rt.rt_nr_running; __entry->cpus_allowed = cpus_allowed; #ifdef CONFIG_SCHED_HMP - __entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled; - __entry->period = p->se.avg.runnable_avg_period; __entry->demand = p->ravg.demand; #endif ), TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x" #ifdef CONFIG_SCHED_HMP - " sum_scaled=%u period=%u demand=%u" + " demand=%u" #endif , __entry->cpu, __entry->enqueue ? 
"enqueue" : "dequeue", @@ -106,7 +102,7 @@ TRACE_EVENT(sched_enq_deq_task, __entry->prio, __entry->nr_running, __entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed #ifdef CONFIG_SCHED_HMP - , __entry->sum_scaled, __entry->period, __entry->demand + , __entry->demand #endif ) ); @@ -115,22 +111,19 @@ TRACE_EVENT(sched_enq_deq_task, TRACE_EVENT(sched_task_load, - TP_PROTO(struct task_struct *p, int boost, int reason, - int sync, int need_idle, int best_cpu), + TP_PROTO(struct task_struct *p, bool boost, int reason, + bool sync, bool need_idle, int best_cpu), TP_ARGS(p, boost, reason, sync, need_idle, best_cpu), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) - __field(unsigned int, sum ) - __field(unsigned int, sum_scaled ) - __field(unsigned int, period ) __field(unsigned int, demand ) - __field( int, boost ) + __field( bool, boost ) __field( int, reason ) - __field( int, sync ) - __field( int, need_idle ) + __field( bool, sync ) + __field( bool, need_idle ) __field( int, best_cpu ) __field( u64, latency ) ), @@ -138,9 +131,6 @@ TRACE_EVENT(sched_task_load, TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; - __entry->sum = p->se.avg.runnable_avg_sum; - __entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled; - __entry->period = p->se.avg.runnable_avg_period; __entry->demand = p->ravg.demand; __entry->boost = boost; __entry->reason = reason; @@ -151,14 +141,13 @@ TRACE_EVENT(sched_task_load, sched_clock() - p->ravg.mark_start : 0; ), - TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d need_idle=%d best_cpu=%d latency=%llu", - __entry->pid, __entry->comm, __entry->sum, - __entry->sum_scaled, __entry->period, __entry->demand, + TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d best_cpu=%d latency=%llu", + __entry->pid, __entry->comm, __entry->demand, __entry->boost, __entry->reason, __entry->sync, __entry->need_idle, __entry->best_cpu, 
__entry->latency) ); -TRACE_EVENT(sched_cpu_load, +DECLARE_EVENT_CLASS(sched_cpu_load, TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), @@ -206,6 +195,21 @@ TRACE_EVENT(sched_cpu_load, __entry->dstate, __entry->temp) ); +DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup, + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + TP_ARGS(rq, idle, irqload, power_cost, temp) +); + +DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb, + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + TP_ARGS(rq, idle, irqload, power_cost, temp) +); + +DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup, + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + TP_ARGS(rq, idle, irqload, power_cost, temp) +); + TRACE_EVENT(sched_set_boost, TP_PROTO(int ref_count), |
