diff options
| author | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2014-12-04 18:18:30 -0800 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:01:15 -0700 |
| commit | b9c3d7384dc6df0697b0a4bbf0f1dfb855543e11 (patch) | |
| tree | 0473a0512b58dfbd60b489cf576507af9242ef3f | |
| parent | 636a5749c8706da90cfb1c607df53f3f657fb76f (diff) | |
sched: extend sched_task_load tracepoint to indicate prefer_idle
Prefer idle determines whether the scheduler prefers an idle CPU
over a busy CPU when selecting where to wake up a task. Knowing
the correct value of this tunable is essential in understanding
placement decisions made in select_best_cpu().
Change-Id: I955d7577061abccb65d01f560e1911d9db70298a
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
| -rw-r--r-- | include/trace/events/sched.h | 10 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 2 |
2 files changed, 7 insertions, 5 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index a60fc80f66e3..7294f44da319 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -116,9 +116,9 @@ TRACE_EVENT(sched_enq_deq_task,
 TRACE_EVENT(sched_task_load,

 	TP_PROTO(struct task_struct *p, int small_task, int boost, int reason,
-		 int sync),
+		 int sync, int prefer_idle),

-	TP_ARGS(p, small_task, boost, reason, sync),
+	TP_ARGS(p, small_task, boost, reason, sync, prefer_idle),

 	TP_STRUCT__entry(
 		__array( char, comm, TASK_COMM_LEN )
@@ -131,6 +131,7 @@ TRACE_EVENT(sched_task_load,
 		__field( int, boost )
 		__field( int, reason )
 		__field( int, sync )
+		__field( int, prefer_idle )
 	),

 	TP_fast_assign(
@@ -144,13 +145,14 @@ TRACE_EVENT(sched_task_load,
 		__entry->boost = boost;
 		__entry->reason = reason;
 		__entry->sync = sync;
+		__entry->prefer_idle = prefer_idle;
 	),

-	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u small=%d boost=%d reason=%d sync=%d",
+	TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u small=%d boost=%d reason=%d sync=%d prefer_idle=%d",
 		__entry->pid, __entry->comm, __entry->sum,
 		__entry->sum_scaled, __entry->period, __entry->demand,
 		__entry->small_task, __entry->boost, __entry->reason,
-		__entry->sync)
+		__entry->sync, __entry->prefer_idle)
 );

 TRACE_EVENT(sched_cpu_load,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 75f1862e86d8..613420b3836e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3229,7 +3229,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
 	int cstate, min_cstate = INT_MAX;
 	int prefer_idle = reason ? 1 : sysctl_sched_prefer_idle;

-	trace_sched_task_load(p, small_task, boost, reason, sync);
+	trace_sched_task_load(p, small_task, boost, reason, sync, prefer_idle);

 	if (small_task && !boost) {
 		best_cpu = best_small_task_cpu(p, sync);
