author    Steve Muckle <smuckle@codeaurora.org>    2014-04-13 14:05:30 -0700
committer David Keitel <dkeitel@codeaurora.org>    2016-03-23 19:59:31 -0700
commit    8e7389b5c2130b62ceafb7cc9d6a2ae00bfbcf5a (patch)
tree      adc1c8372c22a27fcf1e51cf57182f494a199ddd
parent    adac5c0890434eea96646b61ca5af49e285dfd5d (diff)
sched: add affinity, task load information to sched tracepoints
Knowing the affinity mask and CPU usage of a task is helpful in
understanding the behavior of the system. Affinity information has been
added to the enq_deq trace event, and the migration tracepoint now
reports the load of the task migrated.

Change-Id: I29d8a610292b4dfeeb8fe16174e9d4dc196649b7
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
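The enq_deq event reports the first word of the task's cpus_allowed mask as a hex bitmask (the new affine=%x field). As a quick illustration, and not part of this patch, a minimal userspace decoder for such a mask could look like:

	#include <stdio.h>

	/* Decode an "affine=%x" value from the trace output: bit N set
	 * means the task was allowed to run on CPU N. Illustrative only. */
	static void print_allowed_cpus(unsigned int mask)
	{
		int cpu;

		for (cpu = 0; mask; cpu++, mask >>= 1)
			if (mask & 1)
				printf("CPU%d ", cpu);
		printf("\n");
	}

	int main(void)
	{
		print_allowed_cpus(0xf);	/* affine=f -> CPU0 CPU1 CPU2 CPU3 */
		return 0;
	}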
-rw-r--r--  include/trace/events/sched.h         23
-rw-r--r--  kernel/sched/core.c                   8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     3
3 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e9569c65330d..99f93e6a9235 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -57,9 +57,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
*/
TRACE_EVENT(sched_enq_deq_task,
- TP_PROTO(struct task_struct *p, int enqueue),
+ TP_PROTO(struct task_struct *p, int enqueue, unsigned int cpus_allowed),
- TP_ARGS(p, enqueue),
+ TP_ARGS(p, enqueue, cpus_allowed),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -70,6 +70,7 @@ TRACE_EVENT(sched_enq_deq_task,
__field(unsigned int, nr_running )
__field(unsigned long, cpu_load )
__field(unsigned int, rt_nr_running )
+ __field(unsigned int, cpus_allowed )
#ifdef CONFIG_SCHED_FREQ_INPUT
__field(unsigned int, sum_scaled )
__field(unsigned int, period )
@@ -86,6 +87,7 @@ TRACE_EVENT(sched_enq_deq_task,
__entry->nr_running = task_rq(p)->nr_running;
__entry->cpu_load = task_rq(p)->cpu_load[0];
__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
+ __entry->cpus_allowed = cpus_allowed;
#ifdef CONFIG_SCHED_FREQ_INPUT
__entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled;
__entry->period = p->se.avg.runnable_avg_period;
@@ -93,15 +95,15 @@ TRACE_EVENT(sched_enq_deq_task,
#endif
),
- TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u"
+ TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
#ifdef CONFIG_SCHED_FREQ_INPUT
- "sum_scaled=%u period=%u demand=%u"
+ " sum_scaled=%u period=%u demand=%u"
#endif
, __entry->cpu,
__entry->enqueue ? "enqueue" : "dequeue",
__entry->comm, __entry->pid,
__entry->prio, __entry->nr_running,
- __entry->cpu_load, __entry->rt_nr_running
+ __entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
#ifdef CONFIG_SCHED_FREQ_INPUT
, __entry->sum_scaled, __entry->period, __entry->demand
#endif
@@ -303,14 +305,16 @@ TRACE_EVENT(sched_switch,
*/
TRACE_EVENT(sched_migrate_task,
- TP_PROTO(struct task_struct *p, int dest_cpu),
+ TP_PROTO(struct task_struct *p, int dest_cpu,
+ unsigned int load),
- TP_ARGS(p, dest_cpu),
+ TP_ARGS(p, dest_cpu, load),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
+ __field(unsigned int, load )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
@@ -319,12 +323,13 @@ TRACE_EVENT(sched_migrate_task,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
+ __entry->load = load;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
- TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
- __entry->comm, __entry->pid, __entry->prio,
+ TP_printk("comm=%s pid=%d prio=%d load=%d orig_cpu=%d dest_cpu=%d",
+ __entry->comm, __entry->pid, __entry->prio, __entry->load,
__entry->orig_cpu, __entry->dest_cpu)
);
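For reference, with the header changes above an event line in the trace buffer would render roughly as follows (all field values illustrative; with CONFIG_SCHED_FREQ_INPUT the first line would additionally carry sum_scaled=, period= and demand= fields):

	sched_enq_deq_task: cpu=0 enqueue comm=sh pid=1234 prio=120 nr_running=2 cpu_load=512 rt_nr_running=0 affine=f
	sched_migrate_task: comm=sh pid=1234 prio=120 load=37 orig_cpu=0 dest_cpu=2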
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a96e2225755a..1352245a0fc4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -854,7 +854,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & ENQUEUE_RESTORE))
sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
- trace_sched_enq_deq_task(p, 1);
+ trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
inc_cumulative_runnable_avg(rq, p);
}
@@ -864,7 +864,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & DEQUEUE_SAVE))
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
- trace_sched_enq_deq_task(p, 0);
+ trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
dec_cumulative_runnable_avg(rq, p);
}
@@ -1702,7 +1702,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
#endif
#endif
- trace_sched_migrate_task(p, new_cpu);
+ trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
@@ -8070,6 +8070,8 @@ void __init sched_init(void)
int i, j;
unsigned long alloc_size = 0, ptr;
+ BUG_ON(num_possible_cpus() > BITS_PER_LONG);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
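Note that the callers above pass cpumask_bits(&p->cpus_allowed)[0], i.e. only the first unsigned long of the affinity mask, which covers CPUs 0 through BITS_PER_LONG-1; the BUG_ON added to sched_init() makes any configuration that would overflow that word fail loudly at boot. A standalone sketch of the truncation being guarded against (illustrative userspace code, not from the patch):

	#include <limits.h>
	#include <stdio.h>

	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	int main(void)
	{
		/* Two-word mask: CPU0 and CPU64 (with 64-bit longs) allowed. */
		unsigned long bits[2] = { 0x1UL, 0x1UL };

		/* The tracepoint only ever sees word 0, so the second
		 * word's CPU silently disappears from the reported mask. */
		printf("traced mask word: %#lx (CPU %zu dropped)\n",
		       bits[0], (size_t)BITS_PER_LONG);
		return 0;
	}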
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9d4399b553a3..78f04e4ad829 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -359,7 +359,8 @@ static bool report_latency(struct trace_array *tr, cycle_t delta)
}
static void
-probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
+probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu,
+ unsigned int load)
{
if (task != wakeup_task)
return;
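No functional change is needed in the wakeup tracer beyond this: tracepoint probes must match the updated TP_PROTO, so probe_wakeup_migrate_task() grows the load parameter but simply ignores it.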