-rw-r--r--  include/trace/events/sched.h  81
-rw-r--r--  kernel/sched/fair.c            9
-rw-r--r--  kernel/sched/sched.h           2

3 files changed, 92 insertions, 0 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 04546537ff15..1d08d7eb40d3 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -8,6 +8,8 @@
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
+struct rq;
+
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
@@ -88,6 +90,85 @@ TRACE_EVENT(sched_enq_deq_task,
__entry->cpu_load, __entry->rt_nr_running)
);
+#ifdef CONFIG_SCHED_HMP
+
+TRACE_EVENT(sched_task_load,
+
+ TP_PROTO(struct task_struct *p),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, sum )
+ __field(unsigned int, sum_scaled )
+ __field(unsigned int, period )
+ __field(unsigned int, demand )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->sum = p->se.avg.runnable_avg_sum;
+ __entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled;
+ __entry->period = p->se.avg.runnable_avg_period;
+ __entry->demand = p->ravg.demand;
+ ),
+
+ TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u",
+ __entry->pid, __entry->comm, __entry->sum,
+ __entry->sum_scaled, __entry->period, __entry->demand)
+);
+
+TRACE_EVENT(sched_cpu_load,
+
+ TP_PROTO(struct rq *rq, int idle, int mostly_idle,
+ unsigned int power_cost),
+
+ TP_ARGS(rq, idle, mostly_idle, power_cost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu )
+ __field(unsigned int, idle )
+ __field(unsigned int, mostly_idle )
+ __field(unsigned int, nr_running )
+ __field(unsigned int, nr_big_tasks )
+ __field(unsigned int, nr_small_tasks )
+ __field(unsigned int, load_scale_factor )
+ __field(unsigned int, capacity )
+ __field( u64, cumulative_runnable_avg )
+ __field(unsigned int, cur_freq )
+ __field(unsigned int, max_freq )
+ __field(unsigned int, power_cost )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = rq->cpu;
+ __entry->idle = idle;
+ __entry->mostly_idle = mostly_idle;
+ __entry->nr_running = rq->nr_running;
+ __entry->nr_big_tasks = rq->nr_big_tasks;
+ __entry->nr_small_tasks = rq->nr_small_tasks;
+ __entry->load_scale_factor = rq->load_scale_factor;
+ __entry->capacity = rq->capacity;
+ __entry->cumulative_runnable_avg = rq->cumulative_runnable_avg;
+ __entry->cur_freq = rq->cur_freq;
+ __entry->max_freq = rq->max_freq;
+ __entry->power_cost = power_cost;
+ ),
+
+ TP_printk("cpu %u idle %d mostly_idle %d nr_run %u nr_big %u nr_small %u lsf %u capacity %u cr_avg %llu fcur %u fmax %u power_cost %u",
+ __entry->cpu, __entry->idle, __entry->mostly_idle, __entry->nr_running,
+ __entry->nr_big_tasks, __entry->nr_small_tasks,
+ __entry->load_scale_factor, __entry->capacity,
+ __entry->cumulative_runnable_avg, __entry->cur_freq, __entry->max_freq,
+ __entry->power_cost)
+);
+
+#endif /* CONFIG_SCHED_HMP */
+
+
/*
* Tracepoint for waking up a task:
*/
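
Both tracepoints above follow the standard four-part TRACE_EVENT() anatomy.
A cut-down sketch for illustration (the event name is hypothetical and this
block is not part of the patch):

/* Minimal cousin of sched_task_load above, for illustration only. */
#include <linux/tracepoint.h>

TRACE_EVENT(sched_task_load_demo,

	/* prototype of the generated trace_sched_task_load_demo() call */
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p),

	/* layout of the record written into the trace ring buffer */
	TP_STRUCT__entry(
		__field(pid_t,        pid)
		__field(unsigned int, demand)
	),

	/* runs at the trace site: copy only what the record needs.
	 * p->ravg.demand is the per-task HMP demand added by this series. */
	TP_fast_assign(
		__entry->pid    = p->pid;
		__entry->demand = p->ravg.demand;
	),

	/* string formatting is deferred until the buffer is read */
	TP_printk("pid=%d demand=%u", __entry->pid, __entry->demand)
);

Since TP_printk() runs at read time, the cost paid at the call site is
essentially just the TP_fast_assign() copies.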
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e5518b0ddab7..d0cd0a8afe63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2735,6 +2735,11 @@ static int select_best_cpu(struct task_struct *p, int target)
int cpu_cost, min_cost = INT_MAX;
int small_task = is_small_task(p);
+ trace_sched_task_load(p);
+ for_each_online_cpu(i)
+ trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
+ mostly_idle_cpu(i), power_cost(p, i));
+
/* provide bias for prev_cpu */
if (!small_task && mostly_idle_cpu(prev_cpu) &&
task_will_fit(p, prev_cpu)) {
@@ -6968,6 +6973,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
+ trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
+ mostly_idle_cpu(i),
+ power_cost(NULL, i));
+
/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
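
Once the hooks are in place, both events can be enabled like any other
sched tracepoint through tracefs. A minimal userspace sketch, assuming
tracefs is mounted at /sys/kernel/debug/tracing (the mount point varies
by system):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"

/* Write a short control string to a tracefs file. */
static int tracefs_write(const char *file, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), TRACEFS "/%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, val, strlen(val));
	close(fd);
	return 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	tracefs_write("events/sched/sched_task_load/enable", "1");
	tracefs_write("events/sched/sched_cpu_load/enable", "1");

	/* trace_pipe blocks and drains events as they arrive */
	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}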
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 965db5c8437b..01aa57b070f1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1009,6 +1009,8 @@ static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
{
}
+#define trace_sched_cpu_load(...)
+
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_CGROUP_SCHED
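
The sched.h hunk is what keeps the new fair.c call sites building on
!CONFIG_SCHED_HMP kernels: trace_sched_cpu_load() is stubbed out as a
variadic no-op macro. The pattern, sketched in isolation:

#ifdef CONFIG_SCHED_HMP
/* the real event is generated from TRACE_EVENT(sched_cpu_load, ...) */
#else
/*
 * Variadic no-op: the arguments are discarded during preprocessing,
 * so HMP-only expressions at the call sites (mostly_idle_cpu(),
 * power_cost(), rq->nr_big_tasks, ...) never reach the compiler.
 * A static inline stub would not work here, since it would still
 * force those argument expressions to compile.
 */
#define trace_sched_cpu_load(...)
#endif

The trade-off is that the stub also discards any side effects in its
arguments, so call sites must not depend on them.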