summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/sched/fair.c  | 9 +++++++++
-rw-r--r-- kernel/sched/sched.h | 2 ++
2 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e5518b0ddab7..d0cd0a8afe63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2735,6 +2735,11 @@ static int select_best_cpu(struct task_struct *p, int target)
int cpu_cost, min_cost = INT_MAX;
int small_task = is_small_task(p);
+ trace_sched_task_load(p);
+ for_each_online_cpu(i)
+ trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
+ mostly_idle_cpu(i), power_cost(p, i));
+
/* provide bias for prev_cpu */
if (!small_task && mostly_idle_cpu(prev_cpu) &&
task_will_fit(p, prev_cpu)) {
@@ -6968,6 +6973,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
+ trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
+ mostly_idle_cpu(i),
+ power_cost(NULL, i));
+
/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 965db5c8437b..01aa57b070f1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1009,6 +1009,8 @@ static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
{
}
+#define trace_sched_cpu_load(...)
+
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_CGROUP_SCHED