| | | |
|---|---|---|
| author | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2016-03-22 20:41:28 -0700 |
| committer | Jeevan Shriram <jshriram@codeaurora.org> | 2016-04-22 15:05:13 -0700 |
| commit | d4ca4d767f4c8a64ab212ddf2a9356c316de4f80 | |
| tree | 1d1157d76b08cb5590bb66672b8f6f3ac33aab31 | /kernel |
| parent | c34b0b85aa3cc5ed434420b2636131f3ff614b7a | |
sched: update placement logic to prefer C-state and busier CPUs
Update the wakeup placement logic for the case where need_idle is not
set. Break ties in power cost by C-state. If the C-state is also the
same, break the tie with prev_cpu. Finally, go for the most loaded CPU.
CRs-fixed: 1006303
Change-Id: Iafa98a909ed464af33f4fe3345bbfc8e77dee963
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed a bug where best_cpu_cstate was assigned
from an uninitialized cpu_cstate.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
| | | |
|---|---|---|
| -rw-r--r-- | kernel/sched/fair.c | 37 |

1 file changed, 31 insertions, 6 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11dc798e071a..b6e638bdcd98 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3138,7 +3138,7 @@ struct cluster_cpu_stats {
 	int best_capacity_cpu, best_cpu, best_sibling_cpu;
 	int min_cost, best_sibling_cpu_cost;
 	int best_cpu_cstate;
-	u64 min_load, best_sibling_cpu_load;
+	u64 min_load, max_load, best_sibling_cpu_load;
 	s64 highest_spare_capacity;
 };
 
@@ -3398,10 +3398,11 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 		}
 	}
 
+	cpu_cstate = cpu_rq(cpu)->cstate;
+
 	if (env->need_idle) {
 		stats->min_cost = cpu_cost;
 		if (idle_cpu(cpu)) {
-			cpu_cstate = cpu_rq(cpu)->cstate;
 			if (cpu_cstate < stats->best_cpu_cstate ||
 			    (cpu_cstate == stats->best_cpu_cstate &&
 			     cpu == prev_cpu)) {
@@ -3420,11 +3421,34 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 		return;
 	}
 
-	if ((cpu_cost < stats->min_cost) ||
-	    ((stats->best_cpu != prev_cpu &&
-	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+	if (cpu_cost < stats->min_cost) {
 		stats->min_cost = cpu_cost;
-		stats->min_load = env->cpu_load;
+		stats->best_cpu_cstate = cpu_cstate;
+		stats->max_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		return;
+	}
+
+	/* CPU cost is the same. Start breaking the tie by C-state */
+
+	if (cpu_cstate > stats->best_cpu_cstate)
+		return;
+
+	if (cpu_cstate < stats->best_cpu_cstate) {
+		stats->best_cpu_cstate = cpu_cstate;
+		stats->max_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		return;
+	}
+
+	/* C-state is the same. Use prev CPU to break the tie */
+	if (cpu == prev_cpu) {
+		stats->best_cpu = cpu;
+		return;
+	}
+
+	if (stats->best_cpu != prev_cpu && env->cpu_load > stats->max_load) {
+		stats->max_load = env->cpu_load;
 		stats->best_cpu = cpu;
 	}
 }
@@ -3468,6 +3492,7 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
 	stats->best_capacity_cpu = stats->best_sibling_cpu = -1;
 	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
 	stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
+	stats->max_load = 0;
 	stats->highest_spare_capacity = 0;
 	stats->least_loaded_cpu = -1;
 	stats->best_cpu_cstate = INT_MAX;
```
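Taken together with the commit message, the non-need_idle selection above reduces to a strict preference order: lowest power cost, then shallowest C-state, then the task's previous CPU, then the most loaded CPU. The standalone sketch below walks that cascade over a few made-up candidates; the `candidate` struct, the `prefer()` helper, and the sample numbers are illustrative only and are not the kernel's `cluster_cpu_stats` code.

```c
/*
 * Illustration of the tie-breaking order introduced by this patch
 * (power cost, then C-state, then prev_cpu, then highest load).
 * All types and values here are made up for the example.
 */
#include <stdio.h>
#include <limits.h>

struct candidate {
	int cpu;
	int cost;		/* estimated power cost of running here */
	int cstate;		/* current C-state depth, 0 = running */
	unsigned long long load;
};

/* Returns nonzero if @c should replace the current best choice. */
static int prefer(const struct candidate *c, const struct candidate *best,
		  int prev_cpu)
{
	if (c->cost != best->cost)
		return c->cost < best->cost;	/* lower power cost wins */
	if (c->cstate != best->cstate)
		return c->cstate < best->cstate; /* shallower C-state wins */
	if (c->cpu == prev_cpu)
		return 1;			/* prefer the previous CPU */
	if (best->cpu == prev_cpu)
		return 0;	/* never displace prev_cpu on load alone */
	return c->load > best->load;	/* otherwise pick the busier CPU */
}

int main(void)
{
	struct candidate cpus[] = {
		{ .cpu = 0, .cost = 120, .cstate = 2, .load = 300 },
		{ .cpu = 1, .cost = 100, .cstate = 1, .load = 500 },
		{ .cpu = 2, .cost = 100, .cstate = 1, .load = 700 },
		{ .cpu = 3, .cost = 100, .cstate = 0, .load = 100 },
	};
	int prev_cpu = 2;
	struct candidate best = { .cpu = -1, .cost = INT_MAX,
				  .cstate = INT_MAX, .load = 0 };

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		if (prefer(&cpus[i], &best, prev_cpu))
			best = cpus[i];

	/* CPU 3 wins: same cost as CPUs 1 and 2 but a shallower C-state. */
	printf("best_cpu = %d\n", best.cpu);
	return 0;
}
```

Breaking the final tie toward the busier CPU, as the patch title says, presumably keeps wakeups packed onto already-active CPUs when cost and C-state are otherwise equal.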
