| | | |
|---|---|---|
| author | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2014-10-27 21:56:41 -0700 |
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:00:56 -0700 |
| commit | 77b024f33738d65bcb1272fc07f3ad8b37316599 (patch) | |
| tree | eda935052d4c043728b5aa0f65f75c232782458f /kernel | |
| parent | 4edc997e12994f2b62bdfc1388cabc258654f990 (diff) | |
sched: use C-states in non-small task wakeup placement logic
Currently, when a non-small task wakes up, the task placement logic
first tries to find the least loaded CPU before breaking any ties
via the power cost of running the task on those CPUs. When the power
cost is also the same, however, the scheduler just selects the first
CPU it comes across. Use C-states to further break ties when the
power cost is the same for multiple CPUs: the scheduler will now
pick the CPU in the shallowest C-state.
Change-Id: Ie1401b305fa02758a2f7b30cfca1afe64459fc2b
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 88 |
1 file changed, 65 insertions(+), 23 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ff9c396dce5b..e078a63adc51 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3086,6 +3086,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason)
 	u64 load, min_load = ULLONG_MAX, min_fallback_load = ULLONG_MAX;
 	int small_task = is_small_task(p);
 	int boost = sched_boost();
+	int cstate, min_cstate = INT_MAX;
 
 	trace_sched_task_load(p, small_task, boost, reason);
 
@@ -3103,6 +3104,11 @@ static int select_best_cpu(struct task_struct *p, int target, int reason)
 		trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
 				     mostly_idle_cpu(i), power_cost(p, i));
 
+		/*
+		 * The least-loaded mostly-idle CPU where the task
+		 * won't fit is our fallback if we can't find a CPU
+		 * where the task will fit.
+		 */
 		if (!task_will_fit(p, i)) {
 			if (mostly_idle_cpu(i)) {
 				load = cpu_load(i);
@@ -3111,36 +3117,72 @@ static int select_best_cpu(struct task_struct *p, int target, int reason)
 					fallback_idle_cpu = i;
 				}
 			}
-		} else {
-			if (eligible_cpu(p, i)) {
-				cpu_cost = power_cost(p, i);
-				load = cpu_load(i);
+			continue;
+		}
 
-				if (power_delta_exceeded(cpu_cost, min_cost)) {
-					if (cpu_cost < min_cost) {
-						min_cost = cpu_cost;
-						min_load = load;
-						best_cpu = i;
-					}
-				} else {
-					if (load < min_load) {
-						min_load = load;
-						best_cpu = i;
-					} else if (load == min_load &&
-						   cpu_cost < min_cost) {
-						best_cpu = i;
-					}
-
-					if (cpu_cost < min_cost)
-						min_cost = cpu_cost;
-				}
+		if (!eligible_cpu(p, i))
+			continue;
+
+		/*
+		 * The task will fit on this CPU, and the CPU is either
+		 * mostly_idle or not max capacity and can fit it under
+		 * spill.
+		 */
+
+		load = cpu_load(i);
+		cpu_cost = power_cost(p, i);
+		cstate = cpu_rq(i)->cstate;
+
+		/*
+		 * If the task fits in a CPU in a lower power band, that
+		 * overrides load and C-state.
+		 */
+		if (power_delta_exceeded(cpu_cost, min_cost)) {
+			if (cpu_cost < min_cost) {
+				min_load = load;
+				min_cost = cpu_cost;
+				min_cstate = cstate;
+				best_cpu = i;
 			}
+			continue;
 		}
-	}
+		/* After power band, load is prioritized next. */
+		if (load < min_load) {
+			min_load = load;
+			min_cost = cpu_cost;
+			min_cstate = cstate;
+			best_cpu = i;
+			continue;
+		}
+		if (load > min_load)
+			continue;
+
+		/*
+		 * The load is equal to the previous selected CPU.
+		 * This will most often occur when deciding between
+		 * idle CPUs. Power cost is prioritized after load,
+		 * followed by cstate.
+		 */
+		if (cpu_cost < min_cost) {
+			min_cost = cpu_cost;
+			min_cstate = cstate;
+			best_cpu = i;
+			continue;
+		}
+		if (cpu_cost == min_cost && cstate < min_cstate) {
+			min_cstate = cstate;
+			best_cpu = i;
+		}
+	}
 
 done:
 	if (best_cpu < 0) {
 		if (unlikely(fallback_idle_cpu < 0))
+			/*
+			 * For the lack of a better choice just use
+			 * prev_cpu. We may just benefit from having
+			 * a hot cache.
+			 */
 			best_cpu = prev_cpu;
 		else
 			best_cpu = fallback_idle_cpu;
```
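For readers who want to experiment with the placement order outside the kernel, below is a minimal, self-contained C sketch of the tie-break cascade the patch implements: a lower power band wins outright, then lower load, then lower power cost, and finally the shallowest C-state. Everything here is a stand-in: `struct cpu_stat`, `POWER_BAND_DELTA`, the threshold-based `power_delta_exceeded()` and the sample values are inventions that only mimic the roles of the kernel's `cpu_load()`, `power_cost()` and `rq->cstate`; the ordering logic is the only part taken from the diff above.

```c
#include <limits.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's per-CPU statistics. */
struct cpu_stat {
	unsigned long long load;   /* plays the role of cpu_load(i) */
	int power_cost;            /* plays the role of power_cost(p, i) */
	int cstate;                /* 0 = awake, higher = deeper idle state */
};

/*
 * Hypothetical stand-in for power_delta_exceeded(): treat two costs as
 * belonging to different power bands when they differ by more than a
 * fixed delta.
 */
#define POWER_BAND_DELTA 10

static int power_delta_exceeded(int cost, int min_cost)
{
	int delta = cost > min_cost ? cost - min_cost : min_cost - cost;

	return delta > POWER_BAND_DELTA;
}

/* The tie-break cascade of the patch: band, load, cost, then C-state. */
static int select_best_cpu(const struct cpu_stat *cpus, int nr_cpus)
{
	unsigned long long min_load = ULLONG_MAX;
	int min_cost = INT_MAX, min_cstate = INT_MAX;
	int best_cpu = -1, i;

	for (i = 0; i < nr_cpus; i++) {
		unsigned long long load = cpus[i].load;
		int cost = cpus[i].power_cost;
		int cstate = cpus[i].cstate;

		/* A lower power band overrides both load and C-state. */
		if (power_delta_exceeded(cost, min_cost)) {
			if (cost < min_cost) {
				min_load = load;
				min_cost = cost;
				min_cstate = cstate;
				best_cpu = i;
			}
			continue;
		}

		/* Within a band, the least-loaded CPU wins. */
		if (load < min_load) {
			min_load = load;
			min_cost = cost;
			min_cstate = cstate;
			best_cpu = i;
			continue;
		}
		if (load > min_load)
			continue;

		/* Equal load: cheaper power cost wins... */
		if (cost < min_cost) {
			min_cost = cost;
			min_cstate = cstate;
			best_cpu = i;
			continue;
		}
		/* ...and the shallowest C-state breaks the final tie. */
		if (cost == min_cost && cstate < min_cstate) {
			min_cstate = cstate;
			best_cpu = i;
		}
	}

	return best_cpu;
}

int main(void)
{
	/*
	 * CPUs 0 and 1 tie on load and power cost; CPU 1 is in a
	 * shallower C-state. CPU 2 sits in a higher power band.
	 */
	struct cpu_stat cpus[] = {
		{ .load = 0, .power_cost = 20, .cstate = 3 },
		{ .load = 0, .power_cost = 20, .cstate = 1 },
		{ .load = 0, .power_cost = 50, .cstate = 0 },
	};

	printf("best_cpu = %d\n",
	       select_best_cpu(cpus, (int)(sizeof(cpus) / sizeof(cpus[0]))));
	return 0;
}
```

With the sample data the sketch prints `best_cpu = 1`: CPU 1 ties CPU 0 on load and power cost but sits in a shallower C-state, while CPU 2 loses outright for being in a higher power band.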
