summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorValentin Schneider <valentin.schneider@arm.com>2017-03-03 11:43:03 +0000
committerAndres Oportus <andresoportus@google.com>2017-06-02 08:01:54 -0700
commitfef0112a63989830cfd60f6ede53014600e751e0 (patch)
tree9e08b67252576ffe2bd67376adb38e1e107b0213 /kernel
parent83f462daa328f2f42c3c1f7f5277f71e3fa0f750 (diff)
sched/fair: discount task contribution to find CPU with lowest utilization
In some cases, the new_util of a task can be the same on several CPUs. This causes an issue because target_util is only updated if the current new_util is strictly smaller than target_util. To fix that, the cpu_util_wake() return value is used alongside the new_util value: if two CPUs compute the same new_util value, we now also compare their cpu_util_wake() return values. In that case, the CPU that last ran the task is chosen in priority. Change-Id: Ia1ea2c4b3ec39621372c2f748862317d5b497723 Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c17
1 file changed, 13 insertions, 4 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23e2b5f33ff6..fc4e2529fbd2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6221,7 +6221,8 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
int i;
for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
- unsigned long cur_capacity, new_util;
+ unsigned long cur_capacity, new_util, wake_util;
+ unsigned long min_wake_util = ULONG_MAX;
if (!cpu_online(i))
continue;
@@ -6231,7 +6232,8 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
* so prev_cpu will receive a negative bias due to the double
* accounting. However, the blocked utilization may be zero.
*/
- new_util = cpu_util_wake(i, p) + task_util(p);
+ wake_util = cpu_util_wake(i, p);
+ new_util = wake_util + task_util(p);
/*
* Ensure minimum capacity to grant the required boost.
@@ -6266,8 +6268,15 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
* Find a target cpu with the lowest/highest
* utilization if prefer_idle/!prefer_idle.
*/
- if ((prefer_idle && target_util > new_util) ||
- (!prefer_idle && target_util < new_util)) {
+ if (prefer_idle) {
+ /* Favor the CPU that last ran the task */
+ if (new_util > target_util ||
+ wake_util > min_wake_util)
+ continue;
+ min_wake_util = wake_util;
+ target_util = new_util;
+ target_cpu = i;
+ } else if (target_util < new_util) {
target_util = new_util;
target_cpu = i;
}