author:    Dietmar Eggemann <dietmar.eggemann@arm.com>    2015-01-26 19:47:28 +0000
committer: Amit Pundir <amit.pundir@linaro.org>           2016-09-14 14:48:50 +0530
commit:    11d962803d25d080f08f3f3c448fa9e5727694b7
tree:      14d7c93f33198ca15d571e806d07a0d2d9e1b3d3 /kernel
parent:    d72801bf86b42594ba6e89b809718d2d401b1660
sched: Enable idle balance to pull single task towards cpu with higher capacity
We do not want to miss out on the ability to pull a single remaining
task from a potential source cpu towards an idle destination cpu. Add
an extra criterion to need_active_balance() to kick off active load
balance if the source cpu is over-utilized and has lower capacity than
the destination cpu.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
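For context, the over-utilization test added below is a fixed-point
comparison: capacity_of(cpu) * 1024 < cpu_util(cpu) * capacity_margin
is equivalent to util/capacity > 1024/capacity_margin. A minimal
standalone sketch of the combined criterion, assuming capacity_margin
is 1280 (the ~20% headroom value used elsewhere in the EAS series; it
is not defined in this patch) and stub capacity/util numbers:

/*
 * Standalone sketch, kernel internals stubbed out. capacity_margin =
 * 1280 is an assumption; the capacity/util numbers are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

#define SCHED_CAPACITY_SCALE	1024	/* fixed-point "100%" unit */

static unsigned long capacity_margin = 1280;	/* assumed, ~20% margin */

/* capacity/util stand in for capacity_of(cpu) and cpu_util(cpu). */
static bool overutilized(unsigned long capacity, unsigned long util)
{
	/* capacity * 1024 < util * margin  <=>  util/capacity > 1024/1280 = 80% */
	return (capacity * SCHED_CAPACITY_SCALE) < (util * capacity_margin);
}

int main(void)
{
	/* src: little cpu, capacity 430, one task at util 400 (~93% busy) */
	unsigned long src_cap = 430, src_util = 400;
	/* dst: big cpu, capacity 1024, idle (util 0) */
	unsigned long dst_cap = 1024, dst_util = 0;

	/*
	 * Mirrors the new need_active_balance() criterion: lower-capacity,
	 * over-utilized source and a not-over-utilized destination.
	 */
	if (src_cap < dst_cap &&
	    overutilized(src_cap, src_util) &&
	    !overutilized(dst_cap, dst_util))
		printf("kick active load balance: pull task to big cpu\n");

	return 0;
}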
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 60f2e982c6bf..56362ac036b2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4800,6 +4800,11 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
+static bool cpu_overutilized(int cpu)
+{
+	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+}
+
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
@@ -7016,6 +7021,13 @@ static int need_active_balance(struct lb_env *env)
 			return 1;
 	}
 
+	if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+	    env->src_rq->cfs.h_nr_running == 1 &&
+	    cpu_overutilized(env->src_cpu) &&
+	    !cpu_overutilized(env->dst_cpu)) {
+		return 1;
+	}
+
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
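With both hunks applied, a newly idle higher-capacity cpu can
active-migrate the single remaining task off a saturated lower-capacity
cpu. The regular load-balance pull path cannot help in this case: with
h_nr_running == 1 the only task is the one currently running, which can
only be moved by active balance.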