summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2015-06-25 13:38:18 -0700
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:02:04 -0700
commite9c6508168c4a313dae5e7c11b2e458a7e7fb88b (patch)
tree642b43cb9d4664fee8c638a70d1243fc12243a98 /kernel
parente61ddbb14ca6a2fa636b7ad93ed9b35067383be7 (diff)
sched/fair: Fix capacity and nr_run comparisons in can_migrate_task()
Kernel version 3.18 and beyond alter the definition of sgs->group_capacity whereby it reflects the load a group is capable of taking. In previous kernel versions the term used to refer to the number of effective CPUs available. This change breaks the comparison of capacity with the number of running tasks on a group. To fix this, convert the capacity metric before doing the comparison. Change-Id: I3ebd941273edbcc903a611d9c883773172e86c8e Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org> [joonwoop@codeaurora.org: fixed minor conflict in can_migrate_task().] Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c | 56
1 file changed, 29 insertions, 27 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 58e6f5662947..c564897cbd4e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7752,7 +7752,7 @@ static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
- int twf;
+ int twf, group_cpus;
lockdep_assert_held(&env->src_rq->lock);
@@ -7766,32 +7766,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
return 0;
- if (nr_big_tasks(env->src_rq) &&
- capacity(env->dst_rq) > capacity(env->src_rq) &&
- !is_big_task(p))
- return 0;
-
- if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
- return 0;
-
- twf = task_will_fit(p, env->dst_cpu);
-
- /*
- * Attempt to not pull tasks that don't fit. We may get lucky and find
- * one that actually fits.
- */
- if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
- return 0;
-
- /*
- * Group imbalance can sometimes cause work to be pulled across groups
- * even though the group could have managed the imbalance on its own.
- * Prevent inter-cluster migrations for big tasks when the number of
- * tasks is lower than the capacity of the group.
- */
- if (!twf && env->busiest_nr_running <= env->busiest_grp_capacity)
- return 0;
-
if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
int cpu;
@@ -7825,6 +7799,34 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/* Record that we found atleast one task that could run on dst_cpu */
env->flags &= ~LBF_ALL_PINNED;
+ if (nr_big_tasks(env->src_rq) &&
+ capacity(env->dst_rq) > capacity(env->src_rq) &&
+ !is_big_task(p))
+ return 0;
+
+ if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
+ return 0;
+
+ twf = task_will_fit(p, env->dst_cpu);
+
+ /*
+ * Attempt to not pull tasks that don't fit. We may get lucky and find
+ * one that actually fits.
+ */
+ if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
+ return 0;
+
+ /*
+ * Group imbalance can sometimes cause work to be pulled across groups
+ * even though the group could have managed the imbalance on its own.
+ * Prevent inter-cluster migrations for big tasks when the number of
+ * tasks is lower than the capacity of the group.
+ */
+ group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
+ SCHED_CAPACITY_SCALE);
+ if (!twf && env->busiest_nr_running <= group_cpus)
+ return 0;
+
if (task_running(env->src_rq, p)) {
schedstat_inc(p, se.statistics.nr_failed_migrations_running);
return 0;