summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2014-11-05 14:49:38 -0800
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:01:01 -0700
commit59512f4e49ab5723faec8d3404a704c163e8b744 (patch)
treec1ae4b3ee5bf8adc82f3d7c1fe8bc02a1af8e50c
parent76da8e29cef3597651fde396a294dbf528307ae2 (diff)
sched: Avoid unnecessary load balance when tasks don't fit on dst_cpu
When considering pulling over a task that does not fit on the destination CPU, make sure that the busiest group has exceeded its capacity. While the change is applicable to all groups, the biggest impact will be on migrating big tasks to little CPUs. This should only happen when the big cluster is no longer capable of balancing load within the cluster. This change should have no impact on single cluster systems.

Change-Id: I6d1ef0e0d878460530f036921ce4a4a9c1e1394b
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
-rw-r--r-- kernel/sched/fair.c | 33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0ae7c75fffc7..087f5e072e35 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3578,6 +3578,11 @@ static inline int nr_big_tasks(struct rq *rq)
#define sched_enable_power_aware 0
+static inline int task_will_fit(struct task_struct *p, int cpu)
+{
+ return 1;
+}
+
static inline int select_best_cpu(struct task_struct *p, int target, int reason)
{
return 0;
@@ -6766,6 +6771,8 @@ struct lb_env {
long imbalance;
/* The set of CPUs under consideration for load-balancing */
struct cpumask *cpus;
+ unsigned int busiest_grp_capacity;
+ unsigned int busiest_nr_running;
unsigned int flags;
@@ -6896,6 +6903,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
return 0;
+ if (!task_will_fit(p, env->dst_cpu) &&
+ env->busiest_nr_running <= env->busiest_grp_capacity)
+ return 0;
+
if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
int cpu;
@@ -7819,6 +7830,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
+ env->busiest_nr_running = sgs->sum_nr_running;
+ env->busiest_grp_capacity = sgs->group_capacity;
}
next_group:
@@ -8349,6 +8362,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.imbalance = 0,
.flags = 0,
.loop = 0,
+ .busiest_nr_running = 0,
+ .busiest_grp_capacity = 0,
};
/*
@@ -8796,14 +8811,16 @@ static int active_load_balance_cpu_stop(void *data)
struct task_struct *push_task;
int push_task_detached = 0;
struct lb_env env = {
- .sd = sd,
- .dst_cpu = target_cpu,
- .dst_rq = target_rq,
- .src_cpu = busiest_rq->cpu,
- .src_rq = busiest_rq,
- .idle = CPU_IDLE,
- .flags = 0,
- .loop = 0,
+ .sd = sd,
+ .dst_cpu = target_cpu,
+ .dst_rq = target_rq,
+ .src_cpu = busiest_rq->cpu,
+ .src_rq = busiest_rq,
+ .idle = CPU_IDLE,
+ .busiest_nr_running = 0,
+ .busiest_grp_capacity = 0,
+ .flags = 0,
+ .loop = 0,
};
bool moved = false;