| author | Joonwoo Park <joonwoop@codeaurora.org> | 2015-07-17 13:05:23 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:02:17 -0700 |
| commit | d5bf122db13ca17e819a10d09bfcb8acd5608e5f (patch) | |
| tree | 9f577b1fee82654266f21a8eab8a14502b1fc231 /kernel | |
| parent | b5a9a7b1c714e4a6711f6f88d9b963a8e6fcf9d7 (diff) | |
sched: encourage idle load balance and discourage active load balance
Encourage IDLE and NEWLY_IDLE load balancing by ignoring cache hotness, and
discourage active load balancing by raising the busy-balancing failure
threshold. These changes let idle CPUs help out busy CPUs more
aggressively and reduce unnecessary active load balancing within the
same scheduling domain.
Change-Id: I22f6aba11932ccbb82a436c0532589c46f9148ed
[joonwoop@codeaurora.org: fixed conflict in need_active_balance() and
can_migrate_task().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
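The core of the change is the migration eligibility test in can_migrate_task(). The snippet below is a minimal standalone sketch, separate from the actual kernel diff that follows; the helper name may_migrate and its parameters are hypothetical, chosen only to illustrate that idle and newly-idle balancers now skip the cache-hotness check, while busy balancers still have to exceed the failure threshold.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's cpu_idle_type. */
enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

/*
 * Hypothetical helper mirroring the patched condition in can_migrate_task():
 * a task may be migrated if the balancer is idle or newly idle, the task is
 * cache cold (or NUMA locality prefers the destination), or balancing has
 * already failed more than cache_nice_tries times.
 */
static bool may_migrate(enum cpu_idle_type idle, int tsk_cache_hot,
			unsigned int nr_balance_failed,
			unsigned int cache_nice_tries)
{
	return idle != CPU_NOT_IDLE ||
	       tsk_cache_hot <= 0 ||
	       nr_balance_failed > cache_nice_tries;
}

int main(void)
{
	/* A cache-hot task is migratable by a newly-idle balancer ... */
	printf("%d\n", may_migrate(CPU_NEWLY_IDLE, 1, 0, 1));	/* prints 1 */
	/* ... but a busy balancer still has to fail a few times first. */
	printf("%d\n", may_migrate(CPU_NOT_IDLE, 1, 0, 1));	/* prints 0 */
	return 0;
}
```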
Diffstat (limited to 'kernel')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | kernel/sched/fair.c | 18 |

1 file changed, 12 insertions, 6 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2be602775241..289e79e4b076 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7917,15 +7917,16 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	/*
 	 * Aggressive migration if:
-	 * 1) destination numa is preferred
-	 * 2) task is cache cold, or
-	 * 3) too many balance attempts have failed.
+	 * 1) IDLE or NEWLY_IDLE balance.
+	 * 2) destination numa is preferred
+	 * 3) task is cache cold, or
+	 * 4) too many balance attempts have failed.
 	 */
 	tsk_cache_hot = migrate_degrades_locality(p, env);
 	if (tsk_cache_hot == -1)
 		tsk_cache_hot = task_hot(p, env);
 
-	if (tsk_cache_hot <= 0 ||
+	if (env->idle != CPU_NOT_IDLE || tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 		if (tsk_cache_hot == 1) {
 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
@@ -9302,6 +9303,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 /* Working cpumask for load_balance and load_balance_newidle. */
 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 
+#define NEED_ACTIVE_BALANCE_THRESHOLD 10
+
 static int need_active_balance(struct lb_env *env)
 {
 	struct sched_domain *sd = env->sd;
@@ -9333,7 +9336,8 @@ static int need_active_balance(struct lb_env *env)
 		return 1;
 	}
 
-	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+	return unlikely(sd->nr_balance_failed >
+			sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
 }
 
 static int should_we_balance(struct lb_env *env)
@@ -9600,7 +9604,9 @@ no_move:
 			 * We've kicked active balancing, reset the failure
 			 * counter.
 			 */
-			sd->nr_balance_failed = sd->cache_nice_tries+1;
+			sd->nr_balance_failed =
+				sd->cache_nice_tries +
+				NEED_ACTIVE_BALANCE_THRESHOLD - 1;
 		}
 	} else {
 		sd->nr_balance_failed = 0;
```
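For a rough sense of what the new threshold means in practice, here is a small standalone sketch (not kernel code) of the failure-counter arithmetic; cache_nice_tries = 2 is only an assumed example value, since the real value depends on the scheduling domain level. Before the patch, active balance was considered once nr_balance_failed exceeded cache_nice_tries + 2 and the counter was reset to cache_nice_tries + 1 after an active balance was kicked; the patch raises both numbers by 8, preserving the original relationship between the reset value and the threshold (two further failed attempts re-arm active balance).

```c
#include <stdio.h>

#define NEED_ACTIVE_BALANCE_THRESHOLD 10

int main(void)
{
	unsigned int cache_nice_tries = 2;	/* assumed example value */
	unsigned int nr_balance_failed = 0;
	unsigned int failures = 0;

	/* How many consecutive failed busy balances before active balance? */
	while (!(nr_balance_failed >
		 cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD)) {
		nr_balance_failed++;
		failures++;
	}
	printf("active balance considered after %u failures\n", failures); /* 13 */

	/*
	 * The patch resets the counter as below once active balance is kicked,
	 * so two further failed attempts are enough to re-arm it.
	 */
	nr_balance_failed = cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD - 1;
	printf("counter reset to %u (threshold is > %u)\n",
	       nr_balance_failed,
	       cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD); /* 11, 12 */
	return 0;
}
```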
