| author | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2014-09-02 15:42:39 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:00:34 -0700 |
| commit | 9aecd4c576c866c40facea400feb8d13e600ebb0 | |
| tree | bd016488c6112f43a4e8ce3aca5aa1463052adf0 /kernel | |
| parent | 97b9ad42d98bb3c94823d1ed49d8d7a127e1de30 | |
sched: actively migrate tasks to idle big CPUs during sched boost
The sched boost feature is currently tick driven, i.e. task placement
decisions only take place at a tick (or wakeup). The load balancer
does not have any knowledge of boost being in effect. Tasks that are
woken up on a little CPU while all big CPUs are busy will continue
executing there at least until the next tick, even if one of the big
CPUs becomes idle in the meantime. Reduce this latency by teaching
the load balancer to detect whether boost is in effect. If boost is
in effect, any big CPU running idle balance will trigger active
migration from the little CPU with the highest task load.
Change-Id: Ib2828809efa0f9857f5009b29931f63b276a59f3
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
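
As a reading aid for the diff below, here is a minimal standalone sketch of the decision the patch adds to the load balancer. The `struct cpu` model, `sched_boost_active`, and `boost_pick_busiest()` are illustrative stand-ins, not kernel APIs; the real check lives in update_sd_pick_busiest() and find_busiest_group() in kernel/sched/fair.c and uses sched_boost(), capacity() and group_rq_capacity().

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified per-CPU view; stands in for the kernel's rq/sched_group data. */
struct cpu {
	int capacity;
	int nr_running;
	bool idle;
};

/* Stand-in for the kernel's sched_boost() knob. */
static bool sched_boost_active = true;

/*
 * Mirrors the new check in update_sd_pick_busiest(): during boost, an idle
 * higher-capacity (big) destination treats any running lower-capacity
 * (little) CPU as "busiest" so active migration can pull a task over.
 */
static bool boost_pick_busiest(const struct cpu *dst, const struct cpu *src)
{
	return sched_boost_active && src->nr_running > 0 &&
	       dst->idle && dst->capacity > src->capacity;
}

int main(void)
{
	struct cpu big    = { .capacity = 1024, .nr_running = 0, .idle = true };
	struct cpu little = { .capacity = 512,  .nr_running = 2, .idle = false };

	if (boost_pick_busiest(&big, &little))
		printf("boost: idle big CPU actively pulls from the little CPU\n");
	else
		printf("no boost-driven active migration\n");

	return 0;
}
```

In the actual patch this condition also sets the LBF_SCHED_BOOST flag so that need_active_balance() returns true and the failure-counter bookkeeping is skipped, which is what turns the pick into an immediate active migration rather than waiting for repeated balance failures.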
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 23 |
1 file changed, 20 insertions, 3 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 16bfe366e286..9df673c16232 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3510,6 +3510,11 @@ static inline int mostly_idle_cpu(int cpu)
 	return 0;
 }
 
+static inline int sched_boost(void)
+{
+	return 0;
+}
+
 static inline int is_small_task(struct task_struct *p)
 {
 	return 0;
@@ -6641,6 +6646,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_SOME_PINNED	0x08
 #define LBF_IGNORE_SMALL_TASKS 0x10
 #define LBF_PWR_ACTIVE_BALANCE 0x20
+#define LBF_SCHED_BOOST 0x40
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -7579,6 +7585,13 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 {
 	struct sg_lb_stats *busiest = &sds->busiest_stat;
 
+	if (sched_boost() && !sds->busiest && sgs->sum_nr_running &&
+	    (env->idle != CPU_NOT_IDLE) && (capacity(env->dst_rq) >
+	    group_rq_capacity(sg))) {
+		env->flags |= LBF_SCHED_BOOST;
+		return true;
+	}
+
 	if (sgs->group_type > busiest->group_type)
 		return true;
 
@@ -7953,6 +7966,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	if (!sds.busiest || busiest->sum_nr_running == 0)
 		goto out_balanced;
 
+	if (sched_boost() && (capacity(env->dst_rq) >
+	    group_rq_capacity(sds.busiest)))
+		goto force_balance;
+
 	if (bail_inter_cluster_balance(env, &sds))
 		goto out_balanced;
 
@@ -8136,7 +8153,7 @@ static int need_active_balance(struct lb_env *env)
 {
 	struct sched_domain *sd = env->sd;
 
-	if (env->flags & LBF_PWR_ACTIVE_BALANCE)
+	if (env->flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST))
 		return 1;
 
 	if (env->idle == CPU_NEWLY_IDLE) {
@@ -8369,7 +8386,7 @@ more_balance:
 	}
 
 	if (!ld_moved) {
-		if (!(env.flags & LBF_PWR_ACTIVE_BALANCE))
+		if (!(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
 			schedstat_inc(sd, lb_failed[idle]);
 
 		/*
@@ -8379,7 +8396,7 @@ more_balance:
 		 * excessive cache_hot migrations and active balances.
 		 */
 		if (idle != CPU_NEWLY_IDLE &&
-		    !(env.flags & LBF_PWR_ACTIVE_BALANCE))
+		    !(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
 			sd->nr_balance_failed++;
 
 		if (need_active_balance(&env)) {
```
