diff options
| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-03-31 12:11:12 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 19:59:06 -0700 |
| commit | 06a5fb422d821bf0d72b6a944fe4ad4930149682 (patch) | |
| tree | 88375ffeddb7eacd8f6ebba5ffa5b946bdf019cc /kernel/sched | |
| parent | 45acc2457b618c0ab056f708fb8ee030e44015dc (diff) | |
sched: Handle cpu-bound tasks stuck on wrong cpu
CPU-bound tasks that don't sleep for long intervals can stay stuck on
the wrong cpu, as the selection of "ideal" cpu for tasks largely
happens during task wakeup time. This patch adds a check in the
scheduler tick for task/cpu mismatch (big task on little cpu OR
little task on big cpu) and forces migration of such tasks to their
ideal cpu (via select_best_cpu()).
Change-Id: Icac3485b6aa4b558c4ed9df23c2e81fb8f4bb9d9
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/core.c | 1 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 55 | ||||
| -rw-r--r-- | kernel/sched/sched.h | 2 |
3 files changed, 56 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b288947994b7..7cf0dbf77da1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3134,6 +3134,7 @@ void scheduler_tick(void) trigger_load_balance(rq); #endif rq_last_tick_reset(rq); + check_for_migration(rq, curr); } #ifdef CONFIG_NO_HZ_FULL diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4235fd175760..fde9c1266a17 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -244,6 +244,9 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight return mul_u64_u32_shr(delta_exec, fact, shift); } +#ifdef CONFIG_SMP +static int active_load_balance_cpu_stop(void *data); +#endif const struct sched_class fair_sched_class; @@ -2825,6 +2828,56 @@ static inline int find_new_hmp_ilb(void) return best_cpu; } +/* + * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal + * cpu as per its demand or priority) + */ +static inline int migration_needed(struct rq *rq, struct task_struct *p) +{ + int nice = task_nice(p); + + /* Todo: cgroup-based control? */ + if (nice > sysctl_sched_upmigrate_min_nice && + rq->capacity > min_capacity) + return 1; + + return !task_will_fit(p, cpu_of(rq)); +} + +/* + * cpu-bound tasks will not go through select_best_cpu() and hence can be stuck + * on the wrong cpu. Check if any such tasks need to be "force-migrated" + * + * Todo: Effect this via changes to nohz_balancer_kick() and load balance? 
+ */ +void check_for_migration(struct rq *rq, struct task_struct *p) +{ + int cpu = cpu_of(rq), new_cpu = cpu; + unsigned long flags; + int active_balance = 0; + + if (migration_needed(rq, p)) + new_cpu = select_best_cpu(p, cpu); + + if (new_cpu == cpu) + return; + + /* Invoke active balance to force migrate currently running task */ + raw_spin_lock_irqsave(&rq->lock, flags); + if (!rq->active_balance) { + rq->active_balance = 1; + rq->push_cpu = new_cpu; + get_task_struct(p); + rq->push_task = p; + active_balance = 1; + } + raw_spin_unlock_irqrestore(&rq->lock, flags); + + if (active_balance) + stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq, + &rq->active_balance_work); +} + #else /* CONFIG_SCHED_HMP */ static inline int select_best_cpu(struct task_struct *p, int target) @@ -7345,8 +7398,6 @@ static int need_active_balance(struct lb_env *env) return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); } -static int active_load_balance_cpu_stop(void *data); - static int should_we_balance(struct lb_env *env) { struct sched_group *sg = env->sd->groups; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 00d7f187a88c..1b1f75e10cfa 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -983,10 +983,12 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu) #ifdef CONFIG_SCHED_HMP +extern void check_for_migration(struct rq *rq, struct task_struct *p); extern void set_hmp_defaults(void); #else /* CONFIG_SCHED_HMP */ +static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } static inline void set_hmp_defaults(void) { } #endif /* CONFIG_SCHED_HMP */ |
