| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-07-27 22:04:10 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:00:08 -0700 |
| commit | 8f8c8db1c5f49f90efa9e2f7656ead216a7643cb (patch) | |
| tree | 549570b92b8980b53095c4da7582f2710891a6e4 | /kernel |
| parent | 35bf2d9d10710770c8f7d070b64381f8dde54e80 (diff) | |
sched: Avoid needless migration
Restrict check_for_migration() to operate on fair_sched_class tasks
only.
check_for_migration() can also call select_best_cpu() to look for a
better cpu for the task currently running on a cpu. However,
select_best_cpu() can end up suggesting a cpu that is not necessarily
better than the one the task is already running on, resulting in
unnecessary migration. Prevent that from happening by passing the
reason for migration to select_best_cpu() and having it skip cpus
that are no better for that reason.
Change-Id: I391cdda0d7285671d5f79aa2da12eaaa6cae42d7
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 3 |
| -rw-r--r-- | kernel/sched/fair.c | 67 |
2 files changed, 55 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a2d75c9a6321..bb949ae0fdb9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3896,7 +3896,8 @@ void scheduler_tick(void)
 	trigger_load_balance(rq);
 #endif
 	rq_last_tick_reset(rq);
-	check_for_migration(rq, curr);
+	if (curr->sched_class == &fair_sched_class)
+		check_for_migration(rq, curr);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f7600cbfc658..436efe2a1f49 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3026,8 +3026,38 @@ static int best_small_task_cpu(struct task_struct *p)
 	return best_fallback_cpu;
 }
 
+#define MOVE_TO_BIG_CPU			1
+#define MOVE_TO_LITTLE_CPU		2
+#define MOVE_TO_POWER_EFFICIENT_CPU	3
+
+static int skip_cpu(struct task_struct *p, int cpu, int reason)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct rq *task_rq = task_rq(p);
+	int skip = 0;
+
+	if (!reason)
+		return 0;
+
+	switch (reason) {
+	case MOVE_TO_BIG_CPU:
+		skip = (rq->capacity <= task_rq->capacity);
+		break;
+
+	case MOVE_TO_LITTLE_CPU:
+		skip = (rq->capacity >= task_rq->capacity);
+		break;
+
+	default:
+		skip = (cpu == task_cpu(p));
+		break;
+	}
+
+	return skip;
+}
+
 /* return cheapest cpu that can fit this task */
-static int select_best_cpu(struct task_struct *p, int target)
+static int select_best_cpu(struct task_struct *p, int target, int reason)
 {
 	int i, best_cpu = -1, fallback_idle_cpu = -1;
 	int prev_cpu = task_cpu(p);
@@ -3044,6 +3074,10 @@ static int select_best_cpu(struct task_struct *p, int target)
 
 	/* Todo : Optimize this loop */
 	for_each_cpu_and(i, tsk_cpus_allowed(p), cpu_online_mask) {
+
+		if (skip_cpu(p, i, reason))
+			continue;
+
 		trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
 				     mostly_idle_cpu(i), power_cost(p, i));
@@ -3322,10 +3356,11 @@ static int lower_power_cpu_available(struct task_struct *p, int cpu)
 	return (lowest_power_cpu != task_cpu(p));
 }
 
-
 /*
  * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
  * cpu as per its demand or priority)
+ *
+ * Returns reason why task needs to be migrated
  */
 static inline int migration_needed(struct rq *rq, struct task_struct *p)
 {
@@ -3334,8 +3369,12 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 	if (!sched_enable_hmp || p->state != TASK_RUNNING)
 		return 0;
 
-	if (sched_boost())
-		return (rq->capacity != max_capacity);
+	if (sched_boost()) {
+		if (rq->capacity != max_capacity)
+			return MOVE_TO_BIG_CPU;
+
+		return 0;
+	}
 
 	if (is_small_task(p))
 		return 0;
@@ -3343,21 +3382,20 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 	/* Todo: cgroup-based control? */
 	if (nice > sysctl_sched_upmigrate_min_nice &&
 			 rq->capacity > min_capacity)
-		return 1;
+		return MOVE_TO_LITTLE_CPU;
 
 	if (!task_will_fit(p, cpu_of(rq)))
-		return 1;
+		return MOVE_TO_BIG_CPU;
 
 	if (sched_enable_power_aware &&
 	    lower_power_cpu_available(p, cpu_of(rq)))
-		return 1;
+		return MOVE_TO_POWER_EFFICIENT_CPU;
 
 	return 0;
 }
 
 /*
- * cpu-bound tasks will not go through select_best_cpu() and hence can be stuck
- * on the wrong cpu. Check if any such tasks need to be "force-migrated"
+ * Check if currently running task should be migrated to a better cpu.
  *
  * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
  */
@@ -3365,10 +3403,11 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 {
 	int cpu = cpu_of(rq), new_cpu = cpu;
 	unsigned long flags;
-	int active_balance = 0;
+	int active_balance = 0, rc;
 
-	if (migration_needed(rq, p))
-		new_cpu = select_best_cpu(p, cpu);
+	rc = migration_needed(rq, p);
+	if (rc)
+		new_cpu = select_best_cpu(p, cpu, rc);
 
 	if (new_cpu == cpu)
 		return;
@@ -3403,7 +3442,7 @@ static inline int nr_big_tasks(struct rq *rq)
 
 #define sched_enable_power_aware 0
 
-static inline int select_best_cpu(struct task_struct *p, int target)
+static inline int select_best_cpu(struct task_struct *p, int target, int reason)
 {
 	return 0;
 }
@@ -5968,7 +6007,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	int sync = wake_flags & WF_SYNC;
 
 	if (sched_enable_hmp)
-		return select_best_cpu(p, prev_cpu);
+		return select_best_cpu(p, prev_cpu, 0);
 
 	if (sd_flag & SD_BALANCE_WAKE)
 		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
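The core of the change is the reason-gated filter: select_best_cpu() now rejects candidate cpus that cannot be better than the task's current cpu for the specific reason migration was requested. Below is a minimal, standalone sketch of that filter. The `struct rq` here is a simplified stand-in (capacity as a bare int) and the cpu table in main() is invented for illustration; only the switch logic mirrors the patch's skip_cpu().

```c
#include <stdio.h>

#define MOVE_TO_BIG_CPU			1
#define MOVE_TO_LITTLE_CPU		2
#define MOVE_TO_POWER_EFFICIENT_CPU	3

/* simplified stand-in for the kernel's struct rq */
struct rq {
	int capacity;	/* relative cpu capacity, bigger is faster */
};

/*
 * Mirror of the patch's skip_cpu(): reject a candidate cpu that cannot
 * be better than the task's current cpu for the given migration reason.
 */
static int skip_cpu(const struct rq *candidate, const struct rq *current_rq,
		    int candidate_cpu, int task_cpu, int reason)
{
	switch (reason) {
	case MOVE_TO_BIG_CPU:
		/* only cpus with strictly higher capacity can help */
		return candidate->capacity <= current_rq->capacity;
	case MOVE_TO_LITTLE_CPU:
		/* only cpus with strictly lower capacity can help */
		return candidate->capacity >= current_rq->capacity;
	default:
		/* power efficiency: any cpu except the current one may help */
		return candidate_cpu == task_cpu;
	}
}

int main(void)
{
	/* hypothetical 2-big/2-little system; values are illustrative */
	struct rq cpus[4] = { {1024}, {1024}, {512}, {512} };
	int task_cpu = 2;	/* task currently on a little cpu */
	int cpu;

	/* with MOVE_TO_BIG_CPU, only the big cpus 0 and 1 survive */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: %s\n", cpu,
		       skip_cpu(&cpus[cpu], &cpus[task_cpu], cpu,
				task_cpu, MOVE_TO_BIG_CPU) ?
		       "skipped" : "candidate");
	return 0;
}
```

Because every reason maps to a strict inequality (or to excluding the current cpu), select_best_cpu() can no longer return a cpu equivalent to the one the task already runs on, which is what eliminates the needless migrations the commit message describes.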
