Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c | 110
1 file changed, 46 insertions(+), 64 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9263ffd5673f..b3a8411bac2b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3660,68 +3660,6 @@ static inline int migration_needed(struct task_struct *p, int cpu)
 	return 0;
 }
 
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
-	unsigned long flags;
-	int rc = 0;
-
-	/* Invoke active balance to force migrate currently running task */
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	if (!rq->active_balance) {
-		rq->active_balance = 1;
-		rq->push_cpu = new_cpu;
-		get_task_struct(p);
-		rq->push_task = p;
-		rc = 1;
-	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return rc;
-}
-
-static DEFINE_RAW_SPINLOCK(migration_lock);
-
-static bool do_migration(int reason, int new_cpu, int cpu)
-{
-	if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
-			&& same_cluster(new_cpu, cpu))
-		return false;
-
-	/* Inter cluster high irqload migrations are OK */
-	return new_cpu != cpu;
-}
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
-	int cpu = cpu_of(rq), new_cpu;
-	int active_balance = 0, reason;
-
-	reason = migration_needed(p, cpu);
-	if (!reason)
-		return;
-
-	raw_spin_lock(&migration_lock);
-	new_cpu = select_best_cpu(p, cpu, reason, 0);
-
-	if (do_migration(reason, new_cpu, cpu)) {
-		active_balance = kick_active_balance(rq, p, new_cpu);
-		if (active_balance)
-			mark_reserved(new_cpu);
-	}
-
-	raw_spin_unlock(&migration_lock);
-
-	if (active_balance)
-		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
-				&rq->active_balance_work);
-}
-
 #ifdef CONFIG_CFS_BANDWIDTH
 
 static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
@@ -7464,7 +7402,9 @@ done:
 
 /*
  * cpu_util_wake: Compute cpu utilization with any contributions from
- * the waking task p removed.
+ * the waking task p removed. check_for_migration() looks for a better CPU of
+ * rq->curr. For that case we should return cpu util with contributions from
+ * currently running task p removed.
  */
 static int cpu_util_wake(int cpu, struct task_struct *p)
 {
@@ -7477,7 +7417,8 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
 	 * utilization from cpu utilization. Instead just use
 	 * cpu_util for this case.
 	 */
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+	    p->state == TASK_WAKING)
 		return cpu_util(cpu);
 #endif
 	/* Task has no contribution or is new */
@@ -11582,6 +11523,47 @@ static void rq_offline_fair(struct rq *rq)
 	unthrottle_offline_cfs_rqs(rq);
 }
 
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	int rc = 0;
+
+	/* Invoke active balance to force migrate currently running task */
+	raw_spin_lock(&rq->lock);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock(&rq->lock);
+
+	return rc;
+}
+
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int new_cpu;
+	int active_balance;
+	int cpu = task_cpu(p);
+
+	if (rq->misfit_task) {
+		if (rq->curr->state != TASK_RUNNING ||
+		    rq->curr->nr_cpus_allowed == 1)
+			return;
+
+		new_cpu = select_energy_cpu_brute(p, cpu, 0);
+		if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
+			active_balance = kick_active_balance(rq, p, new_cpu);
+			if (active_balance)
+				stop_one_cpu_nowait(cpu,
+						active_load_balance_cpu_stop,
+						rq, &rq->active_balance_work);
+		}
+	}
+}
+
 #endif /* CONFIG_SMP */
 
 /*
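The decision flow the rewritten check_for_migration() implements can be read in isolation: act only when the tick has flagged rq->curr as a misfit, pick a candidate CPU, upmigrate only if the target has strictly higher original capacity, and claim the per-rq active_balance flag exactly once before kicking the stopper. Below is a minimal user-space sketch of that flow, not the kernel code: the capacity table, pick_higher_capacity_cpu() helper, struct rq fields, and printf output are all mocks standing in for capacity_orig_of(), select_energy_cpu_brute(), and the real runqueue; locking and the TASK_RUNNING/nr_cpus_allowed checks are omitted.

/*
 * Sketch of the misfit upmigration decision, under the assumptions above.
 * Only the control flow mirrors check_for_migration()/kick_active_balance().
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* Mock per-CPU original capacities: two little cores, two big cores. */
static const unsigned long capacity_orig[NR_CPUS] = { 512, 512, 1024, 1024 };

struct rq {
	int cpu;
	bool misfit_task;	/* set by the tick when curr overflows its CPU */
	bool active_balance;	/* guards against a second concurrent kick */
	int push_cpu;		/* destination chosen for the pushed task */
};

/* Mock stand-in for select_energy_cpu_brute(): pick the biggest CPU. */
static int pick_higher_capacity_cpu(int cpu)
{
	int i, best = cpu;

	for (i = 0; i < NR_CPUS; i++)
		if (capacity_orig[i] > capacity_orig[best])
			best = i;
	return best;
}

/* Mirrors kick_active_balance(): claim the flag once, report if we won. */
static bool kick_active_balance(struct rq *rq, int new_cpu)
{
	if (rq->active_balance)
		return false;	/* a push is already in flight */
	rq->active_balance = true;
	rq->push_cpu = new_cpu;
	return true;
}

/* Mirrors the new check_for_migration(): act only on flagged misfits. */
static void check_for_migration(struct rq *rq)
{
	int new_cpu;

	if (!rq->misfit_task)
		return;

	new_cpu = pick_higher_capacity_cpu(rq->cpu);
	/* Upmigrate only when the target is strictly bigger. */
	if (capacity_orig[new_cpu] > capacity_orig[rq->cpu] &&
	    kick_active_balance(rq, new_cpu))
		printf("cpu%d: pushing misfit task to cpu%d\n",
		       rq->cpu, rq->push_cpu);
}

int main(void)
{
	struct rq rq = { .cpu = 0, .misfit_task = true };

	check_for_migration(&rq);	/* first call kicks the push */
	check_for_migration(&rq);	/* second call is a no-op: flag held */
	return 0;
}

The once-only claim of active_balance is the point of the flag dance in the patch: the kernel version takes rq->lock around it so that concurrent ticks cannot both queue active_load_balance_cpu_stop() for the same runqueue.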
