author     Joonwoo Park <joonwoop@codeaurora.org>    2017-05-18 17:43:58 -0700
committer  Todd Kjos <tkjos@google.com>              2017-11-01 15:09:35 -0700
commit     9e293db0522f2332c3c89f431c488a3f525bc4e6 (patch)
tree       ebcccb34f731769867a237f26d96a9cf0204c0cf
parent     dc626b28ee7cf9269eaadc512a2c497acfae0109 (diff)
sched: EAS: upmigrate misfit current task
Upmigrate a misfit current task upon scheduler tick using the stopper
thread. We could kick a random NOHZ-idle CPU (not necessarily a big
one) when a CPU-bound task needs upmigration, but that is inefficient
because it requires the following unnecessary wakeups:

1. Busy little CPU A kicks idle CPU B.
2. B runs the idle balancer and enqueues migration/A.
3. B goes idle.
4. A runs migration/A, which enqueues the busy task on B.
5. B wakes up again.

This change makes active upmigration more efficient:

1. Busy little CPU A finds target CPU B upon tick.
2. CPU A enqueues migration/A.

Change-Id: Ie865738054ea3296f28e6ba01710635efa7193c0
[joonwoop: The original version had logic to reserve the CPU. That
logic is omitted in this version.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
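In short, the patch replaces five cross-CPU wakeups with one local
stopper kick. A condensed, illustration-only sketch of the new tick
path (the wrapper name tick_upmigration_sketch is hypothetical; the
helpers are the ones added in the diff below, so this is not a
standalone buildable unit):

/*
 * Illustration only -- condensed view of the flow this patch adds.
 * tick_upmigration_sketch() is a hypothetical name for exposition.
 */
static void tick_upmigration_sketch(struct rq *rq, struct task_struct *curr)
{
	/*
	 * Step 1: on its own tick, busy little CPU A inspects curr and,
	 * via check_for_migration(), asks the energy-aware selection
	 * path for a higher-capacity target CPU B.
	 */
	if (curr->sched_class == &fair_sched_class)
		check_for_migration(rq, curr);

	/*
	 * Step 2 happens inside check_for_migration(): when B has more
	 * capacity than A, kick_active_balance() flags the rq and A
	 * enqueues its own stopper (migration/A) via
	 * stop_one_cpu_nowait(), so no idle CPU needs to be woken just
	 * to run the load balancer.
	 */
}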
-rw-r--r--  kernel/sched/core.c   |  3
-rw-r--r--  kernel/sched/fair.c   | 48
-rw-r--r--  kernel/sched/sched.h  |  2
3 files changed, 51 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 679791f6c2df..0c9e332ceb3b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3097,6 +3097,9 @@ void scheduler_tick(void)
 	trigger_load_balance(rq);
 #endif
 	rq_last_tick_reset(rq);
+
+	if (curr->sched_class == &fair_sched_class)
+		check_for_migration(rq, curr);
 }

 #ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8ffebf95a522..ac22d32a6255 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6312,7 +6312,9 @@ done:

 /*
  * cpu_util_wake: Compute cpu utilization with any contributions from
- * the waking task p removed.
+ * the waking task p removed. check_for_migration() looks for a better CPU of
+ * rq->curr. For that case we should return cpu util with contributions from
+ * currently running task p removed.
  */
 static int cpu_util_wake(int cpu, struct task_struct *p)
 {
@@ -6325,7 +6327,8 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
 	 * utilization from cpu utilization. Instead just use
 	 * cpu_util for this case.
 	 */
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+	    p->state == TASK_WAKING)
 		return cpu_util(cpu);
 #endif
 	/* Task has no contribution or is new */
@@ -9974,6 +9977,47 @@ static void rq_offline_fair(struct rq *rq)
 	unthrottle_offline_cfs_rqs(rq);
 }

+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	int rc = 0;
+
+	/* Invoke active balance to force migrate currently running task */
+	raw_spin_lock(&rq->lock);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock(&rq->lock);
+
+	return rc;
+}
+
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int new_cpu;
+	int active_balance;
+	int cpu = task_cpu(p);
+
+	if (rq->misfit_task) {
+		if (rq->curr->state != TASK_RUNNING ||
+		    rq->curr->nr_cpus_allowed == 1)
+			return;
+
+		new_cpu = select_energy_cpu_brute(p, cpu, 0);
+		if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
+			active_balance = kick_active_balance(rq, p, new_cpu);
+			if (active_balance)
+				stop_one_cpu_nowait(cpu,
+						active_load_balance_cpu_stop,
+						rq, &rq->active_balance_work);
+		}
+	}
+}
+
 #endif /* CONFIG_SMP */

 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index af2fd9ccaddf..0238e94b0a1e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -31,8 +31,10 @@ extern long calc_load_fold_active(struct rq *this_rq);

 #ifdef CONFIG_SMP
 extern void update_cpu_load_active(struct rq *this_rq);
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
 #else
 static inline void update_cpu_load_active(struct rq *this_rq) { }
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 #endif

 /*