author     Linux Build Service Account <lnxbuild@quicinc.com>    2018-01-09 15:40:40 -0800
committer  Gerrit - the friendly Code Review server <code-review@localhost>    2018-01-09 15:40:39 -0800
commit     fcf826264c8d635251a751a14dc36224eb7e5fe9 (patch)
tree       bcaccb903233186b3bd77206992fd05223856f4f /kernel/sched
parent     20ab01289c7807a88a38f67e98ee683984aec1e1 (diff)
parent     6d5adb184946b1751b8e3ea522c8e382bcd6ec0c (diff)
Merge "sched: Restore previous implementation of check_for_migration()"
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c  | 103
-rw-r--r--  kernel/sched/sched.h |   4
2 files changed, 64 insertions(+), 43 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e515311aa93c..6fc5de10673e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3657,6 +3657,68 @@ static inline int migration_needed(struct task_struct *p, int cpu)
return 0;
}
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* Invoke active balance to force migrate currently running task */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ get_task_struct(p);
+ rq->push_task = p;
+ rc = 1;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+static bool do_migration(int reason, int new_cpu, int cpu)
+{
+ if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
+ && same_cluster(new_cpu, cpu))
+ return false;
+
+ /* Inter cluster high irqload migrations are OK */
+ return new_cpu != cpu;
+}
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+ int cpu = cpu_of(rq), new_cpu;
+ int active_balance = 0, reason;
+
+ reason = migration_needed(p, cpu);
+ if (!reason)
+ return;
+
+ raw_spin_lock(&migration_lock);
+ new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+ if (do_migration(reason, new_cpu, cpu)) {
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance)
+ mark_reserved(new_cpu);
+ }
+
+ raw_spin_unlock(&migration_lock);
+
+ if (active_balance)
+ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+ &rq->active_balance_work);
+}
+
#ifdef CONFIG_CFS_BANDWIDTH
static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
@@ -11436,47 +11498,6 @@ static void rq_offline_fair(struct rq *rq)
unthrottle_offline_cfs_rqs(rq);
}
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
- int rc = 0;
-
- /* Invoke active balance to force migrate currently running task */
- raw_spin_lock(&rq->lock);
- if (!rq->active_balance) {
- rq->active_balance = 1;
- rq->push_cpu = new_cpu;
- get_task_struct(p);
- rq->push_task = p;
- rc = 1;
- }
- raw_spin_unlock(&rq->lock);
-
- return rc;
-}
-
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
- int new_cpu;
- int active_balance;
- int cpu = task_cpu(p);
-
- if (rq->misfit_task) {
- if (rq->curr->state != TASK_RUNNING ||
- rq->curr->nr_cpus_allowed == 1)
- return;
-
- new_cpu = select_energy_cpu_brute(p, cpu, 0);
- if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
- active_balance = kick_active_balance(rq, p, new_cpu);
- if (active_balance)
- stop_one_cpu_nowait(cpu,
- active_load_balance_cpu_stop,
- rq, &rq->active_balance_work);
- }
- }
-}
-
#endif /* CONFIG_SMP */
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c0e3314bf4c2..7b31973c6db3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -32,10 +32,8 @@ extern long calc_load_fold_active(struct rq *this_rq);
#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
#endif
/*
@@ -1449,6 +1447,7 @@ static inline bool is_short_burst_task(struct task_struct *p)
p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
}
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void pre_big_task_count_change(const struct cpumask *cpus);
extern void post_big_task_count_change(const struct cpumask *cpus);
extern void set_hmp_defaults(void);
@@ -1708,6 +1707,7 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
return 1;
}
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline void pre_big_task_count_change(void) { }
static inline void post_big_task_count_change(void) { }
static inline void set_hmp_defaults(void) { }
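
For reference, a minimal user-space sketch (not part of this commit) of the claim-then-kick pattern the restored check_for_migration()/kick_active_balance() path relies on: claim the per-runqueue active_balance flag under the rq lock so only one caller queues the push work, then run the work outside the lock. All names below (toy_rq, toy_kick_active_balance, toy_push_work) are made up for illustration and are not kernel APIs.

/*
 * Illustrative analogue of the active-balance kick: only the caller that
 * flips active_balance from 0 to 1 under the lock gets to queue the push;
 * the heavy work itself runs after the lock is dropped.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_rq {
	pthread_mutex_t lock;	/* stands in for rq->lock */
	int active_balance;	/* stands in for rq->active_balance */
	int push_cpu;		/* destination CPU for the push */
};

/* Returns 1 if this caller won the right to push, 0 if a push is pending. */
static int toy_kick_active_balance(struct toy_rq *rq, int new_cpu)
{
	int rc = 0;

	pthread_mutex_lock(&rq->lock);
	if (!rq->active_balance) {
		rq->active_balance = 1;
		rq->push_cpu = new_cpu;
		rc = 1;
	}
	pthread_mutex_unlock(&rq->lock);

	return rc;
}

/* Stand-in for active_load_balance_cpu_stop(): do the work, clear the flag. */
static void toy_push_work(struct toy_rq *rq)
{
	printf("pushing current task to CPU %d\n", rq->push_cpu);

	pthread_mutex_lock(&rq->lock);
	rq->active_balance = 0;
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct toy_rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER };

	int first = toy_kick_active_balance(&rq, 2);
	int second = toy_kick_active_balance(&rq, 3);	/* loses: push pending */

	printf("first kick won: %d, second kick won: %d\n", first, second);

	if (first)
		toy_push_work(&rq);	/* done outside the lock, as in the patch */

	return 0;
}

Running the sketch prints that only the first kick wins; the second sees active_balance already set, which mirrors why check_for_migration() only calls stop_one_cpu_nowait() when kick_active_balance() returned 1.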