diff options
| author | Joonwoo Park <joonwoop@codeaurora.org> | 2015-08-31 17:21:35 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:02:43 -0700 |
| commit | 07eb3f803b419bdebad45d846113dc069307be90 (patch) | |
| tree | 845a39607dde5f403295b161cbd321ee4e4f3122 /kernel/sched | |
| parent | fd38bb103d3e0be4796dd9fa19c2d0c90c06cf6a (diff) | |
sched: select task's prev_cpu as the best CPU when it was chosen recently
Select a given task's prev_cpu when the task slept for a short period to
reduce the latency of task placement and migrations. A new tunable,
/proc/sys/kernel/sched_select_prev_cpu_us, is introduced to determine
whether tasks are eligible to go through the fast path.
CRs-fixed: 947467
Change-Id: Ia507665b91f4e9f0e6ee1448d8df8994ead9739a
[joonwoop@codeaurora.org: fixed conflict in include/linux/sched.h,
include/linux/sched/sysctl.h, kernel/sched/core.c and kernel/sysctl.c]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/core.c | 11 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 57 |
2 files changed, 62 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 983e5f1363e8..dea3a7ae3787 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -859,6 +859,12 @@ static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) { p->last_wake_ts = wallclock; } + +static inline void set_task_last_switch_out(struct task_struct *p, + u64 wallclock) +{ + p->last_switch_out_ts = wallclock; +} #else u64 sched_ktime_clock(void) { @@ -867,6 +873,8 @@ u64 sched_ktime_clock(void) static inline void clear_ed_task(struct task_struct *p, struct rq *rq) {} static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) {} +static inline void set_task_last_switch_out(struct task_struct *p, + u64 wallclock) {} #endif #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ @@ -2196,6 +2204,7 @@ static inline void mark_task_starting(struct task_struct *p) wallclock = sched_ktime_clock(); p->ravg.mark_start = p->last_wake_ts = wallclock; + p->last_switch_out_ts = 0; } static inline void set_window_start(struct rq *rq) @@ -5308,6 +5317,8 @@ static void __sched notrace __schedule(bool preempt) rq->curr = next; ++*switch_count; + set_task_last_switch_out(prev, wallclock); + trace_sched_switch(preempt, prev, next); rq = context_switch(rq, prev, next); /* unlocks the rq */ cpu = cpu_of(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3813be29b6e8..9ffcaa5cf41c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2772,6 +2772,14 @@ unsigned int up_down_migrate_scale_factor = 1024; */ unsigned int sysctl_sched_boost; +/* + * Scheduler selects and places task to its previous CPU if sleep time is + * less than sysctl_sched_select_prev_cpu_us. 
+ */ +static unsigned int __read_mostly +sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC; +unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000; + static inline int available_cpu_capacity(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -2829,6 +2837,9 @@ void set_hmp_defaults(void) (u64)sched_ravg_window, 100); sched_upmigrate_min_nice = sysctl_sched_upmigrate_min_nice; + + sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us * + NSEC_PER_USEC; } u32 sched_get_init_task_load(struct task_struct *p) @@ -3175,6 +3186,21 @@ static inline int wake_to_idle(struct task_struct *p) (p->flags & PF_WAKE_UP_IDLE); } +static inline bool short_sleep_task_waking(struct task_struct *p, int prev_cpu, + const cpumask_t *search_cpus) +{ + /* + * This function should be used by task wake up path only as it's + * assuming p->last_switch_out_ts as last sleep time. + * p->last_switch_out_ts can denote last preemption time as well as + * last sleep time. + */ + return (sched_short_sleep_task_threshold && + (p->ravg.mark_start - p->last_switch_out_ts < + sched_short_sleep_task_threshold) && + cpumask_test_cpu(prev_cpu, search_cpus)); +} + /* return cheapest cpu that can fit this task */ static int select_best_cpu(struct task_struct *p, int target, int reason, int sync) @@ -3187,11 +3213,32 @@ static int select_best_cpu(struct task_struct *p, int target, int reason, s64 spare_capacity, highest_spare_capacity = 0; int boost = sched_boost(); int need_idle = wake_to_idle(p); + bool fast_path = false; cpumask_t search_cpus; struct rq *trq; - trq = task_rq(p); cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask); + + if (!boost && !reason && !need_idle && + short_sleep_task_waking(p, prev_cpu, &search_cpus)) { + cpu_load = cpu_load_sync(prev_cpu, sync); + tload = scale_load_to_cpu(task_load(p), prev_cpu); + if (eligible_cpu(tload, cpu_load, prev_cpu, sync) && + task_load_will_fit(p, tload, prev_cpu)) { + fast_path = true; + best_cpu = prev_cpu; + goto 
done; + } + + spare_capacity = sched_ravg_window - cpu_load; + if (spare_capacity > 0) { + highest_spare_capacity = spare_capacity; + best_capacity_cpu = prev_cpu; + } + cpumask_clear_cpu(prev_cpu, &search_cpus); + } + + trq = task_rq(p); for_each_cpu(i, &search_cpus) { struct rq *rq = cpu_rq(i); @@ -3279,10 +3326,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason, if (best_idle_cpu >= 0) { best_cpu = best_idle_cpu; - goto done; - } - - if (best_cpu < 0 || boost) { + } else if (best_cpu < 0 || boost) { if (unlikely(best_capacity_cpu < 0)) best_cpu = prev_cpu; else @@ -3293,7 +3337,8 @@ static int select_best_cpu(struct task_struct *p, int target, int reason, } done: - trace_sched_task_load(p, boost, reason, sync, need_idle, best_cpu); + trace_sched_task_load(p, boost, reason, sync, need_idle, fast_path, + best_cpu); return best_cpu; } |
