| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-11-18 13:19:39 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:01:17 -0700 |
| commit | 29a412dffa5cbd6d7d913909cd57d04d9d5cb172 (patch) | |
| tree | 034ba56d1040d86df67375ab1683e058c521b65b /kernel | |
| parent | d1b240ccc7317c502b8a051a7d94466de482f8a4 (diff) | |
sched: Avoid frequent migration of running task
Power values for cpus can drop quite considerably when they go idle.
As a result, the best choice for running a single task in a cluster
can vary quite rapidly. As the task keeps hopping cpus, other cpus go
idle and start being seen as more favorable targets for running a task,
leading to the task migrating on almost every scheduler tick!
Prevent this by keeping track of when a task started running on a cpu,
and allowing task migration in the tick path (migration_needed()) for
energy-efficiency reasons only if the task has run sufficiently long
(as determined by the sysctl_sched_min_runtime variable).
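To make the gate concrete, here is a minimal stand-alone C sketch of the idea. This is an illustration only, mirroring the names the patch introduces (p->run_start, sysctl_sched_min_runtime); the actual kernel changes are in the diff below, where the check lives in lower_power_cpu_available().

```c
#include <stdbool.h>
#include <stdint.h>

/* Default gate: 200 ms, in nanoseconds (matches the patch's default). */
static unsigned int sysctl_sched_min_runtime = 200000000;

struct task {
	uint64_t run_start;	/* wallclock when the task last started running on a cpu */
};

/* __schedule() stamps the task that is picked to run next. */
static void note_run_start(struct task *p, uint64_t wallclock)
{
	p->run_start = wallclock;
}

/*
 * Tick-path check: a power-motivated migration is considered only once
 * the task has run on its current cpu for at least the gate duration.
 */
static bool may_migrate_for_power(const struct task *p, uint64_t now)
{
	return now - p->run_start >= sysctl_sched_min_runtime;
}

int main(void)
{
	struct task t;

	note_run_start(&t, 0);
	/* 50 ms of runtime is below the 200 ms gate, so stay put (returns 0). */
	return may_migrate_for_power(&t, 50000000) ? 1 : 0;
}
```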
Note that currently the sysctl_sched_min_runtime setting is considered
only in the scheduler_tick()->migration_needed() path and not in the
idle_balance() path. In other words, a task could still be pulled to
another cpu that performed idle_balance(). This limitation should not
affect the high-frequency migrations typically seen (when a single
high-demand task runs on a high-performance cpu).
CRs-Fixed: 756570
Change-Id: I96413b7a81b623193c3bbcec6f3fa9dfec367d99
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in set_task_cpu() and
__schedule().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 16 |
| -rw-r--r-- | kernel/sched/fair.c | 6 |
| -rw-r--r-- | kernel/sysctl.c | 7 |

3 files changed, 29 insertions, 0 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b900b2de3990..f3d385c2dac6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2538,6 +2538,16 @@ static void restore_orig_mark_start(struct task_struct *p, u64 mark_start)
 	p->ravg.mark_start = mark_start;
 }
 
+/*
+ * Note down when task started running on a cpu. This information will be handy
+ * to avoid "too" frequent task migrations for a running task on account of
+ * power.
+ */
+static inline void note_run_start(struct task_struct *p, u64 wallclock)
+{
+	p->run_start = wallclock;
+}
+
 #else	/* CONFIG_SCHED_HMP */
 
 static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
@@ -2569,6 +2579,8 @@ restore_orig_mark_start(struct task_struct *p, u64 mark_start)
 {
 }
 
+static inline void note_run_start(struct task_struct *p, u64 wallclock) { }
+
 #endif	/* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SMP
@@ -2834,6 +2846,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
 
+	note_run_start(p, -1);
+
 	if (task_cpu(p) != new_cpu) {
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p);
@@ -4772,6 +4786,7 @@ static void __sched notrace __schedule(bool preempt)
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+			note_run_start(prev, -1);
 			prev->on_rq = 0;
 
 			/*
@@ -4800,6 +4815,7 @@ static void __sched notrace __schedule(bool preempt)
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
+	note_run_start(next, wallclock);
 
 	BUG_ON(task_cpu(next) != cpu_of(rq));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9ab83b5af025..8259d1af3efb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2543,6 +2543,8 @@ unsigned int __read_mostly sched_init_task_load_pelt;
 unsigned int __read_mostly sched_init_task_load_windows;
 unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
 
+unsigned int __read_mostly sysctl_sched_min_runtime = 200000000; /* 200 ms */
+
 static inline unsigned int task_load(struct task_struct *p)
 {
 	if (sched_use_pelt)
@@ -3602,6 +3604,10 @@ static int lower_power_cpu_available(struct task_struct *p, int cpu)
 	int i;
 	int lowest_power_cpu = task_cpu(p);
 	int lowest_power = power_cost(p, task_cpu(p));
+	u64 delta = sched_clock() - p->run_start;
+
+	if (delta < sysctl_sched_min_runtime)
+		return 0;
 
 	/* Is a lower-powered idle CPU available which will fit this task? */
 	for_each_cpu_and(i, tsk_cpus_allowed(p), cpu_online_mask) {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1465fb869657..9c2719cc9cc9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -373,6 +373,13 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= sched_hmp_proc_update_handler,
 	},
 	{
+		.procname	= "sched_min_runtime",
+		.data		= &sysctl_sched_min_runtime,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "sched_spill_load",
 		.data		= &sysctl_sched_spill_load_pct,
 		.maxlen		= sizeof(unsigned int),
```
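Since the entry is added to kern_table with mode 0644, the knob should surface as /proc/sys/kernel/sched_min_runtime. A hypothetical user-space helper to retune it at runtime (a sketch, assuming that path) might look like:

```c
/* Hypothetical tuning helper: drop the migration gate from 200 ms to 100 ms.
 * Assumes the sysctl is exposed at /proc/sys/kernel/sched_min_runtime, the
 * usual location for kern_table entries. The value is in nanoseconds. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_min_runtime", "w");

	if (!f) {
		perror("sched_min_runtime");
		return 1;
	}
	fprintf(f, "%u\n", 100000000u);	/* 100 ms */
	return fclose(f) ? 1 : 0;
}
```

Because the handler is proc_dointvec, the file accepts a plain decimal integer, so the same tuning can be done with any tool that writes to procfs.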
