diff options
| author | Joonwoo Park <joonwoop@codeaurora.org> | 2015-01-23 17:07:14 -0800 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:01:46 -0700 |
| commit | 3fe87bc057a3104dbacc9dc300bc70370e3242bb (patch) | |
| tree | 34de3a49f7135ee7902f2be960c444317915649d /kernel/sched | |
| parent | ce35afd096b49eb58faa01d7a9e70deb4f7c6251 (diff) | |
sched: avoid CPUs with high irq activity for non-small tasks
The irq-aware scheduler is meant to achieve better performance by avoiding
task placement on CPUs that have high irq activity. However, the current
scheduler preferentially places non-small tasks on CPUs loaded by irq
activity — the opposite of what is intended.
This is suboptimal for both power and performance.
Fix the task placement algorithm to avoid CPUs with significant irq activity.
Change-Id: Ifa5a6ac186241bd58fa614e93e3d873a5f5ad4ca
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/fair.c | 22 |
1 file changed, 15 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6ef06d51c088..c78b78a37f6f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2915,8 +2915,7 @@ static int mostly_idle_cpu_sync(int cpu, int sync) nr_running--; return load <= rq->mostly_idle_load && - nr_running <= rq->mostly_idle_nr_run && - !sched_cpu_high_irqload(cpu); + nr_running <= rq->mostly_idle_nr_run; } static int boost_refcount; @@ -3036,6 +3035,9 @@ static int eligible_cpu(struct task_struct *p, int cpu, int sync) { struct rq *rq = cpu_rq(cpu); + if (sched_cpu_high_irqload(cpu)) + return 0; + if (mostly_idle_cpu_sync(cpu, sync)) return 1; @@ -3161,7 +3163,7 @@ static int best_small_task_cpu(struct task_struct *p, int sync) */ if (!cpu_rq(min_cost_cpu)->cstate && mostly_idle_cpu_sync(min_cost_cpu, sync) && - min_cost_cpu == prev_cpu) + !sched_cpu_high_irqload(min_cost_cpu) && min_cost_cpu == prev_cpu) return min_cost_cpu; for_each_cpu(i, &search_cpus) { @@ -3187,7 +3189,8 @@ static int best_small_task_cpu(struct task_struct *p, int sync) continue; } - if (mostly_idle_cpu_sync(i, sync)) { + if (mostly_idle_cpu_sync(i, sync) && + !sched_cpu_high_irqload(i)) { if (best_mi_cpu == -1 || i == prev_cpu) best_mi_cpu = i; continue; @@ -3357,7 +3360,8 @@ static int select_best_cpu(struct task_struct *p, int target, int reason, * where the task will fit. */ if (!task_will_fit(p, i)) { - if (mostly_idle_cpu_sync(i, sync)) { + if (mostly_idle_cpu_sync(i, sync) && + !sched_cpu_high_irqload(i)) { load = cpu_load_sync(i, sync); if (load < min_fallback_load || (load == min_fallback_load && @@ -3459,8 +3463,12 @@ static int select_best_cpu(struct task_struct *p, int target, int reason, } } - if (min_cstate_cpu >= 0 && (prefer_idle > 0 || - !(best_cpu >= 0 && mostly_idle_cpu_sync(best_cpu, sync)))) + /* + * Don't need to check !sched_cpu_high_irqload(best_cpu) because + * best_cpu cannot have high irq load. 
+ */ + if (min_cstate_cpu >= 0 && (prefer_idle > 0 || best_cpu < 0 || + !mostly_idle_cpu_sync(best_cpu, sync))) best_cpu = min_cstate_cpu; done: if (best_cpu < 0) {
