author     Dietmar Eggemann <dietmar.eggemann@arm.com>   2017-03-22 18:16:03 +0000
committer  Andres Oportus <andresoportus@google.com>     2017-06-02 08:01:53 -0700
commit     9e92e8a24fa5bfec70cc70cbfa81e2d34e887634 (patch)
tree       7465e5983907e48f8ad176b6b141e286a4df93e7
parent     f6f931489311aa208255192bf22d189d095f5b9a (diff)
sched/fair: Code !is_big_little path into select_energy_cpu_brute()
This patch replaces the existing EAS upstream implementation of select_energy_cpu_brute() with the find_best_target() based one previously used in Android. It also removes the cpumask 'and' from select_energy_cpu_brute(); see the existing use of 'cpu = smp_processor_id()' in select_task_rq_fair().

Change-Id: If678c002efaa87d1ba3ec9989a4e9f8df98b83ec
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
[ added guarding for non-schedtune builds ]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
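Schematically, the flow this change gives select_energy_cpu_brute() is roughly the following. This is a simplified reader's sketch of the diff below, not the kernel source: a CONFIG_CGROUP_SCHEDTUNE build is assumed, and the RCU locking plus the sd_ea/sd NULL check are left out.

/*
 * Simplified sketch of the new selection flow (CONFIG_CGROUP_SCHEDTUNE assumed;
 * RCU locking and the sd_ea lookup elided). See the diff below for the exact code.
 */
static int select_energy_cpu_brute_sketch(struct task_struct *p, int prev_cpu, int sync)
{
        int target_cpu = prev_cpu, tmp_target;
        bool boosted, prefer_idle;

        /* Sync wakeup hint: stay on the waking cpu if the task may run there. */
        if (sysctl_sched_sync_hint_enable && sync) {
                int cpu = smp_processor_id();

                if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        return cpu;
        }

        /* schedtune decides how aggressively to search. */
        boosted = schedtune_task_boost(p) > 0;
        prefer_idle = schedtune_prefer_idle(p) > 0;

        /* find_best_target() proposes a candidate with sufficient capacity. */
        tmp_target = find_best_target(p, boosted, prefer_idle);
        if (tmp_target >= 0) {
                target_cpu = tmp_target;
                /* Boosted/prefer_idle tasks take an idle candidate right away. */
                if ((boosted || prefer_idle) && idle_cpu(target_cpu))
                        return target_cpu;
        }

        if (target_cpu != prev_cpu) {
                struct energy_env eenv = {
                        .util_delta = task_util(p),
                        .src_cpu = prev_cpu,
                        .dst_cpu = target_cpu,
                        .task = p,
                };

                /* prev_cpu is overutilized: the task has to move regardless. */
                if (cpu_overutilized(prev_cpu))
                        return target_cpu;

                /* Only migrate when the energy model predicts a saving. */
                if (energy_diff(&eenv) >= 0)
                        target_cpu = prev_cpu;
        }

        return target_cpu;
}

energy_diff(&eenv) is the estimated energy delta of moving util_delta from src_cpu to dst_cpu, so a non-negative result means the move would not save energy and the task stays on prev_cpu.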
-rw-r--r--   kernel/sched/fair.c   66
1 file changed, 29 insertions(+), 37 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5e4bf1e76275..387950f2649d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5891,65 +5891,57 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
-        int i;
-        int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
-        unsigned long max_spare = 0;
         struct sched_domain *sd;
+        int target_cpu = prev_cpu, tmp_target;
+        bool boosted, prefer_idle;
 
         if (sysctl_sched_sync_hint_enable && sync) {
                 int cpu = smp_processor_id();
-                cpumask_t search_cpus;
-                cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
-                if (cpumask_test_cpu(cpu, &search_cpus))
+
+                if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                         return cpu;
         }
 
         rcu_read_lock();
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+        boosted = schedtune_task_boost(p) > 0;
+        prefer_idle = schedtune_prefer_idle(p) > 0;
+#else
+        boosted = get_sysctl_sched_cfs_boost() > 0;
+        prefer_idle = 0;
+#endif
 
         sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+        /* Find a cpu with sufficient capacity */
+        tmp_target = find_best_target(p, boosted, prefer_idle);
 
         if (!sd)
                 goto unlock;
+        if (tmp_target >= 0) {
+                target_cpu = tmp_target;
+                if ((boosted || prefer_idle) && idle_cpu(target_cpu))
+                        goto unlock;
+        }
 
-        for_each_cpu_and(i, tsk_cpus_allowed(p), sched_domain_span(sd)) {
-                int diff;
-                unsigned long spare;
-
+        if (target_cpu != prev_cpu) {
                 struct energy_env eenv = {
-                        .util_delta = task_util(p),
-                        .src_cpu = prev_cpu,
-                        .dst_cpu = i,
-                        .task = p,
+                        .util_delta = task_util(p),
+                        .src_cpu = prev_cpu,
+                        .dst_cpu = target_cpu,
+                        .task = p,
                 };
 
-                spare = capacity_spare_wake(i, p);
-
-                if (i == prev_cpu)
-                        continue;
-
-                if (spare > max_spare) {
-                        max_spare = spare;
-                        spare_cpu = i;
-                }
-
-                if (spare * 1024 < capacity_margin * task_util(p))
-                        continue;
-
-                diff = energy_diff(&eenv);
+                /* Not enough spare capacity on previous cpu */
+                if (cpu_overutilized(prev_cpu))
+                        goto unlock;
 
-                if (diff < min_diff) {
-                        min_diff = diff;
-                        energy_cpu = i;
-                }
+                if (energy_diff(&eenv) >= 0)
+                        target_cpu = prev_cpu;
         }
 
 unlock:
         rcu_read_unlock();
-
-        if (energy_cpu == prev_cpu && !cpu_overutilized(prev_cpu))
-                return prev_cpu;
-
-        return energy_cpu != prev_cpu ? energy_cpu : spare_cpu;
+        return target_cpu;
 }
 
 /*
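One detail from the removed loop worth spelling out is the spare-capacity filter, spare * 1024 < capacity_margin * task_util(p). Assuming capacity_margin keeps its usual fair.c default of 1280, this is a fixed-point way of requiring spare capacity of at least 1.25x the task's utilization (1280/1024 = 1.25) before a cpu stays a candidate; in the new code the per-cpu capacity check is left to find_best_target() ("Find a cpu with sufficient capacity"), and only prev_cpu is checked via cpu_overutilized(). A stand-alone illustration with made-up utilization numbers:

/* Stand-alone illustration of the removed filter; capacity_margin = 1280
 * (the usual fair.c default) is an assumption, the numbers are made up. */
#include <stdio.h>

static const unsigned long capacity_margin = 1280;

static int enough_spare(unsigned long spare, unsigned long util)
{
        /* Mirrors: if (spare * 1024 < capacity_margin * task_util(p)) continue; */
        return !(spare * 1024 < capacity_margin * util);
}

int main(void)
{
        /* util = 200 needs spare >= 250: 240 is rejected, 260 is accepted. */
        printf("%d %d\n", enough_spare(240, 200), enough_spare(260, 200));
        return 0;
}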