summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
author: Dietmar Eggemann <dietmar.eggemann@arm.com> 2016-12-05 14:15:54 +0000
committer: Andres Oportus <andresoportus@google.com> 2017-06-02 08:01:53 -0700
commitf6f931489311aa208255192bf22d189d095f5b9a (patch)
treeaeb99d83ec47436d5908131d2f62e06cd1bd1cd0 /kernel
parent81bd5ed393832c088e3a31420205bd13e667d538 (diff)
EAS: sched/fair: Re-integrate 'honor sync wakeups' into wakeup path
This patch re-integrates the part which was initially provided by 3b9d7554aeec ("EAS: sched/fair: tunable to honor sync wakeups") into energy_aware_wake_cpu() into select_energy_cpu_brute().

Change-Id: I748fde3ecdeb44651179bce0a5bb8dd82d1903f6
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
(cherry picked from commit b75b7286cb068d5761621ea134c23dd131db953f)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ae3cc8df331c..5e4bf1e76275 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5889,13 +5889,21 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
return min_cap * 1024 < task_util(p) * capacity_margin;
}
-static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu)
+static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
int i;
int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
unsigned long max_spare = 0;
struct sched_domain *sd;
+ if (sysctl_sched_sync_hint_enable && sync) {
+ int cpu = smp_processor_id();
+ cpumask_t search_cpus;
+ cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+ if (cpumask_test_cpu(cpu, &search_cpus))
+ return cpu;
+ }
+
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
@@ -5970,7 +5978,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
&& cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
- return select_energy_cpu_brute(p, prev_cpu);
+ return select_energy_cpu_brute(p, prev_cpu, sync);
rcu_read_lock();
for_each_domain(cpu, tmp) {