author     Olav Haugan <ohaugan@codeaurora.org>    2014-11-24 16:27:20 -0800
committer  David Keitel <dkeitel@codeaurora.org>   2016-03-23 20:01:18 -0700
commit     72fa561b0dfd807f1f69ffd6a6f63cdf38a5d000 (patch)
tree       bda9b886ef364ef4e1c9728d502cf94352b62b19 /kernel
parent     29a412dffa5cbd6d7d913909cd57d04d9d5cb172 (diff)
sched: Only do EA migration when CPU throttling is imminent
We do not want to migrate tasks unnecessarily, to avoid cache misses and other migration latencies that could affect the performance of the system. Add a check to only try EA migration when CPU frequency throttling is imminent.

CRs-Fixed: 764788
Change-Id: I92e86e62da10ce15f1e76a980df3545e93d76348
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
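As a standalone illustration of the gating pattern this patch adds to migration_needed(), the sketch below runs the cheap throttling query first, so the costlier lower-power-CPU search only happens when throttling is imminent. This is a minimal userspace sketch: the pwr_stats table and both helper bodies are hypothetical stand-ins, not the kernel's actual API.

#include <stdio.h>

/* Simplified stand-in for the kernel's cpu_pwr_stats. */
struct cpu_pwr_stats {
	int throttling;	/* non-zero when frequency throttling is imminent */
};

/* Hypothetical per-CPU data; pretend only CPU 1 is about to throttle. */
static struct cpu_pwr_stats pwr_stats[4] = { {0}, {1}, {0}, {0} };

static int is_cpu_throttling_imminent(int cpu)
{
	return pwr_stats[cpu].throttling;
}

/* Stand-in for lower_power_cpu_available(); pretend CPU 0 is cheapest. */
static int lower_power_cpu_available(int cpu)
{
	return cpu != 0;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++) {
		/* Same ordering as the patch: the throttling check
		 * short-circuits the lower-power-CPU search. */
		if (is_cpu_throttling_imminent(cpu) &&
		    lower_power_cpu_available(cpu))
			printf("cpu %d: EA migration considered\n", cpu);
		else
			printf("cpu %d: task stays put\n", cpu);
	}
	return 0;
}

Only CPU 1 would be considered for an EA migration here: CPU 0 fails the (stand-in) lower-power check, and CPUs 2 and 3 are not throttling, so the search for a cheaper CPU is skipped entirely.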
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8259d1af3efb..262c8528e775 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3623,6 +3623,8 @@ static int lower_power_cpu_available(struct task_struct *p, int cpu)
 	return (lowest_power_cpu != task_cpu(p));
 }
 
+static inline int is_cpu_throttling_imminent(int cpu);
+
 /*
  * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
  * cpu as per its demand or priority)
@@ -3655,6 +3657,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 		return MOVE_TO_BIG_CPU;
 
 	if (sched_enable_power_aware &&
+	    is_cpu_throttling_imminent(cpu_of(rq)) &&
 	    lower_power_cpu_available(p, cpu_of(rq)))
 		return MOVE_TO_POWER_EFFICIENT_CPU;
@@ -3718,6 +3721,16 @@ static inline int nr_big_tasks(struct rq *rq)
 	return rq->nr_big_tasks;
 }
 
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	int throttling = 0;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+
+	if (per_cpu_info)
+		throttling = per_cpu_info[cpu].throttling;
+	return throttling;
+}
+
 #else /* CONFIG_SCHED_HMP */
 
 #define sched_enable_power_aware 0
@@ -3774,6 +3787,11 @@ static inline int nr_big_tasks(struct rq *rq)
 	return 0;
 }
 
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SCHED_HMP
@@ -7745,7 +7763,8 @@ group_type group_classify(struct sched_group *group,
 	if ((capacity(env->dst_rq) == group_rq_capacity(group)) &&
 	    sgs->sum_nr_running && (env->idle != CPU_NOT_IDLE) &&
 	    power_cost_at_freq(env->dst_cpu, 0) <
-	    power_cost_at_freq(cpumask_first(sched_group_cpus(group)), 0)) {
+	    power_cost_at_freq(cpumask_first(sched_group_cpus(group)), 0) &&
+	    is_cpu_throttling_imminent(cpumask_first(sched_group_cpus(group)))) {
 		env->flags |= LBF_PWR_ACTIVE_BALANCE;
 		return group_ea;
 	}