author	Todd Kjos <tkjos@google.com>	2016-07-29 14:41:25 +0100
committer	John Stultz <john.stultz@linaro.org>	2016-08-11 14:26:41 -0700
commit	369bcbbae4407a5b50f472a0fa3d569b9a832081 (patch)
tree	036a1fd0431b9c74bae8c00596d09b5bb09b30b1 /kernel
parent	af14760e19ebce67e8913fc0efd32cf839282a95 (diff)
sched/fair: optimize idle cpu selection for boosted tasks
find_best_target CPU selection is biased towards lower CPU IDs. Bias
towards higher CPUs for boosted tasks. For boosted tasks, unconditionally
use the idle CPU returned by find_best_target.

BUG: 29512132
Change-Id: I3d650051752163fcf3dc7909751d1fde3f9d17c0

Conflicts:
	kernel/sched/fair.c
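The core of the change is the iteration order: boosted tasks scan CPU IDs
from high to low, so capacity ties resolve toward bigger CPUs. Below is a
minimal userspace sketch of that reversal, not kernel code: NR_CPUS and
cpu_online_stub() are illustrative stand-ins for the kernel's NR_CPUS and
cpu_online().

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool cpu_online_stub(int cpu)
{
	return cpu != 3;	/* pretend CPU 3 is offline */
}

static void scan_cpus(bool boosted)
{
	int iter_cpu;

	for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
		/* favor higher cpus for boosted tasks */
		int i = boosted ? NR_CPUS - iter_cpu - 1 : iter_cpu;

		if (!cpu_online_stub(i))
			continue;
		printf("%d ", i);
	}
	printf("\n");
}

int main(void)
{
	scan_cpus(false);	/* prints: 0 1 2 4 5 6 7 */
	scan_cpus(true);	/* prints: 7 6 5 4 2 1 0 */
	return 0;
}

On big.LITTLE systems, where higher CPU IDs are conventionally the big
cores, this makes a boosted task prefer a big idle CPU when several
candidates are otherwise equivalent.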
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	70
1 file changed, 36 insertions(+), 34 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a26f40cb7fd0..6efdcad51603 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5569,32 +5569,30 @@ done:
return target;
}
-static inline int find_best_target(struct task_struct *p)
+static inline int find_best_target(struct task_struct *p, bool boosted)
{
- int i, boosted;
+ int iter_cpu;
int target_cpu = -1;
int target_capacity = 0;
int backup_capacity = 0;
- int idle_cpu = -1;
+ int best_idle_cpu = -1;
int best_idle_cstate = INT_MAX;
int backup_cpu = -1;
unsigned long task_util_boosted, new_util;
- /*
- * Favor 1) busy cpu with most capacity at current OPP
- * 2) idle_cpu with capacity at current OPP
- * 3) busy cpu with capacity at higher OPP
- */
-#ifdef CONFIG_CGROUP_SCHEDTUNE
- boosted = schedtune_task_boost(p);
-#else
- boosted = 0;
-#endif
task_util_boosted = boosted_task_util(p);
- for_each_cpu(i, tsk_cpus_allowed(p)) {
- int cur_capacity = capacity_curr_of(i);
- struct rq *rq = cpu_rq(i);
- int idle_idx = idle_get_state_idx(rq);
+ for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
+ int cur_capacity;
+ struct rq *rq;
+ int idle_idx;
+
+ /*
+ * favor higher cpus for boosted tasks
+ */
+ int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
+
+ if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
+ continue;
/*
* p's blocked utilization is still accounted for on prev_cpu
@@ -5615,46 +5613,43 @@ static inline int find_best_target(struct task_struct *p)
* For boosted tasks we favor idle cpus unconditionally to
* improve latency.
*/
- if (idle_idx >= 0 && boosted) {
- if (idle_cpu < 0 ||
- (sysctl_sched_cstate_aware &&
- best_idle_cstate > idle_idx)) {
- best_idle_cstate = idle_idx;
- idle_cpu = i;
- }
+ if (idle_cpu(i) && boosted) {
+ if (best_idle_cpu < 0)
+ best_idle_cpu = i;
continue;
}
+ cur_capacity = capacity_curr_of(i);
+ rq = cpu_rq(i);
+ idle_idx = idle_get_state_idx(rq);
+
if (new_util < cur_capacity) {
if (cpu_rq(i)->nr_running) {
if (target_capacity == 0 ||
target_capacity > cur_capacity) {
- /* busy CPU with most capacity at current OPP */
target_cpu = i;
target_capacity = cur_capacity;
}
} else if (!boosted) {
- if (idle_cpu < 0 ||
+ if (best_idle_cpu < 0 ||
(sysctl_sched_cstate_aware &&
best_idle_cstate > idle_idx)) {
best_idle_cstate = idle_idx;
- idle_cpu = i;
+ best_idle_cpu = i;
}
}
} else if (backup_capacity == 0 ||
backup_capacity > cur_capacity) {
- /* first busy CPU with capacity at higher OPP */
backup_capacity = cur_capacity;
backup_cpu = i;
}
}
- if (!boosted && target_cpu < 0) {
- target_cpu = idle_cpu >= 0 ? idle_cpu : backup_cpu;
- }
+ if (boosted && best_idle_cpu >= 0)
+ target_cpu = best_idle_cpu;
+ else if (target_cpu < 0)
+ target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
- if (boosted && idle_cpu >= 0)
- target_cpu = idle_cpu;
return target_cpu;
}
@@ -5740,9 +5735,16 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
/*
* Find a cpu with sufficient capacity
*/
- int tmp_target = find_best_target(p);
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ bool boosted = schedtune_task_boost(p) > 0;
+#else
+ bool boosted = 0;
+#endif
+ int tmp_target = find_best_target(p, boosted);
if (tmp_target >= 0)
target_cpu = tmp_target;
+ if (boosted && idle_cpu(target_cpu))
+ return target_cpu;
}
if (target_cpu != task_cpu(p)) {
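
Taken together, the new fallback logic at the end of find_best_target
reduces to a simple precedence. The sketch below is a plain userspace
restatement under the patch's variable names (it is not the kernel
function itself); -1 means "no candidate found", as in the patch.

#include <stdbool.h>
#include <stdio.h>

static int pick_target(bool boosted, int target_cpu, int best_idle_cpu,
		       int backup_cpu)
{
	/* boosted tasks take the best idle CPU unconditionally */
	if (boosted && best_idle_cpu >= 0)
		return best_idle_cpu;

	/*
	 * Otherwise prefer the busy CPU with spare capacity at the
	 * current OPP, then an idle CPU, then the backup CPU that
	 * would need a higher OPP.
	 */
	if (target_cpu < 0)
		return best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;

	return target_cpu;
}

int main(void)
{
	/* boosted: the idle CPU wins even though a busy target exists */
	printf("%d\n", pick_target(true, 2, 6, 1));	/* prints 6 */
	/* not boosted: the busy target with spare capacity wins */
	printf("%d\n", pick_target(false, 2, 6, 1));	/* prints 2 */
	return 0;
}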