| author | Srinath Sridharan <srinathsr@google.com> | 2016-07-14 13:09:03 -0700 |
|---|---|---|
| committer | John Stultz <john.stultz@linaro.org> | 2016-08-11 14:26:49 -0700 |
| commit | c5a00c2dad8d161da3c2086cccd6375d8ad5b04f | |
| tree | 45338f8f9fa792bcc1f791ea344a7d64ac31a9f0 /kernel/sched/fair.c | |
| parent | d4cda03828f5c8eae35efcb08f520f8f1a35950e | |
sched/tune: Introducing a new schedtune attribute prefer_idle
Hint to enable biasing of tasks towards idle cpus, even when a given
task is negatively boosted. The mechanism allows up to a 20% reduction in
camera power without hurting performance.
bug: 28312446
Change-Id: I97ea5671aa1e6bcb165408b41e17bc82e41c2c9e
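As a rough illustration of the hint described above, the standalone sketch below models the placement bias that find_best_target() applies when the flag is set: visit CPUs from the highest index downwards and take the first idle one. The names pick_cpu(), cpu_is_idle_stub() and NR_TEST_CPUS are illustrative stand-ins for this sketch, not kernel APIs.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_TEST_CPUS 8	/* illustrative CPU count, not the kernel's NR_CPUS */

/* Stand-in for the kernel's idle_cpu(); pretends CPUs 6 and 7 are idle. */
static bool cpu_is_idle_stub(int cpu)
{
	return cpu >= 6;
}

/*
 * Simplified model of the bias: with the prefer_idle hint set, CPUs are
 * scanned from the highest index down and the first idle one wins; without
 * the hint the existing placement is kept, modelled here by returning the
 * fallback cpu.
 */
static int pick_cpu(bool prefer_idle, int fallback_cpu)
{
	int iter, best_idle = -1;

	for (iter = 0; iter < NR_TEST_CPUS; iter++) {
		int cpu = prefer_idle ? NR_TEST_CPUS - iter - 1 : iter;

		if (prefer_idle && cpu_is_idle_stub(cpu)) {
			best_idle = cpu;	/* first idle CPU seen from the top */
			break;
		}
	}

	return best_idle >= 0 ? best_idle : fallback_cpu;
}

int main(void)
{
	printf("prefer_idle=0 -> cpu %d\n", pick_cpu(false, 0));	/* keeps cpu 0 */
	printf("prefer_idle=1 -> cpu %d\n", pick_cpu(true, 0));	/* picks idle cpu 7 */
	return 0;
}
```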
Diffstat (limited to 'kernel/sched/fair.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/fair.c | 23 |

1 file changed, 13 insertions, 10 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7b6e95aa7360..e099ce747345 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5589,7 +5589,7 @@ done:
 	return target;
 }
 
-static inline int find_best_target(struct task_struct *p, bool boosted)
+static inline int find_best_target(struct task_struct *p, bool prefer_idle)
 {
 	int iter_cpu;
 	int target_cpu = -1;
@@ -5607,9 +5607,9 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
 		int idle_idx;
 
 		/*
-		 * favor higher cpus for boosted tasks
+		 * favor higher cpus for tasks that prefer idle cores
 		 */
-		int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
+		int i = prefer_idle ? NR_CPUS-iter_cpu-1 : iter_cpu;
 
 		if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
 			continue;
@@ -5634,10 +5634,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
 			continue;
 #endif
 		/*
-		 * For boosted tasks we favor idle cpus unconditionally to
+		 * Unconditionally favoring tasks that prefer idle cpus to
 		 * improve latency.
 		 */
-		if (idle_cpu(i) && boosted) {
+		if (idle_cpu(i) && prefer_idle) {
 			if (best_idle_cpu < 0)
 				best_idle_cpu = i;
 			continue;
@@ -5654,7 +5654,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
 					target_cpu = i;
 					target_util = new_util;
 				}
-			} else if (!boosted) {
+			} else if (!prefer_idle) {
 				if (best_idle_cpu < 0 ||
 					(sysctl_sched_cstate_aware &&
 					 best_idle_cstate > idle_idx)) {
@@ -5669,7 +5669,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
 		}
 	}
 
-	if (boosted && best_idle_cpu >= 0)
+	if (prefer_idle && best_idle_cpu >= 0)
 		target_cpu = best_idle_cpu;
 	else if (target_cpu < 0)
 		target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
@@ -5761,14 +5761,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 		 */
 #ifdef CONFIG_CGROUP_SCHEDTUNE
 		bool boosted = schedtune_task_boost(p) > 0;
+		bool prefer_idle = schedtune_prefer_idle(p) > 0;
 #else
 		bool boosted = 0;
+		bool prefer_idle = 0;
 #endif
-		int tmp_target = find_best_target(p, boosted);
-		if (tmp_target >= 0)
+		int tmp_target = find_best_target(p, boosted || prefer_idle);
+		if (tmp_target >= 0) {
 			target_cpu = tmp_target;
-		if (boosted && idle_cpu(target_cpu))
+		if ((boosted || prefer_idle) && idle_cpu(target_cpu))
 			return target_cpu;
+		}
 	}
 
 	if (target_cpu != task_cpu(p)) {
```
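This change only threads the hint through fair.c; the cgroup attribute itself comes from the companion schedtune patch. Assuming the usual Android convention of mounting the schedtune hierarchy at /dev/stune and an attribute file named schedtune.prefer_idle (both assumptions, not defined by this diff), a userspace consumer might enable the hint for a group roughly as follows:

```c
#include <stdio.h>

/*
 * Assumed layout: schedtune cgroup hierarchy mounted at /dev/stune, with a
 * per-group schedtune.prefer_idle file. Both are conventions from the
 * companion schedtune patches and Android userspace, not this fair.c change.
 */
static int set_prefer_idle(const char *group, int enable)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/dev/stune/%s/schedtune.prefer_idle", group);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", enable);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Bias latency-sensitive foreground tasks towards idle cpus. */
	return set_prefer_idle("top-app", 1);
}
```

Tasks attached to that group then hit the schedtune_prefer_idle(p) > 0 path in energy_aware_wake_cpu() shown in the diff above.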
