summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorSrinath Sridharan <srinathsr@google.com>2016-07-14 13:09:03 -0700
committerJohn Stultz <john.stultz@linaro.org>2016-08-11 14:26:49 -0700
commitc5a00c2dad8d161da3c2086cccd6375d8ad5b04f (patch)
tree45338f8f9fa792bcc1f791ea344a7d64ac31a9f0 /kernel
parentd4cda03828f5c8eae35efcb08f520f8f1a35950e (diff)
sched/tune: Introducing a new schedtune attribute prefer_idle
Hint to enable biasing of tasks towards idle cpus, even when a given task is negatively boosted. The mechanism allows up to 20% reduction in camera power without hurting performance. bug: 28312446 Change-Id: I97ea5671aa1e6bcb165408b41e17bc82e41c2c9e
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c23
-rw-r--r--kernel/sched/tune.c42
-rw-r--r--kernel/sched/tune.h2
3 files changed, 57 insertions, 10 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7b6e95aa7360..e099ce747345 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5589,7 +5589,7 @@ done:
return target;
}
-static inline int find_best_target(struct task_struct *p, bool boosted)
+static inline int find_best_target(struct task_struct *p, bool prefer_idle)
{
int iter_cpu;
int target_cpu = -1;
@@ -5607,9 +5607,9 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
int idle_idx;
/*
- * favor higher cpus for boosted tasks
+ * favor higher cpus for tasks that prefer idle cores
*/
- int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
+ int i = prefer_idle ? NR_CPUS-iter_cpu-1 : iter_cpu;
if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
continue;
@@ -5634,10 +5634,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
continue;
#endif
/*
- * For boosted tasks we favor idle cpus unconditionally to
+ * Unconditionally favoring tasks that prefer idle cpus to
* improve latency.
*/
- if (idle_cpu(i) && boosted) {
+ if (idle_cpu(i) && prefer_idle) {
if (best_idle_cpu < 0)
best_idle_cpu = i;
continue;
@@ -5654,7 +5654,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
target_cpu = i;
target_util = new_util;
}
- } else if (!boosted) {
+ } else if (!prefer_idle) {
if (best_idle_cpu < 0 ||
(sysctl_sched_cstate_aware &&
best_idle_cstate > idle_idx)) {
@@ -5669,7 +5669,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
}
}
- if (boosted && best_idle_cpu >= 0)
+ if (prefer_idle && best_idle_cpu >= 0)
target_cpu = best_idle_cpu;
else if (target_cpu < 0)
target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
@@ -5761,14 +5761,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
*/
#ifdef CONFIG_CGROUP_SCHEDTUNE
bool boosted = schedtune_task_boost(p) > 0;
+ bool prefer_idle = schedtune_prefer_idle(p) > 0;
#else
bool boosted = 0;
+ bool prefer_idle = 0;
#endif
- int tmp_target = find_best_target(p, boosted);
- if (tmp_target >= 0)
+ int tmp_target = find_best_target(p, boosted || prefer_idle);
+ if (tmp_target >= 0) {
target_cpu = tmp_target;
- if (boosted && idle_cpu(target_cpu))
+ if ((boosted || prefer_idle) && idle_cpu(target_cpu))
return target_cpu;
+ }
}
if (target_cpu != task_cpu(p)) {
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index d24f365b0c90..644f8e9ee96f 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -125,6 +125,10 @@ struct schedtune {
/* Performance Constraint (C) region threshold params */
int perf_constrain_idx;
+
+ /* Hint to bias scheduling of tasks on that SchedTune CGroup
+ * towards idle CPUs */
+ int prefer_idle;
};
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
@@ -156,6 +160,7 @@ root_schedtune = {
.boost = 0,
.perf_boost_idx = 0,
.perf_constrain_idx = 0,
+ .prefer_idle = 0,
};
int
@@ -536,6 +541,38 @@ int schedtune_task_boost(struct task_struct *p)
return task_boost;
}
+int schedtune_prefer_idle(struct task_struct *p)
+{
+ struct schedtune *st;
+ int prefer_idle;
+
+ /* Get prefer_idle value */
+ rcu_read_lock();
+ st = task_schedtune(p);
+ prefer_idle = st->prefer_idle;
+ rcu_read_unlock();
+
+ return prefer_idle;
+}
+
+static u64
+prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->prefer_idle;
+}
+
+static int
+prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 prefer_idle)
+{
+ struct schedtune *st = css_st(css);
+ st->prefer_idle = prefer_idle;
+
+ return 0;
+}
+
static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
@@ -587,6 +624,11 @@ static struct cftype files[] = {
.read_s64 = boost_read,
.write_s64 = boost_write,
},
+ {
+ .name = "prefer_idle",
+ .read_u64 = prefer_idle_read,
+ .write_u64 = prefer_idle_write,
+ },
{ } /* terminate */
};
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index be1785eb1c5b..4f6441771e4c 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -17,6 +17,8 @@ struct target_nrg {
int schedtune_cpu_boost(int cpu);
int schedtune_task_boost(struct task_struct *tsk);
+int schedtune_prefer_idle(struct task_struct *tsk);
+
void schedtune_exit_task(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);