path: root/kernel/sched/tune.c
author    Srinath Sridharan <srinathsr@google.com>    2016-07-28 17:28:55 +0100
committer Amit Pundir <amit.pundir@linaro.org>        2016-09-14 14:59:32 +0530
commit    3fc52a99e795d0086f402c36c45bb64e66e7b126 (patch)
tree      41cb7ab7ac780db98918d36be25f4ac3b9df912b /kernel/sched/tune.c
parent    9064187216fa872e72548618c5cced462e5bee24 (diff)
sched/tune: Add support for negative boost values
Change-Id: I164ee04ba98c3a776605f18cb65ee61b3e917939

Contains also:

eas/stune: schedtune cpu boost_max must be non-negative.

This is to avoid under-accounting cpu capacity which may cause task
stacking and frequency spikes.

Change-Id: Ie1c1cbd52a6edb77b4c15a830030aa748dff6f29
Diffstat (limited to 'kernel/sched/tune.c')
-rw-r--r--    kernel/sched/tune.c    25
1 file changed, 16 insertions(+), 9 deletions(-)
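
The hunks below implement the behaviour described in the commit message: per-group
boost values may now be negative, but the per-CPU boost_max is clamped so it never
goes below zero. As a hedged illustration only (plain userspace C, not the kernel
code; BOOSTGROUPS_COUNT and the sample group values are made up), this sketch shows
the boost_max selection with the new non-negative clamp:

/*
 * Userspace sketch of the per-CPU boost_max selection this patch changes.
 * Illustrative only; mirrors the logic of schedtune_cpu_update() but is not
 * the kernel implementation.
 */
#include <stdio.h>

#define BOOSTGROUPS_COUNT 5

struct boost_group {
        int boost;              /* per-group boost, now allowed in [-100, 100] */
        unsigned tasks;         /* RUNNABLE tasks of this group on the CPU */
};

static int cpu_boost_max(const struct boost_group *group)
{
        /* The root boost group (index 0) is always active */
        int boost_max = group[0].boost;
        int idx;

        for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
                /* A group only affects the CPU if it has RUNNABLE tasks there */
                if (group[idx].tasks == 0)
                        continue;
                if (group[idx].boost > boost_max)
                        boost_max = group[idx].boost;
        }

        /* The clamp added by this patch: never report a negative CPU boost */
        return boost_max > 0 ? boost_max : 0;
}

int main(void)
{
        /* All active groups boosted negatively: boost_max still reads 0 */
        struct boost_group groups[BOOSTGROUPS_COUNT] = {
                { .boost = -30, .tasks = 1 },
                { .boost = -10, .tasks = 2 },
        };

        printf("cpu boost_max = %d\n", cpu_boost_max(groups));
        return 0;
}
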
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 8ca8db2de818..afc4a7747161 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -213,10 +213,11 @@ static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
  */
 struct boost_groups {
         /* Maximum boost value for all RUNNABLE tasks on a CPU */
-        unsigned boost_max;
+        bool idle;
+        int boost_max;
         struct {
                 /* The boost for tasks on that boost group */
-                unsigned boost;
+                int boost;
                 /* Count of RUNNABLE tasks on that boost group */
                 unsigned tasks;
         } group[BOOSTGROUPS_COUNT];
@@ -229,7 +230,7 @@ static void
 schedtune_cpu_update(int cpu)
 {
         struct boost_groups *bg;
-        unsigned boost_max;
+        int boost_max;
         int idx;
 
         bg = &per_cpu(cpu_boost_groups, cpu);
@@ -243,9 +244,13 @@ schedtune_cpu_update(int cpu)
                  */
                 if (bg->group[idx].tasks == 0)
                         continue;
+
                 boost_max = max(boost_max, bg->group[idx].boost);
         }
-
+        /* Ensures boost_max is non-negative when all cgroup boost values
+         * are negative. Avoids under-accounting of cpu capacity which may cause
+         * task stacking and frequency spikes. */
+        boost_max = max(boost_max, 0);
         bg->boost_max = boost_max;
 }
 
@@ -391,7 +396,7 @@ int schedtune_task_boost(struct task_struct *p)
         return task_boost;
 }
 
-static u64
+static s64
 boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
         struct schedtune *st = css_st(css);
@@ -401,11 +406,13 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
 
 static int
 boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
-            u64 boost)
+            s64 boost)
 {
         struct schedtune *st = css_st(css);
+        unsigned threshold_idx;
+        int boost_pct;
 
-        if (boost < 0 || boost > 100)
+        if (boost < -100 || boost > 100)
                 return -EINVAL;
 
         st->boost = boost;
@@ -423,8 +430,8 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
 static struct cftype files[] = {
         {
                 .name = "boost",
-                .read_u64 = boost_read,
-                .write_u64 = boost_write,
+                .read_s64 = boost_read,
+                .write_s64 = boost_write,
         },
         { }     /* terminate */
 };
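
For context on the read_u64/write_u64 to read_s64/write_s64 switch in the hunk
above: an unsigned cgroup attribute cannot carry negative values, so the handlers
move to the signed variants and the accepted range widens from [0, 100] to
[-100, 100]. The following is an illustrative userspace sketch of the new range
check only (validate_boost is a made-up name, not a kernel function):

/*
 * Sketch of the widened input validation performed by boost_write() after
 * this patch. Userspace, illustrative only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int validate_boost(int64_t boost)
{
        if (boost < -100 || boost > 100)
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("boost -10  -> %d\n", validate_boost(-10));   /* accepted: 0 */
        printf("boost -200 -> %d\n", validate_boost(-200));  /* rejected: -EINVAL */
        return 0;
}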