author     Srinath Sridharan <srinathsr@google.com>    2016-07-28 17:28:55 +0100
committer  John Stultz <john.stultz@linaro.org>        2016-08-10 15:18:35 -0700
commit     00aae8d5d5cd6f28d7603e0c1c4ac5cf91cb4aa3 (patch)
tree       db52bfb982bad85a51b49fc4ef49aedad5b60b5a /kernel
parent     6ba071d89dd72b080b9f0e4abf587cad99d5320b (diff)
sched/tune: Add support for negative boost values
Change-Id: I164ee04ba98c3a776605f18cb65ee61b3e917939

Contains also:

eas/stune: schedtune cpu boost_max must be non-negative.

This is to avoid under-accounting cpu capacity which may cause task
stacking and frequency spikes.

Change-Id: Ie1c1cbd52a6edb77b4c15a830030aa748dff6f29
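For illustration only (this sketch is not part of the patch): a minimal
user-space model of the signal-proportional-compensation arithmetic changed
below, assuming SCHED_LOAD_SCALE = 1024 and the same multiply-by-1311,
shift-by-17 approximation of division by 100. A positive boost scales the
headroom above the signal; a negative boost scales the signal itself and
yields a negative margin.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024LL

/* Illustrative model of schedtune_margin(): positive boosts scale the
 * headroom (SCHED_LOAD_SCALE - S), negative boosts scale the signal S
 * itself; the division by 100 uses the same 1311 >> 17 approximation. */
static long margin(long signal, long boost)
{
	long long m;

	if (boost >= 0)
		m = (SCHED_LOAD_SCALE - signal) * boost;
	else
		m = -signal * boost;

	m *= 1311;	/* ~ m / 100 */
	m >>= 17;

	if (boost < 0)
		m = -m;
	return (long)m;
}

int main(void)
{
	/* utilization 600 of 1024: +30 adds ~127, -30 removes ~180 */
	printf("%ld %ld\n", margin(600, 30), margin(600, -30));
	return 0;
}

Compiled stand-alone this prints "127 -180" for a utilization of 600: a +30
boost adds roughly 30% of the 424 units of headroom, while a -30 boost
removes roughly 30% of the signal.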
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  37
-rw-r--r--  kernel/sched/tune.c  25
2 files changed, 37 insertions, 25 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2b23dfefe6f1..aef41d9acd83 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5250,22 +5250,25 @@ static bool cpu_overutilized(int cpu)
#ifdef CONFIG_SCHED_TUNE
-static unsigned long
-schedtune_margin(unsigned long signal, unsigned long boost)
+static long
+schedtune_margin(unsigned long signal, long boost)
{
- unsigned long long margin = 0;
+ long long margin = 0;
/*
* Signal proportional compensation (SPC)
*
* The Boost (B) value is used to compute a Margin (M) which is
* proportional to the complement of the original Signal (S):
- * M = B * (SCHED_LOAD_SCALE - S)
+ * M = B * (SCHED_LOAD_SCALE - S), if B is positive
+ * M = B * S, if B is negative
* The obtained M could be used by the caller to "boost" S.
*/
- margin = SCHED_LOAD_SCALE - signal;
- margin *= boost;
-
+ if (boost >= 0) {
+ margin = SCHED_LOAD_SCALE - signal;
+ margin *= boost;
+ } else
+ margin = -signal * boost;
/*
* Fast integer division by constant:
* Constant : (C) = 100
@@ -5281,13 +5284,15 @@ schedtune_margin(unsigned long signal, unsigned long boost)
margin *= 1311;
margin >>= 17;
+ if (boost < 0)
+ margin *= -1;
return margin;
}
-static inline unsigned int
+static inline int
schedtune_cpu_margin(unsigned long util, int cpu)
{
- unsigned int boost;
+ int boost;
#ifdef CONFIG_CGROUP_SCHEDTUNE
boost = schedtune_cpu_boost(cpu);
@@ -5300,12 +5305,12 @@ schedtune_cpu_margin(unsigned long util, int cpu)
return schedtune_margin(util, boost);
}
-static inline unsigned long
+static inline long
schedtune_task_margin(struct task_struct *task)
{
- unsigned int boost;
+ int boost;
unsigned long util;
- unsigned long margin;
+ long margin;
#ifdef CONFIG_CGROUP_SCHEDTUNE
boost = schedtune_task_boost(task);
@@ -5323,13 +5328,13 @@ schedtune_task_margin(struct task_struct *task)
#else /* CONFIG_SCHED_TUNE */
-static inline unsigned int
+static inline int
schedtune_cpu_margin(unsigned long util, int cpu)
{
return 0;
}
-static inline unsigned int
+static inline int
schedtune_task_margin(struct task_struct *task)
{
return 0;
@@ -5341,7 +5346,7 @@ static inline unsigned long
boosted_cpu_util(int cpu)
{
unsigned long util = cpu_util(cpu);
- unsigned long margin = schedtune_cpu_margin(util, cpu);
+ long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
@@ -5352,7 +5357,7 @@ static inline unsigned long
boosted_task_util(struct task_struct *task)
{
unsigned long util = task_util(task);
- unsigned long margin = schedtune_task_margin(task);
+ long margin = schedtune_task_margin(task);
trace_sched_boost_task(task, util, margin);
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 8ca8db2de818..afc4a7747161 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -213,10 +213,11 @@ static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
*/
struct boost_groups {
/* Maximum boost value for all RUNNABLE tasks on a CPU */
- unsigned boost_max;
+ bool idle;
+ int boost_max;
struct {
/* The boost for tasks on that boost group */
- unsigned boost;
+ int boost;
/* Count of RUNNABLE tasks on that boost group */
unsigned tasks;
} group[BOOSTGROUPS_COUNT];
@@ -229,7 +230,7 @@ static void
schedtune_cpu_update(int cpu)
{
struct boost_groups *bg;
- unsigned boost_max;
+ int boost_max;
int idx;
bg = &per_cpu(cpu_boost_groups, cpu);
@@ -243,9 +244,13 @@ schedtune_cpu_update(int cpu)
*/
if (bg->group[idx].tasks == 0)
continue;
+
boost_max = max(boost_max, bg->group[idx].boost);
}
-
+ /* Ensures boost_max is non-negative when all cgroup boost values
+ * are negative. Avoids under-accounting of cpu capacity which may cause
+ * task stacking and frequency spikes. */
+ boost_max = max(boost_max, 0);
bg->boost_max = boost_max;
}
@@ -391,7 +396,7 @@ int schedtune_task_boost(struct task_struct *p)
return task_boost;
}
-static u64
+static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct schedtune *st = css_st(css);
@@ -401,11 +406,13 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 boost)
+ s64 boost)
{
struct schedtune *st = css_st(css);
+ unsigned threshold_idx;
+ int boost_pct;
- if (boost < 0 || boost > 100)
+ if (boost < -100 || boost > 100)
return -EINVAL;
st->boost = boost;
@@ -423,8 +430,8 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
static struct cftype files[] = {
{
.name = "boost",
- .read_u64 = boost_read,
- .write_u64 = boost_write,
+ .read_s64 = boost_read,
+ .write_s64 = boost_write,
},
{ } /* terminate */
};
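Usage note (illustrative, not part of the commit): with the handlers switched
to read_s64/write_s64, the schedtune.boost cgroup attribute takes signed
values, so on a typical Android setup where the schedtune controller is
mounted at /dev/stune one could, for example, write -10 to
<group>/schedtune.boost to de-boost that group; writes outside the
[-100, 100] range are still rejected by boost_write() with -EINVAL.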