author     Srivatsa Vaddagiri <vatsa@codeaurora.org>   2014-04-02 19:18:38 -0700
committer  David Keitel <dkeitel@codeaurora.org>       2016-03-23 19:59:11 -0700
commit     fb9ab2a720a549c8cdcc7956c3fceaf5199503a2 (patch)
tree       ff397f045bca22bc67fa9ca4f48b497c8be6b029 /kernel
parent     bf863e333ff3704543c1300f837058b33c7bcb46 (diff)
sched: Provide tunable to switch between PELT and window-based stats
Provide a runtime tunable to switch between using PELT-based load stats
and window-based load stats. This will be needed for runtime analysis of
the two load tracking schemes.

Change-Id: I018f6a90b49844bf2c4e5666912621d87acc7217
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c    12
-rw-r--r--  kernel/sched/fair.c    10
-rw-r--r--  kernel/sched/sched.h   13
3 files changed, 28 insertions(+), 7 deletions(-)
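
The tunable is a single __read_mostly flag consulted at every site in the diff below where a per-task load figure is read, so either load-tracking scheme can feed the same accounting paths. A minimal userspace sketch of that dispatch pattern (the struct and field names are illustrative stand-ins for se.avg.runnable_avg_sum_scaled and ravg.demand, not the kernel's types):

#include <stdio.h>

/* Illustrative stand-ins for the two per-task load sources. */
struct toy_task {
        unsigned int pelt_sum;  /* plays the role of se.avg.runnable_avg_sum_scaled */
        unsigned int demand;    /* plays the role of ravg.demand */
};

/* 1 -> use PELT-based load stats, 0 -> use window-based load stats */
static unsigned int use_pelt = 1;

static unsigned int task_load(const struct toy_task *p)
{
        return use_pelt ? p->pelt_sum : p->demand;
}

int main(void)
{
        struct toy_task p = { .pelt_sum = 300, .demand = 2500000 };

        use_pelt = 1;
        printf("PELT load:         %u\n", task_load(&p));
        use_pelt = 0;
        printf("window-based load: %u\n", task_load(&p));
        return 0;
}
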
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbc51a548bf9..df50659bd662 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1752,10 +1752,10 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
__read_mostly unsigned int sched_ravg_window = 10000000;
/* Min window size (in ns) = 10ms */
-__read_mostly unsigned int min_sched_ravg_window = 10000000;
+#define MIN_SCHED_RAVG_WINDOW 10000000
/* Max window size (in ns) = 1s */
-__read_mostly unsigned int max_sched_ravg_window = 1000000000;
+#define MAX_SCHED_RAVG_WINDOW 1000000000
#define WINDOW_STATS_USE_RECENT 0
#define WINDOW_STATS_USE_MAX 1
@@ -1764,6 +1764,9 @@ __read_mostly unsigned int max_sched_ravg_window = 1000000000;
__read_mostly unsigned int sysctl_sched_window_stats_policy =
WINDOW_STATS_USE_AVG;
+/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
+unsigned int __read_mostly sched_use_pelt = 1;
+
unsigned int max_possible_efficiency = 1024;
unsigned int min_possible_efficiency = 1024;
@@ -1832,6 +1835,9 @@ static int __init set_sched_ravg_window(char *str)
{
get_option(&str, &sched_ravg_window);
+ sched_use_pelt = (sched_ravg_window < MIN_SCHED_RAVG_WINDOW ||
+ sched_ravg_window > MAX_SCHED_RAVG_WINDOW);
+
return 0;
}
@@ -1843,7 +1849,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
int new_window;
u64 wallclock = sched_clock();
- if (is_idle_task(p) || (sched_ravg_window < min_sched_ravg_window))
+ if (is_idle_task(p) || sched_use_pelt)
return;
do {
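
The boot-parameter hook above also decides which scheme is in effect: set_sched_ravg_window() folds the range check into sched_use_pelt once, so an out-of-range sched_ravg_window= value simply leaves the system on PELT and update_task_ravg() bails out early. A self-contained sketch of that fallback rule (constants copied from the hunk above; the helper name and main() are made up for illustration):

#include <stdio.h>

#define MIN_SCHED_RAVG_WINDOW   10000000        /* 10 ms in ns */
#define MAX_SCHED_RAVG_WINDOW 1000000000        /* 1 s in ns */

/* Mirrors the assignment in set_sched_ravg_window(): out-of-range -> PELT. */
static unsigned int pick_use_pelt(unsigned int window_ns)
{
        return window_ns < MIN_SCHED_RAVG_WINDOW ||
               window_ns > MAX_SCHED_RAVG_WINDOW;
}

int main(void)
{
        printf("window=20ms -> use_pelt=%u\n", pick_use_pelt(20000000));    /* 0: window stats */
        printf("window=1ms  -> use_pelt=%u\n", pick_use_pelt(1000000));     /* 1: fall back to PELT */
        printf("window=2s   -> use_pelt=%u\n", pick_use_pelt(2000000000u)); /* 1: fall back to PELT */
        return 0;
}
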
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d0cd0a8afe63..d1e675d65f6b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2545,12 +2545,18 @@ unsigned int __read_mostly sysctl_sched_init_task_load_pct = 100;
static inline unsigned int task_load(struct task_struct *p)
{
- return p->se.avg.runnable_avg_sum_scaled;
+ if (sched_use_pelt)
+ return p->se.avg.runnable_avg_sum_scaled;
+
+ return p->ravg.demand;
}
static inline unsigned int max_task_load(void)
{
- return LOAD_AVG_MAX;
+ if (sched_use_pelt)
+ return LOAD_AVG_MAX;
+
+ return sched_ravg_window;
}
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
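
max_task_load() is the ceiling a task's load is measured against: LOAD_AVG_MAX when PELT drives the numbers, the window length in nanoseconds when window-based demand does. The diff only declares pct_task_load() (see sched.h below), so the percentage arithmetic in this sketch is an assumption about how the two helpers are likely combined:

#include <stdio.h>
#include <stdint.h>

/* Assumed normalization: a task's load as a percentage of its scheme's ceiling. */
static unsigned int pct_load(uint64_t load, uint64_t max_load)
{
        return (unsigned int)(load * 100 / max_load);
}

int main(void)
{
        /* PELT: the ceiling is LOAD_AVG_MAX (47742 in kernels of this era). */
        printf("PELT:   %u%%\n", pct_load(23871, 47742));
        /* Window-based: the ceiling is sched_ravg_window, e.g. the 10 ms (1e7 ns) default. */
        printf("window: %u%%\n", pct_load(2500000, 10000000));
        return 0;
}
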
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 01aa57b070f1..5808369fdce3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -930,6 +930,7 @@ extern void init_new_task_load(struct task_struct *p);
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sched_ravg_window;
+extern unsigned int sched_use_pelt;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;
extern unsigned int pct_task_load(struct task_struct *p);
@@ -949,13 +950,21 @@ extern unsigned int sched_init_task_load_windows;
static inline void
inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
- rq->cumulative_runnable_avg += p->ravg.demand;
+ if (sched_use_pelt)
+ rq->cumulative_runnable_avg +=
+ p->se.avg.runnable_avg_sum_scaled;
+ else
+ rq->cumulative_runnable_avg += p->ravg.demand;
}
static inline void
dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
- rq->cumulative_runnable_avg -= p->ravg.demand;
+ if (sched_use_pelt)
+ rq->cumulative_runnable_avg -=
+ p->se.avg.runnable_avg_sum_scaled;
+ else
+ rq->cumulative_runnable_avg -= p->ravg.demand;
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}
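
inc_cumulative_runnable_avg() and dec_cumulative_runnable_avg() both dispatch on the same sched_use_pelt flag, and the flag is only written while parsing the boot parameter, so a task is always added to and removed from rq->cumulative_runnable_avg with the same metric; that pairing is what keeps the BUG_ON() underflow check quiet. A toy model of the invariant, with assert() standing in for BUG_ON() (types and values are illustrative):

#include <assert.h>
#include <stdio.h>

struct toy_task { long pelt_sum; long demand; };
struct toy_rq   { long cumulative_runnable_avg; };

static int use_pelt = 1;        /* fixed after boot-parameter parsing, as in the patch */

static long task_contrib(const struct toy_task *p)
{
        return use_pelt ? p->pelt_sum : p->demand;
}

static void enqueue(struct toy_rq *rq, const struct toy_task *p)
{
        rq->cumulative_runnable_avg += task_contrib(p);
}

static void dequeue(struct toy_rq *rq, const struct toy_task *p)
{
        rq->cumulative_runnable_avg -= task_contrib(p);
        assert(rq->cumulative_runnable_avg >= 0);       /* BUG_ON() in the real code */
}

int main(void)
{
        struct toy_rq rq = { 0 };
        struct toy_task a = { .pelt_sum = 300, .demand = 2500000 };

        enqueue(&rq, &a);
        dequeue(&rq, &a);       /* same metric on both paths, so no underflow */
        printf("cumulative_runnable_avg = %ld\n", rq.cumulative_runnable_avg);
        return 0;
}
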