author    Srivatsa Vaddagiri <vatsa@codeaurora.org>    2014-08-11 10:21:40 +0530
committer David Keitel <dkeitel@codeaurora.org>        2016-03-23 20:00:26 -0700
commit    d8932ae7df04cb715c5f4214fdbdbddfc8f750c7 (patch)
tree      09cbf6b434a78703166fb97517005072d2311eb6
parent    32e4c4a368fed944f7c92a7cb7cf96223b7f599d (diff)
sched: window-stats: legacy mode
Support legacy mode, which results in the busy time seen by the governor being close to what it would have seen via the existing APIs, i.e. get_cpu_idle_time_us(), get_cpu_iowait_time_us() and get_cpu_idle_time_jiffy(). In particular, legacy mode means that only task execution time is counted in rq->curr_runnable_sum and rq->prev_runnable_sum, and that task migration does not result in any adjustment of those counters.

Change-Id: If374ccc084aa73f77374b6b3ab4cd0a4ca7b8c90
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
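The following is a minimal userspace sketch (not kernel code; rq_stats, account_exec and roll_window are illustrative names, not symbols from this patch) of the narrower accounting that legacy mode implies: only the task's own execution delta feeds curr_runnable_sum, a window rollover simply shifts curr into prev, and nothing is transferred on migration. It loosely mirrors the sched_freq_legacy_mode branches added to update_task_ravg() below.

/* Illustrative userspace model of legacy-mode busy-time accounting. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct rq_stats {
	uint64_t curr_runnable_sum;	/* busy time in the current window */
	uint64_t prev_runnable_sum;	/* busy time in the last full window */
};

static bool legacy_mode = true;		/* stands in for sched_freq_legacy_mode */

/* Legacy mode accumulates only raw execution time; the non-legacy path
 * would instead contribute the task's demand/partial_demand. */
static void account_exec(struct rq_stats *rq, uint64_t exec_delta_ns)
{
	if (legacy_mode)
		rq->curr_runnable_sum += exec_delta_ns;
}

/* Window rollover: the finished window becomes "prev", the new window
 * starts empty, as in the legacy branch of update_task_ravg(). */
static void roll_window(struct rq_stats *rq)
{
	rq->prev_runnable_sum = rq->curr_runnable_sum;
	rq->curr_runnable_sum = 0;
}

int main(void)
{
	struct rq_stats rq = { 0, 0 };

	account_exec(&rq, 3000000);	/* task ran 3 ms */
	account_exec(&rq, 2000000);	/* task ran 2 ms more */
	roll_window(&rq);		/* 10 ms window expired */

	printf("prev=%llu curr=%llu\n",
	       (unsigned long long)rq.prev_runnable_sum,
	       (unsigned long long)rq.curr_runnable_sum);
	return 0;
}

A migration in this model would leave both sums untouched, which is why the patch also skips fixup_busy_time() when legacy mode is enabled.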
-rw-r--r--  include/linux/sched/sysctl.h |  1
-rw-r--r--  kernel/sched/core.c          | 40
-rw-r--r--  kernel/sched/fair.c          |  3
-rw-r--r--  kernel/sched/sched.h         |  1
-rw-r--r--  kernel/sysctl.c              |  7
5 files changed, 42 insertions(+), 10 deletions(-)
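Because the new control is registered in kern_table (see the kernel/sysctl.c hunk at the end of the patch), it is exposed as /proc/sys/kernel/sched_freq_legacy_mode and goes through sched_window_update_handler like the other window-stats knobs. A small, hypothetical userspace toggle might look like this (error handling kept minimal):

/* Hedged example: enable legacy busy-time accounting from userspace. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_freq_legacy_mode", "w");

	if (!f) {
		perror("sched_freq_legacy_mode");
		return 1;
	}
	fprintf(f, "1\n");	/* 1 = legacy accounting, 0 = windowed accounting */
	fclose(f);
	return 0;
}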
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 9e0d06370f1d..fb51198716fa 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -44,6 +44,7 @@ extern unsigned int sysctl_sched_wakeup_load_threshold;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_account_wait_time;
extern unsigned int sysctl_sched_ravg_hist_size;
+extern unsigned int sysctl_sched_freq_legacy_mode;
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sysctl_sched_init_task_load_pct;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 17d5f7a6b205..0225b622cd06 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1163,9 +1163,10 @@ static inline void clear_hmp_request(int cpu) { }
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
/*
- * sched_window_stats_policy, sched_account_wait_time and sched_ravg_hist_size
- * have a 'sysctl' copy associated with them. This is required for atomic update
- * of those variables when being modifed via sysctl interface.
+ * sched_window_stats_policy, sched_account_wait_time, sched_ravg_hist_size
+ * and sched_freq_legacy_mode have a 'sysctl' copy associated with them. This
+ * is required for atomic update of those variables when being modified via
+ * sysctl interface.
*
* IMPORTANT: Initialize both copies to same value!!
*/
@@ -1182,6 +1183,9 @@ __read_mostly unsigned int sysctl_sched_window_stats_policy =
static __read_mostly unsigned int sched_account_wait_time = 1;
__read_mostly unsigned int sysctl_sched_account_wait_time = 1;
+static __read_mostly unsigned int sched_freq_legacy_mode;
+__read_mostly unsigned int sysctl_sched_freq_legacy_mode;
+
/* Window size (in ns) */
__read_mostly unsigned int sched_ravg_window = 10000000;
@@ -1334,14 +1338,15 @@ compute_demand:
if (new_window)
p->ravg.demand = demand;
- if (update_sum && (p->ravg.flags & CURR_WINDOW_CONTRIB)) {
+ if (!sched_freq_legacy_mode && update_sum &&
+ (p->ravg.flags & CURR_WINDOW_CONTRIB)) {
rq->curr_runnable_sum -= p->ravg.partial_demand;
BUG_ON((s64)rq->curr_runnable_sum < 0);
}
p->ravg.partial_demand = demand;
- if (update_sum && !new_window) {
+ if (!sched_freq_legacy_mode && update_sum && !new_window) {
rq->curr_runnable_sum += p->ravg.partial_demand;
p->ravg.flags |= CURR_WINDOW_CONTRIB;
}
@@ -1491,6 +1496,9 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
BUG_ON(delta < 0);
p->ravg.sum += delta;
+ if (sched_freq_legacy_mode && (event == PUT_PREV_TASK))
+ rq->curr_runnable_sum += delta;
+
if (unlikely(p->ravg.sum > window_size))
p->ravg.sum = window_size;
}
@@ -1511,6 +1519,18 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
if (update_sum) {
if (event == PUT_PREV_TASK || event == TASK_UPDATE) {
+ if (sched_freq_legacy_mode) {
+ if (nr_full_windows) {
+ /* sum == scaled window_size */
+ rq->curr_runnable_sum = sum;
+ }
+ rq->prev_runnable_sum =
+ rq->curr_runnable_sum;
+ rq->curr_runnable_sum = 0;
+ mark_start = window_start;
+ continue;
+ }
+
if (!nr_full_windows) {
rq->curr_runnable_sum -= partial_demand;
rq->curr_runnable_sum += p->ravg.demand;
@@ -1521,7 +1541,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
}
rq->curr_runnable_sum = p->ravg.partial_demand;
p->ravg.flags |= CURR_WINDOW_CONTRIB;
- } else {
+ } else if (!sched_freq_legacy_mode) {
if (!nr_full_windows) {
rq->prev_runnable_sum -= partial_demand;
BUG_ON((s64)rq->prev_runnable_sum < 0);
@@ -1545,12 +1565,12 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
* sched_account_wait_time == 0, ensure this dependency is met.
*/
- if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
+ if (!sched_freq_legacy_mode && !(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
rq->curr_runnable_sum += p->ravg.partial_demand;
p->ravg.flags |= CURR_WINDOW_CONTRIB;
}
- if (!(p->ravg.flags & PREV_WINDOW_CONTRIB)) {
+ if (!sched_freq_legacy_mode && !(p->ravg.flags & PREV_WINDOW_CONTRIB)) {
rq->prev_runnable_sum += p->ravg.demand;
p->ravg.flags |= PREV_WINDOW_CONTRIB;
}
@@ -1722,6 +1742,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
sched_window_stats_policy = sysctl_sched_window_stats_policy;
sched_account_wait_time = sysctl_sched_account_wait_time;
sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
+ sched_freq_legacy_mode = sysctl_sched_freq_legacy_mode;
for_each_online_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
@@ -2336,7 +2357,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->se.nr_migrations++;
perf_event_task_migrate(p);
- if (sched_enable_hmp && (p->on_rq || p->state == TASK_WAKING))
+ if (sched_enable_hmp && (p->on_rq || p->state == TASK_WAKING)
+ && !sched_freq_legacy_mode)
fixup_busy_time(p, new_cpu);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 48a63fac33e9..7c24b9809527 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3217,7 +3217,8 @@ static inline int invalid_value(unsigned int *data)
/*
* Handle "atomic" update of sysctl_sched_window_stats_policy,
- * sysctl_sched_ravg_hist_size and sysctl_sched_account_wait_time variables.
+ * sysctl_sched_ravg_hist_size, sysctl_sched_account_wait_time and
+ * sysctl_sched_freq_legacy_mode variables.
*/
int sched_window_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 023d9e35bf41..3ce879361667 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1071,6 +1071,7 @@ extern void boost_kick(int cpu);
#else /* CONFIG_SCHED_HMP */
#define sched_enable_hmp 0
+#define sched_freq_legacy_mode 1
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline void pre_big_small_task_count_change(void) { }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index be2318bdf4ae..7fbe9b146343 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -310,6 +310,13 @@ static struct ctl_table kern_table[] = {
#endif
#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
{
+ .procname = "sched_freq_legacy_mode",
+ .data = &sysctl_sched_freq_legacy_mode,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_window_update_handler,
+ },
+ {
.procname = "sched_account_wait_time",
.data = &sysctl_sched_account_wait_time,
.maxlen = sizeof(unsigned int),