 drivers/cpufreq/Kconfig | 10 ++++++++++
 include/linux/sched.h   |  2 ++
 kernel/sched/core.c     |  9 +++++++++
 kernel/sched/fair.c     |  4 ++++
 kernel/sched/sched.h    | 29 +++++++++++++++++++++++++++++
 kernel/sysctl.c         |  2 ++
 6 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e3ddea30ac61..fd62f091f995 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -25,6 +25,16 @@ config CPU_FREQ_BOOST_SW
bool
depends on THERMAL
+config SCHED_FREQ_INPUT
+ bool "Scheduler inputs to cpufreq governor"
+ depends on SMP && FAIR_GROUP_SCHED
+ help
+ This option enables support for scheduler based CPU utilization
+ calculations which may then be used by any cpufreq governor. The
+ scheduler keeps track of "recent" cpu demand of tasks, which can
+ help determine need for changing frequency well in advance of what
+ a governor would have been able to detect on its own.
+
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
default y
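
As context for the new help text above: the option gates window-based demand tracking, where each task's recent runtime is accumulated over fixed windows of sched_ravg_window (10 ms in the core.c hunk below) and folded into a per-task demand figure a cpufreq governor can read. A minimal self-contained sketch of that idea follows; the struct and function names are illustrative, not the kernel's (the real bookkeeping lives in struct ravg and update_task_ravg(), whose body is not part of this diff):

#include <stdint.h>

#define WINDOW_NS 10000000ULL		/* mirrors sched_ravg_window (10 ms) */

/* Stand-in for the per-task struct ravg bookkeeping; the field names are
 * illustrative except demand and mark_start, which mirror the patch. */
struct demand_sample {
	uint64_t mark_start;	/* timestamp of the previous update */
	uint64_t sum;		/* runtime accumulated in the current window */
	uint64_t demand;	/* demand published from the last full window */
};

/* Account the runtime since mark_start; once a window completes, publish
 * the accumulated sum as the task's recent demand and start a new window. */
static void account_runtime(struct demand_sample *d, uint64_t now_ns)
{
	uint64_t delta = now_ns - d->mark_start;

	d->sum += delta;
	if (d->sum >= WINDOW_NS) {
		d->demand = d->sum > WINDOW_NS ? WINDOW_NS : d->sum;
		d->sum = 0;
	}
	d->mark_start = now_ns;
}
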
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bf4b9f5e2434..42a09f9d83bd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1424,7 +1424,9 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_FREQ_INPUT
struct ravg ravg;
+#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b13260ec9ce5..35ecc5ed1f13 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1746,6 +1746,8 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
wq_worker_waking_up(p, cpu_of(rq));
}
+#ifdef CONFIG_SCHED_FREQ_INPUT
+
/* Window size (in ns) */
__read_mostly unsigned int sched_ravg_window = 10000000;
@@ -1891,6 +1893,8 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
p->ravg.mark_start = wallclock;
}
+#endif /* CONFIG_SCHED_FREQ_INPUT */
+
/*
* Mark the task runnable and perform wakeup-preemption.
*/
@@ -7569,6 +7573,7 @@ void __init sched_init_smp(void)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_FREQ_INPUT
/*
* Maximum possible frequency across all cpus. Task demand and cpu
* capacity (cpu_power) metrics are scaled in reference to it.
@@ -7655,6 +7660,8 @@ static int register_sched_callback(void)
*/
core_initcall(register_sched_callback);
+#endif /* CONFIG_SCHED_FREQ_INPUT */
+
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
@@ -7792,11 +7799,13 @@ void __init sched_init(void)
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
+#ifdef CONFIG_SCHED_FREQ_INPUT
rq->cur_freq = 1;
rq->max_freq = 1;
rq->min_freq = 1;
rq->max_possible_freq = 1;
rq->cumulative_runnable_avg = 0;
+#endif
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
rq->cstate = 0;
rq->wakeup_latency = 0;
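
The comment in the core.c hunk above says task demand and cpu capacity metrics are scaled in reference to the maximum possible frequency across all cpus. The patch does not show that arithmetic; a plausible normalization, for illustration only, converts runtime observed at the current frequency into equivalent time at max_possible_freq so demand is comparable across cpus and operating points (overflow handling omitted):

#include <stdint.h>

/* Illustration only: work done in delta_ns at cur_freq corresponds to
 * proportionally less execution time at the fastest supported frequency. */
static uint64_t scale_exec_time(uint64_t delta_ns, unsigned int cur_freq,
				unsigned int max_possible_freq)
{
	return delta_ns * cur_freq / max_possible_freq;
}
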
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 93057b5739f7..5c37d1952e67 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2894,6 +2894,8 @@ static inline int idle_balance(struct rq *rq)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_FREQ_INPUT
+
static inline unsigned int task_load(struct task_struct *p)
{
return p->ravg.demand;
@@ -2927,6 +2929,8 @@ void init_new_task_load(struct task_struct *p)
p->ravg.sum_history[i] = 0;
}
+#endif /* CONFIG_SCHED_FREQ_INPUT */
+
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dab9568c36c7..bb7f283b6dac 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -643,12 +643,14 @@ struct rq {
u64 max_idle_balance_cost;
#endif
+#ifdef CONFIG_SCHED_FREQ_INPUT
/*
* max_freq = user or thermal defined maximum
* max_possible_freq = maximum supported by hardware
*/
unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
u64 cumulative_runnable_avg;
+#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
@@ -915,6 +917,8 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"
+#ifdef CONFIG_SCHED_FREQ_INPUT
+
extern unsigned int sched_ravg_window;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;
@@ -934,6 +938,24 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}
+#else /* CONFIG_SCHED_FREQ_INPUT */
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline void
+inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void init_new_task_load(struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_FREQ_INPUT */
+
#ifdef CONFIG_CGROUP_SCHED
/*
@@ -1267,8 +1289,15 @@ struct sched_class {
#endif
};
+#ifdef CONFIG_SCHED_FREQ_INPUT
extern void
update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum);
+#else /* CONFIG_SCHED_FREQ_INPUT */
+static inline void
+update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
+{
+}
+#endif /* CONFIG_SCHED_FREQ_INPUT */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
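
The sched.h hunk above adds empty stubs of inc/dec_cumulative_runnable_avg for the !CONFIG_SCHED_FREQ_INPUT case; the real helpers (only the tail of dec, with its BUG_ON() underflow check, appears in the context lines) maintain a per-runqueue sum of the demand of all runnable tasks. A sketch of that bookkeeping with toy stand-in types rather than the kernel's rq and task_struct:

#include <stdint.h>
#include <assert.h>

/* Toy stand-ins for the kernel's rq and task_struct, for illustration. */
struct toy_rq   { uint64_t cumulative_runnable_avg; };
struct toy_task { uint64_t demand; };

/* On enqueue: fold the task's demand into the runqueue-wide sum that a
 * governor can read as the cpu's aggregate runnable load. */
static void toy_inc_cumulative_runnable_avg(struct toy_rq *rq, struct toy_task *p)
{
	rq->cumulative_runnable_avg += p->demand;
}

/* On dequeue: remove the task's contribution; the assert mirrors the
 * BUG_ON() underflow check visible in the sched.h context lines. */
static void toy_dec_cumulative_runnable_avg(struct toy_rq *rq, struct toy_task *p)
{
	rq->cumulative_runnable_avg -= p->demand;
	assert((int64_t)rq->cumulative_runnable_avg >= 0);
}
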
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index a91a7a360de0..25f96e3a83a9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -292,6 +292,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_FREQ_INPUT
{
.procname = "sched_window_stats_policy",
.data = &sysctl_sched_window_stats_policy,
@@ -306,6 +307,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#endif
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",