author     Todd Kjos <tkjos@google.com>            2016-03-11 16:44:16 -0800
committer  Amit Pundir <amit.pundir@linaro.org>    2016-09-14 14:59:32 +0530
commit     b312c991e9055198e96571feaf73df26e647df56 (patch)
tree       9d9b0227cf9272781c53fb741452b8b961067c47
parent     d42fb8f959562bc34f7f2b17ca1e370f93a306a9 (diff)
sched/fair: add tunable to set initial task load
The choice of initial task load upon fork has a large influence on CPU
and OPP selection when scheduler-driven DVFS is in use. Make this
tuneable by adding a new sysctl, "sched_initial_task_util".

If the sched governor is not used, the default remains at
SCHED_LOAD_SCALE. Otherwise, the value from the sysctl is used. This
defaults to 0.

Signed-off-by: Todd Kjos <tkjos@google.com>
-rw-r--r--  include/linux/sched/sysctl.h  1
-rw-r--r--  kernel/sched/fair.c           5
-rw-r--r--  kernel/sysctl.c               7
3 files changed, 12 insertions(+), 1 deletion(-)
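[Editor's note] Because the entry below is added to kern_table, the knob
appears as /proc/sys/kernel/sched_initial_task_util on a kernel carrying
this patch. A minimal userspace sketch of setting it before forking
workers follows; the value 256 (25% of a 1024 SCHED_LOAD_SCALE) is
purely illustrative, and root (mode 0644) is needed to write it:

/* Sketch only, not part of the patch: set the new tunable so that
 * tasks forked afterwards start with a quarter-scale utilization
 * estimate when scheduler-driven DVFS is active.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_initial_task_util", "w");

	if (!f) {
		perror("sched_initial_task_util");
		return EXIT_FAILURE;
	}
	fprintf(f, "%u\n", 256U);	/* illustrative value */
	fclose(f);
	return EXIT_SUCCESS;
}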
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 4883dcf3e1a9..2834841c507e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -41,6 +41,7 @@ extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_is_big_little;
extern unsigned int sysctl_sched_sync_hint_enable;
+extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;

enum sched_tunable_scaling {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e2b6174db07d..c60fd6685a05 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -53,6 +53,7 @@ unsigned int normalized_sysctl_sched_latency = 6000000ULL;
unsigned int sysctl_sched_is_big_little = 0;
unsigned int sysctl_sched_sync_hint_enable = 1;
+unsigned int sysctl_sched_initial_task_util = 0;
unsigned int sysctl_sched_cstate_aware = 1;

/*
@@ -687,7 +688,9 @@ void init_entity_runnable_average(struct sched_entity *se)
sa->period_contrib = 1023;
sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
- sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+ sa->util_avg = sched_freq() ?
+ sysctl_sched_initial_task_util :
+ scale_load_down(SCHED_LOAD_SCALE);
sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
}
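[Editor's note] Pulled out of the kernel context, the hunk above reduces
to the standalone sketch below. Here sched_freq() is a stub standing in
for the in-kernel check for the scheduler-driven DVFS governor, and
scale_load_down() is omitted since it is the identity unless extra load
resolution is configured; all values are illustrative.

/* Sketch of the init_entity_runnable_average() change in isolation. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024U

static unsigned int sysctl_sched_initial_task_util;	/* sysctl default: 0 */

static int sched_freq(void) { return 1; }	/* pretend schedfreq is active */

int main(void)
{
	unsigned int util_avg = sched_freq() ?
		sysctl_sched_initial_task_util :
		SCHED_LOAD_SCALE;

	/* With schedfreq active and the sysctl left at 0, a new task
	 * starts at util_avg == 0 and must earn its utilization estimate,
	 * instead of starting at full scale and biasing OPP selection up. */
	printf("initial util_avg = %u\n", util_avg);
	return 0;
}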
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 831d674a5566..dd46f370b73a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -319,6 +319,13 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "sched_initial_task_util",
+ .data = &sysctl_sched_initial_task_util,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "sched_cstate_aware",
.data = &sysctl_sched_cstate_aware,
.maxlen = sizeof(unsigned int),