From 5160d93b6dc93a863c22f242f3ea4c8ae6c08076 Mon Sep 17 00:00:00 2001 From: Joonwoo Park Date: Wed, 25 May 2016 11:24:14 -0700 Subject: sched: eliminate sched_account_wait_time knob Kill unused scheduler knob sched_account_wait_time. With this change scheduler always accounts task's wait time into demand. Change-Id: Ifa4bcb5685798f48fd020f3d0c9853220b3f5fdc Signed-off-by: Joonwoo Park --- kernel/sched/core.c | 14 +++----------- kernel/sched/fair.c | 6 +----- kernel/sysctl.c | 7 ------- 3 files changed, 4 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 87e93b3f3b4e..e2235e4e7158 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1763,7 +1763,7 @@ struct cpu_cycle { #if defined(CONFIG_SCHED_HMP) /* - * sched_window_stats_policy, sched_account_wait_time, sched_ravg_hist_size, + * sched_window_stats_policy, sched_ravg_hist_size, * sched_migration_fixup, sched_freq_account_wait_time have a 'sysctl' copy * associated with them. This is required for atomic update of those variables * when being modifed via sysctl interface. @@ -1786,8 +1786,7 @@ static __read_mostly unsigned int sched_window_stats_policy = __read_mostly unsigned int sysctl_sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG; -static __read_mostly unsigned int sched_account_wait_time = 1; -__read_mostly unsigned int sysctl_sched_account_wait_time = 1; +#define SCHED_ACCOUNT_WAIT_TIME 1 __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC); @@ -2794,7 +2793,7 @@ static int account_busy_for_task_demand(struct task_struct *p, int event) * time. Likewise, if wait time is not treated as busy time, then * when a task begins to run or is migrated, it is not running and * is completing a segment of non-busy time. 
*/ - if (event == TASK_WAKE || (!sched_account_wait_time && + if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME && (event == PICK_NEXT_TASK || event == TASK_MIGRATE))) return 0; @@ -3185,7 +3184,6 @@ static void enable_window_stats(void) enum reset_reason_code { WINDOW_CHANGE, POLICY_CHANGE, - ACCOUNT_WAIT_TIME_CHANGE, HIST_SIZE_CHANGE, MIGRATION_FIXUP_CHANGE, FREQ_ACCOUNT_WAIT_TIME_CHANGE, @@ -3195,7 +3193,6 @@ enum reset_reason_code { const char *sched_window_reset_reasons[] = { "WINDOW_CHANGE", "POLICY_CHANGE", - "ACCOUNT_WAIT_TIME_CHANGE", "HIST_SIZE_CHANGE", "MIGRATION_FIXUP_CHANGE", "FREQ_ACCOUNT_WAIT_TIME_CHANGE"}; @@ -3260,11 +3257,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size) old = sched_window_stats_policy; new = sysctl_sched_window_stats_policy; sched_window_stats_policy = sysctl_sched_window_stats_policy; - } else if (sched_account_wait_time != sysctl_sched_account_wait_time) { - reason = ACCOUNT_WAIT_TIME_CHANGE; - old = sched_account_wait_time; - new = sysctl_sched_account_wait_time; - sched_account_wait_time = sysctl_sched_account_wait_time; } else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) { reason = HIST_SIZE_CHANGE; old = sched_ravg_hist_size; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a33eddb7b17d..6a915f49eb2f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4080,16 +4080,12 @@ static inline int invalid_value(unsigned int *data) if (data == &sysctl_sched_window_stats_policy) return val >= WINDOW_STATS_INVALID_POLICY; - if (data == &sysctl_sched_account_wait_time) - return !(val == 0 || val == 1); - return invalid_value_freq_input(data); } /* * Handle "atomic" update of sysctl_sched_window_stats_policy, - * sysctl_sched_ravg_hist_size, sysctl_sched_account_wait_time and - * sched_freq_legacy_mode variables. + * sysctl_sched_ravg_hist_size and sched_freq_legacy_mode variables. 
*/ int sched_window_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 825be75ca1a3..13bcfe33ffc3 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -339,13 +339,6 @@ static struct ctl_table kern_table[] = { }, #endif #ifdef CONFIG_SCHED_HMP - { - .procname = "sched_account_wait_time", - .data = &sysctl_sched_account_wait_time, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_window_update_handler, - }, { .procname = "sched_cpu_high_irqload", .data = &sysctl_sched_cpu_high_irqload, -- cgit v1.2.3 From 462213d1ac46410c42a48054bd2a149dd48109a3 Mon Sep 17 00:00:00 2001 From: Joonwoo Park Date: Wed, 25 May 2016 11:27:35 -0700 Subject: sched: eliminate sched_freq_account_wait_time knob Kill unused scheduler knob sched_freq_account_wait_time. Change-Id: Ib74123ebd69dfa3f86cf7335099f50c12a6e93c3 Signed-off-by: Joonwoo Park --- kernel/sched/core.c | 23 +++++++---------------- kernel/sched/fair.c | 3 --- kernel/sysctl.c | 7 ------- 3 files changed, 7 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e2235e4e7158..f7207b3fb590 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1764,7 +1764,7 @@ struct cpu_cycle { /* * sched_window_stats_policy, sched_ravg_hist_size, - * sched_migration_fixup, sched_freq_account_wait_time have a 'sysctl' copy + * sched_migration_fixup have a 'sysctl' copy * associated with them. This is required for atomic update of those variables * when being modifed via sysctl interface. 
* @@ -1799,8 +1799,7 @@ __read_mostly unsigned int sysctl_sched_new_task_windows = 5; static __read_mostly unsigned int sched_migration_fixup = 1; __read_mostly unsigned int sysctl_sched_migration_fixup = 1; -static __read_mostly unsigned int sched_freq_account_wait_time; -__read_mostly unsigned int sysctl_sched_freq_account_wait_time; +#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0 /* * For increase, send notification if @@ -2166,11 +2165,11 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p, if (rq->curr == p) return 1; - return p->on_rq ? sched_freq_account_wait_time : 0; + return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0; } /* TASK_MIGRATE, PICK_NEXT_TASK left */ - return sched_freq_account_wait_time; + return SCHED_FREQ_ACCOUNT_WAIT_TIME; } static inline int @@ -2391,7 +2390,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event) return; if (event != PUT_PREV_TASK && event != TASK_UPDATE && - (!sched_freq_account_wait_time || + (!SCHED_FREQ_ACCOUNT_WAIT_TIME || (event != TASK_MIGRATE && event != PICK_NEXT_TASK))) return; @@ -2401,7 +2400,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event) * related groups */ if (event == TASK_UPDATE) { - if (!p->on_rq && !sched_freq_account_wait_time) + if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME) return; } @@ -3186,7 +3185,6 @@ enum reset_reason_code { POLICY_CHANGE, HIST_SIZE_CHANGE, MIGRATION_FIXUP_CHANGE, - FREQ_ACCOUNT_WAIT_TIME_CHANGE, FREQ_AGGREGATE_CHANGE, }; @@ -3195,7 +3193,7 @@ const char *sched_window_reset_reasons[] = { "POLICY_CHANGE", "HIST_SIZE_CHANGE", "MIGRATION_FIXUP_CHANGE", - "FREQ_ACCOUNT_WAIT_TIME_CHANGE"}; +}; /* Called with IRQs enabled */ void reset_all_window_stats(u64 window_start, unsigned int window_size) @@ -3269,13 +3267,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size) old = sched_migration_fixup; new = sysctl_sched_migration_fixup; sched_migration_fixup = sysctl_sched_migration_fixup; - 
} else if (sched_freq_account_wait_time != - sysctl_sched_freq_account_wait_time) { - reason = FREQ_ACCOUNT_WAIT_TIME_CHANGE; - old = sched_freq_account_wait_time; - new = sysctl_sched_freq_account_wait_time; - sched_freq_account_wait_time = - sysctl_sched_freq_account_wait_time; } else if (sched_freq_aggregate != sysctl_sched_freq_aggregate) { reason = FREQ_AGGREGATE_CHANGE; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6a915f49eb2f..43fd05817375 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4055,9 +4055,6 @@ static inline int invalid_value_freq_input(unsigned int *data) if (data == &sysctl_sched_migration_fixup) return !(*data == 0 || *data == 1); - if (data == &sysctl_sched_freq_account_wait_time) - return !(*data == 0 || *data == 1); - if (data == &sysctl_sched_freq_aggregate) return !(*data == 0 || *data == 1); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 13bcfe33ffc3..ccc6f20f11d3 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -323,13 +323,6 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sched_window_update_handler, }, - { - .procname = "sched_freq_account_wait_time", - .data = &sysctl_sched_freq_account_wait_time, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_window_update_handler, - }, { .procname = "sched_heavy_task", .data = &sysctl_sched_heavy_task_pct, -- cgit v1.2.3 From d009f9c149412bc3b70ae8146472fc4a3802b0de Mon Sep 17 00:00:00 2001 From: Joonwoo Park Date: Wed, 25 May 2016 11:42:05 -0700 Subject: sched: eliminate sched_enable_power_aware knob and parameter Kill unused scheduler knob and parameter sched_enable_power_aware. HMP scheduler always takes into account power cost for placing tasks. 
Change-Id: Ib26a21df9b903baac26c026862b0a41b4a8834f3 Signed-off-by: Joonwoo Park --- kernel/sched/core.c | 15 +-------------- kernel/sched/fair.c | 12 +----------- kernel/sysctl.c | 9 --------- 3 files changed, 2 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f7207b3fb590..0224624ab319 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -891,7 +891,7 @@ int sched_update_freq_max_load(const cpumask_t *cpumask) u32 hfreq; int hpct; - if (!per_cpu_info || !sysctl_sched_enable_power_aware) + if (!per_cpu_info) return 0; spin_lock_irqsave(&freq_max_load_lock, flags); @@ -1649,19 +1649,6 @@ static int __init set_sched_enable_hmp(char *str) early_param("sched_enable_hmp", set_sched_enable_hmp); -static int __init set_sched_enable_power_aware(char *str) -{ - int enable_power_aware = 0; - - get_option(&str, &enable_power_aware); - - sysctl_sched_enable_power_aware = !!enable_power_aware; - - return 0; -} - -early_param("sched_enable_power_aware", set_sched_enable_power_aware); - static inline int got_boost_kick(void) { int cpu = smp_processor_id(); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 43fd05817375..747a30d1988f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2705,13 +2705,6 @@ unsigned int __read_mostly sched_enable_hmp = 0; */ unsigned int __read_mostly sysctl_sched_spill_nr_run = 10; -/* - * Control whether or not individual CPU power consumption is used to - * guide task placement. - * This sysctl can be set to a default value using boot command line arguments. - */ -unsigned int __read_mostly sysctl_sched_enable_power_aware = 0; - /* * Place sync wakee tasks those have less than configured demand to the waker's * cluster. 
@@ -3081,8 +3074,7 @@ unsigned int power_cost(int cpu, u64 demand) struct rq *rq = cpu_rq(cpu); unsigned int pc; - if (!per_cpu_info || !per_cpu_info[cpu].ptable || - !sysctl_sched_enable_power_aware) + if (!per_cpu_info || !per_cpu_info[cpu].ptable) /* When power aware scheduling is not in use, or CPU * power data is not available, just use the CPU * capacity as a rough stand-in for real CPU power @@ -4317,8 +4309,6 @@ unsigned int cpu_temp(int cpu) #else /* CONFIG_SCHED_HMP */ -#define sysctl_sched_enable_power_aware 0 - struct cpu_select_env; struct sched_cluster; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ccc6f20f11d3..5288dec335fd 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -473,15 +473,6 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sched_boost_handler, }, - { - .procname = "sched_enable_power_aware", - .data = &sysctl_sched_enable_power_aware, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, - }, #endif /* CONFIG_SCHED_HMP */ #ifdef CONFIG_SCHED_DEBUG { -- cgit v1.2.3