Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  | 14
-rw-r--r--  kernel/sched/fair.c  |  6
-rw-r--r--  kernel/sysctl.c      |  7
3 files changed, 4 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 87e93b3f3b4e..e2235e4e7158 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1763,7 +1763,7 @@ struct cpu_cycle {
#if defined(CONFIG_SCHED_HMP)
/*
- * sched_window_stats_policy, sched_account_wait_time, sched_ravg_hist_size,
+ * sched_window_stats_policy, sched_ravg_hist_size,
* sched_migration_fixup, sched_freq_account_wait_time have a 'sysctl' copy
* associated with them. This is required for atomic update of those variables
 * when being modified via sysctl interface.
@@ -1786,8 +1786,7 @@ static __read_mostly unsigned int sched_window_stats_policy =
__read_mostly unsigned int sysctl_sched_window_stats_policy =
WINDOW_STATS_MAX_RECENT_AVG;
-static __read_mostly unsigned int sched_account_wait_time = 1;
-__read_mostly unsigned int sysctl_sched_account_wait_time = 1;
+#define SCHED_ACCOUNT_WAIT_TIME 1
__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
@@ -2794,7 +2793,7 @@ static int account_busy_for_task_demand(struct task_struct *p, int event)
* time. Likewise, if wait time is not treated as busy time, then
* when a task begins to run or is migrated, it is not running and
* is completing a segment of non-busy time. */
- if (event == TASK_WAKE || (!sched_account_wait_time &&
+ if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
(event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
return 0;
@@ -3185,7 +3184,6 @@ static void enable_window_stats(void)
enum reset_reason_code {
WINDOW_CHANGE,
POLICY_CHANGE,
- ACCOUNT_WAIT_TIME_CHANGE,
HIST_SIZE_CHANGE,
MIGRATION_FIXUP_CHANGE,
FREQ_ACCOUNT_WAIT_TIME_CHANGE,
@@ -3195,7 +3193,6 @@ enum reset_reason_code {
const char *sched_window_reset_reasons[] = {
"WINDOW_CHANGE",
"POLICY_CHANGE",
- "ACCOUNT_WAIT_TIME_CHANGE",
"HIST_SIZE_CHANGE",
"MIGRATION_FIXUP_CHANGE",
"FREQ_ACCOUNT_WAIT_TIME_CHANGE"};
@@ -3260,11 +3257,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
old = sched_window_stats_policy;
new = sysctl_sched_window_stats_policy;
sched_window_stats_policy = sysctl_sched_window_stats_policy;
- } else if (sched_account_wait_time != sysctl_sched_account_wait_time) {
- reason = ACCOUNT_WAIT_TIME_CHANGE;
- old = sched_account_wait_time;
- new = sysctl_sched_account_wait_time;
- sched_account_wait_time = sysctl_sched_account_wait_time;
} else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
reason = HIST_SIZE_CHANGE;
old = sched_ravg_hist_size;
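
With sched_account_wait_time replaced by the compile-time constant above, the wait-time branch in account_busy_for_task_demand() becomes dead code the compiler can drop, so time spent waiting is always accounted as busy time. A minimal standalone sketch of that check, not the kernel's actual definitions (the enum values here are stand-ins for the scheduler's real task_event codes):

/* Stand-ins for the scheduler's task_event codes (illustrative only). */
enum task_event_sketch { TASK_WAKE, PICK_NEXT_TASK, TASK_MIGRATE, TASK_UPDATE };

#define SCHED_ACCOUNT_WAIT_TIME 1

/*
 * Post-patch shape of the check: the (!SCHED_ACCOUNT_WAIT_TIME && ...)
 * arm is constant-false, so only a TASK_WAKE event marks the elapsed
 * segment as non-busy; PICK_NEXT_TASK and TASK_MIGRATE now always
 * account the preceding wait as busy time.
 */
static int account_busy_for_task_demand_sketch(enum task_event_sketch event)
{
	if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
	    (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
		return 0;

	return 1;
}
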
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a33eddb7b17d..6a915f49eb2f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4080,16 +4080,12 @@ static inline int invalid_value(unsigned int *data)
if (data == &sysctl_sched_window_stats_policy)
return val >= WINDOW_STATS_INVALID_POLICY;
- if (data == &sysctl_sched_account_wait_time)
- return !(val == 0 || val == 1);
-
return invalid_value_freq_input(data);
}
/*
* Handle "atomic" update of sysctl_sched_window_stats_policy,
- * sysctl_sched_ravg_hist_size, sysctl_sched_account_wait_time and
- * sched_freq_legacy_mode variables.
+ * sysctl_sched_ravg_hist_size and sched_freq_legacy_mode variables.
*/
int sched_window_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
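
The comment above sched_window_update_handler(), like its counterpart in core.c, describes the pattern these knobs share: writes from userspace land in a sysctl_* copy, invalid values are rejected, and the scheduler-side variable only changes together with a window-stats reset. A rough sketch of that shape, using hypothetical names (sysctl_my_knob, my_knob, my_knob_handler) rather than the real kernel symbols:

#include <linux/errno.h>
#include <linux/sysctl.h>

/* Hypothetical 0/1 knob with a userspace-facing and a kernel-side copy. */
static unsigned int sysctl_my_knob = 1;	/* written via /proc/sys/... */
static unsigned int my_knob = 1;	/* read on scheduler hot paths */

static int my_knob_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int old = sysctl_my_knob;
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_my_knob > 1) {
		/* Reject anything but 0 or 1, restoring the old value. */
		sysctl_my_knob = old;
		return -EINVAL;
	}

	/*
	 * The real handler performs this switch-over inside
	 * reset_all_window_stats() so the change is atomic with respect
	 * to the window statistics; this sketch just copies the value.
	 */
	my_knob = sysctl_my_knob;
	return 0;
}

A matching kern_table entry would point .data at sysctl_my_knob and .proc_handler at my_knob_handler, mirroring the entries that remain in kernel/sysctl.c below.
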
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 825be75ca1a3..13bcfe33ffc3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -340,13 +340,6 @@ static struct ctl_table kern_table[] = {
#endif
#ifdef CONFIG_SCHED_HMP
{
- .procname = "sched_account_wait_time",
- .data = &sysctl_sched_account_wait_time,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
.procname = "sched_cpu_high_irqload",
.data = &sysctl_sched_cpu_high_irqload,
.maxlen = sizeof(unsigned int),