-rw-r--r--   Documentation/scheduler/sched-zone.txt   55
-rw-r--r--   include/linux/sched/sysctl.h               1
-rw-r--r--   kernel/sched/fair.c                       26
-rw-r--r--   kernel/sysctl.c                            7
4 files changed, 27 insertions, 62 deletions
diff --git a/Documentation/scheduler/sched-zone.txt b/Documentation/scheduler/sched-zone.txt
index fc0157790f85..472322ee0db2 100644
--- a/Documentation/scheduler/sched-zone.txt
+++ b/Documentation/scheduler/sched-zone.txt
@@ -550,7 +550,7 @@ both tasks and CPUs to aid in the placement of tasks.
particular CPU, that CPU will be considered too small for the task. The task
will thus be seen as a "big" task on the cpu and will reflect in nr_big_tasks
statistics maintained for that cpu. Note that certain tasks (whose nice
- value exceeds sched_upmigrate_min_nice value or those that belong to a cgroup
+ value exceeds SCHED_UPMIGRATE_MIN_NICE value or those that belong to a cgroup
whose upmigrate_discourage flag is set) will never be classified as big tasks
despite their high demand.
@@ -666,7 +666,7 @@ offered by cluster. Criteria for a task to be considered as fitting in a cluster
is:
i) A low-priority task, whose nice value is greater than
- sysctl_sched_upmigrate_min_nice or whose cgroup has its
+ SCHED_UPMIGRATE_MIN_NICE or whose cgroup has its
upmigrate_discourage flag set, is considered to be fitting in all clusters,
irrespective of their capacity and task's cpu demand.
@@ -775,7 +775,7 @@ available (idle) to service it
b) A task is starving on a CPU with high irq load.
c) A task with upmigration discouraged is running on a performance cluster.
-See notes on 'cpu.upmigrate_discourage' and sched_upmigrate_min_nice tunables.
+See notes on 'cpu.upmigrate_discourage'.
In case the test for migration turns out positive (which is expected to be rare
event), a candidate cpu is identified for task migration. To avoid multiple task
@@ -1037,19 +1037,7 @@ historical load value to assign to it. This tunable specifies the
initial load value for newly created tasks. Also see Sec 2.8 on per-task
'initial task load' attribute.
-*** 7.5 sched_upmigrate_min_nice
-
-Appears at: /proc/sys/kernel/sched_upmigrate_min_nice
-
-Default value: 15
-
-A task whose nice value is greater than this tunable value will never
-be considered as a "big" task (it will not be allowed to run on a
-high-performance CPU).
-
-See also notes on 'cpu.upmigrate_discourage' tunable.
-
-*** 7.6 sched_ravg_hist_size
+*** 7.5 sched_ravg_hist_size
Appears at: /proc/sys/kernel/sched_ravg_hist_size
@@ -1058,7 +1046,7 @@ Default value: 5
This tunable controls the number of samples used from task's sum_history[]
array for determination of its demand.
-*** 7.7 sched_window_stats_policy
+*** 7.6 sched_window_stats_policy
Appears at: /proc/sys/kernel/sched_window_stats_policy
@@ -1077,7 +1065,7 @@ Possible values for this tunable are:
samples), where M = sysctl_sched_ravg_hist_size
3. Use average of first M samples, where M = sysctl_sched_ravg_hist_size
-*** 7.8 sched_ravg_window
+*** 7.7 sched_ravg_window
Appears at: kernel command line argument
@@ -1088,7 +1076,7 @@ tracking. By default each window is 10ms long. This quantity must
currently be set at boot time on the kernel command line (or the
default value of 10ms can be used).
-*** 7.9 RAVG_HIST_SIZE
+*** 7.8 RAVG_HIST_SIZE
Appears at: compile time only (see RAVG_HIST_SIZE in include/linux/sched.h)
@@ -1099,7 +1087,7 @@ tracking mechanism maintains per task. If default values are used for
both this and sched_ravg_window then a total of 50ms of task history
would be maintained in 5 10ms windows.
-*** 7.10 sched_migration_fixup
+*** 7.9 sched_migration_fixup
Appears at: /proc/sys/kernel/sched_migration_fixup
@@ -1108,7 +1096,7 @@ Default value: 1
This controls whether a cpu's busy time counters are adjusted during task
migration.
-*** 7.11 sched_freq_inc_notify
+*** 7.10 sched_freq_inc_notify
Appears at: /proc/sys/kernel/sched_freq_inc_notify
@@ -1120,7 +1108,7 @@ exceeds sched_freq_inc_notify, where freq_required is the frequency calculated
by scheduler to meet current task demand. Note that sched_freq_inc_notify is
specified in kHz units.
-*** 7.12 sched_freq_dec_notify
+*** 7.11 sched_freq_dec_notify
Appears at: /proc/sys/kernel/sched_freq_dec_notify
@@ -1133,7 +1121,7 @@ exceeds sched_freq_dec_notify, where freq_required is the frequency calculated
by scheduler to meet current task demand. Note that sched_freq_dec_notify is
specified in kHz units.
-*** 7.13 sched_heavy_task
+*** 7.12 sched_heavy_task
Appears at: /proc/sys/kernel/sched_heavy_task
@@ -1145,7 +1133,7 @@ comparison. Scheduler will request a raise in cpu frequency when heavy tasks
wakeup after at least one window of sleep, where window size is defined by
sched_ravg_window. Value 0 will disable this feature.
-*** 7.14 sched_cpu_high_irqload
+*** 7.13 sched_cpu_high_irqload
Appears at: /proc/sys/kernel/sched_cpu_high_irqload
@@ -1163,7 +1151,7 @@ longer eligible for placement. This will affect the task placement logic
described above, causing the scheduler to try and steer tasks away from
the CPU.
-*** 7.15 cpu.upmigrate_discourage
+*** 7.14 cpu.upmigrate_discourage
Default value : 0
@@ -1176,10 +1164,9 @@ Setting this flag to 1 discourages upmigration for all tasks of a cgroup. High
demand tasks of such a cgroup will never be classified as big tasks and hence
not upmigrated. Any task of the cgroup is allowed to upmigrate only under
overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load for
-how overcommitment threshold is defined and also notes on
-'sched_upmigrate_min_nice' tunable.
+how overcommitment threshold is defined.
-*** 7.16 sched_static_cpu_pwr_cost
+*** 7.15 sched_static_cpu_pwr_cost
Default value: 0
@@ -1194,7 +1181,7 @@ within a cluster and possibly have differing value between clusters as
needed.
-*** 7.17 sched_static_cluster_pwr_cost
+*** 7.16 sched_static_cluster_pwr_cost
Default value: 0
@@ -1205,7 +1192,7 @@ power mode. It ignores the actual D-state that a cluster may be in and assumes
the worst case power cost of the highest D-state. It is means of biasing task
placement away from idle clusters when necessary.
-*** 7.18 sched_early_detection_duration
+*** 7.17 sched_early_detection_duration
Default value: 9500000
@@ -1216,7 +1203,7 @@ tick for it to be eligible for the scheduler's early detection feature
under scheduler boost. For more information on the feature itself please
refer to section 5.2.1.
-*** 7.19 sched_restrict_cluster_spill
+*** 7.18 sched_restrict_cluster_spill
Default value: 0
@@ -1235,7 +1222,7 @@ CPU across all clusters. When this tunable is enabled, the RT tasks are
restricted to the lowest possible power cluster.
-*** 7.20 sched_downmigrate
+*** 7.19 sched_downmigrate
Appears at: /proc/sys/kernel/sched_downmigrate
@@ -1248,7 +1235,7 @@ its demand *in reference to the power-efficient cpu* drops less than 60%
(sched_downmigrate).
-*** 7.21 sched_small_wakee_task_load
+*** 7.20 sched_small_wakee_task_load
Appears at: /proc/sys/kernel/sched_small_wakee_task_load
@@ -1260,7 +1247,7 @@ categorized as small wakee tasks. Scheduler places small wakee tasks on the
waker's cluster.
-*** 7.22 sched_big_waker_task_load
+*** 7.21 sched_big_waker_task_load
Appears at: /proc/sys/kernel/sched_big_waker_task_load
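
The documentation changes above all follow from a single rule: with the tunable removed, a task whose nice value exceeds the hard-coded SCHED_UPMIGRATE_MIN_NICE (15), or whose cgroup sets cpu.upmigrate_discourage, is never classified as a big task no matter how high its demand. Below is a minimal sketch of that rule; apart from SCHED_UPMIGRATE_MIN_NICE itself, the names (struct task_info, is_big_task(), the plain upmigrate_discouraged field) are illustrative stand-ins, not the scheduler's real helpers.

/*
 * Simplified sketch of the "big task" classification rule described
 * above.  Only SCHED_UPMIGRATE_MIN_NICE comes from the patch; the rest
 * of the names are hypothetical.
 */
#define SCHED_UPMIGRATE_MIN_NICE 15

struct task_info {
        int nice;                        /* -20 (highest prio) .. 19 (lowest prio) */
        int upmigrate_discouraged;       /* cgroup cpu.upmigrate_discourage flag */
        unsigned long long scaled_load;  /* task demand scaled to the target CPU */
};

/* sched_upmigrate: load threshold above which a task counts as "big" */
int is_big_task(const struct task_info *t, unsigned long long sched_upmigrate)
{
        /*
         * Low-priority tasks (nice > 15) and tasks whose cgroup discourages
         * upmigration are never classified as big, regardless of demand.
         */
        if (t->nice > SCHED_UPMIGRATE_MIN_NICE || t->upmigrate_discouraged)
                return 0;

        return t->scaled_load > sched_upmigrate;
}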
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 30ba03d1679b..2af3824a40b6 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -61,7 +61,6 @@ extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
extern unsigned int sysctl_sched_downmigrate_pct;
-extern int sysctl_sched_upmigrate_min_nice;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 747a30d1988f..dab9045a6dfa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2753,12 +2753,7 @@ unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
unsigned int __read_mostly sched_downmigrate;
unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
-/*
- * Tasks whose nice value is > sysctl_sched_upmigrate_min_nice are never
- * considered as "big" tasks.
- */
-static int __read_mostly sched_upmigrate_min_nice = 15;
-int __read_mostly sysctl_sched_upmigrate_min_nice = 15;
+#define SCHED_UPMIGRATE_MIN_NICE 15
/*
* The load scale factor of a CPU gets boosted when its max frequency
@@ -2837,8 +2832,6 @@ void set_hmp_defaults(void)
div64_u64((u64)sysctl_sched_init_task_load_pct *
(u64)sched_ravg_window, 100);
- sched_upmigrate_min_nice = sysctl_sched_upmigrate_min_nice;
-
sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
NSEC_PER_USEC;
@@ -2887,7 +2880,7 @@ static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
{
int nice = task_nice(p);
- if (nice > sched_upmigrate_min_nice || upmigrate_discouraged(p))
+ if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
return 0;
return scaled_load > sched_upmigrate;
@@ -3002,7 +2995,7 @@ static int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
return 1;
if (boost_type != SCHED_BOOST_ON_BIG) {
- if (task_nice(p) > sched_upmigrate_min_nice ||
+ if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
upmigrate_discouraged(p))
return 1;
@@ -4134,16 +4127,9 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
if (write && (old_val == *data))
goto done;
- if (data == (unsigned int *)&sysctl_sched_upmigrate_min_nice) {
- if ((*(int *)data) < -20 || (*(int *)data) > 19) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
- update_min_nice = 1;
- } else if (data != &sysctl_sched_select_prev_cpu_us) {
+ if (data != &sysctl_sched_select_prev_cpu_us) {
/*
- * all tunables other than min_nice and prev_cpu_us are
+ * all tunables other than sched_select_prev_cpu_us are
* in percentage.
*/
if (sysctl_sched_downmigrate_pct >
@@ -4226,7 +4212,7 @@ static inline int migration_needed(struct task_struct *p, int cpu)
nice = task_nice(p);
rcu_read_lock();
grp = task_related_thread_group(p);
- if (!grp && (nice > sched_upmigrate_min_nice ||
+ if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
rcu_read_unlock();
return DOWN_MIGRATION;
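
With the sched_upmigrate_min_nice branch gone, sched_hmp_proc_update_handler no longer needs a special case: every tunable it still handles, apart from sched_select_prev_cpu_us, is a percentage. The deleted branch followed a common pattern for custom sysctl handlers: let proc_dointvec() parse the write, then reject out-of-range values by restoring the previous one. A sketch of that pattern follows; the handler and tunable names are hypothetical, and only the [-20, 19] nice range is taken from the removed code.

#include <linux/sysctl.h>
#include <linux/errno.h>

/*
 * Sketch of the validate-and-restore pattern used by the removed
 * min_nice branch.  Not the actual HMP handler.
 */
static int example_nice_sysctl_handler(struct ctl_table *table, int write,
                                       void __user *buffer, size_t *lenp,
                                       loff_t *ppos)
{
        int old_val = *(int *)table->data;
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        /* nice values are limited to the range [-20, 19] */
        if (*(int *)table->data < -20 || *(int *)table->data > 19) {
                *(int *)table->data = old_val;
                return -EINVAL;
        }

        return 0;
}

The same bounds can also be expressed declaratively with proc_dointvec_minmax and the ctl_table's extra1/extra2 fields, as the sketch after the kernel/sysctl.c diff below illustrates.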
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 5288dec335fd..731256c03fa6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -383,13 +383,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = sched_hmp_proc_update_handler,
},
{
- .procname = "sched_upmigrate_min_nice",
- .data = &sysctl_sched_upmigrate_min_nice,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- },
- {
.procname = "sched_init_task_load",
.data = &sysctl_sched_init_task_load_pct,
.maxlen = sizeof(unsigned int),
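
For comparison, had the tunable been kept, the usual way to enforce its [-20, 19] range would be in the table entry itself rather than in a custom handler. The following is a hedged sketch with illustrative names (example_upmigrate_min_nice and its bounds), showing such an entry using proc_dointvec_minmax with extra1/extra2; it is not part of the patch.

#include <linux/sysctl.h>

static int example_min_nice = 15;
static int nice_min = -20;
static int nice_max = 19;

static struct ctl_table example_kern_table[] = {
        {
                /* bounds enforced by the generic sysctl code */
                .procname       = "example_upmigrate_min_nice",
                .data           = &example_min_nice,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &nice_min,
                .extra2         = &nice_max,
        },
        { }     /* table sentinel */
};

Dropping the entry entirely, as this patch does, removes both the handler special case and the need for any such bounds checking.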