Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c   |  22
-rw-r--r--  kernel/sched/debug.c  |   4
-rw-r--r--  kernel/sched/fair.c   | 249
-rw-r--r--  kernel/sched/sched.h  |  15
-rw-r--r--  kernel/sysctl.c       |   7
5 files changed, 61 insertions(+), 236 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4fa00533cf67..e128af35ee5b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2100,7 +2100,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
#endif
reset_cpu_hmp_stats(cpu, 1);
- fixup_nr_big_small_task(cpu, 0);
+ fixup_nr_big_task(cpu, 0);
}
if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
@@ -2522,23 +2522,23 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
* A changed min_max_freq or max_possible_freq (possible during bootup)
* needs to trigger re-computation of load_scale_factor and capacity for
* all possible cpus (even those offline). It also needs to trigger
- * re-computation of nr_big/small_task count on all online cpus.
+ * re-computation of nr_big_task count on all online cpus.
*
* A changed rq->max_freq, on the other hand, needs to trigger re-computation of
* load_scale_factor and capacity for just the cluster of cpus involved.
* Since small task definition depends on max_load_scale_factor, a
- * changed load_scale_factor of one cluster could influence small_task
+ * changed load_scale_factor of one cluster could influence
* classification of tasks in another cluster. Hence a changed
- * rq->max_freq will need to trigger re-computation of nr_big/small_task
+ * rq->max_freq will need to trigger re-computation of nr_big_task
* count on all online cpus.
*
- * While it should be sufficient for nr_big/small_tasks to be
+ * While it should be sufficient for nr_big_tasks to be
* re-computed for only online cpus, we have inadequate context
* information here (in the policy notifier) about the hotplug-safety of
* the context in which the notification is issued. As a result, we can't use
* get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
* fixed up to issue notification always in hotplug-safe context,
- * re-compute nr_big/small_task for all possible cpus.
+ * re-compute nr_big_task for all possible cpus.
*/
if (orig_min_max_freq != min_max_freq ||
@@ -2552,7 +2552,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
* big or small. Make this change "atomic" so that tasks are accounted
* properly due to changed load_scale_factor
*/
- pre_big_small_task_count_change(cpu_possible_mask);
+ pre_big_task_count_change(cpu_possible_mask);
for_each_cpu(i, cpus) {
struct rq *rq = cpu_rq(i);
@@ -2589,7 +2589,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
__update_min_max_capacity();
check_for_up_down_migrate_update(policy->related_cpus);
- post_big_small_task_count_change(cpu_possible_mask);
+ post_big_task_count_change(cpu_possible_mask);
return 0;
}
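
The pre/post pair used above brackets the tunable update so that per-cpu task counts stay consistent with the classification inputs: pre_big_task_count_change() takes every affected runqueue lock with interrupts off, the caller then mutates load_scale_factor/capacity, and post_big_task_count_change() rebuilds the counters before dropping the locks. A minimal sketch of that bracket, reconstructed from the definitions later in this patch (kernel context, not standalone; the pre body is inferred from its comment, and the local_irq_enable() tail is assumed since that hunk is truncated here):

    /* Sketch: the "atomic" reclassification bracket (kernel context). */
    void pre_big_task_count_change(const struct cpumask *cpus)
    {
            int i;

            local_irq_disable();                    /* keeps online cpumap stable */
            for_each_cpu(i, cpus)
                    raw_spin_lock(&cpu_rq(i)->lock); /* lock every affected rq */
    }

    void post_big_task_count_change(const struct cpumask *cpus)
    {
            int i;

            for_each_cpu(i, cpus)
                    fixup_nr_big_task(i, 1);        /* rebuild nr_big_tasks */
            for_each_cpu(i, cpus)
                    raw_spin_unlock(&cpu_rq(i)->lock);
            local_irq_enable();                     /* assumed tail (hunk truncated) */
    }
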
@@ -9300,7 +9300,7 @@ void __init sched_init(void)
rq->capacity = 1024;
rq->load_scale_factor = 1024;
rq->window_start = 0;
- rq->hmp_stats.nr_small_tasks = rq->hmp_stats.nr_big_tasks = 0;
+ rq->hmp_stats.nr_big_tasks = 0;
rq->hmp_flags = 0;
rq->mostly_idle_load = pct_to_real(20);
rq->mostly_idle_nr_run = 3;
@@ -10167,11 +10167,11 @@ static int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
* classification.
*/
get_online_cpus();
- pre_big_small_task_count_change(cpu_online_mask);
+ pre_big_task_count_change(cpu_online_mask);
tg->upmigrate_discouraged = discourage;
- post_big_small_task_count_change(cpu_online_mask);
+ post_big_task_count_change(cpu_online_mask);
put_online_cpus();
return 0;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 1154330bda65..00e80d430455 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -232,8 +232,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#ifdef CONFIG_SCHED_HMP
SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks",
cfs_rq->hmp_stats.nr_big_tasks);
- SEQ_printf(m, " .%-30s: %d\n", "nr_small_tasks",
- cfs_rq->hmp_stats.nr_small_tasks);
SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg",
cfs_rq->hmp_stats.cumulative_runnable_avg);
#endif
@@ -331,7 +329,6 @@ do { \
#endif
#ifdef CONFIG_SCHED_HMP
P(hmp_stats.nr_big_tasks);
- P(hmp_stats.nr_small_tasks);
SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
rq->hmp_stats.cumulative_runnable_avg);
#endif
@@ -416,7 +413,6 @@ static void sched_debug_header(struct seq_file *m)
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#ifdef CONFIG_SCHED_HMP
- P(sched_small_task);
P(sched_upmigrate);
P(sched_downmigrate);
P(sched_init_task_load_windows);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 859eeb59d8e3..71ec53a0ac94 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2727,13 +2727,6 @@ unsigned int __read_mostly sched_spill_load;
unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
/*
- * Tasks whose bandwidth consumption on a cpu is less than
- * sched_small_task are considered as small tasks.
- */
-unsigned int __read_mostly sched_small_task;
-unsigned int __read_mostly sysctl_sched_small_task_pct = 10;
-
-/*
* Tasks with demand >= sched_heavy_task will have their
* window-based demand added to the previous window's CPU
* time when they wake up, if they have slept for at least
@@ -2826,9 +2819,6 @@ void set_hmp_defaults(void)
sched_spill_load =
pct_to_real(sysctl_sched_spill_load_pct);
- sched_small_task =
- pct_to_real(sysctl_sched_small_task_pct);
-
update_up_down_migrate();
#ifdef CONFIG_SCHED_FREQ_INPUT
@@ -2965,15 +2955,6 @@ static inline int is_big_task(struct task_struct *p)
return load > sched_upmigrate;
}
-/* Is a task "small" on the minimum capacity CPU */
-static inline int is_small_task(struct task_struct *p)
-{
- u64 load = task_load(p);
- load *= (u64)max_load_scale_factor;
- load /= 1024;
- return load < sched_small_task;
-}
-
static inline u64 cpu_load(int cpu)
{
struct rq *rq = cpu_rq(cpu);
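
For reference, the removed classification scaled a task's windowed load by the worst-case (maximum) load scale factor and compared it against sched_small_task. A standalone model of that arithmetic, assuming for illustration that pct_to_real(x) maps a percentage to a fraction of a 1024 load scale (x * 1024 / 100); the scale factor and task load below are hypothetical example values:

    /* Standalone model of the removed is_small_task() arithmetic.
     * Assumption: pct_to_real(x) == x * 1024 / 100; scale factor and
     * task load are hypothetical example values. */
    #include <stdio.h>

    static unsigned int pct_to_real(unsigned int pct)
    {
            return pct * 1024 / 100;
    }

    int main(void)
    {
            unsigned int sched_small_task = pct_to_real(10); /* default 10%, 102 here */
            unsigned long long max_load_scale_factor = 2048; /* hypothetical */
            unsigned long long task_load = 40;               /* hypothetical */

            /* Mirrors the removed check: scale load by the worst-case factor */
            unsigned long long load = task_load * max_load_scale_factor / 1024;

            printf("scaled load %llu -> %s\n", load,
                   load < sched_small_task ? "small" : "not small");
            return 0;
    }

Because max_load_scale_factor is a system-wide maximum, a frequency-policy change on one cluster could flip this classification everywhere, which is why the notifier comment earlier in this patch requires re-computation across all cpus.
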
@@ -3246,105 +3227,6 @@ static unsigned int power_cost(u64 total_load, int cpu)
return power_cost_at_freq(cpu, task_freq);
}
-static int best_small_task_cpu(struct task_struct *p, int sync)
-{
- int best_busy_cpu = -1, fallback_cpu = -1;
- int min_cstate_cpu = -1;
- int min_cstate = INT_MAX;
- int cpu_cost, min_cost = INT_MAX;
- int i = task_cpu(p), prev_cpu;
- int hmp_capable;
- u64 tload, cpu_load, min_load = ULLONG_MAX;
- cpumask_t temp;
- cpumask_t search_cpu;
- cpumask_t fb_search_cpu = CPU_MASK_NONE;
- struct rq *rq;
-
- cpumask_and(&temp, &mpc_mask, cpu_possible_mask);
- hmp_capable = !cpumask_full(&temp);
-
- cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
- if (unlikely(!cpumask_test_cpu(i, &search_cpu)))
- i = cpumask_first(&search_cpu);
-
- do {
- rq = cpu_rq(i);
-
- cpumask_clear_cpu(i, &search_cpu);
-
- trace_sched_cpu_load(rq, idle_cpu(i),
- mostly_idle_cpu_sync(i, cpu_load_sync(i, sync), sync),
- sched_irqload(i), power_cost(scale_load_to_cpu(task_load(p),
- i) + cpu_load_sync(i, sync), i), cpu_temp(i));
-
- if (rq->max_possible_capacity == max_possible_capacity &&
- hmp_capable) {
- cpumask_and(&fb_search_cpu, &search_cpu,
- &rq->freq_domain_cpumask);
- cpumask_andnot(&search_cpu, &search_cpu,
- &rq->freq_domain_cpumask);
- continue;
- }
-
- if (sched_cpu_high_irqload(i))
- continue;
-
- if (idle_cpu(i) && rq->cstate) {
- if (rq->cstate < min_cstate) {
- min_cstate_cpu = i;
- min_cstate = rq->cstate;
- }
- continue;
- }
-
- cpu_load = cpu_load_sync(i, sync);
- if (mostly_idle_cpu_sync(i, cpu_load, sync))
- return i;
- } while ((i = cpumask_first(&search_cpu)) < nr_cpu_ids);
-
- if (min_cstate_cpu != -1)
- return min_cstate_cpu;
-
- cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
- cpumask_andnot(&search_cpu, &search_cpu, &fb_search_cpu);
- for_each_cpu(i, &search_cpu) {
- rq = cpu_rq(i);
- prev_cpu = (i == task_cpu(p));
-
- if (sched_cpu_high_irqload(i))
- continue;
-
- tload = scale_load_to_cpu(task_load(p), i);
- cpu_load = cpu_load_sync(i, sync);
- if (!spill_threshold_crossed(tload, cpu_load, rq)) {
- if (cpu_load < min_load ||
- (prev_cpu && cpu_load == min_load)) {
- min_load = cpu_load;
- best_busy_cpu = i;
- }
- }
- }
-
- if (best_busy_cpu != -1)
- return best_busy_cpu;
-
- for_each_cpu(i, &fb_search_cpu) {
- rq = cpu_rq(i);
- prev_cpu = (i == task_cpu(p));
-
- tload = scale_load_to_cpu(task_load(p), i);
- cpu_load = cpu_load_sync(i, sync);
- cpu_cost = power_cost(tload + cpu_load, i);
- if (cpu_cost < min_cost ||
- (prev_cpu && cpu_cost == min_cost)) {
- fallback_cpu = i;
- min_cost = cpu_cost;
- }
- }
-
- return fallback_cpu;
-}
-
#define UP_MIGRATION 1
#define DOWN_MIGRATION 2
#define IRQLOAD_MIGRATION 4
@@ -3458,7 +3340,6 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
int min_idle_cost = INT_MAX, min_busy_cost = INT_MAX;
u64 tload, cpu_load;
u64 min_load = ULLONG_MAX, min_fallback_load = ULLONG_MAX;
- int small_task = is_small_task(p);
int boost = sched_boost();
int cstate, min_cstate = INT_MAX;
int prefer_idle = -1;
@@ -3474,7 +3355,6 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
if (wake_to_idle(p)) {
prefer_idle = 1;
prefer_idle_override = 1;
- small_task = 0;
/*
* If wake to idle and sync are both set prefer wake to idle
* since sync is a weak hint that might not always be correct.
@@ -3482,12 +3362,6 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
sync = 0;
}
- if (small_task && !boost) {
- best_cpu = best_small_task_cpu(p, sync);
- prefer_idle = 0; /* For sched_task_load tracepoint */
- goto done;
- }
-
trq = task_rq(p);
cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
for_each_cpu(i, &search_cpus) {
@@ -3632,7 +3506,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
if (min_cstate_cpu >= 0 && (prefer_idle > 0 || best_cpu < 0 ||
!mostly_idle_cpu_sync(best_cpu, min_load, sync)))
best_cpu = min_cstate_cpu;
-done:
+
if (best_cpu < 0) {
if (unlikely(fallback_idle_cpu < 0))
/*
@@ -3653,41 +3527,37 @@ done:
* tracepoint towards the end to capture the prefer_idle flag used for this
* instance of wakeup.
*/
- trace_sched_task_load(p, small_task, boost, reason, sync, prefer_idle);
+ trace_sched_task_load(p, boost, reason, sync, prefer_idle);
return best_cpu;
}
static void
-inc_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
+inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
{
if (!sched_enable_hmp || sched_disable_window_stats)
return;
if (is_big_task(p))
stats->nr_big_tasks++;
- else if (is_small_task(p))
- stats->nr_small_tasks++;
}
static void
-dec_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
+dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
{
if (!sched_enable_hmp || sched_disable_window_stats)
return;
if (is_big_task(p))
stats->nr_big_tasks--;
- else if (is_small_task(p))
- stats->nr_small_tasks--;
- BUG_ON(stats->nr_big_tasks < 0 || stats->nr_small_tasks < 0);
+ BUG_ON(stats->nr_big_tasks < 0);
}
static void
inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
{
- inc_nr_big_small_task(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->hmp_stats, p);
if (change_cra)
inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}
@@ -3695,14 +3565,14 @@ inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
static void
dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
{
- dec_nr_big_small_task(&rq->hmp_stats, p);
+ dec_nr_big_task(&rq->hmp_stats, p);
if (change_cra)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
{
- stats->nr_big_tasks = stats->nr_small_tasks = 0;
+ stats->nr_big_tasks = 0;
if (reset_cra)
stats->cumulative_runnable_avg = 0;
}
@@ -3745,29 +3615,23 @@ static inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
* Return total number of tasks "eligible" to run on highest capacity cpu
*
* This is simply nr_big_tasks for cpus which are not of max_capacity and
- * (nr_running - nr_small_tasks) for cpus of max_capacity
+ * nr_running for cpus of max_capacity
*/
unsigned int nr_eligible_big_tasks(int cpu)
{
struct rq *rq = cpu_rq(cpu);
int nr_big = rq->hmp_stats.nr_big_tasks;
int nr = rq->nr_running;
- int nr_small = rq->hmp_stats.nr_small_tasks;
if (rq->max_possible_capacity != max_possible_capacity)
return nr_big;
- /* Consider all (except small) tasks on max_capacity cpu as big tasks */
- nr_big = nr - nr_small;
- if (nr_big < 0)
- nr_big = 0;
-
- return nr_big;
+ return nr;
}
/*
* reset_cpu_hmp_stats - reset HMP stats for a cpu
- * nr_big_tasks, nr_small_tasks
+ * nr_big_tasks
* cumulative_runnable_avg (iff reset_cra is true)
*/
void reset_cpu_hmp_stats(int cpu, int reset_cra)
@@ -3794,7 +3658,7 @@ _inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
/*
* Although below check is not strictly required (as
- * inc/dec_nr_big_small_task and inc/dec_cumulative_runnable_avg called
+ * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
* from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
* efficiency by short-circuiting for_each_sched_entity() loop when
* !sched_enable_hmp || sched_disable_window_stats
@@ -3857,10 +3721,10 @@ static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- dec_nr_big_small_task(&cfs_rq->hmp_stats, p);
+ dec_nr_big_task(&cfs_rq->hmp_stats, p);
fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
new_task_load);
- inc_nr_big_small_task(&cfs_rq->hmp_stats, p);
+ inc_nr_big_task(&cfs_rq->hmp_stats, p);
if (cfs_rq_throttled(cfs_rq))
break;
/*
@@ -3872,9 +3736,9 @@ static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
if (!se) {
- dec_nr_big_small_task(&rq->hmp_stats, p);
+ dec_nr_big_task(&rq->hmp_stats, p);
fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
- inc_nr_big_small_task(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->hmp_stats, p);
}
}
@@ -3885,14 +3749,14 @@ static int task_will_be_throttled(struct task_struct *p);
static void
inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
{
- inc_nr_big_small_task(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->hmp_stats, p);
inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}
static void
dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
{
- dec_nr_big_small_task(&rq->hmp_stats, p);
+ dec_nr_big_task(&rq->hmp_stats, p);
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
@@ -3900,9 +3764,9 @@ static void
fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
u32 new_task_load)
{
- dec_nr_big_small_task(&rq->hmp_stats, p);
+ dec_nr_big_task(&rq->hmp_stats, p);
fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
- inc_nr_big_small_task(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->hmp_stats, p);
}
static inline int task_will_be_throttled(struct task_struct *p)
@@ -3913,21 +3777,20 @@ static inline int task_will_be_throttled(struct task_struct *p)
static void
_inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
{
- inc_nr_big_small_task(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->hmp_stats, p);
}
#endif /* CONFIG_CFS_BANDWIDTH */
/*
- * Walk runqueue of cpu and re-initialize 'nr_big_tasks' and 'nr_small_tasks'
- * counters.
+ * Walk the runqueue of a cpu and re-initialize its 'nr_big_tasks' counter.
*/
-void fixup_nr_big_small_task(int cpu, int reset_stats)
+void fixup_nr_big_task(int cpu, int reset_stats)
{
struct rq *rq = cpu_rq(cpu);
struct task_struct *p;
- /* fixup_nr_big_small_task() is called from two functions. In one of
+ /* fixup_nr_big_task() is called from two functions. In one of
* them stats are already reset, don't waste time resetting them again
*/
if (reset_stats) {
@@ -3940,7 +3803,7 @@ void fixup_nr_big_small_task(int cpu, int reset_stats)
}
/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
-void pre_big_small_task_count_change(const struct cpumask *cpus)
+void pre_big_task_count_change(const struct cpumask *cpus)
{
int i;
@@ -3951,16 +3814,15 @@ void pre_big_small_task_count_change(const struct cpumask *cpus)
}
/*
- * Reinitialize 'nr_big_tasks' and 'nr_small_tasks' counters on all affected
- * cpus
+ * Reinitialize 'nr_big_tasks' counters on all affected cpus
*/
-void post_big_small_task_count_change(const struct cpumask *cpus)
+void post_big_task_count_change(const struct cpumask *cpus)
{
int i;
/* Assumes local_irq_disable() keeps online cpumap stable */
for_each_cpu(i, cpus)
- fixup_nr_big_small_task(i, 1);
+ fixup_nr_big_task(i, 1);
for_each_cpu(i, cpus)
raw_spin_unlock(&cpu_rq(i)->lock);
@@ -4085,24 +3947,22 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
}
/*
- * Big/Small task tunable change will need to re-classify tasks on
- * runqueue as big and small and set their counters appropriately.
+ * A change to a big-task tunable will need to re-classify tasks on the
+ * runqueue as big and set their counters appropriately.
* sysctl interface affects secondary variables (*_pct), which are then
* "atomically" carried over to the primary variables. The atomic change
* includes taking the runqueue lock of all online cpus and re-initializing
- * their big/small counter values based on changed criteria.
+ * their big-task counter values based on the changed criteria.
*/
- if ((data == &sysctl_sched_upmigrate_pct ||
- data == &sysctl_sched_small_task_pct || update_min_nice)) {
+ if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
get_online_cpus();
- pre_big_small_task_count_change(cpu_online_mask);
+ pre_big_task_count_change(cpu_online_mask);
}
set_hmp_defaults();
- if ((data == &sysctl_sched_upmigrate_pct ||
- data == &sysctl_sched_small_task_pct || update_min_nice)) {
- post_big_small_task_count_change(cpu_online_mask);
+ if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+ post_big_task_count_change(cpu_online_mask);
put_online_cpus();
}
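
Condensed, the handler's update sequence for the affected tunables is the same bracket pattern again, summarized from the hunk above (kernel context, not standalone):

    /* Sketch (kernel context): the "atomic" sysctl carry-over, condensed. */
    get_online_cpus();                           /* hotplug-safe here */
    pre_big_task_count_change(cpu_online_mask);  /* irqs off, rq locks held */
    set_hmp_defaults();                          /* *_pct secondary -> primary */
    post_big_task_count_change(cpu_online_mask); /* rebuild counts, unlock */
    put_online_cpus();
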
@@ -4190,9 +4050,6 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
return 0;
}
- if (is_small_task(p))
- return 0;
-
if (sched_cpu_high_irqload(cpu_of(rq)))
return IRQLOAD_MIGRATION;
@@ -4313,11 +4170,6 @@ static inline int sched_boost(void)
return 0;
}
-static inline int is_small_task(struct task_struct *p)
-{
- return 0;
-}
-
static inline int is_big_task(struct task_struct *p)
{
return 0;
@@ -4828,14 +4680,13 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
{
cfs_rq->hmp_stats.nr_big_tasks = 0;
- cfs_rq->hmp_stats.nr_small_tasks = 0;
cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
}
static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra)
{
- inc_nr_big_small_task(&cfs_rq->hmp_stats, p);
+ inc_nr_big_task(&cfs_rq->hmp_stats, p);
if (change_cra)
inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
}
@@ -4843,7 +4694,7 @@ static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra)
{
- dec_nr_big_small_task(&cfs_rq->hmp_stats, p);
+ dec_nr_big_task(&cfs_rq->hmp_stats, p);
if (change_cra)
dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
}
@@ -4852,7 +4703,6 @@ static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
struct cfs_rq *cfs_rq)
{
stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
- stats->nr_small_tasks += cfs_rq->hmp_stats.nr_small_tasks;
stats->cumulative_runnable_avg +=
cfs_rq->hmp_stats.cumulative_runnable_avg;
}
@@ -4861,11 +4711,10 @@ static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
struct cfs_rq *cfs_rq)
{
stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
- stats->nr_small_tasks -= cfs_rq->hmp_stats.nr_small_tasks;
stats->cumulative_runnable_avg -=
cfs_rq->hmp_stats.cumulative_runnable_avg;
- BUG_ON(stats->nr_big_tasks < 0 || stats->nr_small_tasks < 0 ||
+ BUG_ON(stats->nr_big_tasks < 0 ||
(s64)stats->cumulative_runnable_avg < 0);
}
@@ -7604,7 +7453,6 @@ enum fbq_type { regular, remote, all };
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
#define LBF_SOME_PINNED 0x08
-#define LBF_IGNORE_SMALL_TASKS 0x10
#define LBF_SCHED_BOOST_ACTIVE_BALANCE 0x40
#define LBF_BIG_TASK_ACTIVE_BALANCE 0x80
#define LBF_HMP_ACTIVE_BALANCE (LBF_SCHED_BOOST_ACTIVE_BALANCE | \
@@ -7789,9 +7637,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
!is_big_task(p))
return 0;
- if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
- return 0;
-
twf = task_will_fit(p, env->dst_cpu);
/*
@@ -7908,9 +7753,7 @@ static int detach_tasks(struct lb_env *env)
if (env->imbalance <= 0)
return 0;
- if (capacity(env->dst_rq) > capacity(env->src_rq))
- env->flags |= LBF_IGNORE_SMALL_TASKS;
- else if (capacity(env->dst_rq) < capacity(env->src_rq) &&
+ if (capacity(env->dst_rq) < capacity(env->src_rq) &&
!sched_boost())
env->flags |= LBF_IGNORE_BIG_TASKS;
@@ -7977,10 +7820,9 @@ next:
list_move_tail(&p->se.group_node, tasks);
}
- if (env->flags & (LBF_IGNORE_SMALL_TASKS | LBF_IGNORE_BIG_TASKS)
- && !detached) {
+ if (env->flags & LBF_IGNORE_BIG_TASKS && !detached) {
tasks = &env->src_rq->cfs_tasks;
- env->flags &= ~(LBF_IGNORE_SMALL_TASKS | LBF_IGNORE_BIG_TASKS);
+ env->flags &= ~LBF_IGNORE_BIG_TASKS;
env->loop = orig_loop;
goto redo;
}
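
What survives is the single-flag variant of the filter-and-retry pattern: a lower-capacity destination first scans the source runqueue while skipping big tasks, and only if that pass detaches nothing does it clear the flag and rescan. A condensed sketch of the two passes (kernel context; "redo" restarts the scan of src_rq's cfs_tasks list):

    /* Sketch (kernel context): one-flag filter-and-retry in detach_tasks(). */
    if (capacity(env->dst_rq) < capacity(env->src_rq) && !sched_boost())
            env->flags |= LBF_IGNORE_BIG_TASKS;  /* pass 1: leave big tasks */

    /* ... scan src_rq->cfs_tasks, detaching what can_migrate_task() allows ... */

    if (env->flags & LBF_IGNORE_BIG_TASKS && !detached) {
            env->flags &= ~LBF_IGNORE_BIG_TASKS; /* pass 1 moved nothing: */
            env->loop = orig_loop;               /* retry with big tasks allowed */
            goto redo;
    }
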
@@ -8152,7 +7994,7 @@ struct sg_lb_stats {
unsigned long group_util; /* Total utilization of the group */
unsigned int sum_nr_running; /* Nr tasks running in the group */
#ifdef CONFIG_SCHED_HMP
- unsigned long sum_nr_big_tasks, sum_nr_small_tasks;
+ unsigned long sum_nr_big_tasks;
u64 group_cpu_load; /* Scaled load of all CPUs of the group */
#endif
unsigned int idle_cpus;
@@ -8516,7 +8358,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
#ifdef CONFIG_SCHED_HMP
sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
- sgs->sum_nr_small_tasks += rq->hmp_stats.nr_small_tasks;
sgs->group_cpu_load += cpu_load(i);
#endif
@@ -10153,11 +9994,7 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
&& rq->max_freq > rq->mostly_idle_freq)
return 0;
- if (rq->nr_running >= 2 &&
- (rq->nr_running - rq->hmp_stats.nr_small_tasks >= 2 ||
- rq->nr_running > rq->mostly_idle_nr_run ||
- cpu_load(cpu) > rq->mostly_idle_load)) {
-
+ if (rq->nr_running >= 2) {
if (rq->capacity == max_capacity)
return 1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 148f08718a9d..f0b55d329214 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -351,7 +351,7 @@ struct cfs_bandwidth { };
#ifdef CONFIG_SCHED_HMP
struct hmp_sched_stats {
- int nr_big_tasks, nr_small_tasks;
+ int nr_big_tasks;
u64 cumulative_runnable_avg;
};
@@ -996,7 +996,6 @@ extern cpumask_t mpc_mask;
extern unsigned long capacity_scale_cpu_efficiency(int cpu);
extern unsigned long capacity_scale_cpu_freq(int cpu);
extern unsigned int sched_mostly_idle_load;
-extern unsigned int sched_small_task;
extern unsigned int sched_upmigrate;
extern unsigned int sched_downmigrate;
extern unsigned int sched_init_task_load_pelt;
@@ -1004,7 +1003,7 @@ extern unsigned int sched_init_task_load_windows;
extern unsigned int sched_heavy_task;
extern unsigned int up_down_migrate_scale_factor;
extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern void fixup_nr_big_small_task(int cpu, int reset_stats);
+extern void fixup_nr_big_task(int cpu, int reset_stats);
extern unsigned int max_task_load(void);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
u64 delta, u64 wallclock);
@@ -1117,7 +1116,7 @@ static inline int sched_cpu_high_irqload(int cpu)
struct hmp_sched_stats;
-static inline void fixup_nr_big_small_task(int cpu, int reset_stats)
+static inline void fixup_nr_big_task(int cpu, int reset_stats)
{
}
@@ -1223,8 +1222,8 @@ static inline void clear_reserved(int cpu)
int mostly_idle_cpu(int cpu);
extern void check_for_migration(struct rq *rq, struct task_struct *p);
-extern void pre_big_small_task_count_change(const struct cpumask *cpus);
-extern void post_big_small_task_count_change(const struct cpumask *cpus);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
extern void set_hmp_defaults(void);
extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
@@ -1238,8 +1237,8 @@ extern int sched_boost(void);
#define sched_freq_legacy_mode 1
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline void pre_big_small_task_count_change(void) { }
-static inline void post_big_small_task_count_change(void) { }
+static inline void pre_big_task_count_change(void) { }
+static inline void post_big_task_count_change(void) { }
static inline void set_hmp_defaults(void) { }
static inline void clear_reserved(int cpu) { }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e4923c409821..ae83b7653503 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -368,13 +368,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = sched_window_update_handler,
},
{
- .procname = "sched_small_task",
- .data = &sysctl_sched_small_task_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- },
- {
.procname = "sched_spill_load",
.data = &sysctl_sched_spill_load_pct,
.maxlen = sizeof(unsigned int),