path: root/kernel/sched/fair.c
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  103
1 file changed, 76 insertions(+), 27 deletions(-)
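No commit message survives in this extract, but the hunks below tell a consistent story: the loose per-runqueue HMP counters (nr_big_tasks, nr_small_tasks, cumulative_runnable_avg) are gathered into a struct hmp_sched_stats embedded in struct rq, and all updates are funnelled through small helpers that are also exposed as sched_class hooks. The container itself is defined in kernel/sched/sched.h, which is outside this fair.c-only diffstat; a minimal sketch inferred from the accesses below (an assumption, not the actual definition) would be:

/*
 * Inferred from the rq->hmp_stats.nr_big_tasks, .nr_small_tasks and
 * .cumulative_runnable_avg accesses in the hunks below; the real
 * definition lives in kernel/sched/sched.h and is not part of this diff.
 */
struct hmp_sched_stats {
        int nr_big_tasks;
        int nr_small_tasks;
        u64 cumulative_runnable_avg;
};

struct rq {
        /* ... existing fields ... */
        struct hmp_sched_stats hmp_stats;       /* replaces the loose counters */
        /* ... */
};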
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4da8b618232e..3b5f061ec020 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2590,7 +2590,7 @@ unsigned int __read_mostly sched_enable_hmp = 0;
/* A cpu can no longer accomodate more tasks if:
*
* rq->nr_running > sysctl_sched_spill_nr_run ||
- * rq->cumulative_runnable_avg > sched_spill_load
+ * rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
*/
unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
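The comment above restates the spill condition with the load field now living under rq->hmp_stats. Rendered as code it would look roughly like the sketch below; the name cpu_cannot_accommodate is hypothetical, and sched_spill_load is taken as a parameter because its definition is not visible in this diff:

/* Illustrative only: either limit from the comment above marks the CPU as full. */
static inline bool
cpu_cannot_accommodate(struct rq *rq, u64 sched_spill_load)
{
        return rq->nr_running > sysctl_sched_spill_nr_run ||
               rq->hmp_stats.cumulative_runnable_avg > sched_spill_load;
}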
@@ -2836,7 +2836,7 @@ static inline u64 cpu_load(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- return scale_load_to_cpu(rq->cumulative_runnable_avg, cpu);
+ return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
}
static inline u64 cpu_load_sync(int cpu, int sync)
@@ -2844,7 +2844,7 @@ static inline u64 cpu_load_sync(int cpu, int sync)
struct rq *rq = cpu_rq(cpu);
u64 load;
- load = rq->cumulative_runnable_avg;
+ load = rq->hmp_stats.cumulative_runnable_avg;
/*
* If load is being checked in a sync wakeup environment,
@@ -3469,28 +3469,42 @@ done:
return best_cpu;
}
-void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
+static void
+inc_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
{
if (!sched_enable_hmp || sched_disable_window_stats)
return;
if (is_big_task(p))
- rq->nr_big_tasks++;
+ stats->nr_big_tasks++;
else if (is_small_task(p))
- rq->nr_small_tasks++;
+ stats->nr_small_tasks++;
}
-void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
+static void
+dec_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
{
if (!sched_enable_hmp || sched_disable_window_stats)
return;
if (is_big_task(p))
- rq->nr_big_tasks--;
+ stats->nr_big_tasks--;
else if (is_small_task(p))
- rq->nr_small_tasks--;
+ stats->nr_small_tasks--;
+
+ BUG_ON(stats->nr_big_tasks < 0 || stats->nr_small_tasks < 0);
+}
- BUG_ON(rq->nr_big_tasks < 0 || rq->nr_small_tasks < 0);
+static void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+ inc_nr_big_small_task(&rq->hmp_stats, p);
+}
+
+static void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+ dec_nr_big_small_task(&rq->hmp_stats, p);
}
/*
@@ -3502,10 +3516,10 @@ void fixup_nr_big_small_task(int cpu)
struct rq *rq = cpu_rq(cpu);
struct task_struct *p;
- rq->nr_big_tasks = 0;
- rq->nr_small_tasks = 0;
+ rq->hmp_stats.nr_big_tasks = 0;
+ rq->hmp_stats.nr_small_tasks = 0;
list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
- inc_nr_big_small_task(rq, p);
+ inc_nr_big_small_task(&rq->hmp_stats, p);
}
/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
@@ -3840,7 +3854,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
static inline int nr_big_tasks(struct rq *rq)
{
- return rq->nr_big_tasks;
+ return rq->hmp_stats.nr_big_tasks;
}
static inline int is_cpu_throttling_imminent(int cpu)
@@ -3873,6 +3887,19 @@ unsigned int cpu_temp(int cpu)
return 0;
}
+static void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+ inc_nr_big_small_task(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+ dec_nr_big_small_task(&rq->hmp_stats, p);
+}
#else /* CONFIG_SCHED_HMP */
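Two helper pairs with identical bodies are introduced above: inc_rq_hmp_stats()/dec_rq_hmp_stats() are called directly from enqueue_task_fair()/dequeue_task_fair() later in this patch, while inc_hmp_sched_stats_fair()/dec_hmp_sched_stats_fair() replace the bare inc/dec_cumulative_runnable_avg() calls in __update_load_avg() and are installed as the new sched_class hooks at the end of the patch. A hypothetical caller (not part of this patch) that reaches the fair-class variants through the class hook, presumably so other scheduling classes can supply their own accounting later, might look like:

#ifdef CONFIG_SCHED_HMP
/*
 * Hypothetical caller, shown only to illustrate why the hook pair
 * exists alongside the rq helpers: adjust HMP accounting via the
 * class hook without knowing which class the task belongs to.
 */
static inline void inc_hmp_sched_stats(struct rq *rq, struct task_struct *p)
{
        if (p->sched_class->inc_hmp_sched_stats)
                p->sched_class->inc_hmp_sched_stats(rq, p);
}
#endif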
@@ -3945,6 +3972,18 @@ unsigned int cpu_temp(int cpu)
return 0;
}
+static inline void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+static inline void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+}
#endif /* CONFIG_SCHED_HMP */
@@ -4048,7 +4087,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
if (!cfs_rq && weight) {
se = container_of(sa, struct sched_entity, avg);
if (entity_is_task(se) && se->on_rq)
- dec_cumulative_runnable_avg(rq_of(cfs_rq), task_of(se));
+ dec_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
}
scale_freq = arch_scale_freq_capacity(NULL, cpu);
@@ -4116,7 +4155,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
}
if (se && entity_is_task(se) && se->on_rq)
- inc_cumulative_runnable_avg(rq_of(cfs_rq), task_of(se));
+ inc_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
if (running)
sa->util_sum += scaled_delta * scale_cpu;
@@ -4358,6 +4397,9 @@ static inline int idle_balance(struct rq *rq)
return 0;
}
+static inline void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+static inline void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_HMP
@@ -5705,7 +5747,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (!se) {
add_nr_running(rq, 1);
- inc_nr_big_small_task(rq, p);
+ inc_rq_hmp_stats(rq, p);
}
hrtick_update(rq);
}
@@ -5766,7 +5808,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (!se) {
sub_nr_running(rq, 1);
- dec_nr_big_small_task(rq, p);
+ dec_rq_hmp_stats(rq, p);
}
hrtick_update(rq);
}
@@ -7974,8 +8016,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
*overload = true;
#ifdef CONFIG_SCHED_HMP
- sgs->sum_nr_big_tasks += rq->nr_big_tasks;
- sgs->sum_nr_small_tasks += rq->nr_small_tasks;
+ sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
+ sgs->sum_nr_small_tasks += rq->hmp_stats.nr_small_tasks;
sgs->group_cpu_load += cpu_load(i);
#endif
@@ -8481,18 +8523,20 @@ out_balanced:
static struct rq *find_busiest_queue_hmp(struct lb_env *env,
struct sched_group *group)
{
- struct rq *busiest = NULL, *rq;
+ struct rq *busiest = NULL;
u64 max_runnable_avg = 0;
int i;
for_each_cpu(i, sched_group_cpus(group)) {
+ struct rq *rq = cpu_rq(i);
+ u64 cumulative_runnable_avg =
+ rq->hmp_stats.cumulative_runnable_avg;
+
if (!cpumask_test_cpu(i, env->cpus))
continue;
- rq = cpu_rq(i);
-
- if (rq->cumulative_runnable_avg > max_runnable_avg) {
- max_runnable_avg = rq->cumulative_runnable_avg;
+ if (cumulative_runnable_avg > max_runnable_avg) {
+ max_runnable_avg = cumulative_runnable_avg;
busiest = rq;
}
}
@@ -9616,8 +9660,9 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
&& rq->max_freq > rq->mostly_idle_freq)
return 0;
- if (rq->nr_running >= 2 && (rq->nr_running - rq->nr_small_tasks >= 2 ||
- rq->nr_running > rq->mostly_idle_nr_run ||
+ if (rq->nr_running >= 2 &&
+ (rq->nr_running - rq->hmp_stats.nr_small_tasks >= 2 ||
+ rq->nr_running > rq->mostly_idle_nr_run ||
cpu_load(cpu) > rq->mostly_idle_load)) {
if (rq->capacity == max_capacity)
@@ -10242,6 +10287,10 @@ const struct sched_class fair_sched_class = {
#ifdef CONFIG_FAIR_GROUP_SCHED
.task_move_group = task_move_group_fair,
#endif
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_fair,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG
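The .inc_hmp_sched_stats/.dec_hmp_sched_stats assignments above imply a matching extension of struct sched_class in kernel/sched/sched.h, which is not part of this fair.c-only diffstat. Assuming the hook signatures mirror the fair.c implementations, the declaration side would look roughly like:

/*
 * Assumed companion change in kernel/sched/sched.h (not shown here):
 * struct sched_class gains two optional HMP accounting callbacks that
 * fair.c fills in under CONFIG_SCHED_HMP.
 */
struct sched_class {
        /* ... existing callbacks ... */
#ifdef CONFIG_SCHED_HMP
        void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
        void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
#endif
};

Keeping the members behind CONFIG_SCHED_HMP matches the #ifdef around the assignments above, so non-HMP builds carry no extra struct size or indirection.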