Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  | 17
-rw-r--r--  kernel/sched/fair.c  | 91
-rw-r--r--  kernel/sched/sched.h | 18
3 files changed, 120 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7cf0dbf77da1..fbc51a548bf9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1806,6 +1806,8 @@ update_history(struct rq *rq, struct task_struct *p, u32 runtime, int samples)
if (p->on_rq) {
rq->cumulative_runnable_avg -= p->ravg.demand;
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
+ if (p->sched_class == &fair_sched_class)
+ dec_nr_big_small_task(rq, p);
}
avg = div64_u64(sum, RAVG_HIST_SIZE);
@@ -1819,8 +1821,11 @@ update_history(struct rq *rq, struct task_struct *p, u32 runtime, int samples)
p->ravg.demand = demand;
- if (p->on_rq)
+ if (p->on_rq) {
rq->cumulative_runnable_avg += p->ravg.demand;
+ if (p->sched_class == &fair_sched_class)
+ inc_nr_big_small_task(rq, p);
+ }
}
static int __init set_sched_ravg_window(char *str)
@@ -7730,6 +7735,12 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
load_scale *= load_scale_cpu_freq(cpu);
load_scale >>= 10;
+ /*
+ * A changed load_scale_factor can trigger reclassification of tasks as
+ * big or small. Make this change "atomic" so that the per-cpu big/small
+ * task counts stay consistent with the new load_scale_factor.
+ */
+ pre_big_small_task_count_change();
for_each_cpu(i, policy->related_cpus) {
struct rq *rq = cpu_rq(i);
@@ -7738,6 +7749,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
}
update_min_max_capacity();
+ post_big_small_task_count_change();
return 0;
}
@@ -7908,6 +7920,9 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
+#ifdef CONFIG_SCHED_HMP
+ rq->nr_small_tasks = rq->nr_big_tasks = 0;
+#endif
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
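
For reference, the arithmetic in the cpufreq_notifier_policy() hunk above
(load_scale *= load_scale_cpu_freq(cpu); load_scale >>= 10;) multiplies two
scale factors and shifts right by 10, i.e. divides by 1024, which suggests the
factors are kept in a 1024-based fixed-point form. A minimal userspace sketch
of that computation follows; the numbers and the helper are illustrative, not
taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for load_scale_cpu_freq(): 1024 means "no scaling". */
static uint64_t load_scale_cpu_freq_example(void)
{
	return 512;	/* e.g. cpu running at half of its maximum frequency */
}

int main(void)
{
	uint64_t load_scale = 1024;			/* start at unity scale */

	load_scale *= load_scale_cpu_freq_example();	/* 1024 * 512 = 524288 */
	load_scale >>= 10;				/* back to fixed point: 512 */

	printf("combined load_scale_factor = %llu\n",
	       (unsigned long long)load_scale);
	return 0;
}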
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fde9c1266a17..9ea481f86a28 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2768,6 +2768,68 @@ static int select_best_cpu(struct task_struct *p, int target)
return best_cpu;
}
+void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
+{
+ if (!task_will_fit(p, cpu_of(rq)))
+ rq->nr_big_tasks++;
+ else if (is_small_task(p))
+ rq->nr_small_tasks++;
+}
+
+void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
+{
+ if (!task_will_fit(p, cpu_of(rq)))
+ rq->nr_big_tasks--;
+ else if (is_small_task(p))
+ rq->nr_small_tasks--;
+
+ BUG_ON(rq->nr_big_tasks < 0 || rq->nr_small_tasks < 0);
+}
+
+/*
+ * Walk the runqueue of @cpu and re-initialize its 'nr_big_tasks' and
+ * 'nr_small_tasks' counters.
+ */
+static inline void fixup_nr_big_small_task(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *p;
+
+ rq->nr_big_tasks = 0;
+ rq->nr_small_tasks = 0;
+ list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
+ inc_nr_big_small_task(rq, p);
+}
+
+/* Disable interrupts and grab the runqueue lock of every online cpu */
+void pre_big_small_task_count_change(void)
+{
+ int i;
+
+ local_irq_disable();
+
+ for_each_online_cpu(i)
+ raw_spin_lock(&cpu_rq(i)->lock);
+}
+
+/*
+ * Re-initialize the 'nr_big_tasks' and 'nr_small_tasks' counters on all
+ * online cpus and drop the locks taken in pre_big_small_task_count_change().
+ */
+void post_big_small_task_count_change(void)
+{
+ int i;
+
+ /* Assumes local_irq_disable() keeps online cpumap stable */
+ for_each_online_cpu(i)
+ fixup_nr_big_small_task(i);
+
+ for_each_online_cpu(i)
+ raw_spin_unlock(&cpu_rq(i)->lock);
+
+ local_irq_enable();
+}
+
/*
* Convert percentage value into absolute form. This will avoid div() operation
* in fast path, to convert task load in percentage scale.
@@ -2790,10 +2852,27 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
return -EINVAL;
}
+ /*
+ * A change to the big/small task tunables requires re-classifying the
+ * tasks on each runqueue as big or small and updating the counters
+ * accordingly. The sysctl interface modifies the secondary variables
+ * (*_pct), which are then carried over "atomically" to the primary
+ * variables: the runqueue lock of every online cpu is taken and the
+ * big/small counters are re-initialized against the changed criteria.
+ */
+ if ((*data != old_val) &&
+ (data == &sysctl_sched_upmigrate_pct ||
+ data == &sysctl_sched_small_task_pct))
+ pre_big_small_task_count_change();
+
set_hmp_defaults();
- return 0;
+ if ((*data != old_val) &&
+ (data == &sysctl_sched_upmigrate_pct ||
+ data == &sysctl_sched_small_task_pct))
+ post_big_small_task_count_change();
+ return 0;
}
static inline int find_new_hmp_ilb(void)
@@ -4643,9 +4722,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_shares(cfs_rq);
}
- if (!se)
+ if (!se) {
add_nr_running(rq, 1);
-
+ inc_nr_big_small_task(rq, p);
+ }
hrtick_update(rq);
}
@@ -4703,9 +4783,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_shares(cfs_rq);
}
- if (!se)
+ if (!se) {
sub_nr_running(rq, 1);
-
+ dec_nr_big_small_task(rq, p);
+ }
hrtick_update(rq);
}
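
The pre/post pair added above follows a simple pattern: with interrupts
disabled, take every online cpu's runqueue lock, apply the change that affects
big/small classification, rebuild the per-cpu counters, then release the
locks. A small userspace analog of that pattern using pthread mutexes is
sketched below; the structure names, sizes, and rebuild logic are illustrative
only, not the kernel implementation:

/* Build with: cc -pthread analog.c */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct fake_rq {
	pthread_mutex_t lock;
	int nr_big_tasks;
	int nr_small_tasks;
};

static struct fake_rq rqs[NR_CPUS];

static void pre_count_change(void)
{
	/* Always lock in the same order so concurrent callers cannot deadlock. */
	for (int i = 0; i < NR_CPUS; i++)
		pthread_mutex_lock(&rqs[i].lock);
}

static void rebuild_counters(struct fake_rq *rq)
{
	/* Placeholder: a real rebuild would re-walk the queued tasks. */
	rq->nr_big_tasks = 0;
	rq->nr_small_tasks = 0;
}

static void post_count_change(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		rebuild_counters(&rqs[i]);
	for (int i = 0; i < NR_CPUS; i++)
		pthread_mutex_unlock(&rqs[i].lock);
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		pthread_mutex_init(&rqs[i].lock, NULL);

	pre_count_change();
	/* ... change the big/small classification thresholds here ... */
	post_count_change();

	printf("counters rebuilt under all locks\n");
	return 0;
}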
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1b1f75e10cfa..965db5c8437b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -656,6 +656,10 @@ struct rq {
int capacity;
#endif
+#ifdef CONFIG_SCHED_HMP
+ int nr_small_tasks, nr_big_tasks;
+#endif
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
#endif
@@ -984,13 +988,27 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu)
#ifdef CONFIG_SCHED_HMP
extern void check_for_migration(struct rq *rq, struct task_struct *p);
+extern void pre_big_small_task_count_change(void);
+extern void post_big_small_task_count_change(void);
+extern void inc_nr_big_small_task(struct rq *rq, struct task_struct *p);
+extern void dec_nr_big_small_task(struct rq *rq, struct task_struct *p);
extern void set_hmp_defaults(void);
#else /* CONFIG_SCHED_HMP */
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
+static inline void pre_big_small_task_count_change(void) { }
+static inline void post_big_small_task_count_change(void) { }
static inline void set_hmp_defaults(void) { }
+static inline void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
+{
+}
+
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_CGROUP_SCHED
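
The sched.h hunk above uses the usual config-gated helper pattern: under
CONFIG_SCHED_HMP the real functions are declared extern, otherwise they
collapse to empty static inlines so callers build unchanged either way. A tiny
standalone illustration of the same idiom follows; FEATURE_HMP_EXAMPLE and the
structure below are stand-ins, not kernel symbols:

#include <stdio.h>

struct rq_example { int nr_big_tasks; };

#ifdef FEATURE_HMP_EXAMPLE
/* Feature enabled: the helper does real accounting work. */
static inline void inc_nr_big_task_example(struct rq_example *rq)
{
	rq->nr_big_tasks++;
}
#else
/* Feature disabled: an empty inline the compiler optimizes away. */
static inline void inc_nr_big_task_example(struct rq_example *rq) { }
#endif

int main(void)
{
	struct rq_example rq = { 0 };

	inc_nr_big_task_example(&rq);	/* no-op unless built with -DFEATURE_HMP_EXAMPLE */
	printf("nr_big_tasks = %d\n", rq.nr_big_tasks);
	return 0;
}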