summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/sched/fair.c66
2 files changed, 55 insertions, 19 deletions
diff --git a/init/Kconfig b/init/Kconfig
index ad08a40a304b..9ad1ae9d9da8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1162,6 +1162,14 @@ config SCHED_HMP
in their instructions per-cycle capability or the maximum
frequency they can attain.
+config SCHED_HMP_CSTATE_AWARE
+ bool "CPU C-state aware scheduler"
+ depends on SCHED_HMP
+ help
+ This feature lets the HMP scheduler take each CPU's C-state into
+ account when placing tasks. If enabled, the scheduler places tasks
+ onto the shallowest C-state CPU among the most power-efficient CPUs.
+
config CHECKPOINT_RESTORE
bool "Checkpoint/restore support" if EXPERT
select PROC_CHILDREN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b6e638bdcd98..c0e35bc86b92 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3376,28 +3376,13 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
return next;
}
-static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env)
+#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env, int cpu_cost)
{
- int cpu_cost, cpu_cstate;
+ int cpu_cstate;
int prev_cpu = env->prev_cpu;
- cpu_cost = power_cost(cpu, task_load(env->p) +
- cpu_cravg_sync(cpu, env->sync));
- if (cpu_cost > stats->min_cost)
- return;
-
- if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
- if (stats->best_sibling_cpu_cost > cpu_cost ||
- (stats->best_sibling_cpu_cost == cpu_cost &&
- stats->best_sibling_cpu_load > env->cpu_load)) {
-
- stats->best_sibling_cpu_cost = cpu_cost;
- stats->best_sibling_cpu_load = env->cpu_load;
- stats->best_sibling_cpu = cpu;
- }
- }
-
cpu_cstate = cpu_rq(cpu)->cstate;
if (env->need_idle) {
@@ -3452,6 +3437,49 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
stats->best_cpu = cpu;
}
}
+#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env, int cpu_cost)
+{
+ int prev_cpu = env->prev_cpu;
+
+ if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
+ if (stats->best_sibling_cpu_cost > cpu_cost ||
+ (stats->best_sibling_cpu_cost == cpu_cost &&
+ stats->best_sibling_cpu_load > env->cpu_load)) {
+ stats->best_sibling_cpu_cost = cpu_cost;
+ stats->best_sibling_cpu_load = env->cpu_load;
+ stats->best_sibling_cpu = cpu;
+ }
+ }
+
+ if ((cpu_cost < stats->min_cost) ||
+ ((stats->best_cpu != prev_cpu &&
+ stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+ if (env->need_idle) {
+ if (idle_cpu(cpu)) {
+ stats->min_cost = cpu_cost;
+ stats->best_idle_cpu = cpu;
+ }
+ } else {
+ stats->min_cost = cpu_cost;
+ stats->min_load = env->cpu_load;
+ stats->best_cpu = cpu;
+ }
+ }
+}
+#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+
+static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env)
+{
+ int cpu_cost;
+
+ cpu_cost = power_cost(cpu, task_load(env->p) +
+ cpu_cravg_sync(cpu, env->sync));
+ if (cpu_cost <= stats->min_cost)
+ __update_cluster_stats(cpu, stats, env, cpu_cost);
+}
static void find_best_cpu_in_cluster(struct sched_cluster *c,
struct cpu_select_env *env, struct cluster_cpu_stats *stats)