diff options
| author | Joonwoo Park <joonwoop@codeaurora.org> | 2016-03-28 18:27:47 -0700 |
|---|---|---|
| committer | Jeevan Shriram <jshriram@codeaurora.org> | 2016-04-22 15:05:24 -0700 |
| commit | 2e0ebb0155f275b24a5feedfc8f28c5225fb8db6 (patch) | |
| tree | 6fb03d69b4d247336db34db14a8aa28b5379cde2 /kernel/sched | |
| parent | d4ca4d767f4c8a64ab212ddf2a9356c316de4f80 (diff) | |
sched: add option whether CPU C-state is used to guide task placement
There are CPUs that don't have an obvious low power mode exit latency
penalty. Add a new Kconfig CONFIG_SCHED_HMP_CSTATE_AWARE which controls
whether CPU C-state is used to guide task placement.
CRs-fixed: 1006303
Change-Id: Ie8dbab8e173c3a1842d922f4d1fbd8cc4221789c
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/fair.c | 66 |
1 file changed, 47 insertions, 19 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b6e638bdcd98..c0e35bc86b92 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3376,28 +3376,13 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env, return next; } -static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, - struct cpu_select_env *env) +#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE +static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, + struct cpu_select_env *env, int cpu_cost) { - int cpu_cost, cpu_cstate; + int cpu_cstate; int prev_cpu = env->prev_cpu; - cpu_cost = power_cost(cpu, task_load(env->p) + - cpu_cravg_sync(cpu, env->sync)); - if (cpu_cost > stats->min_cost) - return; - - if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) { - if (stats->best_sibling_cpu_cost > cpu_cost || - (stats->best_sibling_cpu_cost == cpu_cost && - stats->best_sibling_cpu_load > env->cpu_load)) { - - stats->best_sibling_cpu_cost = cpu_cost; - stats->best_sibling_cpu_load = env->cpu_load; - stats->best_sibling_cpu = cpu; - } - } - cpu_cstate = cpu_rq(cpu)->cstate; if (env->need_idle) { @@ -3452,6 +3437,49 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, stats->best_cpu = cpu; } } +#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */ +static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, + struct cpu_select_env *env, int cpu_cost) +{ + int prev_cpu = env->prev_cpu; + + if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) { + if (stats->best_sibling_cpu_cost > cpu_cost || + (stats->best_sibling_cpu_cost == cpu_cost && + stats->best_sibling_cpu_load > env->cpu_load)) { + stats->best_sibling_cpu_cost = cpu_cost; + stats->best_sibling_cpu_load = env->cpu_load; + stats->best_sibling_cpu = cpu; + } + } + + if ((cpu_cost < stats->min_cost) || + ((stats->best_cpu != prev_cpu && + stats->min_load > env->cpu_load) || cpu == prev_cpu)) { + if (env->need_idle) { + if (idle_cpu(cpu)) { + stats->min_cost = cpu_cost; + stats->best_idle_cpu = cpu; + } + } else { + stats->min_cost = cpu_cost; + stats->min_load = env->cpu_load; + stats->best_cpu = cpu; + } + } +} +#endif + +static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, + struct cpu_select_env *env) +{ + int cpu_cost; + + cpu_cost = power_cost(cpu, task_load(env->p) + + cpu_cravg_sync(cpu, env->sync)); + if (cpu_cost <= stats->min_cost) + __update_cluster_stats(cpu, stats, env, cpu_cost); +} static void find_best_cpu_in_cluster(struct sched_cluster *c, struct cpu_select_env *env, struct cluster_cpu_stats *stats) |
