diff options
author    Pavankumar Kondeti <pkondeti@codeaurora.org>    2017-10-16 14:09:29 +0530
committer Michael Bestas <mkbestas@lineageos.org>         2019-12-23 23:43:40 +0200
commit    5c43a2afb87ea44e4d52585989a76b1e6a2e6ce6 (patch)
tree      21e47dbfdfdc56c4aac8f476c77e8f3532f733a7 /kernel
parent    3d6ed948d991660254494856baabbf03e1915751 (diff)
core_ctl: Consider high irq loaded CPU as busy
Currently a CPU is considered as busy if its utilization in
the last window exceeds busy threshold. A CPU loaded with
hardirqs/softirqs does not necessarily have high utilization.
Isolating such CPUs may hurt performance as it involves waking
up the CPU frequently from deep c-state. Prevent isolating
high irq loaded CPUs by considering them as busy.
Change-Id: I169b69f6003eb59a126c8a16b67b65be2aa7af40
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core_ctl.c | 4
-rw-r--r--  kernel/sched/fair.c     | 4
-rw-r--r--  kernel/sched/sched.h    | 1
3 files changed, 7 insertions, 2 deletions
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index ce15ae7fe76b..99f16128cf49 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -22,6 +22,7 @@
 #include <linux/sched/rt.h>
 #include <trace/events/sched.h>
 
+#include "sched.h"
 
 #define MAX_CPUS_PER_CLUSTER 4
 #define MAX_CLUSTERS 2
@@ -575,7 +576,8 @@ static bool eval_need(struct cluster_data *cluster)
 	cluster->active_cpus = get_active_cpu_count(cluster);
 	thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
 	list_for_each_entry(c, &cluster->lru, sib) {
-		if (c->busy >= cluster->busy_up_thres[thres_idx])
+		if (c->busy >= cluster->busy_up_thres[thres_idx] ||
+				sched_cpu_high_irqload(c->cpu))
 			c->is_busy = true;
 		else if (c->busy < cluster->busy_down_thres[thres_idx])
 			c->is_busy = false;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 18ce8cb02272..ffc2c71d0374 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3001,6 +3001,8 @@ struct cpu_select_env *env, struct cluster_cpu_stats *stats)
 	int i;
 	struct cpumask search_cpus;
 
+	extern int num_clusters;
+
 	while (!bitmap_empty(env->backup_list, num_clusters)) {
 		next = next_candidate(env->backup_list, 0, num_clusters);
 		__clear_bit(next->id, env->backup_list);
@@ -3024,6 +3026,8 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
 {
 	struct sched_cluster *next = NULL;
 
+	extern int num_clusters;
+
 	__clear_bit(cluster->id, env->candidate_list);
 
 	if (env->rtg && preferred_cluster(cluster, env->p))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ffae8d49d988..0c121d8bd4c5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -430,7 +430,6 @@ struct related_thread_group {
 };
 
 extern struct list_head cluster_head;
-extern int num_clusters;
 extern struct sched_cluster *sched_cluster[NR_CPUS];
 
 struct cpu_cycle {