| author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2017-04-13 15:45:11 +0530 |
|---|---|---|
| committer | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2017-04-15 17:48:48 +0530 |
| commit | e0f82761c280d159481f7938b5d75786502c135c | |
| tree | 14ccce167a1583a5abb7e0a694b40f68474c8b58 | |
| parent | d0b971c53d4b15975753e2e9528e4691c208635d | |
core_ctl: Update cluster->active_cpus in eval_need()
cluster->active_cpus is not updated in eval_need(), so the new need
for CPUs is compared against the previously cached cluster->active_cpus
value. If another client isolates a CPU, cluster->active_cpus becomes
stale and we fail to detect the change in the need for CPUs. Refresh
cluster->active_cpus from get_active_cpu_count() before doing the
comparison.
Change-Id: Ib58b8f0bd03dd2b4a174de2ac54eb0c60c59f9f7
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
| -rw-r--r-- | kernel/sched/core_ctl.c | 26 |
|---|---|---|

1 file changed, 12 insertions(+), 14 deletions(-)
```diff
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index ccf67255a909..e094cba26ea5 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -537,9 +537,8 @@ static bool eval_need(struct cluster_data *cluster)
 	unsigned int need_cpus = 0, last_need, thres_idx;
 	int ret = 0;
 	bool need_flag = false;
-	unsigned int active_cpus;
 	unsigned int new_need;
-	s64 now;
+	s64 now, elapsed;
 
 	if (unlikely(!cluster->inited))
 		return 0;
@@ -549,8 +548,8 @@ static bool eval_need(struct cluster_data *cluster)
 	if (cluster->boost) {
 		need_cpus = cluster->max_cpus;
 	} else {
-		active_cpus = get_active_cpu_count(cluster);
-		thres_idx = active_cpus ? active_cpus - 1 : 0;
+		cluster->active_cpus = get_active_cpu_count(cluster);
+		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
 		list_for_each_entry(c, &cluster->lru, sib) {
 			if (c->busy >= cluster->busy_up_thres[thres_idx])
 				c->is_busy = true;
@@ -566,17 +565,16 @@ static bool eval_need(struct cluster_data *cluster)
 	last_need = cluster->need_cpus;
 	now = ktime_to_ms(ktime_get());
 
-	if (new_need == last_need) {
-		cluster->need_ts = now;
-		spin_unlock_irqrestore(&state_lock, flags);
-		return 0;
-	}
-
-	if (need_cpus > cluster->active_cpus) {
+	if (new_need > cluster->active_cpus) {
 		ret = 1;
-	} else if (need_cpus < cluster->active_cpus) {
-		s64 elapsed = now - cluster->need_ts;
+	} else {
+		if (new_need == last_need) {
+			cluster->need_ts = now;
+			spin_unlock_irqrestore(&state_lock, flags);
+			return 0;
+		}
 
+		elapsed = now - cluster->need_ts;
 		ret = elapsed >= cluster->offline_delay_ms;
 	}
 
@@ -584,7 +582,7 @@ static bool eval_need(struct cluster_data *cluster)
 		cluster->need_ts = now;
 		cluster->need_cpus = new_need;
 	}
-	trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
+	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
 				 ret && need_flag);
 	spin_unlock_irqrestore(&state_lock, flags);
```
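To make the failure mode concrete, here is a minimal user-space sketch of the comparison the patch fixes. This is not kernel code: `struct cluster_data`, `get_active_cpu_count()`, and the `needs_adjustment_*()` helpers are simplified, hypothetical stand-ins for their core_ctl.c counterparts, reduced to the stale-cache comparison described in the commit message.

```c
#include <stdio.h>
#include <stdbool.h>

/*
 * Hypothetical stand-in for core_ctl.c's cluster_data; only the
 * cached active-CPU count involved in the bug is modeled.
 */
struct cluster_data {
	unsigned int active_cpus;	/* cached count, may go stale */
};

/* Truth source: how many CPUs are currently online and not isolated. */
static unsigned int get_active_cpu_count(unsigned int online_unisolated)
{
	return online_unisolated;
}

/* Pre-patch shape: compare the new need against the cached value. */
static bool needs_adjustment_stale(struct cluster_data *c,
				   unsigned int new_need)
{
	return new_need > c->active_cpus;
}

/* Post-patch shape: refresh the cache before comparing. */
static bool needs_adjustment_fixed(struct cluster_data *c,
				   unsigned int new_need,
				   unsigned int online_unisolated)
{
	c->active_cpus = get_active_cpu_count(online_unisolated);
	return new_need > c->active_cpus;
}

int main(void)
{
	struct cluster_data c = { .active_cpus = 4 };
	unsigned int new_need = 4;

	/* Another client isolates a CPU: only 3 are really active,
	 * but the cached c.active_cpus still reads 4. */
	unsigned int online_unisolated = 3;

	/* Stale compare: 4 > 4 is false, the lost CPU goes unnoticed. */
	printf("stale: adjust? %d\n", needs_adjustment_stale(&c, new_need));

	/* Refreshed compare: 4 > 3 is true, the change is detected. */
	printf("fixed: adjust? %d\n",
	       needs_adjustment_fixed(&c, new_need, online_unisolated));
	return 0;
}
```

With the count refreshed inside the evaluation itself, an isolation performed by another client between evaluations is picked up on the next pass instead of being masked by the stale cached value.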
