diff options
| author | Archana Sathyakumar <asathyak@codeaurora.org> | 2015-07-28 11:32:28 -0600 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:22:08 -0700 |
| commit | 8804d589374895384c2ae68a2fd87d1cf9b7ef80 (patch) | |
| tree | ca1740595a7dfe9ebd03caf3cbd36040efd4d9d4 /drivers/cpuidle | |
| parent | 3aa15eecb74d443eabcb3f685ec51bcb3a9dbb2f (diff) | |
lpm-stats: Optimize stats path
Stats for the cpu and cluster exit paths query ktime for every level,
which increases the overall idle exit latency. Get the timestamp only
once and use the same value to populate the different levels.
Change-Id: Iece36015910fcb16bd04a25dae1bf0396ab463e2
Signed-off-by: Archana Sathyakumar <asathyak@codeaurora.org>
Conflicts:
drivers/cpuidle/lpm-levels.c
drivers/power/qcom/lpm-stats.c
Diffstat (limited to 'drivers/cpuidle')
| -rw-r--r-- | drivers/cpuidle/lpm-levels.c | 45 |
1 files changed, 31 insertions, 14 deletions
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 08753375b4ea..d4d598bcf0d6 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -92,9 +92,11 @@ static int lpm_cpu_callback(struct notifier_block *cpu_nb,
 		unsigned long action, void *hcpu);
 static void cluster_unprepare(struct lpm_cluster *cluster,
-		const struct cpumask *cpu, int child_idx, bool from_idle);
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
 static void cluster_prepare(struct lpm_cluster *cluster,
-		const struct cpumask *cpu, int child_idx, bool from_idle);
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
 
 static struct notifier_block __refdata lpm_cpu_nblk = {
 	.notifier_call = lpm_cpu_callback,
@@ -173,11 +175,11 @@ static int lpm_cpu_callback(struct notifier_block *cpu_nb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DYING:
 		cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu),
-				NR_LPM_LEVELS, false);
+				NR_LPM_LEVELS, false, 0);
 		break;
 	case CPU_STARTING:
 		cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu),
-				NR_LPM_LEVELS, false);
+				NR_LPM_LEVELS, false, 0);
 		break;
 	default:
 		break;
@@ -559,7 +561,8 @@ failed_set_mode:
 }
 
 static void cluster_prepare(struct lpm_cluster *cluster,
-		const struct cpumask *cpu, int child_idx, bool from_idle)
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t start_time)
 {
 	int i;
 
@@ -600,15 +603,21 @@ static void cluster_prepare(struct lpm_cluster *cluster,
 	if (cluster_configure(cluster, i, from_idle))
 		goto failed;
 
+	cluster->stats->sleep_time = start_time;
 	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
-			from_idle);
+			from_idle, start_time);
+
+	spin_unlock(&cluster->sync_lock);
+	return;
 failed:
 	spin_unlock(&cluster->sync_lock);
+	cluster->stats->sleep_time = 0;
 	return;
 }
 
 static void cluster_unprepare(struct lpm_cluster *cluster,
-		const struct cpumask *cpu, int child_idx, bool from_idle)
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t end_time)
 {
 	struct lpm_cluster_level *level;
 	bool first_cpu;
@@ -638,6 +647,9 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
 	if (!first_cpu || cluster->last_level == cluster->default_level)
 		goto unlock_return;
 
+	if (cluster->stats->sleep_time)
+		cluster->stats->sleep_time = end_time -
+			cluster->stats->sleep_time;
 	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
 
 	level = &cluster->levels[cluster->last_level];
@@ -673,7 +685,7 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
 	cluster_notify(cluster, &cluster->levels[last_level], false);
 	cluster_unprepare(cluster->parent, &cluster->child_cpus,
-			last_level, from_idle);
+			last_level, from_idle, end_time);
 unlock_return:
 	spin_unlock(&cluster->sync_lock);
 }
@@ -872,19 +884,21 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
 	pwr_params = &cluster->cpu->levels[idx].pwr;
 
 	cpu_prepare(cluster, idx, true);
-	cluster_prepare(cluster, cpumask, idx, true);
+	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
 
 	if (need_resched() || (idx < 0))
 		goto exit;
 
 	BUG_ON(!use_psci);
 
 	trace_cpu_idle_enter(idx);
-	lpm_stats_cpu_enter(idx);
+	lpm_stats_cpu_enter(idx, start_time);
 	success = psci_enter_sleep(cluster, idx, true);
 exit:
-	lpm_stats_cpu_exit(idx, success);
-	cluster_unprepare(cluster, cpumask, idx, true);
+	end_time = ktime_to_ns(ktime_get());
+	lpm_stats_cpu_exit(idx, end_time, success);
+
+	cluster_unprepare(cluster, cpumask, idx, true, end_time);
 	cpu_unprepare(cluster, idx, true);
 	trace_cpu_idle_exit(idx, success);
@@ -1095,6 +1109,7 @@ static int lpm_suspend_enter(suspend_state_t state)
 	struct lpm_cpu *lpm_cpu = cluster->cpu;
 	const struct cpumask *cpumask = get_cpu_mask(cpu);
 	int idx;
+	int64_t time = ktime_to_ns(ktime_get());
 
 	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
 
@@ -1106,7 +1121,7 @@ static int lpm_suspend_enter(suspend_state_t state)
 		return 0;
 	}
 	cpu_prepare(cluster, idx, false);
-	cluster_prepare(cluster, cpumask, idx, false);
+	cluster_prepare(cluster, cpumask, idx, false, time);
 
 	if (idx > 0)
 		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
 				0xdeaffeed, false);
@@ -1125,7 +1140,9 @@ static int lpm_suspend_enter(suspend_state_t state)
 	if (idx > 0)
 		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
 				false);
-	cluster_unprepare(cluster, cpumask, idx, false);
+
+	time = ktime_to_ns(ktime_get());
+	cluster_unprepare(cluster, cpumask, idx, false, time);
 	cpu_unprepare(cluster, idx, false);
 	return 0;
 }
