summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Connor O'Brien <connoro@google.com>  2019-02-20 12:17:48 -0800
committer Michael Bestas <mkbestas@lineageos.org>  2021-08-19 08:35:04 +0300
commit    cf9acebb178f5fe66275e445708717c9c0559b93 (patch)
tree      257c7b393f51407ac4f92d07af7ca72fb1aa0778
parent    e9e9735b9d1790be22b33ce5640e2768d3b7651e (diff)
ANDROID: cpufreq: times: optimize proc files
The majority of the time spent reading /proc/uid_time_in_state is due to
seq_printf calls. Use the faster seq_put_* variations instead. Also skip
empty hash buckets in uid_seq_next for a further performance improvement.

Bug: 111216804
Test: Read /proc/uid_time_in_state and confirm output is sane
Test: Compare read times to confirm performance improvement
Change-Id: If8783b498ed73d2ddb186a49438af41ac5ab9957
Signed-off-by: Connor O'Brien <connoro@google.com>
-rw-r--r--  drivers/cpufreq/cpufreq_times.c | 34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index ceddc4f43189..a181de57b8d1 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -193,10 +193,12 @@ static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- (*pos)++;
+ do {
+ (*pos)++;
- if (*pos >= HASH_SIZE(uid_hash_table))
- return NULL;
+ if (*pos >= HASH_SIZE(uid_hash_table))
+ return NULL;
+ } while (hlist_empty(&uid_hash_table[*pos]));
return &uid_hash_table[*pos];
}
@@ -220,7 +222,8 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
if (freqs->freq_table[i] ==
CPUFREQ_ENTRY_INVALID)
continue;
- seq_printf(m, " %d", freqs->freq_table[i]);
+ seq_put_decimal_ull(m, " ",
+ freqs->freq_table[i]);
}
}
seq_putc(m, '\n');
@@ -229,13 +232,16 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
rcu_read_lock();
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
- if (uid_entry->max_state)
- seq_printf(m, "%d:", uid_entry->uid);
+ if (uid_entry->max_state) {
+ seq_put_decimal_ull(m, "", uid_entry->uid);
+ seq_putc(m, ':');
+ }
for (i = 0; i < uid_entry->max_state; ++i) {
+ u64 time;
if (freq_index_invalid(i))
continue;
- seq_printf(m, " %lu", (unsigned long)cputime_to_clock_t(
- uid_entry->time_in_state[i]));
+ time = cputime_to_clock_t(uid_entry->time_in_state[i]);
+ seq_put_decimal_ull(m, " ", time);
}
if (uid_entry->max_state)
seq_putc(m, '\n');
@@ -256,13 +262,13 @@ static int concurrent_time_seq_show(struct seq_file *m, void *v,
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
atomic64_t *times = get_times(uid_entry->concurrent_times);
- seq_put_decimal_ull(m, 0, (u64)uid_entry->uid);
+ seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
seq_putc(m, ':');
for (i = 0; i < num_possible_cpus; ++i) {
u64 time = cputime_to_clock_t(atomic64_read(&times[i]));
- seq_put_decimal_ull(m, ' ', time);
+ seq_put_decimal_ull(m, " ", time);
}
seq_putc(m, '\n');
}
@@ -280,7 +286,7 @@ static inline atomic64_t *get_active_times(struct concurrent_times *times)
static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
{
if (v == uid_hash_table) {
- seq_printf(m, "cpus: %d", num_possible_cpus());
+ seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
seq_putc(m, '\n');
}
@@ -306,18 +312,18 @@ static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
continue;
if (freqs != last_freqs) {
if (last_freqs) {
- seq_printf(m, ": %d", cnt);
+ seq_put_decimal_ull(m, ": ", cnt);
seq_putc(m, ' ');
cnt = 0;
}
- seq_printf(m, "policy%d", i);
+ seq_put_decimal_ull(m, "policy", i);
last_freqs = freqs;
}
cnt++;
}
if (last_freqs) {
- seq_printf(m, ": %d", cnt);
+ seq_put_decimal_ull(m, ": ", cnt);
seq_putc(m, '\n');
}
}