author    Joonwoo Park <joonwoop@codeaurora.org>    2016-05-17 20:04:54 -0700
committer Jeevan Shriram <jshriram@codeaurora.org>  2016-05-20 19:23:47 -0700
commit    d9ff0d77afb3e5b79973b402f9a65bafa7d0cbd6 (patch)
tree      bdb0465b2e5ad2b01db6a2e1e80e21d1698551c6 /kernel
parent    167eebb0155f65077a1e5d57e3fb069fe9e6be70 (diff)
sched: simplify CPU frequency estimation and cycle counter API
Most CPUs increase their cycle counter by one every cycle, which makes frequency = cycles / time_delta correct as-is. It is therefore reasonable to get rid of the current cpu_cycle_max_scale_factor and instead ask the cycle counter read callback to return an already-scaled counter value in the case that the cycle counter does not increase every cycle.

Thus, multiply the CPU cycle counter delta by NSEC_PER_SEC / HZ_PER_KHZ, since we calculate frequency in KHz, and remove cpu_cycle_max_scale_factor. This allows us to simplify the frequency estimation and the cycle counter API.

Change-Id: Ie7a628d4bc77c9b6c769f6099ce8d75740262a14
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
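[Editor's note] To make the unit conversion concrete: a counter that ticks once per CPU cycle yields cycles/ns, i.e. GHz, when divided by a time delta in nanoseconds; multiplying the cycle delta by NSEC_PER_SEC / HZ_PER_KHZ (= NSEC_PER_MSEC = 10^6) first converts the result to KHz, the unit the scheduler works in. A minimal standalone sketch of the calculation (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL	/* == NSEC_PER_SEC / HZ_PER_KHZ */

/* Frequency in KHz from a raw cycle delta and a time delta in ns,
 * mirroring the post-patch scaling applied in get_task_cpu_cycles(). */
static uint64_t cycles_to_freq_khz(uint64_t cycles, uint64_t time_ns)
{
	return (cycles * NSEC_PER_MSEC) / time_ns;
}

int main(void)
{
	/* 1,500,000 cycles over 1,000,000 ns -> 1500000 KHz (1.5 GHz) */
	printf("%llu KHz\n",
	       (unsigned long long)cycles_to_freq_khz(1500000, 1000000));
	return 0;
}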
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  | 32 +++-----------------------------
-rw-r--r--  kernel/sched/sched.h | 12 +-----------
2 files changed, 4 insertions(+), 40 deletions(-)
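[Editor's note] With the scale factor gone, register_cpu_cycle_counter_cb() only validates get_cpu_cycle_counter, and a counter that does not tick once per cycle is expected to return an already-scaled value. A hypothetical driver-side registration under the new API might look like the sketch below; the callback signature and my_read_scaled_cycles() are assumptions for illustration, not confirmed by this patch:

/* Hypothetical sketch of registering with the simplified API.
 * Only .get_cpu_cycle_counter is required after this patch;
 * .get_cpu_cycles_max_per_us no longer exists. */
static u64 my_get_cpu_cycle_counter(int cpu)
{
	/* If the hardware counter does not tick once per CPU cycle,
	 * return a value already scaled to cycle granularity.
	 * my_read_scaled_cycles() is an assumed hardware accessor. */
	return my_read_scaled_cycles(cpu);
}

static struct cpu_cycle_counter_cb my_cb = {
	.get_cpu_cycle_counter = my_get_cpu_cycle_counter,
};

static int __init my_driver_init(void)
{
	return register_cpu_cycle_counter_cb(&my_cb);
}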
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ac5f67da19b9..0b55bbbd7431 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1403,7 +1403,6 @@ static struct sched_cluster init_cluster = {
.max_mitigated_freq = UINT_MAX,
.min_freq = 1,
.max_possible_freq = 1,
- .cpu_cycle_max_scale_factor = 1,
.dstate = 0,
.dstate_wakeup_energy = 0,
.dstate_wakeup_latency = 0,
@@ -1553,7 +1552,6 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
cluster->max_mitigated_freq = UINT_MAX;
cluster->min_freq = 1;
cluster->max_possible_freq = 1;
- cluster->cpu_cycle_max_scale_factor = 1;
cluster->dstate = 0;
cluster->dstate_wakeup_energy = 0;
cluster->dstate_wakeup_latency = 0;
@@ -1620,38 +1618,15 @@ static void init_clusters(void)
INIT_LIST_HEAD(&cluster_head);
}
-static inline void
-__update_cpu_cycle_max_possible_freq(struct sched_cluster *cluster)
-{
- int cpu = cluster_first_cpu(cluster);
-
- cluster->cpu_cycle_max_scale_factor =
- div64_u64(cluster->max_possible_freq * NSEC_PER_USEC,
- cpu_cycle_counter_cb.get_cpu_cycles_max_per_us(cpu));
-}
-
-static inline void
-update_cpu_cycle_max_possible_freq(struct sched_cluster *cluster)
-{
- if (!use_cycle_counter)
- return;
-
- __update_cpu_cycle_max_possible_freq(cluster);
-}
-
int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
- struct sched_cluster *cluster = NULL;
-
mutex_lock(&cluster_lock);
- if (!cb->get_cpu_cycle_counter || !cb->get_cpu_cycles_max_per_us) {
+ if (!cb->get_cpu_cycle_counter) {
mutex_unlock(&cluster_lock);
return -EINVAL;
}
cpu_cycle_counter_cb = *cb;
- for_each_sched_cluster(cluster)
- __update_cpu_cycle_max_possible_freq(cluster);
use_cycle_counter = true;
mutex_unlock(&cluster_lock);
@@ -1931,8 +1906,7 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq,
int cpu = cpu_of(rq);
int sf;
-	delta = DIV64_U64_ROUNDUP(delta * cc->cycles *
-			cpu_cycle_max_scale_factor(cpu),
+	delta = DIV64_U64_ROUNDUP(delta * cc->cycles,
			max_possible_freq * cc->time);
sf = DIV_ROUND_UP(cpu_efficiency(cpu) * 1024, max_possible_efficiency);
@@ -2613,6 +2587,7 @@ get_task_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
else
cc.cycles = cur_cycles - p->cpu_cycles;
+ cc.cycles = cc.cycles * NSEC_PER_MSEC;
cc.time = wallclock - p->ravg.mark_start;
BUG_ON((s64)cc.time < 0);
@@ -3688,7 +3663,6 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
sort_clusters();
update_all_clusters_stats();
- update_cpu_cycle_max_possible_freq(cluster);
mutex_unlock(&cluster_lock);
continue;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 96f5c187dfcd..a66d8a12051c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -389,11 +389,6 @@ struct sched_cluster {
*/
unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
unsigned int max_possible_freq;
- /*
- * cpu_cycle_max_scale_factor represents number of cycles per NSEC at
- * CPU's fmax.
- */
- u32 cpu_cycle_max_scale_factor;
bool freq_init_done;
int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
unsigned int static_cluster_pwr_cost;
@@ -1137,14 +1132,9 @@ static inline int cpu_max_power_cost(int cpu)
return cpu_rq(cpu)->cluster->max_power_cost;
}
-static inline int cpu_cycle_max_scale_factor(int cpu)
-{
- return cpu_rq(cpu)->cluster->cpu_cycle_max_scale_factor;
-}
-
static inline u32 cpu_cycles_to_freq(int cpu, u64 cycles, u32 period)
{
- return div64_u64(cycles * cpu_cycle_max_scale_factor(cpu), period);
+ return div64_u64(cycles, period);
}
static inline bool hmp_capable(void)