Diffstat (limited to 'kernel')
 kernel/sched/core.c  | 32 +++-----------------------------
 kernel/sched/sched.h | 12 +-----------
 2 files changed, 4 insertions(+), 40 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ac5f67da19b9..0b55bbbd7431 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1403,7 +1403,6 @@ static struct sched_cluster init_cluster = {
.max_mitigated_freq = UINT_MAX,
.min_freq = 1,
.max_possible_freq = 1,
- .cpu_cycle_max_scale_factor = 1,
.dstate = 0,
.dstate_wakeup_energy = 0,
.dstate_wakeup_latency = 0,
@@ -1553,7 +1552,6 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
cluster->max_mitigated_freq = UINT_MAX;
cluster->min_freq = 1;
cluster->max_possible_freq = 1;
- cluster->cpu_cycle_max_scale_factor = 1;
cluster->dstate = 0;
cluster->dstate_wakeup_energy = 0;
cluster->dstate_wakeup_latency = 0;
@@ -1620,38 +1618,15 @@ static void init_clusters(void)
INIT_LIST_HEAD(&cluster_head);
}
-static inline void
-__update_cpu_cycle_max_possible_freq(struct sched_cluster *cluster)
-{
- int cpu = cluster_first_cpu(cluster);
-
- cluster->cpu_cycle_max_scale_factor =
- div64_u64(cluster->max_possible_freq * NSEC_PER_USEC,
- cpu_cycle_counter_cb.get_cpu_cycles_max_per_us(cpu));
-}
-
-static inline void
-update_cpu_cycle_max_possible_freq(struct sched_cluster *cluster)
-{
- if (!use_cycle_counter)
- return;
-
- __update_cpu_cycle_max_possible_freq(cluster);
-}
-
int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
- struct sched_cluster *cluster = NULL;
-
mutex_lock(&cluster_lock);
- if (!cb->get_cpu_cycle_counter || !cb->get_cpu_cycles_max_per_us) {
+ if (!cb->get_cpu_cycle_counter) {
mutex_unlock(&cluster_lock);
return -EINVAL;
}
cpu_cycle_counter_cb = *cb;
- for_each_sched_cluster(cluster)
- __update_cpu_cycle_max_possible_freq(cluster);
use_cycle_counter = true;
mutex_unlock(&cluster_lock);
@@ -1931,8 +1906,7 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq,
int cpu = cpu_of(rq);
int sf;
- delta = DIV64_U64_ROUNDUP(delta * cc->cycles *
- cpu_cycle_max_scale_factor(cpu),
+ delta = DIV64_U64_ROUNDUP(delta * cc->cycles,
max_possible_freq * cc->time);
sf = DIV_ROUND_UP(cpu_efficiency(cpu) * 1024, max_possible_efficiency);
@@ -2613,6 +2587,7 @@ get_task_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
else
cc.cycles = cur_cycles - p->cpu_cycles;
+ cc.cycles = cc.cycles * NSEC_PER_MSEC;
cc.time = wallclock - p->ravg.mark_start;
BUG_ON((s64)cc.time < 0);
@@ -3688,7 +3663,6 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
sort_clusters();
update_all_clusters_stats();
- update_cpu_cycle_max_possible_freq(cluster);
mutex_unlock(&cluster_lock);
continue;
}
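With the per-cluster cpu_cycle_max_scale_factor gone, scale_exec_time() relies on cc->cycles already carrying the NSEC_PER_MSEC scaling applied in get_task_cpu_cycles(), so cc->cycles / cc->time comes out in kHz and divides cleanly by max_possible_freq. Below is a minimal standalone sketch of that arithmetic; the stub macros and sample values are hypothetical, not from this patch, and the efficiency scaling that follows in the real function is omitted:

/* Sketch of the post-patch scaling, standalone for illustration.
 * Assumes the registered cycle counter counts raw CPU cycles, so
 * cycles * NSEC_PER_MSEC / time_ns yields a frequency in kHz --
 * the same unit cpufreq and max_possible_freq use.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL
#define DIV64_U64_ROUNDUP(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
	uint64_t raw_cycles = 3000000;        /* 1.5 GHz over a 2 ms window */
	uint64_t time_ns = 2000000;           /* 2 ms window, in ns */
	uint64_t max_possible_freq = 2000000; /* 2 GHz, in kHz */
	uint64_t delta = 1000;                /* 1 us of exec time, in ns */

	/* get_task_cpu_cycles() now pre-scales cycles by NSEC_PER_MSEC */
	uint64_t cycles = raw_cycles * NSEC_PER_MSEC;

	/* cycles / time_ns == 1500000 kHz == 1.5 GHz */
	uint64_t freq_khz = cycles / time_ns;

	/* scale_exec_time(): normalize delta to fmax: 1000 * 1.5/2.0 = 750 */
	uint64_t scaled = DIV64_U64_ROUNDUP(delta * cycles,
					    max_possible_freq * time_ns);

	printf("freq=%llu kHz scaled_delta=%llu\n",
	       (unsigned long long)freq_khz, (unsigned long long)scaled);
	return 0;
}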
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 96f5c187dfcd..a66d8a12051c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -389,11 +389,6 @@ struct sched_cluster {
*/
unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
unsigned int max_possible_freq;
- /*
- * cpu_cycle_max_scale_factor represents number of cycles per NSEC at
- * CPU's fmax.
- */
- u32 cpu_cycle_max_scale_factor;
bool freq_init_done;
int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
unsigned int static_cluster_pwr_cost;
@@ -1137,14 +1132,9 @@ static inline int cpu_max_power_cost(int cpu)
return cpu_rq(cpu)->cluster->max_power_cost;
}
-static inline int cpu_cycle_max_scale_factor(int cpu)
-{
- return cpu_rq(cpu)->cluster->cpu_cycle_max_scale_factor;
-}
-
static inline u32 cpu_cycles_to_freq(int cpu, u64 cycles, u32 period)
{
- return div64_u64(cycles * cpu_cycle_max_scale_factor(cpu), period);
+ return div64_u64(cycles, period);
}
static inline bool hmp_capable(void)
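After this change cpu_cycles_to_freq() reduces to a plain divide, because the unit conversion now happens once, at the point where the counter is sampled. A hedged standalone sketch of the delta-plus-wraparound logic from get_task_cpu_cycles() together with the simplified helper follows; the stubbed types and sample values are assumptions for illustration:

/* Standalone sketch of the counter-delta logic in get_task_cpu_cycles()
 * and the simplified cpu_cycles_to_freq(). The wraparound branch and the
 * NSEC_PER_MSEC scaling mirror the patch; everything else is stubbed.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

static uint64_t cycles_delta(uint64_t cur, uint64_t prev)
{
	/* Handle counter wraparound, as the patched code does */
	if (cur < prev)
		return cur + (UINT64_MAX - prev);
	return cur - prev;
}

/* Post-patch helper: cycles are already scaled, so this is a plain divide */
static uint32_t cpu_cycles_to_freq(uint64_t cycles, uint64_t period_ns)
{
	return (uint32_t)(cycles / period_ns);
}

int main(void)
{
	uint64_t prev = UINT64_MAX - 2000000; /* counter about to wrap */
	uint64_t cur  = 2000000;              /* sampled after the wrap */
	uint64_t period_ns = 4000000;         /* 4 ms window */

	uint64_t cycles = cycles_delta(cur, prev) * NSEC_PER_MSEC;

	/* 4e6 cycles over 4 ms -> 1000000 kHz (1 GHz) */
	printf("freq=%u kHz\n",
	       (unsigned)cpu_cycles_to_freq(cycles, period_ns));
	return 0;
}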