| author | Linux Build Service Account <lnxbuild@quicinc.com> | 2017-06-06 13:21:50 -0700 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2017-06-06 13:21:50 -0700 |
| commit | 1d5844ba9da3c90a738ac23a58e764d22c2be5ae | |
| tree | 5106f094f299d56d14d3d381b5b571a933ad9071 | |
| parent | 2d08b001ac400e58564a86fa5b67bc893fa6fd0d | |
| parent | 259636e7d070a41ccbfc5e6c5de7d5029bd827de | |
Merge "sched: hmp: Optimize cycle counter reads"
| -rw-r--r-- | kernel/sched/hmp.c | 46 |
|---|---|---|
| -rw-r--r-- | kernel/sched/sched.h | 2 |
2 files changed, 41 insertions, 7 deletions
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 4dd3d875c242..ae6876e62c0f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -2521,10 +2521,42 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
 	return pred_demand;
 }
 
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+#define THRESH_CC_UPDATE (2 * NSEC_PER_USEC)
+
+/*
+ * Assumes rq_lock is held and wallclock was recorded in the same critical
+ * section as this function's invocation.
+ */
+static inline u64 read_cycle_counter(int cpu, u64 wallclock)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+	u64 delta;
+
+	if (unlikely(!cluster))
+		return cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+	/*
+	 * Why don't we need locking here? Let's say that delta is negative
+	 * because some other CPU happened to update last_cc_update with a
+	 * more recent timestamp. We simply read the counter again in that case
+	 * with no harmful side effects. This can happen if there is an FIQ
+	 * between when we read the wallclock and when we use it here.
+	 */
+	delta = wallclock - atomic64_read(&cluster->last_cc_update);
+	if (delta > THRESH_CC_UPDATE) {
+		atomic64_set(&cluster->cycles,
+			     cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu));
+		atomic64_set(&cluster->last_cc_update, wallclock);
+	}
+
+	return atomic64_read(&cluster->cycles);
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu,
+				   u64 wallclock)
 {
 	if (use_cycle_counter)
-		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+		p->cpu_cycles = read_cycle_counter(cpu, wallclock);
 }
 
 static void
@@ -2542,7 +2574,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 		return;
 	}
 
-	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+	cur_cycles = read_cycle_counter(cpu, wallclock);
 
 	/*
 	 * If current task is idle task and irqtime == 0 CPU was
@@ -2834,7 +2866,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 	update_window_start(rq, wallclock);
 
 	if (!p->ravg.mark_start) {
-		update_task_cpu_cycles(p, cpu_of(rq));
+		update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 		goto done;
 	}
 
@@ -2902,7 +2934,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
 	if (is_idle_task(curr)) {
 		/* We're here without rq->lock held, IRQ disabled */
 		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu);
+		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
 		raw_spin_unlock(&rq->lock);
 	}
 }
@@ -2947,7 +2979,7 @@ void mark_task_starting(struct task_struct *p)
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
 	p->last_cpu_selected_ts = wallclock;
 	p->last_switch_out_ts = 0;
-	update_task_cpu_cycles(p, cpu_of(rq));
+	update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 }
 
 void set_window_start(struct rq *rq)
@@ -3560,7 +3592,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
 			 wallclock, 0);
-	update_task_cpu_cycles(p, new_cpu);
+	update_task_cpu_cycles(p, new_cpu, wallclock);
 
 	new_task = is_new_task(p);
 
 	/* Protected by rq_lock */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2beda41af443..a6733b57bcbc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -397,6 +397,8 @@ struct sched_cluster {
 	unsigned int static_cluster_pwr_cost;
 	int notifier_sent;
 	bool wake_up_idle;
+	atomic64_t last_cc_update;
+	atomic64_t cycles;
 };
 
 extern unsigned long all_cluster_ids[];
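Stripped of the call-site plumbing, the new read_cycle_counter() is a per-cluster, rate-limited cache of an expensive hardware read. Below is a minimal userspace sketch of that pattern, assuming C11 atomics in place of the kernel's atomic64_t; struct cluster_cc, raw_read_cycle_counter(), and THRESH_NS are hypothetical stand-ins for the patch's sched_cluster fields, cpu_cycle_counter_cb.get_cpu_cycle_counter(), and THRESH_CC_UPDATE.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* 2 us, mirroring THRESH_CC_UPDATE (2 * NSEC_PER_USEC) in the patch. */
#define THRESH_NS 2000ULL

/* Hypothetical per-cluster cache, standing in for sched_cluster's new fields. */
struct cluster_cc {
	_Atomic uint64_t last_update;	/* wallclock (ns) of the last refresh */
	_Atomic uint64_t cycles;	/* cached counter value */
};

/* Stand-in for the expensive cycle-counter callback the patch rate-limits. */
static uint64_t raw_read_cycle_counter(void)
{
	static uint64_t fake;
	return fake += 1000;
}

/*
 * Return a cycle count that is at most THRESH_NS older than wallclock.
 * A concurrent refresh may race with this one; as the patch's comment
 * argues, the worst case is an extra hardware read, so no lock is taken.
 */
static uint64_t read_cycle_counter_cached(struct cluster_cc *cc, uint64_t wallclock)
{
	uint64_t delta = wallclock - atomic_load(&cc->last_update);

	if (delta > THRESH_NS) {
		atomic_store(&cc->cycles, raw_read_cycle_counter());
		atomic_store(&cc->last_update, wallclock);
	}
	return atomic_load(&cc->cycles);
}

int main(void)
{
	struct cluster_cc cc = { 0, 0 };

	/* The first and third reads hit the hardware; the second, only
	 * 500 ns after the first, is served from the cache. */
	printf("%llu\n", (unsigned long long)read_cycle_counter_cached(&cc, 10000));
	printf("%llu\n", (unsigned long long)read_cycle_counter_cached(&cc, 10500));
	printf("%llu\n", (unsigned long long)read_cycle_counter_cached(&cc, 13000));
	return 0;
}
```

As in the patch's comment, two callers racing on the refresh at worst perform one redundant hardware read, so the check-and-update stays lockless; the threshold only bounds how stale the cached value may be relative to the caller's wallclock.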
