| author | Joonwoo Park <joonwoop@codeaurora.org> | 2016-04-28 16:41:53 -0700 |
|---|---|---|
| committer | Kyle Yan <kyan@codeaurora.org> | 2016-06-09 15:07:48 -0700 |
| commit | 6e8c9ac98d71360e0edc345928e67e47cd7e2bcf (patch) | |
| tree | bb4077dd53282ef4d7ea67a1dc43ca50cfc9d02e | |
| parent | 54c0b0001b508a5dbd383cdf6e24e01064b32bbd (diff) | |
sched: fix CPU frequency estimation while idle
Depending on the hardware, the CPU cycle counter does not advance while the
CPU or cluster is idle, so reading it across such a period can yield an
incorrect CPU frequency estimate. Use the previously calculated CPU
frequency when the CPU was idle.
Change-Id: I732b50c974a73c08038995900e008b4e16e9437b
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
| -rw-r--r-- | kernel/sched/core.c | 28 |
|---|---|---|
1 file changed, 19 insertions(+), 9 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0da080688ddd..607e2bf0c75f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2702,7 +2702,7 @@ static void update_task_cpu_cycles(struct task_struct *p, int cpu)
 
 static void
 update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
-			  u64 wallclock)
+			  u64 wallclock, u64 irqtime)
 {
 	u64 cur_cycles;
 	int cpu = cpu_of(rq);
@@ -2716,13 +2716,23 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 	}
 
 	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
-	if (unlikely(cur_cycles < p->cpu_cycles))
-		rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
-	else
-		rq->cc.cycles = cur_cycles - p->cpu_cycles;
-	rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
-	rq->cc.time = wallclock - p->ravg.mark_start;
-	BUG_ON((s64)rq->cc.time < 0);
+
+	/*
+	 * If the current task is the idle task and irqtime == 0, the
+	 * CPU was indeed idle and its cycle counter was probably not
+	 * increasing.  We still need an estimated CPU frequency for
+	 * IO wait time accounting, so use the previously calculated
+	 * frequency in that case.
+	 */
+	if (!is_idle_task(rq->curr) || irqtime) {
+		if (unlikely(cur_cycles < p->cpu_cycles))
+			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+		else
+			rq->cc.cycles = cur_cycles - p->cpu_cycles;
+		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+		rq->cc.time = wallclock - p->ravg.mark_start;
+		BUG_ON((s64)rq->cc.time < 0);
+	}
 
 	p->cpu_cycles = cur_cycles;
 
@@ -2941,7 +2951,7 @@ update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 		goto done;
 	}
 
-	update_task_rq_cpu_cycles(p, rq, event, wallclock);
+	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
 	update_task_demand(p, rq, event, wallclock);
 	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
 	update_task_pred_demand(rq, p, event);
```
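For readers who want to poke at the arithmetic outside the kernel, here is a minimal user-space sketch of the same idea in plain C. It is an illustration under stated assumptions, not the scheduler code itself: the names `cc_state`, `update_window`, and `estimate_freq` are hypothetical stand-ins for the `rq->cc` / `p->cpu_cycles` state the patch manipulates, but the wraparound handling, the `NSEC_PER_MSEC` scaling, and the idle guard mirror the hunk above.

```c
#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

/* Hypothetical stand-in for the per-rq cycle-counter state. */
struct cc_state {
	uint64_t last_cycles;	/* counter snapshot at the previous update */
	uint64_t cycles;	/* cycles in the last window, scaled by NSEC_PER_MSEC */
	uint64_t time;		/* wall-clock nanoseconds in the last window */
};

/*
 * Update the cycle/time window used for frequency estimation.  When the
 * CPU was genuinely idle (the idle task ran and no IRQ time was charged),
 * the counter may not have advanced, so skip the update and let the
 * previous cycles/time pair -- and hence the old estimate -- stand.
 */
static void update_window(struct cc_state *s, uint64_t cur_cycles,
			  uint64_t wallclock, uint64_t mark_start,
			  bool curr_is_idle, uint64_t irqtime)
{
	if (!curr_is_idle || irqtime) {
		/* Wraparound handling for a free-running u64 counter,
		 * as in the patch. */
		if (cur_cycles < s->last_cycles)
			s->cycles = cur_cycles + (UINT64_MAX - s->last_cycles);
		else
			s->cycles = cur_cycles - s->last_cycles;
		s->cycles *= NSEC_PER_MSEC;
		s->time = wallclock - mark_start;
		/* Mirrors the BUG_ON: the window must not be negative. */
		assert((int64_t)s->time >= 0);
	}
	s->last_cycles = cur_cycles;
}

/* Estimated frequency in cycles per millisecond: cycles / time. */
static uint64_t estimate_freq(const struct cc_state *s)
{
	return s->time ? s->cycles / s->time : 0;
}

int main(void)
{
	struct cc_state s = { .last_cycles = 1000000 };

	/* Busy window: 2,000,000 cycles over 1 ms -> 2,000,000 cycles/ms,
	 * i.e. a 2 GHz estimate. */
	update_window(&s, 3000000, 2000000, 1000000, false, 0);
	printf("busy: %" PRIu64 " cycles/ms\n", estimate_freq(&s));

	/* Idle window: counter frozen; the previous estimate is reused. */
	update_window(&s, 3000000, 5000000, 2000000, true, 0);
	printf("idle: %" PRIu64 " cycles/ms\n", estimate_freq(&s));
	return 0;
}
```

Skipping the update, rather than zeroing `cycles` and `time`, is the key design choice: the last busy-window ratio stays in place, so anything that divides `cycles` by `time` while the counter is frozen (such as the IO wait time accounting the commit message mentions) still sees a sane frequency.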
