author		Joonwoo Park <joonwoop@codeaurora.org>	2016-06-17 14:46:50 -0700
committer	Kyle Yan <kyan@codeaurora.org>	2016-06-21 15:11:07 -0700
commit		47c31979a1a96ee9dba1ebc43d0f04a6e7f5ee19 (patch)
tree		80e4697cf7d52e1e603d472ccd9165ecc068d287
parent		14ac5ed8b89b45207345f6e31c47d196a824e7ee (diff)
sched: prevent race when updating CPU cycles
Updating the cycle counter should be serialized by holding the rq lock.
Add the missing rq lock hold when the cycle counter is updated by the
irq entry point.

Change-Id: I92cf75d047a45ebf15a6ddeeecf8fc3823f96e5d
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
-rw-r--r--	kernel/sched/core.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 263c15b0312e..63f08cf4f517 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3024,8 +3024,12 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
 	if (!rq->window_start || sched_disable_window_stats)
 		return;
 
-	if (is_idle_task(curr) && use_cycle_counter)
+	if (is_idle_task(curr)) {
+		/* We're here without rq->lock held, IRQ disabled */
+		raw_spin_lock(&rq->lock);
 		update_task_cpu_cycles(curr, cpu);
+		raw_spin_unlock(&rq->lock);
+	}
 }
 
 static void reset_task_stats(struct task_struct *p)
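
For illustration, here is a minimal user-space sketch of the serialization
pattern the patch applies: a pthread spinlock stands in for rq->lock, a plain
counter stands in for the per-task cycle counter that update_task_cpu_cycles()
maintains, and two threads play the role of concurrent irq entry paths. The
helper names (account_irqstart, irq_path) are invented for this sketch; it is
an analogue of the idea, not the kernel code.

/*
 * Analogue of the fix: every writer of the shared counter takes the
 * same lock, so concurrent updates can no longer interleave and lose
 * increments.  pthread_spinlock_t stands in for rq->lock; the counter
 * bump stands in for update_task_cpu_cycles().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t rq_lock;      /* stand-in for rq->lock */
static unsigned long long cpu_cycles;   /* stand-in for the cycle counter */

/* The formerly racy writer, now serialized under the lock. */
static void account_irqstart(void)
{
	pthread_spin_lock(&rq_lock);
	cpu_cycles++;                   /* update_task_cpu_cycles() analogue */
	pthread_spin_unlock(&rq_lock);
}

/* One simulated irq entry path hammering the counter. */
static void *irq_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		account_irqstart();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&rq_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, irq_path, NULL);
	pthread_create(&b, NULL, irq_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With every update made under the lock, the count is exact. */
	printf("cpu_cycles = %llu (expected 2000000)\n", cpu_cycles);
	return 0;
}

Built with cc -O2 sketch.c -lpthread, this always prints 2000000; dropping the
lock/unlock pair around the increment typically loses updates under contention,
which is the class of race the missing rq->lock hold allowed in
sched_account_irqstart().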