summaryrefslogtreecommitdiff
path: root/kernel/sched
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2016-10-05 19:29:24 -0700
committerGerrit - the friendly Code Review server <code-review@localhost>2016-10-05 19:29:24 -0700
commit9d4ed2cb2067669b01c0cbd54fca11893f082ab3 (patch)
treec603660bf33005ab6ef6489a0b5696e7f30765b8 /kernel/sched
parent52ae4541e46f44d0a3b2b27f2b01ff860e8b94de (diff)
parentf20772adf3d0ce00c6100f462ed12e05b805edd8 (diff)
Merge "sched: Fix integer overflow in sched_update_nr_prod()"
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/sched_avg.c | 19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index c70e0466c36c..29d8a26a78ed 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,17 +60,17 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
curr_time = sched_clock();
+ diff = curr_time - per_cpu(last_time, cpu);
+ BUG_ON((s64)diff < 0);
+
tmp_avg += per_cpu(nr_prod_sum, cpu);
- tmp_avg += per_cpu(nr, cpu) *
- (curr_time - per_cpu(last_time, cpu));
+ tmp_avg += per_cpu(nr, cpu) * diff;
tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
- tmp_big_avg += nr_eligible_big_tasks(cpu) *
- (curr_time - per_cpu(last_time, cpu));
+ tmp_big_avg += nr_eligible_big_tasks(cpu) * diff;
tmp_iowait += per_cpu(iowait_prod_sum, cpu);
- tmp_iowait += nr_iowait_cpu(cpu) *
- (curr_time - per_cpu(last_time, cpu));
+ tmp_iowait += nr_iowait_cpu(cpu) * diff;
per_cpu(last_time, cpu) = curr_time;
@@ -107,14 +107,15 @@ EXPORT_SYMBOL(sched_get_nr_running_avg);
*/
void sched_update_nr_prod(int cpu, long delta, bool inc)
{
- int diff;
- s64 curr_time;
+ u64 diff;
+ u64 curr_time;
unsigned long flags, nr_running;
spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
nr_running = per_cpu(nr, cpu);
curr_time = sched_clock();
diff = curr_time - per_cpu(last_time, cpu);
+ BUG_ON((s64)diff < 0);
per_cpu(last_time, cpu) = curr_time;
per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);