author    Srivatsa Vaddagiri <vatsa@codeaurora.org>  2015-02-16 16:42:59 +0530
committer David Keitel <dkeitel@codeaurora.org>      2016-03-23 20:01:41 -0700
commit    44d892787efab3795cf8c708b0cb5534cd90cdeb
tree      fc29d7b35a6c4aacfb428ba3976e8a6617973153 /kernel
parent    b3f9e5ac265af4a30ee9f9c3760e5c98e588e4b1
sched: Fix bug in average nr_running and nr_iowait calculation
sched_get_nr_running_avg() returns the average nr_running and nr_iowait
task counts since it was last invoked. Fix several bugs in their
calculation:

* sched_update_nr_prod() needs to consider that the nr_running count
  can change by more than 1 when the CFS_BANDWIDTH feature is used

* sched_get_nr_running_avg() needs to sum up the nr_iowait count across
  all cpus, rather than take it from just one

* sched_get_nr_running_avg() could race with sched_update_nr_prod(), as
  a result of which it could use a curr_time that is behind a cpu's
  'last_time' value. That would lead to an erroneous average nr_running
  or nr_iowait.

While at it, also fix a bug in the BUG_ON() check in
sched_update_nr_prod() and remove the unnecessary nr_running argument
from that function.

Change-Id: I46737614737292fae0d7204c4648fb9b862f65b2
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
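For context on the scheme being fixed: each CPU accumulates nr_running
multiplied by the time spent at that value (nr_prod_sum), and the reader
divides the accumulated products by the elapsed window. Below is a minimal
standalone sketch of that accounting in plain userspace C, with invented
names (it is not the kernel code itself); it includes the delta-based
update this patch introduces:

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified single-CPU analogue of the per-cpu state in sched_avg.c. */
    struct nr_acct {
            uint64_t last_time;   /* timestamp of the last update */
            int64_t  nr;          /* current nr_running value */
            uint64_t nr_prod_sum; /* sum of nr * time-spent-at-that-nr */
    };

    /* Credit the elapsed interval at the old nr, then apply the delta.
     * Adjusting by an arbitrary delta (not just +/-1) mirrors the fix for
     * CFS_BANDWIDTH, where nr_running can change by more than 1 at once. */
    static void update_nr(struct nr_acct *a, uint64_t now, int64_t delta)
    {
            a->nr_prod_sum += a->nr * (now - a->last_time);
            a->last_time = now;
            a->nr += delta;
    }

    /* Average nr_running over [window_start, now], scaled by 100. */
    static uint64_t read_avg(struct nr_acct *a, uint64_t window_start,
                             uint64_t now)
    {
            uint64_t sum = a->nr_prod_sum + a->nr * (now - a->last_time);

            a->nr_prod_sum = 0;
            a->last_time = now;
            return sum * 100 / (now - window_start);
    }

    int main(void)
    {
            struct nr_acct a = { 0 };

            update_nr(&a, 0, 2);   /* two tasks become runnable at t=0 */
            update_nr(&a, 500, 1); /* a third joins at t=500 */
            /* (2*500 + 3*500) / 1000 = 2.5 tasks on average -> prints 250 */
            printf("avg*100 = %llu\n",
                   (unsigned long long)read_avg(&a, 0, 1000));
            return 0;
    }

The 100x scaling is fixed-point arithmetic: it lets the kernel report a
fractional average task count (e.g. 250 for 2.5) without floating point.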
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/sched.h      |  4 ++--
-rw-r--r--  kernel/sched/sched_avg.c  | 22 +++++++++++++---------
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 57a5685daac7..57190adbe5c6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1626,7 +1626,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 {
         unsigned prev_nr = rq->nr_running;
 
-        sched_update_nr_prod(cpu_of(rq), rq->nr_running, true);
+        sched_update_nr_prod(cpu_of(rq), count, true);
         rq->nr_running = prev_nr + count;
 
         if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1653,7 +1653,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
-        sched_update_nr_prod(cpu_of(rq), rq->nr_running, false);
+        sched_update_nr_prod(cpu_of(rq), count, false);
         rq->nr_running -= count;
 }
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 8eaf2f770a28..4f5a67ec4dd6 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -48,16 +48,16 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg)
         if (!diff)
                 return;
 
-        last_get_time = curr_time;
         /* read and reset nr_running counts */
         for_each_possible_cpu(cpu) {
                 unsigned long flags;
 
                 spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+                curr_time = sched_clock();
                 tmp_avg += per_cpu(nr_prod_sum, cpu);
                 tmp_avg += per_cpu(nr, cpu) *
                         (curr_time - per_cpu(last_time, cpu));
-                tmp_iowait = per_cpu(iowait_prod_sum, cpu);
+                tmp_iowait += per_cpu(iowait_prod_sum, cpu);
                 tmp_iowait += nr_iowait_cpu(cpu) *
                         (curr_time - per_cpu(last_time, cpu));
                 per_cpu(last_time, cpu) = curr_time;
@@ -66,38 +66,42 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg)
                 spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
         }
 
+        diff = curr_time - last_get_time;
+        last_get_time = curr_time;
+
         *avg = (int)div64_u64(tmp_avg * 100, diff);
         *iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
 
         BUG_ON(*avg < 0);
         pr_debug("%s - avg:%d\n", __func__, *avg);
         BUG_ON(*iowait_avg < 0);
-        pr_debug("%s - avg:%d\n", __func__, *iowait_avg);
+        pr_debug("%s - iowait_avg:%d\n", __func__, *iowait_avg);
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
 /**
  * sched_update_nr_prod
  * @cpu: The core id of the nr running driver.
- * @nr: Updated nr running value for cpu.
+ * @delta: Adjust nr by 'delta' amount
  * @inc: Whether we are increasing or decreasing the count
  * @return: N/A
  *
  * Update average with latest nr_running value for CPU
  */
-void sched_update_nr_prod(int cpu, unsigned long nr_running, bool inc)
+void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
         int diff;
         s64 curr_time;
-        unsigned long flags;
+        unsigned long flags, nr_running;
 
         spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+        nr_running = per_cpu(nr, cpu);
         curr_time = sched_clock();
         diff = curr_time - per_cpu(last_time, cpu);
         per_cpu(last_time, cpu) = curr_time;
-        per_cpu(nr, cpu) = nr_running + (inc ? 1 : -1);
+        per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);
 
-        BUG_ON(per_cpu(nr, cpu) < 0);
+        BUG_ON((s64)per_cpu(nr, cpu) < 0);
 
         per_cpu(nr_prod_sum, cpu) += nr_running * diff;
         per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
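Since sched_get_nr_running_avg() is exported, the fixed averages can be
consumed from a module. The following is a hypothetical polling consumer,
sketched against the prototype defined above; the sampler itself is
invented for illustration and is not part of this patch:

    /* Hypothetical sampler module; illustrative only, not from this patch. */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    /* Prototype matches the definition in kernel/sched/sched_avg.c above. */
    extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);

    static struct delayed_work sample_work;

    static void sample_fn(struct work_struct *work)
    {
            int avg, iowait_avg;

            /* Averages since the previous call, scaled by 100:
             * avg == 250 means 2.5 runnable tasks on average. */
            sched_get_nr_running_avg(&avg, &iowait_avg);
            pr_info("run avg %d.%02d, iowait avg %d.%02d\n",
                    avg / 100, avg % 100, iowait_avg / 100, iowait_avg % 100);

            schedule_delayed_work(&sample_work, HZ / 10); /* ~100ms period */
    }

    static int __init sampler_init(void)
    {
            INIT_DELAYED_WORK(&sample_work, sample_fn);
            schedule_delayed_work(&sample_work, HZ / 10);
            return 0;
    }

    static void __exit sampler_exit(void)
    {
            cancel_delayed_work_sync(&sample_work);
    }

    module_init(sampler_init);
    module_exit(sampler_exit);
    MODULE_LICENSE("GPL");

Each read window runs from one sched_get_nr_running_avg() call to the
next, which is why the patch recomputes 'diff' from the locked,
per-read curr_time rather than a stale value sampled before the loop.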