author		Srivatsa Vaddagiri <vatsa@codeaurora.org>	2013-06-11 17:43:09 -0700
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 19:58:34 -0700
commit		0ec4cf2484ad5860d84dda1aef44dc35e7c2a81d (patch)
tree		e6d5d2b173c9e9034ff60a7e0e54bc01dce86d23 /kernel
parent		63249df6b26afebf6ad1b7b7385182c125c88477 (diff)
sched: re-calculate a cpu's next_balance point upon sched domain changes
A cpu's next_balance point could be stale when it is being attached to a
sched domain hierarchy. That would lead to an undesirable delay in the cpu
doing a load balance and hence can affect scheduling latencies for tasks.
Fix that by initializing the cpu's next_balance point when it is attached
to a sched domain hierarchy.

Change-Id: I855cff8da5ca28d278596c3bb0163b839d4704bc
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Modify commit text to reflect dropped patches]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
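For illustration, the sketch below is a minimal, self-contained user-space
rendering of the idea: walk the newly attached domain hierarchy and pull the
runqueue's next_balance forward to the earliest pending balance point. The
types and names used here (struct sd, recalc_next_balance) are simplified
stand-ins invented for this sketch; they are not the kernel's struct
sched_domain, msecs_to_jiffies() or time_after().

/*
 * Minimal sketch of the next_balance recalculation. Intervals and balance
 * points are plain unsigned longs standing in for jiffies; the kernel uses
 * time_after() for a wrap-safe comparison instead of "<".
 */
#include <stdio.h>

struct sd {                              /* stand-in for struct sched_domain */
	unsigned long balance_interval;  /* balance interval, in "jiffies" */
	unsigned long last_balance;      /* when this domain last balanced */
	struct sd *parent;
};

/* Return the earliest pending balance point across the domain hierarchy. */
static unsigned long recalc_next_balance(unsigned long next_balance,
					 struct sd *sd)
{
	struct sd *tmp;

	for (tmp = sd; tmp; tmp = tmp->parent) {
		unsigned long candidate = tmp->last_balance + tmp->balance_interval;

		if (candidate < next_balance)  /* time_after() in the kernel */
			next_balance = candidate;
	}
	return next_balance;
}

int main(void)
{
	struct sd top  = { .balance_interval = 64, .last_balance = 1000, .parent = NULL };
	struct sd base = { .balance_interval = 8,  .last_balance = 1000, .parent = &top  };

	/* A stale next_balance far in the future (5000) is pulled in to 1008. */
	printf("%lu\n", recalc_next_balance(5000, &base));
	return 0;
}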
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cd488bf2a0d1..daababd6b211 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6057,6 +6057,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
+	unsigned long next_balance = rq->next_balance;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
 	for (tmp = sd; tmp; ) {
@@ -6088,6 +6089,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 			sd->child = NULL;
 	}
 
+	for (tmp = sd; tmp; ) {
+		unsigned long interval;
+
+		interval = msecs_to_jiffies(tmp->balance_interval);
+		if (time_after(next_balance, tmp->last_balance + interval))
+			next_balance = tmp->last_balance + interval;
+
+		tmp = tmp->parent;
+	}
+	rq->next_balance = next_balance;
+
 	sched_domain_debug(sd, cpu);
 
 	rq_attach_root(rq, rd);