author     Joonwoo Park <joonwoop@codeaurora.org>    2015-07-13 21:04:18 -0700
committer  David Keitel <dkeitel@codeaurora.org>     2016-03-23 20:02:16 -0700
commit     b5a9a7b1c714e4a6711f6f88d9b963a8e6fcf9d7 (patch)
tree       7fd92f53685ea4614d832e33d227fbac80fb8223 /kernel
parent     d109fbbf71373e66ae6b78e0b2706ffc4cf3e716 (diff)
sched: avoid stale cumulative_runnable_avg HMP statistics
When a new window starts for a task that is on a runqueue, the scheduler momentarily decreases the rq's cumulative_runnable_avg, re-accounts the task's demand, and then increases the rq's cumulative_runnable_avg by the newly accounted demand. There is therefore a short period during which the rq's cumulative_runnable_avg is lower than it should be. Meanwhile, another CPU may be searching for the best CPU on which to place a task and make a suboptimal decision based on this momentarily stale cumulative_runnable_avg. Fix this by adding or subtracting the delta between the task's old and new demand instead of decrementing and then incrementing by the task's entire load.

Change-Id: I3c9329961e6f96e269fa13359e7d1c39c4973ff2
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
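To make the race concrete, here is a minimal user-space sketch (plain C, standalone; the struct layout and helper names such as update_demand_dec_inc and update_demand_fixup are invented for illustration and are not the kernel's functions). It contrasts the old dec/inc update, which briefly understates the per-rq total, with the delta-based fixup this patch introduces:

/*
 * Simplified, standalone sketch (not the kernel code) contrasting the old
 * dec/inc update with the new delta-based fixup.
 */
#include <stdint.h>
#include <stdio.h>

struct hmp_sched_stats {
	uint64_t cumulative_runnable_avg;   /* sum of demands of runnable tasks */
};

struct task {
	uint32_t demand;                    /* this task's accounted demand */
};

/* Old scheme: the rq total momentarily drops by the task's whole demand. */
static void update_demand_dec_inc(struct hmp_sched_stats *stats,
				  struct task *p, uint32_t new_demand)
{
	stats->cumulative_runnable_avg -= p->demand;
	/*
	 * <-- window: another CPU reading cumulative_runnable_avg here sees
	 *     a value that is too small by p->demand and may wrongly treat
	 *     this CPU as the least loaded one.
	 */
	p->demand = new_demand;
	stats->cumulative_runnable_avg += p->demand;
}

/* New scheme: apply only the signed delta, so the total is never understated. */
static void update_demand_fixup(struct hmp_sched_stats *stats,
				struct task *p, uint32_t new_demand)
{
	int64_t delta = (int64_t)new_demand - (int64_t)p->demand;

	p->demand = new_demand;
	stats->cumulative_runnable_avg += delta;
}

int main(void)
{
	struct hmp_sched_stats stats = { .cumulative_runnable_avg = 900 };
	struct task p = { .demand = 300 };

	/* Old path: correct end result, but transiently reads 600. */
	update_demand_dec_inc(&stats, &p, 450);
	printf("after dec/inc: %llu\n",
	       (unsigned long long)stats.cumulative_runnable_avg);

	/* New path: same end result with no understated intermediate value. */
	update_demand_fixup(&stats, &p, 500);
	printf("after fixup:   %llu\n",
	       (unsigned long long)stats.cumulative_runnable_avg);
	return 0;
}

In the kernel the window-rollover accounting runs under the runqueue lock, but, as the commit message notes, other CPUs sample cumulative_runnable_avg while searching for a placement target, so even a brief understated value can skew task placement.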
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c       30
-rw-r--r--  kernel/sched/deadline.c    8
-rw-r--r--  kernel/sched/fair.c       41
-rw-r--r--  kernel/sched/idle_task.c   7
-rw-r--r--  kernel/sched/rt.c          8
-rw-r--r--  kernel/sched/sched.h      20
-rw-r--r--  kernel/sched/stop_task.c   8
7 files changed, 106 insertions, 16 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6b30aa0ccf50..b92b3617bc94 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1692,29 +1692,27 @@ static void update_history(struct rq *rq, struct task_struct *p,
p->ravg.sum = 0;
+ if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
+ demand = runtime;
+ } else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
+ demand = max;
+ } else {
+ avg = div64_u64(sum, sched_ravg_hist_size);
+ if (sched_window_stats_policy == WINDOW_STATS_AVG)
+ demand = avg;
+ else
+ demand = max(avg, runtime);
+ }
+
/*
* A throttled deadline sched class task gets dequeued without
* changing p->on_rq. Since the dequeue decrements hmp stats
* avoid decrementing it here again.
*/
if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
- p->sched_class->dec_hmp_sched_stats(rq, p);
-
- avg = div64_u64(sum, sched_ravg_hist_size);
-
- if (sched_window_stats_policy == WINDOW_STATS_RECENT)
- demand = runtime;
- else if (sched_window_stats_policy == WINDOW_STATS_MAX)
- demand = max;
- else if (sched_window_stats_policy == WINDOW_STATS_AVG)
- demand = avg;
+ p->sched_class->fixup_hmp_sched_stats(rq, p, demand);
else
- demand = max(avg, runtime);
-
- p->ravg.demand = demand;
-
- if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
- p->sched_class->inc_hmp_sched_stats(rq, p);
+ p->ravg.demand = demand;
done:
trace_sched_update_history(rq, p, runtime, samples, event);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ad92418a8fee..663cd1278bff 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -865,6 +865,13 @@ dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+ u32 new_task_load)
+{
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+}
+
#else /* CONFIG_SCHED_HMP */
static inline void
@@ -1875,6 +1882,7 @@ const struct sched_class dl_sched_class = {
#ifdef CONFIG_SCHED_HMP
.inc_hmp_sched_stats = inc_hmp_sched_stats_dl,
.dec_hmp_sched_stats = dec_hmp_sched_stats_dl,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl,
#endif
};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e7ba8bce1fb..2be602775241 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3870,6 +3870,37 @@ static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
_dec_hmp_sched_stats_fair(rq, p, 1);
}
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+ u32 old_task_load = p->ravg.demand;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ dec_nr_big_small_task(&cfs_rq->hmp_stats, p);
+ fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+ new_task_load);
+ inc_nr_big_small_task(&cfs_rq->hmp_stats, p);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ /*
+ * fixup_cumulative_runnable_avg() sets p->ravg.demand to
+ * new_task_load.
+ */
+ p->ravg.demand = old_task_load;
+ }
+
+ /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ if (!se) {
+ dec_nr_big_small_task(&rq->hmp_stats, p);
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+ inc_nr_big_small_task(&rq->hmp_stats, p);
+ }
+}
+
static int task_will_be_throttled(struct task_struct *p);
#else /* CONFIG_CFS_BANDWIDTH */
@@ -3888,6 +3919,15 @@ dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load)
+{
+ dec_nr_big_small_task(&rq->hmp_stats, p);
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+ inc_nr_big_small_task(&rq->hmp_stats, p);
+}
+
static inline int task_will_be_throttled(struct task_struct *p)
{
return 0;
@@ -10927,6 +10967,7 @@ const struct sched_class fair_sched_class = {
#ifdef CONFIG_SCHED_HMP
.inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
.dec_hmp_sched_stats = dec_hmp_sched_stats_fair,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_fair,
#endif
};
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index cccb9c97158e..b46a0f9082d4 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -91,6 +91,12 @@ dec_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
{
}
+static void
+fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
+ u32 new_task_load)
+{
+}
+
#endif
/*
@@ -124,5 +130,6 @@ const struct sched_class idle_sched_class = {
#ifdef CONFIG_SCHED_HMP
.inc_hmp_sched_stats = inc_hmp_sched_stats_idle,
.dec_hmp_sched_stats = dec_hmp_sched_stats_idle,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_idle,
#endif
};
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 235f6295590d..cf8e1814142d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1196,6 +1196,13 @@ dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
+static void
+fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
+ u32 new_task_load)
+{
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+}
+
#else /* CONFIG_SCHED_HMP */
static inline void
@@ -2463,6 +2470,7 @@ const struct sched_class rt_sched_class = {
#ifdef CONFIG_SCHED_HMP
.inc_hmp_sched_stats = inc_hmp_sched_stats_rt,
.dec_hmp_sched_stats = dec_hmp_sched_stats_rt,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_rt,
#endif
};
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 08f1f7e9c0e9..148f08718a9d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1064,6 +1064,24 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
BUG_ON((s64)stats->cumulative_runnable_avg < 0);
}
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p, u32 new_task_load)
+{
+ u32 task_load;
+
+ task_load = sched_use_pelt ?
+ p->se.avg.runnable_avg_sum_scaled : p->ravg.demand;
+ p->ravg.demand = new_task_load;
+
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ stats->cumulative_runnable_avg += ((s64)new_task_load - task_load);
+ BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+}
+
+
#define pct_to_real(tunable) \
(div64_u64((u64)tunable * (u64)max_task_load(), 100))
@@ -1566,6 +1584,8 @@ struct sched_class {
#ifdef CONFIG_SCHED_HMP
void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+ void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+ u32 new_task_load);
#endif
};
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index f101bb39ee5e..93aba3060d9a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -31,6 +31,13 @@ dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
+static void
+fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
+ u32 new_task_load)
+{
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+}
+
#else /* CONFIG_SCHED_HMP */
static inline void
@@ -163,5 +170,6 @@ const struct sched_class stop_sched_class = {
#ifdef CONFIG_SCHED_HMP
.inc_hmp_sched_stats = inc_hmp_sched_stats_stop,
.dec_hmp_sched_stats = dec_hmp_sched_stats_stop,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_stop,
#endif
};