summaryrefslogtreecommitdiff
path: root/kernel/sched/hmp.c
diff options
context:
space:
mode:
authorSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2017-03-21 18:22:49 -0700
committerSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2017-05-19 12:30:51 -0700
commit5b138bd514a25350683f7b0bd874ab2b995b94e5 (patch)
tree6f677ce1c08518e80c315891ff21cce89407601f /kernel/sched/hmp.c
parent5404e35069576a8cd8203065f60975e34a5f727b (diff)
sched: Fix load tracking bug to avoid adding phantom task demand
When update_task_ravg() is called with the TASK_UPDATE event on a task that is not on the runqueue, task demand accounting incorrectly treats the time delta as execution time. This can happen when a sleeping task is moved to/from colocation groups. This phantom execution time can cause unpredictable changes to demand that in turn can result in incorrect task placement. Fix the issue by adding special handling of TASK_UPDATE in task demand accounting. CPU busy time accounting already has all the necessary checks. Change-Id: Ibb42d83ac353bf2e849055fa3cb5c22e7acd56de Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel/sched/hmp.c')
-rw-r--r--kernel/sched/hmp.c16
1 file changed, 14 insertions, 2 deletions
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index df47c26ab6d2..bedfc500bd8a 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -2579,7 +2579,8 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
}
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
{
/*
* No need to bother updating task demand for exiting tasks
@@ -2598,6 +2599,17 @@ static int account_busy_for_task_demand(struct task_struct *p, int event)
(event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
return 0;
+ /*
+ * TASK_UPDATE can be called on a sleeping task when it's moved between
+ * related groups
+ */
+ if (event == TASK_UPDATE) {
+ if (rq->curr == p)
+ return 1;
+
+ return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+ }
+
return 1;
}
@@ -2738,7 +2750,7 @@ static u64 update_task_demand(struct task_struct *p, struct rq *rq,
u64 runtime;
new_window = mark_start < window_start;
- if (!account_busy_for_task_demand(p, event)) {
+ if (!account_busy_for_task_demand(rq, p, event)) {
if (new_window)
/*
* If the time accounted isn't being accounted as