path: root/kernel/sched/rt.c
author		Joonwoo Park <joonwoop@codeaurora.org>	2015-07-30 10:44:13 -0700
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:02:30 -0700
commit		383ae6b29eb14d498e3a57f40fa61115d910646d (patch)
tree		73e9270e5c0509fa24aa9a6d39580a7d23fa854c /kernel/sched/rt.c
parent		446beddcd4aeb4246c6a1f391063138bd4e899ee (diff)
sched: clean up fixup_hmp_sched_stats()
Commit 392edf4969d20 ("sched: avoid stale cumulative_runnable_avg HMP
statistics") introduced the callback function fixup_hmp_sched_stats() so that
update_history() can avoid a decrement/increment pair of HMP stats updates.
However, that commit also made the fixup function perform an obscure
p->ravg.demand update, which isn't the cleanest approach.  Revise
fixup_hmp_sched_stats() so the caller can update p->ravg.demand directly.

Change-Id: Id54667d306495d2109c26362813f80f08a1385ad
[joonwoop@codeaurora.org: stripped out CONFIG_SCHED_QHMP.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
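The split of responsibilities described above can be modelled with a small,
self-contained C sketch.  The names below (hmp_stats_model, task_model,
update_history_model, fixup_cumulative_runnable_avg_model) are illustrative
stand-ins, not the kernel's structures; the point is that the fixup helper now
receives a signed load delta, while the caller updates the task's demand
itself instead of having the fixup path write p->ravg.demand.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel structures (illustrative only). */
struct hmp_stats_model {
	uint64_t cumulative_runnable_avg;
};

struct task_model {
	uint32_t demand;	/* plays the role of p->ravg.demand */
};

/*
 * After the change: the fixup helper only applies a signed delta to the
 * cumulative statistic; it no longer touches the task's demand.
 */
static void fixup_cumulative_runnable_avg_model(struct hmp_stats_model *stats,
						int64_t task_load_delta)
{
	stats->cumulative_runnable_avg += task_load_delta;
	assert((int64_t)stats->cumulative_runnable_avg >= 0);
}

/*
 * Model of the caller (update_history() in the real code): compute the
 * delta, let the class-specific fixup adjust the per-rq statistic, then
 * update the task's demand directly.
 */
static void update_history_model(struct hmp_stats_model *stats,
				 struct task_model *p, uint32_t new_load)
{
	int64_t delta = (int64_t)new_load - p->demand;

	fixup_cumulative_runnable_avg_model(stats, delta);
	p->demand = new_load;	/* caller owns the demand update now */
}

int main(void)
{
	struct hmp_stats_model stats = { .cumulative_runnable_avg = 1000 };
	struct task_model p = { .demand = 300 };

	update_history_model(&stats, &p, 200);	/* demand shrinks by 100 */
	printf("cumulative = %llu, demand = %u\n",
	       (unsigned long long)stats.cumulative_runnable_avg, p.demand);
	return 0;
}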
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index dbf8ea6dc535..4d490c90b03e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1200,7 +1200,9 @@ static void
fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
u32 new_task_load)
{
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
}
#else /* CONFIG_SCHED_HMP */
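The hunk above shows only the caller side in rt.c: the third argument passed
to fixup_cumulative_runnable_avg() changes from an absolute new load to a
signed delta, so the helper it calls (declared elsewhere in the scheduler
headers) must now take an s64.  A minimal sketch of that shape, assuming the
helper simply applies the delta to the per-rq statistic and leaves
p->ravg.demand to the caller:

/*
 * Sketch of the delta-based helper the hunk above calls.  This is an
 * assumption about its shape after the change, not a verbatim copy from
 * the tree.
 */
static inline void
fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
			      struct task_struct *p, s64 task_load_delta)
{
	/* Apply the signed delta; demand itself is updated by the caller. */
	stats->cumulative_runnable_avg += task_load_delta;
	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
}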