path: root/kernel/sched/deadline.c
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--   kernel/sched/deadline.c   48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9d9eb50d4059..685ae83b2bfa 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -289,9 +289,11 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
/*
* By now the task is replenished and enqueued; migrate it.
*/
+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, p, 0);
set_task_cpu(p, later_rq->cpu);
activate_task(later_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
if (!fallback)
resched_curr(later_rq);
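
Note: this hunk, together with the push_dl_task() and pull_dl_task() hunks further down, follows one pattern: mark the task TASK_ON_RQ_MIGRATING before deactivate_task(), move it with set_task_cpu(), then restore TASK_ON_RQ_QUEUED once activate_task() has placed it on the destination runqueue, so anything inspecting p->on_rq can tell an in-flight migration apart from a queued task. Below is a minimal user-space sketch of that ordering; the toy_* types and helpers are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Same state values the kernel uses for p->on_rq. */
enum on_rq_state { TASK_NOT_ON_RQ = 0, TASK_ON_RQ_QUEUED, TASK_ON_RQ_MIGRATING };

struct toy_task { const char *name; int cpu; enum on_rq_state on_rq; };

static void toy_dequeue(struct toy_task *p)
{
	printf("dequeue %s from cpu%d\n", p->name, p->cpu);
}

static void toy_enqueue(struct toy_task *p, int dst_cpu)
{
	p->cpu = dst_cpu;
	printf("enqueue %s on cpu%d\n", p->name, p->cpu);
}

/* Mirrors the bracketing in the hunk above. */
static void toy_migrate(struct toy_task *p, int dst_cpu)
{
	p->on_rq = TASK_ON_RQ_MIGRATING;   /* in flight: not "queued" on any rq   */
	toy_dequeue(p);                    /* stands in for deactivate_task()     */
	toy_enqueue(p, dst_cpu);           /* set_task_cpu() + activate_task()    */
	p->on_rq = TASK_ON_RQ_QUEUED;      /* visible again as a queued task      */
}

int main(void)
{
	struct toy_task t = { .name = "dl_task", .cpu = 0, .on_rq = TASK_ON_RQ_QUEUED };

	toy_migrate(&t, 2);
	return 0;
}
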
@@ -870,6 +872,41 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+ pred_demand_delta);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
@@ -879,6 +916,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
+ inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -893,6 +931,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+ dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
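
Note: under CONFIG_SCHED_HMP the helpers added above feed the per-runqueue window statistics: the inc/dec hooks adjust the cumulative runnable average as a deadline task is enqueued or dequeued (called from inc_dl_tasks()/dec_dl_tasks()), and the fixup hook applies signed deltas when a queued task's load and predicted demand are re-estimated. PRED_DEMAND_DELTA and the hmp_stats layout are defined elsewhere and not shown in this diff, so the field names in the stand-alone sketch below are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

struct toy_hmp_stats { uint64_t cumulative_runnable_avg; uint64_t pred_demands_sum; };
struct toy_task { uint32_t load; uint32_t pred_demand; };

/* Account a task's demand when it becomes runnable on this rq. */
static void toy_inc(struct toy_hmp_stats *s, const struct toy_task *p)
{
	s->cumulative_runnable_avg += p->load;
	s->pred_demands_sum += p->pred_demand;
}

/* Remove the task's contribution when it leaves the rq. */
static void toy_dec(struct toy_hmp_stats *s, const struct toy_task *p)
{
	s->cumulative_runnable_avg -= p->load;
	s->pred_demands_sum -= p->pred_demand;
}

/* Apply signed deltas when a queued task's demand estimate changes. */
static void toy_fixup(struct toy_hmp_stats *s, struct toy_task *p,
		      uint32_t new_load, uint32_t new_pred_demand)
{
	int64_t load_delta = (int64_t)new_load - p->load;
	int64_t pred_delta = (int64_t)new_pred_demand - p->pred_demand;

	s->cumulative_runnable_avg += load_delta;
	s->pred_demands_sum += pred_delta;
	p->load = new_load;
	p->pred_demand = new_pred_demand;
}

int main(void)
{
	struct toy_hmp_stats rq_stats = { 0, 0 };
	struct toy_task t = { .load = 100, .pred_demand = 80 };

	toy_inc(&rq_stats, &t);            /* task enqueued on the rq          */
	toy_fixup(&rq_stats, &t, 140, 90); /* demand re-estimated while queued */
	toy_dec(&rq_stats, &t);            /* task dequeued again              */
	printf("avg=%llu pred=%llu\n",
	       (unsigned long long)rq_stats.cumulative_runnable_avg,
	       (unsigned long long)rq_stats.pred_demands_sum);
	return 0;
}
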
@@ -1578,11 +1617,13 @@ retry:
goto retry;
}
+ next_task->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, next_task, 0);
clear_average_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
add_average_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, 0);
+ next_task->on_rq = TASK_ON_RQ_QUEUED;
ret = 1;
resched_curr(later_rq);
@@ -1668,11 +1709,13 @@ static void pull_dl_task(struct rq *this_rq)
resched = true;
+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
clear_average_bw(&p->dl, &src_rq->dl);
set_task_cpu(p, this_cpu);
add_average_bw(&p->dl, &this_rq->dl);
activate_task(this_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
dmin = p->dl.deadline;
/* Is there any other task even earlier? */
@@ -1875,6 +1918,11 @@ const struct sched_class dl_sched_class = {
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_dl,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_dl,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG
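
Note: the final hunk plugs the new helpers into dl_sched_class, so generic HMP accounting code can reach them through the class's function pointers without caring which scheduling class a task belongs to. A stand-alone sketch of that dispatch pattern follows; the structures and caller are simplified stand-ins, not the kernel's sched_class.

#include <stdio.h>

struct toy_rq;
struct toy_task;

/* Cut-down stand-in for the per-class method table. */
struct toy_sched_class {
	const char *name;
	void (*inc_hmp_sched_stats)(struct toy_rq *rq, struct toy_task *p);
	void (*dec_hmp_sched_stats)(struct toy_rq *rq, struct toy_task *p);
};

static void dl_inc(struct toy_rq *rq, struct toy_task *p)
{
	(void)rq; (void)p;
	puts("dl: inc hmp stats");
}

static void dl_dec(struct toy_rq *rq, struct toy_task *p)
{
	(void)rq; (void)p;
	puts("dl: dec hmp stats");
}

static const struct toy_sched_class toy_dl_class = {
	.name = "deadline",
	.inc_hmp_sched_stats = dl_inc,
	.dec_hmp_sched_stats = dl_dec,
};

/* Generic caller: dispatches to whichever class the task belongs to. */
static void on_enqueue(const struct toy_sched_class *class,
		       struct toy_rq *rq, struct toy_task *p)
{
	if (class->inc_hmp_sched_stats)
		class->inc_hmp_sched_stats(rq, p);
}

int main(void)
{
	on_enqueue(&toy_dl_class, NULL, NULL);
	return 0;
}
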