diff options
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 23 |
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 13ae48336c92..2ecc87e12491 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2566,6 +2566,16 @@ static int register_sched_callback(void) */ core_initcall(register_sched_callback); +static u64 orig_mark_start(struct task_struct *p) +{ + return p->ravg.mark_start; +} + +static void restore_orig_mark_start(struct task_struct *p, u64 mark_start) +{ + p->ravg.mark_start = mark_start; +} + #else /* CONFIG_SCHED_HMP */ static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } @@ -2590,6 +2600,13 @@ static inline void set_window_start(struct rq *rq) {} static inline void migrate_sync_cpu(int cpu) {} +static inline u64 orig_mark_start(struct task_struct *p) { return 0; } + +static inline void +restore_orig_mark_start(struct task_struct *p, u64 mark_start) +{ +} + #endif /* CONFIG_SCHED_HMP */ #ifdef CONFIG_SMP @@ -6639,11 +6656,17 @@ void init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; + u64 mark_start = orig_mark_start(idle); raw_spin_lock_irqsave(&idle->pi_lock, flags); raw_spin_lock(&rq->lock); __sched_fork(0, idle); + /* + * Restore idle thread's original mark_start as we rely on it being + * correct for maintaining per-cpu counters, curr/prev_runnable_sum. + */ + restore_orig_mark_start(idle, mark_start); idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); |
