| author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2016-01-28 16:12:40 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:25:19 -0700 |
| commit | fbeb32ce8f807fbb2c20bc2d153457bcef914bc3 (patch) | |
| tree | 478d51ed273dc892219a10527759fb8143c20f1b | /kernel |
| parent | 58d411413ffb4c5945948609eb370e287447b5c1 (diff) | |
sched: clean up idle task's mark_start restoring in init_idle()
The idle task's mark_start can get updated even while the CPU is not
online. Hence the original mark_start has to be restored when the CPU comes online.
The idle task's mark_start is reset in init_idle()->__sched_fork()->
init_new_task_load(). The original mark_start is saved and restored
later. This can be avoided by moving init_new_task_load() to
wake_up_new_task(), which never gets called for an idle task.
We only care about the idle task's ravg.mark_start; leaving the other
fields of the ravg struct uninitialized has no side effects.
This cleanup allows the subsequent patches to drop the rq->lock
while calling __sched_fork() in init_idle().
CRs-Fixed: 965873
Change-Id: I41de6d69944d7d44b9c4d11b2d97ad01bd8fe96d
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[joonwoop@codeaurora.org: fixed a minor conflict in core.c. omitted
changes for CONFIG_SCHED_QHMP.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 27 |
1 file changed, 1 insertion, 26 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f71e4882093c..4e5abc3bb294 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3648,16 +3648,6 @@ static int register_sched_callback(void)
  */
 core_initcall(register_sched_callback);
 
-static u64 orig_mark_start(struct task_struct *p)
-{
-        return p->ravg.mark_start;
-}
-
-static void restore_orig_mark_start(struct task_struct *p, u64 mark_start)
-{
-        p->ravg.mark_start = mark_start;
-}
-
 static inline int update_preferred_cluster(struct related_thread_group *grp,
                 struct task_struct *p, u32 old_load)
 {
@@ -3699,13 +3689,6 @@ static inline void set_window_start(struct rq *rq) {}
 
 static inline void migrate_sync_cpu(int cpu) {}
 
-static inline u64 orig_mark_start(struct task_struct *p) { return 0; }
-
-static inline void
-restore_orig_mark_start(struct task_struct *p, u64 mark_start)
-{
-}
-
 #endif /* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SMP
@@ -4912,7 +4895,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
         p->se.prev_sum_exec_runtime = 0;
         p->se.nr_migrations = 0;
         p->se.vruntime = 0;
-        init_new_task_load(p);
         INIT_LIST_HEAD(&p->se.group_node);
 
@@ -5182,6 +5164,7 @@ void wake_up_new_task(struct task_struct *p)
         struct rq *rq;
 
         raw_spin_lock_irqsave(&p->pi_lock, flags);
+        init_new_task_load(p);
         /* Initialize new task's runnable average */
         init_entity_runnable_average(&p->se);
 #ifdef CONFIG_SMP
@@ -7861,19 +7844,11 @@ void init_idle(struct task_struct *idle, int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
         unsigned long flags;
-        u64 mark_start;
 
         raw_spin_lock_irqsave(&idle->pi_lock, flags);
         raw_spin_lock(&rq->lock);
-        mark_start = orig_mark_start(idle);
-
         __sched_fork(0, idle);
-        /*
-         * Restore idle thread's original mark_start as we rely on it being
-         * correct for maintaining per-cpu counters, curr/prev_runnable_sum.
-         */
-        restore_orig_mark_start(idle, mark_start);
         idle->state = TASK_RUNNING;
         idle->se.exec_start = sched_clock();
```
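For readers skimming the change, below is a minimal, self-contained sketch of the call flow the patch leaves behind. It is not the kernel source: the struct layouts and helper bodies are simplified stand-ins, and only the placement of init_new_task_load() mirrors the diff above (called from wake_up_new_task(), no longer from __sched_fork(), so init_idle() never resets the idle task's ravg.mark_start).

```c
/*
 * Simplified model of the post-patch call flow. Types and helper bodies
 * are illustrative stand-ins, not the kernel implementation.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct ravg {
	u64 mark_start;		/* window-tracking timestamp */
};

struct task_struct {
	struct ravg ravg;
	int state;
};

/* Resets window-tracking state for a newly created task. */
static void init_new_task_load(struct task_struct *p)
{
	p->ravg.mark_start = 0;
}

/* After the patch, __sched_fork() no longer touches p->ravg. */
static void __sched_fork(struct task_struct *p)
{
	p->state = 0;
}

/* Only real (non-idle) tasks pass through here, so the reset is safe. */
static void wake_up_new_task(struct task_struct *p)
{
	init_new_task_load(p);
	/* ... activate the task on a runqueue ... */
}

/* The idle task's mark_start set before the CPU came online survives. */
static void init_idle(struct task_struct *idle)
{
	__sched_fork(idle);
}

int main(void)
{
	struct task_struct idle  = { .ravg = { .mark_start = 1234 } };
	struct task_struct child = { .ravg = { .mark_start = 99 } };

	init_idle(&idle);
	wake_up_new_task(&child);

	printf("idle mark_start preserved: %llu\n", idle.ravg.mark_start);
	printf("new task mark_start reset:  %llu\n", child.ravg.mark_start);
	return 0;
}
```

Run standalone, the sketch prints the idle task's mark_start unchanged while the freshly woken task's window state is reset, which is the invariant the commit message relies on to drop the save/restore helpers.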
