summary refs log tree commit diff
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- kernel/sched/core.c | 11
1 files changed, 4 insertions, 7 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cccb3564410b..543f7113b1d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2447,7 +2447,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
unsigned long flags;
int cpu;
- init_new_task_load(p, false);
+ init_new_task_load(p);
cpu = get_cpu();
__sched_fork(clone_flags, p);
@@ -5407,19 +5407,15 @@ void init_idle_bootup_task(struct task_struct *idle)
* init_idle - set up an idle thread for a given CPU
* @idle: task in question
* @cpu: cpu the idle task belongs to
- * @cpu_up: differentiate between initial boot vs hotplug
*
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
-void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
+void init_idle(struct task_struct *idle, int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!cpu_up)
- init_new_task_load(idle, true);
-
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
@@ -8571,7 +8567,8 @@ void __init sched_init(void)
* but because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
- init_idle(current, smp_processor_id(), false);
+ init_idle(current, smp_processor_id());
+ init_new_task_load(current);
calc_load_update = jiffies + LOAD_FREQ;