author:    Pavankumar Kondeti <pkondeti@codeaurora.org>  2018-09-20 15:31:36 +0530
committer: Gerrit - the friendly Code Review server <code-review@localhost>  2019-06-25 20:37:06 -0700
commit:    f395d5810f27a22e5ff0230ca7b3ef88857d98d9
tree:      0657de979d070f93516d97c9069b3b81b7eb5ca9
parent:    c94369b4c1fc531a4521dacdb6b08a70ea71fc1b
sched/walt: Fix the memory leak of idle task load pointers
The memory for the task load pointers is allocated twice for each
idle thread except the one on the boot CPU. This happens during boot
from idle_threads_init()->idle_init() via the following 2 paths:

1. idle_init()->fork_idle()->copy_process()->
   sched_fork()->init_new_task_load()

2. idle_init()->fork_idle()->init_idle()->init_new_task_load()

The memory allocation for all tasks happens through the 1st path,
so use the same for idle tasks and kill the 2nd path. Since the
idle thread of the boot CPU does not go through fork_idle(),
allocate the memory for it separately.
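
For illustration, a minimal userspace sketch of the leak pattern
(not kernel code; "struct task", init_load_ptrs(), and NR_CPUS below
are simplified stand-ins for task_struct, init_new_task_load(), and
the per-CPU ravg window arrays):

    #include <stdlib.h>

    #define NR_CPUS 8

    struct task {
            unsigned int *curr_window_cpu;
            unsigned int *prev_window_cpu;
    };

    /* Stand-in for init_new_task_load(): allocates unconditionally. */
    static void init_load_ptrs(struct task *t)
    {
            t->curr_window_cpu = calloc(NR_CPUS, sizeof(*t->curr_window_cpu));
            t->prev_window_cpu = calloc(NR_CPUS, sizeof(*t->prev_window_cpu));
    }

    int main(void)
    {
            struct task idle = { 0 };

            init_load_ptrs(&idle); /* path 1: via sched_fork() */
            init_load_ptrs(&idle); /* path 2: via init_idle(); the arrays
                                      from path 1 are now unreachable */

            free(idle.curr_window_cpu); /* frees only the 2nd allocation */
            free(idle.prev_window_cpu);
            return 0;
    }

After this patch only the 1st path allocates, and sched_init() calls
init_new_task_load() explicitly for the boot CPU's idle task, so each
task owns exactly one pair of arrays.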
Change-Id: I4696a414ffe07d4114b56d326463026019e278f1
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[schikk@codeaurora.org: resolved merge conflicts]
Signed-off-by: Swetha Chikkaboraiah <schikk@codeaurora.org>
 include/linux/sched.h |  2 +-
 kernel/fork.c         |  2 +-
 kernel/sched/core.c   | 11 ++++-------
 kernel/sched/hmp.c    |  5 +----
 kernel/sched/sched.h  |  4 ++--
 kernel/smpboot.c      |  2 +-
 6 files changed, 10 insertions(+), 16 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ccd0f37dcff7..2378cbf2612d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -378,7 +378,7 @@ extern int lockdep_tasklist_lock_is_held(void);
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
+extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_var_t cpu_isolated_map;
diff --git a/kernel/fork.c b/kernel/fork.c
index 25d0a60d166c..e8bc7ed77b59 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1769,7 +1769,7 @@ struct task_struct *fork_idle(int cpu)
 			    cpu_to_node(cpu));
 	if (!IS_ERR(task)) {
 		init_idle_pids(task->pids);
-		init_idle(task, cpu, false);
+		init_idle(task, cpu);
 	}
 
 	return task;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cccb3564410b..543f7113b1d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2447,7 +2447,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	unsigned long flags;
 	int cpu;
 
-	init_new_task_load(p, false);
+	init_new_task_load(p);
 
 	cpu = get_cpu();
 	__sched_fork(clone_flags, p);
@@ -5407,19 +5407,15 @@ void init_idle_bootup_task(struct task_struct *idle)
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
  * @cpu: cpu the idle task belongs to
- * @cpu_up: differentiate between initial boot vs hotplug
  *
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
+void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	if (!cpu_up)
-		init_new_task_load(idle, true);
-
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
@@ -8571,7 +8567,8 @@ void __init sched_init(void)
 	 * but because we are the idle thread, we just pick up running again
 	 * when this runqueue becomes "idle".
 	 */
-	init_idle(current, smp_processor_id(), false);
+	init_idle(current, smp_processor_id());
+	init_new_task_load(current);
 
 	calc_load_update = jiffies + LOAD_FREQ;
 
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index ddcf7cfb7248..5337ac7fcba1 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1544,7 +1544,7 @@ void free_task_load_ptrs(struct task_struct *p)
 	p->ravg.prev_window_cpu = NULL;
 }
 
-void init_new_task_load(struct task_struct *p, bool idle_task)
+void init_new_task_load(struct task_struct *p)
 {
 	int i;
 	u32 init_load_windows = sched_init_task_load_windows;
@@ -1571,9 +1571,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
 	/* Don't have much choice. CPU frequency would be bogus */
 	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
 
-	if (idle_task)
-		return;
-
 	if (init_load_pct)
 		init_load_windows = div64_u64((u64)init_load_pct *
 			  (u64)sched_ravg_window, 100);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 12883703a021..ffae8d49d988 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1154,7 +1154,7 @@ extern unsigned int __read_mostly sched_downmigrate;
 extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int __read_mostly sched_load_granule;
 
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern void init_new_task_load(struct task_struct *p);
 extern u64 sched_ktime_clock(void);
 extern int got_boost_kick(void);
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
@@ -1644,7 +1644,7 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 	return NULL;
 }
 
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+static inline void init_new_task_load(struct task_struct *p)
 {
 }
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 552e154fc77e..979248f5e21a 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -32,7 +32,7 @@ struct task_struct *idle_thread_get(unsigned int cpu)
 
 	if (!tsk)
 		return ERR_PTR(-ENOMEM);
-	init_idle(tsk, cpu, true);
+	init_idle(tsk, cpu);
 	return tsk;
 }
 