summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorOlav Haugan <ohaugan@codeaurora.org>2016-11-27 19:17:28 -0800
committerOlav Haugan <ohaugan@codeaurora.org>2016-11-28 11:00:29 -0800
commite5c095a2c715f7dd1ac71194db5cb5bc2e98ab19 (patch)
tree8339427847034aa97d4f48fd5b4551995c0b66e9
parent841264c5051b8cbd44b2e1b12db94f8cbcd0c43a (diff)
sched/core: Do not free task while holding rq lock
Clearing the hmp request can cause a task to be freed. When a task is freed, the free call might wake up a kworker, which will cause a spinlock lockup on the rq lock. Fix this by avoiding a call to put_task_struct while holding the rq lock. In addition, move the call to clear_hmp_request out of the stopper-thread context, since it is not necessary to do this on the CPU being isolated. Change-Id: Ie577db4701a88849560df385869ff7cf73695a05 Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/hmp.c6
2 files changed, 6 insertions, 2 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a5d101e8a5f2..e60093ea6d68 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5600,7 +5600,6 @@ int do_isolation_work_cpu_stop(void *data)
*/
nohz_balance_clear_nohz_mask(cpu);
- clear_hmp_request(cpu);
local_irq_enable();
return 0;
}
@@ -5725,6 +5724,7 @@ int sched_isolate_cpu(int cpu)
migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+ clear_hmp_request(cpu);
calc_load_migrate(rq);
update_max_interval();
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 968a41e0e81e..a9ccb63c8e23 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -641,14 +641,18 @@ void clear_hmp_request(int cpu)
clear_boost_kick(cpu);
clear_reserved(cpu);
if (rq->push_task) {
+ struct task_struct *push_task = NULL;
+
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->push_task) {
clear_reserved(rq->push_cpu);
- put_task_struct(rq->push_task);
+ push_task = rq->push_task;
rq->push_task = NULL;
}
rq->active_balance = 0;
raw_spin_unlock_irqrestore(&rq->lock, flags);
+ if (push_task)
+ put_task_struct(push_task);
}
}