author     John Dias <joaodias@google.com>               2016-09-15 08:52:27 -0700
committer  Pavankumar Kondeti <pkondeti@codeaurora.org>  2017-06-09 15:14:07 +0530
commit     c3544e35ef382d2ae666464865c9ba779d2264d3
tree       2a3c8e9072d71f614d917e791cd1da343966f09c /kernel/sched/rt.c
parent     9725c4d90bee3b0be78bb1fdc084df1ec08d7d24
sched: avoid scheduling RT threads on cores currently handling softirqs
Bug: 31501544
Change-Id: I99dd7aaa12c11270b28dbabea484bcc8fb8ba0c1
Git-commit: 080ea011fd9f47315e1fc53185872ef813b59d00
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts and fixed
checkpatch warnings]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
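The heart of the patch is the new task_may_not_preempt() predicate: a CPU's current task is treated as a bad preemption victim if its preempt_count has softirq bits raised, or if it is that CPU's ksoftirqd thread and is therefore about to process softirqs non-preemptibly. The following minimal userspace sketch models the predicate; it is not kernel code, struct fake_task and its fields are stand-ins, and the mask mirrors include/linux/preempt.h, where softirq state occupies bits 8-15 of preempt_count.

#include <stdbool.h>
#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)	/* bits 8-15 of preempt_count */

struct fake_task {
	unsigned long preempt_count;	/* stand-in for task_thread_info(task)->preempt_count */
	bool is_ksoftirqd;		/* stand-in for task == per_cpu(ksoftirqd, cpu) */
};

/* Same shape as the patch's task_may_not_preempt(). */
static bool task_may_not_preempt(const struct fake_task *t)
{
	return (t->preempt_count & SOFTIRQ_MASK) || t->is_ksoftirqd;
}

int main(void)
{
	struct fake_task plain      = { 0, false };
	struct fake_task in_softirq = { 1UL << SOFTIRQ_SHIFT, false };	/* serving a softirq */
	struct fake_task ksoftirqd  = { 0, true };

	printf("plain:      %d\n", task_may_not_preempt(&plain));	/* 0: fine to preempt */
	printf("in_softirq: %d\n", task_may_not_preempt(&in_softirq));	/* 1: avoid this cpu */
	printf("ksoftirqd:  %d\n", task_may_not_preempt(&ksoftirqd));	/* 1: avoid this cpu */
	return 0;
}

Note that testing the full SOFTIRQ_MASK, rather than only the in_serving_softirq() bit, also treats sections running under local_bh_disable() as non-preemptible, since bh-disable counts live in the same bit range.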
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c  43

1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 29345ed74069..338d019d0f25 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -5,6 +5,7 @@
 
 #include "sched.h"
 
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
@@ -1456,11 +1457,26 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 }
 #endif
 
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a softirq or is likely to block preemptions soon because
+ * it is a ksoftirq thread.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
+		task == cpu_ksoftirqd;
+}
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	bool may_not_preempt;
 
 #ifdef CONFIG_SCHED_HMP
 	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
@@ -1476,7 +1492,17 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1497,17 +1523,22 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (curr && unlikely(rt_task(curr)) &&
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	if (may_not_preempt ||
+	    (unlikely(rt_task(curr)) &&
 	    (curr->nr_cpus_allowed < 2 ||
-	     curr->prio <= p->prio)) {
+	     curr->prio <= p->prio))) {
 		int target = find_lowest_rq(p);
 
 		/*
-		 * Don't bother moving it if the destination CPU is
-		 * not running a lower priority task.
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Don't bother moving it if the
+		 * destination CPU is not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		    (may_not_preempt ||
+		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 	rcu_read_unlock();
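To summarize the control flow in the last two hunks: the old "consider moving only if curr is a hard-to-preempt RT task" test is widened with the softirq condition, and a softirq-loaded CPU is then abandoned even when find_lowest_rq() returns a CPU running a higher-priority task, where the old code moved only to a strictly lower-priority CPU. The sketch below condenses that decision into one standalone function; cpu_snapshot and redirect_rt_wakeup are illustrative names, and, as in the kernel, a lower numeric prio means a higher priority.

#include <stdbool.h>

struct cpu_snapshot {
	bool curr_may_not_preempt;	/* task_may_not_preempt(curr, cpu) */
	bool curr_is_rt;		/* rt_task(curr) */
	int  curr_nr_cpus_allowed;	/* curr->nr_cpus_allowed */
	int  curr_prio;			/* curr->prio; lower value = higher priority */
};

/*
 * Should a waking RT task of priority p_prio leave this cpu for the CPU
 * picked by find_lowest_rq()? target_found is false when no target was
 * found; target_highest_prio is the best priority already queued there.
 */
static bool redirect_rt_wakeup(const struct cpu_snapshot *cpu, int p_prio,
			       bool target_found, int target_highest_prio)
{
	bool cpu_unattractive =
		cpu->curr_may_not_preempt ||
		(cpu->curr_is_rt &&
		 (cpu->curr_nr_cpus_allowed < 2 || cpu->curr_prio <= p_prio));

	if (!cpu_unattractive || !target_found)
		return false;

	/*
	 * A softirq-loaded cpu is abandoned even if the target runs a
	 * higher-priority task; otherwise keep the old rule of moving
	 * only when the target runs strictly lower-priority work.
	 */
	return cpu->curr_may_not_preempt || p_prio < target_highest_prio;
}

int main(void)
{
	/* curr is mid-softirq: move, even though the target is "hotter". */
	struct cpu_snapshot busy = { true, false, 4, 120 };

	return !redirect_rt_wakeup(&busy, 50, true, 10);	/* exits 0 */
}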