From c3544e35ef382d2ae666464865c9ba779d2264d3 Mon Sep 17 00:00:00 2001
From: John Dias <joaodias@google.com>
Date: Thu, 15 Sep 2016 08:52:27 -0700
Subject: sched: avoid scheduling RT threads on cores currently handling
 softirqs

Bug: 31501544
Change-Id: I99dd7aaa12c11270b28dbabea484bcc8fb8ba0c1
Git-commit: 080ea011fd9f47315e1fc53185872ef813b59d00
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts and fixed
 checkpatch warnings]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
---
 kernel/sched/rt.c | 43 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 37 insertions(+), 6 deletions(-)

(limited to 'kernel/sched/rt.c')

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 29345ed74069..338d019d0f25 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -5,6 +5,7 @@
 #include "sched.h"
 
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 #include <linux/hrtimer.h>
@@ -1456,11 +1457,26 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 }
 #endif
 
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a softirq or is likely to block preemptions soon because
+ * it is a ksoftirq thread.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
+		task == cpu_ksoftirqd;
+}
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	bool may_not_preempt;
 
 #ifdef CONFIG_SCHED_HMP
 	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
@@ -1476,7 +1492,17 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1497,17 +1523,22 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (curr && unlikely(rt_task(curr)) &&
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	if (may_not_preempt ||
+	    (unlikely(rt_task(curr)) &&
 	    (curr->nr_cpus_allowed < 2 ||
-	     curr->prio <= p->prio)) {
+	     curr->prio <= p->prio))) {
 		int target = find_lowest_rq(p);
 
 		/*
-		 * Don't bother moving it if the destination CPU is
-		 * not running a lower priority task.
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Don't bother moving it if the
+		 * destination CPU is not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		    (may_not_preempt ||
+		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 	rcu_read_unlock();
-- 
cgit v1.2.3


From 25e8ecf9daca3078043039ab56d4cb9e1a08bbbb Mon Sep 17 00:00:00 2001
From: John Dias <joaodias@google.com>
Date: Wed, 5 Oct 2016 15:11:40 -0700
Subject: sched: avoid migrating when softint on tgt cpu should be short

The scheduling change (bug 31501544) to avoid putting RT threads on
cores that are handling softint's was catching cases where there was
no reason to believe the softint would take a long time, resulting in
unnecessary migration overhead.
This patch reduces the migration to cases where the core has a
softint that is actually likely to take a long time, as opposed to
the RCU, SCHED, and TIMER softints that are rather quick.

Bug: 31752786
Change-Id: Ib4e179f1e15c736b2fdba31070494e357e9fbbe2
Git-commit: ce05770bd37b8065b61ef650108ecef2b97b148b
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
---
 kernel/sched/rt.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'kernel/sched/rt.c')

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 338d019d0f25..4af75994f283 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1459,16 +1459,20 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 
 /*
  * Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
 */
 bool
 task_may_not_preempt(struct task_struct *task, int cpu)
 {
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
 	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
 
-	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
-		task == cpu_ksoftirqd;
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 
 static int
-- 
cgit v1.2.3