author    John Dias <joaodias@google.com>    2016-10-05 15:11:40 -0700
committer Pavankumar Kondeti <pkondeti@codeaurora.org>    2017-06-09 15:14:07 +0530
commit    25e8ecf9daca3078043039ab56d4cb9e1a08bbbb (patch)
tree      062c3c3b990440738c55232562f41e41233560db /kernel/sched/rt.c
parent    c3544e35ef382d2ae666464865c9ba779d2264d3 (diff)
sched: avoid migrating when softint on tgt cpu should be short
The scheduling change (bug 31501544) to avoid putting RT threads on
cores that are handling softints was catching cases where there was
no reason to believe the softint would take a long time, resulting in
unnecessary migration overhead. This patch restricts the migration to
cases where the core has a softint that is actually likely to take a
long time, as opposed to the RCU, SCHED, and TIMER softints, which are
rather quick.

Bug: 31752786
Change-Id: Ib4e179f1e15c736b2fdba31070494e357e9fbbe2
Git-commit: ce05770bd37b8065b61ef650108ecef2b97b148b
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
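The quick/slow distinction above is encoded in LONG_SOFTIRQ_MASK,
which is defined outside this hunk. A minimal sketch of what that
mask could look like, assuming the standard softirq numbers from
include/linux/interrupt.h and the kernel's BIT() helper (the exact
bit set in the real series may differ):

/*
 * Sketch only, not the definition from this patch series: softirqs
 * whose handlers may run long (network RX/TX, block I/O, tasklets)
 * are flagged; TIMER, SCHED, and RCU are deliberately left out
 * because their handlers are short.
 */
#define LONG_SOFTIRQ_MASK (BIT(NET_TX_SOFTIRQ) | \
                           BIT(NET_RX_SOFTIRQ) | \
                           BIT(BLOCK_SOFTIRQ)  | \
                           BIT(TASKLET_SOFTIRQ))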
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 338d019d0f25..4af75994f283 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1459,16 +1459,20 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 
 /*
  * Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
  */
 bool
 task_may_not_preempt(struct task_struct *task, int cpu)
 {
+        __u32 softirqs = per_cpu(active_softirqs, cpu) |
+                         __IRQ_STAT(cpu, __softirq_pending);
         struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
-        return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
-               task == cpu_ksoftirqd;
+        return ((softirqs & LONG_SOFTIRQ_MASK) &&
+                (task == cpu_ksoftirqd ||
+                 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 
 static int
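For context, task_may_not_preempt() is consumed by the RT wake-up path
(select_task_rq_rt()) earlier in this file, and active_softirqs is
maintained by the companion change for bug 31501544 in
kernel/softirq.c. Neither is part of this hunk; a hedged sketch of the
softirq.c side, with the helper name and details assumed rather than
quoted from the series:

/*
 * Sketch only (kernel/softirq.c): publish the set of softirqs being
 * processed on this cpu so that task_may_not_preempt() can test it
 * against LONG_SOFTIRQ_MASK.
 */
DEFINE_PER_CPU(__u32, active_softirqs);

static void __do_softirq_sketch(__u32 pending)   /* hypothetical name */
{
        /* record which softirqs are about to run on this cpu */
        __this_cpu_write(active_softirqs, pending);

        /* ... handlers for each bit set in 'pending' execute here ... */

        /* clear the record once the handler loop is done */
        __this_cpu_write(active_softirqs, 0);
}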