| author | John Dias &lt;joaodias@google.com&gt; | 2016-10-05 15:11:40 -0700 |
|---|---|---|
| committer | Pavankumar Kondeti &lt;pkondeti@codeaurora.org&gt; | 2017-06-09 15:14:07 +0530 |
| commit | 25e8ecf9daca3078043039ab56d4cb9e1a08bbbb | |
| tree | 062c3c3b990440738c55232562f41e41233560db /kernel | |
| parent | c3544e35ef382d2ae666464865c9ba779d2264d3 | |
sched: avoid migrating when softint on tgt cpu should be short
The scheduling change (bug 31501544) that avoids putting RT threads on cores
that are handling softints was catching cases where there was no reason
to believe the softint would take a long time, resulting in unnecessary
migration overhead. This patch restricts the migration to cases where
the core has a softint that is actually likely to take a long time,
as opposed to the RCU, SCHED, and TIMER softints, which are rather quick.
Bug: 31752786
Change-Id: Ib4e179f1e15c736b2fdba31070494e357e9fbbe2
Git-commit: ce05770bd37b8065b61ef650108ecef2b97b148b
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
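
The diff below gates on LONG_SOFTIRQ_MASK, which this patch does not itself define; it comes from a companion change in the same series. As a rough sketch only (the exact set of bits is an assumption, not taken from this diff), such a mask would name the softirq vectors that tend to run long, leaving out the TIMER, SCHED, and RCU vectors the commit message calls rather quick:

```c
#include <linux/interrupt.h>	/* softirq vector numbers: NET_TX_SOFTIRQ, ... */

/*
 * Sketch of a "long softirq" mask (assumed; the real definition lives in
 * a companion patch). Network, block, and tasklet softirqs can run long,
 * so they mark a CPU as a poor target for a woken RT task; the quick
 * TIMER/SCHED/RCU (and HI/HRTIMER) vectors are deliberately excluded.
 */
#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
			   (1 << NET_RX_SOFTIRQ)        | \
			   (1 << BLOCK_SOFTIRQ)         | \
			   (1 << BLOCK_IOPOLL_SOFTIRQ)  | \
			   (1 << TASKLET_SOFTIRQ))
```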
Diffstat (limited to 'kernel')

| -rw-r--r-- | kernel/sched/rt.c | 12 |
|---|---|---|
| -rw-r--r-- | kernel/softirq.c | 9 |

2 files changed, 17 insertions, 4 deletions
```diff
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 338d019d0f25..4af75994f283 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1459,16 +1459,20 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 
 /*
  * Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
  */
 bool task_may_not_preempt(struct task_struct *task, int cpu)
 {
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
 	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
 
-	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
-	       task == cpu_ksoftirqd;
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 
 static int
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..39ffd41594ce 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,13 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -253,6 +260,7 @@ asmlinkage __visible void __do_softirq(void)
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -282,6 +290,7 @@ restart:
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	rcu_bh_qs();
 
 	local_irq_disable();
```
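
Note that active_softirqs is written with plain __this_cpu_write and read cross-CPU with no synchronization: a stale read only yields a slightly out-of-date placement hint, which the comment in the diff says is acceptable. For context, the earlier bug 31501544 change (not shown here) consults task_may_not_preempt() on the RT wakeup path; a minimal sketch of such a caller, where select_task_rq_rt_sketch and the fallback policy are illustrative assumptions rather than the actual companion code:

```c
/*
 * Illustrative sketch only: how an RT wakeup path could use
 * task_may_not_preempt() to steer a woken RT task away from a CPU
 * that is running (or about to run) long softirqs.
 */
static int select_task_rq_rt_sketch(struct task_struct *p, int cpu)
{
	struct task_struct *curr = cpu_rq(cpu)->curr;

	if (task_may_not_preempt(curr, cpu)) {
		/* find_lowest_rq() is the existing RT push/pull helper. */
		int target = find_lowest_rq(p);

		/* Fall back to the original CPU if no alternative exists. */
		if (target != -1)
			return target;
	}

	return cpu;
}
```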
