diff options
| author | Linux Build Service Account <lnxbuild@quicinc.com> | 2017-07-03 07:57:51 -0700 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2017-07-03 07:57:51 -0700 |
| commit | 09bf411c31201e31b059d69992213aa2ab898594 (patch) | |
| tree | c65d420e2e97259dd8118bc2c29dd4858eb85350 /kernel | |
| parent | e82415666d470a57a3fd4835a4fc2b952df4a55e (diff) | |
| parent | 6933043199fe89f4fc9150df58039b3ee05942fb (diff) | |
Merge "osq_lock: avoid live-lock issue for RT task"
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/locking/osq_lock.c | 9 |
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a37857ab55..99b8d991126f 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -1,6 +1,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/osq_lock.h>
+#include <linux/sched/rt.h>
 
 /*
  * An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -85,6 +86,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
+	struct task_struct *task = current;
 	int curr = encode_cpu(smp_processor_id());
 	int old;
 
@@ -118,8 +120,13 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	while (!READ_ONCE(node->locked)) {
 		/*
 		 * If we need to reschedule bail... so we can block.
+		 * If a task spins on owner on a CPU after acquiring
+		 * osq_lock while a RT task spins on another CPU to
+		 * acquire osq_lock, it will starve the owner from
+		 * completing if owner is to be scheduled on the same CPU.
+		 * It will be a live lock.
		 */
-		if (need_resched())
+		if (need_resched() || rt_task(task))
 			goto unqueue;
 
 		cpu_relax_lowlatency();
