| field | value | date |
|---|---|---|
| author | Mark Brown <broonie@kernel.org> | 2016-07-29 21:38:36 +0100 |
| committer | Mark Brown <broonie@kernel.org> | 2016-07-29 21:38:36 +0100 |
| commit | b0ba6b0a5eb2b51037a07dbf5a7470ca804d575c (patch) | |
| tree | 0a4c5f048afa1125d8bc242477965cec699f94ee /kernel/locking/qspinlock.c | |
| parent | ac248a8b81109d6b41a9338a09538e103f7e31c0 (diff) | |
| parent | b05965f284db3e086022f4e318e46cb5bffb1376 (diff) | |
Merge tag 'v4.4.16' into linux-linaro-lsk-v4.4
This is the 4.4.16 stable release
# gpg: Signature made Wed 27 Jul 2016 17:48:38 BST using RSA key ID 6092693E
# gpg: requesting key 6092693E from hkp server the.earth.li
# gpg: key 6092693E: public key "Greg Kroah-Hartman (Linux kernel stable release signing key) <greg@kroah.com>" imported
# gpg: public key of ultimately trusted key B4B0BED6 not found
# gpg: 2 marginal(s) needed, 1 complete(s) needed, PGP trust model
# gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
# gpg: Total number processed: 1
# gpg: imported: 1 (RSA: 1)
# gpg: Good signature from "Greg Kroah-Hartman (Linux kernel stable release signing key) <greg@kroah.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 647F 2865 4894 E3BD 4571 99BE 38DB BDC8 6092 693E
Diffstat (limited to 'kernel/locking/qspinlock.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | kernel/locking/qspinlock.c | 60 |

1 file changed, 60 insertions, 0 deletions
```diff
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 87e9ce6a63c5..8173bc7fec92 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -255,6 +255,66 @@ static __always_inline void __pv_wait_head(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
+ *
+ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	u32 val;
+
+	for (;;) {
+		val = atomic_read(&lock->val);
+
+		if (!val) /* not locked, we're done */
+			goto done;
+
+		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+			break;
+
+		/* not locked, but pending, wait until we observe the lock */
+		cpu_relax();
+	}
+
+	/* any unlock is good */
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+done:
+	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
```
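To make the new wait loop easier to experiment with outside a kernel tree, here is a minimal userspace sketch of the same logic using C11 atomics. It assumes qspinlock's word layout (locked byte in bits 0-7, pending and tail above); `Q_LOCKED_MASK` and `unlock_wait_model` are illustrative names for this sketch, not kernel identifiers.

```c
#include <stdatomic.h>

/* Low byte of the 32-bit lock word is the locked byte, as in qspinlock. */
#define Q_LOCKED_MASK 0xffU

static void unlock_wait_model(atomic_uint *lock)
{
	unsigned int val;

	for (;;) {
		val = atomic_load_explicit(lock, memory_order_relaxed);

		if (!val)			/* not locked, we're done */
			goto done;

		if (val & Q_LOCKED_MASK)	/* locked, go wait for unlock */
			break;

		/* pending set, but the locked-byte store is not visible yet */
	}

	/* any unlock is good */
	while (atomic_load_explicit(lock, memory_order_relaxed) & Q_LOCKED_MASK)
		;

done:
	/* stands in for smp_rmb(): CTRL + RMB -> ACQUIRE */
	atomic_thread_fence(memory_order_acquire);
}
```

The two-phase structure is the point of the patch: the first loop waits until the lock's !0 state resolves into an observed locked byte (or the word drops to zero), and only then does the second loop wait for the unlock, so a delayed locked-byte store cannot be mistaken for an unlocked lock.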

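For context on why the trailing `smp_rmb()` (CTRL + RMB -> ACQUIRE) matters to callers, below is a hedged sketch of the classic teardown pattern that `spin_unlock_wait()` exists to support. `struct obj` and `obj_unpublish()` are hypothetical names invented for illustration; they are not part of this patch.

```c
/* Hypothetical teardown path; obj_unpublish() is an invented helper. */
static void obj_destroy(struct obj *obj)
{
	obj_unpublish(obj);	/* no new CPU can look the object up */

	/*
	 * A CPU that found obj before it was unpublished may still be
	 * inside obj->lock's critical section. spin_unlock_wait()
	 * returns once the lock is observed unlocked, and its ACQUIRE
	 * semantics keep the kfree() below from passing that critical
	 * section's stores.
	 */
	spin_unlock_wait(&obj->lock);

	kfree(obj);
}
```

Without the two-phase loop above, a destroyer like this could observe a zero lock word while another CPU had already (load-)ACQUIREd the lock but not yet made its locked-byte store visible, and free the object under a live critical section.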