Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c | 12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 89350f924c85..14b9cca36b05 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
+#include <linux/delay.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * values at the cost of a few extra spins.
 		 */
 		cpu_relax_lowlatency();
+
+		/*
+		 * On ARM systems, we must slow down the waiter's repeated
+		 * acquisition of spin_mlock and the atomics on the lock
+		 * count, or we risk starving out a thread attempting to
+		 * release the mutex. The mutex slowpath release must take
+		 * the wait_lock spinlock, which can share a monitor with
+		 * the other waiter atomics in the mutex data structure, so
+		 * we must take care to rate-limit the waiters.
+		 */
+		udelay(1);
 	}
 
 	osq_unlock(&lock->osq);
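
For readers who want to see where the delay sits relative to the contended atomics, below is a minimal user-space sketch of the idea. The names toy_mutex and toy_optimistic_spin are illustrative, not the kernel's, and usleep() stands in for udelay(): each failed optimistic-spin attempt is followed by a short back-off so a thread releasing the lock is not starved of the cache line by the waiters' back-to-back atomic operations.

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

struct toy_mutex {
	atomic_int count;	/* 1 = unlocked, 0 = locked */
};

static bool toy_optimistic_spin(struct toy_mutex *lock, int max_spins)
{
	for (int i = 0; i < max_spins; i++) {
		int expected = 1;

		/* Take the lock if it currently looks free. */
		if (atomic_compare_exchange_strong(&lock->count, &expected, 0))
			return true;

		/*
		 * Back off briefly between attempts so the releasing
		 * thread can win the contended cache line; this mirrors
		 * the udelay(1) the patch adds to the kernel's spin loop.
		 */
		usleep(1);
	}

	return false;	/* caller falls back to the sleeping slowpath */
}

The sketch only illustrates the placement of the delay; the real kernel loop also checks whether the lock owner is still running on a CPU and whether the spinner should reschedule before each retry.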