summaryrefslogtreecommitdiff
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2016-12-12 16:44:23 -0800
committerVinayak Menon <vinmenon@codeaurora.org>2017-06-19 15:28:58 +0530
commitbcfa0a914033438a4e8812496f32f3bd512332a9 (patch)
treeb0d3718a619d37df14c2b9c2d88980c4bba58ab9 /mm/vmalloc.c
parentfe5c1c8fb937022820d4c04c89c4e972ee2b9639 (diff)
mm: turn vmap_purge_lock into a mutex
The purge_lock spinlock causes high latencies with non RT kernel. This has been reported multiple times on lkml [1] [2] and affects applications like audio. This patch replaces it with a mutex to allow preemption while holding the lock. Thanks to Joel Fernandes for the detailed report and analysis as well as an earlier attempt at fixing this issue. [1] http://lists.openwall.net/linux-kernel/2016/03/23/29 [2] https://lkml.org/lkml/2016/10/9/59 Change-Id: I57d4e9c7ce5aeb3273574026da2a8b737ef0b809 Link: http://lkml.kernel.org/r/1479474236-4139-10-git-send-email-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Tested-by: Jisheng Zhang <jszhang@marvell.com> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Joel Fernandes <joelaf@google.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: John Dias <joaodias@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Git-commit: f9e09977671b618aeb25ddc0d4c9a84d5b5cde9d Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--mm/vmalloc.c14
1 files changed, 7 insertions, 7 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index db37a7208c03..30818930331f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -634,7 +634,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
* by this lock, but we want to avoid concurrent calls for performance
* reasons and to make the pcpu_get_vm_areas more deterministic.
*/
-static DEFINE_SPINLOCK(vmap_purge_lock);
+static DEFINE_MUTEX(vmap_purge_lock);
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
@@ -688,9 +688,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
*/
static void try_purge_vmap_area_lazy(void)
{
- if (spin_trylock(&vmap_purge_lock)) {
+ if (mutex_trylock(&vmap_purge_lock)) {
__purge_vmap_area_lazy(ULONG_MAX, 0);
- spin_unlock(&vmap_purge_lock);
+ mutex_unlock(&vmap_purge_lock);
}
}
@@ -699,10 +699,10 @@ static void try_purge_vmap_area_lazy(void)
*/
static void purge_vmap_area_lazy(void)
{
- spin_lock(&vmap_purge_lock);
+ mutex_lock(&vmap_purge_lock);
purge_fragmented_blocks_allcpus();
__purge_vmap_area_lazy(ULONG_MAX, 0);
- spin_unlock(&vmap_purge_lock);
+ mutex_unlock(&vmap_purge_lock);
}
/*
@@ -1091,11 +1091,11 @@ void vm_unmap_aliases(void)
rcu_read_unlock();
}
- spin_lock(&vmap_purge_lock);
+ mutex_lock(&vmap_purge_lock);
purge_fragmented_blocks_allcpus();
if (!__purge_vmap_area_lazy(start, end) && flush)
flush_tlb_kernel_range(start, end);
- spin_unlock(&vmap_purge_lock);
+ mutex_unlock(&vmap_purge_lock);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);