From 1897bdc4d33167e9036460631d1349e59d841f2d Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Thu, 13 Nov 2014 13:46:09 +1100
Subject: mmu_notifier: add mmu_notifier_invalidate_range()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This notifier closes an important gap in the current mmu_notifier
implementation: the existing callbacks are called too early or too late
to reliably manage a non-CPU TLB.  Specifically, invalidate_range_start()
is called when all pages are still mapped and invalidate_range_end() when
all pages are unmapped and potentially freed.

This is fine when the users of the mmu_notifiers manage their own SoftTLB,
like KVM does.  When the TLB is managed in software it is easy to wipe out
entries for a given range and to prevent new entries from being established
until invalidate_range_end() is called.

But when the user of mmu_notifiers has to manage a hardware TLB, it can
still wipe out TLB entries in invalidate_range_start(), but it cannot make
sure that no new TLB entries in the given range are established between
invalidate_range_start() and invalidate_range_end().

To avoid silent data corruption, the entries in the non-CPU TLB need to be
flushed when the pages are unmapped (at this point in time no _new_ TLB
entries can be established in the non-CPU TLB) but not yet freed (as the
non-CPU TLB may still have _existing_ entries pointing to the pages about
to be freed).

To fix this problem we need to catch the moment when the Linux VMM flushes
remote TLBs (as a non-CPU TLB is not very different from a remote CPU TLB),
as this is the point in time when the pages are unmapped but _not_ yet
freed.  The mmu_notifier_invalidate_range() function aims to catch that
moment.

IOMMU code will be one user of the notifier callback.  Currently this is
only the AMD IOMMUv2 driver, but its code is about to be generalized and
converted to a generic IOMMU-API extension to fit the needs of similar
functionality in other IOMMUs as well.

The current attempt in the AMD IOMMUv2 driver to work around the
invalidate_range_start()/end() shortcoming is to assign an empty page
table to the non-CPU TLB between any invalidate_range_start()/end() calls.
With the empty page table assigned, every page-table walk to re-fill the
non-CPU TLB causes a page fault that is reported to the IOMMU driver via
an interrupt, possibly causing interrupt storms.

The page-fault handler in the AMD IOMMUv2 driver doesn't handle the fault
if an invalidate_range_start()/end() pair is active; it just reports back
SUCCESS to the device and lets it re-fault the page.  But existing hardware
(newer Radeon GPUs) that makes use of this feature doesn't re-fault
indefinitely: after a certain number of faults for the same address the
device enters a failure state and needs to be reset.

To avoid the GPUs entering a failure state we need to get rid of the
empty-page-table workaround and use the mmu_notifier_invalidate_range()
function introduced with this patch.

Signed-off-by: Joerg Roedel
Reviewed-by: Andrea Arcangeli
Reviewed-by: Jérôme Glisse
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Hugh Dickins
Cc: Mel Gorman
Cc: Johannes Weiner
Cc: Jay Cornwall
Cc: Oded Gabbay
Cc: Suravee Suthikulpanit
Cc: Jesse Barnes
Cc: David Woodhouse
Signed-off-by: Andrew Morton
Signed-off-by: Oded Gabbay
---
 include/linux/mmu_notifier.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 88787bb4b3b9..17907908d1df 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -242,6 +242,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 		__mmu_notifier_invalidate_range_end(mm, start, end);
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 	mm->mmu_notifier_mm = NULL;
@@ -342,6 +347,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 {
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 }
--
cgit v1.2.3
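To make the ordering described in the changelog concrete, here is a
condensed, illustrative sketch of a single-page unmap path.  It is not
code from this series: example_unmap_one() is a hypothetical helper (the
caller is assumed to hold the page-table lock for the PTE), but the
notifier calls and the CPU-TLB flush appear in the order described above,
with the new mmu_notifier_invalidate_range() issued after the page is
unmapped and before it can be freed:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>

/* Hypothetical helper, for illustration only: unmap one PTE-mapped page.
 * The caller is assumed to hold the pte lock for @ptep. */
static void example_unmap_one(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep, struct page *page)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Pages are still mapped; a software-managed TLB (KVM-style) can
	 * block new translations from here until ..._end(). */
	mmu_notifier_invalidate_range_start(mm, addr, addr + PAGE_SIZE);

	/* Unmap the page and flush the CPU TLB. */
	ptep_clear_flush(vma, addr, ptep);

	/* The new hook: the page is unmapped, so no new non-CPU TLB
	 * entries can be established, but it is not yet freed, so a
	 * hardware TLB can be flushed here without risking either stale
	 * entries or use-after-free. */
	mmu_notifier_invalidate_range(mm, addr, addr + PAGE_SIZE);

	page_remove_rmap(page);
	put_page(page);			/* the page may now be freed */

	mmu_notifier_invalidate_range_end(mm, addr, addr + PAGE_SIZE);
}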
From 34ee645e83b60ae3d5955f70ab9ab9a159136673 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Thu, 13 Nov 2014 13:46:09 +1100
Subject: mmu_notifier: call mmu_notifier_invalidate_range() from VMM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add calls to the new mmu_notifier_invalidate_range() function to all
places in the VMM that need it.

Signed-off-by: Joerg Roedel
Reviewed-by: Andrea Arcangeli
Reviewed-by: Jérôme Glisse
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Hugh Dickins
Cc: Mel Gorman
Cc: Johannes Weiner
Cc: Jay Cornwall
Cc: Oded Gabbay
Cc: Suravee Suthikulpanit
Cc: Jesse Barnes
Cc: David Woodhouse
Signed-off-by: Andrew Morton
Signed-off-by: Oded Gabbay
---
 include/linux/mmu_notifier.h | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 17907908d1df..966da2b4b803 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -284,6 +284,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	__young;							\
 })
 
+#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
+({									\
+	unsigned long ___addr = __address & PAGE_MASK;			\
+	struct mm_struct *___mm = (__vma)->vm_mm;			\
+	pte_t ___pte;							\
+									\
+	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
+	mmu_notifier_invalidate_range(___mm, ___addr,			\
+					___addr + PAGE_SIZE);		\
+									\
+	___pte;								\
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd)			\
+({									\
+	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
+	struct mm_struct *___mm = (__vma)->vm_mm;			\
+	pmd_t ___pmd;							\
+									\
+	___pmd = pmdp_clear_flush(__vma, __haddr, __pmd);		\
+	mmu_notifier_invalidate_range(___mm, ___haddr,			\
+					___haddr + HPAGE_PMD_SIZE);	\
+									\
+	___pmd;								\
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd)			\
+({									\
+	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
+	pmd_t ___pmd;							\
+									\
+	___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd);		\
+	mmu_notifier_invalidate_range(__mm, ___haddr,			\
+				      ___haddr + HPAGE_PMD_SIZE);	\
+									\
+	___pmd;								\
+})
+
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -362,6 +400,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
--
cgit v1.2.3
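The effect of the new helpers can be seen by expanding one of them at a
call site.  The fragment below is a hypothetical write-protect-style
caller, not code from this series; it only shows that
ptep_clear_flush_notify() is the open-coded pair "clear the PTE and flush
the CPU TLB, then invalidate the secondary TLBs for the containing page"
collapsed into one step (with CONFIG_MMU_NOTIFIER disabled it falls back
to plain ptep_clear_flush()):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical caller, for illustration only. */
static pte_t example_wp_clear(struct vm_area_struct *vma,
			      unsigned long address, pte_t *ptep)
{
	/*
	 * Open-coded equivalent of the helper:
	 *
	 *	pte_t entry = ptep_clear_flush(vma, address, ptep);
	 *	mmu_notifier_invalidate_range(vma->vm_mm,
	 *				      address & PAGE_MASK,
	 *				      (address & PAGE_MASK) + PAGE_SIZE);
	 *	return entry;
	 *
	 * The helper added above does the same in one step:
	 */
	return ptep_clear_flush_notify(vma, address, ptep);
}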
From 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Thu, 13 Nov 2014 13:46:09 +1100
Subject: mmu_notifier: add the callback for mmu_notifier_invalidate_range()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that the mmu_notifier_invalidate_range() calls are in place, add the
callback to allow subsystems to register against it.

Signed-off-by: Joerg Roedel
Reviewed-by: Andrea Arcangeli
Reviewed-by: Jérôme Glisse
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Hugh Dickins
Cc: Mel Gorman
Cc: Johannes Weiner
Cc: Jay Cornwall
Cc: Oded Gabbay
Cc: Suravee Suthikulpanit
Cc: Jesse Barnes
Cc: David Woodhouse
Signed-off-by: Andrew Morton
Signed-off-by: Oded Gabbay
---
 include/linux/mmu_notifier.h | 37 ++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 966da2b4b803..94d19f64cecf 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
 	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_sem and/or the
-	 * locks protecting the reverse maps are held. The subsystem
-	 * must guarantee that no additional references are taken to
-	 * the pages in the range established between the call to
-	 * invalidate_range_start() and the matching call to
-	 * invalidate_range_end().
+	 * locks protecting the reverse maps are held. If the subsystem
+	 * can't guarantee that no additional references are taken to
+	 * the pages in the range, it has to implement the
+	 * invalidate_range() notifier to remove any references taken
+	 * after invalidate_range_start().
 	 *
 	 * Invalidation of multiple concurrent ranges may be
 	 * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
 	void (*invalidate_range_end)(struct mmu_notifier *mn,
 				     struct mm_struct *mm,
 				     unsigned long start, unsigned long end);
+
+	/*
+	 * invalidate_range() is either called between
+	 * invalidate_range_start() and invalidate_range_end() when the
+	 * VM has to free pages that were unmapped, but before the
+	 * pages are actually freed, or outside of _start()/_end() when
+	 * a (remote) TLB flush is necessary.
+	 *
+	 * If invalidate_range() is used to manage a non-CPU TLB with
+	 * shared page tables, it is not necessary to implement the
+	 * invalidate_range_start()/end() notifiers, as
+	 * invalidate_range() already catches the points in time when
+	 * an external TLB range needs to be flushed.
+	 *
+	 * The invalidate_range() function is called under the ptl
+	 * spin-lock and is not allowed to sleep.
+	 *
+	 * Note that this function might be called with just a sub-range
+	 * of what was passed to invalidate_range_start()/end(), if
+	 * called between those functions.
+	 */
+	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+				 unsigned long start, unsigned long end);
 };
 
 /*
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -245,6 +270,8 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end)
 {
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
--
cgit v1.2.3
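With the callback available, a subsystem that manages a hardware TLB can
register for it.  The sketch below is not the AMD IOMMUv2 conversion
referred to in the first changelog; my_device_flush_iotlb() and
my_bind_mm() are hypothetical stand-ins that only show how an
invalidate_range() implementation plugs into mmu_notifier_register():

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical device-TLB flush primitive, standing in for an IOMMU
 * driver's IOTLB invalidation.  It must not sleep: invalidate_range()
 * is called under the ptl spin-lock. */
static void my_device_flush_iotlb(struct mm_struct *mm,
				  unsigned long start, unsigned long end);

static void my_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	my_device_flush_iotlb(mm, start, end);
}

static const struct mmu_notifier_ops my_mn_ops = {
	/*
	 * A device that shares the CPU page tables needs only this
	 * callback; invalidate_range_start()/end() are optional in
	 * that case (see the comment added in the hunk above).
	 */
	.invalidate_range = my_invalidate_range,
};

static struct mmu_notifier my_mn = {
	.ops = &my_mn_ops,
};

/* Hypothetical: attach the notifier to a process address space. */
static int my_bind_mm(struct mm_struct *mm)
{
	return mmu_notifier_register(&my_mn, mm);
}

Because invalidate_range() runs under the page-table lock, the flush has
to complete without sleeping; a driver that needs to sleep still has to
rely on the invalidate_range_start()/end() pair instead.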