Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/fault.c   |  6 +++---
 arch/x86/mm/ioremap.c |  4 ++--
 arch/x86/mm/kaiser.c  |  2 +-
 arch/x86/mm/kmmio.c   | 12 +++++++-----
 arch/x86/mm/pgtable.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 61 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e830c71a1323..e0a34b0d381e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,7 +287,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!pmd_k)
 		return -1;
 
-	if (pmd_huge(*pmd_k))
+	if (pmd_large(*pmd_k))
 		return 0;
 
 	pte_k = pte_offset_kernel(pmd_k, address);
@@ -407,7 +407,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 		BUG();
 
-	if (pud_huge(*pud))
+	if (pud_large(*pud))
 		return 0;
 
 	pmd = pmd_offset(pud, address);
@@ -418,7 +418,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 		BUG();
 
-	if (pmd_huge(*pmd))
+	if (pmd_large(*pmd))
 		return 0;
 
 	pte_ref = pte_offset_kernel(pmd_ref, address);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b9c78f3bcd67..53ab3f367472 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -348,11 +348,11 @@ void iounmap(volatile void __iomem *addr)
 	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
+	mmiotrace_iounmap(addr);
+
 	addr = (volatile void __iomem *)
 		(PAGE_MASK & (unsigned long __force)addr);
 
-	mmiotrace_iounmap(addr);
-
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 2298434f7bdb..7a72e32e4806 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -363,7 +363,7 @@ void __init kaiser_init(void)
 	kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end,
 				       __PAGE_KERNEL_RX);
 
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	kaiser_add_user_map_ptrs_early(__irqentry_text_start,
 				       __irqentry_text_end,
 				       __PAGE_KERNEL_RX);
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index ddb2244b06a1..76604c8a2a48 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -434,17 +434,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
 	unsigned long flags;
 	int ret = 0;
 	unsigned long size = 0;
+	unsigned long addr = p->addr & PAGE_MASK;
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	unsigned int l;
 	pte_t *pte;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
-	if (get_kmmio_probe(p->addr)) {
+	if (get_kmmio_probe(addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
 
-	pte = lookup_address(p->addr, &l);
+	pte = lookup_address(addr, &l);
 	if (!pte) {
 		ret = -EINVAL;
 		goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
 	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
 	while (size < size_lim) {
-		if (add_kmmio_fault_page(p->addr + size))
+		if (add_kmmio_fault_page(addr + size))
 			pr_err("Unable to set page fault.\n");
 		size += page_level_size(l);
 	}
@@ -527,19 +528,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 {
 	unsigned long flags;
 	unsigned long size = 0;
+	unsigned long addr = p->addr & PAGE_MASK;
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
 	unsigned int l;
 	pte_t *pte;
 
-	pte = lookup_address(p->addr, &l);
+	pte = lookup_address(addr, &l);
 	if (!pte)
 		return;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	while (size < size_lim) {
-		release_kmmio_fault_page(p->addr + size, &release_list);
+		release_kmmio_fault_page(addr + size, &release_list);
 		size += page_level_size(l);
 	}
 	list_del_rcu(&p->list);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dbc27a2b4ad5..c013326a0d7a 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -666,4 +666,52 @@ int pmd_clear_huge(pmd_t *pmd)
 
 	return 0;
 }
+
+/**
+ * pud_free_pmd_page - Clear pud entry and free pmd page.
+ * @pud: Pointer to a PUD.
+ *
+ * Context: The pud range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pud_free_pmd_page(pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	if (pud_none(*pud))
+		return 1;
+
+	pmd = (pmd_t *)pud_page_vaddr(*pud);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		if (!pmd_free_pte_page(&pmd[i]))
+			return 0;
+
+	pud_clear(pud);
+	free_page((unsigned long)pmd);
+
+	return 1;
+}
+
+/**
+ * pmd_free_pte_page - Clear pmd entry and free pte page.
+ * @pmd: Pointer to a PMD.
+ *
+ * Context: The pmd range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pmd_free_pte_page(pmd_t *pmd)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd))
+		return 1;
+
+	pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pmd_clear(pmd);
+	free_page((unsigned long)pte);
+
+	return 1;
+}
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
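
Note on the kmmio.c change above: it matters when a probe's MMIO window does not start on a page boundary. The old code handed the raw p->addr to lookup_address() and to the fault-page arm/release loops, while size_lim already included the in-page offset, so the two could disagree about which pages the window covers. Below is a minimal user-space sketch of the corrected arithmetic; the struct probe type, the example addresses, and the fixed 4 KiB page size are assumptions for illustration (the kernel steps by page_level_size(l), which can be 2 MiB or 1 GiB for huge mappings).

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed 4 KiB pages */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Hypothetical stand-in for the two struct kmmio_probe fields used here. */
struct probe {
	unsigned long addr;	/* start of the MMIO window, may be unaligned */
	unsigned long len;	/* length of the window in bytes */
};

int main(void)
{
	struct probe p = { .addr = 0xfed000f0UL, .len = 0x20UL };

	/* Same arithmetic as the patched register_kmmio_probe(): */
	unsigned long addr = p.addr & PAGE_MASK;		 /* aligned base */
	unsigned long size_lim = p.len + (p.addr & ~PAGE_MASK);	 /* offset + len */
	unsigned long size;

	/* The while loop in the patch arms one fault page per step, via
	 * add_kmmio_fault_page(addr + size); printing stands in for that. */
	for (size = 0; size < size_lim; size += PAGE_SIZE)
		printf("arm fault page at 0x%lx\n", addr + size);

	return 0;
}

With p.addr = 0xfed000f0 the aligned base is 0xfed00000 and size_lim is 0x110, so exactly the one page the window touches gets armed; before the fix, lookups used the unaligned 0xfed000f0 directly.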

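The pgtable.c helpers added above enforce a strict teardown order: every PTE page under a PMD entry is cleared and freed before the PMD page itself is released and the PUD entry cleared, and each level returns early when its entry is already none. Below is a rough user-space model of that ordering, with a hypothetical entry_t and model_* names standing in for live pud/pmd entries; the real helpers additionally rely on the range having been unmapped and the TLB flushed first.

#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PMD 512

/* Hypothetical model: an entry either points at a child table page
 * or is empty (NULL), standing in for pud_none()/pmd_none(). */
typedef struct { void *table; } entry_t;

/* Models pmd_free_pte_page(): clear the entry, then free the pte page. */
static int model_pmd_free_pte_page(entry_t *pmd)
{
	void *pte;

	if (!pmd->table)		/* pmd_none(*pmd) */
		return 1;

	pte = pmd->table;
	pmd->table = NULL;		/* pmd_clear(pmd) */
	free(pte);			/* free_page(...) */
	return 1;
}

/* Models pud_free_pmd_page(): every child pte page must be gone
 * before the pmd page is freed and the pud entry cleared. */
static int model_pud_free_pmd_page(entry_t *pud)
{
	entry_t *pmd;
	int i;

	if (!pud->table)		/* pud_none(*pud) */
		return 1;

	pmd = pud->table;
	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!model_pmd_free_pte_page(&pmd[i]))
			return 0;

	pud->table = NULL;		/* pud_clear(pud) */
	free(pmd);
	return 1;
}

int main(void)
{
	entry_t pud = { calloc(PTRS_PER_PMD, sizeof(entry_t)) };
	entry_t *pmd = pud.table;

	pmd[3].table = malloc(4096);	/* one populated pte page */

	printf("cleared: %d\n", model_pud_free_pmd_page(&pud));
	return 0;
}

Freeing the pud here releases the pmd page and the single populated pte page beneath it, mirroring how pud_free_pmd_page() walks all PTRS_PER_PMD entries before clearing the pud; this is what allows a huge-page mapping to safely replace a previously populated page-table subtree instead of failing.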