Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c     |   4
-rw-r--r--  arch/powerpc/mm/fault.c               |  26
-rw-r--r--  arch/powerpc/mm/gup.c                 |  42
-rw-r--r--  arch/powerpc/mm/hash_native_64.c      |  46
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c       |  40
-rw-r--r--  arch/powerpc/mm/hugetlbpage-book3e.c  |   3
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         |  12
-rw-r--r--  arch/powerpc/mm/init_32.c             |   5
-rw-r--r--  arch/powerpc/mm/init_64.c             |  55
-rw-r--r--  arch/powerpc/mm/mem.c                 |  11
-rw-r--r--  arch/powerpc/mm/numa.c                | 111
-rw-r--r--  arch/powerpc/mm/pgtable.c             |  19
-rw-r--r--  arch/powerpc/mm/pgtable_32.c          |   5
-rw-r--r--  arch/powerpc/mm/pgtable_64.c          |   7
-rw-r--r--  arch/powerpc/mm/slb.c                 |   9
-rw-r--r--  arch/powerpc/mm/slice.c               |   2
-rw-r--r--  arch/powerpc/mm/subpage-prot.c        |   4
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c          |   2
18 files changed, 247 insertions, 156 deletions
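
Most of the churn below follows one pattern: structures shared with hardware or firmware (hash page table entries in hash_native_64.c, the SLB shadow buffer in slb.c, the lppaca in fault.c, flattened-device-tree cells in hash_utils_64.c and numa.c) are defined as big-endian, so raw loads and stores are replaced with be64_to_cpu()/cpu_to_be64(), be32_to_cpu(), or of_read_number() so the same code also runs on a little-endian kernel. The userspace sketch below shows only the shape of that access pattern; hash_pte, HPTE_V_VALID and the helper names here are illustrative stand-ins, not the kernel's definitions.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative only -- not the kernel's HPTE bit layout */
#define HPTE_V_VALID	0x0000000000000001ULL

struct hash_pte {
	uint64_t v;	/* stored big-endian, like the kernel's __be64 */
	uint64_t r;
};

static int hpte_is_valid(const struct hash_pte *hptep)
{
	/* read: byte-swap from big-endian before testing bits */
	return (be64toh(hptep->v) & HPTE_V_VALID) != 0;
}

static void hpte_make_valid(struct hash_pte *hptep, uint64_t hpte_v)
{
	/* write: byte-swap to big-endian before storing */
	hptep->v = htobe64(hpte_v | HPTE_V_VALID);
}

int main(void)
{
	struct hash_pte pte = { 0, 0 };

	hpte_make_valid(&pte, 0x1234000ULL);
	printf("valid=%d, raw (big-endian) v=0x%016llx\n",
	       hpte_is_valid(&pte), (unsigned long long)pte.v);
	return 0;
}

On a big-endian host the swaps compile away, which is why the converted kernel paths cost nothing where they ran before.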
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 6747eece84af..7b6c10750179 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -287,9 +287,7 @@ void __dma_free_coherent(size_t size, void *vaddr) pte_clear(&init_mm, addr, ptep); if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); - - ClearPageReserved(page); - __free_page(page); + __free_reserved_page(page); } } addr += PAGE_SIZE; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 8726779e1409..51ab9e7e6c39 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -206,7 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, int trap = TRAP(regs); int is_exec = trap == 0x400; int fault; - int rc = 0; + int rc = 0, store_update_sp = 0; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* @@ -223,9 +223,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, is_write = error_code & ESR_DST; #endif /* CONFIG_4xx || CONFIG_BOOKE */ - if (is_write) - flags |= FAULT_FLAG_WRITE; - #ifdef CONFIG_PPC_ICSWX /* * we need to do this early because this "data storage @@ -280,6 +277,17 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + /* + * We want to do this outside mmap_sem, because reading code around nip + * can result in fault, which will cause a deadlock when called with + * mmap_sem held + */ + if (user_mode(regs)) + store_update_sp = store_updates_sp(regs); + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the * kernel and should generate an OOPS. Unfortunately, in the case of an @@ -345,8 +353,7 @@ retry: * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ - if (address + 2048 < uregs->gpr[1] - && (!user_mode(regs) || !store_updates_sp(regs))) + if (address + 2048 < uregs->gpr[1] && !store_update_sp) goto bad_area; } if (expand_stack(vma, address)) @@ -408,6 +415,7 @@ good_area: } else if (is_write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; + flags |= FAULT_FLAG_WRITE; /* a read */ } else { /* protection fault */ @@ -443,8 +451,12 @@ good_area: regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { + u32 page_ins; + preempt_disable(); - get_lppaca()->page_ins += (1 << PAGE_FACTOR); + page_ins = be32_to_cpu(get_lppaca()->page_ins); + page_ins += 1 << PAGE_FACTOR; + get_lppaca()->page_ins = cpu_to_be32(page_ins); preempt_enable(); } #endif /* CONFIG_PPC_SMLPAR */ diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c index 49822d90ea96..c5f734e20b0f 100644 --- a/arch/powerpc/mm/gup.c +++ b/arch/powerpc/mm/gup.c @@ -117,12 +117,13 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, return 1; } -int get_user_pages_fast(unsigned long start, int nr_pages, int write, - struct page **pages) +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) { struct mm_struct *mm = current->mm; unsigned long addr, len, end; unsigned long next; + unsigned long flags; pgd_t *pgdp; int nr = 0; @@ -135,7 +136,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, start, len))) - goto slow_irqon; + return 0; pr_devel(" aligned: %lx .. 
%lx\n", start, end); @@ -156,7 +157,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, * So long as we atomically load page table pointers versus teardown, * we can follow the address down to the the page and take a ref on it. */ - local_irq_disable(); + local_irq_save(flags); pgdp = pgd_offset(mm, addr); do { @@ -166,30 +167,35 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, (void *)pgd_val(pgd)); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) - goto slow; + break; if (pgd_huge(pgd)) { if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next, write, pages, &nr)) - goto slow; + break; } else if (is_hugepd(pgdp)) { if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT, addr, next, write, pages, &nr)) - goto slow; + break; } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) - goto slow; + break; } while (pgdp++, addr = next, addr != end); - local_irq_enable(); + local_irq_restore(flags); - VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); return nr; +} - { - int ret; +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + int nr, ret; + + start &= PAGE_MASK; + nr = __get_user_pages_fast(start, nr_pages, write, pages); + ret = nr; -slow: - local_irq_enable(); -slow_irqon: + if (nr < nr_pages) { pr_devel(" slow path ! nr = %d\n", nr); /* Try to get the remaining pages with get_user_pages */ @@ -198,7 +204,7 @@ slow_irqon: down_read(&mm->mmap_sem); ret = get_user_pages(current, mm, start, - (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); + nr_pages - nr, write, 0, pages, NULL); up_read(&mm->mmap_sem); /* Have to be a bit careful with return values */ @@ -208,9 +214,9 @@ slow_irqon: else ret += nr; } - - return ret; } + + return ret; } #endif /* __HAVE_ARCH_PTE_SPECIAL */ diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index c33d939120c9..3ea26c25590b 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -35,7 +35,11 @@ #define DBG_LOW(fmt...) #endif +#ifdef __BIG_ENDIAN__ #define HPTE_LOCK_BIT 3 +#else +#define HPTE_LOCK_BIT (56+3) +#endif DEFINE_RAW_SPINLOCK(native_tlbie_lock); @@ -172,7 +176,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize, static inline void native_lock_hpte(struct hash_pte *hptep) { - unsigned long *word = &hptep->v; + unsigned long *word = (unsigned long *)&hptep->v; while (1) { if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) @@ -184,7 +188,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep) static inline void native_unlock_hpte(struct hash_pte *hptep) { - unsigned long *word = &hptep->v; + unsigned long *word = (unsigned long *)&hptep->v; clear_bit_unlock(HPTE_LOCK_BIT, word); } @@ -204,10 +208,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, } for (i = 0; i < HPTES_PER_GROUP; i++) { - if (! (hptep->v & HPTE_V_VALID)) { + if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { /* retry with lock held */ native_lock_hpte(hptep); - if (! (hptep->v & HPTE_V_VALID)) + if (! 
(be64_to_cpu(hptep->v) & HPTE_V_VALID)) break; native_unlock_hpte(hptep); } @@ -226,14 +230,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, i, hpte_v, hpte_r); } - hptep->r = hpte_r; + hptep->r = cpu_to_be64(hpte_r); /* Guarantee the second dword is visible before the valid bit */ eieio(); /* * Now set the first dword including the valid bit * NOTE: this also unlocks the hpte */ - hptep->v = hpte_v; + hptep->v = cpu_to_be64(hpte_v); __asm__ __volatile__ ("ptesync" : : : "memory"); @@ -254,12 +258,12 @@ static long native_hpte_remove(unsigned long hpte_group) for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + hpte_group + slot_offset; - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { /* retry with lock held */ native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) break; @@ -294,7 +298,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less @@ -308,8 +312,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, } else { DBG_LOW(" -> hit\n"); /* Update the HPTE */ - hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | - (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); + hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) | + (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C))); } native_unlock_hpte(hptep); @@ -334,7 +338,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize) slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + slot; - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) /* HPTE matches */ @@ -369,8 +373,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, hptep = htab_address + slot; /* Update the HPTE */ - hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | - (newpp & (HPTE_R_PP | HPTE_R_N)); + hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & + ~(HPTE_R_PP | HPTE_R_N)) | + (newpp & (HPTE_R_PP | HPTE_R_N))); /* * Ensure it is out of the tlb too. Bolted entries base and * actual page size will be same. 
@@ -392,7 +397,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, want_v = hpte_encode_avpn(vpn, bpsize, ssize); native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* * We need to invalidate the TLB always because hpte_remove doesn't do @@ -458,7 +463,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm, hptep = htab_address + slot; want_v = hpte_encode_avpn(vpn, psize, ssize); native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* Even if we miss, we need to invalidate the TLB */ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) @@ -519,11 +524,12 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, int *psize, int *apsize, int *ssize, unsigned long *vpn) { unsigned long avpn, pteg, vpi; - unsigned long hpte_v = hpte->v; + unsigned long hpte_v = be64_to_cpu(hpte->v); + unsigned long hpte_r = be64_to_cpu(hpte->r); unsigned long vsid, seg_off; int size, a_size, shift; /* Look at the 8 bit LP value */ - unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1); + unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1); if (!(hpte_v & HPTE_V_LARGE)) { size = MMU_PAGE_4K; @@ -612,7 +618,7 @@ static void native_hpte_clear(void) * running, right? and for crash dump, we probably * don't want to wait for a maybe bad cpu. */ - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* * Call __tlbie() here rather than tlbie() since we @@ -664,7 +670,7 @@ static void native_flush_hash_range(unsigned long number, int local) hptep = htab_address + slot; want_v = hpte_encode_avpn(vpn, psize, ssize); native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) native_unlock_hpte(hptep); diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 6ecc38bd5b24..6176b3cdf579 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -251,19 +251,18 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - u32 *prop; + __be32 *prop; unsigned long size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", - &size); + prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size); if (prop == NULL) return 0; for (; size >= 4; size -= 4, ++prop) { - if (prop[0] == 40) { + if (be32_to_cpu(prop[0]) == 40) { DBG("1T segment support detected\n"); cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; return 1; @@ -307,23 +306,22 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - u32 *prop; + __be32 *prop; unsigned long size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - prop = (u32 *)of_get_flat_dt_prop(node, - "ibm,segment-page-sizes", &size); + prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size); if (prop != NULL) { pr_info("Page sizes from device-tree:\n"); size /= 4; cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); while(size > 0) { - unsigned int base_shift = prop[0]; - unsigned int slbenc = prop[1]; - unsigned int lpnum = prop[2]; + unsigned int base_shift = be32_to_cpu(prop[0]); + unsigned int slbenc = be32_to_cpu(prop[1]); + unsigned int lpnum = be32_to_cpu(prop[2]); struct 
mmu_psize_def *def; int idx, base_idx; @@ -356,8 +354,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, def->tlbiel = 0; while (size > 0 && lpnum) { - unsigned int shift = prop[0]; - int penc = prop[1]; + unsigned int shift = be32_to_cpu(prop[0]); + int penc = be32_to_cpu(prop[1]); prop += 2; size -= 2; lpnum--; @@ -390,8 +388,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, const char *uname, int depth, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - unsigned long *addr_prop; - u32 *page_count_prop; + __be64 *addr_prop; + __be32 *page_count_prop; unsigned int expected_pages; long unsigned int phys_addr; long unsigned int block_size; @@ -405,12 +403,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL); if (page_count_prop == NULL) return 0; - expected_pages = (1 << page_count_prop[0]); + expected_pages = (1 << be32_to_cpu(page_count_prop[0])); addr_prop = of_get_flat_dt_prop(node, "reg", NULL); if (addr_prop == NULL) return 0; - phys_addr = addr_prop[0]; - block_size = addr_prop[1]; + phys_addr = be64_to_cpu(addr_prop[0]); + block_size = be64_to_cpu(addr_prop[1]); if (block_size != (16 * GB)) return 0; printk(KERN_INFO "Huge page(16GB) memory: " @@ -534,16 +532,16 @@ static int __init htab_dt_scan_pftsize(unsigned long node, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - u32 *prop; + __be32 *prop; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL); + prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL); if (prop != NULL) { /* pft_size[0] is the NUMA CEC cookie */ - ppc64_pft_size = prop[1]; + ppc64_pft_size = be32_to_cpu(prop[1]); return 1; } return 0; @@ -907,7 +905,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea) if (ea >= spt->maxaddr) return 0; - if (ea < 0x100000000) { + if (ea < 0x100000000UL) { /* addresses below 4GB use spt->low_prot */ sbpm = spt->low_prot; } else { diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 3bc700655fc8..74551b5e41e5 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -117,6 +117,5 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) struct hstate *hstate = hstate_file(vma->vm_file); unsigned long tsize = huge_page_shift(hstate) - 10; - __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, tsize, 0); - + __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 834ca8eb38f2..90bb6d9409bf 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -86,6 +86,11 @@ int pgd_huge(pgd_t pgd) */ return ((pgd_val(pgd) & 0x3) != 0x0); } + +int pmd_huge_support(void) +{ + return 1; +} #else int pmd_huge(pmd_t pmd) { @@ -101,6 +106,11 @@ int pgd_huge(pgd_t pgd) { return 0; } + +int pmd_huge_support(void) +{ + return 0; +} #endif pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) @@ -623,8 +633,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, /* * This function frees user-level page tables of a process. - * - * Must be called with pagetable lock held. 
*/ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index d47d3dab4870..cff59f1bec23 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -213,7 +213,12 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, */ BUG_ON(first_memblock_base != 0); +#ifdef CONFIG_PIN_TLB + /* 8xx can only access 24MB at the moment */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000)); +#else /* 8xx can only access 8MB at the moment */ memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); +#endif } #endif /* CONFIG_8xx */ diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d0cd9e4c6837..e3734edffa69 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -300,5 +300,58 @@ void vmemmap_free(unsigned long start, unsigned long end) { } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ +void register_page_bootmem_memmap(unsigned long section_nr, + struct page *start_page, unsigned long size) +{ +} + +/* + * We do not have access to the sparsemem vmemmap, so we fallback to + * walking the list of sparsemem blocks which we already maintain for + * the sake of crashdump. In the long run, we might want to maintain + * a tree if performance of that linear walk becomes a problem. + * + * realmode_pfn_to_page functions can fail due to: + * 1) As real sparsemem blocks do not lay in RAM continously (they + * are in virtual address space which is not available in the real mode), + * the requested page struct can be split between blocks so get_page/put_page + * may fail. + * 2) When huge pages are used, the get_page/put_page API will fail + * in real mode as the linked addresses in the page struct are virtual + * too. + */ +struct page *realmode_pfn_to_page(unsigned long pfn) +{ + struct vmemmap_backing *vmem_back; + struct page *page; + unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; + unsigned long pg_va = (unsigned long) pfn_to_page(pfn); + + for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) { + if (pg_va < vmem_back->virt_addr) + continue; + + /* Check that page struct is not split between real pages */ + if ((pg_va + sizeof(struct page)) > + (vmem_back->virt_addr + page_size)) + return NULL; + + page = (struct page *) (vmem_back->phys + pg_va - + vmem_back->virt_addr); + return page; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(realmode_pfn_to_page); + +#elif defined(CONFIG_FLATMEM) + +struct page *realmode_pfn_to_page(unsigned long pfn) +{ + struct page *page = pfn_to_page(pfn); + return page; +} +EXPORT_SYMBOL_GPL(realmode_pfn_to_page); +#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 7f4bea162026..3fa93dc7fe75 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -297,12 +297,21 @@ void __init paging_init(void) } #endif /* ! 
CONFIG_NEED_MULTIPLE_NODES */ +static void __init register_page_bootmem_info(void) +{ + int i; + + for_each_online_node(i) + register_page_bootmem_info_node(NODE_DATA(i)); +} + void __init mem_init(void) { #ifdef CONFIG_SWIOTLB swiotlb_init(0); #endif + register_page_bootmem_info(); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); set_max_mapnr(max_pfn); free_all_bootmem(); @@ -514,7 +523,7 @@ static int add_system_ram_resources(void) res->name = "System RAM"; res->start = base; res->end = base + size - 1; - res->flags = IORESOURCE_MEM; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; WARN_ON(request_resource(&iomem_resource, res) < 0); } } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 5850798826cd..078d3e00a616 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -58,7 +58,7 @@ static int form1_affinity; #define MAX_DISTANCE_REF_POINTS 4 static int distance_ref_points_depth; -static const unsigned int *distance_ref_points; +static const __be32 *distance_ref_points; static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; /* @@ -179,7 +179,7 @@ static void unmap_cpu_from_node(unsigned long cpu) #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ /* must hold reference to node during call */ -static const int *of_get_associativity(struct device_node *dev) +static const __be32 *of_get_associativity(struct device_node *dev) { return of_get_property(dev, "ibm,associativity", NULL); } @@ -189,13 +189,13 @@ static const int *of_get_associativity(struct device_node *dev) * it exists (the property exists only in kexec/kdump kernels, * added by kexec-tools) */ -static const u32 *of_get_usable_memory(struct device_node *memory) +static const __be32 *of_get_usable_memory(struct device_node *memory) { - const u32 *prop; + const __be32 *prop; u32 len; prop = of_get_property(memory, "linux,drconf-usable-memory", &len); if (!prop || len < sizeof(unsigned int)) - return 0; + return NULL; return prop; } @@ -219,7 +219,7 @@ int __node_distance(int a, int b) } static void initialize_distance_lookup_table(int nid, - const unsigned int *associativity) + const __be32 *associativity) { int i; @@ -227,29 +227,32 @@ static void initialize_distance_lookup_table(int nid, return; for (i = 0; i < distance_ref_points_depth; i++) { - distance_lookup_table[nid][i] = - associativity[distance_ref_points[i]]; + const __be32 *entry; + + entry = &associativity[be32_to_cpu(distance_ref_points[i])]; + distance_lookup_table[nid][i] = of_read_number(entry, 1); } } /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa * info is found. 
*/ -static int associativity_to_nid(const unsigned int *associativity) +static int associativity_to_nid(const __be32 *associativity) { int nid = -1; if (min_common_depth == -1) goto out; - if (associativity[0] >= min_common_depth) - nid = associativity[min_common_depth]; + if (of_read_number(associativity, 1) >= min_common_depth) + nid = of_read_number(&associativity[min_common_depth], 1); /* POWER4 LPAR uses 0xffff as invalid node */ if (nid == 0xffff || nid >= MAX_NUMNODES) nid = -1; - if (nid > 0 && associativity[0] >= distance_ref_points_depth) + if (nid > 0 && + of_read_number(associativity, 1) >= distance_ref_points_depth) initialize_distance_lookup_table(nid, associativity); out: @@ -262,7 +265,7 @@ out: static int of_node_to_nid_single(struct device_node *device) { int nid = -1; - const unsigned int *tmp; + const __be32 *tmp; tmp = of_get_associativity(device); if (tmp) @@ -334,7 +337,7 @@ static int __init find_min_common_depth(void) } if (form1_affinity) { - depth = distance_ref_points[0]; + depth = of_read_number(distance_ref_points, 1); } else { if (distance_ref_points_depth < 2) { printk(KERN_WARNING "NUMA: " @@ -342,7 +345,7 @@ static int __init find_min_common_depth(void) goto err; } - depth = distance_ref_points[1]; + depth = of_read_number(&distance_ref_points[1], 1); } /* @@ -376,12 +379,12 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) of_node_put(memory); } -static unsigned long read_n_cells(int n, const unsigned int **buf) +static unsigned long read_n_cells(int n, const __be32 **buf) { unsigned long result = 0; while (n--) { - result = (result << 32) | **buf; + result = (result << 32) | of_read_number(*buf, 1); (*buf)++; } return result; @@ -391,17 +394,17 @@ static unsigned long read_n_cells(int n, const unsigned int **buf) * Read the next memblock list entry from the ibm,dynamic-memory property * and return the information in the provided of_drconf_cell structure. */ -static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) +static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp) { - const u32 *cp; + const __be32 *cp; drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp); cp = *cellp; - drmem->drc_index = cp[0]; - drmem->reserved = cp[1]; - drmem->aa_index = cp[2]; - drmem->flags = cp[3]; + drmem->drc_index = of_read_number(cp, 1); + drmem->reserved = of_read_number(&cp[1], 1); + drmem->aa_index = of_read_number(&cp[2], 1); + drmem->flags = of_read_number(&cp[3], 1); *cellp = cp + 4; } @@ -413,16 +416,16 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) * list entries followed by N memblock list entries. Each memblock list entry * contains information as laid out in the of_drconf_cell struct above. 
*/ -static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) +static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm) { - const u32 *prop; + const __be32 *prop; u32 len, entries; prop = of_get_property(memory, "ibm,dynamic-memory", &len); if (!prop || len < sizeof(unsigned int)) return 0; - entries = *prop++; + entries = of_read_number(prop++, 1); /* Now that we know the number of entries, revalidate the size * of the property read in to ensure we have everything @@ -440,7 +443,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) */ static u64 of_get_lmb_size(struct device_node *memory) { - const u32 *prop; + const __be32 *prop; u32 len; prop = of_get_property(memory, "ibm,lmb-size", &len); @@ -453,7 +456,7 @@ static u64 of_get_lmb_size(struct device_node *memory) struct assoc_arrays { u32 n_arrays; u32 array_sz; - const u32 *arrays; + const __be32 *arrays; }; /* @@ -469,15 +472,15 @@ struct assoc_arrays { static int of_get_assoc_arrays(struct device_node *memory, struct assoc_arrays *aa) { - const u32 *prop; + const __be32 *prop; u32 len; prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); if (!prop || len < 2 * sizeof(unsigned int)) return -1; - aa->n_arrays = *prop++; - aa->array_sz = *prop++; + aa->n_arrays = of_read_number(prop++, 1); + aa->array_sz = of_read_number(prop++, 1); /* Now that we know the number of arrays and size of each array, * revalidate the size of the property read in. @@ -504,7 +507,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem, !(drmem->flags & DRCONF_MEM_AI_INVALID) && drmem->aa_index < aa->n_arrays) { index = drmem->aa_index * aa->array_sz + min_common_depth - 1; - nid = aa->arrays[index]; + nid = of_read_number(&aa->arrays[index], 1); if (nid == 0xffff || nid >= MAX_NUMNODES) nid = default_nid; @@ -595,7 +598,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start, * Reads the counter for a given entry in * linux,drconf-usable-memory property */ -static inline int __init read_usm_ranges(const u32 **usm) +static inline int __init read_usm_ranges(const __be32 **usm) { /* * For each lmb in ibm,dynamic-memory a corresponding @@ -612,7 +615,7 @@ static inline int __init read_usm_ranges(const u32 **usm) */ static void __init parse_drconf_memory(struct device_node *memory) { - const u32 *uninitialized_var(dm), *usm; + const __be32 *uninitialized_var(dm), *usm; unsigned int n, rc, ranges, is_kexec_kdump = 0; unsigned long lmb_size, base, size, sz; int nid; @@ -721,7 +724,7 @@ static int __init parse_numa_properties(void) unsigned long size; int nid; int ranges; - const unsigned int *memcell_buf; + const __be32 *memcell_buf; unsigned int len; memcell_buf = of_get_property(memory, @@ -935,8 +938,7 @@ static void __init mark_reserved_regions_for_nid(int nid) unsigned long start_pfn = physbase >> PAGE_SHIFT; unsigned long end_pfn = PFN_UP(physbase + size); struct node_active_region node_ar; - unsigned long node_end_pfn = node->node_start_pfn + - node->node_spanned_pages; + unsigned long node_end_pfn = pgdat_end_pfn(node); /* * Check to make sure that this memblock.reserved area is @@ -1106,7 +1108,7 @@ early_param("numa", early_numa); static int hot_add_drconf_scn_to_nid(struct device_node *memory, unsigned long scn_addr) { - const u32 *dm; + const __be32 *dm; unsigned int drconf_cell_cnt, rc; unsigned long lmb_size; struct assoc_arrays aa; @@ -1151,7 +1153,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory, * 
represented in the device tree as a node (i.e. memory@XXXX) for * each memblock. */ -int hot_add_node_scn_to_nid(unsigned long scn_addr) +static int hot_add_node_scn_to_nid(unsigned long scn_addr) { struct device_node *memory; int nid = -1; @@ -1159,7 +1161,7 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr) for_each_node_by_type(memory, "memory") { unsigned long start, size; int ranges; - const unsigned int *memcell_buf; + const __be32 *memcell_buf; unsigned int len; memcell_buf = of_get_property(memory, "reg", &len); @@ -1232,7 +1234,7 @@ static u64 hot_add_drconf_memory_max(void) struct device_node *memory = NULL; unsigned int drconf_cell_cnt = 0; u64 lmb_size = 0; - const u32 *dm = 0; + const __be32 *dm = NULL; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { @@ -1337,40 +1339,41 @@ static int update_cpu_associativity_changes_mask(void) * Convert the associativity domain numbers returned from the hypervisor * to the sequence they would appear in the ibm,associativity property. */ -static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) +static int vphn_unpack_associativity(const long *packed, __be32 *unpacked) { int i, nr_assoc_doms = 0; - const u16 *field = (const u16*) packed; + const __be16 *field = (const __be16 *) packed; #define VPHN_FIELD_UNUSED (0xffff) #define VPHN_FIELD_MSB (0x8000) #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { - if (*field == VPHN_FIELD_UNUSED) { + if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) { /* All significant fields processed, and remaining * fields contain the reserved value of all 1's. * Just store them. */ - unpacked[i] = *((u32*)field); + unpacked[i] = *((__be32 *)field); field += 2; - } else if (*field & VPHN_FIELD_MSB) { + } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) { /* Data is in the lower 15 bits of this field */ - unpacked[i] = *field & VPHN_FIELD_MASK; + unpacked[i] = cpu_to_be32( + be16_to_cpup(field) & VPHN_FIELD_MASK); field++; nr_assoc_doms++; } else { /* Data is in the lower 15 bits of this field * concatenated with the next 16 bit field */ - unpacked[i] = *((u32*)field); + unpacked[i] = *((__be32 *)field); field += 2; nr_assoc_doms++; } } /* The first cell contains the length of the property */ - unpacked[0] = nr_assoc_doms; + unpacked[0] = cpu_to_be32(nr_assoc_doms); return nr_assoc_doms; } @@ -1379,7 +1382,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) * Retrieve the new associativity information for a virtual processor's * home node. 
*/ -static long hcall_vphn(unsigned long cpu, unsigned int *associativity) +static long hcall_vphn(unsigned long cpu, __be32 *associativity) { long rc; long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; @@ -1393,7 +1396,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) } static long vphn_get_associativity(unsigned long cpu, - unsigned int *associativity) + __be32 *associativity) { long rc; @@ -1450,7 +1453,7 @@ int arch_update_cpu_topology(void) { unsigned int cpu, sibling, changed = 0; struct topology_update_data *updates, *ud; - unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; + __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; cpumask_t updated_cpus; struct device *dev; int weight, new_nid, i = 0; @@ -1531,7 +1534,7 @@ static void topology_work_fn(struct work_struct *work) } static DECLARE_WORK(topology_work, topology_work_fn); -void topology_schedule_update(void) +static void topology_schedule_update(void) { schedule_work(&topology_work); } @@ -1609,7 +1612,7 @@ int start_topology_update(void) #endif } } else if (firmware_has_feature(FW_FEATURE_VPHN) && - get_lppaca()->shared_proc) { + lppaca_shared_proc(get_lppaca())) { if (!vphn_enabled) { prrn_enabled = 0; vphn_enabled = 1; diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index edda589795c3..841e0d00863c 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -32,8 +32,6 @@ #include <asm/tlbflush.h> #include <asm/tlb.h> -#include "mmu_decl.h" - static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; @@ -72,7 +70,7 @@ struct page * maybe_pte_to_page(pte_t pte) * support falls into the same category. */ -static pte_t set_pte_filter(pte_t pte, unsigned long addr) +static pte_t set_pte_filter(pte_t pte) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || @@ -81,17 +79,6 @@ static pte_t set_pte_filter(pte_t pte, unsigned long addr) if (!pg) return pte; if (!test_bit(PG_arch_1, &pg->flags)) { -#ifdef CONFIG_8xx - /* On 8xx, cache control instructions (particularly - * "dcbst" from flush_dcache_icache) fault as write - * operation if there is an unpopulated TLB entry - * for the address in question. To workaround that, - * we invalidate the TLB here, thus avoiding dcbst - * misbehaviour. - */ - /* 8xx doesn't care about PID, size or ind args */ - _tlbil_va(addr, 0, 0, 0); -#endif /* CONFIG_8xx */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); } @@ -111,7 +98,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. */ -static pte_t set_pte_filter(pte_t pte, unsigned long addr) +static pte_t set_pte_filter(pte_t pte) { struct page *pg; @@ -193,7 +180,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, * this context might not have been activated yet when this * is called. 
*/ - pte = set_pte_filter(pte, addr); + pte = set_pte_filter(pte); /* Perform the setting of the PTE */ __set_pte_at(mm, addr, ptep, pte, 0); diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 6c856fb8c15b..5b9601715289 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -121,7 +121,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) ptepage = alloc_pages(flags, 0); if (!ptepage) return NULL; - pgtable_page_ctor(ptepage); + if (!pgtable_page_ctor(ptepage)) { + __free_page(ptepage); + return NULL; + } return ptepage; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 536eec72c0f7..9d95786aa80f 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -378,6 +378,10 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) __GFP_REPEAT | __GFP_ZERO); if (!page) return NULL; + if (!kernel && !pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } ret = page_address(page); spin_lock(&mm->page_table_lock); @@ -392,9 +396,6 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) } spin_unlock(&mm->page_table_lock); - if (!kernel) - pgtable_page_ctor(page); - return (pte_t *)ret; } diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index a538c80db2df..9d1d33cd2be5 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -66,8 +66,10 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, * we only update the current CPU's SLB shadow buffer. */ get_slb_shadow()->save_area[entry].esid = 0; - get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags); - get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry); + get_slb_shadow()->save_area[entry].vsid = + cpu_to_be64(mk_vsid_data(ea, ssize, flags)); + get_slb_shadow()->save_area[entry].esid = + cpu_to_be64(mk_esid_data(ea, ssize, entry)); } static inline void slb_shadow_clear(unsigned long entry) @@ -112,7 +114,8 @@ static void __slb_flush_and_rebolt(void) } else { /* Update stack entry; others don't change */ slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2); - ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; + ksp_vsid_data = + be64_to_cpu(get_slb_shadow()->save_area[2].vsid); } /* We need to do this all in asm, so we're sure we don't touch diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 3e99c149271a..7ce9cf3b6988 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr, slice = GET_HIGH_SLICE_INDEX(addr); *boundary_addr = (slice + end) ? 
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP; - return !!(available.high_slices & (1u << slice)); + return !!(available.high_slices & (1ul << slice)); } } diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index aa74acb0fdfc..a770df2dae70 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -105,7 +105,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) limit = spt->maxaddr; for (; addr < limit; addr = next) { next = pmd_addr_end(addr, limit); - if (addr < 0x100000000) { + if (addr < 0x100000000UL) { spm = spt->low_prot; } else { spm = spt->protptrs[addr >> SBP_L3_SHIFT]; @@ -219,7 +219,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) for (limit = addr + len; addr < limit; addr = next) { next = pmd_addr_end(addr, limit); err = -ENOMEM; - if (addr < 0x100000000) { + if (addr < 0x100000000UL) { spm = spt->low_prot; } else { spm = spt->protptrs[addr >> SBP_L3_SHIFT]; diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 41cd68dee681..358d74303138 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -305,7 +305,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { #ifdef CONFIG_HUGETLB_PAGE - if (is_vm_hugetlb_page(vma)) + if (vma && is_vm_hugetlb_page(vma)) flush_hugetlb_page(vma, vmaddr); #endif |
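
For reference, the restructured get_user_pages_fast() in gup.c above now tries the lockless walk first (__get_user_pages_fast(), which uses local_irq_save() and simply returns however many pages it managed to pin) and only takes the mmap_sem-protected slow path for the remainder, merging the two results. A standalone sketch of that fallback shape, with fast_pin()/slow_pin() as made-up stand-ins for the real walkers:

#include <stdio.h>

/* stand-in: pretend the lockless walk pins at most two pages */
static int fast_pin(int nr_pages)
{
	return nr_pages < 2 ? nr_pages : 2;
}

/* stand-in: pretend the slow path pins everything (or returns -errno) */
static int slow_pin(int nr_pages)
{
	return nr_pages;
}

static int pin_pages(int nr_pages)
{
	int nr = fast_pin(nr_pages);
	int ret = nr;

	if (nr < nr_pages) {
		/* only ask the slow path for what the fast path missed */
		int rc = slow_pin(nr_pages - nr);

		/* careful with return values: keep partial fast-path success */
		if (nr > 0)
			ret = (rc < 0) ? nr : nr + rc;
		else
			ret = rc;
	}
	return ret;
}

int main(void)
{
	printf("pinned %d of 5 requested pages\n", pin_pages(5));
	return 0;
}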
