Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  168
1 file changed, 166 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index de9f8921e407..221b11bb50e3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1116,6 +1116,19 @@ void __init sanity_check_meminfo(void)
struct memblock_region *reg;
bool should_use_highmem = false;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ struct memblock_region *prev_reg = NULL;
+
+ for_each_memblock(memory, reg) {
+ if (prev_reg == NULL) {
+ prev_reg = reg;
+ continue;
+ }
+ vmalloc_limit += reg->base - (prev_reg->base + prev_reg->size);
+ prev_reg = reg;
+ }
+#endif
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
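The CONFIG_ENABLE_VMALLOC_SAVING pass above walks the memblock regions once and adds every hole between adjacent banks to vmalloc_limit, so holes in physical memory are reclaimed as extra vmalloc space; the single pass is valid because memblock keeps its regions sorted by base address and non-overlapping. A minimal userspace sketch of that hole accounting, using hypothetical region values:

    #include <stdint.h>
    #include <stdio.h>

    struct region { uint64_t base, size; };

    int main(void)
    {
            /* hypothetical, sorted, non-overlapping memory banks */
            struct region regs[] = {
                    { 0x80000000, 0x10000000 },  /* 256 MB */
                    { 0xa0000000, 0x10000000 },  /* 256 MB, after a hole */
            };
            uint64_t vmalloc_limit = 0;
            struct region *prev = NULL;
            unsigned int i;

            for (i = 0; i < 2; i++) {
                    if (prev)
                            vmalloc_limit += regs[i].base -
                                             (prev->base + prev->size);
                    prev = &regs[i];
            }
            /* prints 0x10000000: the gap between the two banks */
            printf("extra vmalloc space: %#llx\n",
                   (unsigned long long)vmalloc_limit);
            return 0;
    }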
@@ -1385,12 +1398,21 @@ static void __init map_lowmem(void)
struct memblock_region *reg;
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ struct static_vm *svm;
+ phys_addr_t start;
+ phys_addr_t end;
+ unsigned long vaddr;
+ unsigned long pfn;
+ unsigned long length;
+ unsigned int type;
+ int nr = 0;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
struct map_desc map;
+ start = reg->base;
+ end = start + reg->size;
+ nr++;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
@@ -1439,6 +1461,34 @@ static void __init map_lowmem(void)
}
}
}
+ svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+ for_each_memblock(memory, reg) {
+ struct vm_struct *vm;
+
+ start = reg->base;
+ end = start + reg->size;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ break;
+
+ vm = &svm->vm;
+ pfn = __phys_to_pfn(start);
+ vaddr = __phys_to_virt(start);
+ length = end - start;
+ type = MT_MEMORY_RW;
+
+ vm->addr = (void *)(vaddr & PAGE_MASK);
+ vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+ vm->phys_addr = __pfn_to_phys(pfn);
+ vm->flags = VM_LOWMEM;
+ vm->flags |= VM_ARM_MTYPE(type);
+ vm->caller = map_lowmem;
+ add_static_vm_early(svm++);
+ mark_vmalloc_reserved_area(vm->addr, vm->size);
+ }
}
#ifdef CONFIG_ARM_PV_FIXUP
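The second loop added to map_lowmem() records each lowmem bank as a static vm area (flagged VM_LOWMEM) so the vmalloc allocator treats those virtual ranges as occupied. The vm->addr/vm->size arithmetic widens an unaligned range outward to page boundaries; a sketch of just that arithmetic, with hypothetical values:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long vaddr  = 0xc0000234;  /* hypothetical, unaligned */
            unsigned long length = 0x10000;

            unsigned long addr = vaddr & PAGE_MASK;
            unsigned long size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));

            /*
             * addr = 0xc0000000, size = 0x11000: rounding the start
             * down grows the size, so the recorded area still covers
             * all of [vaddr, vaddr + length).
             */
            printf("addr=%#lx size=%#lx\n", addr, size);
            return 0;
    }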
@@ -1537,6 +1587,119 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#endif
+#ifdef CONFIG_FORCE_PAGES
+/*
+ * Remap a PMD into pages.
+ * We split a single PMD here, not the usual ARM pair of PMDs.
+ */
+static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn,
+ const struct mem_type *type)
+{
+ pte_t *pte, *start_pte;
+ pmd_t *base_pmd;
+
+ base_pmd = pmd_offset(
+ pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+ if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
+ start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * The following is needed when a new pte table is allocated for
+ * the pmd[1] case, which can happen when the base (start)
+ * address falls within pmd[1].
+ */
+ if (addr & SECTION_SIZE)
+ start_pte += pte_index(addr);
+#endif
+ } else {
+ start_pte = pte_offset_kernel(base_pmd, addr);
+ }
+
+ pte = start_pte;
+
+ do {
+ set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+ *pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
+ mb(); /* order the pte writes before the pmd update */
+ flush_pmd_entry(pmd);
+ flush_tlb_all();
+}
+
+/*
+ * It is significantly easier to remap as pages after all memory has
+ * been mapped: everything is sections, so all we have to do is split.
+ */
+static void __init remap_pages(void)
+{
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg) {
+ phys_addr_t phys_start = reg->base;
+ phys_addr_t phys_end = reg->base + reg->size;
+ unsigned long addr = (unsigned long)__va(phys_start);
+ unsigned long end = (unsigned long)__va(phys_end);
+ pmd_t *pmd = NULL;
+ unsigned long next;
+ unsigned long pfn = __phys_to_pfn(phys_start);
+ bool fixup = false;
+ unsigned long saved_start = addr;
+
+ if (phys_start > arm_lowmem_limit)
+ break;
+ if (phys_end > arm_lowmem_limit)
+ end = (unsigned long)__va(arm_lowmem_limit);
+ if (phys_start >= phys_end)
+ break;
+
+ pmd = pmd_offset(
+ pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+#ifndef CONFIG_ARM_LPAE
+ if (addr & SECTION_SIZE) {
+ fixup = true;
+ pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
+ pmd++;
+ }
+
+ if (end & SECTION_SIZE)
+ pmd_empty_section_gap(end);
+#endif
+
+ do {
+ next = addr + SECTION_SIZE;
+
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ split_pmd(pmd, addr, next, pfn,
+ &mem_types[MT_MEMORY_RWX]);
+ pmd++;
+ pfn += SECTION_SIZE >> PAGE_SHIFT;
+
+ } while (addr = next, addr < end);
+
+ if (fixup) {
+ /*
+ * Install a faulting entry so that an access to the odd
+ * section boundary faults instead of silently finding no
+ * pmd. The entry must fault both to catch errors and to
+ * keep speculative accesses from being satisfied.
+ */
+ pmd = pmd_off_k(saved_start);
+ pmd[0] = __pmd(pmd_val(pmd[1]) & ~1);
+ }
+ }
+}
+#else
+static void __init remap_pages(void)
+{
+}
+#endif
+
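remap_pages() walks lowmem one 1 MB section at a time and has split_pmd() replace each section mapping with a freshly filled pte table; the pmd is then pointed at the hardware half of that table (PTE_HWTABLE_OFF into the allocation). A sketch of the walk and the pfn stepping, assuming the usual non-LPAE values (4 KB pages, 1 MB sections) and hypothetical addresses:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define SECTION_SHIFT   20
    #define SECTION_SIZE    (1UL << SECTION_SHIFT)

    int main(void)
    {
            unsigned long addr = 0xc0000000; /* hypothetical lowmem VA */
            unsigned long end  = 0xc0300000; /* three sections later   */
            unsigned long pfn  = 0x80000;    /* hypothetical PA >> 12  */
            unsigned long next;

            do {
                    next = addr + SECTION_SIZE;
                    /* one split_pmd() call fills 256 pte slots */
                    printf("split va=%#lx..%#lx first pfn=%#lx\n",
                           addr, next, pfn);
                    pfn += SECTION_SIZE >> PAGE_SHIFT;
            } while (addr = next, addr < end);

            return 0;
    }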
static void __init early_fixmap_shutdown(void)
{
int i;
@@ -1580,6 +1743,7 @@ void __init paging_init(const struct machine_desc *mdesc)
memblock_set_current_limit(arm_lowmem_limit);
dma_contiguous_remap();
early_fixmap_shutdown();
+ remap_pages();
devicemaps_init(mdesc);
kmap_init();
tcm_init();