Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                            |  91
-rw-r--r--  arch/arm64/Makefile                           |  11
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig   |   6
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig        |   6
-rw-r--r--  arch/arm64/include/asm/acpi.h                 |  19
-rw-r--r--  arch/arm64/include/asm/alternative.h          |  64
-rw-r--r--  arch/arm64/include/asm/assembler.h            |  37
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h           |  38
-rw-r--r--  arch/arm64/include/asm/boot.h                 |   6
-rw-r--r--  arch/arm64/include/asm/brk-imm.h              |  25
-rw-r--r--  arch/arm64/include/asm/bug.h                  |   2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h           |   1
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h              |   1
-rw-r--r--  arch/arm64/include/asm/cpu.h                  |   1
-rw-r--r--  arch/arm64/include/asm/cpufeature.h           |   7
-rw-r--r--  arch/arm64/include/asm/cputype.h              |  39
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h       |  14
-rw-r--r--  arch/arm64/include/asm/elf.h                  |  24
-rw-r--r--  arch/arm64/include/asm/fixmap.h               |  10
-rw-r--r--  arch/arm64/include/asm/ftrace.h               |   2
-rw-r--r--  arch/arm64/include/asm/futex.h                |  14
-rw-r--r--  arch/arm64/include/asm/hugetlb.h              |  44
-rw-r--r--  arch/arm64/include/asm/irq.h                  |  45
-rw-r--r--  arch/arm64/include/asm/kasan.h                |   5
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h       |  12
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h              |   2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h             |   8
-rw-r--r--  arch/arm64/include/asm/lse.h                  |   1
-rw-r--r--  arch/arm64/include/asm/memory.h               |  65
-rw-r--r--  arch/arm64/include/asm/mmu_context.h          |  62
-rw-r--r--  arch/arm64/include/asm/module.h               |  17
-rw-r--r--  arch/arm64/include/asm/pgalloc.h              |  26
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h        |  18
-rw-r--r--  arch/arm64/include/asm/pgtable.h              | 156
-rw-r--r--  arch/arm64/include/asm/processor.h            |   9
-rw-r--r--  arch/arm64/include/asm/shmparam.h             |   2
-rw-r--r--  arch/arm64/include/asm/smp.h                  |   8
-rw-r--r--  arch/arm64/include/asm/spinlock.h             |  23
-rw-r--r--  arch/arm64/include/asm/stacktrace.h           |   9
-rw-r--r--  arch/arm64/include/asm/sysreg.h               |  23
-rw-r--r--  arch/arm64/include/asm/thread_info.h          |  10
-rw-r--r--  arch/arm64/include/asm/uaccess.h              |  82
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h       |   7
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h          |   1
-rw-r--r--  arch/arm64/kernel/Makefile                    |   3
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c     | 153
-rw-r--r--  arch/arm64/kernel/alternative.c               |   6
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c          |  13
-rw-r--r--  arch/arm64/kernel/cpu_errata.c                |  20
-rw-r--r--  arch/arm64/kernel/cpu_ops.c                   |  27
-rw-r--r--  arch/arm64/kernel/cpufeature.c                | 133
-rw-r--r--  arch/arm64/kernel/cpuinfo.c                   |  55
-rw-r--r--  arch/arm64/kernel/efi-entry.S                 |   2
-rw-r--r--  arch/arm64/kernel/entry.S                     |  69
-rw-r--r--  arch/arm64/kernel/fpsimd.c                    |   2
-rw-r--r--  arch/arm64/kernel/ftrace.c                    |  27
-rw-r--r--  arch/arm64/kernel/head.S                      | 152
-rw-r--r--  arch/arm64/kernel/image.h                     |  85
-rw-r--r--  arch/arm64/kernel/irq.c                       |   3
-rw-r--r--  arch/arm64/kernel/kaslr.c                     | 177
-rw-r--r--  arch/arm64/kernel/module-plts.c               | 201
-rw-r--r--  arch/arm64/kernel/module.c                    |  96
-rw-r--r--  arch/arm64/kernel/module.lds                  |   3
-rw-r--r--  arch/arm64/kernel/perf_callchain.c            |   5
-rw-r--r--  arch/arm64/kernel/process.c                   |  21
-rw-r--r--  arch/arm64/kernel/return_address.c            |   5
-rw-r--r--  arch/arm64/kernel/setup.c                     |  36
-rw-r--r--  arch/arm64/kernel/sleep.S                     |   3
-rw-r--r--  arch/arm64/kernel/smp.c                       |  30
-rw-r--r--  arch/arm64/kernel/stacktrace.c                |  74
-rw-r--r--  arch/arm64/kernel/suspend.c                   |  20
-rw-r--r--  arch/arm64/kernel/time.c                      |   5
-rw-r--r--  arch/arm64/kernel/traps.c                     |  61
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S               |  32
-rw-r--r--  arch/arm64/kvm/hyp.S                          |   6
-rw-r--r--  arch/arm64/lib/Makefile                       |  13
-rw-r--r--  arch/arm64/lib/clear_user.S                   |  12
-rw-r--r--  arch/arm64/lib/copy_from_user.S               |  12
-rw-r--r--  arch/arm64/lib/copy_in_user.S                 |  20
-rw-r--r--  arch/arm64/lib/copy_page.S                    |  63
-rw-r--r--  arch/arm64/lib/copy_to_user.S                 |  12
-rw-r--r--  arch/arm64/mm/cache.S                         |  28
-rw-r--r--  arch/arm64/mm/context.c                       |   2
-rw-r--r--  arch/arm64/mm/copypage.c                      |   3
-rw-r--r--  arch/arm64/mm/dma-mapping.c                   |   4
-rw-r--r--  arch/arm64/mm/dump.c                          |  17
-rw-r--r--  arch/arm64/mm/extable.c                       |   2
-rw-r--r--  arch/arm64/mm/fault.c                         |  34
-rw-r--r--  arch/arm64/mm/flush.c                         |  33
-rw-r--r--  arch/arm64/mm/hugetlbpage.c                   | 260
-rw-r--r--  arch/arm64/mm/init.c                          | 132
-rw-r--r--  arch/arm64/mm/kasan_init.c                    |  79
-rw-r--r--  arch/arm64/mm/mmu.c                           | 701
-rw-r--r--  arch/arm64/mm/pageattr.c                      |  66
-rw-r--r--  arch/arm64/mm/pgd.c                           |  12
-rw-r--r--  arch/arm64/mm/proc-macros.S                   |  22
-rw-r--r--  arch/arm64/mm/proc.S                          |  28
97 files changed, 1014 insertions(+), 3119 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 65fbf6633e28..0314b80695ca 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -51,7 +51,6 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
- select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
@@ -425,7 +424,6 @@ config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
depends on MODULES
default y
- select ARM64_MODULE_CMODEL_LARGE
help
This option builds kernel modules using the large memory model in
order to avoid the use of the ADRP instruction, which can cause
@@ -603,9 +601,6 @@ config ARCH_NR_GPIO
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
config ARCH_HAS_HOLES_MEMORYMODEL
def_bool y if SPARSEMEM
@@ -656,6 +651,9 @@ config PERF_EVENTS_RESET_PMU_DEBUGFS
config SYS_SUPPORTS_HUGETLBFS
def_bool y
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
config ARCH_WANT_HUGE_PMD_SHARE
def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
@@ -828,93 +826,10 @@ config ARM64_LSE_ATOMICS
endmenu
-config ARM64_UAO
- bool "Enable support for User Access Override (UAO)"
- default y
- help
- User Access Override (UAO; part of the ARMv8.2 Extensions)
- causes the 'unprivileged' variant of the load/store instructions to
- be overriden to be privileged.
-
- This option changes get_user() and friends to use the 'unprivileged'
- variant of the load/store instructions. This ensures that user-space
- really did have access to the supplied memory. When addr_limit is
- set to kernel memory the UAO bit will be set, allowing privileged
- access to kernel memory.
-
- Choosing this option will cause copy_to_user() et al to use user-space
- memory permissions.
-
- The feature is detected at runtime, the kernel will use the
- regular load/store instructions if the cpu does not implement the
- feature.
-
-config ARM64_MODULE_CMODEL_LARGE
- bool
-
-config ARM64_MODULE_PLTS
- bool
- select ARM64_MODULE_CMODEL_LARGE
- select HAVE_MOD_ARCH_SPECIFIC
-
-config RELOCATABLE
- bool
- help
- This builds the kernel as a Position Independent Executable (PIE),
- which retains all relocation metadata required to relocate the
- kernel binary at runtime to a different virtual address than the
- address it was linked at.
- Since AArch64 uses the RELA relocation format, this requires a
- relocation pass at runtime even if the kernel is loaded at the
- same address it was linked at.
-
-config RANDOMIZE_BASE
- bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS
- select RELOCATABLE
- help
- Randomizes the virtual address at which the kernel image is
- loaded, as a security feature that deters exploit attempts
- relying on knowledge of the location of kernel internals.
-
- It is the bootloader's job to provide entropy, by passing a
- random u64 value in /chosen/kaslr-seed at kernel entry.
-
- When booting via the UEFI stub, it will invoke the firmware's
- EFI_RNG_PROTOCOL implementation (if available) to supply entropy
- to the kernel proper. In addition, it will randomise the physical
- location of the kernel Image as well.
-
- If unsure, say N.
-
-config RANDOMIZE_MODULE_REGION_FULL
- bool "Randomize the module region independently from the core kernel"
- depends on RANDOMIZE_BASE
- default y
- help
- Randomizes the location of the module region without considering the
- location of the core kernel. This way, it is impossible for modules
- to leak information about the location of core kernel data structures
- but it does imply that function calls between modules and the core
- kernel will need to be resolved via veneers in the module PLT.
-
- When this option is not set, the module region will be randomized over
- a limited range that contains the [_stext, _etext] interval of the
- core kernel, so branch relocations are always in range.
-
endmenu
menu "Boot options"
-config ARM64_ACPI_PARKING_PROTOCOL
- bool "Enable support for the ARM64 ACPI parking protocol"
- depends on ACPI
- help
- Enable support for the ARM64 ACPI parking protocol. If disabled
- the kernel will not allow booting through the ARM64 ACPI parking
- protocol even if the corresponding data is present in the ACPI
- MADT table.
-
config CMDLINE
string "Default kernel command string"
default ""
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 14c74a66c58e..285b32fa41c1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,10 +15,6 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
-ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie
-endif
-
KBUILD_DEFCONFIG := defconfig
# Check for binutils support for specific extensions
@@ -33,7 +29,6 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
KBUILD_CFLAGS += -fno-pic
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_AFLAGS += $(lseinstr)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
@@ -48,14 +43,10 @@ endif
CHECKFLAGS += -D__aarch64__
-ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
KBUILD_CFLAGS_MODULE += -mcmodel=large
endif
-ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
-KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
-endif
-
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index f48abaf656d6..2d3ddbaccb72 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -376,9 +376,9 @@ CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_VIDC_V4L2=m
+CONFIG_MSM_VIDC_VMEM=m
+CONFIG_MSM_VIDC_GOVERNORS=m
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index f8ab577b4b84..3523cb67d49a 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -379,9 +379,9 @@ CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_VIDC_V4L2=m
+CONFIG_MSM_VIDC_VMEM=m
+CONFIG_MSM_VIDC_GOVERNORS=m
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index aee323b13802..caafd63b8092 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -87,26 +87,9 @@ void __init acpi_init_cpus(void);
static inline void acpi_init_cpus(void) { }
#endif /* CONFIG_ACPI */
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-bool acpi_parking_protocol_valid(int cpu);
-void __init
-acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
-#else
-static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
-static inline void
-acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
-{}
-#endif
-
static inline const char *acpi_get_enable_method(int cpu)
{
- if (acpi_psci_present())
- return "psci";
-
- if (acpi_parking_protocol_valid(cpu))
- return "parking-protocol";
-
- return NULL;
+ return acpi_psci_present() ? "psci" : NULL;
}
#ifdef CONFIG_ACPI_APEI
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index beccbdefa106..d56ec0715157 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,8 +1,6 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
-
#ifndef __ASSEMBLY__
#include <linux/init.h>
@@ -21,6 +19,7 @@ struct alt_instr {
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
+void free_alternatives_memory(void);
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
@@ -65,8 +64,6 @@ void apply_alternatives(void *start, size_t length);
#else
-#include <asm/assembler.h>
-
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
.word \orig_offset - .
.word \alt_offset - .
@@ -140,65 +137,6 @@ void apply_alternatives(void *start, size_t length);
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-/*
- * Generate the assembly for UAO alternatives with exception table entries.
- * This is complicated as there is no post-increment or pair versions of the
- * unprivileged instructions, and USER() only works for single instructions.
- */
-#ifdef CONFIG_ARM64_UAO
- .macro uao_ldp l, reg1, reg2, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: ldp \reg1, \reg2, [\addr], \post_inc;
-8889: nop;
- nop;
- alternative_else
- ldtr \reg1, [\addr];
- ldtr \reg2, [\addr, #8];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
- .endm
-
- .macro uao_stp l, reg1, reg2, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: stp \reg1, \reg2, [\addr], \post_inc;
-8889: nop;
- nop;
- alternative_else
- sttr \reg1, [\addr];
- sttr \reg2, [\addr, #8];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
- .endm
-
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: \inst \reg, [\addr], \post_inc;
- nop;
- alternative_else
- \alt_inst \reg, [\addr];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- .endm
-#else
- .macro uao_ldp l, reg1, reg2, addr, post_inc
- USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
- .endm
- .macro uao_stp l, reg1, reg2, addr, post_inc
- USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
- .endm
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
- USER(\l, \inst \reg, [\addr], \post_inc)
- .endm
-#endif
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fcaf3cce639a..09f13a96941b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -106,19 +106,12 @@
dmb \opt
.endm
-/*
- * Emit an entry into the exception table
- */
- .macro _asm_extable, from, to
- .pushsection __ex_table, "a"
- .align 3
- .long (\from - .), (\to - .)
- .popsection
- .endm
-
#define USER(l, x...) \
9999: x; \
- _asm_extable 9999b, l
+ .section __ex_table,"a"; \
+ .align 3; \
+ .quad 9999b,l; \
+ .previous
/*
* Register aliases.
@@ -212,17 +205,6 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
- /*
- * @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
- * @tmp: scratch register
- */
- .macro this_cpu_ptr, sym, reg, tmp
- adr_l \reg, \sym
- mrs \tmp, tpidr_el1
- add \reg, \reg, \tmp
- .endm
-
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
@@ -234,15 +216,4 @@ lr .req x30 // link register
.size __pi_##x, . - x; \
ENDPROC(x)
- /*
- * Emit a 64-bit absolute little endian symbol reference in a way that
- * ensures that it will be resolved at build time, even when building a
- * PIE binary. This requires cooperation from the linker script, which
- * must emit the lo32/hi32 halves individually.
- */
- .macro le64sym, sym
- .long \sym\()_lo32
- .long \sym\()_hi32
- .endm
-
#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 39c1d340fec5..197e06afbf71 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -36,7 +36,7 @@ static inline void atomic_andnot(int i, atomic_t *v)
" stclr %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_or(int i, atomic_t *v)
@@ -48,7 +48,7 @@ static inline void atomic_or(int i, atomic_t *v)
" stset %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_xor(int i, atomic_t *v)
@@ -60,7 +60,7 @@ static inline void atomic_xor(int i, atomic_t *v)
" steor %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_add(int i, atomic_t *v)
@@ -72,7 +72,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stadd %w[i], %[v]\n")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
@@ -90,7 +90,7 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return w0; \
}
@@ -116,7 +116,7 @@ static inline void atomic_and(int i, atomic_t *v)
" stclr %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic_sub(int i, atomic_t *v)
@@ -133,7 +133,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stadd %w[i], %[v]")
: [i] "+r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
@@ -153,7 +153,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS , ##cl); \
+ : "x30" , ##cl); \
\
return w0; \
}
@@ -177,7 +177,7 @@ static inline void atomic64_andnot(long i, atomic64_t *v)
" stclr %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_or(long i, atomic64_t *v)
@@ -189,7 +189,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
" stset %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_xor(long i, atomic64_t *v)
@@ -201,7 +201,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
" steor %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_add(long i, atomic64_t *v)
@@ -213,7 +213,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
" stadd %[i], %[v]\n")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
@@ -231,7 +231,7 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
@@ -257,7 +257,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
" stclr %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
static inline void atomic64_sub(long i, atomic64_t *v)
@@ -274,7 +274,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
" stadd %[i], %[v]")
: [i] "+r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
- : __LL_SC_CLOBBERS);
+ : "x30");
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
@@ -294,7 +294,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
@@ -330,7 +330,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
"2:")
: [ret] "+&r" (x0), [v] "+Q" (v->counter)
:
- : __LL_SC_CLOBBERS, "cc", "memory");
+ : "x30", "cc", "memory");
return x0;
}
@@ -359,7 +359,7 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
" mov %" #w "[ret], " #w "30") \
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
: [old] "r" (x1), [new] "r" (x2) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
@@ -416,7 +416,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
[v] "+Q" (*(unsigned long *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : "x30" , ##cl); \
\
return x0; \
}
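
A note on the clobber change above: the out-of-line ll/sc fallback is reached via a "bl", which overwrites the link register, so "x30" must remain in the clobber list even after __LL_SC_CLOBBERS is dropped. A minimal userspace sketch of the same constraint (assuming an AArch64 GCC/Clang target; illustrative, not kernel code):

    #include <stdio.h>

    /* Stand-in for ARM64_LSE_ATOMIC_INSN(): deliberately trash x30 the way
     * a "bl" to an out-of-line helper would, and declare it clobbered so
     * the compiler never keeps a live value there. */
    static long add_one(long v)
    {
            long ret;
            asm volatile(
            "       mov     x30, %1\n"      /* clobber x30 like a bl would */
            "       add     %0, %1, #1\n"
                    : "=r" (ret)
                    : "r" (v)
                    : "x30");               /* the clobber from the hunks above */
            return ret;
    }

    int main(void)
    {
            printf("%ld\n", add_one(41));   /* prints 42 */
            return 0;
    }
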
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index ebf2481889c3..81151b67b26b 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -11,10 +11,4 @@
#define MIN_FDT_ALIGN 8
#define MAX_FDT_SIZE SZ_2M
-/*
- * arm64 requires the kernel image to be placed
- * TEXT_OFFSET bytes beyond a 2 MB aligned base
- */
-#define MIN_KIMG_ALIGN SZ_2M
-
#endif
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
deleted file mode 100644
index ed693c5bcec0..000000000000
--- a/arch/arm64/include/asm/brk-imm.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_BRK_IMM_H
-#define __ASM_BRK_IMM_H
-
-/*
- * #imm16 values used for BRK instruction generation
- * Allowed values for kgdb are 0x400 - 0x7ff
- * 0x100: for triggering a fault on purpose (reserved)
- * 0x400: for dynamic BRK instruction
- * 0x401: for compile time BRK instruction
- * 0x800: kernel-mode BUG() and WARN() traps
- */
-#define FAULT_BRK_IMM 0x100
-#define KGDB_DYN_DBG_BRK_IMM 0x400
-#define KGDB_COMPILED_DBG_BRK_IMM 0x401
-#define BUG_BRK_IMM 0x800
-
-#endif
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 561190d15881..4a748ce9ba1a 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -18,7 +18,7 @@
#ifndef _ARCH_ARM64_ASM_BUG_H
#define _ARCH_ARM64_ASM_BUG_H
-#include <asm/brk-imm.h>
+#include <asm/debug-monitors.h>
#ifdef CONFIG_GENERIC_BUG
#define HAVE_ARCH_BUG
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index df06d37374cc..a7d7d360a514 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,7 +73,6 @@ extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 510c7b404454..9ea611ea69df 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,6 +19,7 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
+#include <linux/mmdebug.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 13a6103130cd..b5e9cee4b5f8 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -36,7 +36,6 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64isar1;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
- u64 reg_id_aa64mmfr2;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 44e8b3d76fb7..2e73ef7b9239 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,11 +30,8 @@
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_NCAPS 11
+#define ARM64_NCAPS 8
#ifndef __ASSEMBLY__
@@ -181,7 +178,7 @@ u64 read_system_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
- return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
+ return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
static inline bool system_supports_mixed_endian_el0(void)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 1ddc9c930097..a2014784cab2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -36,6 +36,12 @@
#define MMFR0_16KGRAN_SHFT 20
#define MMFR0_EL1_16KGRAN_MASK (MMFR0_16KGRAN_SIZE << MMFR0_16KGRAN_SHFT)
+#define read_cpuid(reg) ({ \
+ u64 __val; \
+ asm("mrs %0, " #reg : "=r" (__val)); \
+ __val; \
+})
+
#define MIDR_REVISION_MASK 0xf
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
#define MIDR_PARTNUM_SHIFT 4
@@ -55,22 +61,11 @@
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
-#define MIDR_CPU_MODEL(imp, partnum) \
+#define MIDR_CPU_PART(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
-#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
- MIDR_ARCHITECTURE_MASK)
-
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
-({ \
- u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
- u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
- \
- _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
- })
-
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
@@ -88,22 +83,8 @@
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
-#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-#define MIDR_KRYO2XX_SILVER \
- MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
-
#ifndef __ASSEMBLY__
-#include <asm/sysreg.h>
-
-#define read_cpuid(reg) ({ \
- u64 __val; \
- asm("mrs_s %0, " __stringify(reg) : "=r" (__val)); \
- __val; \
-})
-
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -111,12 +92,12 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(SYS_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(SYS_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -131,7 +112,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(SYS_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
#endif /* __ASSEMBLY__ */
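
For context on the read_cpuid() change above: the restored macro stringizes the register name directly into an mrs instruction, instead of going through the SYS_* sysreg encodings. A hedged sketch of the expansion (illustrative only; MIDR_EL1 is an EL1 register, so this pattern is only meaningful in kernel context):

    typedef unsigned long long u64;

    #define read_cpuid(reg) ({                              \
            u64 __val;                                      \
            asm("mrs %0, " #reg : "=r" (__val));            \
            __val;                                          \
    })

    static u64 read_midr(void)
    {
            /* expands to: asm("mrs %0, MIDR_EL1" : "=r" (__val)); */
            return read_cpuid(MIDR_EL1);
    }
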
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7c876c..279c85b5ec09 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <asm/brk-imm.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/ptrace.h>
@@ -48,6 +47,19 @@
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
+ */
+#define FAULT_BRK_IMM 0x100
+#define KGDB_DYN_DBG_BRK_IMM 0x400
+#define KGDB_COMPILED_DBG_BRK_IMM 0x401
+#define BUG_BRK_IMM 0x800
+
+/*
* BRK instruction encoding
* The #imm16 value should be placed at bits[20:5] within BRK ins
*/
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 24ed037f09fd..faad6df49e5b 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -24,6 +24,15 @@
#include <asm/ptrace.h>
#include <asm/user.h>
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef struct user_fpsimd_state elf_fpregset_t;
+
/*
* AArch64 static relocation types.
*/
@@ -77,8 +86,6 @@
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
-#define R_AARCH64_RELATIVE 1027
-
/*
* These are used to set parameters in the core dumps.
*/
@@ -120,17 +127,6 @@
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
-#ifndef __ASSEMBLY__
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
-#define ELF_CORE_COPY_REGS(dest, regs) \
- *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
-
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef struct user_fpsimd_state elf_fpregset_t;
-
/*
* When the program starts, a1 contains a pointer to a function to be
* registered with atexit, as per the SVR4 ABI. A value of 0 means we have no
@@ -190,6 +186,4 @@ extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
#endif /* CONFIG_COMPAT */
-#endif /* !__ASSEMBLY__ */
-
#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 1a617d46fce9..309704544d22 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -62,16 +62,6 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-
- /*
- * Used for kernel page table creation, so unmapped memory may be used
- * for tables.
- */
- FIX_PTE,
- FIX_PMD,
- FIX_PUD,
- FIX_PGD,
-
__end_of_fixed_addresses
};
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 3c60f37e48ab..c5534facf941 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -28,8 +28,6 @@ struct dyn_arch_ftrace {
extern unsigned long ftrace_graph_call;
-extern void return_to_handler(void);
-
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cdd32c2..007a69fc4f40 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -42,8 +42,10 @@
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
+" .pushsection __ex_table,\"a\"\n" \
+" .align 3\n" \
+" .quad 1b, 4b, 2b, 4b\n" \
+" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
@@ -119,7 +121,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
@@ -132,9 +133,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
"4: mov %w0, %w6\n"
" b 3b\n"
" .popsection\n"
- _ASM_EXTABLE(1b, 4b)
- _ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+" .pushsection __ex_table,\"a\"\n"
+" .align 3\n"
+" .quad 1b, 4b, 2b, 4b\n"
+" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
: "memory");
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index bbc1e35aa601..bb4052e85dba 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -26,7 +26,36 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
@@ -68,19 +97,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
clear_bit(PG_dcache_clean, &page->flags);
}
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
-#define arch_make_huge_pte arch_make_huge_pte
-extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty);
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
-extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
-extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
-
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 3e1c0e7ef082..a8464c18ef88 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,45 +1,10 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
-#define IRQ_STACK_SIZE THREAD_SIZE
-#define IRQ_STACK_START_SP THREAD_START_SP
-
-#ifndef __ASSEMBLER__
-
-#include <linux/percpu.h>
-
#include <asm-generic/irq.h>
-#include <asm/thread_info.h>
struct pt_regs;
-DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-
-/*
- * The highest address on the stack, and the first to be used. Used to
- * find the dummy-stack frame put down by el?_irq() in entry.S, which
- * is structured as follows:
- *
- * ------------
- * | | <- irq_stack_ptr
- * top ------------
- * | x19 | <- irq_stack_ptr - 0x08
- * ------------
- * | x29 | <- irq_stack_ptr - 0x10
- * ------------
- *
- * where x19 holds a copy of the task stack pointer where the struct pt_regs
- * from kernel_entry can be found.
- *
- */
-#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
-
-/*
- * The offset from irq_stack_ptr where entry.S will store the original
- * stack pointer. Used by unwind_frame() and dump_backtrace().
- */
-#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
-
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
static inline int nr_legacy_irqs(void)
@@ -50,14 +15,4 @@ static inline int nr_legacy_irqs(void)
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-static inline bool on_irq_stack(unsigned long sp, int cpu)
-{
- /* variable names the same as kernel/stacktrace.c */
- unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
- unsigned long high = low + IRQ_STACK_START_SP;
-
- return (low <= sp && sp <= high);
-}
-
-#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 71ad0f93eb71..2774fa384c47 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -7,14 +7,13 @@
#include <linux/linkage.h>
#include <asm/memory.h>
-#include <asm/pgtable-types.h>
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
*/
#define KASAN_SHADOW_START (VA_START)
-#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
/*
* This value is used to map an address to the corresponding shadow
@@ -29,12 +28,10 @@
#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
void kasan_init(void);
-void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);
#else
static inline void kasan_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif
#endif
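
The KASAN hunk above re-expresses KASAN_SHADOW_END without the KASAN_SHADOW_SIZE helper; the arithmetic is unchanged. One shadow byte covers 2^3 = 8 bytes of kernel VA, so the shadow region is 1/8th of the kernel half of the address space. A worked sketch, assuming VA_BITS = 48 purely for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VA_BITS             48
    #define VA_START            (0xffffffffffffffffULL << VA_BITS)
    #define KASAN_SHADOW_START  VA_START
    #define KASAN_SHADOW_END    (KASAN_SHADOW_START + (1ULL << (VA_BITS - 3)))
    #define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))

    /* the standard KASAN address-to-shadow mapping */
    static uint64_t shadow_of(uint64_t addr)
    {
            return (addr >> 3) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
            /* sanity check: the first kernel VA maps to the first shadow byte */
            printf("shadow(VA_START)   = %#" PRIx64 "\n", shadow_of(VA_START));
            printf("KASAN_SHADOW_START = %#" PRIx64 "\n",
                   (uint64_t)KASAN_SHADOW_START);
            return 0;
    }
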
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 5c6375d8528b..a459714ee29e 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -79,17 +79,5 @@
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif
-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define ARM64_MEMSTART_ALIGN SZ_512M
-#else
-#define ARM64_MEMSTART_ALIGN SZ_1G
-#endif
#endif /* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 419bc6661b5c..5e377101f919 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -102,8 +102,6 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
-#define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset)
-
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 90c6368ad7c8..a35ce7266aac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -222,7 +222,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
-u64 __kvm_call_hyp(void *hypfn, ...);
+u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -243,8 +243,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
* Call initialization code, and switch to the full blown
* HYP code.
*/
- __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
- hyp_stack_ptr, vector_ptr);
+ kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+ hyp_stack_ptr, vector_ptr);
}
static inline void kvm_arch_hardware_disable(void) {}
@@ -258,6 +258,4 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
-#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
-
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..3de42d68611d 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -26,7 +26,6 @@ __asm__(".arch_extension lse");
/* Macro for constructing calls to out-of-line ll/sc atomics */
#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
-#define __LL_SC_CLOBBERS "x16", "x17", "x30"
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 12f8a00fb3f1..853953cd1f08 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -24,7 +24,6 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
-#include <asm/bug.h>
#include <asm/sizes.h>
/*
@@ -46,15 +45,15 @@
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) << VA_BITS)
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define KIMAGE_VADDR (MODULES_END)
-#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
-#define MODULES_VSIZE (SZ_128M)
-#define PCI_IO_END (PAGE_OFFSET - SZ_2M)
+#define MODULES_END (PAGE_OFFSET)
+#define MODULES_VADDR (MODULES_END - SZ_64M)
+#define PCI_IO_END (MODULES_VADDR - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
@@ -72,27 +71,12 @@
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
/*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
- */
-#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
-#else
-#define KASAN_SHADOW_SIZE (0)
-#endif
-
-/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
-#define __virt_to_phys(x) ({ \
- phys_addr_t __x = (phys_addr_t)(x); \
- __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
- (__x - kimage_voffset); })
-
-#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
-#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
+#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
+#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
/*
* Convert a page to/from a physical address
@@ -116,40 +100,19 @@
#define MT_S2_NORMAL 0xf
#define MT_S2_DEVICE_nGnRE 0x1
-#ifdef CONFIG_ARM64_4K_PAGES
-#define IOREMAP_MAX_ORDER (PUD_SHIFT)
-#else
-#define IOREMAP_MAX_ORDER (PMD_SHIFT)
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-#define __early_init_dt_declare_initrd(__start, __end) \
- do { \
- initrd_start = (__start); \
- initrd_end = (__end); \
- } while (0)
-#endif
-
#ifndef __ASSEMBLY__
-#include <linux/bitops.h>
-#include <linux/mmdebug.h>
-
-extern s64 memstart_addr;
+extern phys_addr_t memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
-#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
-
-/* the virtual base of the kernel image (minus TEXT_OFFSET) */
-extern u64 kimage_vaddr;
-
-/* the offset between the kernel virtual and physical mappings */
-extern u64 kimage_voffset;
+#define PHYS_OFFSET ({ memstart_addr; })
/*
- * Allow all memory at the discovery stage. We will clip it later.
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
*/
-#define MIN_MEMBLOCK_ADDR 0
-#define MAX_MEMBLOCK_ADDR U64_MAX
+#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
/*
* PFNs are used to describe any physical page; this means
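
The memory.h hunk above restores the simple linear-map conversion, where virtual and physical addresses differ by a single constant (PAGE_OFFSET - PHYS_OFFSET). A runnable sketch with illustrative values (VA_BITS = 48 and a DRAM base of 0x80000000 are assumptions, not taken from this patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VA_BITS     48
    #define PAGE_OFFSET (0xffffffffffffffffULL << (VA_BITS - 1))
    #define PHYS_OFFSET 0x80000000ULL   /* assumed start of RAM */

    static uint64_t virt_to_phys(uint64_t va) { return va - PAGE_OFFSET + PHYS_OFFSET; }
    static uint64_t phys_to_virt(uint64_t pa) { return pa - PHYS_OFFSET + PAGE_OFFSET; }

    int main(void)
    {
            uint64_t pa = 0x80200000ULL;
            uint64_t va = phys_to_virt(pa);

            /* round-trips back to the same physical address */
            printf("pa %#" PRIx64 " -> va %#" PRIx64 " -> pa %#" PRIx64 "\n",
                   pa, va, virt_to_phys(va));
            return 0;
    }
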
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a00f7cf35bbd..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -27,7 +27,6 @@
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
@@ -49,7 +48,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = virt_to_phys(empty_zero_page);
+ unsigned long ttbr = page_to_phys(empty_zero_page);
asm(
" msr ttbr0_el1, %0 // set TTBR0\n"
@@ -74,7 +73,7 @@ static inline bool __cpu_uses_extended_idmap(void)
/*
* Set TCR.T0SZ to its default value (based on VA_BITS)
*/
-static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
+static inline void cpu_set_default_tcr_t0sz(void)
{
unsigned long tcr;
@@ -87,62 +86,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
" msr tcr_el1, %0 ;"
" isb"
: "=&r" (tcr)
- : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
-}
-
-#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
-#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
-
-/*
- * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
- *
- * The idmap lives in the same VA range as userspace, but uses global entries
- * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
- * speculative TLB fetches, we must temporarily install the reserved page
- * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
- *
- * If current is a not a user task, the mm covers the TTBR1_EL1 page tables,
- * which should not be installed in TTBR0_EL1. In this case we can leave the
- * reserved page tables in place.
- */
-static inline void cpu_uninstall_idmap(void)
-{
- struct mm_struct *mm = current->active_mm;
-
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_default_tcr_t0sz();
-
- if (mm != &init_mm)
- cpu_switch_mm(mm->pgd, mm);
-}
-
-static inline void cpu_install_idmap(void)
-{
- cpu_set_reserved_ttbr0();
- local_flush_tlb_all();
- cpu_set_idmap_tcr_t0sz();
-
- cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
- * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
- * avoiding the possibility of conflicting TLB entries being allocated.
- */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
-{
- typedef void (ttbr_replace_func)(phys_addr_t);
- extern ttbr_replace_func idmap_cpu_replace_ttbr1;
- ttbr_replace_func *replace_phys;
-
- phys_addr_t pgd_phys = virt_to_phys(pgd);
-
- replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
-
- cpu_install_idmap();
- replace_phys(pgd_phys);
- cpu_uninstall_idmap();
+ : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}
/*
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..e80e232b730e 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -20,21 +20,4 @@
#define MODULE_ARCH_VERMAGIC "aarch64"
-#ifdef CONFIG_ARM64_MODULE_PLTS
-struct mod_arch_specific {
- struct elf64_shdr *plt;
- int plt_num_entries;
- int plt_max_entries;
-};
-#endif
-
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
- Elf64_Sym *sym);
-
-#ifdef CONFIG_RANDOMIZE_BASE
-extern u64 module_alloc_base;
-#else
-#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
-#endif
-
#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index ff98585d085a..c15053902942 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -42,20 +42,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
-{
- set_pud(pud, __pud(pmd | prot));
-}
-
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
- __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
-}
-#else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
-{
- BUILD_BUG();
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}
+
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -71,20 +62,11 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_page((unsigned long)pud);
}
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
-{
- set_pgd(pgdp, __pgd(pud | prot));
-}
-
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
- __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
-}
-#else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
-{
- BUILD_BUG();
+ set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
}
+
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5c25b831273d..d6739e836f7b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -90,23 +90,7 @@
/*
* Contiguous page definitions.
*/
-#ifdef CONFIG_ARM64_64K_PAGES
-#define CONT_PTE_SHIFT 5
-#define CONT_PMD_SHIFT 5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define CONT_PTE_SHIFT 7
-#define CONT_PMD_SHIFT 5
-#else
-#define CONT_PTE_SHIFT 4
-#define CONT_PMD_SHIFT 4
-#endif
-
-#define CONT_PTES (1 << CONT_PTE_SHIFT)
-#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
-#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
-#define CONT_PMDS (1 << CONT_PMD_SHIFT)
-#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
-#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
+#define CONT_PTES (_AC(1, UL) << CONT_SHIFT)
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c3c2518eecfe..eaa9cabf4066 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -36,13 +36,19 @@
*
* VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel vmalloc space
+ * VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
* fixed mappings and modules
*/
#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
-#define VMALLOC_START (MODULES_END)
+#ifndef CONFIG_KASAN
+#define VMALLOC_START (VA_START)
+#else
+#include <asm/kasan.h>
+#define VMALLOC_START (KASAN_SHADOW_END + SZ_64K)
+#endif
+
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define VMEMMAP_START (VMALLOC_END + SZ_64K)
@@ -53,7 +59,6 @@
#ifndef __ASSEMBLY__
-#include <asm/fixmap.h>
#include <linux/mmdebug.h>
extern void __pte_error(const char *file, int line, unsigned long val);
@@ -64,11 +69,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -78,7 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
@@ -118,8 +123,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
@@ -131,6 +136,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+/* Find an entry in the third-level page table. */
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
+
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
@@ -140,7 +155,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
-#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -151,18 +165,10 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
-
-/*
- * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
- * so that we don't erroneously return false for pages that have been
- * remapped as PROT_NONE but are yet to be flushed from the TLB.
- */
-#define pte_accessible(mm, pte) \
- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
@@ -213,8 +219,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
static inline pte_t pte_mkcont(pte_t pte)
{
- pte = set_pte_bit(pte, __pgprot(PTE_CONT));
- return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
+ return set_pte_bit(pte, __pgprot(PTE_CONT));
}
static inline pte_t pte_mknoncont(pte_t pte)
@@ -222,11 +227,6 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
-static inline pmd_t pmd_mkcont(pmd_t pmd)
-{
- return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
-}
-
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
@@ -264,13 +264,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_present(pte)) {
+ if (pte_valid_user(pte)) {
+ if (!pte_special(pte) && pte_exec(pte))
+ __sync_icache_dcache(pte, addr);
if (pte_sw_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
else
pte_val(pte) |= PTE_RDONLY;
- if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
- __sync_icache_dcache(pte, addr);
}
/*
@@ -300,7 +300,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Hugetlb definitions.
*/
-#define HUGE_MAX_HSTATE 4
+#define HUGE_MAX_HSTATE 2
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
@@ -354,7 +354,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
@@ -427,31 +426,13 @@ static inline void pmd_clear(pmd_t *pmdp)
set_pmd(pmdp, __pmd(0));
}
-static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
- return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the third-level page table. */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
-#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
-#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
-#define pte_clear_fixmap() clear_fixmap(FIX_PTE)
-
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-/* use ONLY for statically allocated translation tables */
-#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -478,37 +459,21 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
-static inline phys_addr_t pud_page_paddr(pud_t pud)
+static inline pmd_t *pud_page_vaddr(pud_t pud)
{
- return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
+ return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
-#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
-
-#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
-#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
-#define pmd_clear_fixmap() clear_fixmap(FIX_PMD)
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-/* use ONLY for statically allocated translation tables */
-#define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
-
-#else
-
-#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
-
-/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
-#define pmd_set_fixmap(addr) NULL
-#define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp)
-#define pmd_clear_fixmap()
-
-#define pmd_offset_kimg(dir,addr) ((pmd_t *)dir)
-
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -530,37 +495,21 @@ static inline void pgd_clear(pgd_t *pgdp)
set_pgd(pgdp, __pgd(0));
}
-static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
+static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
- return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}
/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
-#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))
-
-#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
-#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
-#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
+{
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
+}
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
-/* use ONLY for statically allocated translation tables */
-#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
-
-#else
-
-#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})
-
-/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
-#define pud_set_fixmap(addr) NULL
-#define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp)
-#define pud_clear_fixmap()
-
-#define pud_offset_kimg(dir,addr) ((pud_t *)dir)
-
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -568,16 +517,11 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
/* to find an entry in a page-table-directory */
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
-
-#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
-#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -697,7 +641,6 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* bits 0-1: present (must be zero)
* bits 2-7: swap type
* bits 8-57: swap offset
- * bit 58: PTE_PROT_NONE (must be zero)
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
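Per the layout above, a swap entry is just (offset << 8) | (type << 2), with the present bits left zero. A standalone sketch of the pack/unpack arithmetic (shift names mirror the defines, values assumed from the comment):

#include <stdio.h>
#include <stdint.h>

#define SWP_TYPE_SHIFT   2	/* bits 0-1 stay zero (not present) */
#define SWP_TYPE_BITS    6
#define SWP_OFFSET_SHIFT (SWP_TYPE_SHIFT + SWP_TYPE_BITS)

int main(void)
{
	uint64_t e = ((uint64_t)0x1234 << SWP_OFFSET_SHIFT) | (3 << SWP_TYPE_SHIFT);

	printf("type=%llu offset=%#llx\n",
	       (unsigned long long)((e >> SWP_TYPE_SHIFT) & ((1 << SWP_TYPE_BITS) - 1)),
	       (unsigned long long)(e >> SWP_OFFSET_SHIFT));
	return 0;	/* prints type=3 offset=0x1234 */
}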
@@ -723,8 +666,7 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
-void pgd_cache_init(void);
-#define pgtable_cache_init pgd_cache_init
+#define pgtable_cache_init() do { } while (0)
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index c97d01eb219f..c8c7c4d38171 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -29,10 +29,8 @@
#include <linux/string.h>
-#include <asm/alternative.h>
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
-#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -182,11 +180,9 @@ static inline void prefetchw(const void *ptr)
}
#define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *ptr)
+static inline void spin_lock_prefetch(const void *x)
{
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- "prfm pstl1strm, %a0",
- "nop") : : "p" (ptr));
+ prefetchw(x);
}
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -194,6 +190,5 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
index e368a55ebd22..4df608a8459e 100644
--- a/arch/arm64/include/asm/shmparam.h
+++ b/arch/arm64/include/asm/shmparam.h
@@ -21,7 +21,7 @@
* alignment value. Since we don't have aliasing D-caches, the rest of
* the time we can safely use PAGE_SIZE.
*/
-#define COMPAT_SHMLBA (4 * PAGE_SIZE)
+#define COMPAT_SHMLBA 0x4000
#include <asm-generic/shmparam.h>
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2013a4dc5124..c63c432f1a82 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -63,15 +63,7 @@ extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
-#else
-static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- BUILD_BUG();
-}
-#endif
extern int __cpu_disable(void);
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 7b369923b2eb..be4d2f9b7e8c 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,28 +26,9 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval;
- asm volatile(
-" sevl\n"
-"1: wfe\n"
-"2: ldaxr %w0, %2\n"
-" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 1b\n"
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
- /* LSE atomics */
-" nop\n"
-" nop\n")
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- :
- : "memory");
-}
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
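arch_spin_unlock_wait() now simply polls arch_spin_is_locked(). For a ticket lock that test reduces to comparing the two ticket halves; a hedged sketch, assuming the era's two-u16 arch_spinlock_t layout:

struct ticket_lock {		/* stand-in for arch_spinlock_t */
	unsigned short owner;	/* ticket now being served */
	unsigned short next;	/* next ticket to hand out */
};

static inline int ticket_is_locked(const struct ticket_lock *lock)
{
	return lock->owner != lock->next;	/* what arch_spin_is_locked() tests */
}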
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 801a16dbbdf6..7318f6d54aa9 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -16,19 +16,14 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
-struct task_struct;
-
struct stackframe {
unsigned long fp;
unsigned long sp;
unsigned long pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- unsigned int graph;
-#endif
};
-extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
-extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+extern int unwind_frame(struct stackframe *frame);
+extern void walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
#endif /* __ASM_STACKTRACE_H */
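With the task_struct parameter gone, callers seed a struct stackframe themselves and iterate until unwind_frame() returns non-zero. A hedged usage sketch (thread_saved_fp/sp/pc are assumed to be the usual processor.h helpers):

static void dump_task_stack_sketch(struct task_struct *tsk)
{
	struct stackframe frame = {
		.fp = thread_saved_fp(tsk),
		.sp = thread_saved_sp(tsk),
		.pc = thread_saved_pc(tsk),
	};

	do {
		pr_info("pc: %pS\n", (void *)frame.pc);
	} while (!unwind_frame(&frame));	/* non-zero ends the walk */
}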
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b9fd8ec79033..d48ab5b41f52 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -70,19 +70,15 @@
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
-#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
-#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
(!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
- (!!x)<<8 | 0x1f)
/* SCTLR_EL1 */
#define SCTLR_EL1_CP15BEN (0x1 << 5)
@@ -139,9 +135,6 @@
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
-/* id_aa64mmfr2 */
-#define ID_AA64MMFR2_UAO_SHIFT 4
-
/* id_aa64dfr0 */
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
@@ -201,32 +194,32 @@
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
- .equ .L__reg_num_x\num, \num
+ .equ __reg_num_x\num, \num
.endr
- .equ .L__reg_num_xzr, 31
+ .equ __reg_num_xzr, 31
.macro mrs_s, rt, sreg
- .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt)
+ .inst 0xd5200000|(\sreg)|(__reg_num_\rt)
.endm
.macro msr_s, sreg, rt
- .inst 0xd5000000|(\sreg)|(.L__reg_num_\rt)
+ .inst 0xd5000000|(\sreg)|(__reg_num_\rt)
.endm
#else
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-" .equ .L__reg_num_x\\num, \\num\n"
+" .equ __reg_num_x\\num, \\num\n"
" .endr\n"
-" .equ .L__reg_num_xzr, 31\n"
+" .equ __reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
-" .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
+" .inst 0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
-" .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
+" .inst 0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
" .endm\n"
);
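Dropping the .L prefix makes the __reg_num_* symbols visible to the linker but leaves usage unchanged: mrs_s hand-assembles an MRS encoding, splicing in the register number of its operand. A hedged usage sketch:

#include <linux/stringify.h>

static inline u64 read_id_aa64mmfr1_sketch(void)
{
	u64 val;

	/* Expands to ".inst 0xd5200000|<sysreg encoding>|<reg num of %0>". */
	asm volatile("mrs_s %0, " __stringify(SYS_ID_AA64MMFR1_EL1)
		     : "=r" (val));
	return val;
}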
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 4a9e82e4f724..bfef76e14e2d 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -73,16 +73,10 @@ register unsigned long current_stack_pointer asm ("sp");
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
-/*
- * struct thread_info can be accessed directly via sp_el0.
- */
static inline struct thread_info *current_thread_info(void)
{
- unsigned long sp_el0;
-
- asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
- return (struct thread_info *)sp_el0;
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
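This reverts current_thread_info() to stack-pointer masking: kernel stacks are THREAD_SIZE aligned, so clearing the low bits of sp lands on the stack base where thread_info sits. A standalone sketch of the mask (the 16 KiB THREAD_SIZE is an assumption):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL	/* assumed 16 KiB kernel stacks */

int main(void)
{
	uint64_t sp = 0xffff8000081f3e40ULL;	/* any sp within the stack */

	printf("%#llx\n", (unsigned long long)(sp & ~(uint64_t)(THREAD_SIZE - 1)));
	return 0;	/* prints 0xffff8000081f0000 */
}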
#define thread_saved_pc(tsk) \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..b2ede967fe7d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -36,11 +36,11 @@
#define VERIFY_WRITE 1
/*
- * The exception table consists of pairs of relative offsets: the first
- * is the relative offset to an instruction that is allowed to fault,
- * and the second is the relative offset at which the program should
- * continue. No registers are modified, so it is entirely up to the
- * continuation code to figure out what to do.
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
@@ -50,11 +50,9 @@
struct exception_table_entry
{
- int insn, fixup;
+ unsigned long insn, fixup;
};
-#define ARCH_HAS_RELATIVE_EXTABLE
-
extern int fixup_exception(struct pt_regs *regs);
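With absolute insn/fixup addresses, no offset arithmetic is needed at fault time. A hedged sketch of what fixup_exception() typically does under this scheme (search_exception_tables() is the generic helper that searches the sorted __ex_table section):

int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup)
		regs->pc = fixup->fixup;	/* resume at the fixup stub */

	return fixup != NULL;
}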
#define KERNEL_DS (-1UL)
@@ -66,16 +64,6 @@ extern int fixup_exception(struct pt_regs *regs);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
-
- /*
- * Enable/disable UAO so that copy_to_user() etc can access
- * kernel memory with the unprivileged instructions.
- */
- if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
- CONFIG_ARM64_UAO));
}
#define segment_eq(a, b) ((a) == (b))
@@ -117,12 +105,6 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
-#define _ASM_EXTABLE(from, to) \
- " .pushsection __ex_table, \"a\"\n" \
- " .align 3\n" \
- " .long (" #from " - .), (" #to " - .)\n" \
- " .popsection\n"
-
/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
@@ -131,10 +113,9 @@ static inline void set_fs(mm_segment_t fs)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_user_asm(instr, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " instr " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup, \"ax\"\n" \
" .align 2\n" \
@@ -142,7 +123,10 @@ static inline void set_fs(mm_segment_t fs)
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 1b, 3b\n" \
+ " .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
@@ -150,30 +134,26 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
break; \
case 2: \
- __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldrh", "%w", __gu_val, (ptr), (err)); \
break; \
case 4: \
- __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldr", "%w", __gu_val, (ptr), (err)); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_user_asm("ldr", "%", __gu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
} \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
} while (0)
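Because the switch above dispatches on the pointee size, one __get_user() covers 1-, 2-, 4- and 8-byte loads. A hedged usage sketch (the __-prefixed variants assume access_ok() was already performed):

static int read_user_u32_sketch(u32 __user *uptr, u32 *out)
{
	u32 val;
	int err = __get_user(val, uptr);	/* picks the 4-byte "ldr %w" case */

	if (err)
		return err;			/* -EFAULT via the fixup path */

	*out = val;
	return 0;
}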
@@ -201,17 +181,19 @@ do { \
((x) = 0, -EFAULT); \
})
-#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __put_user_asm(instr, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " instr " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %w0, %3\n" \
" b 2b\n" \
" .previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 1b, 3b\n" \
+ " .previous" \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
@@ -219,29 +201,25 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
break; \
case 2: \
- __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("strh", "%w", __pu_val, (ptr), (err)); \
break; \
case 4: \
- __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("str", "%w", __pu_val, (ptr), (err)); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_user_asm("str", "%", __pu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
} \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
} while (0)
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 2b79b8a89457..aab5bf09e9d9 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -16,8 +16,6 @@
#ifndef __ASM_WORD_AT_A_TIME_H
#define __ASM_WORD_AT_A_TIME_H
-#include <asm/uaccess.h>
-
#ifndef __AARCH64EB__
#include <linux/kernel.h>
@@ -83,7 +81,10 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
#endif
" b 2b\n"
" .popsection\n"
- _ASM_EXTABLE(1b, 3b)
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
: "=&r" (ret), "=&r" (offset)
: "r" (addr), "Q" (*(unsigned long *)addr));
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..208db3df135a 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -45,7 +45,6 @@
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
#define PSR_PAN_BIT 0x00400000
-#define PSR_UAO_BIT 0x00800000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f6e0269de4c7..9f7794c5743f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,7 +30,6 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_debug.o \
perf_trace_counters.o \
@@ -44,8 +43,6 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI) += acpi.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
-arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
deleted file mode 100644
index 4b1e5a7a98da..000000000000
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * ARM64 ACPI Parking Protocol implementation
- *
- * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- * Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/acpi.h>
-#include <linux/types.h>
-
-#include <asm/cpu_ops.h>
-
-struct cpu_mailbox_entry {
- phys_addr_t mailbox_addr;
- u8 version;
- u8 gic_cpu_id;
-};
-
-static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
-
-void __init acpi_set_mailbox_entry(int cpu,
- struct acpi_madt_generic_interrupt *p)
-{
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
-
- cpu_entry->mailbox_addr = p->parked_address;
- cpu_entry->version = p->parking_version;
- cpu_entry->gic_cpu_id = p->cpu_interface_number;
-}
-
-bool acpi_parking_protocol_valid(int cpu)
-{
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
-
- return cpu_entry->mailbox_addr && cpu_entry->version;
-}
-
-static int acpi_parking_protocol_cpu_init(unsigned int cpu)
-{
- pr_debug("%s: ACPI parked addr=%llx\n", __func__,
- cpu_mailbox_entries[cpu].mailbox_addr);
-
- return 0;
-}
-
-static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
-{
- return 0;
-}
-
-struct parking_protocol_mailbox {
- __le32 cpu_id;
- __le32 reserved;
- __le64 entry_point;
-};
-
-static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
-{
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
- struct parking_protocol_mailbox __iomem *mailbox;
- __le32 cpu_id;
-
- /*
- * Map mailbox memory with attribute device nGnRE (ie ioremap -
- * this deviates from the parking protocol specifications since
- * the mailboxes are required to be mapped nGnRnE; the attribute
- * discrepancy is harmless insofar as the protocol specification
- * is concerned).
- * If the mailbox is mistakenly allocated in the linear mapping
- * by FW ioremap will fail since the mapping will be prevented
- * by the kernel (it clashes with the linear mapping attributes
- * specifications).
- */
- mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
- if (!mailbox)
- return -EIO;
-
- cpu_id = readl_relaxed(&mailbox->cpu_id);
- /*
- * Check if firmware has set-up the mailbox entry properly
- * before kickstarting the respective cpu.
- */
- if (cpu_id != ~0U) {
- iounmap(mailbox);
- return -ENXIO;
- }
-
- /*
- * We write the entry point and cpu id as LE regardless of the
- * native endianness of the kernel. Therefore, any boot-loaders
- * that read this address need to convert this address to the
- * Boot-Loader's endianness before jumping.
- */
- writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
- writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
-
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
-
- iounmap(mailbox);
-
- return 0;
-}
-
-static void acpi_parking_protocol_cpu_postboot(void)
-{
- int cpu = smp_processor_id();
- struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
- struct parking_protocol_mailbox __iomem *mailbox;
- __le64 entry_point;
-
- /*
- * Map mailbox memory with attribute device nGnRE (ie ioremap -
- * this deviates from the parking protocol specifications since
- * the mailboxes are required to be mapped nGnRnE; the attribute
- * discrepancy is harmless insofar as the protocol specification
- * is concerned).
- * If the mailbox is mistakenly allocated in the linear mapping
- * by FW ioremap will fail since the mapping will be prevented
- * by the kernel (it clashes with the linear mapping attributes
- * specifications).
- */
- mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
- if (!mailbox)
- return;
-
- entry_point = readl_relaxed(&mailbox->entry_point);
- /*
- * Check if firmware has cleared the entry_point as expected
- * by the protocol specification.
- */
- WARN_ON(entry_point);
-
- iounmap(mailbox);
-}
-
-const struct cpu_operations acpi_parking_protocol_ops = {
- .name = "parking-protocol",
- .cpu_init = acpi_parking_protocol_cpu_init,
- .cpu_prepare = acpi_parking_protocol_cpu_prepare,
- .cpu_boot = acpi_parking_protocol_cpu_boot,
- .cpu_postboot = acpi_parking_protocol_cpu_postboot
-};
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index e6cb1dc63a2a..47a8caf28bcc 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -158,3 +158,9 @@ void apply_alternatives(void *start, size_t length)
__apply_alternatives(&region);
}
+
+void free_alternatives_memory(void)
+{
+ free_reserved_area(__alt_instructions, __alt_instructions_end,
+ 0, "alternatives");
+}
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c37202c0c838..937f5e58a4d3 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -62,7 +62,7 @@ struct insn_emulation {
};
static LIST_HEAD(insn_emulation);
-static int nr_insn_emulated __initdata;
+static int nr_insn_emulated;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
@@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
return ret;
}
-static void __init register_insn_emulation(struct insn_emulation_ops *ops)
+static void register_insn_emulation(struct insn_emulation_ops *ops)
{
unsigned long flags;
struct insn_emulation *insn;
@@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = {
{ }
};
-static void __init register_insn_emulation_sysctl(struct ctl_table *table)
+static void register_insn_emulation_sysctl(struct ctl_table *table)
{
unsigned long flags;
int i = 0;
@@ -297,8 +297,11 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection" \
- _ASM_EXTABLE(0b, 4b) \
- _ASM_EXTABLE(1b, 4b) \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 0b, 4b\n" \
+ " .quad 1b, 4b\n" \
+ " .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1555520bbb74..34aa4b3b47e9 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -21,12 +21,26 @@
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+ MIDR_CPU_PART(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+
+#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
- return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
- entry->midr_range_min,
- entry->midr_range_max);
+ u32 midr = read_cpuid_id();
+
+ if ((midr & CPU_MODEL_MASK) != entry->midr_model)
+ return false;
+
+ midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+
+ return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
}
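The open-coded matcher splits the MIDR into a model part (implementer | architecture | part number) and a variant/revision part compared numerically against the range. A standalone sketch of the decomposition (masks written out per the MIDR_EL1 field layout; the example value is illustrative):

#include <stdio.h>
#include <stdint.h>

#define MODEL_MASK   0xff0ffff0u	/* implementer | architecture | partnum */
#define VAR_REV_MASK 0x00f0000fu	/* variant | revision */

int main(void)
{
	uint32_t midr = 0x411fd072;	/* e.g. a Cortex-A57 r1p2 */

	printf("model   = %#x\n", midr & MODEL_MASK);	/* 0x410fd070 */
	printf("var/rev = %#x\n", midr & VAR_REV_MASK);	/* 0x00100002 */
	return 0;
}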
#define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index c7cfb8fe06f9..b6bd7d447768 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -25,30 +25,19 @@
#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops;
-extern const struct cpu_operations acpi_parking_protocol_ops;
extern const struct cpu_operations cpu_psci_ops;
const struct cpu_operations *cpu_ops[NR_CPUS];
-static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
&smp_spin_table_ops,
&cpu_psci_ops,
NULL,
};
-static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
- &acpi_parking_protocol_ops,
-#endif
- &cpu_psci_ops,
- NULL,
-};
-
static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
- const struct cpu_operations **ops;
-
- ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
+ const struct cpu_operations **ops = supported_cpu_ops;
while (*ops) {
if (!strcmp(name, (*ops)->name))
@@ -86,16 +75,8 @@ static const char *__init cpu_read_enable_method(int cpu)
}
} else {
enable_method = acpi_get_enable_method(cpu);
- if (!enable_method) {
- /*
- * In ACPI systems the boot CPU does not require
- * checking the enable method since for some
- * boot protocol (ie parking protocol) it need not
- * be initialized. Don't warn spuriously.
- */
- if (cpu != 0)
- pr_err("Unsupported ACPI enable-method\n");
- }
+ if (!enable_method)
+ pr_err("Unsupported ACPI enable-method\n");
}
return enable_method;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7566cad9fa1d..0669c63281ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -67,10 +67,6 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
.width = 0, \
}
-/* meta feature for alternatives */
-static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
-
static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -127,11 +123,6 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_END,
};
-static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
- ARM64_FTR_END,
-};
-
static struct arm64_ftr_bits ftr_ctr[] = {
U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
@@ -293,7 +284,6 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
- ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
/* Op1 = 3, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -418,7 +408,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
- init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -528,8 +517,6 @@ void update_cpu_features(int cpu,
info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
- taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
- info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
/*
* EL3 is not our concern.
@@ -634,18 +621,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
return has_sre;
}
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
-{
- u32 midr = read_cpuid_id();
- u32 rv_min, rv_max;
-
- /* Cavium ThunderX pass 1.x and 2.x */
- rv_min = 0;
- rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
-
- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
-}
-
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -676,28 +651,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 2,
},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
- {
- .desc = "Software prefetching using PRFM",
- .capability = ARM64_HAS_NO_HW_PREFETCH,
- .matches = has_no_hw_prefetch,
- },
-#ifdef CONFIG_ARM64_UAO
- {
- .desc = "User Access Override",
- .capability = ARM64_HAS_UAO,
- .matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .field_pos = ID_AA64MMFR2_UAO_SHIFT,
- .min_field_value = 1,
- .enable = cpu_enable_uao,
- },
-#endif /* CONFIG_ARM64_UAO */
-#ifdef CONFIG_ARM64_PAN
- {
- .capability = ARM64_ALT_PAN_NOT_UAO,
- .matches = cpufeature_pan_not_uao,
- },
-#endif /* CONFIG_ARM64_PAN */
{},
};
@@ -731,7 +684,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
{},
};
-static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -776,12 +729,12 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
return rc;
}
-static void __init setup_cpu_hwcaps(void)
+static void setup_cpu_hwcaps(void)
{
int i;
const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
- for (i = 0; hwcaps[i].matches; i++)
+ for (i = 0; hwcaps[i].desc; i++)
if (hwcaps[i].matches(&hwcaps[i]))
cap_set_hwcap(&hwcaps[i]);
}
@@ -791,11 +744,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
{
int i;
- for (i = 0; caps[i].matches; i++) {
+ for (i = 0; caps[i].desc; i++) {
if (!caps[i].matches(&caps[i]))
continue;
- if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
+ if (!cpus_have_cap(caps[i].capability))
pr_info("%s %s\n", info, caps[i].desc);
cpus_set_cap(caps[i].capability);
}
@@ -805,12 +758,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
int i;
- for (i = 0; caps[i].matches; i++)
+ for (i = 0; caps[i].desc; i++)
if (caps[i].enable && cpus_have_cap(caps[i].capability))
on_each_cpu(caps[i].enable, NULL, true);
}
@@ -838,36 +790,35 @@ static inline void set_sys_caps_initialised(void)
static u64 __raw_read_system_reg(u32 sys_id)
{
switch (sys_id) {
- case SYS_ID_PFR0_EL1: return read_cpuid(SYS_ID_PFR0_EL1);
- case SYS_ID_PFR1_EL1: return read_cpuid(SYS_ID_PFR1_EL1);
- case SYS_ID_DFR0_EL1: return read_cpuid(SYS_ID_DFR0_EL1);
- case SYS_ID_MMFR0_EL1: return read_cpuid(SYS_ID_MMFR0_EL1);
- case SYS_ID_MMFR1_EL1: return read_cpuid(SYS_ID_MMFR1_EL1);
- case SYS_ID_MMFR2_EL1: return read_cpuid(SYS_ID_MMFR2_EL1);
- case SYS_ID_MMFR3_EL1: return read_cpuid(SYS_ID_MMFR3_EL1);
- case SYS_ID_ISAR0_EL1: return read_cpuid(SYS_ID_ISAR0_EL1);
- case SYS_ID_ISAR1_EL1: return read_cpuid(SYS_ID_ISAR1_EL1);
- case SYS_ID_ISAR2_EL1: return read_cpuid(SYS_ID_ISAR2_EL1);
- case SYS_ID_ISAR3_EL1: return read_cpuid(SYS_ID_ISAR3_EL1);
- case SYS_ID_ISAR4_EL1: return read_cpuid(SYS_ID_ISAR4_EL1);
- case SYS_ID_ISAR5_EL1: return read_cpuid(SYS_ID_ISAR4_EL1);
- case SYS_MVFR0_EL1: return read_cpuid(SYS_MVFR0_EL1);
- case SYS_MVFR1_EL1: return read_cpuid(SYS_MVFR1_EL1);
- case SYS_MVFR2_EL1: return read_cpuid(SYS_MVFR2_EL1);
-
- case SYS_ID_AA64PFR0_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1);
- case SYS_ID_AA64PFR1_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1);
- case SYS_ID_AA64DFR0_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1);
- case SYS_ID_AA64DFR1_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1);
- case SYS_ID_AA64MMFR0_EL1: return read_cpuid(SYS_ID_AA64MMFR0_EL1);
- case SYS_ID_AA64MMFR1_EL1: return read_cpuid(SYS_ID_AA64MMFR1_EL1);
- case SYS_ID_AA64MMFR2_EL1: return read_cpuid(SYS_ID_AA64MMFR2_EL1);
- case SYS_ID_AA64ISAR0_EL1: return read_cpuid(SYS_ID_AA64ISAR0_EL1);
- case SYS_ID_AA64ISAR1_EL1: return read_cpuid(SYS_ID_AA64ISAR1_EL1);
-
- case SYS_CNTFRQ_EL0: return read_cpuid(SYS_CNTFRQ_EL0);
- case SYS_CTR_EL0: return read_cpuid(SYS_CTR_EL0);
- case SYS_DCZID_EL0: return read_cpuid(SYS_DCZID_EL0);
+ case SYS_ID_PFR0_EL1: return (u64)read_cpuid(ID_PFR0_EL1);
+ case SYS_ID_PFR1_EL1: return (u64)read_cpuid(ID_PFR1_EL1);
+ case SYS_ID_DFR0_EL1: return (u64)read_cpuid(ID_DFR0_EL1);
+ case SYS_ID_MMFR0_EL1: return (u64)read_cpuid(ID_MMFR0_EL1);
+ case SYS_ID_MMFR1_EL1: return (u64)read_cpuid(ID_MMFR1_EL1);
+ case SYS_ID_MMFR2_EL1: return (u64)read_cpuid(ID_MMFR2_EL1);
+ case SYS_ID_MMFR3_EL1: return (u64)read_cpuid(ID_MMFR3_EL1);
+ case SYS_ID_ISAR0_EL1: return (u64)read_cpuid(ID_ISAR0_EL1);
+ case SYS_ID_ISAR1_EL1: return (u64)read_cpuid(ID_ISAR1_EL1);
+ case SYS_ID_ISAR2_EL1: return (u64)read_cpuid(ID_ISAR2_EL1);
+ case SYS_ID_ISAR3_EL1: return (u64)read_cpuid(ID_ISAR3_EL1);
+ case SYS_ID_ISAR4_EL1: return (u64)read_cpuid(ID_ISAR4_EL1);
+ case SYS_ID_ISAR5_EL1: return (u64)read_cpuid(ID_ISAR5_EL1);
+ case SYS_MVFR0_EL1: return (u64)read_cpuid(MVFR0_EL1);
+ case SYS_MVFR1_EL1: return (u64)read_cpuid(MVFR1_EL1);
+ case SYS_MVFR2_EL1: return (u64)read_cpuid(MVFR2_EL1);
+
+ case SYS_ID_AA64PFR0_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1);
+ case SYS_ID_AA64PFR1_EL1: return (u64)read_cpuid(ID_AA64PFR1_EL1);
+ case SYS_ID_AA64DFR0_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1);
+ case SYS_ID_AA64DFR1_EL1: return (u64)read_cpuid(ID_AA64DFR1_EL1);
+ case SYS_ID_AA64MMFR0_EL1: return (u64)read_cpuid(ID_AA64MMFR0_EL1);
+ case SYS_ID_AA64MMFR1_EL1: return (u64)read_cpuid(ID_AA64MMFR1_EL1);
+ case SYS_ID_AA64ISAR0_EL1: return (u64)read_cpuid(ID_AA64ISAR0_EL1);
+ case SYS_ID_AA64ISAR1_EL1: return (u64)read_cpuid(ID_AA64ISAR1_EL1);
+
+ case SYS_CNTFRQ_EL0: return (u64)read_cpuid(CNTFRQ_EL0);
+ case SYS_CTR_EL0: return (u64)read_cpuid(CTR_EL0);
+ case SYS_DCZID_EL0: return (u64)read_cpuid(DCZID_EL0);
default:
BUG();
return 0;
@@ -917,7 +868,7 @@ void verify_local_cpu_capabilities(void)
return;
caps = arm64_features;
- for (i = 0; caps[i].matches; i++) {
+ for (i = 0; caps[i].desc; i++) {
if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
continue;
/*
@@ -930,7 +881,7 @@ void verify_local_cpu_capabilities(void)
caps[i].enable(NULL);
}
- for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
+ for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
if (!cpus_have_hwcap(&caps[i]))
continue;
if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
@@ -946,7 +897,7 @@ static inline void set_sys_caps_initialised(void)
#endif /* CONFIG_HOTPLUG_CPU */
-static void __init setup_feature_capabilities(void)
+static void setup_feature_capabilities(void)
{
update_cpu_capabilities(arm64_features, "detected feature:");
enable_cpu_capabilities(arm64_features);
@@ -976,9 +927,3 @@ void __init setup_cpu_features(void)
pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
L1_CACHE_BYTES, cls);
}
-
-static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
-{
- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
-}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 48281e4a719f..3691553f218e 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -211,41 +211,40 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
info->reg_ctr = read_cpuid_cachetype();
- info->reg_dczid = read_cpuid(SYS_DCZID_EL0);
+ info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
- info->reg_id_aa64dfr0 = read_cpuid(SYS_ID_AA64DFR0_EL1);
- info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1);
- info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1);
- info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1);
+ info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+ info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
+ info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+ info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
/*
* Explicitly mask out the 16KB granule since we do not
* want to support it
*/
- info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1) &
+ info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1) &
(~MMFR0_EL1_16KGRAN_MASK);
- info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1);
- info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1);
- info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
- info->reg_id_aa64pfr1 = read_cpuid(SYS_ID_AA64PFR1_EL1);
-
- info->reg_id_dfr0 = read_cpuid(SYS_ID_DFR0_EL1);
- info->reg_id_isar0 = read_cpuid(SYS_ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(SYS_ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(SYS_ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(SYS_ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(SYS_ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(SYS_ID_ISAR5_EL1);
- info->reg_id_mmfr0 = read_cpuid(SYS_ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(SYS_ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(SYS_ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(SYS_ID_MMFR3_EL1);
- info->reg_id_pfr0 = read_cpuid(SYS_ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(SYS_ID_PFR1_EL1);
-
- info->reg_mvfr0 = read_cpuid(SYS_MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(SYS_MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(SYS_MVFR2_EL1);
+ info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
cpuinfo_detect_icache_policy(info);
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index f82036e02485..a773db92908b 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,7 @@ ENTRY(entry)
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
- movz x21, #:abs_g0:stext_offset
+ ldr x21, =stext_offset
add x21, x0, x21
/*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b67e70c34888..e3131b39fbf2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -27,7 +27,6 @@
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
-#include <asm/irq.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -89,12 +88,9 @@
.if \el == 0
mrs x21, sp_el0
- mov tsk, sp
- and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
+ get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
-
- mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
.endif
@@ -112,13 +108,6 @@
.endif
/*
- * Set sp_el0 to current thread_info.
- */
- .if \el == 0
- msr sp_el0, tsk
- .endif
-
- /*
* Registers that may be useful after this macro is invoked:
*
* x21 - aborted SP
@@ -175,44 +164,8 @@ alternative_endif
.endm
.macro get_thread_info, rd
- mrs \rd, sp_el0
- .endm
-
- .macro irq_stack_entry
- mov x19, sp // preserve the original sp
-
- /*
- * Compare sp with the current thread_info, if the top
- * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
- * should switch to the irq stack.
- */
- and x25, x19, #~(THREAD_SIZE - 1)
- cmp x25, tsk
- b.ne 9998f
-
- this_cpu_ptr irq_stack, x25, x26
- mov x26, #IRQ_STACK_START_SP
- add x26, x25, x26
-
- /* switch to the irq stack */
- mov sp, x26
-
- /*
- * Add a dummy stack frame, this non-standard format is fixed up
- * by unwind_frame()
- */
- stp x29, x19, [sp, #-16]!
- mov x29, sp
-
-9998:
- .endm
-
- /*
- * x19 should be preserved between irq_stack_entry and
- * irq_stack_exit.
- */
- .macro irq_stack_exit
- mov sp, x19
+ mov \rd, sp
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
@@ -230,11 +183,10 @@ tsk .req x28 // current thread_info
* Interrupt handling.
*/
.macro irq_handler
- ldr_l x1, handle_arch_irq
+ adrp x1, handle_arch_irq
+ ldr x1, [x1, #:lo12:handle_arch_irq]
mov x0, sp
- irq_stack_entry
blr x1
- irq_stack_exit
.endm
.text
@@ -406,10 +358,10 @@ el1_irq:
bl trace_hardirqs_off
#endif
- get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
+ get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
@@ -674,8 +626,6 @@ ENTRY(cpu_switch_to)
mov v15.16b, v15.16b
#endif
mov sp, x9
- and x9, x9, #~(THREAD_SIZE - 1)
- msr sp_el0, x9
ret
ENDPROC(cpu_switch_to)
@@ -703,14 +653,14 @@ ret_fast_syscall_trace:
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
+ ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
+ tst x2, #PSR_MODE_MASK // user mode regs?
+ b.ne no_work_pending // returning to kernel
enable_irq // enable interrupts for do_notify_resume()
bl do_notify_resume
b ret_to_user
work_resched:
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off // the IRQs are off here, inform the tracing code
-#endif
bl schedule
/*
@@ -722,6 +672,7 @@ ret_to_user:
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
enable_step_tsk x1, x2
+no_work_pending:
kernel_exit 0
ENDPROC(ret_to_user)
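The new S_PSTATE test works because EL0t is mode value zero, so "tst x2, #PSR_MODE_MASK" amounts to a came-from-user check. The same predicate in C, as a hedged sketch:

#include <stdint.h>

#define PSR_MODE_MASK 0x0000000fUL
#define PSR_MODE_EL0t 0x00000000UL

/* Mirrors "tst x2, #PSR_MODE_MASK; b.ne no_work_pending" above. */
static inline int returning_to_user(uint64_t pstate)
{
	return (pstate & PSR_MODE_MASK) == PSR_MODE_EL0t;
}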
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index acc1afd5c749..4c46c54a3ad7 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -289,7 +289,7 @@ static struct notifier_block fpsimd_cpu_pm_notifier_block = {
.notifier_call = fpsimd_cpu_pm_notifier,
};
-static void __init fpsimd_pm_init(void)
+static void fpsimd_pm_init(void)
{
cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index ebecf9aa33d1..c851be795080 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -29,11 +29,12 @@ static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
/*
* Note:
- * We are paranoid about modifying text, as if a bug were to happen, it
- * could cause us to read or write to someplace that could cause harm.
- * Carefully read and modify the code with aarch64_insn_*() which uses
- * probe_kernel_*(), and make sure what we read is what we expected it
- * to be before modifying it.
+ * Due to modules and __init, code can disappear and change at runtime,
+ * so we need to protect against faulting as well as code changing.
+ * We do this with the aarch64_insn_*() helpers, which use probe_kernel_*().
+ *
+ * No lock is held here because all the modifications are run
+ * through stop_machine().
*/
if (validate) {
if (aarch64_insn_read((void *)pc, &replaced))
@@ -92,11 +93,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(pc, old, new, true);
}
-void arch_ftrace_update_code(int command)
-{
- ftrace_modify_all_code(command);
-}
-
int __init ftrace_dyn_arch_init(void)
{
return 0;
@@ -129,20 +125,23 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
* on other archs. It's unlikely on AArch64.
*/
old = *parent;
+ *parent = return_hooker;
trace.func = self_addr;
trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
+ if (!ftrace_graph_entry(&trace)) {
+ *parent = old;
return;
+ }
err = ftrace_push_return_trace(old, self_addr, &trace.depth,
frame_pointer);
- if (err == -EBUSY)
+ if (err == -EBUSY) {
+ *parent = old;
return;
- else
- *parent = return_hooker;
+ }
}
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a88a15447c3b..b685257926f0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -29,7 +29,6 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
-#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -68,11 +67,12 @@
* in the entry routines.
*/
__HEAD
-_head:
+
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
#ifdef CONFIG_EFI
+efi_head:
/*
* This add instruction has no meaningful effect except that
* its opcode forms the magic "MZ" signature required by UEFI.
@@ -83,9 +83,9 @@ _head:
b stext // branch to kernel start, magic
.long 0 // reserved
#endif
- le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian
- le64sym _kernel_size_le // Effective size of kernel image, little-endian
- le64sym _kernel_flags_le // Informative flags, little-endian
+ .quad _kernel_offset_le // Image load offset from start of RAM, little-endian
+ .quad _kernel_size_le // Effective size of kernel image, little-endian
+ .quad _kernel_flags_le // Informative flags, little-endian
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
@@ -94,14 +94,14 @@ _head:
.byte 0x4d
.byte 0x64
#ifdef CONFIG_EFI
- .long pe_header - _head // Offset to the PE header.
+ .long pe_header - efi_head // Offset to the PE header.
#else
.word 0 // reserved
#endif
#ifdef CONFIG_EFI
.globl __efistub_stext_offset
- .set __efistub_stext_offset, stext - _head
+ .set __efistub_stext_offset, stext - efi_head
.align 3
pe_header:
.ascii "PE"
@@ -124,7 +124,7 @@ optional_header:
.long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
- .long __efistub_entry - _head // AddressOfEntryPoint
+ .long __efistub_entry - efi_head // AddressOfEntryPoint
.long __efistub_stext_offset // BaseOfCode
extra_header_fields:
@@ -139,7 +139,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
- .long _end - _head // SizeOfImage
+ .long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long __efistub_stext_offset // SizeOfHeaders
@@ -210,7 +210,6 @@ section_table:
ENTRY(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- mov x23, xzr // KASLR offset, defaults to 0
adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
bl __create_page_tables // x25=TTBR0, x26=TTBR1
@@ -220,13 +219,11 @@ ENTRY(stext)
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
- ldr x27, 0f // address to jump to after
+ ldr x27, =__mmap_switched // address to jump to after
// MMU has been enabled
adr_l lr, __enable_mmu // return (PIC) address
b __cpu_setup // initialise processor
ENDPROC(stext)
- .align 3
-0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
@@ -314,7 +311,7 @@ ENDPROC(preserve_boot_args)
__create_page_tables:
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
- mov x28, lr
+ mov x27, lr
/*
* Invalidate the idmap and swapper page tables to avoid potential
@@ -392,11 +389,9 @@ __create_page_tables:
* Map the kernel image (starting with PHYS_OFFSET).
*/
mov x0, x26 // swapper_pg_dir
- ldr x5, =KIMAGE_VADDR
- add x5, x5, x23 // add KASLR displacement
+ mov x5, #PAGE_OFFSET
create_pgd_entry x0, x5, x3, x6
- ldr w6, kernel_img_size
- add x6, x6, x5
+ ldr x6, =KERNEL_END // __va(KERNEL_END)
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -410,11 +405,9 @@ __create_page_tables:
dmb sy
bl __inval_cache_range
- ret x28
+ mov lr, x27
+ ret
ENDPROC(__create_page_tables)
-
-kernel_img_size:
- .long _end - (_head - TEXT_OFFSET)
.ltorg
/*
@@ -422,81 +415,21 @@ kernel_img_size:
*/
.set initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
- mov x28, lr // preserve LR
- adr_l x8, vectors // load VBAR_EL1 with virtual
- msr vbar_el1, x8 // vector table address
- isb
+ adr_l x6, __bss_start
+ adr_l x7, __bss_stop
- // Clear BSS
- adr_l x0, __bss_start
- mov x1, xzr
- adr_l x2, __bss_stop
- sub x2, x2, x0
- bl __pi_memset
- dsb ishst // Make zero page visible to PTW
-
-#ifdef CONFIG_RELOCATABLE
-
- /*
- * Iterate over each entry in the relocation table, and apply the
- * relocations in place.
- */
- adr_l x8, __dynsym_start // start of symbol table
- adr_l x9, __reloc_start // start of reloc table
- adr_l x10, __reloc_end // end of reloc table
-
-0: cmp x9, x10
+1: cmp x6, x7
b.hs 2f
- ldp x11, x12, [x9], #24
- ldr x13, [x9, #-8]
- cmp w12, #R_AARCH64_RELATIVE
- b.ne 1f
- add x13, x13, x23 // relocate
- str x13, [x11, x23]
- b 0b
-
-1: cmp w12, #R_AARCH64_ABS64
- b.ne 0b
- add x12, x12, x12, lsl #1 // symtab offset: 24x top word
- add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
- ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
- ldr x15, [x12, #8] // Elf64_Sym::st_value
- cmp w14, #-0xf // SHN_ABS (0xfff1) ?
- add x14, x15, x23 // relocate
- csel x15, x14, x15, ne
- add x15, x13, x15
- str x15, [x11, x23]
- b 0b
-
-2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr
- dc cvac, x8 // value visible to secondaries
- dsb sy // with MMU off
-#endif
-
+ str xzr, [x6], #8 // Clear BSS
+ b 1b
+2:
adr_l sp, initial_sp, x4
- mov x4, sp
- and x4, x4, #~(THREAD_SIZE - 1)
- msr sp_el0, x4 // Save thread_info
str_l x21, __fdt_pointer, x5 // Save FDT pointer
-
- ldr_l x4, kimage_vaddr // Save the offset between
- sub x4, x4, x24 // the kernel virtual and
- str_l x4, kimage_voffset, x5 // physical mappings
-
+ str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
mov x29, #0
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
-#ifdef CONFIG_RANDOMIZE_BASE
- cbnz x23, 0f // already running randomized?
- mov x0, x21 // pass FDT address in x0
- bl kaslr_early_init // parse FDT for KASLR options
- cbz x0, 0f // KASLR disabled? just proceed
- mov x23, x0 // record KASLR offset
- ret x28 // we must enable KASLR, return
- // to __enable_mmu()
-0:
-#endif
b start_kernel
ENDPROC(__mmap_switched)
@@ -505,10 +438,6 @@ ENDPROC(__mmap_switched)
* hotplug and needs to have the same protections as the text region
*/
.section ".text","ax"
-
-ENTRY(kimage_vaddr)
- .quad _text - TEXT_OFFSET
-
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
@@ -674,22 +603,14 @@ ENTRY(secondary_startup)
adrp x26, swapper_pg_dir
bl __cpu_setup // initialise processor
- ldr x8, kimage_vaddr
- ldr w9, 0f
- sub x27, x8, w9, sxtw // address to jump to after enabling the MMU
+ ldr x21, =secondary_data
+ ldr x27, =__secondary_switched // address to jump to after enabling the MMU
b __enable_mmu
ENDPROC(secondary_startup)
-0: .long (_text - TEXT_OFFSET) - __secondary_switched
ENTRY(__secondary_switched)
- adr_l x5, vectors
- msr vbar_el1, x5
- isb
-
- ldr_l x0, secondary_data // get secondary_data.stack
+ ldr x0, [x21] // get secondary_data.stack
mov sp, x0
- and x0, x0, #~(THREAD_SIZE - 1)
- msr sp_el0, x0 // save thread_info
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
@@ -707,11 +628,12 @@ ENDPROC(__secondary_switched)
*/
.section ".idmap.text", "ax"
__enable_mmu:
- mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value
mrs x1, ID_AA64MMFR0_EL1
ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
+ ldr x5, =vectors
+ msr vbar_el1, x5
msr ttbr0_el1, x25 // load TTBR0
msr ttbr1_el1, x26 // load TTBR1
isb
@@ -725,26 +647,6 @@ __enable_mmu:
ic iallu
dsb nsh
isb
-#ifdef CONFIG_RANDOMIZE_BASE
- mov x19, x0 // preserve new SCTLR_EL1 value
- blr x27
-
- /*
- * If we return here, we have a KASLR displacement in x23 which we need
- * to take into account by discarding the current kernel mapping and
- * creating a new one.
- */
- msr sctlr_el1, x18 // disable the MMU
- isb
- bl __create_page_tables // recreate kernel mapping
-
- msr sctlr_el1, x19 // re-enable the MMU
- isb
- ic iallu // flush instructions fetched
- dsb nsh // via old mapping
- isb
- add x27, x27, x23 // relocated __mmap_switched
-#endif
br x27
ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index db1bf57948f1..bc2abb8b1599 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -26,40 +26,31 @@
* There aren't any ELF relocations we can use to endian-swap values known only
* at link time (e.g. the subtraction of two symbol addresses), so we must get
* the linker to endian-swap certain values before emitting them.
- *
- * Note that, in order for this to work when building the ELF64 PIE executable
- * (for KASLR), these values should not be referenced via R_AARCH64_ABS64
- * relocations, since these are fixed up at runtime rather than at build time
- * when PIE is in effect. So we need to split them up in 32-bit high and low
- * words.
*/
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define DATA_LE32(data) \
- ((((data) & 0x000000ff) << 24) | \
- (((data) & 0x0000ff00) << 8) | \
- (((data) & 0x00ff0000) >> 8) | \
- (((data) & 0xff000000) >> 24))
+#define DATA_LE64(data) \
+ ((((data) & 0x00000000000000ff) << 56) | \
+ (((data) & 0x000000000000ff00) << 40) | \
+ (((data) & 0x0000000000ff0000) << 24) | \
+ (((data) & 0x00000000ff000000) << 8) | \
+ (((data) & 0x000000ff00000000) >> 8) | \
+ (((data) & 0x0000ff0000000000) >> 24) | \
+ (((data) & 0x00ff000000000000) >> 40) | \
+ (((data) & 0xff00000000000000) >> 56))
#else
-#define DATA_LE32(data) ((data) & 0xffffffff)
+#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
#endif
-#define DEFINE_IMAGE_LE64(sym, data) \
- sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
- sym##_hi32 = DATA_LE32((data) >> 32)
-
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE 1
#else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE 0
#endif
-#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
-
-#define __HEAD_FLAG_PHYS_BASE 1
+#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
-#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
- (__HEAD_FLAG_PAGE_SIZE << 1) | \
- (__HEAD_FLAG_PHYS_BASE << 3))
+#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
+ (__HEAD_FLAG_PAGE_SIZE << 1))
/*
* These will output as part of the Image header, which should be little-endian
@@ -67,23 +58,13 @@
* endian swapped in head.S, all are done here for consistency.
*/
#define HEAD_SYMBOLS \
- DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \
- DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
- DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
+ _kernel_size_le = DATA_LE64(_end - _text); \
+ _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \
+ _kernel_flags_le = DATA_LE64(__HEAD_FLAGS);
#ifdef CONFIG_EFI
/*
- * Prevent the symbol aliases below from being emitted into the kallsyms
- * table, by forcing them to be absolute symbols (which are conveniently
- * ignored by scripts/kallsyms) rather than section relative symbols.
- * The distinction is only relevant for partial linking, and only for symbols
- * that are defined within a section declaration (which is not the case for
- * the definitions below) so the resulting values will be identical.
- */
-#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
-
-/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
* isolate it from the kernel proper. The following symbols are legally
* accessed by the stub, so provide some aliases to make them accessible.
@@ -92,25 +73,25 @@
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
-__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
-__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub_memset = KALLSYMS_HIDE(__pi_memset);
-__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
-__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
-__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
-__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
+__efistub_memcmp = __pi_memcmp;
+__efistub_memchr = __pi_memchr;
+__efistub_memcpy = __pi_memcpy;
+__efistub_memmove = __pi_memmove;
+__efistub_memset = __pi_memset;
+__efistub_strlen = __pi_strlen;
+__efistub_strcmp = __pi_strcmp;
+__efistub_strncmp = __pi_strncmp;
+__efistub___flush_dcache_area = __pi___flush_dcache_area;
#ifdef CONFIG_KASAN
-__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub___memset = KALLSYMS_HIDE(__pi_memset);
+__efistub___memcpy = __pi_memcpy;
+__efistub___memmove = __pi_memmove;
+__efistub___memset = __pi_memset;
#endif
-__efistub__text = KALLSYMS_HIDE(_text);
-__efistub__end = KALLSYMS_HIDE(_end);
-__efistub__edata = KALLSYMS_HIDE(_edata);
+__efistub__text = _text;
+__efistub__end = _end;
+__efistub__edata = _edata;
#endif
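
The DATA_LE64() macro restored above byte-reverses a 64-bit link-time constant on big-endian builds so the Image header stays little-endian. A minimal standalone check of that swap, assuming a hosted C environment (the test constants are arbitrary):

#include <assert.h>
#include <stdint.h>

/* Byte-reverse a 64-bit value, as the big-endian DATA_LE64() does. */
static uint64_t data_le64(uint64_t data)
{
	return ((data & 0x00000000000000ffULL) << 56) |
	       ((data & 0x000000000000ff00ULL) << 40) |
	       ((data & 0x0000000000ff0000ULL) << 24) |
	       ((data & 0x00000000ff000000ULL) <<  8) |
	       ((data & 0x000000ff00000000ULL) >>  8) |
	       ((data & 0x0000ff0000000000ULL) >> 24) |
	       ((data & 0x00ff000000000000ULL) >> 40) |
	       ((data & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
	/* The swap is an involution: applying it twice is a no-op. */
	assert(data_le64(data_le64(0x0123456789abcdefULL)) ==
	       0x0123456789abcdefULL);
	assert(data_le64(0x0102030405060708ULL) == 0x0807060504030201ULL);
	return 0;
}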
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 2386b26c0712..9f17ec071ee0 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -30,9 +30,6 @@
unsigned long irq_err_count;
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
-
int arch_show_interrupts(struct seq_file *p, int prec)
{
show_ipi_list(p, prec);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
deleted file mode 100644
index 582983920054..000000000000
--- a/arch/arm64/kernel/kaslr.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/crc32.h>
-#include <linux/init.h>
-#include <linux/libfdt.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-
-#include <asm/fixmap.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/memory.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-u64 __read_mostly module_alloc_base;
-u16 __initdata memstart_offset_seed;
-
-static __init u64 get_kaslr_seed(void *fdt)
-{
- int node, len;
- u64 *prop;
- u64 ret;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- return 0;
-
- prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
- if (!prop || len != sizeof(u64))
- return 0;
-
- ret = fdt64_to_cpu(*prop);
- *prop = 0;
- return ret;
-}
-
-static __init const u8 *get_cmdline(void *fdt)
-{
- static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
-
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- int node;
- const u8 *prop;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
-
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
- return prop;
- }
-out:
- return default_cmdline;
-}
-
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
- pgprot_t prot);
-
-/*
- * This routine will be executed with the kernel mapped at its default virtual
- * address, and if it returns successfully, the kernel will be remapped, and
- * start_kernel() will be executed from a randomized virtual offset. The
- * relocation will result in all absolute references (e.g., static variables
- * containing function pointers) to be reinitialized, and zero-initialized
- * .bss variables will be reset to 0.
- */
-u64 __init kaslr_early_init(u64 dt_phys)
-{
- void *fdt;
- u64 seed, offset, mask, module_range;
- const u8 *cmdline, *str;
- int size;
-
- /*
- * Set a reasonable default for module_alloc_base in case
- * we end up running with module randomization disabled.
- */
- module_alloc_base = (u64)_etext - MODULES_VSIZE;
-
- /*
- * Try to map the FDT early. If this fails, we simply bail,
- * and proceed with KASLR disabled. We will make another
- * attempt at mapping the FDT in setup_machine()
- */
- early_fixmap_init();
- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
- if (!fdt)
- return 0;
-
- /*
- * Retrieve (and wipe) the seed from the FDT
- */
- seed = get_kaslr_seed(fdt);
- if (!seed)
- return 0;
-
- /*
- * Check if 'nokaslr' appears on the command line, and
- * return 0 if that is the case.
- */
- cmdline = get_cmdline(fdt);
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
- return 0;
-
- /*
- * OK, so we are proceeding with KASLR enabled. Calculate a suitable
- * kernel image offset from the seed. Let's place the kernel in the
- * lower half of the VMALLOC area (VA_BITS - 2).
- * Even if we could randomize at page granularity for 16k and 64k pages,
- * let's always round to 2 MB so we don't interfere with the ability to
- * map using contiguous PTEs
- */
- mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
- offset = seed & mask;
-
- /* use the top 16 bits to randomize the linear region */
- memstart_offset_seed = seed >> 48;
-
- /*
- * The kernel Image should not extend across a 1GB/32MB/512MB alignment
- * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, increase the KASLR offset by the size of the kernel image.
- */
- if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
- offset = (offset + (u64)(_end - _text)) & mask;
-
- if (IS_ENABLED(CONFIG_KASAN))
- /*
- * KASAN does not expect the module region to intersect the
- * vmalloc region, since shadow memory is allocated for each
- * module at load time, whereas the vmalloc region is shadowed
- * by KASAN zero pages. So keep modules out of the vmalloc
- * region if KASAN is enabled.
- */
- return offset;
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
- /*
- * Randomize the module region independently from the core
- * kernel. This prevents modules from leaking any information
- * about the address of the kernel itself, but results in
- * branches between modules and the core kernel that are
- * resolved via PLTs. (Branches between modules will be
- * resolved normally.)
- */
- module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
- module_alloc_base = VMALLOC_START;
- } else {
- /*
- * Randomize the module region by setting module_alloc_base to
- * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
- * _stext). This guarantees that the resulting region still
- * covers [_stext, _etext], and that all relative branches can
- * be resolved without veneers.
- */
- module_range = MODULES_VSIZE - (u64)(_etext - _stext);
- module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
- }
-
- /* use the lower 21 bits to randomize the base of the module region */
- module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
- module_alloc_base &= PAGE_MASK;
-
- return offset;
-}
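
For reference, the offset arithmetic at the heart of the deleted kaslr_early_init() can be modelled in isolation. This is a sketch under stated assumptions (VA_BITS = 48, 2 MiB rounding, and SWAPPER_TABLE_SHIFT passed in rather than taken from the page-table configuration), not the kernel function itself:

/*
 * Hedged sketch: the KASLR offset derivation from the deleted
 * kaslr_early_init(), modelled in userspace. VA_BITS, SZ_2M and the
 * text/end values are stand-in assumptions, not kernel symbols.
 */
#include <stdint.h>

#define VA_BITS	48
#define SZ_2M	0x200000ULL

static uint64_t kaslr_offset_from_seed(uint64_t seed,
				       uint64_t text, uint64_t end,
				       unsigned int swapper_table_shift)
{
	/* 2 MB-aligned offsets in the lower half of the vmalloc area. */
	uint64_t mask = ((1ULL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	uint64_t offset = seed & mask;

	/* If [text, end) would straddle a swapper table boundary at
	 * this offset, push the offset past it by the image size. */
	if (((text + offset) >> swapper_table_shift) !=
	    ((end + offset) >> swapper_table_shift))
		offset = (offset + (end - text)) & mask;

	return offset;
}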
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
deleted file mode 100644
index 1ce90d8450ae..000000000000
--- a/arch/arm64/kernel/module-plts.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/elf.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sort.h>
-
-struct plt_entry {
- /*
- * A program that conforms to the AArch64 Procedure Call Standard
- * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
- * IP1 (x17) may be inserted at any branch instruction that is
- * exposed to a relocation that supports long branches. Since that
- * is exactly what we are dealing with here, we are free to use x16
- * as a scratch register in the PLT veneers.
- */
- __le32 mov0; /* movn x16, #0x.... */
- __le32 mov1; /* movk x16, #0x...., lsl #16 */
- __le32 mov2; /* movk x16, #0x...., lsl #32 */
- __le32 br; /* br x16 */
-};
-
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
- Elf64_Sym *sym)
-{
- struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
- int i = mod->arch.plt_num_entries;
- u64 val = sym->st_value + rela->r_addend;
-
- /*
- * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
- * which are listed in the ELF symtab section, but without a type
- * or a size.
- * So, similar to how the module loader uses the Elf64_Sym::st_value
- * field to store the resolved addresses of undefined symbols, let's
- * borrow the Elf64_Sym::st_size field (whose value is never used by
- * the module loader, even for symbols that are defined) to record
- * the address of a symbol's associated PLT entry as we emit it for a
- * zero addend relocation (which is the only kind we have to deal with
- * in practice). This allows us to find duplicates without having to
- * go through the table every time.
- */
- if (rela->r_addend == 0 && sym->st_size != 0) {
- BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
- return sym->st_size;
- }
-
- mod->arch.plt_num_entries++;
- BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);
-
- /*
- * MOVK/MOVN/MOVZ opcode:
- * +--------+------------+--------+-----------+-------------+---------+
- * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
- * +--------+------------+--------+-----------+-------------+---------+
- *
- * Rd := 0x10 (x16)
- * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
- * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
- * sf := 1 (64-bit variant)
- */
- plt[i] = (struct plt_entry){
- cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
- cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
- cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
- cpu_to_le32(0xd61f0200)
- };
-
- if (rela->r_addend == 0)
- sym->st_size = (u64)&plt[i];
-
- return (u64)&plt[i];
-}
-
-#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
-
-static int cmp_rela(const void *a, const void *b)
-{
- const Elf64_Rela *x = a, *y = b;
- int i;
-
- /* sort by type, symbol index and addend */
- i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
- if (i == 0)
- i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
- if (i == 0)
- i = cmp_3way(x->r_addend, y->r_addend);
- return i;
-}
-
-static bool duplicate_rel(const Elf64_Rela *rela, int num)
-{
- /*
- * Entries are sorted by type, symbol index and addend. That means
- * that, if a duplicate entry exists, it must be in the preceding
- * slot.
- */
- return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
-}
-
-static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
-{
- unsigned int ret = 0;
- Elf64_Sym *s;
- int i;
-
- for (i = 0; i < num; i++) {
- switch (ELF64_R_TYPE(rela[i].r_info)) {
- case R_AARCH64_JUMP26:
- case R_AARCH64_CALL26:
- /*
- * We only have to consider branch targets that resolve
- * to undefined symbols. This is not simply a heuristic,
- * it is a fundamental limitation, since the PLT itself
- * is part of the module, and needs to be within 128 MB
- * as well, so modules can never grow beyond that limit.
- */
- s = syms + ELF64_R_SYM(rela[i].r_info);
- if (s->st_shndx != SHN_UNDEF)
- break;
-
- /*
- * Jump relocations with non-zero addends against
- * undefined symbols are supported by the ELF spec, but
- * do not occur in practice (e.g., 'jump n bytes past
- * the entry point of undefined function symbol f').
- * So we need to support them, but there is no need to
- * take them into consideration when trying to optimize
- * this code. So let's only check for duplicates when
- * the addend is zero: this allows us to record the PLT
- * entry address in the symbol table itself, rather than
- * having to search the list for duplicates each time we
- * emit one.
- */
- if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
- ret++;
- break;
- }
- }
- return ret;
-}
-
-int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- unsigned long plt_max_entries = 0;
- Elf64_Sym *syms = NULL;
- int i;
-
- /*
- * Find the empty .plt section so we can expand it to store the PLT
- * entries. Record the symtab address as well.
- */
- for (i = 0; i < ehdr->e_shnum; i++) {
- if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
- mod->arch.plt = sechdrs + i;
- else if (sechdrs[i].sh_type == SHT_SYMTAB)
- syms = (Elf64_Sym *)sechdrs[i].sh_addr;
- }
-
- if (!mod->arch.plt) {
- pr_err("%s: module PLT section missing\n", mod->name);
- return -ENOEXEC;
- }
- if (!syms) {
- pr_err("%s: module symtab section missing\n", mod->name);
- return -ENOEXEC;
- }
-
- for (i = 0; i < ehdr->e_shnum; i++) {
- Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
- int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
- Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;
-
- if (sechdrs[i].sh_type != SHT_RELA)
- continue;
-
- /* ignore relocations that operate on non-exec sections */
- if (!(dstsec->sh_flags & SHF_EXECINSTR))
- continue;
-
- /* sort by type, symbol index and addend */
- sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
-
- plt_max_entries += count_plts(syms, rels, numrels);
- }
-
- mod->arch.plt->sh_type = SHT_NOBITS;
- mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
- mod->arch.plt_num_entries = 0;
- mod->arch.plt_max_entries = plt_max_entries;
- return 0;
-}
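
The veneer encoding used by the deleted module_emit_plt_entry() packs a 48-bit target into a movn/movk/movk/br sequence; because MOVN writes the bitwise NOT of its immediate, the top 16 bits of x16 end up all-ones, which suits kernel-space addresses. A hedged standalone sketch of that packing (the cpu_to_le32() conversion is omitted, so a little-endian host is assumed):

#include <stdint.h>

struct plt_entry {
	uint32_t mov0;	/* movn x16, #~val[15:0] */
	uint32_t mov1;	/* movk x16, #val[31:16], lsl #16 */
	uint32_t mov2;	/* movk x16, #val[47:32], lsl #32 */
	uint32_t br;	/* br x16 */
};

static struct plt_entry make_plt_entry(uint64_t val)
{
	return (struct plt_entry){
		/* MOVN inverts its immediate, so feed it ~val; the
		 * opcodes match the table in the deleted comment. */
		0x92800010u | (uint32_t)((~val & 0xffff) << 5),
		0xf2a00010u | (uint32_t)(((val >> 16) & 0xffff) << 5),
		0xf2c00010u | (uint32_t)(((val >> 32) & 0xffff) << 5),
		0xd61f0200u,	/* br x16 */
	};
}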
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 7f316982ce00..f4bc779e62e8 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -30,30 +30,17 @@
#include <asm/insn.h>
#include <asm/sections.h>
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
+
void *module_alloc(unsigned long size)
{
void *p;
- p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + MODULES_VSIZE,
+ p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
- if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- !IS_ENABLED(CONFIG_KASAN))
- /*
- * KASAN can only deal with module allocations being served
- * from the reserved module region, since the remainder of
- * the vmalloc region is already backed by zero shadow pages,
- * and punching holes into it is non-trivial. Since the module
- * region is not randomized when KASAN is enabled, it is even
- * less likely that the module region gets exhausted, so we
- * can simply omit this fallback in that case.
- */
- p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
- VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
- NUMA_NO_NODE, __builtin_return_address(0));
-
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
return NULL;
@@ -88,18 +75,15 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
+ u64 imm_mask = (1 << len) - 1;
s64 sval = do_reloc(op, place, val);
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
- return -ERANGE;
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
- return -ERANGE;
break;
case 64:
*(s64 *)place = sval;
@@ -108,23 +92,34 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
pr_err("Invalid length (%d) for data relocation\n", len);
return 0;
}
+
+ /*
+ * Extract the upper value bits (including the sign bit) and
+ * shift them to bit 0.
+ */
+ sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+ /*
+ * Overflow has occurred if the value is not representable in
+ * len bits (i.e the bottom len bits are not sign-extended and
+ * the top bits are not all zero).
+ */
+ if ((u64)(sval + 1) > 2)
+ return -ERANGE;
+
return 0;
}
-enum aarch64_insn_movw_imm_type {
- AARCH64_INSN_IMM_MOVNZ,
- AARCH64_INSN_IMM_MOVKZ,
-};
-
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_insn_movw_imm_type imm_type)
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u64 imm;
+ u64 imm, limit = 0;
s64 sval;
u32 insn = le32_to_cpu(*(u32 *)place);
sval = do_reloc(op, place, val);
- imm = sval >> lsb;
+ sval >>= lsb;
+ imm = sval & 0xffff;
if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
@@ -133,7 +128,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* immediate is less than zero.
*/
insn &= ~(3 << 29);
- if (sval >= 0) {
+ if ((s64)imm >= 0) {
/* >=0: Set the instruction to MOVZ (opcode 10b). */
insn |= 2 << 29;
} else {
@@ -145,13 +140,29 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
*/
imm = ~imm;
}
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
/* Update the instruction with the new encoding. */
- insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
*(u32 *)place = cpu_to_le32(insn);
- if (imm > U16_MAX)
+ /* Shift out the immediate field. */
+ sval >>= 16;
+
+ /*
+ * For unsigned immediates, the overflow check is straightforward.
+ * For signed immediates, the sign bit is actually the bit past the
+ * most significant bit of the field.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
+ */
+ if (imm_type != AARCH64_INSN_IMM_16) {
+ sval++;
+ limit++;
+ }
+
+ /* Check the upper bits depending on the sign of the immediate. */
+ if ((u64)sval > limit)
return -ERANGE;
return 0;
@@ -256,25 +267,25 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
@@ -291,7 +302,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
@@ -300,7 +311,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
@@ -309,7 +320,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- AARCH64_INSN_IMM_MOVKZ);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
@@ -377,13 +388,6 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
AARCH64_INSN_IMM_26);
-
- if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- ovf == -ERANGE) {
- val = module_emit_plt_entry(me, &rel[i], sym);
- ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
- 26, AARCH64_INSN_IMM_26);
- }
break;
default:
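
The overflow test restored in reloc_data() accepts any value representable in len bits as either a signed or an unsigned quantity, i.e. anything in [-(2^(len-1)), 2^len - 1]. A small userspace model of just that test, with illustrative names:

/*
 * Hedged sketch: after masking off all but the upper bits (including
 * the sign bit) and shifting them to bit 0, a representable value
 * leaves -1, 0 or 1 behind, so (u64)(sval + 1) <= 2 accepts it.
 */
#include <assert.h>
#include <stdint.h>

static int fits_in_len_bits(int64_t sval, int len)
{
	uint64_t imm_mask = (1ULL << len) - 1;

	sval = (int64_t)(sval & ~(imm_mask >> 1)) >> (len - 1);
	return (uint64_t)(sval + 1) <= 2;
}

int main(void)
{
	assert(fits_in_len_bits(-32768, 16));	/* S16_MIN: ok */
	assert(fits_in_len_bits(65535, 16));	/* U16_MAX: ok */
	assert(!fits_in_len_bits(65536, 16));	/* one past: overflow */
	assert(!fits_in_len_bits(-32769, 16));	/* one below: overflow */
	return 0;
}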
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
deleted file mode 100644
index 8949f6c6f729..000000000000
--- a/arch/arm64/kernel/module.lds
+++ /dev/null
@@ -1,3 +0,0 @@
-SECTIONS {
- .plt (NOLOAD) : { BYTE(0) }
-}
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index ff4665462a02..3aa74830cc69 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -164,11 +164,8 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
- walk_stackframe(current, &frame, callchain_trace, entry);
+ walk_stackframe(&frame, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 496e2e33bbec..129fb3f8c322 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -46,7 +46,6 @@
#include <linux/notifier.h>
#include <trace/events/power.h>
-#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/fpsimd.h>
@@ -351,9 +350,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
- if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_cap(ARM64_HAS_UAO))
- childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
@@ -382,17 +378,6 @@ static void tls_thread_switch(struct task_struct *next)
: : "r" (tpidr), "r" (tpidrro));
}
-/* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
-{
- if (IS_ENABLED(CONFIG_ARM64_UAO)) {
- if (task_thread_info(next)->addr_limit == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
- }
-}
-
/*
* Thread switching.
*/
@@ -405,7 +390,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
- uao_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -430,14 +414,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = p->curr_ret_stack;
-#endif
stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
- unwind_frame(p, &frame))
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706fde83..6c4fd2810ecb 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -43,11 +43,8 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
- walk_stackframe(current, &frame, save_return_addr, &data);
+ walk_stackframe(&frame, save_return_addr, &data);
if (!data.level)
return data.addr;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d1e01e6498bb..cd4eb7ee618c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -63,7 +63,6 @@
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
-#include <asm/mmu_context.h>
unsigned int boot_reason;
EXPORT_SYMBOL(boot_reason);
@@ -328,12 +327,6 @@ void __init setup_arch(char **cmdline_p)
*/
local_async_enable();
- /*
- * TTBR0 is only used for the identity mapping at this stage. Make it
- * point to zero page to avoid speculatively fetching new entries.
- */
- cpu_uninstall_idmap();
-
efi_init();
arm64_memblock_init();
@@ -409,32 +402,3 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
pdev->archdata.dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
}
-
-/*
- * Dump out kernel offset information on panic.
- */
-static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
- void *p)
-{
- u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
- pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
- kaslr_offset, KIMAGE_VADDR);
- } else {
- pr_emerg("Kernel Offset: disabled\n");
- }
- return 0;
-}
-
-static struct notifier_block kernel_offset_notifier = {
- .notifier_call = dump_kernel_offset
-};
-
-static int __init register_kernel_offset_dumper(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &kernel_offset_notifier);
- return 0;
-}
-__initcall(register_kernel_offset_dumper);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e33fe33876ab..f586f7c875e2 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -173,9 +173,6 @@ ENTRY(cpu_resume)
/* load physical address of identity map page table in x1 */
adrp x1, idmap_pg_dir
mov sp, x2
- /* save thread_info */
- and x2, x2, #~(THREAD_SIZE - 1)
- msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context physical address
* pointer and x1 to contain physical address of 1:1 page tables
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b2f5631c3785..08e78e47be95 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -154,7 +154,9 @@ asmlinkage void secondary_start_kernel(void)
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
*/
- cpu_uninstall_idmap();
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
preempt_disable();
trace_hardirqs_off();
@@ -457,17 +459,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(cpu_count) = hwid;
- /*
- * Set-up the ACPI parking protocol cpu entries
- * while initializing the cpu_logical_map to
- * avoid parsing MADT entries multiple times for
- * nothing (ie a valid cpu_logical_map entry should
- * contain a valid parking protocol data set to
- * initialize the cpu if the parking protocol is
- * the only available enable method).
- */
- acpi_set_mailbox_entry(cpu_count, processor);
-
cpu_count++;
}
@@ -710,12 +701,10 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
- smp_cross_call(mask, IPI_WAKEUP);
+ smp_cross_call_common(mask, IPI_WAKEUP);
}
-#endif
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
@@ -866,17 +855,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
#endif
- case IPI_CPU_BACKTRACE:
- ipi_cpu_backtrace(cpu, regs);
+ case IPI_WAKEUP:
break;
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
- case IPI_WAKEUP:
- WARN_ONCE(!acpi_parking_protocol_valid(cpu),
- "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
- cpu);
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
break;
-#endif
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 85aea381fbf6..665c7fedc65b 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,11 +18,9 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
-#include <asm/irq.h>
#include <asm/stacktrace.h>
/*
@@ -38,29 +36,15 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
-int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
- unsigned long irq_stack_ptr;
-
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
low = frame->sp;
- /* irq stacks are not THREAD_SIZE aligned */
- if (on_irq_stack(frame->sp, raw_smp_processor_id()))
- high = irq_stack_ptr;
- else
- high = ALIGN(low, THREAD_SIZE) - 0x20;
+ high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
kasan_disable_current();
@@ -69,55 +53,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (tsk && tsk->ret_stack &&
- (frame->pc == (unsigned long)return_to_handler)) {
- /*
- * This is a case where function graph tracer has
- * modified a return address (LR) in a stack frame
- * to hook a function return.
- * So replace it to an original value.
- */
- frame->pc = tsk->ret_stack[frame->graph--].ret;
- }
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
- /*
- * Check whether we are going to walk through from interrupt stack
- * to task stack.
- * If we reach the end of the stack - and its an interrupt stack,
- * unpack the dummy frame to find the original elr.
- *
- * Check the frame->fp we read from the bottom of the irq_stack,
- * and the original task stack pointer are both in current->stack.
- */
- if (frame->sp == irq_stack_ptr) {
- struct pt_regs *irq_args;
- unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
- if (object_is_on_stack((void *)orig_sp) &&
- object_is_on_stack((void *)frame->fp)) {
- frame->sp = orig_sp;
-
- /* orig_sp is the saved pt_regs, find the elr */
- irq_args = (struct pt_regs *)orig_sp;
- frame->pc = irq_args->pc;
- } else {
- /*
- * This frame has a non-standard format, and we
- * didn't fix it, because the data looked wrong.
- * Refuse to output this frame.
- */
- return -EINVAL;
- }
- }
-
kasan_enable_current();
return 0;
}
-void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+void notrace walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
{
while (1) {
@@ -125,7 +66,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
if (fn(frame, data))
break;
- ret = unwind_frame(tsk, frame);
+ ret = unwind_frame(frame);
if (ret < 0)
break;
}
@@ -176,11 +117,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
-#endif
- walk_stackframe(tsk, &frame, save_trace, &data);
+ walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
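
The simplified unwind_frame() above bounds-checks the frame pointer against the THREAD_SIZE-aligned stack limits before dereferencing it. A hedged standalone model of one unwind step (the THREAD_SIZE value is an assumption here; the real constant comes from asm/thread_info.h):

#include <stdint.h>

#define THREAD_SIZE	0x4000UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct stackframe {
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

static int unwind_frame_model(struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	unsigned long low = frame->sp;
	unsigned long high = ALIGN_UP(low, THREAD_SIZE);

	/* The frame record must lie on this stack, leave room for the
	 * saved fp/lr pair below the stack top, and be 16-byte aligned. */
	if (fp < low || fp > high - 0x18 || fp & 0xf)
		return -1;

	frame->sp = fp + 0x10;			/* caller's stack pointer */
	frame->fp = *(unsigned long *)fp;	/* saved frame pointer */
	frame->pc = *(unsigned long *)(fp + 8);	/* saved link register */
	return 0;
}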
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 66055392f445..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -60,6 +60,7 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
*/
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
+ struct mm_struct *mm = current->active_mm;
int ret;
unsigned long flags;
@@ -86,11 +87,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
/*
- * We are resuming from reset with the idmap active in TTBR0_EL1.
- * We must uninstall the idmap and restore the expected MMU
- * state before we can possibly return to userspace.
+ * We are resuming from reset with TTBR0_EL1 set to the
+ * idmap to enable the MMU; set the TTBR0 to the reserved
+ * page tables to prevent speculative TLB allocations, flush
+ * the local tlb and set the default tcr_el1.t0sz so that
+ * the TTBR0 address space set-up is properly restored.
+ * If the current active_mm != &init_mm we entered cpu_suspend
+ * with mappings in TTBR0 that must be restored, so we switch
+ * them back to complete the address space configuration
+ * restoration before returning.
*/
- cpu_uninstall_idmap();
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+
+ if (mm != &init_mm)
+ cpu_switch_mm(mm->pgd, mm);
/*
* Restore per-cpu offset before any kernel
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 59779699a1a4..13339b6ffc1a 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,11 +52,8 @@ unsigned long profile_pc(struct pt_regs *regs)
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = -1; /* no task info */
-#endif
do {
- int ret = unwind_frame(NULL, &frame);
+ int ret = unwind_frame(&frame);
if (ret < 0)
return 0;
} while (in_lock_functions(frame.pc));
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 48b75ece4c17..7d54c168d19c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -150,24 +150,17 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long irq_stack_ptr;
- int skip;
-
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
- if (tsk == current) {
+ if (regs) {
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+ } else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
@@ -179,49 +172,21 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
-#endif
- skip = !!regs;
- printk("Call trace:\n");
+ pr_emerg("Call trace:\n");
while (1) {
unsigned long where = frame.pc;
unsigned long stack;
int ret;
- /* skip until specified stack frame */
- if (!skip) {
- dump_backtrace_entry(where);
- } else if (frame.fp == regs->regs[29]) {
- skip = 0;
- /*
- * Mostly, this is the case where this function is
- * called in panic/abort. As exception handler's
- * stack frame does not contain the corresponding pc
- * at which an exception has taken place, use regs->pc
- * instead.
- */
- dump_backtrace_entry(regs->pc);
- }
- ret = unwind_frame(tsk, &frame);
+ dump_backtrace_entry(where);
+ ret = unwind_frame(&frame);
if (ret < 0)
break;
stack = frame.sp;
- if (in_exception_text(where)) {
- /*
- * If we switched to the irq_stack before calling this
- * exception handler, then the pt_regs will be on the
- * task stack. The easiest way to tell is if the large
- * pt_regs would overlap with the end of the irq_stack.
- */
- if (stack < irq_stack_ptr &&
- (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
- stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
+ if (in_exception_text(where))
dump_mem("", "Exception stack", stack,
stack + sizeof(struct pt_regs), false);
- }
}
}
@@ -537,22 +502,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __pte_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
}
void __pmd_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
}
void __pud_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
}
void __pgd_error(const char *file, int line, unsigned long val)
{
- pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
}
/* GENERIC_BUG traps */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6b562a318f84..b3f48316f888 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -88,16 +88,15 @@ SECTIONS
EXIT_CALL
*(.discard)
*(.discard.*)
- *(.interp .dynamic)
}
- . = KIMAGE_VADDR + TEXT_OFFSET;
+ . = PAGE_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
}
- ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+ ALIGN_DEBUG_RO
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -115,12 +114,14 @@ SECTIONS
*(.got) /* Global offset table */
}
+ ALIGN_DEBUG_RO
RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(8)
NOTES
+ ALIGN_DEBUG_RO
+ _etext = .; /* End of text and rodata section */
ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
- _etext = .; /* End of text and rodata section */
__init_begin = .;
INIT_TEXT_SECTION(8)
@@ -128,6 +129,7 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_TEXT)
}
+ ALIGN_DEBUG_RO_MIN(16)
.init.data : {
INIT_DATA
INIT_SETUP(16)
@@ -142,6 +144,9 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
@@ -151,25 +156,8 @@ SECTIONS
.altinstr_replacement : {
*(.altinstr_replacement)
}
- .rela : ALIGN(8) {
- __reloc_start = .;
- *(.rela .rela*)
- __reloc_end = .;
- }
- .dynsym : ALIGN(8) {
- __dynsym_start = .;
- *(.dynsym)
- }
- .dynstr : {
- *(.dynstr)
- }
- .hash : {
- *(.hash)
- }
. = ALIGN(PAGE_SIZE);
- __init_end = .;
-
_data = .;
_sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
@@ -203,4 +191,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
-ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 309e3479dc2c..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -923,7 +923,7 @@ __hyp_panic_str:
.align 2
/*
- * u64 __kvm_call_hyp(void *hypfn, ...);
+ * u64 kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
@@ -940,10 +940,10 @@ __hyp_panic_str:
* used to implement __hyp_get_vectors in the same way as in
* arch/arm64/kernel/hyp_stub.S.
*/
-ENTRY(__kvm_call_hyp)
+ENTRY(kvm_call_hyp)
hvc #0
ret
-ENDPROC(__kvm_call_hyp)
+ENDPROC(kvm_call_hyp)
.macro invalid_vector label, target
.align 2
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index c86b7909ef31..1a811ecf71da 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -4,16 +4,15 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o
-# Tell the compiler to treat all general purpose registers (with the
-# exception of the IP registers, which are already handled by the caller
-# in case of a PLT) as callee-saved, which allows for efficient runtime
-# patching of the bl instruction in the caller with an atomic instruction
-# when supported by the CPU. Result and argument registers are handled
-# correctly, based on the function prototype.
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
- -fcall-saved-x18
+ -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 5d1cad3ce6d6..a9723c71c52b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -33,28 +33,28 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
1:
-uao_user_alternative 9f, str, sttr, xzr, x0, 8
+USER(9f, str xzr, [x0], #8 )
subs x1, x1, #8
b.pl 1b
2: adds x1, x1, #4
b.mi 3f
-uao_user_alternative 9f, str, sttr, wzr, x0, 4
+USER(9f, str wzr, [x0], #4 )
sub x1, x1, #4
3: adds x1, x1, #2
b.mi 4f
-uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
+USER(9f, strh wzr, [x0], #2 )
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
-uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
+USER(9f, strb wzr, [x0] )
5: mov x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 17e8306dca29..4699cd74f87e 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -34,7 +34,7 @@
*/
.macro ldrb1 ptr, regB, val
- uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
+ USER(9998f, ldrb \ptr, [\regB], \val)
.endm
.macro strb1 ptr, regB, val
@@ -42,7 +42,7 @@
.endm
.macro ldrh1 ptr, regB, val
- uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
+ USER(9998f, ldrh \ptr, [\regB], \val)
.endm
.macro strh1 ptr, regB, val
@@ -50,7 +50,7 @@
.endm
.macro ldr1 ptr, regB, val
- uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
+ USER(9998f, ldr \ptr, [\regB], \val)
.endm
.macro str1 ptr, regB, val
@@ -58,7 +58,7 @@
.endm
.macro ldp1 ptr, regB, regC, val
- uao_ldp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, ldp \ptr, \regB, [\regC], \val)
.endm
.macro stp1 ptr, regB, regC, val
@@ -67,11 +67,11 @@
end .req x5
ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0 // Nothing to copy
ret
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index f7292dd08c84..81c8fc93c100 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -35,44 +35,44 @@
* x0 - bytes not copied
*/
.macro ldrb1 ptr, regB, val
- uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
+ USER(9998f, ldrb \ptr, [\regB], \val)
.endm
.macro strb1 ptr, regB, val
- uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
+ USER(9998f, strb \ptr, [\regB], \val)
.endm
.macro ldrh1 ptr, regB, val
- uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
+ USER(9998f, ldrh \ptr, [\regB], \val)
.endm
.macro strh1 ptr, regB, val
- uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
+ USER(9998f, strh \ptr, [\regB], \val)
.endm
.macro ldr1 ptr, regB, val
- uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
+ USER(9998f, ldr \ptr, [\regB], \val)
.endm
.macro str1 ptr, regB, val
- uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
+ USER(9998f, str \ptr, [\regB], \val)
.endm
.macro ldp1 ptr, regB, regC, val
- uao_ldp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, ldp \ptr, \regB, [\regC], \val)
.endm
.macro stp1 ptr, regB, regC, val
- uao_stp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, stp \ptr, \regB, [\regC], \val)
.endm
end .req x5
ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index 4c1e700840b6..512b9a7b980e 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -18,8 +18,6 @@
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
-#include <asm/cpufeature.h>
-#include <asm/alternative.h>
/*
* Copy a page from src to dest (both are page aligned)
@@ -29,65 +27,20 @@
* x1 - src
*/
ENTRY(copy_page)
-alternative_if_not ARM64_HAS_NO_HW_PREFETCH
- nop
- nop
-alternative_else
- # Prefetch two cache lines ahead.
- prfm pldl1strm, [x1, #128]
- prfm pldl1strm, [x1, #256]
-alternative_endif
-
- ldp x2, x3, [x1]
+ /* Assume cache line size is 64 bytes. */
+ prfm pldl1strm, [x1, #64]
+1: ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
ldp x8, x9, [x1, #48]
- ldp x10, x11, [x1, #64]
- ldp x12, x13, [x1, #80]
- ldp x14, x15, [x1, #96]
- ldp x16, x17, [x1, #112]
-
- mov x18, #(PAGE_SIZE - 128)
- add x1, x1, #128
-1:
- subs x18, x18, #128
-
-alternative_if_not ARM64_HAS_NO_HW_PREFETCH
- nop
-alternative_else
- prfm pldl1strm, [x1, #384]
-alternative_endif
-
+ add x1, x1, #64
+ prfm pldl1strm, [x1, #64]
stnp x2, x3, [x0]
- ldp x2, x3, [x1]
stnp x4, x5, [x0, #16]
- ldp x4, x5, [x1, #16]
stnp x6, x7, [x0, #32]
- ldp x6, x7, [x1, #32]
stnp x8, x9, [x0, #48]
- ldp x8, x9, [x1, #48]
- stnp x10, x11, [x0, #64]
- ldp x10, x11, [x1, #64]
- stnp x12, x13, [x0, #80]
- ldp x12, x13, [x1, #80]
- stnp x14, x15, [x0, #96]
- ldp x14, x15, [x1, #96]
- stnp x16, x17, [x0, #112]
- ldp x16, x17, [x1, #112]
-
- add x0, x0, #128
- add x1, x1, #128
-
- b.gt 1b
-
- stnp x2, x3, [x0]
- stnp x4, x5, [x0, #16]
- stnp x6, x7, [x0, #32]
- stnp x8, x9, [x0, #48]
- stnp x10, x11, [x0, #64]
- stnp x12, x13, [x0, #80]
- stnp x14, x15, [x0, #96]
- stnp x16, x17, [x0, #112]
-
+ add x0, x0, #64
+ tst x1, #(PAGE_SIZE - 1)
+ b.ne 1b
ret
ENDPROC(copy_page)
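
The restored copy_page loops over 64-byte chunks with a one-line-ahead prefetch and exits when the source pointer crosses the next page boundary. As a control-flow model only (the asm additionally uses non-temporal stnp stores and prfm hints, which plain C cannot express), and assuming a page-aligned source as the asm does:

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE	4096UL

static void copy_page_model(void *dst, const void *src)
{
	uint8_t *d = dst;
	const uint8_t *s = src;

	do {
		memcpy(d, s, 64);	/* four ldp/stnp pairs in the asm */
		s += 64;
		d += 64;
		/* "tst x1, #(PAGE_SIZE - 1)": stop once the source
		 * pointer reaches the next page boundary. */
	} while ((uintptr_t)s & (PAGE_SIZE - 1));
}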
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 21faae60f988..7512bbbc07ac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -37,7 +37,7 @@
.endm
.macro strb1 ptr, regB, val
- uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
+ USER(9998f, strb \ptr, [\regB], \val)
.endm
.macro ldrh1 ptr, regB, val
@@ -45,7 +45,7 @@
.endm
.macro strh1 ptr, regB, val
- uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
+ USER(9998f, strh \ptr, [\regB], \val)
.endm
.macro ldr1 ptr, regB, val
@@ -53,7 +53,7 @@
.endm
.macro str1 ptr, regB, val
- uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
+ USER(9998f, str \ptr, [\regB], \val)
.endm
.macro ldp1 ptr, regB, regC, val
@@ -61,16 +61,16 @@
.endm
.macro stp1 ptr, regB, regC, val
- uao_stp 9998f, \ptr, \regB, \regC, \val
+ USER(9998f, stp \ptr, \regB, [\regC], \val)
.endm
end .req x5
ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2f06997dcd59..ce32a2229a9d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -154,32 +154,26 @@ ENDPROC(__flush_cache_user_range)
/*
* __flush_dcache_area(kaddr, size)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
- * are cleaned and invalidated to the PoC.
+ * Ensure that the data held in the page kaddr is written back to the
+ * page in question.
*
* - kaddr - kernel address
* - size - size in question
*/
ENTRY(__flush_dcache_area)
- dcache_by_line_op civac, sy, x0, x1, x2, x3
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D line / unified line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
ret
ENDPIPROC(__flush_dcache_area)
/*
- * __clean_dcache_area_pou(kaddr, size)
- *
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
- * are cleaned to the PoU.
- *
- * - kaddr - kernel address
- * - size - size in question
- */
-ENTRY(__clean_dcache_area_pou)
- dcache_by_line_op cvau, ish, x0, x1, x2, x3
- ret
-ENDPROC(__clean_dcache_area_pou)
-
-/*
* __inval_cache_range(start, end)
* - start - start address of region
* - end - end address of region
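
The open-coded loop restored in __flush_dcache_area walks the range one cache line at a time, cleaning and invalidating by VA and finishing with a barrier. A sketch in C with inline assembly, assuming an AArch64 toolchain and a fixed 64-byte line (the real code reads the line size from CTR_EL0 via dcache_line_size):

#include <stdint.h>

#define DCACHE_LINE	64UL

static void flush_dcache_area(void *kaddr, unsigned long size)
{
	uintptr_t addr = (uintptr_t)kaddr & ~(DCACHE_LINE - 1);
	uintptr_t end = (uintptr_t)kaddr + size;

	for (; addr < end; addr += DCACHE_LINE)
		/* Clean & invalidate by VA to the point of coherency. */
		asm volatile("dc civac, %0" : : "r"(addr) : "memory");
	asm volatile("dsb sy" : : : "memory");	/* complete maintenance */
}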
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 7275628ba59f..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -187,7 +187,7 @@ switch_mm_fastpath:
static int asids_init(void)
{
- int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);
+ int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
switch (fld) {
default:
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 22e4cb4d6f53..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -24,9 +24,8 @@
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
- struct page *page = virt_to_page(kto);
copy_page(kto, kfrom);
- flush_dcache_page(page);
+ __flush_dcache_area(kto, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 7109e27de235..e5389bc981ee 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -52,7 +52,7 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
static struct gen_pool *atomic_pool;
#define NO_KERNEL_MAPPING_DUMMY 0x2222
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
@@ -1015,7 +1015,7 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
return 0;
}
-static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int register_iommu_dma_ops_notifier(struct bus_type *bus)
{
struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
int ret;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 6be918478f85..5a22a119a74c 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -35,9 +35,7 @@ struct addr_marker {
};
enum address_markers_idx {
- MODULES_START_NR = 0,
- MODULES_END_NR,
- VMALLOC_START_NR,
+ VMALLOC_START_NR = 0,
VMALLOC_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
VMEMMAP_START_NR,
@@ -47,12 +45,12 @@ enum address_markers_idx {
FIXADDR_END_NR,
PCI_START_NR,
PCI_END_NR,
+ MODULES_START_NR,
+ MODULES_END_NR,
KERNEL_SPACE_NR,
};
static struct addr_marker address_markers[] = {
- { MODULES_VADDR, "Modules start" },
- { MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() Area" },
{ VMALLOC_END, "vmalloc() End" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -63,7 +61,9 @@ static struct addr_marker address_markers[] = {
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
{ PCI_IO_END, "PCI I/O end" },
- { PAGE_OFFSET, "Linear Mapping" },
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
+ { PAGE_OFFSET, "Kernel Mapping" },
{ -1, NULL },
};
@@ -90,11 +90,6 @@ struct prot_bits {
static const struct prot_bits pte_bits[] = {
{
- .mask = PTE_VALID,
- .val = PTE_VALID,
- .set = " ",
- .clear = "F",
- }, {
.mask = PTE_USER,
.val = PTE_USER,
.set = "USR",
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 81acd4706878..79444279ba8c 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup)
- regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
+ regs->pc = fixup->fixup;
return fixup != NULL;
}
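
The one-line change above switches fixup_exception() back from relative to absolute exception-table entries. The two conventions, side by side as a hedged sketch (struct names are illustrative):

#include <stdint.h>

struct exception_table_entry_rel {
	int32_t insn;	/* offset to the faulting instruction */
	int32_t fixup;	/* offset to the landing pad */
};

struct exception_table_entry_abs {
	unsigned long insn;
	unsigned long fixup;	/* absolute landing-pad address */
};

static unsigned long resolve_rel(const struct exception_table_entry_rel *e)
{
	/* Matches the deleted "(unsigned long)&fixup->fixup + fixup->fixup". */
	return (unsigned long)&e->fixup + e->fixup;
}

static unsigned long resolve_abs(const struct exception_table_entry_abs *e)
{
	/* Matches the restored "regs->pc = fixup->fixup". */
	return e->fixup;
}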
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 69079e5bfc84..88ada16a1301 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -197,14 +197,6 @@ out:
return fault;
}
-static inline int permission_fault(unsigned int esr)
-{
- unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
-
- return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
-}
-
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -238,13 +230,12 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (permission_fault(esr) && (addr < USER_DS)) {
- if (get_fs() == KERNEL_DS)
- die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
-
- if (!search_exception_tables(regs->pc))
- die("Accessing user space memory outside uaccess.h routines", regs, esr);
- }
+ /*
+ * A fault taken with the PAN bit set happened in kernel space, outside
+ * the arch's user access functions (which run with PAN disabled).
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+ goto no_context;
/*
* As per x86, we may deadlock here. However, since the kernel only
@@ -576,16 +567,3 @@ void cpu_enable_pan(void *__unused)
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
}
#endif /* CONFIG_ARM64_PAN */
-
-#ifdef CONFIG_ARM64_UAO
-/*
- * Kernel threads have fs=KERNEL_DS by default, and don't need to call
- * set_fs(), devtmpfs in particular relies on this behaviour.
- * We need to enable the feature at runtime (instead of adding it to
- * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
- */
-void cpu_enable_uao(void *__unused)
-{
- asm(SET_PSTATE_UAO(1));
-}
-#endif /* CONFIG_ARM64_UAO */
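The restored check reads the saved PSTATE directly rather than decoding the ESR. A condensed sketch, assuming PSR_PAN_BIT selects the PAN bit (bit 22) of the saved pstate:

    /* Sketch: a PAN fault means the kernel touched user memory outside
     * the uaccess helpers, so there is no fixup worth searching for. */
    static bool fault_from_pan(const struct pt_regs *regs)
    {
            return IS_ENABLED(CONFIG_ARM64_PAN) &&
                   (regs->pstate & PSR_PAN_BIT);
    }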
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 61b0911aa475..07844184975b 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -34,24 +34,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
__flush_icache_all();
}
-static void sync_icache_aliases(void *kaddr, unsigned long len)
-{
- unsigned long addr = (unsigned long)kaddr;
-
- if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
- } else {
- flush_icache_range(addr, addr + len);
- }
-}
-
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len)
{
- if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ if (vma->vm_flags & VM_EXEC) {
+ unsigned long addr = (unsigned long)kaddr;
+ if (icache_is_aliasing()) {
+ __flush_dcache_area(kaddr, len);
+ __flush_icache_all();
+ } else {
+ flush_icache_range(addr, addr + len);
+ }
+ }
}
/*
@@ -79,11 +74,13 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
if (!page_mapping(page))
return;
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- sync_icache_aliases(page_address(page),
- PAGE_SIZE << compound_order(page));
- else if (icache_is_aivivt())
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+ __flush_dcache_area(page_address(page),
+ PAGE_SIZE << compound_order(page));
__flush_icache_all();
+ } else if (icache_is_aivivt()) {
+ __flush_icache_all();
+ }
}
/*
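__sync_icache_dcache() above defers maintenance through PG_dcache_clean: the first mapping since the page was dirtied does the full sync, while AIVIVT I-caches still need an invalidate on every new mapping. The protocol, with the flag test spelled out (sketch using the helpers named in the hunk):

    /* test_and_set_bit() returns the old value: 0 means this is the first
     * mapping since the page became dirty, so do the full D+I sync. */
    if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
            __flush_dcache_area(page_address(page),
                                PAGE_SIZE << compound_order(page));
            __flush_icache_all();
    } else if (icache_is_aivivt()) {
            __flush_icache_all();   /* ASID/VA-indexed: invalidate per mapping */
    }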
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index da30529bb1f6..383b03ff38f8 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -41,273 +41,15 @@ int pud_huge(pud_t pud)
#endif
}
-static int find_num_contig(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, size_t *pgsize)
-{
- pgd_t *pgd = pgd_offset(mm, addr);
- pud_t *pud;
- pmd_t *pmd;
-
- *pgsize = PAGE_SIZE;
- if (!pte_cont(pte))
- return 1;
- if (!pgd_present(*pgd)) {
- VM_BUG_ON(!pgd_present(*pgd));
- return 1;
- }
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud)) {
- VM_BUG_ON(!pud_present(*pud));
- return 1;
- }
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- VM_BUG_ON(!pmd_present(*pmd));
- return 1;
- }
- if ((pte_t *)pmd == ptep) {
- *pgsize = PMD_SIZE;
- return CONT_PMDS;
- }
- return CONT_PTES;
-}
-
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- size_t pgsize;
- int i;
- int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
- unsigned long pfn;
- pgprot_t hugeprot;
-
- if (ncontig == 1) {
- set_pte_at(mm, addr, ptep, pte);
- return;
- }
-
- pfn = pte_pfn(pte);
- hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
- for (i = 0; i < ncontig; i++) {
- pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
- pte_val(pfn_pte(pfn, hugeprot)));
- set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
- ptep++;
- pfn += pgsize >> PAGE_SHIFT;
- addr += pgsize;
- }
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- return NULL;
-
- if (sz == PUD_SIZE) {
- pte = (pte_t *)pud;
- } else if (sz == (PAGE_SIZE * CONT_PTES)) {
- pmd_t *pmd = pmd_alloc(mm, pud, addr);
-
- WARN_ON(addr & (sz - 1));
- /*
- * Note that if this code were ever ported to the
- * 32-bit arm platform then it will cause trouble in
- * the case where CONFIG_HIGHPTE is set, since there
- * will be no pte_unmap() to correspond with this
- * pte_alloc_map().
- */
- pte = pte_alloc_map(mm, NULL, pmd, addr);
- } else if (sz == PMD_SIZE) {
- if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
- pud_none(*pud))
- pte = huge_pmd_share(mm, addr, pud);
- else
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
- } else if (sz == (PMD_SIZE * CONT_PMDS)) {
- pmd_t *pmd;
-
- pmd = pmd_alloc(mm, pud, addr);
- WARN_ON(addr & (sz - 1));
- return (pte_t *)pmd;
- }
-
- pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
- sz, pte, pte_val(*pte));
- return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
- if (!pgd_present(*pgd))
- return NULL;
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud))
- return NULL;
-
- if (pud_huge(*pud))
- return (pte_t *)pud;
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
- return NULL;
-
- if (pte_cont(pmd_pte(*pmd))) {
- pmd = pmd_offset(
- pud, (addr & CONT_PMD_MASK));
- return (pte_t *)pmd;
- }
- if (pmd_huge(*pmd))
- return (pte_t *)pmd;
- pte = pte_offset_kernel(pmd, addr);
- if (pte_present(*pte) && pte_cont(*pte)) {
- pte = pte_offset_kernel(
- pmd, (addr & CONT_PTE_MASK));
- return pte;
- }
- return NULL;
-}
-
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
-{
- size_t pagesize = huge_page_size(hstate_vma(vma));
-
- if (pagesize == CONT_PTE_SIZE) {
- entry = pte_mkcont(entry);
- } else if (pagesize == CONT_PMD_SIZE) {
- entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
- } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
- pr_warn("%s: unrecognized huge page size 0x%lx\n",
- __func__, pagesize);
- }
- return entry;
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte;
-
- if (pte_cont(*ptep)) {
- int ncontig, i;
- size_t pgsize;
- pte_t *cpte;
- bool is_dirty = false;
-
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- /* save the 1st pte to return */
- pte = ptep_get_and_clear(mm, addr, cpte);
- for (i = 1; i < ncontig; ++i) {
- /*
- * If HW_AFDBM is enabled, then the HW could
- * turn on the dirty bit for any of the page
- * in the set, so check them all.
- */
- ++cpte;
- if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
- is_dirty = true;
- }
- if (is_dirty)
- return pte_mkdirty(pte);
- else
- return pte;
- } else {
- return ptep_get_and_clear(mm, addr, ptep);
- }
-}
-
-int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- pte_t *cpte;
-
- if (pte_cont(pte)) {
- int ncontig, i, changed = 0;
- size_t pgsize = 0;
- unsigned long pfn = pte_pfn(pte);
- /* Select all bits except the pfn */
- pgprot_t hugeprot =
- __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
- pte_val(pte));
-
- cpte = huge_pte_offset(vma->vm_mm, addr);
- pfn = pte_pfn(*cpte);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte) {
- changed = ptep_set_access_flags(vma, addr, cpte,
- pfn_pte(pfn,
- hugeprot),
- dirty);
- pfn += pgsize >> PAGE_SHIFT;
- }
- return changed;
- } else {
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- }
-}
-
-void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- if (pte_cont(*ptep)) {
- int ncontig, i;
- pte_t *cpte;
- size_t pgsize = 0;
-
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
- ptep_set_wrprotect(mm, addr, cpte);
- } else {
- ptep_set_wrprotect(mm, addr, ptep);
- }
-}
-
-void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- if (pte_cont(*ptep)) {
- int ncontig, i;
- pte_t *cpte;
- size_t pgsize = 0;
-
- cpte = huge_pte_offset(vma->vm_mm, addr);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
- ptep_clear_flush(vma, addr, cpte);
- } else {
- ptep_clear_flush(vma, addr, ptep);
- }
-}
-
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
-
if (ps == PMD_SIZE) {
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
- pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
+ pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
return 0;
}
return 1;
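With the contiguous-hint paths gone, the restored setup_hugepagesz() accepts only PMD- and PUD-sized pages. On a 4K-granule kernel that means, for example:

    hugepagesz=2M hugepages=64    # PMD_SIZE: accepted
    hugepagesz=1G hugepages=2     # PUD_SIZE: accepted
    hugepagesz=64K hugepages=32   # rejected; the error is now reported in MB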
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0d5b0d0578b3..52406a2a39ad 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,10 +35,7 @@
#include <linux/efi.h>
#include <linux/swiotlb.h>
-#include <asm/boot.h>
#include <asm/fixmap.h>
-#include <asm/kasan.h>
-#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -48,13 +45,7 @@
#include "mm.h"
-/*
- * We need to be able to catch inadvertent references to memstart_addr
- * that occur (potentially in generic code) before arm64_memblock_init()
- * executes, which assigns it its actual value. So use a default value
- * that cannot be mistaken for a real physical address.
- */
-s64 memstart_addr __read_mostly = -1;
+phys_addr_t memstart_addr __read_mostly = 0;
phys_addr_t arm64_dma_phys_limit __read_mostly;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -67,8 +58,8 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- initrd_start = start;
- initrd_end = start + size;
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(start + size);
}
return 0;
}
@@ -80,7 +71,7 @@ early_param("initrd", early_initrd);
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t __init max_zone_dma_phys(void)
+static phys_addr_t max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -137,11 +128,11 @@ EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
-static void __init arm64_memory_present(void)
+static void arm64_memory_present(void)
{
}
#else
-static void __init arm64_memory_present(void)
+static void arm64_memory_present(void)
{
struct memblock_region *reg;
@@ -170,57 +161,7 @@ early_param("mem", early_mem);
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = -(s64)PAGE_OFFSET;
-
- /*
- * Ensure that the linear region takes up exactly half of the kernel
- * virtual address space. This way, we can distinguish a linear address
- * from a kernel/module/vmalloc address by testing a single bit.
- */
- BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
-
- /*
- * Select a suitable value for the base of physical memory.
- */
- memstart_addr = round_down(memblock_start_of_DRAM(),
- ARM64_MEMSTART_ALIGN);
-
- /*
- * Remove the memory that we will not be able to cover with the
- * linear mapping. Take care not to clip the kernel which may be
- * high in memory.
- */
- memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
- ULLONG_MAX);
- if (memblock_end_of_DRAM() > linear_region_size)
- memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
-
- /*
- * Apply the memory limit if it was set. Since the kernel may be loaded
- * high up in memory, add back the kernel region that must be accessible
- * via the linear mapping.
- */
- if (memory_limit != (phys_addr_t)ULLONG_MAX) {
- memblock_enforce_memory_limit(memory_limit);
- memblock_add(__pa(_text), (u64)(_end - _text));
- }
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- extern u16 memstart_offset_seed;
- u64 range = linear_region_size -
- (memblock_end_of_DRAM() - memblock_start_of_DRAM());
-
- /*
- * If the size of the linear region exceeds, by a sufficient
- * margin, the size of the region that the available physical
- * memory spans, randomize the linear region as well.
- */
- if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
- range = range / ARM64_MEMSTART_ALIGN + 1;
- memstart_addr -= ARM64_MEMSTART_ALIGN *
- ((range * memstart_offset_seed) >> 16);
- }
- }
+ memblock_enforce_memory_limit(memory_limit);
/*
* Register the kernel text, kernel data, initrd, and initial
@@ -228,13 +169,8 @@ void __init arm64_memblock_init(void)
*/
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- memblock_reserve(initrd_start, initrd_end - initrd_start);
-
- /* the generic initrd code expects virtual addresses */
- initrd_start = __phys_to_virt(initrd_start);
- initrd_end = __phys_to_virt(initrd_end);
- }
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
early_init_fdt_scan_reserved_mem();
@@ -368,36 +304,35 @@ void __init mem_init(void)
#ifdef CONFIG_KASAN
" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n"
#endif
- " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n"
- " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
- " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
- " .data : 0x%p" " - 0x%p" " (%6ld KB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
" 0x%16lx - 0x%16lx (%6ld MB actual)\n"
#endif
" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
- " memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
+ " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
+ " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
+ " .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
#ifdef CONFIG_KASAN
MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
#endif
- MLM(MODULES_VADDR, MODULES_END),
MLG(VMALLOC_START, VMALLOC_END),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(_sdata, _edata),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
MLG(VMEMMAP_START,
VMEMMAP_START + VMEMMAP_SIZE),
- MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
+ MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif
MLK(FIXADDR_START, FIXADDR_TOP),
MLM(PCI_IO_START, PCI_IO_END),
- MLM(__phys_to_virt(memblock_start_of_DRAM()),
- (unsigned long)high_memory));
+ MLM(MODULES_VADDR, MODULES_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(_sdata, _edata));
#undef MLK
#undef MLM
@@ -430,8 +365,9 @@ static inline void poison_init_mem(void *s, size_t count)
void free_initmem(void)
{
- free_initmem_default(0);
fixup_init();
+ free_initmem_default(0);
+ free_alternatives_memory();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -465,27 +401,3 @@ void set_kernel_text_ro(void)
set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}
#endif
-/*
- * Dump out memory limit information on panic.
- */
-static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
-{
- if (memory_limit != (phys_addr_t)ULLONG_MAX) {
- pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
- } else {
- pr_emerg("Memory Limit: none\n");
- }
- return 0;
-}
-
-static struct notifier_block mem_limit_notifier = {
- .notifier_call = dump_mem_limit,
-};
-
-static int __init register_mem_limit_dumper(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &mem_limit_notifier);
- return 0;
-}
-__initcall(register_mem_limit_dumper);
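With the linear-region randomization removed, memstart_addr is once again a plain DRAM base set during early memblock setup, and linear-map translation is a fixed offset. A sketch of the relation these hunks rely on (paraphrased from the pre-KASLR asm/memory.h, where PHYS_OFFSET is memstart_addr):

    /* Linear (PAGE_OFFSET) mapping: a constant offset between VA and PA. */
    #define __virt_to_phys_sketch(x)  (((phys_addr_t)(x) - PAGE_OFFSET) + PHYS_OFFSET)
    #define __phys_to_virt_sketch(x)  ((unsigned long)((x) - PHYS_OFFSET) + PAGE_OFFSET)

    /* e.g. the restored early_initrd() can store virtual addresses directly:
     *   initrd_start = (unsigned long)__va(start);
     */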
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009daa9ed..cf038c7d9fa9 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -16,12 +16,9 @@
#include <linux/memblock.h>
#include <linux/start_kernel.h>
-#include <asm/mmu_context.h>
-#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/sections.h>
#include <asm/tlbflush.h>
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -35,7 +32,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
if (pmd_none(*pmd))
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
- pte = pte_offset_kimg(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
do {
next = addr + PAGE_SIZE;
set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -53,7 +50,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
if (pud_none(*pud))
pud_populate(&init_mm, pud, kasan_zero_pmd);
- pmd = pmd_offset_kimg(pud, addr);
+ pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
kasan_early_pte_populate(pmd, addr, next);
@@ -70,7 +67,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
if (pgd_none(*pgd))
pgd_populate(&init_mm, pgd, kasan_zero_pud);
- pud = pud_offset_kimg(pgd, addr);
+ pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
kasan_early_pmd_populate(pud, addr, next);
@@ -99,21 +96,6 @@ asmlinkage void __init kasan_early_init(void)
kasan_map_early_shadow();
}
-/*
- * Copy the current shadow region into a new pgdir.
- */
-void __init kasan_copy_shadow(pgd_t *pgdir)
-{
- pgd_t *pgd, *pgd_new, *pgd_end;
-
- pgd = pgd_offset_k(KASAN_SHADOW_START);
- pgd_end = pgd_offset_k(KASAN_SHADOW_END);
- pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
- do {
- set_pgd(pgd_new, *pgd);
- } while (pgd++, pgd_new++, pgd != pgd_end);
-}
-
static void __init clear_pgds(unsigned long start,
unsigned long end)
{
@@ -126,18 +108,18 @@ static void __init clear_pgds(unsigned long start,
set_pgd(pgd_offset_k(start), __pgd(0));
}
+static void __init cpu_set_ttbr1(unsigned long ttbr1)
+{
+ asm(
+ " msr ttbr1_el1, %0\n"
+ " isb"
+ :
+ : "r" (ttbr1));
+}
+
void __init kasan_init(void)
{
- u64 kimg_shadow_start, kimg_shadow_end;
- u64 mod_shadow_start, mod_shadow_end;
struct memblock_region *reg;
- int i;
-
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
- kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
-
- mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
- mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
/*
* We are going to perform proper setup of shadow memory.
@@ -147,33 +129,13 @@ void __init kasan_init(void)
* setup will be finished.
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
- dsb(ishst);
- cpu_replace_ttbr1(tmp_pg_dir);
+ cpu_set_ttbr1(__pa(tmp_pg_dir));
+ flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
- vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
-
- /*
- * vmemmap_populate() has populated the shadow region that covers the
- * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
- * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
- * kasan_populate_zero_shadow() from replacing the page table entries
- * (PMD or PTE) at the edges of the shadow region for the kernel
- * image.
- */
- kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
- kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
-
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
- (void *)mod_shadow_start);
- kasan_populate_zero_shadow((void *)kimg_shadow_end,
- kasan_mem_to_shadow((void *)PAGE_OFFSET));
-
- if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_zero_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ kasan_mem_to_shadow((void *)MODULES_VADDR));
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@@ -193,16 +155,9 @@ void __init kasan_init(void)
pfn_to_nid(virt_to_pfn(start)));
}
- /*
- * KAsan may reuse the contents of kasan_zero_pte directly, so we
- * should make sure that it maps the zero page read-only.
- */
- for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_zero_pte[i],
- pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
-
memset(kasan_zero_page, 0, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ cpu_set_ttbr1(__pa(swapper_pg_dir));
+ flush_tlb_all();
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
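kasan_init() above only has to populate real shadow for the linear map and zero shadow everywhere else; the shadow translation itself is unchanged. For reference, the generic mapping it populates (scale shift of 3, i.e. one shadow byte per 8 bytes of memory):

    static inline void *kasan_mem_to_shadow_sketch(const void *addr)
    {
            return (void *)(((unsigned long)addr >> 3)  /* KASAN_SHADOW_SCALE_SHIFT */
                            + KASAN_SHADOW_OFFSET);
    }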
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 62096a7e047a..e82aabb3c5e2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,10 +32,8 @@
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
-#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
-#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -48,21 +46,14 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
-u64 kimage_voffset __read_mostly;
-EXPORT_SYMBOL(kimage_voffset);
-
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
*/
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-
-static bool dma_overlap(phys_addr_t start, phys_addr_t end);
+static bool __init dma_overlap(phys_addr_t start, phys_addr_t end);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
@@ -75,30 +66,16 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static phys_addr_t __init early_pgtable_alloc(void)
+static void __init *early_alloc(unsigned long sz)
{
phys_addr_t phys;
void *ptr;
- phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ phys = memblock_alloc(sz, sz);
BUG_ON(!phys);
-
- /*
- * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
- * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
- * any level of table.
- */
- ptr = pte_set_fixmap(phys);
-
- memset(ptr, 0, PAGE_SIZE);
-
- /*
- * Implicit barriers also ensure the zeroed page is visible to the page
- * table walker
- */
- pte_clear_fixmap();
-
- return phys;
+ ptr = __va(phys);
+ memset(ptr, 0, sz);
+ return ptr;
}
/*
@@ -122,30 +99,24 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size))
{
pte_t *pte;
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
- phys_addr_t pte_phys;
- BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc();
- pte = pte_set_fixmap(pte_phys);
+ pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
if (pmd_sect(*pmd))
split_pmd(pmd, pte);
- __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
flush_tlb_all();
- pte_clear_fixmap();
}
BUG_ON(pmd_bad(*pmd));
- pte = pte_set_fixmap_offset(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
do {
set_pte(pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
-
- pte_clear_fixmap();
}
static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -160,29 +131,10 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
} while (pmd++, i++, i < PTRS_PER_PMD);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-
- /*
- * If debug_page_alloc is enabled we must map the linear map
- * using pages. However, other mappings created by
- * create_mapping_noalloc must use sections in some cases. Allow
- * sections to be used in those cases, where no pgtable_alloc
- * function is provided.
- */
- return !pgtable_alloc || !debug_pagealloc_enabled();
-}
-#else
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
- return true;
-}
-#endif
-
-static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size), bool pages)
{
pmd_t *pmd;
unsigned long next;
@@ -191,10 +143,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
* Check for initial section mappings in the pgd/pud and remove them.
*/
if (pud_none(*pud) || pud_sect(*pud)) {
- phys_addr_t pmd_phys;
- BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc();
- pmd = pmd_set_fixmap(pmd_phys);
+ pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
if (pud_sect(*pud)) {
/*
* need to have the 1G of mappings continue to be
@@ -202,21 +151,19 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
*/
split_pud(pud, pmd);
}
- __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+ pud_populate(mm, pud, pmd);
flush_tlb_all();
- pmd_clear_fixmap();
}
BUG_ON(pud_bad(*pud));
- pmd = pmd_set_fixmap_offset(pud, addr);
+ pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- block_mappings_allowed(pgtable_alloc) &&
- !dma_overlap(phys, phys + next - addr)) {
+ if (!pages && ((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd =*pmd;
- pmd_set_huge(pmd, phys, prot);
+ set_pmd(pmd, __pmd(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
@@ -224,19 +171,17 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
if (!pmd_none(old_pmd)) {
flush_tlb_all();
if (pmd_table(old_pmd)) {
- phys_addr_t table = pmd_page_paddr(old_pmd);
+ phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, pgtable_alloc);
+ prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
-
- pmd_clear_fixmap();
}
static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -251,22 +196,21 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
return true;
}
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size), bool force_pages)
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
- phys_addr_t pud_phys;
- BUG_ON(!pgtable_alloc);
- pud_phys = pgtable_alloc();
- __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
+ pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
+ pgd_populate(mm, pgd, pud);
}
BUG_ON(pgd_bad(*pgd));
- pud = pud_set_fixmap_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
@@ -274,10 +218,12 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
* For 4K granule only, attempt to put down a 1GB block
*/
if (use_1G_block(addr, next, phys) &&
- block_mappings_allowed(pgtable_alloc) &&
- !dma_overlap(phys, phys + next - addr)) {
+ !force_pages &&
+ !dma_overlap(phys, phys + next - addr) &&
+ !IS_ENABLED(CONFIG_FORCE_PAGES)) {
pud_t old_pud = *pud;
- pud_set_huge(pud, phys, prot);
+ set_pud(pud, __pud(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
@@ -289,274 +235,359 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
if (!pud_none(old_pud)) {
flush_tlb_all();
if (pud_table(old_pud)) {
- phys_addr_t table = pud_page_paddr(old_pud);
+ phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
if (!WARN_ON_ONCE(slab_is_available()))
memblock_free(table, PAGE_SIZE);
}
}
} else {
- alloc_init_pmd(pud, addr, next, phys, prot,
- pgtable_alloc);
+ alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc, force_pages);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
-
- pud_clear_fixmap();
}
/*
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+ phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ void *(*alloc)(unsigned long size), bool force_pages)
{
unsigned long addr, length, end, next;
- /*
- * If the virtual and physical address don't have the same offset
- * within a page, we cannot map the region as the caller expects.
- */
- if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
- return;
-
- phys &= PAGE_MASK;
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
+ alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc, force_pages);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-static phys_addr_t late_pgtable_alloc(void)
+static void *late_alloc(unsigned long size)
{
- void *ptr = (void *)__get_free_page(PGALLOC_GFP);
- BUG_ON(!ptr);
-
- /* Ensure the zeroed page is visible to the page table walker */
- dsb(ishst);
- return __pa(ptr);
-}
+ void *ptr;
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot,
- phys_addr_t (*alloc)(void))
-{
- init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
+ BUG_ON(size > PAGE_SIZE);
+ ptr = (void *)__get_free_page(PGALLOC_GFP);
+ BUG_ON(!ptr);
+ return ptr;
}
-/*
- * This function can only be used to modify existing table entries,
- * without allocating new levels of table. Note that this permits the
- * creation of new section or page entries.
- */
-static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot, bool force_pages)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- NULL);
+ __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+ size, prot, early_alloc, force_pages);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot)
{
- __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- late_pgtable_alloc);
+ __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+ late_alloc, false);
}
-static void create_mapping_late(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+static inline pmd_t *pmd_off_k(unsigned long virt)
{
- if (virt < VMALLOC_START) {
- pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
- &phys, virt);
- return;
- }
-
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- late_pgtable_alloc);
+ return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
-static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
+void __init remap_as_pages(unsigned long start, unsigned long size)
{
- unsigned long kernel_start = __pa(_stext);
- unsigned long kernel_end = __pa(_etext);
+ unsigned long addr;
+ unsigned long end = start + size;
/*
- * Take care not to create a writable alias for the
- * read-only text and rodata sections of the kernel image.
+ * Make start and end PMD_SIZE aligned, observing memory
+ * boundaries
*/
+ if (memblock_is_memory(start & PMD_MASK))
+ start = start & PMD_MASK;
+ if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+ end = ALIGN(end, PMD_SIZE);
- /* No overlap with the kernel text */
- if (end < kernel_start || start >= kernel_end) {
- __create_pgd_mapping(pgd, start, __phys_to_virt(start),
- end - start, PAGE_KERNEL,
- early_pgtable_alloc);
- return;
- }
+ size = end - start;
/*
- * This block overlaps the kernel text mapping.
- * Map the portion(s) which don't overlap.
+ * Clear previous low-memory mapping
*/
- if (start < kernel_start)
- __create_pgd_mapping(pgd, start,
- __phys_to_virt(start),
- kernel_start - start, PAGE_KERNEL,
- early_pgtable_alloc);
- if (kernel_end < end)
- __create_pgd_mapping(pgd, kernel_end,
- __phys_to_virt(kernel_end),
- end - kernel_end, PAGE_KERNEL,
- early_pgtable_alloc);
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ addr += PMD_SIZE) {
+ pmd_t *pmd;
+ pmd = pmd_off_k(addr);
+ if (pmd_bad(*pmd) || pmd_sect(*pmd))
+ pmd_clear(pmd);
+ }
- /*
- * Map the linear alias of the [_stext, _etext) interval as
- * read-only/non-executable. This makes the contents of the
- * region accessible to subsystems such as hibernate, but
- * protects it from inadvertent modification or execution.
- */
- __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
- kernel_end - kernel_start, PAGE_KERNEL_RO,
- early_pgtable_alloc);
+ create_mapping(start, __phys_to_virt(start), size, PAGE_KERNEL, true);
}
-static void __init map_mem(pgd_t *pgd)
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
- struct memblock_region *reg;
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
- /* map all the memory banks */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
+static bool __init dma_overlap(phys_addr_t start, phys_addr_t end)
+{
+ int i;
- if (start >= end)
- break;
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t dma_base = dma_mmu_remap[i].base;
+ phys_addr_t dma_end = dma_mmu_remap[i].base +
+ dma_mmu_remap[i].size;
- __map_memblock(pgd, start, end);
+ if ((dma_base < end) && (dma_end > start))
+ return true;
}
+ return false;
}
-void mark_rodata_ro(void)
+static void __init dma_contiguous_remap(void)
{
- if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
+ int i;
+ for (i = 0; i < dma_mmu_remap_num; i++)
+ remap_as_pages(dma_mmu_remap[i].base,
+ dma_mmu_remap[i].size);
+}
+
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
+{
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
return;
+ }
- create_mapping_late(__pa(_stext), (unsigned long)_stext,
- (unsigned long)_etext - (unsigned long)_stext,
- PAGE_KERNEL_ROX);
+ return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+ phys, virt, size, prot, late_alloc,
+ IS_ENABLED(CONFIG_FORCE_PAGES));
}
-void fixup_init(void)
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
/*
- * Unmap the __init region but leave the VM area in place. This
- * prevents the region from being reused for kernel modules, which
- * is not supported by kallsyms.
+ * Set up the executable regions using the existing section mappings
+ * for now. This will get more fine grained later once all memory
+ * is mapped
*/
- unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+ unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
+
+ if (end < kernel_x_start) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL, false);
+ } else if (start >= kernel_x_end) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL, false);
+ } else {
+ if (start < kernel_x_start)
+ create_mapping(start, __phys_to_virt(start),
+ kernel_x_start - start,
+ PAGE_KERNEL, false);
+ create_mapping(kernel_x_start,
+ __phys_to_virt(kernel_x_start),
+ kernel_x_end - kernel_x_start,
+ PAGE_KERNEL_EXEC, false);
+ if (kernel_x_end < end)
+ create_mapping(kernel_x_end,
+ __phys_to_virt(kernel_x_end),
+ end - kernel_x_end,
+ PAGE_KERNEL, false);
+ }
}
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+ create_mapping(start, __phys_to_virt(start), end - start,
+ PAGE_KERNEL_EXEC, false);
+}
+#endif
-static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
- pgprot_t prot, struct vm_struct *vma)
+static void __init map_mem(void)
{
- phys_addr_t pa_start = __pa(va_start);
- unsigned long size = va_end - va_start;
+ struct memblock_region *reg;
+ phys_addr_t limit;
- BUG_ON(!PAGE_ALIGNED(pa_start));
- BUG_ON(!PAGE_ALIGNED(size));
+ /*
+ * Temporarily limit the memblock range. We need to do this as
+ * create_mapping requires puds, pmds and ptes to be allocated from
+ * memory addressable from the initial direct kernel mapping.
+ *
+ * The initial direct kernel mapping, located at swapper_pg_dir, gives
+ * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
+ * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
+ * per Documentation/arm64/booting.txt).
+ */
+ limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
+ memblock_set_current_limit(limit);
+
+ /* map all the memory banks */
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
- __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc);
+ if (start >= end)
+ break;
- vma->addr = va_start;
- vma->phys_addr = pa_start;
- vma->size = size;
- vma->flags = VM_MAP;
- vma->caller = __builtin_return_address(0);
+ if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+ /*
+ * For the first memory bank align the start address and
+ * current memblock limit to prevent create_mapping() from
+ * allocating pte page tables from unmapped memory. With
+ * the section maps, if the first block doesn't end on section
+ * size boundary, create_mapping() will try to allocate a pte
+ * page, which may be returned from an unmapped area.
+ * When section maps are not used, the pte page table for the
+ * current limit is already present in swapper_pg_dir.
+ */
+ if (start < limit)
+ start = ALIGN(start, SECTION_SIZE);
+ if (end < limit) {
+ limit = end & SECTION_MASK;
+ memblock_set_current_limit(limit);
+ }
+ }
+ __map_memblock(start, end);
+ }
- vm_area_add_early(vma);
+ /* Limit no longer required. */
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
-
-/*
- * Create fine-grained mappings for the kernel.
- */
-static void __init map_kernel(pgd_t *pgd)
+#ifdef CONFIG_FORCE_PAGES
+static noinline void __init split_and_set_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn)
{
- static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
+ pte_t *pte, *start_pte;
- map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
- map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
- &vmlinux_init);
- map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+ start_pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ pte = start_pte;
- if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
- /*
- * The fixmap falls in a separate pgd to the kernel, and doesn't
- * live in the carveout for the swapper_pg_dir. We can simply
- * re-use the existing dir for the fixmap.
- */
- set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
- *pgd_offset_k(FIXADDR_START));
- } else if (CONFIG_PGTABLE_LEVELS > 3) {
- /*
- * The fixmap shares its top level pgd entry with the kernel
- * mapping. This can really only occur when we are running
- * with 16k/4 levels, so we can simply reuse the pud level
- * entry instead.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
- pud_clear_fixmap();
- } else {
- BUG();
- }
+ do {
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
- kasan_copy_shadow(pgd);
+ set_pmd(pmd, __pmd((__pa(start_pte)) | PMD_TYPE_TABLE));
}
-struct dma_contig_early_reserve {
- phys_addr_t base;
- unsigned long size;
-};
+static noinline void __init remap_pages(void)
+{
+ struct memblock_region *reg;
-static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
+ for_each_memblock(memory, reg) {
+ phys_addr_t phys_pgd = reg->base;
+ phys_addr_t phys_end = reg->base + reg->size;
+ unsigned long addr_pgd = (unsigned long)__va(phys_pgd);
+ unsigned long end = (unsigned long)__va(phys_end);
+ pmd_t *pmd = NULL;
+ pud_t *pud = NULL;
+ pgd_t *pgd = NULL;
+ unsigned long next_pud, next_pmd, next_pgd;
+ unsigned long addr_pmd, addr_pud;
+ phys_addr_t phys_pud, phys_pmd;
+
+ if (phys_pgd >= phys_end)
+ break;
-static int dma_mmu_remap_num;
+ pgd = pgd_offset(&init_mm, addr_pgd);
+ do {
+ next_pgd = pgd_addr_end(addr_pgd, end);
+ pud = pud_offset(pgd, addr_pgd);
+ addr_pud = addr_pgd;
+ phys_pud = phys_pgd;
+ do {
+ next_pud = pud_addr_end(addr_pud, next_pgd);
+ pmd = pmd_offset(pud, addr_pud);
+ addr_pmd = addr_pud;
+ phys_pmd = phys_pud;
+ do {
+ next_pmd = pmd_addr_end(addr_pmd,
+ next_pud);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ split_and_set_pmd(pmd, addr_pmd,
+ next_pmd, __phys_to_pfn(phys_pmd));
+ pmd++;
+ phys_pmd += next_pmd - addr_pmd;
+ } while (addr_pmd = next_pmd,
+ addr_pmd < next_pud);
+ phys_pud += next_pud - addr_pud;
+ } while (pud++, addr_pud = next_pud,
+ addr_pud < next_pgd);
+ phys_pgd += next_pgd - addr_pgd;
+ } while (pgd++, addr_pgd = next_pgd, addr_pgd < end);
+ }
+}
-void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+#else
+static void __init remap_pages(void)
{
- dma_mmu_remap[dma_mmu_remap_num].base = base;
- dma_mmu_remap[dma_mmu_remap_num].size = size;
- dma_mmu_remap_num++;
+
}
+#endif
-static bool dma_overlap(phys_addr_t start, phys_addr_t end)
+static void __init fixup_executable(void)
{
- int i;
+#ifdef CONFIG_DEBUG_RODATA
+ /* now that we are actually fully mapped, make the start/end more fine grained */
+ if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_start = round_down(__pa(_stext),
+ SWAPPER_BLOCK_SIZE);
- for (i = 0; i < dma_mmu_remap_num; i++) {
- phys_addr_t dma_base = dma_mmu_remap[i].base;
- phys_addr_t dma_end = dma_mmu_remap[i].base +
- dma_mmu_remap[i].size;
+ create_mapping(aligned_start, __phys_to_virt(aligned_start),
+ __pa(_stext) - aligned_start,
+ PAGE_KERNEL, false);
+ }
- if ((dma_base < end) && (dma_end > start))
- return true;
+ if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_end = round_up(__pa(__init_end),
+ SWAPPER_BLOCK_SIZE);
+ create_mapping(__pa(__init_end), (unsigned long)__init_end,
+ aligned_end - __pa(__init_end),
+ PAGE_KERNEL, false);
}
- return false;
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ create_mapping_late(__pa(_stext), (unsigned long)_stext,
+ (unsigned long)_etext - (unsigned long)_stext,
+ PAGE_KERNEL_ROX);
+
+}
+#endif
+
+void fixup_init(void)
+{
+ create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+ (unsigned long)__init_end - (unsigned long)__init_begin,
+ PAGE_KERNEL);
}
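Across these hunks the mapping policy reduces to a pair of alignment tests: a block descriptor is used only when virtual address, physical address and chunk end all share the block alignment and nothing forces page granularity. Condensed sketch of the two decisions, using the names from the hunks:

    /* PMD level: 2M section iff everything is 2M-aligned and pages not forced. */
    bool use_section = !pages && ((addr | next | phys) & ~SECTION_MASK) == 0;

    /* PUD level: a 1G block additionally avoids CMA/DMA regions that will be
     * remapped as pages later. */
    bool use_block = use_1G_block(addr, next, phys) && !force_pages &&
                     !dma_overlap(phys, phys + (next - addr)) &&
                     !IS_ENABLED(CONFIG_FORCE_PAGES);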
/*
@@ -565,35 +596,39 @@ static bool dma_overlap(phys_addr_t start, phys_addr_t end)
*/
void __init paging_init(void)
{
- phys_addr_t pgd_phys = early_pgtable_alloc();
- pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+ void *zero_page;
- map_kernel(pgd);
- map_mem(pgd);
+ map_mem();
+ fixup_executable();
+ dma_contiguous_remap();
+ remap_pages();
/*
- * We want to reuse the original swapper_pg_dir so we don't have to
- * communicate the new address to non-coherent secondaries in
- * secondary_entry, and so cpu_switch_mm can generate the address with
- * adrp+add rather than a load from some global variable.
- *
- * To do this we need to go via a temporary pgd.
+ * Finally flush the caches and tlb to ensure that we're in a
+ * consistent state.
*/
- cpu_replace_ttbr1(__va(pgd_phys));
- memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ flush_tlb_all();
+
+ /* allocate the zero page. */
+ zero_page = early_alloc(PAGE_SIZE);
- pgd_clear_fixmap();
- memblock_free(pgd_phys, PAGE_SIZE);
+ bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+
+ /* Ensure the zero page is visible to the page table walker */
+ dsb(ishst);
/*
- * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
- * allocated with it.
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
*/
- memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
- SWAPPER_DIR_SIZE - PAGE_SIZE);
-
- bootmem_init();
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+ flush_tlb_all();
+ set_kernel_text_ro();
+ flush_tlb_all();
}
/*
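paging_init() now finishes by parking TTBR0 on the empty tables so nothing can be speculatively fetched through the stale identity mapping. A plausible shape for the helper used above (the real one lives in asm/mmu_context.h):

    static inline void cpu_set_reserved_ttbr0_sketch(void)
    {
            unsigned long ttbr = page_to_phys(empty_zero_page); /* all-zero tables */

            asm volatile("msr ttbr0_el1, %0\n\tisb" : : "r" (ttbr));
    }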
@@ -680,13 +715,21 @@ void vmemmap_free(unsigned long start, unsigned long end)
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#if CONFIG_PGTABLE_LEVELS > 2
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 3
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+
static inline pud_t * fixmap_pud(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
- return pud_offset_kimg(pgd, addr);
+ return pud_offset(pgd, addr);
}
static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -695,12 +738,16 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
BUG_ON(pud_none(*pud) || pud_bad(*pud));
- return pmd_offset_kimg(pud, addr);
+ return pmd_offset(pud, addr);
}
static inline pte_t * fixmap_pte(unsigned long addr)
{
- return &bm_pte[pte_index(addr)];
+ pmd_t *pmd = fixmap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
}
void __init early_fixmap_init(void)
@@ -711,26 +758,15 @@ void __init early_fixmap_init(void)
unsigned long addr = FIXADDR_START;
pgd = pgd_offset_k(addr);
- if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
- /*
- * We only end up here if the kernel mapping and the fixmap
- * share the top level pgd entry, which should only happen on
- * 16k/4 levels configurations.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pud = pud_offset_kimg(pgd, addr);
- } else {
- pgd_populate(&init_mm, pgd, bm_pud);
- pud = fixmap_pud(addr);
- }
+ pgd_populate(&init_mm, pgd, bm_pud);
+ pud = pud_offset(pgd, addr);
pud_populate(&init_mm, pud, bm_pmd);
- pmd = fixmap_pmd(addr);
+ pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, bm_pte);
/*
* The boot-ioremap range spans multiple pmds, for which
- * we are not prepared:
+ * we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -769,10 +805,11 @@ void __set_fixmap(enum fixed_addresses idx,
}
}
-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
- int offset;
+ pgprot_t prot = PAGE_KERNEL_RO;
+ int size, offset;
void *dt_virt;
/*
@@ -789,7 +826,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
/*
* Make sure that the FDT region can be mapped without the need to
* allocate additional translation table pages, so that it is safe
- * to call create_mapping_noalloc() this early.
+ * to call create_mapping() this early.
*
* On 64k pages, the FDT will be mapped using PTEs, so we need to
* be in the same PMD as the rest of the fixmap.
@@ -805,73 +842,21 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
- dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
+ create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ SWAPPER_BLOCK_SIZE, prot, false);
if (fdt_check_header(dt_virt) != 0)
return NULL;
- *size = fdt_totalsize(dt_virt);
- if (*size > MAX_FDT_SIZE)
+ size = fdt_totalsize(dt_virt);
+ if (size > MAX_FDT_SIZE)
return NULL;
- if (offset + *size > SWAPPER_BLOCK_SIZE)
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
-
- return dt_virt;
-}
-
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
-{
- void *dt_virt;
- int size;
-
- dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
- if (!dt_virt)
- return NULL;
+ if (offset + size > SWAPPER_BLOCK_SIZE)
+ create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ round_up(offset + size, SWAPPER_BLOCK_SIZE), prot, false);
memblock_reserve(dt_phys, size);
- return dt_virt;
-}
-
-int __init arch_ioremap_pud_supported(void)
-{
- /* only 4k granule supports level 1 block mappings */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
-}
-
-int __init arch_ioremap_pmd_supported(void)
-{
- return 1;
-}
-
-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
-{
- BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
- return 1;
-}
-
-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
-{
- BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
- return 1;
-}
-int pud_clear_huge(pud_t *pud)
-{
- if (!pud_sect(*pud))
- return 0;
- pud_clear(pud);
- return 1;
-}
-
-int pmd_clear_huge(pmd_t *pmd)
-{
- if (!pmd_sect(*pmd))
- return 0;
- pmd_clear(pmd);
- return 1;
+ return dt_virt;
}
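fixmap_remap_fdt() must map before it can size: the first SWAPPER_BLOCK makes the header readable, and only then can the mapping be extended. The flow, condensed from the hunk above:

    /* 1) Map one block so fdt_totalsize() can be read from the header. */
    create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                   SWAPPER_BLOCK_SIZE, PAGE_KERNEL_RO, false);
    size = fdt_totalsize(dt_virt);

    /* 2) Extend the mapping if the blob spills past the first block. */
    if (offset + size > SWAPPER_BLOCK_SIZE)
            create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                           round_up(offset + size, SWAPPER_BLOCK_SIZE),
                           PAGE_KERNEL_RO, false);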
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 4754762bde49..0f35e8ba0f33 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,7 +14,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -37,32 +36,14 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
-/*
- * This function assumes that the range is mapped with PAGE_SIZE pages.
- */
-static int __change_memory_common(unsigned long start, unsigned long size,
- pgprot_t set_mask, pgprot_t clear_mask)
-{
- struct page_change_data data;
- int ret;
-
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, start + size);
- return ret;
-}
-
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
- struct vm_struct *area;
+ int ret;
+ struct page_change_data data;
if (!PAGE_ALIGNED(addr)) {
start &= PAGE_MASK;
@@ -77,29 +58,18 @@ static int change_memory_common(unsigned long addr, int numpages,
if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
}
- /*
- * Kernel VA mappings are always live, and splitting live section
- * mappings into page mappings may cause TLB conflicts. This means
- * we have to ensure that changing the permission bits of the range
- * we are operating on does not result in such splitting.
- *
- * Let's restrict ourselves to mappings created by vmalloc (or vmap).
- * Those are guaranteed to consist entirely of page mappings, and
- * splitting is never needed.
- *
- * So check whether the [addr, addr + size) interval is entirely
- * covered by precisely one VM area that has the VM_ALLOC flag set.
- */
- area = find_vm_area((void *)addr);
- if (!area ||
- end > (unsigned long)area->addr + area->size ||
- !(area->flags & VM_ALLOC))
- return -EINVAL;
if (!numpages)
return 0;
- return __change_memory_common(start, size, set_mask, clear_mask);
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
}
int set_memory_ro(unsigned long addr, int numpages)
@@ -131,19 +101,3 @@ int set_memory_x(unsigned long addr, int numpages)
__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void __kernel_map_pages(struct page *page, int numpages, int enable)
-{
- unsigned long addr = (unsigned long) page_address(page);
-
- if (enable)
- __change_memory_common(addr, PAGE_SIZE * numpages,
- __pgprot(PTE_VALID),
- __pgprot(0));
- else
- __change_memory_common(addr, PAGE_SIZE * numpages,
- __pgprot(0),
- __pgprot(PTE_VALID));
-}
-#endif
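With the find_vm_area() check dropped, change_memory_common() again trusts callers to hand it page mappings in the module range. Typical use from a caller's side (identifiers below are placeholders):

    /* Protect a page-aligned module image: RO+X text, NX data. */
    set_memory_ro((unsigned long)text_base, text_pages);
    set_memory_x((unsigned long)text_base, text_pages);
    set_memory_nx((unsigned long)data_base, data_pages);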
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index ae11d4e03d0e..cb3ba1b812e7 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -46,14 +46,14 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
-void __init pgd_cache_init(void)
+static int __init pgd_cache_init(void)
{
- if (PGD_SIZE == PAGE_SIZE)
- return;
-
/*
* Naturally aligned pgds required by the architecture.
*/
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
- SLAB_PANIC, NULL);
+ if (PGD_SIZE != PAGE_SIZE)
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
+ return 0;
}
+core_initcall(pgd_cache_init);
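Converted to a core_initcall, pgd_cache_init() must return int and is invoked automatically from do_initcalls(); core_initcall runs at level 1, well before module loading needs pgd_alloc(). The general shape of the pattern:

    static int __init some_cache_init(void)
    {
            /* create caches needed before later initcall levels run */
            return 0;       /* nonzero would be logged as an initcall failure */
    }
    core_initcall(some_cache_init);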
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 984edcda1850..d69dffffaa89 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -74,25 +74,3 @@
msr pmuserenr_el0, xzr // Disable PMU access from EL0
9000:
.endm
-
-/*
- * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
- *
- * op: operation passed to dc instruction
- * domain: domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
- */
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
- dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
- sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
-9998: dc \op, \kaddr
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
- b.lo 9998b
- dsb \domain
- .endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index dc22de0ce413..cd8853e11f2a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -186,33 +186,7 @@ ENTRY(cpu_do_switch_mm)
ret
ENDPROC(cpu_do_switch_mm)
- .pushsection ".idmap.text", "ax"
-/*
- * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
- *
- * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
- * called by anything else. It can only be executed from a TTBR0 mapping.
- */
-ENTRY(idmap_cpu_replace_ttbr1)
- mrs x2, daif
- msr daifset, #0xf
-
- adrp x1, empty_zero_page
- msr ttbr1_el1, x1
- isb
-
- tlbi vmalle1
- dsb nsh
- isb
-
- msr ttbr1_el1, x0
- isb
-
- msr daif, x2
-
- ret
-ENDPROC(idmap_cpu_replace_ttbr1)
- .popsection
+ .section ".text.init", #alloc, #execinstr
/*
* __cpu_setup