Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 259
-rw-r--r--  arch/arm64/Kconfig.debug | 42
-rw-r--r--  arch/arm64/Kconfig.platforms | 56
-rw-r--r--  arch/arm64/Makefile | 41
-rw-r--r--  arch/arm64/boot/Makefile | 6
-rw-r--r--  arch/arm64/boot/dts/Makefile | 13
-rw-r--r--  arch/arm64/boot/dts/arm/juno.dts | 305
l---------  arch/arm64/boot/dts/qcom | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi | 40
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi | 13
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc.dts | 21
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi | 117
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8916-pins.dtsi | 502
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8916.dtsi | 443
-rw-r--r--  arch/arm64/boot/dts/qcom/pm8916.dtsi | 99
-rw-r--r--  arch/arm64/configs/cuttlefish_defconfig | 1
-rw-r--r--  arch/arm64/configs/defconfig | 4
-rw-r--r--  arch/arm64/configs/fsmcortex-perf_defconfig | 596
-rw-r--r--  arch/arm64/configs/fsmcortex_defconfig | 671
-rw-r--r--  arch/arm64/configs/msm-perf_defconfig | 637
-rw-r--r--  arch/arm64/configs/msm_defconfig | 675
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 680
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 762
-rw-r--r--  arch/arm64/configs/msmcortex_mediabox-perf_defconfig | 653
-rw-r--r--  arch/arm64/configs/msmcortex_mediabox_defconfig | 714
-rw-r--r--  arch/arm64/configs/ranchu64_defconfig | 1
-rw-r--r--  arch/arm64/configs/sdm660-perf_defconfig | 687
-rw-r--r--  arch/arm64/configs/sdm660_defconfig | 761
l---------  arch/arm64/configs/vendor/msm-perf_defconfig | 1
l---------  arch/arm64/configs/vendor/msm_defconfig | 1
l---------  arch/arm64/configs/vendor/msmcortex-perf_defconfig | 1
l---------  arch/arm64/configs/vendor/msmcortex_defconfig | 1
l---------  arch/arm64/configs/vendor/msmcortex_mediabox-perf_defconfig | 1
l---------  arch/arm64/configs/vendor/msmcortex_mediabox_defconfig | 1
l---------  arch/arm64/configs/vendor/sdm660-perf_defconfig | 1
l---------  arch/arm64/configs/vendor/sdm660_defconfig | 1
-rw-r--r--  arch/arm64/crypto/Kconfig | 7
-rw-r--r--  arch/arm64/crypto/Makefile | 3
-rw-r--r--  arch/arm64/crypto/speck-neon-core.S | 352
-rw-r--r--  arch/arm64/crypto/speck-neon-glue.c | 308
-rw-r--r--  arch/arm64/include/asm/Kbuild | 3
-rw-r--r--  arch/arm64/include/asm/app_api.h | 50
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 13
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 46
-rw-r--r--  arch/arm64/include/asm/assembler.h | 12
-rw-r--r--  arch/arm64/include/asm/cache.h | 12
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 27
-rw-r--r--  arch/arm64/include/asm/checksum.h | 51
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 15
-rw-r--r--  arch/arm64/include/asm/cputype.h | 21
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 5
-rw-r--r--  arch/arm64/include/asm/debugv8.h | 229
-rw-r--r--  arch/arm64/include/asm/device.h | 12
-rw-r--r--  arch/arm64/include/asm/dma-contiguous.h (renamed from arch/arm64/boot/dts/qcom/msm8916-mtp.dts) | 18
-rw-r--r--  arch/arm64/include/asm/dma-iommu.h | 64
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/arm64/include/asm/edac.h (renamed from arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi) | 34
-rw-r--r--  arch/arm64/include/asm/elf.h | 16
-rw-r--r--  arch/arm64/include/asm/etmv4x.h | 385
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 8
-rw-r--r--  arch/arm64/include/asm/gpio.h | 32
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/insn.h | 41
-rw-r--r--  arch/arm64/include/asm/io.h | 86
-rw-r--r--  arch/arm64/include/asm/irq.h | 3
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 60
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 14
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 94
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 98
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 10
-rw-r--r--  arch/arm64/include/asm/memory.h | 3
-rw-r--r--  arch/arm64/include/asm/mmu.h | 48
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 9
-rw-r--r--  arch/arm64/include/asm/pci.h | 5
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 87
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 55
-rw-r--r--  arch/arm64/include/asm/probes.h | 35
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 4
-rw-r--r--  arch/arm64/include/asm/processor.h | 7
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 61
-rw-r--r--  arch/arm64/include/asm/sections.h | 28
-rw-r--r--  arch/arm64/include/asm/signal32.h | 46
-rw-r--r--  arch/arm64/include/asm/sparsemem.h | 4
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 1
-rw-r--r--  arch/arm64/include/asm/suspend.h | 32
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 20
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 1
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 6
-rw-r--r--  arch/arm64/include/asm/topology.h | 1
-rw-r--r--  arch/arm64/include/asm/traps.h | 7
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 3
-rw-r--r--  arch/arm64/include/asm/vdso.h | 3
-rw-r--r--  arch/arm64/include/asm/vdso_datapage.h | 25
-rw-r--r--  arch/arm64/include/asm/virt.h | 27
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 18
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 32
-rw-r--r--  arch/arm64/kernel/Makefile | 25
-rw-r--r--  arch/arm64/kernel/alternative.c | 9
-rw-r--r--  arch/arm64/kernel/app_api.c | 135
-rw-r--r--  arch/arm64/kernel/app_setting.c | 139
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 7
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 19
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 108
-rw-r--r--  arch/arm64/kernel/bpi.S | 87
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 192
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 27
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 23
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 33
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 2
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 16
-rw-r--r--  arch/arm64/kernel/entry.S | 54
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 49
-rw-r--r--  arch/arm64/kernel/head.S | 164
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S | 176
-rw-r--r--  arch/arm64/kernel/hibernate.c | 520
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 8
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 45
-rw-r--r--  arch/arm64/kernel/image.h | 2
-rw-r--r--  arch/arm64/kernel/insn.c | 301
-rw-r--r--  arch/arm64/kernel/io.c | 19
-rw-r--r--  arch/arm64/kernel/kgdb.c | 4
-rw-r--r--  arch/arm64/kernel/kuser32.S | 48
-rw-r--r--  arch/arm64/kernel/module.c | 7
-rw-r--r--  arch/arm64/kernel/perf_debug.c | 73
-rw-r--r--  arch/arm64/kernel/perf_event.c | 682
-rw-r--r--  arch/arm64/kernel/perf_trace_counters.c | 180
-rw-r--r--  arch/arm64/kernel/perf_trace_counters.h | 111
-rw-r--r--  arch/arm64/kernel/perf_trace_user.c | 96
-rw-r--r--  arch/arm64/kernel/perf_trace_user.h | 85
-rw-r--r--  arch/arm64/kernel/probes/Makefile | 3
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.c | 174
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.h | 35
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 657
-rw-r--r--  arch/arm64/kernel/probes/kprobes_trampoline.S | 81
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.c | 217
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.h | 28
-rw-r--r--  arch/arm64/kernel/process.c | 24
-rw-r--r--  arch/arm64/kernel/psci.c | 102
-rw-r--r--  arch/arm64/kernel/ptrace.c | 108
-rw-r--r--  arch/arm64/kernel/setup.c | 26
-rw-r--r--  arch/arm64/kernel/signal.c | 4
-rw-r--r--  arch/arm64/kernel/signal32.c | 66
-rw-r--r--  arch/arm64/kernel/sigreturn32.S | 67
-rw-r--r--  arch/arm64/kernel/sleep.S | 161
-rw-r--r--  arch/arm64/kernel/smp.c | 144
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 3
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 24
-rw-r--r--  arch/arm64/kernel/suspend.c | 82
-rw-r--r--  arch/arm64/kernel/topology.c | 187
-rw-r--r--  arch/arm64/kernel/traps.c | 74
-rw-r--r--  arch/arm64/kernel/vdso.c | 253
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 29
-rw-r--r--  arch/arm64/kernel/vdso/compiler.h | 70
-rw-r--r--  arch/arm64/kernel/vdso/datapage.h | 59
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S | 1
-rw-r--r--  arch/arm64/kernel/vdso/vgettimeofday.c | 3
-rw-r--r--  arch/arm64/kernel/vdso32/.gitignore | 2
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 178
-rw-r--r--  arch/arm64/kernel/vdso32/compiler.h | 122
-rw-r--r--  arch/arm64/kernel/vdso32/datapage.h | 1
-rw-r--r--  arch/arm64/kernel/vdso32/sigreturn.S | 76
-rw-r--r--  arch/arm64/kernel/vdso32/vdso.S | 32
-rw-r--r--  arch/arm64/kernel/vdso32/vdso.lds.S | 95
-rw-r--r--  arch/arm64/kernel/vdso32/vgettimeofday.c | 3
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 79
-rw-r--r--  arch/arm64/kvm/Makefile | 3
-rw-r--r--  arch/arm64/kvm/guest.c | 1
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 8
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 51
-rw-r--r--  arch/arm64/kvm/hyp.S | 1091
-rw-r--r--  arch/arm64/kvm/hyp/Makefile | 14
-rw-r--r--  arch/arm64/kvm/hyp/debug-sr.c | 141
-rw-r--r--  arch/arm64/kvm/hyp/entry.S | 160
-rw-r--r--  arch/arm64/kvm/hyp/fpsimd.S | 33
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S | 232
-rw-r--r--  arch/arm64/kvm/hyp/hyp.h | 90
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 175
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c | 138
-rw-r--r--  arch/arm64/kvm/hyp/timer-sr.c | 71
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c | 80
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-sr.c | 84
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c | 228
-rw-r--r--  arch/arm64/kvm/reset.c | 14
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 59
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S | 134
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S | 269
-rw-r--r--  arch/arm64/mm/cache.S | 77
-rw-r--r--  arch/arm64/mm/context.c | 3
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 1118
-rw-r--r--  arch/arm64/mm/fault.c | 73
-rw-r--r--  arch/arm64/mm/flush.c | 1
-rw-r--r--  arch/arm64/mm/init.c | 159
-rw-r--r--  arch/arm64/mm/mmu.c | 519
-rw-r--r--  arch/arm64/mm/pageattr.c | 48
-rw-r--r--  arch/arm64/mm/proc.S | 124
198 files changed, 18217 insertions, 5221 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 06dd9d22d989..2e9931d9f1e4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -13,6 +13,7 @@ config ARM64
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
@@ -24,12 +25,13 @@ config ARM64
select ARM_PSCI_FW
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
- select COMMON_CLK
+ select COMMON_CLK if !ARCH_QCOM
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
select EDAC_SUPPORT
select FRAME_POINTER
select GENERIC_ALLOCATOR
+ select EDAC_SUPPORT
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
@@ -82,9 +84,12 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
- select IOMMU_DMA if IOMMU_SUPPORT
+ select IOMMU_DMA if (IOMMU_SUPPORT && !ARCH_QCOM)
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES if HAVE_KPROBES
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
@@ -98,6 +103,7 @@ config ARM64
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select HAVE_CONTEXT_TRACKING
+ select HAVE_ARM_SMCCC
select THREAD_INFO_IN_TASK
select HAVE_ARM_SMCCC
help
@@ -197,6 +203,42 @@ config NEED_SG_DMA_LENGTH
config SMP
def_bool y
+config HOTPLUG_SIZE_BITS
+ int "Memory hotplug block size(28 => 256MB 30 => 1GB)"
+ depends on SPARSEMEM
+ default 30
+ help
+ Selects the granularity of hotplug memory. The block
+ size for memory hotplug is represented as a power
+ of 2.
+ If unsure, stick with the default value.
+
+config ARM64_DMA_USE_IOMMU
+ bool
+ select ARM_HAS_SG_CHAIN
+ select NEED_SG_DMA_LENGTH
+
+if ARM64_DMA_USE_IOMMU
+
+config ARM64_DMA_IOMMU_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+ range 4 9
+ default 9
+ help
+ The DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is just a waste of address space. Drivers which
+ have a relatively small addressing window (like 64 MiB) might run out
+ of virtual space with just a few allocations.
+
+ With this parameter you can specify the maximum PAGE_SIZE order for
+ DMA IOMMU buffers. Larger buffers will be aligned only to this
+ specified order. The order is expressed as a power of two multiplied
+ by the PAGE_SIZE.
+
+endif
+
config SWIOTLB
def_bool y
@@ -218,6 +260,24 @@ config PGTABLE_LEVELS
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+config MSM_GVM
+ bool "Enable virtualization Support for MSM kernel"
+ help
+ This enables support for an MSM kernel based virtual
+ machine on any platform.
+ This helps to enable virtual driver support.
+ It should work on any 64-bit machine.
+ If you don't know what to do here, say N.
+
+config MSM_GVM_QUIN
+ bool "Enable virtualization Support for MSM kernel required for QUIN platform"
+ help
+ This enables support for an MSM kernel based virtual
+ machine for the QUIN platform.
+ This helps to enable virtual driver support.
+ It should work on any 64-bit machine.
+ If you don't know what to do here, say N.
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -496,6 +556,25 @@ config ARM64_16K_PAGES
requires applications compiled with 16K (or a multiple of 16K)
aligned segments.
+config ARM64_DCACHE_DISABLE
+ bool "Disable CPU Data Caches"
+ help
+ Disable CPU data cache usage by setting the SCTLR[C] bit during
+ kernel initialization. This will result in a considerable
+ performance impact, but may be useful in certain situations.
+
+ If you are not sure what to do, select 'N' here.
+
+config ARM64_ICACHE_DISABLE
+ bool "Disable CPU Instruction Caches"
+ help
+ Disable CPU instruction cache usage by setting the SCTLR[I]
+ bit during kernel initialization. This will result in a
+ considerable performance impact, but may be useful in certain
+ situations.
+
+ If you are not sure what to do, select 'N' here.
+
config ARM64_64K_PAGES
bool "64KB"
help
@@ -506,6 +585,37 @@ config ARM64_64K_PAGES
endchoice
+config MSM_APP_API
+ bool "API support to enable / disable app settings for MSM8996"
+ depends on ARCH_MSM8996 && (ENABLE_FP_SIMD_SETTINGS || MSM_APP_SETTINGS)
+ help
+ Add API support to enable / disable the app settings to be used
+ at runtime. These APIs are used to enable / disable app settings
+ when specific aarch32 or aarch64 processes are running.
+
+ If you are not sure what to do, select 'N' here.
+
+config ENABLE_FP_SIMD_SETTINGS
+ bool "Enable FP(Floating Point) Settings for Qualcomm MSM8996"
+ depends on ARCH_MSM8996
+ select MSM_APP_API
+ help
+ Enable FP (Floating Point) and SIMD settings for the MSM8996 during
+ the execution of the aarch32 processes and disable these settings
+ when you switch to the aarch64 processes.
+
+ If you are not sure what to do, select 'N' here.
+
+config MSM_APP_SETTINGS
+ bool "Support to enable / disable app settings for MSM8996"
+ depends on ARCH_MSM8996
+ select MSM_APP_API
+ help
+ Expose an interface used by userspace at runtime to
+ enable / disable the app specific settings.
+
+ If you are not sure what to do, select 'N' here.
+
choice
prompt "Virtual address space size"
default ARM64_VA_BITS_39 if ARM64_4K_PAGES
@@ -577,6 +687,32 @@ config HOTPLUG_CPU
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on !NUMA
+ def_bool y
+
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+ def_bool y
+
+# The GPIO numbers here must be sorted in descending order. In case of
+# a multiplatform kernel, we just want the highest value required by the
+# selected platforms.
+config ARCH_NR_GPIO
+ int
+ default 1024 if ARCH_TEGRA
+ default 1024 if ARCH_QCOM
+ default 256
+ help
+ Maximum number of GPIOs in the system.
+
+ If unsure, leave the default value.
+
+config QCOM_TLB_EL2_HANDLER
+ bool "Raise TLB conflict exception to EL2"
+ help
+ This option enables TLB conflict exceptions to be handled
+ by EL2.
+
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
@@ -603,6 +739,33 @@ config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
+config ARM64_REG_REBALANCE_ON_CTX_SW
+ bool "Rebalance registers during context switches."
+ def_bool ARCH_MSM8996
+ help
+ Forcefully re-balance register rename pools on context switches for
+ improved performance on some devices.
+
+config PERF_EVENTS_USERMODE
+ bool "Enable usermode access for perf events"
+ depends on PERF_EVENTS
+ help
+ Enable user-mode access to performance counters for perf events.
+ If enabled, the access permissions allowing CPU performance
+ counters to be accessed from user-mode are set.
+
+ If you want user-mode programs to access perf events, say Y.
+
+config PERF_EVENTS_RESET_PMU_DEBUGFS
+ bool "Reset PMU via debugfs node"
+ depends on PERF_EVENTS
+ help
+ Enable the debugfs node that can be used to reset PMUs and all
+ state variables associated with PMUs. If enabled, the PMU and
+ internal state variables are cleared.
+ If you want to reset the PMU and PMU-related internal perf
+ variables via debugfs, then say Y.
+
config SYS_SUPPORTS_HUGETLBFS
def_bool y
@@ -617,6 +780,10 @@ config ARCH_HAS_CACHE_LINE_SIZE
source "mm/Kconfig"
+config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
---help---
@@ -676,6 +843,23 @@ config UNMAP_KERNEL_AT_EL0
If unsure, say Y.
+config HARDEN_BRANCH_PREDICTOR
+ bool "Harden the branch predictor against aliasing attacks" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors rely on
+ being able to manipulate the branch predictor for a victim context by
+ executing aliasing branches in the attacker context. Such attacks
+ can be partially mitigated against by clearing internal branch
+ predictor state and limiting the prediction logic in some situations.
+
+ This config option will take CPU-specific actions to harden the
+ branch predictor against aliasing attacks and may rely on specific
+ instruction sequences or control bits being set by the system
+ firmware.
+
+ If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -755,7 +939,6 @@ menu "ARMv8.1 architectural features"
config ARM64_HW_AFDBM
bool "Support for hardware updates of the Access and Dirty page flags"
- default y
help
The ARMv8.1 architecture extensions introduce support for
hardware updates of the access and dirty information in page
@@ -772,7 +955,6 @@ config ARM64_HW_AFDBM
config ARM64_PAN
bool "Enable support for Privileged Access Never (PAN)"
- default y
help
Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
prevents the kernel or hypervisor from accessing user-space (EL0)
@@ -800,7 +982,6 @@ endmenu
config ARM64_UAO
bool "Enable support for User Access Override (UAO)"
- default y
help
User Access Override (UAO; part of the ARMv8.2 Extensions)
causes the 'unprivileged' variant of the load/store instructions to
@@ -985,6 +1166,11 @@ config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
Space separated list of names of dtbs to append when
building a concatenated Image.gz-dtb.
+config BUILD_ARM64_DT_OVERLAY
+ bool "enable DT overlay compilation support"
+ depends on OF
+ help
+ This option enables support for DT overlay compilation.
endmenu
menu "Userspace binary formats"
@@ -1010,6 +1196,36 @@ config COMPAT
If you want to execute 32-bit userspace applications, say Y.
+config KUSER_HELPERS
+ bool "Enable the kuser helpers page in 32-bit processes"
+ depends on COMPAT
+ default y
+ help
+ Warning: disabling this option may break 32-bit applications.
+
+ Provide kuser helpers in a special purpose fixed-address page. The
+ kernel provides helper code to userspace in read-only form at a fixed
+ location to allow userspace to be independent of the CPU type fitted
+ to the system. This permits 32-bit binaries to be run on ARMv6 through
+ to ARMv8 without modification.
+
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed-address nature of these helpers can be used by ROP
+ (return-oriented programming) authors when creating exploits.
+
+ If all of the 32-bit binaries and libraries that run on your platform
+ are built specifically for your platform, and make no use of these
+ helpers, then you can turn this option off to hinder such exploits.
+ However, in that case, if a binary or library relying on those helpers
+ is run, it will receive a SIGSEGV signal, which will terminate the
+ program. Typically, binaries compiled for ARMv7 or later do not use
+ the kuser helpers.
+
+ Say N here only if you are absolutely certain that you do not need
+ these helpers; otherwise, the safe option is to say Y (the default
+ for now).
+
config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
@@ -1018,12 +1234,45 @@ config KEYS_COMPAT
def_bool y
depends on COMPAT && KEYS
+config COMPAT_VDSO
+ bool "32-bit vDSO"
+ depends on COMPAT
+ select ARM_ARCH_TIMER_VCT_ACCESS
+ default n
+ help
+ Warning: a 32-bit toolchain is necessary to build the vDSO. You
+ must explicitly define which toolchain should be used by setting
+ CROSS_COMPILE_ARM32 to the prefix of the 32-bit toolchain (same format
+ as CROSS_COMPILE). If CROSS_COMPILE_ARM32 is empty, a warning will be
+ printed and the kernel will be built as if COMPAT_VDSO had not been
+ set. If CROSS_COMPILE_ARM32 is set to an invalid prefix, compilation
+ will be aborted.
+
+ Provide a vDSO to 32-bit processes. It includes the symbols provided
+ by the vDSO from the 32-bit kernel, so that a 32-bit libc can use
+ the compat vDSO without modification. It also provides sigreturn
+ trampolines, replacing the sigreturn page.
+
+config CROSS_COMPILE_ARM32
+ string "32-bit toolchain prefix"
+ help
+ Same as setting CROSS_COMPILE_ARM32 in the environment, but saved for
+ future builds. The environment variable overrides this config option.
+
endmenu
menu "Power management options"
source "kernel/power/Kconfig"
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y
+ depends on CPU_PM
+
+config ARCH_HIBERNATION_HEADER
+ def_bool y
+ depends on HIBERNATION
+
config ARCH_SUSPEND_POSSIBLE
def_bool y
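
The hotplug-related options added above (HOTPLUG_SIZE_BITS, ARCH_MEMORY_PROBE, ARCH_ENABLE_MEMORY_HOTPLUG/HOTREMOVE) surface memory hotplug through the standard sysfs interface. A minimal sketch of probing and onlining a block, assuming CONFIG_MEMORY_HOTPLUG is enabled; the physical address and block index below are hypothetical:

    # Block size is 2^HOTPLUG_SIZE_BITS bytes; with the default of 30
    # this reads back as 0x40000000 (1GB).
    cat /sys/devices/system/memory/block_size_bytes
    # Probe a new block at a block-aligned physical address (hypothetical).
    echo 0x140000000 > /sys/devices/system/memory/probe
    # Online the resulting block; the index depends on the probed address.
    echo online > /sys/devices/system/memory/memory5/state
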
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 8b0cd45de394..441005f88761 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -73,7 +73,7 @@ config DEBUG_RODATA
If in doubt, say Y
config DEBUG_ALIGN_RODATA
- depends on DEBUG_RODATA && ARM64_4K_PAGES
+ depends on DEBUG_RODATA
bool "Align linker sections up to SECTION_SIZE"
help
If this option is enabled, sections that may potentially be marked as
@@ -85,6 +85,46 @@ config DEBUG_ALIGN_RODATA
If in doubt, say N
+config FORCE_PAGES
+ bool "Force lowmem to be mapped with 4K pages"
+ help
+ There are some advanced debug features that can only be done when
+ memory is mapped with pages instead of sections. Enable this option
+ to always map lowmem with pages. This may have a performance
+ cost due to increased TLB pressure.
+
+ If unsure say N.
+
+config FREE_PAGES_RDONLY
+ bool "Set pages as read only while on the buddy list"
+ select FORCE_PAGES
+ select DEBUG_PAGEALLOC
+ help
+ Pages are always mapped in the kernel. This means that anyone
+ can write to the page if they have the address. Enable this option
+ to mark pages as read only to trigger a fault if any code attempts
+ to write to a page on the buddy list. This may have a performance
+ impact.
+
+ If unsure, say N.
+
+config KERNEL_TEXT_RDONLY
+ bool "Set kernel text section pages as read only"
+ depends on FREE_PAGES_RDONLY
+ depends on !DEBUG_RODATA
+ help
+ The kernel text pages are always mapped in the kernel.
+ This means that anyone can write to the page if they have
+ the address. Enable this option to mark the kernel text pages
+ as read only to trigger a fault if any code attempts to write
+ to a page part of the kernel text section. This may have a
+ performance impact.
+
+ If unsure, say N.
+
+config ARM64_STRICT_BREAK_BEFORE_MAKE
+ bool "Enforce strict break-before-make on page table updates "
+
source "drivers/hwtracing/coresight/Kconfig"
endmenu
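
The three page-protection debug options above form a chain: FREE_PAGES_RDONLY selects FORCE_PAGES and DEBUG_PAGEALLOC, and KERNEL_TEXT_RDONLY additionally requires DEBUG_RODATA to be disabled. A sketch of enabling the whole chain with the kernel's scripts/config helper (option names taken from the entries above):

    scripts/config --file .config \
        -e FORCE_PAGES -e FREE_PAGES_RDONLY -e KERNEL_TEXT_RDONLY \
        -d DEBUG_RODATA
    make ARCH=arm64 olddefconfig    # resolve the selected dependencies
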
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 5fef6284a0bc..4c30bfb9b06f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -50,9 +50,65 @@ config ARCH_MEDIATEK
config ARCH_QCOM
bool "Qualcomm Platforms"
select PINCTRL
+ select CLKDEV_LOOKUP
+ select HAVE_CLK
+ select HAVE_CLK_PREPARE
+ select PM_OPP
+ select SOC_BUS
+ select MSM_IRQ
+ select THERMAL_WRITABLE_TRIPS
+ select RATIONAL
+ select ARCH_HAS_RESET_CONTROLLER
help
This enables support for the ARMv8 based Qualcomm chipsets.
+config ARCH_MSM8996
+ bool "Enable Support for Qualcomm MSM8996"
+ depends on ARCH_QCOM
+ select COMMON_CLK_MSM
+ help
+ This enables support for the MSM8996 chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
+config ARCH_MSM8998
+ bool "Enable Support for Qualcomm MSM8998"
+ depends on ARCH_QCOM
+ select COMMON_CLK_MSM
+ help
+ This enables support for the MSM8998 chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
+config ARCH_MSMHAMSTER
+ bool "Enable Support for Qualcomm Technologies Inc MSMHAMSTER"
+ depends on ARCH_QCOM
+ select COMMON_CLK_MSM
+ help
+ This enables support for the MSMHAMSTER chipset.
+ If you do not wish to build a kernel that runs
+ on this chipset, say 'N' here.
+
+config ARCH_SDM660
+ bool "Enable Support for Qualcomm Technologies Inc SDM660"
+ depends on ARCH_QCOM
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
+ help
+ This enables support for the SDM660 chipset.
+ If you do not wish to build a kernel that runs
+ on this chipset, say 'N' here.
+
+config ARCH_SDM630
+ bool "Enable Support for Qualcomm Technologies Inc SDM630"
+ depends on ARCH_QCOM
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
+ help
+ This enables support for the SDM630 chipset.
+ If you do not wish to build a kernel that runs
+ on this chipset, say 'N' here.
+
config ARCH_ROCKCHIP
bool "Rockchip Platforms"
select ARCH_HAS_RESET_CONTROLLER
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 6340d3256e2f..4df2b48fae44 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -30,6 +30,35 @@ $(warning LSE atomics not supported by binutils)
endif
endif
+ifeq ($(CONFIG_COMPAT_VDSO), y)
+ CROSS_COMPILE_ARM32 ?= $(CONFIG_CROSS_COMPILE_ARM32:"%"=%)
+
+ # Check that the user has provided a valid prefix for the 32-bit toolchain.
+ # To prevent selecting the system $(cc-name) by default, the prefix is not
+ # allowed to be empty, unlike CROSS_COMPILE. In the unlikely event that the
+ # system $(cc-name) is actually the 32-bit ARM compiler to be used, the
+ # variable can be set to the dirname (e.g. CROSS_COMPILE_ARM32=/usr/bin/).
+ # Note: this Makefile is read both before and after regenerating the config
+ # (if needed). Any warning appearing before the config has been regenerated
+ # should be ignored. If the error is triggered and you set
+ # CONFIG_CROSS_COMPILE_ARM32, set CROSS_COMPILE_ARM32 to an appropriate value
+ # when invoking make and fix CONFIG_CROSS_COMPILE_ARM32.
+ ifeq ($(CROSS_COMPILE_ARM32),)
+ $(error CROSS_COMPILE_ARM32 not defined or empty, the compat vDSO will not be built)
+ else ifeq ($(cc-name),clang)
+ export CLANG_TRIPLE_ARM32 ?= $(CROSS_COMPILE_ARM32)
+ export CLANG_TARGET_ARM32 := --target=$(notdir $(CLANG_TRIPLE_ARM32:%-=%))
+ export CONFIG_VDSO32 := y
+ vdso32 := -DCONFIG_VDSO32=1
+ else ifeq ($(shell which $(CROSS_COMPILE_ARM32)$(cc-name) 2> /dev/null),)
+ $(error $(CROSS_COMPILE_ARM32)$(cc-name) not found, check CROSS_COMPILE_ARM32)
+ else
+ export CROSS_COMPILE_ARM32
+ export CONFIG_VDSO32 := y
+ vdso32 := -DCONFIG_VDSO32=1
+ endif
+endif
+
ifeq ($(cc-name),clang)
# This is a workaround for https://bugs.llvm.org/show_bug.cgi?id=30792.
# TODO: revert when this is fixed in LLVM.
@@ -37,11 +66,11 @@ KBUILD_CFLAGS += -mno-implicit-float
else
KBUILD_CFLAGS += -mgeneral-regs-only
endif
-KBUILD_CFLAGS += $(lseinstr)
+KBUILD_CFLAGS += $(lseinstr) $(vdso32)
KBUILD_CFLAGS += -fno-pic
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_AFLAGS += $(lseinstr)
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+KBUILD_AFLAGS += $(lseinstr) $(vdso32)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
@@ -103,6 +132,10 @@ endif
KBUILD_DTBS := dtbs
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm64/boot
@@ -148,6 +181,8 @@ archclean:
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+ $(if $(CONFIG_VDSO32),$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
+ include/generated/vdso32-offsets.h)
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
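
As the Makefile hunk above enforces, a COMPAT_VDSO=y build aborts unless CROSS_COMPILE_ARM32 names a usable 32-bit toolchain. A sketch of an invocation; the toolchain prefixes are examples and must match your environment:

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \
         CROSS_COMPILE_ARM32=arm-linux-gnueabihf- Image.gz
    # With clang, CLANG_TRIPLE_ARM32 defaults to CROSS_COMPILE_ARM32 and
    # the 32-bit --target is derived from it automatically.
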
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index aae168a2eeb0..5dc2c1c3668c 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -14,17 +14,15 @@
# Based on the ia64 boot/Makefile.
#
-include $(srctree)/arch/arm64/boot/dts/Makefile
-
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
ifneq ($(DTB_NAMES),)
DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
else
-DTB_LIST := $(dtb-y)
+DTB_OBJS := $(shell find -L $(obj)/dts/ -name \*.dtb)
endif
-DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 062edb210993..6441404ee2ed 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -21,17 +21,4 @@ dtstree := $(srctree)/$(src)
dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
always := $(dtb-y)
-
targets += dtbs
-
-DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
-ifneq ($(DTB_NAMES),)
-DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
-else
-DTB_LIST := $(dtb-y)
-endif
-targets += $(DTB_LIST)
-
-dtbs: $(addprefix $(obj)/, $(DTB_LIST))
-
-clean-files := dts/*.dtb *.dtb
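
With the dtbs rules consolidated into the boot Makefile, the concatenated image named by BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES can be driven from the config alone. A sketch, using a hypothetical board name and assuming any prerequisite appended-DTB options are already enabled:

    scripts/config --file .config -e BUILD_ARM64_DT_OVERLAY   # passes -@ to dtc
    scripts/config --file .config \
        --set-str BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES "qcom/sdm660-mtp"
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- Image.gz-dtb
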
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index a5004931e2f1..68816f71fa51 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -179,5 +179,310 @@
<&A53_3>;
};
+ etr@20070000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x20070000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ etr_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+ tpiu@20030000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0 0x20030000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ tpiu_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port0>;
+ };
+ };
+ };
+
+ replicator@20020000 {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etr_in_port>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out_port>;
+ };
+ };
+ };
+ };
+
+ etf@20010000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x20010000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* input port */
+ port@0 {
+ reg = <0>;
+ etf_in_port: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&main_funnel_out_port>;
+ };
+ };
+
+ /* output port */
+ port@1 {
+ reg = <0>;
+ etf_out_port: endpoint {
+ remote-endpoint =
+ <&replicator_in_port0>;
+ };
+ };
+ };
+ };
+
+ main_funnel@20040000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x20040000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ main_funnel_out_port: endpoint {
+ remote-endpoint =
+ <&etf_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ main_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&A72_57_funnel_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ main_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&A53_funnel_out_port>;
+ };
+ };
+
+ };
+ };
+
+ A72_57_funnel@220c0000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x220c0000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ A72_57_funnel_out_port: endpoint {
+ remote-endpoint =
+ <&main_funnel_in_port0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ A72_57_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&A72_57_etm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ A72_57_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&A72_57_etm1_out_port>;
+ };
+ };
+ };
+ };
+
+ A53_funnel@230c0000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x230c0000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ A53_funnel_out_port: endpoint {
+ remote-endpoint =
+ <&main_funnel_in_port1>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ A53_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&A53_etm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ A53_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&A53_etm1_out_port>;
+ };
+ };
+ port@3 {
+ reg = <2>;
+ A53_funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&A53_etm2_out_port>;
+ };
+ };
+ port@4 {
+ reg = <3>;
+ A53_funnel_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&A53_etm3_out_port>;
+ };
+ };
+ };
+ };
+
+ etm@22040000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x22040000 0 0x1000>;
+
+ cpu = <&A57_0>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ A72_57_etm0_out_port: endpoint {
+ remote-endpoint = <&A72_57_funnel_in_port0>;
+ };
+ };
+ };
+
+ etm@22140000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x22140000 0 0x1000>;
+
+ cpu = <&A57_1>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ A72_57_etm1_out_port: endpoint {
+ remote-endpoint = <&A72_57_funnel_in_port1>;
+ };
+ };
+ };
+
+ etm@23040000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23040000 0 0x1000>;
+
+ cpu = <&A53_0>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ A53_etm0_out_port: endpoint {
+ remote-endpoint = <&A53_funnel_in_port0>;
+ };
+ };
+ };
+
+ etm@23140000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23140000 0 0x1000>;
+
+ cpu = <&A53_1>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ A53_etm1_out_port: endpoint {
+ remote-endpoint = <&A53_funnel_in_port1>;
+ };
+ };
+ };
+
+ etm@23240000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23240000 0 0x1000>;
+
+ cpu = <&A53_2>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ A53_etm2_out_port: endpoint {
+ remote-endpoint = <&A53_funnel_in_port2>;
+ };
+ };
+ };
+
+ etm@23340000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23340000 0 0x1000>;
+
+ cpu = <&A53_3>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ port {
+ A53_etm3_out_port: endpoint {
+ remote-endpoint = <&A53_funnel_in_port3>;
+ };
+ };
+ };
+
#include "juno-base.dtsi"
};
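
The Juno additions above wire the per-CPU ETMs through the cluster funnels into the ETF, and from there through the replicator to the TPIU and ETR sinks. Once this topology probes, a trace session can be driven through the generic CoreSight sysfs interface; a sketch, assuming device names follow the usual <unit-address>.<type> pattern:

    # Select the ETR as sink, then enable tracing on the A53_0 ETM.
    echo 1 > /sys/bus/coresight/devices/20070000.etr/enable_sink
    echo 1 > /sys/bus/coresight/devices/23040000.etm/enable_source
    # ... run the workload to be traced ...
    echo 0 > /sys/bus/coresight/devices/23040000.etm/enable_source
    dd if=/dev/20070000.etr of=trace.bin    # retrieve the captured trace
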
diff --git a/arch/arm64/boot/dts/qcom b/arch/arm64/boot/dts/qcom
new file mode 120000
index 000000000000..d1df72391997
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom
@@ -0,0 +1 @@
+../../../../arch/arm/boot/dts/qcom
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
deleted file mode 100644
index 8e94af64ee94..000000000000
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb msm8916-mtp.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
deleted file mode 100644
index e03c11d9d834..000000000000
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ /dev/null
@@ -1,40 +0,0 @@
-#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
-
-&pm8916_gpios {
-
- usb_hub_reset_pm: usb_hub_reset_pm {
- pinconf {
- pins = "gpio3";
- function = PMIC_GPIO_FUNC_NORMAL;
- output-low;
- };
- };
-
- usb_sw_sel_pm: usb_sw_sel_pm {
- pinconf {
- pins = "gpio4";
- function = PMIC_GPIO_FUNC_NORMAL;
- power-source = <PM8916_GPIO_VPH>;
- input-disable;
- };
- };
-
- pm8916_gpios_leds: pm8916_gpios_leds {
- pinconf {
- pins = "gpio1", "gpio2";
- function = PMIC_GPIO_FUNC_NORMAL;
- output-low;
- };
- };
-};
-
-&pm8916_mpps {
-
- pm8916_mpps_leds: pm8916_mpps_leds {
- pinconf {
- pins = "mpp2", "mpp3";
- function = PMIC_GPIO_FUNC_NORMAL;
- output-low;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
deleted file mode 100644
index cbeee0bcdf52..000000000000
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#include <dt-bindings/gpio/gpio.h>
-
-&msmgpio {
-
- msmgpio_leds: msmgpio_leds {
- pinconf {
- pins = "gpio21", "gpio120";
- function = "gpio";
- output-low;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
deleted file mode 100644
index 825f489a2af7..000000000000
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-#include "apq8016-sbc.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. APQ 8016 SBC";
- compatible = "qcom,apq8016-sbc", "qcom,apq8016", "qcom,sbc";
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
deleted file mode 100644
index 3011c88bd2f3..000000000000
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "msm8916.dtsi"
-#include "pm8916.dtsi"
-#include "apq8016-sbc-soc-pins.dtsi"
-#include "apq8016-sbc-pmic-pins.dtsi"
-
-/ {
- aliases {
- serial0 = &blsp1_uart2;
- serial1 = &blsp1_uart1;
- };
-
- chosen {
- stdout-path = "serial0";
- };
-
- soc {
- serial@78b0000 {
- status = "okay";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_uart2_default>;
- pinctrl-1 = <&blsp1_uart2_sleep>;
- };
-
- i2c@78b6000 {
- /* On Low speed expansion */
- status = "okay";
- };
-
- i2c@78b8000 {
- /* On High speed expansion */
- status = "okay";
- };
-
- i2c@78ba000 {
- /* On Low speed expansion */
- status = "okay";
- };
-
- spi@78b7000 {
- /* On High speed expansion */
- status = "okay";
- };
-
- spi@78b9000 {
- /* On Low speed expansion */
- status = "okay";
- };
-
- leds {
- pinctrl-names = "default";
- pinctrl-0 = <&msmgpio_leds>,
- <&pm8916_gpios_leds>,
- <&pm8916_mpps_leds>;
-
- compatible = "gpio-leds";
-
- led@1 {
- label = "apq8016-sbc:green:user1";
- gpios = <&msmgpio 21 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
-
- led@2 {
- label = "apq8016-sbc:green:user2";
- gpios = <&msmgpio 120 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc0";
- default-state = "off";
- };
-
- led@3 {
- label = "apq8016-sbc:green:user3";
- gpios = <&pm8916_gpios 1 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc1";
- default-state = "off";
- };
-
- led@4 {
- label = "apq8016-sbc:green:user4";
- gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "none";
- default-state = "off";
- };
-
- led@5 {
- label = "apq8016-sbc:yellow:wlan";
- gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "wlan";
- default-state = "off";
- };
-
- led@6 {
- label = "apq8016-sbc:blue:bt";
- gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "bluetooth-power";
- default-state = "off";
- };
- };
- };
-};
-
-&sdhc_1 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
deleted file mode 100644
index 49ec55a37614..000000000000
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-&msmgpio {
-
- blsp1_uart1_default: blsp1_uart1_default {
- pinmux {
- function = "blsp_uart1";
- pins = "gpio0", "gpio1";
- };
- pinconf {
- pins = "gpio0", "gpio1";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp1_uart1_sleep: blsp1_uart1_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio0", "gpio1";
- };
- pinconf {
- pins = "gpio0", "gpio1";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- blsp1_uart2_default: blsp1_uart2_default {
- pinmux {
- function = "blsp_uart2";
- pins = "gpio4", "gpio5";
- };
- pinconf {
- pins = "gpio4", "gpio5";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp1_uart2_sleep: blsp1_uart2_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio4", "gpio5";
- };
- pinconf {
- pins = "gpio4", "gpio5";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- spi1_default: spi1_default {
- pinmux {
- function = "blsp_spi1";
- pins = "gpio0", "gpio1", "gpio3";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio2";
- };
- pinconf {
- pins = "gpio0", "gpio1", "gpio3";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio2";
- drive-strength = <2>;
- bias-disable;
- output-high;
- };
- };
-
- spi1_sleep: spi1_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
- };
- pinconf {
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- spi2_default: spi2_default {
- pinmux {
- function = "blsp_spi2";
- pins = "gpio4", "gpio5", "gpio7";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio6";
- };
- pinconf {
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio6";
- drive-strength = <2>;
- bias-disable;
- output-high;
- };
- };
-
- spi2_sleep: spi2_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- };
- pinconf {
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- spi3_default: spi3_default {
- pinmux {
- function = "blsp_spi3";
- pins = "gpio8", "gpio9", "gpio11";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio10";
- };
- pinconf {
- pins = "gpio8", "gpio9", "gpio10", "gpio11";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio10";
- drive-strength = <2>;
- bias-disable;
- output-high;
- };
- };
-
- spi3_sleep: spi3_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio8", "gpio9", "gpio10", "gpio11";
- };
- pinconf {
- pins = "gpio8", "gpio9", "gpio10", "gpio11";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- spi4_default: spi4_default {
- pinmux {
- function = "blsp_spi4";
- pins = "gpio12", "gpio13", "gpio15";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio14";
- };
- pinconf {
- pins = "gpio12", "gpio13", "gpio14", "gpio15";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio14";
- drive-strength = <2>;
- bias-disable;
- output-high;
- };
- };
-
- spi4_sleep: spi4_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio12", "gpio13", "gpio14", "gpio15";
- };
- pinconf {
- pins = "gpio12", "gpio13", "gpio14", "gpio15";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- spi5_default: spi5_default {
- pinmux {
- function = "blsp_spi5";
- pins = "gpio16", "gpio17", "gpio19";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio18";
- };
- pinconf {
- pins = "gpio16", "gpio17", "gpio18", "gpio19";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio18";
- drive-strength = <2>;
- bias-disable;
- output-high;
- };
- };
-
- spi5_sleep: spi5_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio16", "gpio17", "gpio18", "gpio19";
- };
- pinconf {
- pins = "gpio16", "gpio17", "gpio18", "gpio19";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- spi6_default: spi6_default {
- pinmux {
- function = "blsp_spi6";
- pins = "gpio20", "gpio21", "gpio23";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio22";
- };
- pinconf {
- pins = "gpio20", "gpio21", "gpio22", "gpio23";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio22";
- drive-strength = <2>;
- bias-disable;
- output-high;
- };
- };
-
- spi6_sleep: spi6_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio20", "gpio21", "gpio22", "gpio23";
- };
- pinconf {
- pins = "gpio20", "gpio21", "gpio22", "gpio23";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- i2c2_default: i2c2_default {
- pinmux {
- function = "blsp_i2c2";
- pins = "gpio6", "gpio7";
- };
- pinconf {
- pins = "gpio6", "gpio7";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- i2c2_sleep: i2c2_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio6", "gpio7";
- };
- pinconf {
- pins = "gpio6", "gpio7";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- i2c4_default: i2c4_default {
- pinmux {
- function = "blsp_i2c4";
- pins = "gpio14", "gpio15";
- };
- pinconf {
- pins = "gpio14", "gpio15";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- i2c4_sleep: i2c4_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio14", "gpio15";
- };
- pinconf {
- pins = "gpio14", "gpio15";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- i2c6_default: i2c6_default {
- pinmux {
- function = "blsp_i2c6";
- pins = "gpio22", "gpio23";
- };
- pinconf {
- pins = "gpio22", "gpio23";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- i2c6_sleep: i2c6_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio22", "gpio23";
- };
- pinconf {
- pins = "gpio22", "gpio23";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- sdhc2_cd_pin {
- sdc2_cd_on: cd_on {
- pinmux {
- function = "gpio";
- pins = "gpio38";
- };
- pinconf {
- pins = "gpio38";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- sdc2_cd_off: cd_off {
- pinmux {
- function = "gpio";
- pins = "gpio38";
- };
- pinconf {
- pins = "gpio38";
- drive-strength = <2>;
- bias-disable;
- };
- };
- };
-
- pmx_sdc1_clk {
- sdc1_clk_on: clk_on {
- pinmux {
- pins = "sdc1_clk";
- };
- pinconf {
- pins = "sdc1_clk";
- bias-disable;
- drive-strength = <16>;
- };
- };
- sdc1_clk_off: clk_off {
- pinmux {
- pins = "sdc1_clk";
- };
- pinconf {
- pins = "sdc1_clk";
- bias-disable;
- drive-strength = <2>;
- };
- };
- };
-
- pmx_sdc1_cmd {
- sdc1_cmd_on: cmd_on {
- pinmux {
- pins = "sdc1_cmd";
- };
- pinconf {
- pins = "sdc1_cmd";
- bias-pull-up;
- drive-strength = <10>;
- };
- };
- sdc1_cmd_off: cmd_off {
- pinmux {
- pins = "sdc1_cmd";
- };
- pinconf {
- pins = "sdc1_cmd";
- bias-pull-up;
- drive-strength = <2>;
- };
- };
- };
-
- pmx_sdc1_data {
- sdc1_data_on: data_on {
- pinmux {
- pins = "sdc1_data";
- };
- pinconf {
- pins = "sdc1_data";
- bias-pull-up;
- drive-strength = <10>;
- };
- };
- sdc1_data_off: data_off {
- pinmux {
- pins = "sdc1_data";
- };
- pinconf {
- pins = "sdc1_data";
- bias-pull-up;
- drive-strength = <2>;
- };
- };
- };
-
- pmx_sdc2_clk {
- sdc2_clk_on: clk_on {
- pinmux {
- pins = "sdc2_clk";
- };
- pinconf {
- pins = "sdc2_clk";
- bias-disable;
- drive-strength = <16>;
- };
- };
- sdc2_clk_off: clk_off {
- pinmux {
- pins = "sdc2_clk";
- };
- pinconf {
- pins = "sdc2_clk";
- bias-disable;
- drive-strength = <2>;
- };
- };
- };
-
- pmx_sdc2_cmd {
- sdc2_cmd_on: cmd_on {
- pinmux {
- pins = "sdc2_cmd";
- };
- pinconf {
- pins = "sdc2_cmd";
- bias-pull-up;
- drive-strength = <10>;
- };
- };
- sdc2_cmd_off: cmd_off {
- pinmux {
- pins = "sdc2_cmd";
- };
- pinconf {
- pins = "sdc2_cmd";
- bias-pull-up;
- drive-strength = <2>;
- };
- };
- };
-
- pmx_sdc2_data {
- sdc2_data_on: data_on {
- pinmux {
- pins = "sdc2_data";
- };
- pinconf {
- pins = "sdc2_data";
- bias-pull-up;
- drive-strength = <10>;
- };
- };
- sdc2_data_off: data_off {
- pinmux {
- pins = "sdc2_data";
- };
- pinconf {
- pins = "sdc2_data";
- bias-pull-up;
- drive-strength = <2>;
- };
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
deleted file mode 100644
index 8d184ff19642..000000000000
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/clock/qcom,gcc-msm8916.h>
-#include <dt-bindings/reset/qcom,gcc-msm8916.h>
-
-/ {
- model = "Qualcomm Technologies, Inc. MSM8916";
- compatible = "qcom,msm8916";
-
- interrupt-parent = <&intc>;
-
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
- sdhc2 = &sdhc_2; /* SDC2 SD card slot */
- };
-
- chosen { };
-
- memory {
- device_type = "memory";
- /* We expect the bootloader to fill in the reg */
- reg = <0 0 0 0>;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
- reg = <0x0>;
- };
-
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
- reg = <0x1>;
- };
-
- CPU2: cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
- reg = <0x2>;
- };
-
- CPU3: cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
- reg = <0x3>;
- };
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- };
-
- soc: soc {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0xffffffff>;
- compatible = "simple-bus";
-
- restart@4ab000 {
- compatible = "qcom,pshold";
- reg = <0x4ab000 0x4>;
- };
-
- msmgpio: pinctrl@1000000 {
- compatible = "qcom,msm8916-pinctrl";
- reg = <0x1000000 0x300000>;
- interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gcc: qcom,gcc@1800000 {
- compatible = "qcom,gcc-msm8916";
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
- reg = <0x1800000 0x80000>;
- };
-
- blsp1_uart1: serial@78af000 {
- compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x78af000 0x200>;
- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- status = "disabled";
- };
-
- blsp1_uart2: serial@78b0000 {
- compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x78b0000 0x200>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- status = "disabled";
- };
-
- blsp_dma: dma@7884000 {
- compatible = "qcom,bam-v1.7.0";
- reg = <0x07884000 0x23000>;
- interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "bam_clk";
- #dma-cells = <1>;
- qcom,ee = <0>;
- status = "disabled";
- };
-
- blsp_spi1: spi@78b5000 {
- compatible = "qcom,spi-qup-v2.2.1";
- reg = <0x078b5000 0x600>;
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- dmas = <&blsp_dma 5>, <&blsp_dma 4>;
- dma-names = "rx", "tx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&spi1_default>;
- pinctrl-1 = <&spi1_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_spi2: spi@78b6000 {
- compatible = "qcom,spi-qup-v2.2.1";
- reg = <0x078b6000 0x600>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- dmas = <&blsp_dma 7>, <&blsp_dma 6>;
- dma-names = "rx", "tx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&spi2_default>;
- pinctrl-1 = <&spi2_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_spi3: spi@78b7000 {
- compatible = "qcom,spi-qup-v2.2.1";
- reg = <0x078b7000 0x600>;
- interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- dmas = <&blsp_dma 9>, <&blsp_dma 8>;
- dma-names = "rx", "tx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&spi3_default>;
- pinctrl-1 = <&spi3_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_spi4: spi@78b8000 {
- compatible = "qcom,spi-qup-v2.2.1";
- reg = <0x078b8000 0x600>;
- interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- dmas = <&blsp_dma 11>, <&blsp_dma 10>;
- dma-names = "rx", "tx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&spi4_default>;
- pinctrl-1 = <&spi4_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_spi5: spi@78b9000 {
- compatible = "qcom,spi-qup-v2.2.1";
- reg = <0x078b9000 0x600>;
- interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_QUP5_SPI_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- dmas = <&blsp_dma 13>, <&blsp_dma 12>;
- dma-names = "rx", "tx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&spi5_default>;
- pinctrl-1 = <&spi5_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_spi6: spi@78ba000 {
- compatible = "qcom,spi-qup-v2.2.1";
- reg = <0x078ba000 0x600>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_QUP6_SPI_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- dmas = <&blsp_dma 15>, <&blsp_dma 14>;
- dma-names = "rx", "tx";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&spi6_default>;
- pinctrl-1 = <&spi6_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_i2c2: i2c@78b6000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0x78b6000 0x1000>;
- interrupts = <GIC_SPI 96 0>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c2_default>;
- pinctrl-1 = <&i2c2_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_i2c4: i2c@78b8000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0x78b8000 0x1000>;
- interrupts = <GIC_SPI 98 0>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c4_default>;
- pinctrl-1 = <&i2c4_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_i2c6: i2c@78ba000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0x78ba000 0x1000>;
- interrupts = <GIC_SPI 100 0>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c6_default>;
- pinctrl-1 = <&i2c6_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- sdhc_1: sdhci@07824000 {
- compatible = "qcom,sdhci-msm-v4";
- reg = <0x07824900 0x11c>, <0x07824000 0x800>;
- reg-names = "hc_mem", "core_mem";
-
- interrupts = <0 123 0>, <0 138 0>;
- interrupt-names = "hc_irq", "pwr_irq";
- clocks = <&gcc GCC_SDCC1_APPS_CLK>,
- <&gcc GCC_SDCC1_AHB_CLK>;
- clock-names = "core", "iface";
- bus-width = <8>;
- non-removable;
- status = "disabled";
- };
-
- sdhc_2: sdhci@07864000 {
- compatible = "qcom,sdhci-msm-v4";
- reg = <0x07864900 0x11c>, <0x07864000 0x800>;
- reg-names = "hc_mem", "core_mem";
-
- interrupts = <0 125 0>, <0 221 0>;
- interrupt-names = "hc_irq", "pwr_irq";
- clocks = <&gcc GCC_SDCC2_APPS_CLK>,
- <&gcc GCC_SDCC2_AHB_CLK>;
- clock-names = "core", "iface";
- bus-width = <4>;
- status = "disabled";
- };
-
- usb_dev: usb@78d9000 {
- compatible = "qcom,ci-hdrc";
- reg = <0x78d9000 0x400>;
- dr_mode = "peripheral";
- interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
- usb-phy = <&usb_otg>;
- status = "disabled";
- };
-
- usb_host: ehci@78d9000 {
- compatible = "qcom,ehci-host";
- reg = <0x78d9000 0x400>;
- interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
- usb-phy = <&usb_otg>;
- status = "disabled";
- };
-
- usb_otg: phy@78d9000 {
- compatible = "qcom,usb-otg-snps";
- reg = <0x78d9000 0x400>;
- interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_BOTH>,
- <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
-
- qcom,vdd-levels = <1 5 7>;
- qcom,phy-init-sequence = <0x44 0x6B 0x24 0x13>;
- dr_mode = "peripheral";
- qcom,otg-control = <2>; // PMIC
-
- clocks = <&gcc GCC_USB_HS_AHB_CLK>,
- <&gcc GCC_USB_HS_SYSTEM_CLK>,
- <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
- clock-names = "iface", "core", "sleep";
-
- resets = <&gcc GCC_USB2A_PHY_BCR>,
- <&gcc GCC_USB_HS_BCR>;
- reset-names = "phy", "link";
- status = "disabled";
- };
-
- intc: interrupt-controller@b000000 {
- compatible = "qcom,msm-qgic2";
- interrupt-controller;
- #interrupt-cells = <3>;
- reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
- };
-
- timer@b020000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- compatible = "arm,armv7-timer-mem";
- reg = <0xb020000 0x1000>;
- clock-frequency = <19200000>;
-
- frame@b021000 {
- frame-number = <0>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb021000 0x1000>,
- <0xb022000 0x1000>;
- };
-
- frame@b023000 {
- frame-number = <1>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb023000 0x1000>;
- status = "disabled";
- };
-
- frame@b024000 {
- frame-number = <2>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb024000 0x1000>;
- status = "disabled";
- };
-
- frame@b025000 {
- frame-number = <3>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb025000 0x1000>;
- status = "disabled";
- };
-
- frame@b026000 {
- frame-number = <4>;
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb026000 0x1000>;
- status = "disabled";
- };
-
- frame@b027000 {
- frame-number = <5>;
- interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb027000 0x1000>;
- status = "disabled";
- };
-
- frame@b028000 {
- frame-number = <6>;
- interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb028000 0x1000>;
- status = "disabled";
- };
- };
-
- spmi_bus: spmi@200f000 {
- compatible = "qcom,spmi-pmic-arb";
- reg = <0x200f000 0x001000>,
- <0x2400000 0x400000>,
- <0x2c00000 0x400000>,
- <0x3800000 0x200000>,
- <0x200a000 0x002100>;
- reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
- interrupt-names = "periph_irq";
- interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>;
- qcom,ee = <0>;
- qcom,channel = <0>;
- #address-cells = <2>;
- #size-cells = <0>;
- interrupt-controller;
- #interrupt-cells = <4>;
- };
-
- rng@22000 {
- compatible = "qcom,prng";
- reg = <0x00022000 0x200>;
- clocks = <&gcc GCC_PRNG_AHB_CLK>;
- clock-names = "core";
- };
- };
-};
-
-#include "msm8916-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
deleted file mode 100644
index b222ece7e3d2..000000000000
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <dt-bindings/iio/qcom,spmi-vadc.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/spmi/spmi.h>
-
-&spmi_bus {
-
- usid0: pm8916@0 {
- compatible = "qcom,spmi-pmic";
- reg = <0x0 SPMI_USID>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- rtc@6000 {
- compatible = "qcom,pm8941-rtc";
- reg = <0x6000 0x6100>;
- reg-names = "rtc", "alarm";
- interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
- };
-
- pwrkey@800 {
- compatible = "qcom,pm8941-pwrkey";
- reg = <0x800>;
- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- };
-
- pm8916_gpios: gpios@c000 {
- compatible = "qcom,pm8916-gpio";
- reg = <0xc000 0x400>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupts = <0 0xc0 0 IRQ_TYPE_NONE>,
- <0 0xc1 0 IRQ_TYPE_NONE>,
- <0 0xc2 0 IRQ_TYPE_NONE>,
- <0 0xc3 0 IRQ_TYPE_NONE>;
- };
-
- pm8916_mpps: mpps@a000 {
- compatible = "qcom,pm8916-mpp";
- reg = <0xa000 0x400>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupts = <0 0xa0 0 IRQ_TYPE_NONE>,
- <0 0xa1 0 IRQ_TYPE_NONE>,
- <0 0xa2 0 IRQ_TYPE_NONE>,
- <0 0xa3 0 IRQ_TYPE_NONE>;
- };
-
- pm8916_temp: temp-alarm@2400 {
- compatible = "qcom,spmi-temp-alarm";
- reg = <0x2400 0x100>;
- interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
- io-channels = <&pm8916_vadc VADC_DIE_TEMP>;
- io-channel-names = "thermal";
- #thermal-sensor-cells = <0>;
- };
-
- pm8916_vadc: vadc@3100 {
- compatible = "qcom,spmi-vadc";
- reg = <0x3100 0x100>;
- interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
- #address-cells = <1>;
- #size-cells = <0>;
- #io-channel-cells = <1>;
-
- usb_in {
- reg = <VADC_USBIN>;
- qcom,pre-scaling = <1 10>;
- };
- vph_pwr {
- reg = <VADC_VSYS>;
- qcom,pre-scaling = <1 3>;
- };
- die_temp {
- reg = <VADC_DIE_TEMP>;
- };
- ref_625mv {
- reg = <VADC_REF_625MV>;
- };
- ref_1250v {
- reg = <VADC_REF_1250MV>;
- };
- ref_gnd {
- reg = <VADC_GND_REF>;
- };
- ref_vdd {
- reg = <VADC_VDD_VADC>;
- };
- };
- };
-
- usid1: pm8916@1 {
- compatible = "qcom,spmi-pmic";
- reg = <0x1 SPMI_USID>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-};
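
For reference: a sketch of how a probe function typically pulls properties out of a node shaped like the pwrkey@800 node above. The function name is hypothetical; the OF helpers are the standard kernel API.

    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_pwrkey_parse(struct platform_device *pdev)
    {
            struct device_node *np = pdev->dev.of_node;
            u32 debounce_us = 0;
            int irq;

            /* Optional property: keep the default if absent. */
            of_property_read_u32(np, "debounce", &debounce_us);

            /* interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH> resolves
             * through the SPMI arbiter's interrupt controller. */
            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            dev_info(&pdev->dev, "debounce %u us, irq %d\n",
                     debounce_us, irq);
            return 0;
    }
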
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index c17bac47626b..729e80868871 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -384,6 +384,7 @@ CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
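
For reference: each CONFIG_FOO=y line in a defconfig becomes a CONFIG_FOO macro in the generated autoconf header, so kernel code can branch on it at compile time. A minimal sketch using the stock IS_ENABLED() helper (the reporting function itself is illustrative):

    #include <linux/kconfig.h>
    #include <linux/printk.h>

    /* IS_ENABLED() is true for both =y and =m, and keeps the
     * dead branch type-checked even when it is compiled out. */
    static void example_report_overlayfs(void)
    {
            if (IS_ENABLED(CONFIG_OVERLAY_FS))
                    pr_info("overlayfs is built in\n");
            else
                    pr_info("overlayfs is not configured\n");
    }
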
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index bdd7aa358d2a..365e13004651 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -122,6 +122,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_VIRTIO_CONSOLE=y
+CONFIG_DIAG_CHAR=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_QUP=y
@@ -225,3 +226,6 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_HIBERNATION=y
+CONFIG_KPROBES=y
+CONFIG_CORESIGHT=y
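
For reference: CONFIG_KPROBES added above enables the dynamic instrumentation API. A minimal module in the style of the in-tree samples/kprobes example; the probed symbol name is an assumption and varies by kernel version.

    #include <linux/kprobes.h>
    #include <linux/module.h>

    /* Symbol name is an assumption; use one present in your build. */
    static struct kprobe kp = {
            .symbol_name = "do_fork",
    };

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("kprobe hit at %p\n", p->addr);
            return 0;
    }

    static int __init kprobe_example_init(void)
    {
            kp.pre_handler = handler_pre;
            return register_kprobe(&kp);
    }

    static void __exit kprobe_example_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kprobe_example_init);
    module_exit(kprobe_example_exit);
    MODULE_LICENSE("GPL");
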
diff --git a/arch/arm64/configs/fsmcortex-perf_defconfig b/arch/arm64/configs/fsmcortex-perf_defconfig
new file mode 100644
index 000000000000..57e441c53ea9
--- /dev/null
+++ b/arch/arm64/configs/fsmcortex-perf_defconfig
@@ -0,0 +1,596 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+# CONFIG_VGA_ARB is not set
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ONESHOT_SYNC=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_EXT_DISPLAY=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_WCD_DSP_GLINK=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
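
For reference: CONFIG_SECCOMP in this defconfig is what exposes the prctl-based sandbox to userspace. A minimal userspace sketch of strict mode; after the prctl only read(), write(), _exit() and sigreturn() are permitted.

    #include <linux/seccomp.h>
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    int main(void)
    {
            if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
                    perror("prctl");
                    return 1;
            }
            /* Any syscall outside the strict whitelist now
             * terminates the process with SIGKILL. */
            write(1, "sandboxed\n", 10);
            _exit(0);
    }
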
diff --git a/arch/arm64/configs/fsmcortex_defconfig b/arch/arm64/configs/fsmcortex_defconfig
new file mode 100644
index 000000000000..db312ae174a5
--- /dev/null
+++ b/arch/arm64/configs/fsmcortex_defconfig
@@ -0,0 +1,671 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+# CONFIG_VGA_ARB is not set
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_DEBUG=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ONESHOT_SYNC=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_MSM_MHI_DEBUG=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_EXT_DISPLAY=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_QCOM_DCC=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_GLADIATOR_ERP_V2=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_WCD_DSP_GLINK=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
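
For reference: this non-perf variant layers heavy debug instrumentation on top of the -perf config (kmemleak, DEBUG_OBJECTS, fault injection, lockup detectors). As a sketch of what CONFIG_DEBUG_ATOMIC_SLEEP checks: annotating a sleeping helper with might_sleep() makes any call from atomic context print a "sleeping function called from invalid context" splat on these builds. The helper name is hypothetical.

    #include <linux/delay.h>
    #include <linux/kernel.h>

    /* Hypothetical helper that may block while polling hardware.
     * Under CONFIG_DEBUG_ATOMIC_SLEEP=y, calling it under a
     * spinlock or from IRQ context triggers a backtrace. */
    static void example_wait_for_hw(void)
    {
            might_sleep();
            msleep(10);
    }
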
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
new file mode 100644
index 000000000000..95fb47583db4
--- /dev/null
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -0,0 +1,637 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8996=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_ENABLE_FP_SIMD_SETTINGS=y
+CONFIG_MSM_APP_SETTINGS=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CNSS_CRYPTO=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CNSS=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH_TS=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_GEN_VKEYS=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+# CONFIG_ACPI_I2C_OPREGION is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_FG=y
+CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_EPM_ADC=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_KRYO=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_SOC_CAMERA_PLATFORM=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_RADIO_SILABS=y
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=4
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_MHI_XPRT=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_MSM_L2_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_SCM_XPU=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
new file mode 100644
index 000000000000..3661419528c2
--- /dev/null
+++ b/arch/arm64/configs/msm_defconfig
@@ -0,0 +1,675 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8996=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_ENABLE_FP_SIMD_SETTINGS=y
+CONFIG_MSM_APP_SETTINGS=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_CMDLINE="console=ttyAMA0"
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CNSS_CRYPTO=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CNSS=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH_TS=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_GEN_VKEYS=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QUP=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_FG=y
+CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_EPM_ADC=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_KRYO=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_SOC_CAMERA_PLATFORM=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_RADIO_SILABS=y
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8996=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_DEBUG=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_MSM_MHI_DEBUG=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_MHI_XPRT=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_MSM_L2_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_SCM_XPU=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
new file mode 100644
index 000000000000..7bd9ddba5f49
--- /dev/null
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -0,0 +1,680 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_STMVL53L0=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_ICNSS=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
new file mode 100644
index 000000000000..a37953ad33c5
--- /dev/null
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -0,0 +1,762 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_STMVL53L0=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_DEBUG=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_MSM_MHI_DEBUG=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_QCOM_DCC=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_ICNSS=y
+CONFIG_MSM_GLADIATOR_ERP_V2=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
new file mode 100644
index 000000000000..b0b7088e3327
--- /dev/null
+++ b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
@@ -0,0 +1,653 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_IPVS=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_VS=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_SKY2=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_TARGET_SNOC=m
+CONFIG_ATH10K_SNOC=y
+CONFIG_ATH10K_DFS_CERTIFIED=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_MSM_SDE_HDMI_CEC=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX=y
+CONFIG_TSPP=m
+CONFIG_DRM=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_ICNSS=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_BTRFS_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig
new file mode 100644
index 000000000000..4e4be010e4c3
--- /dev/null
+++ b/arch/arm64/configs/msmcortex_mediabox_defconfig
@@ -0,0 +1,714 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_IPVS=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_VS=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_TARGET_SNOC=m
+CONFIG_ATH10K_SNOC=y
+CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_DFS_CERTIFIED=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_MSM_SDE_HDMI_CEC=y
+CONFIG_DVB_MPQ=y
+CONFIG_DVB_MPQ_DEMUX=y
+CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX=y
+CONFIG_TSPP=y
+CONFIG_DRM=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_QCOM_DCC=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_ICNSS=y
+CONFIG_MSM_GLADIATOR_ERP_V2=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_BTRFS_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig
index 7f847fc40d14..ceebab60da2b 100644
--- a/arch/arm64/configs/ranchu64_defconfig
+++ b/arch/arm64/configs/ranchu64_defconfig
@@ -214,7 +214,6 @@ CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_MISC=y
-CONFIG_INPUT_KEYCHORD=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=y
# CONFIG_SERIO_SERPORT is not set
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
new file mode 100644
index 000000000000..104138449323
--- /dev/null
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -0,0 +1,687 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM660=y
+CONFIG_ARCH_SDM630=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
+CONFIG_PROCESS_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_STMVL53L0=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_CPR4_MMSS_LDO=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_SND_SOC_SDM660_COMMON=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_MSM_GPUCC_660=y
+CONFIG_MSM_MMCC_660=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_ICNSS=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
new file mode 100644
index 000000000000..a6433cbb8fc0
--- /dev/null
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -0,0 +1,761 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM660=y
+CONFIG_ARCH_SDM630=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
+CONFIG_PROCESS_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_STMVL53L0=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_CPR4_MMSS_LDO=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_SND_SOC_SDM660_COMMON=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_CE=y
+CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_MSM_GPUCC_660=y
+CONFIG_MSM_MMCC_660=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_QCOM_DCC=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_ICNSS=y
+CONFIG_MSM_GLADIATOR_ERP_V2=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
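As with the other defconfigs added in this series, the file above is a minimized fragment rather than a complete .config: it records only the options that differ from the Kconfig defaults, with lines of the form "# CONFIG_FOO is not set" pinning defaults that are deliberately disabled. Running "make ARCH=arm64 sdm660_defconfig" expands it against the Kconfig rules into a full configuration.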
diff --git a/arch/arm64/configs/vendor/msm-perf_defconfig b/arch/arm64/configs/vendor/msm-perf_defconfig
new file mode 120000
index 000000000000..8b124b7e4bbe
--- /dev/null
+++ b/arch/arm64/configs/vendor/msm-perf_defconfig
@@ -0,0 +1 @@
+../msm-perf_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/msm_defconfig b/arch/arm64/configs/vendor/msm_defconfig
new file mode 120000
index 000000000000..b3d2530373fa
--- /dev/null
+++ b/arch/arm64/configs/vendor/msm_defconfig
@@ -0,0 +1 @@
+../msm_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/msmcortex-perf_defconfig b/arch/arm64/configs/vendor/msmcortex-perf_defconfig
new file mode 120000
index 000000000000..4af6507b9e8e
--- /dev/null
+++ b/arch/arm64/configs/vendor/msmcortex-perf_defconfig
@@ -0,0 +1 @@
+../msmcortex-perf_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/msmcortex_defconfig b/arch/arm64/configs/vendor/msmcortex_defconfig
new file mode 120000
index 000000000000..da30fee62781
--- /dev/null
+++ b/arch/arm64/configs/vendor/msmcortex_defconfig
@@ -0,0 +1 @@
+../msmcortex_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/msmcortex_mediabox-perf_defconfig b/arch/arm64/configs/vendor/msmcortex_mediabox-perf_defconfig
new file mode 120000
index 000000000000..87c3757d6db5
--- /dev/null
+++ b/arch/arm64/configs/vendor/msmcortex_mediabox-perf_defconfig
@@ -0,0 +1 @@
+../msmcortex_mediabox-perf_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/msmcortex_mediabox_defconfig b/arch/arm64/configs/vendor/msmcortex_mediabox_defconfig
new file mode 120000
index 000000000000..9d87f6373ed9
--- /dev/null
+++ b/arch/arm64/configs/vendor/msmcortex_mediabox_defconfig
@@ -0,0 +1 @@
+../msmcortex_mediabox_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig
new file mode 120000
index 000000000000..5a629cc6da53
--- /dev/null
+++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig
@@ -0,0 +1 @@
+../sdm660-perf_defconfig
\ No newline at end of file
diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig
new file mode 120000
index 000000000000..b1f9b548fbd6
--- /dev/null
+++ b/arch/arm64/configs/vendor/sdm660_defconfig
@@ -0,0 +1 @@
+../sdm660_defconfig
\ No newline at end of file
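Git mode 120000 marks each of the eight one-line blobs above as a symbolic link: the blob content is the link target, so every configs/vendor/ entry simply resolves to the matching top-level defconfig. The targets are stored without a trailing newline, hence the "\ No newline at end of file" marker on each hunk.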
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 487cd382d333..de1aab4b5da8 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -58,11 +58,4 @@ config CRYPTO_CRC32_ARM64
tristate "CRC32 and CRC32C using optional ARMv8 instructions"
depends on ARM64
select CRYPTO_HASH
-
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_GF128MUL
- select CRYPTO_SPECK
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 28a71246e0a3..f0a8f2475ea3 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -32,9 +32,6 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
-
AFLAGS_aes-ce.o := -DINTERLEAVE=4
AFLAGS_aes-neon.o := -DINTERLEAVE=4
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
deleted file mode 100644
index b14463438b09..000000000000
--- a/arch/arm64/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
-
- // arguments
- ROUND_KEYS .req x0 // const {u64,u32} *round_keys
- NROUNDS .req w1 // int nrounds
- NROUNDS_X .req x1
- DST .req x2 // void *dst
- SRC .req x3 // const void *src
- NBYTES .req w4 // unsigned int nbytes
- TWEAK .req x5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- // (underscores avoid a naming collision with ARM64 registers x0-x3)
- X_0 .req v0
- Y_0 .req v1
- X_1 .req v2
- Y_1 .req v3
- X_2 .req v4
- Y_2 .req v5
- X_3 .req v6
- Y_3 .req v7
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req v8
-
- // index vector for tbl-based 8-bit rotates
- ROTATE_TABLE .req v9
- ROTATE_TABLE_Q .req q9
-
- // temporary registers
- TMP0 .req v10
- TMP1 .req v11
- TMP2 .req v12
- TMP3 .req v13
-
- // multiplication table for updating XTS tweaks
- GFMUL_TABLE .req v14
- GFMUL_TABLE_Q .req q14
-
- // next XTS tweak value(s)
- TWEAKV_NEXT .req v15
-
- // XTS tweaks for the blocks currently being encrypted/decrypted
- TWEAKV0 .req v16
- TWEAKV1 .req v17
- TWEAKV2 .req v18
- TWEAKV3 .req v19
- TWEAKV4 .req v20
- TWEAKV5 .req v21
- TWEAKV6 .req v22
- TWEAKV7 .req v23
-
- .align 4
-.Lror64_8_table:
- .octa 0x080f0e0d0c0b0a090007060504030201
-.Lror32_8_table:
- .octa 0x0c0f0e0d080b0a090407060500030201
-.Lrol64_8_table:
- .octa 0x0e0d0c0b0a09080f0605040302010007
-.Lrol32_8_table:
- .octa 0x0e0d0c0f0a09080b0605040702010003
-.Lgf128mul_table:
- .octa 0x00000000000000870000000000000001
-.Lgf64mul_table:
- .octa 0x0000000000000000000000002d361b00
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
- */
-.macro _speck_round_128bytes n, lanes
-
- // x = ror(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-
- // x += y
- add X_0.\lanes, X_0.\lanes, Y_0.\lanes
- add X_1.\lanes, X_1.\lanes, Y_1.\lanes
- add X_2.\lanes, X_2.\lanes, Y_2.\lanes
- add X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // y = rol(y, 3)
- shl TMP0.\lanes, Y_0.\lanes, #3
- shl TMP1.\lanes, Y_1.\lanes, #3
- shl TMP2.\lanes, Y_2.\lanes, #3
- shl TMP3.\lanes, Y_3.\lanes, #3
- sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
- sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
- sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
- sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
-
- // y ^= x
- eor Y_0.16b, TMP0.16b, X_0.16b
- eor Y_1.16b, TMP1.16b, X_1.16b
- eor Y_2.16b, TMP2.16b, X_2.16b
- eor Y_3.16b, TMP3.16b, X_3.16b
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n, lanes
-
- // y ^= x
- eor TMP0.16b, Y_0.16b, X_0.16b
- eor TMP1.16b, Y_1.16b, X_1.16b
- eor TMP2.16b, Y_2.16b, X_2.16b
- eor TMP3.16b, Y_3.16b, X_3.16b
-
- // y = ror(y, 3)
- ushr Y_0.\lanes, TMP0.\lanes, #3
- ushr Y_1.\lanes, TMP1.\lanes, #3
- ushr Y_2.\lanes, TMP2.\lanes, #3
- ushr Y_3.\lanes, TMP3.\lanes, #3
- sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
- sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
- sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
- sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // x -= y
- sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
- sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
- sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
- sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x = rol(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-.endm
-
-.macro _next_xts_tweak next, cur, tmp, n
-.if \n == 64
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- sshr \tmp\().2d, \cur\().2d, #63
- and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
- shl \next\().2d, \cur\().2d, #1
- ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
- eor \next\().16b, \next\().16b, \tmp\().16b
-.else
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- ushr \tmp\().2d, \cur\().2d, #62
- shl \next\().2d, \cur\().2d, #2
- tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
- eor \next\().16b, \next\().16b, \tmp\().16b
-.endif
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, lanes, decrypting
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
- mov NROUNDS, NROUNDS /* zero the high 32 bits */
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
- sub ROUND_KEYS, ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
- sub ROUND_KEYS, ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for tbl-based 8-bit rotates
-.if \decrypting
- ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
-.else
- ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
-.endif
-
- // One-time XTS preparation
-.if \n == 64
- // Load first tweak
- ld1 {TWEAKV0.16b}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf128mul_table
-.else
- // Load first tweak
- ld1 {TWEAKV0.8b}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf64mul_table
-
- // Calculate second tweak, packing it together with the first
- ushr TMP0.2d, TWEAKV0.2d, #63
- shl TMP1.2d, TWEAKV0.2d, #1
- tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
- eor TMP0.8b, TMP0.8b, TMP1.8b
- mov TWEAKV0.d[1], TMP0.d[0]
-.endif
-
-.Lnext_128bytes_\@:
-
- // Calculate XTS tweaks for next 128 bytes
- _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
- _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
- _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
- _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
- _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
- _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
- _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
- _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
-
- // Load the next source blocks into {X,Y}[0-3]
- ld1 {X_0.16b-Y_1.16b}, [SRC], #64
- ld1 {X_2.16b-Y_3.16b}, [SRC], #64
-
- // XOR the source blocks with their XTS tweaks
- eor TMP0.16b, X_0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor TMP1.16b, X_1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor TMP2.16b, X_2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor TMP3.16b, X_3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
-
- /*
- * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
- uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
-
- // Do the cipher rounds
- mov x6, ROUND_KEYS
- mov w7, NROUNDS
-.Lnext_round_\@:
-.if \decrypting
- ld1r {ROUND_KEY.\lanes}, [x6]
- sub x6, x6, #( \n / 8 )
- _speck_unround_128bytes \n, \lanes
-.else
- ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
- _speck_round_128bytes \n, \lanes
-.endif
- subs w7, w7, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
- zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
- zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
- zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
- zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
- zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
- zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
- zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
- zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
-
- // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
- eor X_0.16b, TMP0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor X_1.16b, TMP1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor X_2.16b, TMP2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor X_3.16b, TMP3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
- mov TWEAKV0.16b, TWEAKV_NEXT.16b
-
- // Store the ciphertext in the destination buffer
- st1 {X_0.16b-Y_1.16b}, [DST], #64
- st1 {X_2.16b-Y_3.16b}, [DST], #64
-
- // Continue if there are more 128-byte chunks remaining
- subs NBYTES, NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak and return
-.if \n == 64
- st1 {TWEAKV_NEXT.16b}, [TWEAK]
-.else
- st1 {TWEAKV_NEXT.8b}, [TWEAK]
-.endif
- ret
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
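The round function the deleted file vectorizes is tiny in scalar form. As a reference sketch (not part of the patch), the Speck128 round and its inverse described in the _speck_round_128bytes comments look like this in C, with the NEON code applying the same five operations to eight Speck128 blocks (sixteen Speck64 blocks) per iteration; rol64()/ror64() are from linux/bitops.h:

	static inline void speck128_round(u64 *x, u64 *y, u64 k)
	{
		*x = ror64(*x, 8);	/* x = ror(x, 8) */
		*x += *y;		/* x += y */
		*x ^= k;		/* x ^= k */
		*y = rol64(*y, 3);	/* y = rol(y, 3) */
		*y ^= *x;		/* y ^= x */
	}

	static inline void speck128_unround(u64 *x, u64 *y, u64 k)
	{
		*y ^= *x;
		*y = ror64(*y, 3);
		*x ^= k;
		*x -= *y;
		*x = rol64(*x, 8);	/* undoes the initial rotate */
	}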
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
deleted file mode 100644
index c7d18569d408..000000000000
--- a/arch/arm64/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,308 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- * (64-bit version; based on the 32-bit version)
- *
- * Copyright (c) 2018 Google, Inc
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- const struct speck128_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct blkcipher_walk walk;
- le128 tweak;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble((be128 *)&tweak, (const be128 *)&tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck128_xts_crypt(desc, dst, src, nbytes,
- crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck128_xts_crypt(desc, dst, src, nbytes,
- crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- if (keylen % 2)
- return -EINVAL;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- const struct speck64_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct blkcipher_walk walk;
- __le64 tweak;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck64_xts_crypt(desc, dst, src, nbytes,
- crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck64_xts_crypt(desc, dst, src, nbytes,
- crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- if (keylen % 2)
- return -EINVAL;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct crypto_alg speck_algs[] = {
- {
- .cra_name = "xts(speck128)",
- .cra_driver_name = "xts-speck128-neon",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = SPECK128_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }
- }
- }, {
- .cra_name = "xts(speck64)",
- .cra_driver_name = "xts-speck64-neon",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = SPECK64_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
- }
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_ASIMD))
- return -ENODEV;
- return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 70fd9ffb58cf..213c78f84e56 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += bug.h
generic-y += bugs.h
-generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
@@ -14,6 +13,7 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += ftrace.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -36,7 +36,6 @@ generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
-generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/app_api.h b/arch/arm64/include/asm/app_api.h
new file mode 100644
index 000000000000..0e6a469cd683
--- /dev/null
+++ b/arch/arm64/include/asm/app_api.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_APP_API_H
+#define __ASM_APP_API_H
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+
+#define APP_SETTING_BIT 30
+#define MAX_ENTRIES 10
+
+/*
+ * APIs to set / clear the app setting bits
+ * in the register.
+ */
+#ifdef CONFIG_MSM_APP_API
+extern void set_app_setting_bit(uint32_t bit);
+extern void clear_app_setting_bit(uint32_t bit);
+extern void set_app_setting_bit_for_32bit_apps(void);
+extern void clear_app_setting_bit_for_32bit_apps(void);
+#else
+static inline void set_app_setting_bit(uint32_t bit) {}
+static inline void clear_app_setting_bit(uint32_t bit) {}
+static inline void set_app_setting_bit_for_32bit_apps(void) {}
+static inline void clear_app_setting_bit_for_32bit_apps(void) {}
+#endif
+
+#ifdef CONFIG_MSM_APP_SETTINGS
+extern void switch_app_setting_bit(struct task_struct *prev,
+ struct task_struct *next);
+extern void switch_32bit_app_setting_bit(struct task_struct *prev,
+ struct task_struct *next);
+extern void apply_app_setting_bit(struct file *file);
+extern bool use_app_setting;
+extern bool use_32bit_app_setting;
+extern bool use_32bit_app_setting_pro;
+#endif
+
+#endif
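The #else branch follows the usual kernel stub pattern: with CONFIG_MSM_APP_API disabled the functions collapse to empty inlines, so callers never need guards of their own. A hypothetical call site (function name invented for the sketch):

	static void example_enable_app_setting(void)
	{
		/* compiles away entirely when CONFIG_MSM_APP_API=n */
		set_app_setting_bit(APP_SETTING_BIT);
	}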
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8ec88e5b290f..30cf6f5961ef 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -103,7 +103,8 @@ static inline u64 gic_read_iar_common(void)
u64 irqstat;
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
- dsb(sy);
+ /* As per the architecture specification */
+ mb();
return irqstat;
}
@@ -132,6 +133,9 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
static inline void gic_write_pmr(u32 val)
{
asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+ /* As per the architecture specification */
+ isb();
+ mb();
}
static inline void gic_write_ctlr(u32 val)
@@ -149,6 +153,9 @@ static inline void gic_write_grpen1(u32 val)
static inline void gic_write_sgi1r(u64 val)
{
asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+ /* As per the architecture specification */
+ isb();
+ mb();
}
static inline u32 gic_read_sre(void)
@@ -165,8 +172,8 @@ static inline void gic_write_sre(u32 val)
isb();
}
-#define gic_read_typer(c) readq_relaxed(c)
-#define gic_write_irouter(v, c) writeq_relaxed(v, c)
+#define gic_read_typer(c) readq_relaxed_no_log(c)
+#define gic_write_irouter(v, c) writeq_relaxed_no_log(v, c)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index fbe0ca31a99c..902db125d994 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -20,6 +20,7 @@
#define __ASM_ARCH_TIMER_H
#include <asm/barrier.h>
+#include <asm/sysreg.h>
#include <linux/bug.h>
#include <linux/init.h>
@@ -38,19 +39,19 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
+ write_sysreg(val, cntp_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
- asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
+ write_sysreg(val, cntp_tval_el0);
break;
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
+ write_sysreg(val, cntv_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
- asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
+ write_sysreg(val, cntv_tval_el0);
break;
}
}
@@ -61,48 +62,38 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
- u32 val;
-
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
- break;
+ return read_sysreg(cntp_ctl_el0);
case ARCH_TIMER_REG_TVAL:
- asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
- break;
+ return read_sysreg(cntp_tval_el0);
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val));
- break;
+ return read_sysreg(cntv_ctl_el0);
case ARCH_TIMER_REG_TVAL:
- asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
- break;
+ return read_sysreg(cntv_tval_el0);
}
}
- return val;
+ BUG();
}
static inline u32 arch_timer_get_cntfrq(void)
{
- u32 val;
- asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
- return val;
+ return read_sysreg(cntfrq_el0);
}
static inline u32 arch_timer_get_cntkctl(void)
{
- u32 cntkctl;
- asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- return cntkctl;
+ return read_sysreg(cntkctl_el1);
}
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
- asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
+ write_sysreg(cntkctl, cntkctl_el1);
}
static inline u64 arch_counter_get_cntpct(void)
@@ -117,10 +108,15 @@ static inline u64 arch_counter_get_cntpct(void)
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
-
isb();
- asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
-
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define L32_BITS 0x00000000FFFFFFFF
+ do {
+ cval = read_sysreg(cntvct_el0);
+ } while ((cval & L32_BITS) == L32_BITS);
+#else
+ cval = read_sysreg(cntvct_el0);
+#endif
return cval;
}
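When CONFIG_MSM_TIMER_LEAP is set, the loop above re-reads CNTVCT_EL0 whenever its low 32 bits are all ones, i.e. immediately before a carry into the upper word; presumably this works around hardware that can return a corrupted count at that 32-bit rollover. Since the counter leaves the all-ones state after one tick, the retry is momentary.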
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1a1516fabd07..7dcfd83ff5e8 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -64,6 +64,18 @@
.endm
/*
+ * Save/disable and restore interrupts.
+ */
+ .macro save_and_disable_irqs, olddaif
+ mrs \olddaif, daif
+ disable_irq
+ .endm
+
+ .macro restore_irqs, olddaif
+ msr daif, \olddaif
+ .endm
+
+/*
* Enable and disable debug exceptions.
*/
.macro disable_dbg
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5082b30bc2c0..f9359d32fae5 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -18,17 +18,17 @@
#include <asm/cachetype.h>
-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT 6
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
- * sure that all such allocations are cache aligned. Otherwise,
- * unrelated code may cause parts of the buffer to be read into the
- * cache before the transfer is done, causing old data to be seen by
- * the CPU.
+ * sure that all such allocations are aligned to the maximum *known*
+ * cache line size on ARMv8 systems. Otherwise, unrelated code may
+ * cause parts of the buffer to be read into the cache before the
+ * transfer is done, causing old data to be seen by the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)
#ifndef __ASSEMBLY__
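ARCH_DMA_MINALIGN stays at 128 because some ARMv8 parts have 128-byte outer cache lines even though L1_CACHE_SHIFT now assumes 64. A sketch of the hazard the comment describes (struct and helper invented for illustration):

	struct bad_dev_state {
		spinlock_t lock;	/* CPU access here can pull the whole  */
		u8 rx_buf[32];		/* line, including the DMA target,     */
	};				/* back into the cache mid-transfer    */

	/* Safe: a separate kmalloc() allocation is at least
	 * ARCH_DMA_MINALIGN-aligned, so no unrelated data shares
	 * its cache lines. */
	static void *alloc_rx_buf(gfp_t gfp)
	{
		return kmalloc(32, gfp);
	}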
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 22dda613f9c9..df06d37374cc 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,6 +40,10 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT or ASID-tagged VIVT I-cache.
*
+ * flush_cache_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
+ *
* flush_cache_mm(mm)
*
* Clean and invalidate all user space cache entries
@@ -65,6 +69,7 @@
* - kaddr - page address
* - size - region size
*/
+extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
@@ -86,6 +91,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
/*
* Copy user data from/to a page which is mapped into a different
@@ -155,5 +166,21 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_KERNEL_TEXT_RDONLY
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_ro(void) { }
+#endif
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
+#ifdef CONFIG_FREE_PAGES_RDONLY
+#define mark_addr_rdonly(a) set_memory_ro((unsigned long)a, 1);
+#define mark_addr_rdwrite(a) set_memory_rw((unsigned long)a, 1);
+#else
+#define mark_addr_rdonly(a)
+#define mark_addr_rdwrite(a)
+#endif
#endif
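The dmac_* aliases added above mirror the 32-bit ARM naming for the by-range cache maintenance primitives. Hypothetical driver usage for a buffer shared with a non-coherent device (device helpers invented for the sketch):

	static void transfer_one(struct my_dev *dev, void *buf, size_t len)
	{
		dmac_clean_range(buf, buf + len);  /* write back CPU-dirty lines */
		start_device_dma(dev, buf, len);   /* device owns the buffer now */
		wait_device_dma(dev);
		dmac_inv_range(buf, buf + len);    /* drop stale cached copies   */
	}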
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
new file mode 100644
index 000000000000..09f65339d66d
--- /dev/null
+++ b/arch/arm64/include/asm/checksum.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CHECKSUM_H
+#define __ASM_CHECKSUM_H
+
+#include <linux/types.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum += (sum >> 16) | (sum << 16);
+ return ~(__force __sum16)(sum >> 16);
+}
+#define csum_fold csum_fold
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ __uint128_t tmp;
+ u64 sum;
+
+ tmp = *(const __uint128_t *)iph;
+ iph += 16;
+ ihl -= 4;
+ tmp += ((tmp >> 64) | (tmp << 64));
+ sum = tmp >> 64;
+ do {
+ sum += *(const u32 *)iph;
+ iph += 4;
+ } while (--ihl);
+
+ sum += ((sum >> 32) | (sum << 32));
+ return csum_fold(sum >> 32);
+}
+#define ip_fast_csum ip_fast_csum
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CHECKSUM_H */
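This is the arch-specific header that replaces the asm-generic one dropped from Kbuild above; ip_fast_csum() runs sixteen header bytes through one __uint128_t addition before falling back to 32-bit words. For comparison, a sketch of the plain RFC 1071 ones'-complement sum it optimizes:

	static u16 ip_slow_csum(const void *iph, unsigned int ihl)
	{
		const u16 *p = iph;
		u32 sum = 0;
		unsigned int i;

		for (i = 0; i < ihl * 2; i++)	/* ihl counts 32-bit words */
			sum += p[i];
		while (sum >> 16)		/* fold the end-around carries */
			sum = (sum & 0xffff) + (sum >> 16);
		return ~sum;			/* truncated to 16 bits */
	}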
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 67fff4a9212c..2c94aecc8992 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -33,17 +33,20 @@
#define ARM64_HAS_NO_HW_PREFETCH 8
#define ARM64_HAS_UAO 9
#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_NCAPS 24
+#define ARM64_WORKAROUND_CAVIUM_27456 11
+#define ARM64_HAS_VIRT_HOST_EXTN 12
+#define ARM64_HARDEN_BRANCH_PREDICTOR 13
+#define ARM64_UNMAP_KERNEL_AT_EL0 14
+#define ARM64_HAS_32BIT_EL0 15
+#define ARM64_NCAPS 16
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+extern const char *machine_name;
+
/* CPU feature register tracking */
enum ftr_type {
FTR_EXACT, /* Use a predefined safe value */
@@ -177,7 +180,9 @@ void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_errata(void);
+void __init enable_errata_workarounds(void);
#ifdef CONFIG_HOTPLUG_CPU
void verify_local_cpu_capabilities(void);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 0a23fb0600c8..f857adc51b0f 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -32,6 +32,10 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+#define MMFR0_16KGRAN_SIZE 15
+#define MMFR0_16KGRAN_SHFT 20
+#define MMFR0_EL1_16KGRAN_MASK (MMFR0_16KGRAN_SIZE << MMFR0_16KGRAN_SHFT)
+
#define MIDR_REVISION_MASK 0xf
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
#define MIDR_PARTNUM_SHIFT 4
@@ -78,12 +82,19 @@
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
+#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
-#define ARM_CPU_PART_CORTEX_A57 0xD07
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A55 0xD05
+#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A72 0xD08
+#define ARM_CPU_PART_CORTEX_A73 0xD09
+#define ARM_CPU_PART_CORTEX_A75 0xD0A
+#define ARM_CPU_PART_KRYO2XX_GOLD 0x800
+#define ARM_CPU_PART_KRYO2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO 0x200
#define APM_CPU_PART_POTENZA 0x000
@@ -92,7 +103,15 @@
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+ MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+#define MIDR_KRYO2XX_GOLD \
+ MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_GOLD)
+#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#ifndef __ASSEMBLY__
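For orientation, assuming the 4.4-era MIDR_CPU_MODEL() layout (implementer in bits [31:24], a fixed architecture nibble 0xf in [19:16], part number in [15:4]), the new Qualcomm entries work out as, for example:

	MIDR_KRYO2XX_GOLD = (0x51 << 24) | (0xf << 16) | (0x800 << 4)
	                  = 0x510f8000

Variant and revision occupy the remaining bits and are masked off when matching.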
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7c876c..4b6b3f72a215 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -66,6 +66,11 @@
#define CACHE_FLUSH_IS_SAFE 1
+/* kprobes BRK opcodes with ESR encoding */
+#define BRK64_ESR_MASK 0xFFFF
+#define BRK64_ESR_KPROBES 0x0004
+#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
#define DBG_ESR_EVT_VECC 0x5
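The kprobes opcode is an ordinary BRK with a recognizable immediate: BRK #imm16 encodes the immediate in bits [20:5], so assuming the usual AARCH64_BREAK_MON value of 0xd4200000 from insn.h,

	BRK64_OPCODE_KPROBES = 0xd4200000 | (0x0004 << 5) = 0xd4200080

i.e. brk #0x4, which the debug exception handler can distinguish from other BRK users via the ESR immediate.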
diff --git a/arch/arm64/include/asm/debugv8.h b/arch/arm64/include/asm/debugv8.h
new file mode 100644
index 000000000000..6a2538279f39
--- /dev/null
+++ b/arch/arm64/include/asm/debugv8.h
@@ -0,0 +1,229 @@
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_DEBUGV8_H
+#define __ASM_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for aarch 64 bit */
+#define dbg_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for aarch 64 bit */
+#define dbg_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for aarch 64 bit */
+#define dbg_write(val, reg) WSYS_##reg(val)
+
+#define MRSL(reg) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+#define MRSQ(reg) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+#define MSR(val, reg) \
+({ \
+asm volatile("msr "#reg", %0" : : "r" (val)); \
+})
+
+/*
+ * Debug Feature Register
+ *
+ * Read only
+ */
+#define RSYSQ_ID_AA64DFR0_EL1() MRSQ(ID_AA64DFR0_EL1)
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv8
+ *
+ * Read only
+ * MDCCSR_EL0, MDRAR_EL1, OSLSR_EL1, DBGDTRRX_EL0, DBGAUTHSTATUS_EL1
+ *
+ * Write only
+ * DBGDTRTX_EL0, OSLAR_EL1
+ */
+/* 32 bit registers */
+#define RSYSL_DBGDTRRX_EL0() MRSL(DBGDTRRX_EL0)
+#define RSYSL_MDCCSR_EL0() MRSL(MDCCSR_EL0)
+#define RSYSL_MDSCR_EL1() MRSL(MDSCR_EL1)
+#define RSYSL_OSDTRRX_EL1() MRSL(OSDTRRX_EL1)
+#define RSYSL_OSDTRTX_EL1() MRSL(OSDTRTX_EL1)
+#define RSYSL_OSDLR_EL1() MRSL(OSDLR_EL1)
+#define RSYSL_OSLSR_EL1() MRSL(OSLSR_EL1)
+#define RSYSL_MDCCINT_EL1() MRSL(MDCCINT_EL1)
+#define RSYSL_OSECCR_EL1() MRSL(OSECCR_EL1)
+#define RSYSL_DBGPRCR_EL1() MRSL(DBGPRCR_EL1)
+#define RSYSL_DBGBCR0_EL1() MRSL(DBGBCR0_EL1)
+#define RSYSL_DBGBCR1_EL1() MRSL(DBGBCR1_EL1)
+#define RSYSL_DBGBCR2_EL1() MRSL(DBGBCR2_EL1)
+#define RSYSL_DBGBCR3_EL1() MRSL(DBGBCR3_EL1)
+#define RSYSL_DBGBCR4_EL1() MRSL(DBGBCR4_EL1)
+#define RSYSL_DBGBCR5_EL1() MRSL(DBGBCR5_EL1)
+#define RSYSL_DBGBCR6_EL1() MRSL(DBGBCR6_EL1)
+#define RSYSL_DBGBCR7_EL1() MRSL(DBGBCR7_EL1)
+#define RSYSL_DBGBCR8_EL1() MRSL(DBGBCR8_EL1)
+#define RSYSL_DBGBCR9_EL1() MRSL(DBGBCR9_EL1)
+#define RSYSL_DBGBCR10_EL1() MRSL(DBGBCR10_EL1)
+#define RSYSL_DBGBCR11_EL1() MRSL(DBGBCR11_EL1)
+#define RSYSL_DBGBCR12_EL1() MRSL(DBGBCR12_EL1)
+#define RSYSL_DBGBCR13_EL1() MRSL(DBGBCR13_EL1)
+#define RSYSL_DBGBCR14_EL1() MRSL(DBGBCR14_EL1)
+#define RSYSL_DBGBCR15_EL1() MRSL(DBGBCR15_EL1)
+#define RSYSL_DBGWCR0_EL1() MRSL(DBGWCR0_EL1)
+#define RSYSL_DBGWCR1_EL1() MRSL(DBGWCR1_EL1)
+#define RSYSL_DBGWCR2_EL1() MRSL(DBGWCR2_EL1)
+#define RSYSL_DBGWCR3_EL1() MRSL(DBGWCR3_EL1)
+#define RSYSL_DBGWCR4_EL1() MRSL(DBGWCR4_EL1)
+#define RSYSL_DBGWCR5_EL1() MRSL(DBGWCR5_EL1)
+#define RSYSL_DBGWCR6_EL1() MRSL(DBGWCR6_EL1)
+#define RSYSL_DBGWCR7_EL1() MRSL(DBGWCR7_EL1)
+#define RSYSL_DBGWCR8_EL1() MRSL(DBGWCR8_EL1)
+#define RSYSL_DBGWCR9_EL1() MRSL(DBGWCR9_EL1)
+#define RSYSL_DBGWCR10_EL1() MRSL(DBGWCR10_EL1)
+#define RSYSL_DBGWCR11_EL1() MRSL(DBGWCR11_EL1)
+#define RSYSL_DBGWCR12_EL1() MRSL(DBGWCR12_EL1)
+#define RSYSL_DBGWCR13_EL1() MRSL(DBGWCR13_EL1)
+#define RSYSL_DBGWCR14_EL1() MRSL(DBGWCR14_EL1)
+#define RSYSL_DBGWCR15_EL1() MRSL(DBGWCR15_EL1)
+#define RSYSL_DBGCLAIMSET_EL1() MRSL(DBGCLAIMSET_EL1)
+#define RSYSL_DBGCLAIMCLR_EL1() MRSL(DBGCLAIMCLR_EL1)
+#define RSYSL_DBGAUTHSTATUS_EL1() MRSL(DBGAUTHSTATUS_EL1)
+#define RSYSL_DBGVCR32_EL2() MRSL(DBGVCR32_EL2)
+#define RSYSL_MDCR_EL2() MRSL(MDCR_EL2)
+#define RSYSL_MDCR_EL3() MRSL(MDCR_EL3)
+/* 64 bit registers */
+#define RSYSQ_DBGDTR_EL0() MRSQ(DBGDTR_EL0)
+#define RSYSQ_MDRAR_EL1() MRSQ(MDRAR_EL1)
+#define RSYSQ_DBGBVR0_EL1() MRSQ(DBGBVR0_EL1)
+#define RSYSQ_DBGBVR1_EL1() MRSQ(DBGBVR1_EL1)
+#define RSYSQ_DBGBVR2_EL1() MRSQ(DBGBVR2_EL1)
+#define RSYSQ_DBGBVR3_EL1() MRSQ(DBGBVR3_EL1)
+#define RSYSQ_DBGBVR4_EL1() MRSQ(DBGBVR4_EL1)
+#define RSYSQ_DBGBVR5_EL1() MRSQ(DBGBVR5_EL1)
+#define RSYSQ_DBGBVR6_EL1() MRSQ(DBGBVR6_EL1)
+#define RSYSQ_DBGBVR7_EL1() MRSQ(DBGBVR7_EL1)
+#define RSYSQ_DBGBVR8_EL1() MRSQ(DBGBVR8_EL1)
+#define RSYSQ_DBGBVR9_EL1() MRSQ(DBGBVR9_EL1)
+#define RSYSQ_DBGBVR10_EL1() MRSQ(DBGBVR10_EL1)
+#define RSYSQ_DBGBVR11_EL1() MRSQ(DBGBVR11_EL1)
+#define RSYSQ_DBGBVR12_EL1() MRSQ(DBGBVR12_EL1)
+#define RSYSQ_DBGBVR13_EL1() MRSQ(DBGBVR13_EL1)
+#define RSYSQ_DBGBVR14_EL1() MRSQ(DBGBVR14_EL1)
+#define RSYSQ_DBGBVR15_EL1() MRSQ(DBGBVR15_EL1)
+#define RSYSQ_DBGWVR0_EL1() MRSQ(DBGWVR0_EL1)
+#define RSYSQ_DBGWVR1_EL1() MRSQ(DBGWVR1_EL1)
+#define RSYSQ_DBGWVR2_EL1() MRSQ(DBGWVR2_EL1)
+#define RSYSQ_DBGWVR3_EL1() MRSQ(DBGWVR3_EL1)
+#define RSYSQ_DBGWVR4_EL1() MRSQ(DBGWVR4_EL1)
+#define RSYSQ_DBGWVR5_EL1() MRSQ(DBGWVR5_EL1)
+#define RSYSQ_DBGWVR6_EL1() MRSQ(DBGWVR6_EL1)
+#define RSYSQ_DBGWVR7_EL1() MRSQ(DBGWVR7_EL1)
+#define RSYSQ_DBGWVR8_EL1() MRSQ(DBGWVR8_EL1)
+#define RSYSQ_DBGWVR9_EL1() MRSQ(DBGWVR9_EL1)
+#define RSYSQ_DBGWVR10_EL1() MRSQ(DBGWVR10_EL1)
+#define RSYSQ_DBGWVR11_EL1() MRSQ(DBGWVR11_EL1)
+#define RSYSQ_DBGWVR12_EL1() MRSQ(DBGWVR12_EL1)
+#define RSYSQ_DBGWVR13_EL1() MRSQ(DBGWVR13_EL1)
+#define RSYSQ_DBGWVR14_EL1() MRSQ(DBGWVR14_EL1)
+#define RSYSQ_DBGWVR15_EL1() MRSQ(DBGWVR15_EL1)
+
+/* 32 bit registers */
+#define WSYS_DBGDTRTX_EL0(val) MSR(val, DBGDTRTX_EL0)
+#define WSYS_MDCCINT_EL1(val) MSR(val, MDCCINT_EL1)
+#define WSYS_MDSCR_EL1(val) MSR(val, MDSCR_EL1)
+#define WSYS_OSDTRRX_EL1(val) MSR(val, OSDTRRX_EL1)
+#define WSYS_OSDTRTX_EL1(val) MSR(val, OSDTRTX_EL1)
+#define WSYS_OSDLR_EL1(val) MSR(val, OSDLR_EL1)
+#define WSYS_OSECCR_EL1(val) MSR(val, OSECCR_EL1)
+#define WSYS_DBGPRCR_EL1(val) MSR(val, DBGPRCR_EL1)
+#define WSYS_DBGBCR0_EL1(val) MSR(val, DBGBCR0_EL1)
+#define WSYS_DBGBCR1_EL1(val) MSR(val, DBGBCR1_EL1)
+#define WSYS_DBGBCR2_EL1(val) MSR(val, DBGBCR2_EL1)
+#define WSYS_DBGBCR3_EL1(val) MSR(val, DBGBCR3_EL1)
+#define WSYS_DBGBCR4_EL1(val) MSR(val, DBGBCR4_EL1)
+#define WSYS_DBGBCR5_EL1(val) MSR(val, DBGBCR5_EL1)
+#define WSYS_DBGBCR6_EL1(val) MSR(val, DBGBCR6_EL1)
+#define WSYS_DBGBCR7_EL1(val) MSR(val, DBGBCR7_EL1)
+#define WSYS_DBGBCR8_EL1(val) MSR(val, DBGBCR8_EL1)
+#define WSYS_DBGBCR9_EL1(val) MSR(val, DBGBCR9_EL1)
+#define WSYS_DBGBCR10_EL1(val) MSR(val, DBGBCR10_EL1)
+#define WSYS_DBGBCR11_EL1(val) MSR(val, DBGBCR11_EL1)
+#define WSYS_DBGBCR12_EL1(val) MSR(val, DBGBCR12_EL1)
+#define WSYS_DBGBCR13_EL1(val) MSR(val, DBGBCR13_EL1)
+#define WSYS_DBGBCR14_EL1(val) MSR(val, DBGBCR14_EL1)
+#define WSYS_DBGBCR15_EL1(val) MSR(val, DBGBCR15_EL1)
+#define WSYS_DBGWCR0_EL1(val) MSR(val, DBGWCR0_EL1)
+#define WSYS_DBGWCR1_EL1(val) MSR(val, DBGWCR1_EL1)
+#define WSYS_DBGWCR2_EL1(val) MSR(val, DBGWCR2_EL1)
+#define WSYS_DBGWCR3_EL1(val) MSR(val, DBGWCR3_EL1)
+#define WSYS_DBGWCR4_EL1(val) MSR(val, DBGWCR4_EL1)
+#define WSYS_DBGWCR5_EL1(val) MSR(val, DBGWCR5_EL1)
+#define WSYS_DBGWCR6_EL1(val) MSR(val, DBGWCR6_EL1)
+#define WSYS_DBGWCR7_EL1(val) MSR(val, DBGWCR7_EL1)
+#define WSYS_DBGWCR8_EL1(val) MSR(val, DBGWCR8_EL1)
+#define WSYS_DBGWCR9_EL1(val) MSR(val, DBGWCR9_EL1)
+#define WSYS_DBGWCR10_EL1(val) MSR(val, DBGWCR10_EL1)
+#define WSYS_DBGWCR11_EL1(val) MSR(val, DBGWCR11_EL1)
+#define WSYS_DBGWCR12_EL1(val) MSR(val, DBGWCR12_EL1)
+#define WSYS_DBGWCR13_EL1(val) MSR(val, DBGWCR13_EL1)
+#define WSYS_DBGWCR14_EL1(val) MSR(val, DBGWCR14_EL1)
+#define WSYS_DBGWCR15_EL1(val) MSR(val, DBGWCR15_EL1)
+#define WSYS_DBGCLAIMSET_EL1(val) MSR(val, DBGCLAIMSET_EL1)
+#define WSYS_DBGCLAIMCLR_EL1(val) MSR(val, DBGCLAIMCLR_EL1)
+#define WSYS_OSLAR_EL1(val) MSR(val, OSLAR_EL1)
+#define WSYS_DBGVCR32_EL2(val) MSR(val, DBGVCR32_EL2)
+#define WSYS_MDCR_EL2(val) MSR(val, MDCR_EL2)
+#define WSYS_MDCR_EL3(val) MSR(val, MDCR_EL3)
+/* 64 bit registers */
+#define WSYS_DBGDTR_EL0(val) MSR(val, DBGDTR_EL0)
+#define WSYS_DBGBVR0_EL1(val) MSR(val, DBGBVR0_EL1)
+#define WSYS_DBGBVR1_EL1(val) MSR(val, DBGBVR1_EL1)
+#define WSYS_DBGBVR2_EL1(val) MSR(val, DBGBVR2_EL1)
+#define WSYS_DBGBVR3_EL1(val) MSR(val, DBGBVR3_EL1)
+#define WSYS_DBGBVR4_EL1(val) MSR(val, DBGBVR4_EL1)
+#define WSYS_DBGBVR5_EL1(val) MSR(val, DBGBVR5_EL1)
+#define WSYS_DBGBVR6_EL1(val) MSR(val, DBGBVR6_EL1)
+#define WSYS_DBGBVR7_EL1(val) MSR(val, DBGBVR7_EL1)
+#define WSYS_DBGBVR8_EL1(val) MSR(val, DBGBVR8_EL1)
+#define WSYS_DBGBVR9_EL1(val) MSR(val, DBGBVR9_EL1)
+#define WSYS_DBGBVR10_EL1(val) MSR(val, DBGBVR10_EL1)
+#define WSYS_DBGBVR11_EL1(val) MSR(val, DBGBVR11_EL1)
+#define WSYS_DBGBVR12_EL1(val) MSR(val, DBGBVR12_EL1)
+#define WSYS_DBGBVR13_EL1(val) MSR(val, DBGBVR13_EL1)
+#define WSYS_DBGBVR14_EL1(val) MSR(val, DBGBVR14_EL1)
+#define WSYS_DBGBVR15_EL1(val) MSR(val, DBGBVR15_EL1)
+#define WSYS_DBGWVR0_EL1(val) MSR(val, DBGWVR0_EL1)
+#define WSYS_DBGWVR1_EL1(val) MSR(val, DBGWVR1_EL1)
+#define WSYS_DBGWVR2_EL1(val) MSR(val, DBGWVR2_EL1)
+#define WSYS_DBGWVR3_EL1(val) MSR(val, DBGWVR3_EL1)
+#define WSYS_DBGWVR4_EL1(val) MSR(val, DBGWVR4_EL1)
+#define WSYS_DBGWVR5_EL1(val) MSR(val, DBGWVR5_EL1)
+#define WSYS_DBGWVR6_EL1(val) MSR(val, DBGWVR6_EL1)
+#define WSYS_DBGWVR7_EL1(val) MSR(val, DBGWVR7_EL1)
+#define WSYS_DBGWVR8_EL1(val) MSR(val, DBGWVR8_EL1)
+#define WSYS_DBGWVR9_EL1(val) MSR(val, DBGWVR9_EL1)
+#define WSYS_DBGWVR10_EL1(val) MSR(val, DBGWVR10_EL1)
+#define WSYS_DBGWVR11_EL1(val) MSR(val, DBGWVR11_EL1)
+#define WSYS_DBGWVR12_EL1(val) MSR(val, DBGWVR12_EL1)
+#define WSYS_DBGWVR13_EL1(val) MSR(val, DBGWVR13_EL1)
+#define WSYS_DBGWVR14_EL1(val) MSR(val, DBGWVR14_EL1)
+#define WSYS_DBGWVR15_EL1(val) MSR(val, DBGWVR15_EL1)
+
+#endif
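
The dbg_readl()/dbg_write() wrappers token-paste the register name onto the RSYSL_/WSYS_ accessors above. A hypothetical caller that sets the monitor debug enable bit (MDE, bit 15 of MDSCR_EL1 per the ARMv8 ARM) would look like this sketch:

    #include <asm/debugv8.h>

    static void example_enable_monitor_debug(void)
    {
            uint32_t mdscr = dbg_readl(MDSCR_EL1);  /* expands to RSYSL_MDSCR_EL1() */

            mdscr |= (1 << 15);                     /* MDSCR_EL1.MDE */
            dbg_write(mdscr, MDSCR_EL1);            /* expands to WSYS_MDSCR_EL1() */
    }
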
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 243ef256b8c9..ffa5af4fcb7e 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -17,14 +17,24 @@
#define __ASM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
bool dma_coherent;
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct pdev_archdata {
+ u64 dma_mask;
};
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
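
Because to_dma_iommu_mapping() degrades to NULL when CONFIG_ARM64_DMA_USE_IOMMU is off, callers can test for an IOMMU-backed device without further ifdefs; a minimal illustrative helper:

    static bool dev_uses_iommu_dma(struct device *dev)
    {
            return to_dma_iommu_mapping(dev) != NULL;
    }
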
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/include/asm/dma-contiguous.h
index fced77f0fd3a..e77da2002bc9 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013,2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,12 +11,14 @@
* GNU General Public License for more details.
*/
-/dts-v1/;
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
-#include "msm8916-mtp.dtsi"
+#ifdef __KERNEL__
-/ {
- model = "Qualcomm Technologies, Inc. MSM 8916 MTP";
- compatible = "qcom,msm8916-mtp", "qcom,msm8916-mtp-smb1360",
- "qcom,msm8916", "qcom,mtp";
-};
+#include <linux/types.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..c16cf151f689
--- /dev/null
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -0,0 +1,64 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/err.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kref.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+ struct dma_fast_smmu_mapping *fast;
+#endif
+};
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#else /* !CONFIG_ARM64_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+ return ERR_PTR(-ENOMEM);
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif /* CONFIG_ARM64_DMA_USE_IOMMU */
+
+#endif /* __KERNEL__ */
+#endif
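
The API mirrors the 32-bit ARM dma-iommu interface. A sketch of the usual create/attach flow, where MY_IOVA_BASE and MY_IOVA_SIZE are placeholder values for the device's IOVA window:

    #define MY_IOVA_BASE 0x10000000         /* placeholder window base */
    #define MY_IOVA_SIZE 0x10000000         /* placeholder window size (256 MB) */

    static int example_attach(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int ret;

            mapping = arm_iommu_create_mapping(dev->bus, MY_IOVA_BASE, MY_IOVA_SIZE);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);        /* -ENOMEM stub when disabled */

            ret = arm_iommu_attach_device(dev, mapping);
            if (ret)
                    arm_iommu_release_mapping(mapping);
            return ret;
    }
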
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 61e08f360e31..b002c5e3809c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -27,7 +27,7 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops dummy_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
@@ -39,7 +39,7 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
return &dummy_dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
@@ -47,6 +47,12 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->archdata.dma_ops = dma_ops;
+}
+
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
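
set_dma_ops() gives IOMMU or bus code a sanctioned way to install per-device ops instead of poking archdata directly. A hedged sketch, where my_iommu_dma_ops stands in for a fully populated table:

    static const struct dma_map_ops my_iommu_dma_ops;   /* placeholder ops table */

    static void example_takeover(struct device *dev)
    {
            set_dma_ops(dev, &my_iommu_dma_ops);    /* get_dma_ops(dev) now returns these */
    }
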
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi b/arch/arm64/include/asm/edac.h
index a1aa0b201e92..1bea82beb4f7 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
+++ b/arch/arm64/include/asm/edac.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,24 +10,19 @@
* GNU General Public License for more details.
*/
-#include "msm8916.dtsi"
-#include "pm8916.dtsi"
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
-/ {
- aliases {
- serial0 = &blsp1_uart2;
- };
+#if defined(CONFIG_EDAC_CORTEX_ARM64) && \
+ !defined(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)
+void arm64_check_cache_ecc(void *info);
+#else
+static inline void arm64_check_cache_ecc(void *info) { }
+#endif
- chosen {
- stdout-path = "serial0";
- };
+static inline void atomic_scrub(void *addr, int size)
+{
+}
- soc {
- serial@78b0000 {
- status = "okay";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_uart2_default>;
- pinctrl-1 = <&blsp1_uart2_sleep>;
- };
- };
-};
+#endif
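
Since arm64_check_cache_ecc() takes a void * it can be used directly as an SMP callback; a plausible, purely hypothetical EDAC poll path that runs the check on every CPU, falling back to the stub above when the config options are off:

    #include <linux/smp.h>
    #include <asm/edac.h>

    static void example_poll_cache_ecc(void)
    {
            on_each_cpu(arm64_check_cache_ecc, NULL, 1);    /* wait for all CPUs */
    }
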
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index f9d64ed4fd2b..b9876364de1a 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -23,6 +23,7 @@
*/
#include <asm/ptrace.h>
#include <asm/user.h>
+#include <asm/fpsimd.h>
/*
* AArch64 static relocation types.
@@ -140,11 +141,12 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
-#define ARCH_DLINFO \
+#define _SET_AUX_ENT_VDSO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
- (elf_addr_t)current->mm->context.vdso); \
+ (Elf64_Off)current->mm->context.vdso); \
} while (0)
+#define ARCH_DLINFO _SET_AUX_ENT_VDSO
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
@@ -182,8 +184,16 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
((x)->e_flags & EF_ARM_EABI_MASK))
#define compat_start_thread compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex) set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex) \
+do { \
+ set_thread_flag(TIF_32BIT); \
+} while (0)
+
+#ifdef CONFIG_VDSO32
+#define COMPAT_ARCH_DLINFO _SET_AUX_ENT_VDSO
+#else
#define COMPAT_ARCH_DLINFO
+#endif
extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages \
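
Either way the entry surfaces to userspace as AT_SYSINFO_EHDR; a small userspace probe (standard glibc API, nothing from this patch) shows what the loader sees:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

            printf("vDSO ELF header at %#lx\n", vdso);      /* 0 if absent */
            return 0;
    }
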
diff --git a/arch/arm64/include/asm/etmv4x.h b/arch/arm64/include/asm/etmv4x.h
new file mode 100644
index 000000000000..91239f779587
--- /dev/null
+++ b/arch/arm64/include/asm/etmv4x.h
@@ -0,0 +1,385 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for AArch64 */
+#define trc_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for AArch64 */
+#define trc_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for AArch64 */
+#define trc_write(val, reg) WSYS_##reg(val)
+
+#define MRSL(op0, op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+#define MRSQ(op0, op1, crn, crm, op2) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+#define MSR(val, op0, op1, crn, crm, op2) \
+({ \
+asm volatile("msr S"#op0"_"#op1"_"#crn"_"#crm"_"#op2", %0" : : "r" (val)); \
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1() MRSL(3, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val) MSR(val, 3, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS() MRSL(2, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR() MRSL(2, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR() MRSL(2, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0() MRSL(2, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0() MRSL(2, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1() MRSL(2, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2() MRSL(2, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3() MRSL(2, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0() MRSL(2, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1() MRSL(2, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2() MRSL(2, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3() MRSL(2, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0() MRSL(2, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1() MRSL(2, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2() MRSL(2, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3() MRSL(2, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR() MRSL(2, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH() MRSL(2, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID() MRSL(2, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R() MRSL(2, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R() MRSL(2, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR() MRSL(2, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0() MRSL(2, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1() MRSL(2, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10() MRSL(2, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11() MRSL(2, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12() MRSL(2, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13() MRSL(2, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2() MRSL(2, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3() MRSL(2, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4() MRSL(2, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5() MRSL(2, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6() MRSL(2, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7() MRSL(2, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8() MRSL(2, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9() MRSL(2, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0() MRSL(2, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR() MRSL(2, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR() MRSL(2, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10() MRSL(2, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11() MRSL(2, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12() MRSL(2, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13() MRSL(2, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14() MRSL(2, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15() MRSL(2, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2() MRSL(2, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3() MRSL(2, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4() MRSL(2, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5() MRSL(2, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6() MRSL(2, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7() MRSL(2, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8() MRSL(2, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9() MRSL(2, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16() MRSL(2, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17() MRSL(2, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18() MRSL(2, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19() MRSL(2, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20() MRSL(2, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21() MRSL(2, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22() MRSL(2, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23() MRSL(2, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24() MRSL(2, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25() MRSL(2, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26() MRSL(2, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27() MRSL(2, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28() MRSL(2, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29() MRSL(2, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30() MRSL(2, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31() MRSL(2, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0() MRSL(2, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1() MRSL(2, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2() MRSL(2, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR() MRSL(2, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR() MRSL(2, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR() MRSL(2, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR() MRSL(2, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR() MRSL(2, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR() MRSL(2, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR() MRSL(2, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR() MRSL(2, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR() MRSL(2, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR() MRSL(2, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0() MRSL(2, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1() MRSL(2, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2() MRSL(2, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3() MRSL(2, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4() MRSL(2, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5() MRSL(2, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6() MRSL(2, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7() MRSL(2, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0() MRSL(2, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1() MRSL(2, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2() MRSL(2, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3() MRSL(2, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4() MRSL(2, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5() MRSL(2, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6() MRSL(2, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7() MRSL(2, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0() MRSL(2, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1() MRSL(2, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2() MRSL(2, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3() MRSL(2, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4() MRSL(2, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5() MRSL(2, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6() MRSL(2, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7() MRSL(2, 1, c1, c7, 3)
+
+/* 64 bit registers */
+#define RSYSQ_ETMACATR0() MRSQ(2, 1, c2, c0, 2)
+#define RSYSQ_ETMACATR1() MRSQ(2, 1, c2, c2, 2)
+#define RSYSQ_ETMACATR2() MRSQ(2, 1, c2, c4, 2)
+#define RSYSQ_ETMACATR3() MRSQ(2, 1, c2, c6, 2)
+#define RSYSQ_ETMACATR4() MRSQ(2, 1, c2, c8, 2)
+#define RSYSQ_ETMACATR5() MRSQ(2, 1, c2, c10, 2)
+#define RSYSQ_ETMACATR6() MRSQ(2, 1, c2, c12, 2)
+#define RSYSQ_ETMACATR7() MRSQ(2, 1, c2, c14, 2)
+#define RSYSQ_ETMACATR8() MRSQ(2, 1, c2, c0, 3)
+#define RSYSQ_ETMACATR9() MRSQ(2, 1, c2, c2, 3)
+#define RSYSQ_ETMACATR10() MRSQ(2, 1, c2, c4, 3)
+#define RSYSQ_ETMACATR11() MRSQ(2, 1, c2, c6, 3)
+#define RSYSQ_ETMACATR12() MRSQ(2, 1, c2, c8, 3)
+#define RSYSQ_ETMACATR13() MRSQ(2, 1, c2, c10, 3)
+#define RSYSQ_ETMACATR14() MRSQ(2, 1, c2, c12, 3)
+#define RSYSQ_ETMACATR15() MRSQ(2, 1, c2, c14, 3)
+#define RSYSQ_ETMCIDCVR0() MRSQ(2, 1, c3, c0, 0)
+#define RSYSQ_ETMCIDCVR1() MRSQ(2, 1, c3, c2, 0)
+#define RSYSQ_ETMCIDCVR2() MRSQ(2, 1, c3, c4, 0)
+#define RSYSQ_ETMCIDCVR3() MRSQ(2, 1, c3, c6, 0)
+#define RSYSQ_ETMCIDCVR4() MRSQ(2, 1, c3, c8, 0)
+#define RSYSQ_ETMCIDCVR5() MRSQ(2, 1, c3, c10, 0)
+#define RSYSQ_ETMCIDCVR6() MRSQ(2, 1, c3, c12, 0)
+#define RSYSQ_ETMCIDCVR7() MRSQ(2, 1, c3, c14, 0)
+#define RSYSQ_ETMACVR0() MRSQ(2, 1, c2, c0, 0)
+#define RSYSQ_ETMACVR1() MRSQ(2, 1, c2, c2, 0)
+#define RSYSQ_ETMACVR2() MRSQ(2, 1, c2, c4, 0)
+#define RSYSQ_ETMACVR3() MRSQ(2, 1, c2, c6, 0)
+#define RSYSQ_ETMACVR4() MRSQ(2, 1, c2, c8, 0)
+#define RSYSQ_ETMACVR5() MRSQ(2, 1, c2, c10, 0)
+#define RSYSQ_ETMACVR6() MRSQ(2, 1, c2, c12, 0)
+#define RSYSQ_ETMACVR7() MRSQ(2, 1, c2, c14, 0)
+#define RSYSQ_ETMACVR8() MRSQ(2, 1, c2, c0, 1)
+#define RSYSQ_ETMACVR9() MRSQ(2, 1, c2, c2, 1)
+#define RSYSQ_ETMACVR10() MRSQ(2, 1, c2, c4, 1)
+#define RSYSQ_ETMACVR11() MRSQ(2, 1, c2, c6, 1)
+#define RSYSQ_ETMACVR12() MRSQ(2, 1, c2, c8, 1)
+#define RSYSQ_ETMACVR13() MRSQ(2, 1, c2, c10, 1)
+#define RSYSQ_ETMACVR14() MRSQ(2, 1, c2, c12, 1)
+#define RSYSQ_ETMACVR15() MRSQ(2, 1, c2, c14, 1)
+#define RSYSQ_ETMVMIDCVR0() MRSQ(2, 1, c3, c0, 1)
+#define RSYSQ_ETMVMIDCVR1() MRSQ(2, 1, c3, c2, 1)
+#define RSYSQ_ETMVMIDCVR2() MRSQ(2, 1, c3, c4, 1)
+#define RSYSQ_ETMVMIDCVR3() MRSQ(2, 1, c3, c6, 1)
+#define RSYSQ_ETMVMIDCVR4() MRSQ(2, 1, c3, c8, 1)
+#define RSYSQ_ETMVMIDCVR5() MRSQ(2, 1, c3, c10, 1)
+#define RSYSQ_ETMVMIDCVR6() MRSQ(2, 1, c3, c12, 1)
+#define RSYSQ_ETMVMIDCVR7() MRSQ(2, 1, c3, c14, 1)
+#define RSYSQ_ETMDVCVR0() MRSQ(2, 1, c2, c0, 4)
+#define RSYSQ_ETMDVCVR1() MRSQ(2, 1, c2, c4, 4)
+#define RSYSQ_ETMDVCVR2() MRSQ(2, 1, c2, c8, 4)
+#define RSYSQ_ETMDVCVR3() MRSQ(2, 1, c2, c12, 4)
+#define RSYSQ_ETMDVCVR4() MRSQ(2, 1, c2, c0, 5)
+#define RSYSQ_ETMDVCVR5() MRSQ(2, 1, c2, c4, 5)
+#define RSYSQ_ETMDVCVR6() MRSQ(2, 1, c2, c8, 5)
+#define RSYSQ_ETMDVCVR7() MRSQ(2, 1, c2, c12, 5)
+#define RSYSQ_ETMDVCMR0() MRSQ(2, 1, c2, c0, 6)
+#define RSYSQ_ETMDVCMR1() MRSQ(2, 1, c2, c4, 6)
+#define RSYSQ_ETMDVCMR2() MRSQ(2, 1, c2, c8, 6)
+#define RSYSQ_ETMDVCMR3() MRSQ(2, 1, c2, c12, 6)
+#define RSYSQ_ETMDVCMR4() MRSQ(2, 1, c2, c0, 7)
+#define RSYSQ_ETMDVCMR5() MRSQ(2, 1, c2, c4, 7)
+#define RSYSQ_ETMDVCMR6() MRSQ(2, 1, c2, c8, 7)
+#define RSYSQ_ETMDVCMR7() MRSQ(2, 1, c2, c12, 7)
+
+/* 32 and 64 bit registers */
+#define WSYS_ETMAUXCTLR(val) MSR(val, 2, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val) MSR(val, 2, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val) MSR(val, 2, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val) MSR(val, 2, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val) MSR(val, 2, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val) MSR(val, 2, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val) MSR(val, 2, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val) MSR(val, 2, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val) MSR(val, 2, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val) MSR(val, 2, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val) MSR(val, 2, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val) MSR(val, 2, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val) MSR(val, 2, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val) MSR(val, 2, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val) MSR(val, 2, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val) MSR(val, 2, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val) MSR(val, 2, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val) MSR(val, 2, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val) MSR(val, 2, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val) MSR(val, 2, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val) MSR(val, 2, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val) MSR(val, 2, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val) MSR(val, 2, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val) MSR(val, 2, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val) MSR(val, 2, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val) MSR(val, 2, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val) MSR(val, 2, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val) MSR(val, 2, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val) MSR(val, 2, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val) MSR(val, 2, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val) MSR(val, 2, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val) MSR(val, 2, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val) MSR(val, 2, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val) MSR(val, 2, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val) MSR(val, 2, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val) MSR(val, 2, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val) MSR(val, 2, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val) MSR(val, 2, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val) MSR(val, 2, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val) MSR(val, 2, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val) MSR(val, 2, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val) MSR(val, 2, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val) MSR(val, 2, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val) MSR(val, 2, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val) MSR(val, 2, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val) MSR(val, 2, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val) MSR(val, 2, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val) MSR(val, 2, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val) MSR(val, 2, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val) MSR(val, 2, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val) MSR(val, 2, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val) MSR(val, 2, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val) MSR(val, 2, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val) MSR(val, 2, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val) MSR(val, 2, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val) MSR(val, 2, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val) MSR(val, 2, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val) MSR(val, 2, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val) MSR(val, 2, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val) MSR(val, 2, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val) MSR(val, 2, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val) MSR(val, 2, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val) MSR(val, 2, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val) MSR(val, 2, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val) MSR(val, 2, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val) MSR(val, 2, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val) MSR(val, 2, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val) MSR(val, 2, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val) MSR(val, 2, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val) MSR(val, 2, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val) MSR(val, 2, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val) MSR(val, 2, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val) MSR(val, 2, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val) MSR(val, 2, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val) MSR(val, 2, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val) MSR(val, 2, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val) MSR(val, 2, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val) MSR(val, 2, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val) MSR(val, 2, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val) MSR(val, 2, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val) MSR(val, 2, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val) MSR(val, 2, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val) MSR(val, 2, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val) MSR(val, 2, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val) MSR(val, 2, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val) MSR(val, 2, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val) MSR(val, 2, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val) MSR(val, 2, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val) MSR(val, 2, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val) MSR(val, 2, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val) MSR(val, 2, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val) MSR(val, 2, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val) MSR(val, 2, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val) MSR(val, 2, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val) MSR(val, 2, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val) MSR(val, 2, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val) MSR(val, 2, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val) MSR(val, 2, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val) MSR(val, 2, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val) MSR(val, 2, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val) MSR(val, 2, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val) MSR(val, 2, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val) MSR(val, 2, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val) MSR(val, 2, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val) MSR(val, 2, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val) MSR(val, 2, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val) MSR(val, 2, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val) MSR(val, 2, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val) MSR(val, 2, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val) MSR(val, 2, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val) MSR(val, 2, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val) MSR(val, 2, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val) MSR(val, 2, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val) MSR(val, 2, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val) MSR(val, 2, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val) MSR(val, 2, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val) MSR(val, 2, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val) MSR(val, 2, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val) MSR(val, 2, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val) MSR(val, 2, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val) MSR(val, 2, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val) MSR(val, 2, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val) MSR(val, 2, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val) MSR(val, 2, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val) MSR(val, 2, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val) MSR(val, 2, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val) MSR(val, 2, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val) MSR(val, 2, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val) MSR(val, 2, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val) MSR(val, 2, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val) MSR(val, 2, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val) MSR(val, 2, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val) MSR(val, 2, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val) MSR(val, 2, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val) MSR(val, 2, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val) MSR(val, 2, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val) MSR(val, 2, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val) MSR(val, 2, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val) MSR(val, 2, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val) MSR(val, 2, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val) MSR(val, 2, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val) MSR(val, 2, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val) MSR(val, 2, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val) MSR(val, 2, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val) MSR(val, 2, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val) MSR(val, 2, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val) MSR(val, 2, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val) MSR(val, 2, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val) MSR(val, 2, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val) MSR(val, 2, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val) MSR(val, 2, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val) MSR(val, 2, 1, c1, c7, 3)
+
+#endif
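
The (op0, op1, CRn, CRm, op2) tuples let the assembler reach these implementation-defined trace registers through the generic S<op0>_<op1>_<Cn>_<Cm>_<op2> spelling, without needing named-register support. For example, under these macros:

    static uint32_t example_read_etmidr0(void)
    {
            /* trc_readl(ETMIDR0) -> RSYSL_ETMIDR0() -> MRSL(2, 1, c0, c8, 7),
             * which assembles as: mrs <Xt>, S2_1_c0_c8_7 */
            return trc_readl(ETMIDR0);
    }
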
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50f559f574fe..3efaa5cebc03 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -81,6 +81,14 @@ extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
u32 num_regs);
extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+#ifdef CONFIG_ENABLE_FP_SIMD_SETTINGS
+extern void fpsimd_disable_trap(void);
+extern void fpsimd_enable_trap(void);
+#else
+static inline void fpsimd_disable_trap(void) {}
+static inline void fpsimd_enable_trap(void) {}
+#endif
+
#endif
#endif
diff --git a/arch/arm64/include/asm/gpio.h b/arch/arm64/include/asm/gpio.h
new file mode 100644
index 000000000000..9019bc99ed47
--- /dev/null
+++ b/arch/arm64/include/asm/gpio.h
@@ -0,0 +1,32 @@
+#ifndef _ARCH_ARM64_GPIO_H
+#define _ARCH_ARM64_GPIO_H
+
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#endif
+
+/* not all ARM64 platforms necessarily support this API ... */
+#ifdef CONFIG_NEED_MACH_GPIO_H
+#include <mach/gpio.h>
+#endif
+
+#ifndef __ARM64_GPIOLIB_COMPLEX
+/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
+#include <asm-generic/gpio.h>
+
+/* The trivial gpiolib dispatchers */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#endif
+
+/*
+ * Provide a default gpio_to_irq() which should satisfy every case.
+ * However, some platforms want to do this differently, so allow them
+ * to override it.
+ */
+#ifndef gpio_to_irq
+#define gpio_to_irq __gpio_to_irq
+#endif
+
+#endif /* _ARCH_ARM64_GPIO_H */
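
With the trivial dispatchers in place, legacy gpiolib calls resolve to the generic __gpio_* implementations. A hedged sketch, where EXAMPLE_GPIO is a placeholder line number:

    #define EXAMPLE_GPIO 42         /* placeholder GPIO number */

    static int example_gpio_irq(void)
    {
            int ret = gpio_request(EXAMPLE_GPIO, "example");

            if (ret)
                    return ret;
            gpio_direction_input(EXAMPLE_GPIO);
            return gpio_to_irq(EXAMPLE_GPIO);   /* dispatches to __gpio_to_irq() */
    }
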
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 8740297dac77..1473fc2f7ab7 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 30e50eb54a67..1dbaa901d7e5 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -120,6 +120,29 @@ enum aarch64_insn_register {
AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
};
+enum aarch64_insn_special_register {
+ AARCH64_INSN_SPCLREG_SPSR_EL1 = 0xC200,
+ AARCH64_INSN_SPCLREG_ELR_EL1 = 0xC201,
+ AARCH64_INSN_SPCLREG_SP_EL0 = 0xC208,
+ AARCH64_INSN_SPCLREG_SPSEL = 0xC210,
+ AARCH64_INSN_SPCLREG_CURRENTEL = 0xC212,
+ AARCH64_INSN_SPCLREG_DAIF = 0xDA11,
+ AARCH64_INSN_SPCLREG_NZCV = 0xDA10,
+ AARCH64_INSN_SPCLREG_FPCR = 0xDA20,
+ AARCH64_INSN_SPCLREG_DSPSR_EL0 = 0xDA28,
+ AARCH64_INSN_SPCLREG_DLR_EL0 = 0xDA29,
+ AARCH64_INSN_SPCLREG_SPSR_EL2 = 0xE200,
+ AARCH64_INSN_SPCLREG_ELR_EL2 = 0xE201,
+ AARCH64_INSN_SPCLREG_SP_EL1 = 0xE208,
+ AARCH64_INSN_SPCLREG_SPSR_INQ = 0xE218,
+ AARCH64_INSN_SPCLREG_SPSR_ABT = 0xE219,
+ AARCH64_INSN_SPCLREG_SPSR_UND = 0xE21A,
+ AARCH64_INSN_SPCLREG_SPSR_FIQ = 0xE21B,
+ AARCH64_INSN_SPCLREG_SPSR_EL3 = 0xF200,
+ AARCH64_INSN_SPCLREG_ELR_EL3 = 0xF201,
+ AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210
+};
+
enum aarch64_insn_variant {
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_VARIANT_64BIT
@@ -223,8 +246,15 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
+__AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000)
+__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
+__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
+__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
+__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
+__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
@@ -273,10 +303,15 @@ __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
+__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
+__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
+__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
+__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
#undef __AARCH64_INSN_FUNCS
@@ -286,6 +321,8 @@ bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+bool aarch64_insn_uses_literal(u32 insn);
+bool aarch64_insn_is_branch(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
@@ -367,9 +404,13 @@ bool aarch32_insn_is_wide(u32 insn);
#define A32_RT_OFFSET 12
#define A32_RT2_OFFSET 0
+u32 aarch64_insn_extract_system_reg(u32 insn);
u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
u32 aarch32_insn_mcr_extract_opc2(u32 insn);
u32 aarch32_insn_mcr_extract_crm(u32 insn);
+
+typedef bool (pstate_check_t)(unsigned long);
+extern pstate_check_t * const aarch32_opcode_cond_checks[16];
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
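
Taken together, the new mask/value pairs and helpers let a decoder classify fetched instruction words; mainline's kprobes decoder performs this same DAIF check. A sketch:

    static void example_classify(u32 insn)
    {
            if (aarch64_insn_is_mrs(insn)) {    /* (insn & 0xFFF00000) == 0xD5300000 */
                    if (aarch64_insn_extract_system_reg(insn) ==
                                    AARCH64_INSN_SPCLREG_DAIF)
                            pr_info("mrs from DAIF\n");
            } else if (aarch64_insn_uses_literal(insn)) {
                    pr_info("PC-relative literal access\n");
            }
    }
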
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 44be1e03ed65..3112c2a9d96f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -31,38 +31,35 @@
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
+#include <linux/msm_rtb.h>
#include <xen/xen.h>
/*
* Generic IO read/write. These perform native-endian accesses.
+ * Note that some architectures will want to re-define __raw_{read,write}w.
*/
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
{
asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_writeq __raw_writeq
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
{
asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
{
u8 val;
asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
@@ -72,8 +69,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
return val;
}
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
{
u16 val;
@@ -84,8 +80,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
return val;
}
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
{
u32 val;
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
@@ -95,8 +90,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return val;
}
-#define __raw_readq __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
{
u64 val;
asm volatile(ALTERNATIVE("ldr %0, [%1]",
@@ -106,6 +100,46 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
return val;
}
+/*
+ * There may be cases when clients don't want to support or can't support
+ * the logging. The appropriate _no_log functions can be used, but clients
+ * should carefully consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t) ({ \
+ int _ret; \
+ volatile void __iomem *_a = (a); \
+ void *_addr = (void __force *)(_a); \
+ _ret = uncached_logk(LOGK_WRITEL, _addr); \
+ ETB_WAYPOINT; \
+ __raw_write##_t##_no_log((v), _a); \
+ if (_ret) \
+ LOG_BARRIER; \
+ })
+
+#define __raw_writeb(v, a) __raw_write_logged((v), a, b)
+#define __raw_writew(v, a) __raw_write_logged((v), a, w)
+#define __raw_writel(v, a) __raw_write_logged((v), a, l)
+#define __raw_writeq(v, a) __raw_write_logged((v), a, q)
+
+#define __raw_read_logged(a, _l, _t) ({ \
+ _t __a; \
+ const volatile void __iomem *_a = (const volatile void __iomem *)(a); \
+ void *_addr = (void __force *)(_a); \
+ int _ret; \
+ _ret = uncached_logk(LOGK_READL, _addr); \
+ ETB_WAYPOINT; \
+ __a = __raw_read##_l##_no_log(_a); \
+ if (_ret) \
+ LOG_BARRIER; \
+ __a; \
+ })
+
+#define __raw_readb(a) __raw_read_logged((a), b, u8)
+#define __raw_readw(a) __raw_read_logged((a), w, u16)
+#define __raw_readl(a) __raw_read_logged((a), l, u32)
+#define __raw_readq(a) __raw_read_logged((a), q, u64)
+
/* IO barriers */
#define __iormb() rmb()
#define __iowmb() wmb()
@@ -127,6 +161,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
+#define readb_relaxed_no_log(c) ({ u8 __v = __raw_readb_no_log(c); __v; })
+#define readw_relaxed_no_log(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; })
+#define readl_relaxed_no_log(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; })
+#define readq_relaxed_no_log(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
+
+#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c) ((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
+#define writel_relaxed_no_log(v, c) ((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c) ((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
+
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
@@ -142,6 +186,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
+#define readb_no_log(c) ({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; })
+#define readw_no_log(c) ({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+#define readl_no_log(c) ({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+#define readq_no_log(c) ({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+
+#define writeb_no_log(v, c) ({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
+#define writew_no_log(v, c) ({ __iowmb(); writew_relaxed_no_log((v), (c)); })
+#define writel_no_log(v, c) ({ __iowmb(); writel_relaxed_no_log((v), (c)); })
+#define writeq_no_log(v, c) ({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
+
/*
* I/O port access primitives.
*/
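
The _no_log variants bypass the uncached_logk() RTB trace while keeping the same barrier semantics at the read*/write* level. A hedged sketch of when that matters, with base standing for some hypothetical mapped register block:

    static u32 example_poll_status(void __iomem *base)
    {
            u32 v = readl(base + 0x04);         /* logged via uncached_logk() */

            while (!(readl_no_log(base + 0x04) & 0x1))  /* tight poll: skip logging */
                    cpu_relax();
            return v;
    }
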
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b77197d941fc..3e1c0e7ef082 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -47,6 +47,9 @@ static inline int nr_legacy_irqs(void)
return 0;
}
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
static inline bool on_irq_stack(unsigned long sp, int cpu)
{
/* variable names the same as kernel/stacktrace.c */
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
new file mode 100644
index 000000000000..1737aecfcc5e
--- /dev/null
+++ b/arch/arm64/include/asm/kprobes.h
@@ -0,0 +1,60 @@
+/*
+ * arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KPROBES_H
+#define _ARM_KPROBES_H
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_irqflag;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+ struct pt_regs jprobe_saved_regs;
+};
+
+void arch_remove_kprobe(struct kprobe *);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
+int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
+void kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* _ARM_KPROBES_H */
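
Registration itself goes through the generic kprobes API; a minimal sketch (the probed symbol and handler are illustrative only):

    static int example_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("hit %pS\n", (void *)instruction_pointer(regs));
            return 0;       /* let execution continue normally */
    }

    static struct kprobe example_kp = {
            .symbol_name = "_do_fork",      /* illustrative target */
            .pre_handler = example_pre,
    };
    /* register_kprobe(&example_kp) arms it; unregister_kprobe() removes it. */
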
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index d7e7cf56e8d6..5385adcd157d 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -86,17 +86,6 @@
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
-/* Hyp System Control Register (SCTLR_EL2) bits */
-#define SCTLR_EL2_EE (1 << 25)
-#define SCTLR_EL2_WXN (1 << 19)
-#define SCTLR_EL2_I (1 << 12)
-#define SCTLR_EL2_SA (1 << 3)
-#define SCTLR_EL2_C (1 << 2)
-#define SCTLR_EL2_A (1 << 1)
-#define SCTLR_EL2_M 1
-#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
- SCTLR_EL2_SA | SCTLR_EL2_I)
-
/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
@@ -126,6 +115,7 @@
#define VTCR_EL2_SL0_LVL1 (1 << 6)
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
+#define VTCR_EL2_VS 19
/*
* We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -169,7 +159,7 @@
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT (UL(48))
-#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
#define HSTR_EL2_T(x) (1 << x)
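
The parameterised mask accommodates the ARMv8.1 16-bit VMID extension selected via the new VTCR_EL2_VS bit; the arithmetic works out to:

    /* VTTBR_VMID_MASK(8)  == _AT(u64, 0x00FF) << 48   (classic 8-bit VMIDs)
     * VTTBR_VMID_MASK(16) == _AT(u64, 0xFFFF) << 48   (16-bit VMIDs, VTCR_EL2.VS = 1) */
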
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 419bc6661b5c..36a30c80032d 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,84 +20,10 @@
#include <asm/virt.h>
-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
-#define CSSELR_EL1 2 /* Cache Size Selection Register */
-#define SCTLR_EL1 3 /* System Control Register */
-#define ACTLR_EL1 4 /* Auxiliary Control Register */
-#define CPACR_EL1 5 /* Coprocessor Access Control */
-#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
-#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
-#define TCR_EL1 8 /* Translation Control Register */
-#define ESR_EL1 9 /* Exception Syndrome Register */
-#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */
-#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */
-#define FAR_EL1 12 /* Fault Address Register */
-#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
-#define VBAR_EL1 14 /* Vector Base Address Register */
-#define CONTEXTIDR_EL1 15 /* Context ID Register */
-#define TPIDR_EL0 16 /* Thread ID, User R/W */
-#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
-#define TPIDR_EL1 18 /* Thread ID, Privileged */
-#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
-#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
-#define PAR_EL1 21 /* Physical Address Register */
-#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
-#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 24 /* Domain Access Control Register */
-#define IFSR32_EL2 25 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
-#define NR_SYS_REGS 28
-
-/* 32bit mapping */
-#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
-#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
-#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
-#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
-#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
-#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
-#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
-#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
-#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
-#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
-#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
-#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
-#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
-#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
-#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
-#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
-#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
-#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
-#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
-#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
-#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
-#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
-#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
-#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
-#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
-#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
-#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
-#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
-#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-
-#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
-#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
-#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
-#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
-#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
-#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
-#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
-
-#define NR_COPRO_REGS (NR_SYS_REGS * 2)
-
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE 2
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -105,11 +31,27 @@
#define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset)
#ifndef __ASSEMBLY__
+#if __GNUC__ > 4
+#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR)
+#else
+/*
+ * GCC versions 4.9 and older will fold the constant below into the addend of
+ * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
+ * constant is used directly. However, since we use the small code model for
+ * the core kernel, the reference to 'sym' will be emitted as an adrp/add pair,
+ * with a +/- 4 GB range, resulting in linker relocation errors if the shift
+ * is sufficiently large. So prevent the compiler from folding the shift into
+ * the addend, by making the shift a variable with external linkage.
+ */
+__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
+#endif
+
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 0729a2f94482..9917b55148d8 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -26,7 +26,6 @@
#include <asm/esr.h>
#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d2166bd67cd0..e875deff69f1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,6 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -45,6 +44,7 @@
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
+phys_addr_t kvm_hyp_reset_entry(void);
struct kvm_arch {
/* The VMID generation used for the virt. memory system */
@@ -85,6 +85,86 @@ struct kvm_vcpu_fault_info {
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
};
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+ __INVALID_SYSREG__,
+ MPIDR_EL1, /* MultiProcessor Affinity Register */
+ CSSELR_EL1, /* Cache Size Selection Register */
+ SCTLR_EL1, /* System Control Register */
+ ACTLR_EL1, /* Auxiliary Control Register */
+ CPACR_EL1, /* Coprocessor Access Control */
+ TTBR0_EL1, /* Translation Table Base Register 0 */
+ TTBR1_EL1, /* Translation Table Base Register 1 */
+ TCR_EL1, /* Translation Control Register */
+ ESR_EL1, /* Exception Syndrome Register */
+	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
+	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
+ FAR_EL1, /* Fault Address Register */
+ MAIR_EL1, /* Memory Attribute Indirection Register */
+ VBAR_EL1, /* Vector Base Address Register */
+ CONTEXTIDR_EL1, /* Context ID Register */
+ TPIDR_EL0, /* Thread ID, User R/W */
+ TPIDRRO_EL0, /* Thread ID, User R/O */
+ TPIDR_EL1, /* Thread ID, Privileged */
+ AMAIR_EL1, /* Aux Memory Attribute Indirection Register */
+ CNTKCTL_EL1, /* Timer Control Register (EL1) */
+ PAR_EL1, /* Physical Address Register */
+ MDSCR_EL1, /* Monitor Debug System Control Register */
+ MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+ /* 32bit specific registers. Keep them at the end of the range */
+ DACR32_EL2, /* Domain Access Control Register */
+ IFSR32_EL2, /* Instruction Fault Status Register */
+ FPEXC32_EL2, /* Floating-Point Exception Control Register */
+ DBGVCR32_EL2, /* Debug Vector Catch Register */
+
+ NR_SYS_REGS /* Nothing after this line! */
+};
+
+/* 32bit mapping */
+#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
+#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
+#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
+#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
+#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
+#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
+#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
+#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
+#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
+#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
+#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
+#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
+#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
+#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
+#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
+#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
+#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
+#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
+#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS (NR_SYS_REGS * 2)
+
struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
@@ -249,7 +329,21 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
-static inline void kvm_arch_hardware_disable(void) {}
+static inline void __cpu_init_stage2(void)
+{
+}
+
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t phys_idmap_start)
+{
+ /*
+ * Call reset code, and switch back to stub hyp vectors.
+ * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
+ */
+ __kvm_call_hyp((void *)kvm_hyp_reset_entry(),
+ boot_pgd_ptr, phys_idmap_start);
+}
+
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
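The c*_ and cp14_* indices added above work because the 32-bit view treats each 64-bit sys_regs slot as a pair of 32-bit coprocessor registers, which is why every index is the EL1 enum value times two. A minimal user-space sketch of that aliasing, with invented values (the real slots live in kvm_cpu_context, and the low/high pairing assumes a little-endian host):

	#include <stdint.h>
	#include <stdio.h>

	union sysreg_view {
		uint64_t sys_regs[32];	/* indexed like enum vcpu_sysreg */
		uint32_t copro[64];	/* indexed like the c*_ defines */
	};

	int main(void)
	{
		enum { TTBR0_EL1 = 6 };			/* value from the enum above */
		const int c2_TTBR0 = TTBR0_EL1 * 2;	/* low word */
		const int c2_TTBR0_high = c2_TTBR0 + 1;	/* high word */
		union sysreg_view v = { { 0 } };

		v.sys_regs[TTBR0_EL1] = 0x1122334455667788ULL;
		printf("low=%08x high=%08x\n",
		       (unsigned)v.copro[c2_TTBR0],
		       (unsigned)v.copro[c2_TTBR0_high]);
		return 0;
	}
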
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 889c908ee631..fe612a962576 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -19,7 +19,6 @@
#define __ARM64_KVM_MMIO_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
/*
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 2b1020a056ad..c6aae0b85cef 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/cpufeature.h>
/*
* As we only have the TTBR0_EL2 register, we cannot express
@@ -98,6 +99,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -158,7 +160,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
@@ -301,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+ int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+ return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
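kvm_get_vmid_bits() keys off the 4-bit VMIDBits field of ID_AA64MMFR1_EL1: the architected value 2 advertises 16-bit VMIDs, anything else means 8. A freestanding sketch of the field extraction (the shift value follows sysreg.h; treat the constants here as illustrative):

	#include <stdint.h>

	#define ID_AA64MMFR1_VMIDBITS_SHIFT	4	/* per sysreg.h */

	static unsigned int feature_field(uint64_t reg, unsigned int shift)
	{
		return (reg >> shift) & 0xf;	/* ID fields are 4 bits wide */
	}

	static unsigned int vmid_bits(uint64_t id_aa64mmfr1)
	{
		return feature_field(id_aa64mmfr1,
				     ID_AA64MMFR1_VMIDBITS_SHIFT) == 2 ? 16 : 8;
	}
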
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6b17d08845c3..1d870666b8d9 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -73,6 +73,9 @@
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+#define KERNEL_START _text
+#define KERNEL_END _end
+
/*
* The size of the KASAN shadow region. This should be 1/8th of the
* size of the entire kernel virtual address space.
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 39e502f0ab80..67e8c0c5e3cc 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -20,6 +20,10 @@
#define TTBR_ASID_MASK (UL(0xffff) << 48)
#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+
+#include <asm/cpufeature.h>
+#include <asm/percpu.h>
typedef struct {
atomic64_t id;
@@ -39,6 +43,43 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+ int hyp_vectors_slot;
+ bp_hardening_cb_t fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+ struct bp_hardening_data *d;
+
+ if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ return;
+
+ d = arm64_get_bp_hardening_data();
+ if (d->fn)
+ d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void) { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
extern void paging_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
@@ -46,6 +87,13 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void hotplug_paging(phys_addr_t start, phys_addr_t size);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern void remove_pagetable(unsigned long start,
+ unsigned long end, bool direct);
+#endif
+#endif
#endif /* !__ASSEMBLY__ */
#endif
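arm64_apply_bp_hardening() keeps the fast path cheap: one capability test plus an indirect call through per-CPU data, so only affected CPUs pay for the invalidation sequence. A hypothetical user-space model of that dispatch (an array indexed by CPU stands in for the kernel's per-CPU data, and the callback body is a stub):

	typedef void (*bp_hardening_cb_t)(void);

	struct bp_hardening_data_model {
		int hyp_vectors_slot;
		bp_hardening_cb_t fn;
	};

	static struct bp_hardening_data_model bp_data[4];	/* one per CPU */
	static int cpu_has_harden_cap[4];	/* cpus_have_cap() stand-in */

	static void psci_invalidate_cb(void) { /* SMC to firmware in the kernel */ }

	static void install_bp_hardening(int cpu)
	{
		cpu_has_harden_cap[cpu] = 1;
		bp_data[cpu].fn = psci_invalidate_cb;
	}

	static void apply_bp_hardening(int cpu)
	{
		if (!cpu_has_harden_cap[cpu])	/* early out on unaffected CPUs */
			return;
		if (bp_data[cpu].fn)
			bp_data[cpu].fn();
	}
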
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e0d53cfca847..17d0ada5b473 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -28,20 +28,25 @@
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
+#include <linux/msm_rtb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ pid_t pid = task_pid_nr(next);
asm(
" msr contextidr_el1, %0\n"
" isb"
:
- : "r" (task_pid_nr(next)));
+ : "r" (pid));
+ uncached_logk(LOGK_CTXID, (void *)(u64)pid);
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ uncached_logk(LOGK_CTXID, (void *)(u64)task_pid_nr(next));
}
#endif
@@ -181,7 +186,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
- task_thread_info(tsk)->ttbr0 = ttbr;
+ WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b008a72f8bc0..ed8f4351cc2d 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -41,3 +41,8 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#endif /* __KERNEL__ */
#endif /* __ASM_PCI_H */
+
+#ifdef CONFIG_PCI_MSM
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+#endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 91b6be092ce2..0c38c189fb3b 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -33,4 +33,91 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
(regs)->pstate = PSR_MODE_EL1h; \
}
+static inline u32 armv8pmu_pmcr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmccntr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmxevcntr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmovsclr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmovsclr_el0" : "=r" (val));
+ return val;
+}
+
+static inline void armv8pmu_pmcr_write_reg(u32 val)
+{
+ asm volatile("msr pmcr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmselr_write_reg(u32 val)
+{
+ asm volatile("msr pmselr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmccntr_write_reg(u32 val)
+{
+ asm volatile("msr pmccntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevcntr_write_reg(u32 val)
+{
+ asm volatile("msr pmxevcntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevtyper_write_reg(u32 val)
+{
+ asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenset_write_reg(u32 val)
+{
+ asm volatile("msr pmcntenset_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenclr_write_reg(u32 val)
+{
+ asm volatile("msr pmcntenclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenset_write_reg(u32 val)
+{
+ asm volatile("msr pmintenset_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenclr_write_reg(u32 val)
+{
+ asm volatile("msr pmintenclr_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmovsclr_write_reg(u32 val)
+{
+ asm volatile("msr pmovsclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmuserenr_write_reg(u32 val)
+{
+ asm volatile("msr pmuserenr_el0, %0" :: "r" (val));
+}
+
#endif
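Taken together, the accessors above are enough to program the PMU; for instance, the cycle counter is enabled by setting bit 31 in PMCNTENSET_EL0 plus PMCR_EL0.E (bit 0), after which PMCCNTR_EL0 can be read. A small sketch using these wrappers (bit positions follow the ARMv8 PMU architecture; whether EL0 may touch the counters also depends on PMUSERENR_EL0):

	#define ARMV8_PMCR_E		(1U << 0)	/* master enable */
	#define ARMV8_CYCLE_COUNTER	(1U << 31)	/* cycle counter enable */

	static inline u32 start_and_read_cycle_counter(void)
	{
		armv8pmu_pmcntenset_write_reg(ARMV8_CYCLE_COUNTER);
		armv8pmu_pmcr_write_reg(armv8pmu_pmcr_read_reg() | ARMV8_PMCR_E);
		return armv8pmu_pmccntr_read_reg();	/* low 32 bits */
	}
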
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 81eee57ad519..13d6b496de92 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -19,6 +19,7 @@
#include <asm/bug.h>
#include <asm/proc-fns.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -231,6 +232,16 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
+static inline pte_t pte_clear_rdonly(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_VALID));
+}
+
static inline pmd_t pmd_mkcont(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
@@ -238,6 +249,34 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+ pteval_t old = pte_val(*ptep);
+ pteval_t new = pte_val(pte);
+
+ /* Only problematic if valid -> valid */
+ if (!(old & new & PTE_VALID))
+ goto pte_ok;
+
+ /* Changing attributes should go via an invalid entry */
+ if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+ goto pte_bad;
+
+ /* Change of OA is only an issue if one mapping is writable */
+ if (!(old & new & PTE_RDONLY) &&
+ WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+ goto pte_bad;
+
+ goto pte_ok;
+
+pte_bad:
+ *ptep = __pte(0);
+ dsb(ishst);
+ asm("tlbi vmalle1is");
+ dsb(ish);
+ isb();
+pte_ok:
+#endif
+
*ptep = pte;
/*
@@ -364,6 +403,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
@@ -440,6 +480,11 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long) __va(pmd_page_paddr(pmd));
+}
+
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -491,6 +536,11 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long) __va(pud_page_paddr(pud));
+}
+
/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
@@ -543,6 +593,11 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return (unsigned long) __va(pgd_page_paddr(pgd));
+}
+
/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
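The strict check added to set_pte() earlier in this file enforces the architectural break-before-make rule: a live valid entry must not change attributes or, for writable mappings, its output address without passing through an invalid entry and a TLB invalidation in between. A self-contained model of the legal update sequence (the TLB step is a stub; the kernel uses tlbi/dsb/isb as in the pte_bad path above):

	#include <stdint.h>

	typedef uint64_t model_pte_t;
	#define MODEL_PTE_VALID	(1ULL << 0)

	static void tlb_invalidate(void) { /* tlbi vmalle1is; dsb; isb */ }

	static void bbm_update(volatile model_pte_t *ptep, model_pte_t new)
	{
		*ptep = 0;			/* 1. break: install invalid entry */
		tlb_invalidate();		/* 2. drop cached translations */
		*ptep = new | MODEL_PTE_VALID;	/* 3. make: install new mapping */
	}
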
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
new file mode 100644
index 000000000000..5af574d632fa
--- /dev/null
+++ b/arch/arm64/include/asm/probes.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm64/include/asm/probes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _ARM_PROBES_H
+#define _ARM_PROBES_H
+
+#include <asm/opcodes.h>
+
+struct kprobe;
+struct arch_specific_insn;
+
+typedef u32 kprobe_opcode_t;
+typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_specific_insn {
+ kprobe_opcode_t *insn;
+ pstate_check_t *pstate_cc;
+ kprobes_handler_t *handler;
+ /* restore address after step xol */
+ unsigned long restore;
+};
+
+#endif
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 16cef2e8449e..9da52c2c6c5c 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,8 +28,12 @@
struct mm_struct;
struct cpu_suspend_ctx;
+extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+ unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 4be934fde409..7766635158df 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -40,9 +40,9 @@
#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
-#define AARCH32_VECTORS_BASE 0xffff0000
+#define AARCH32_KUSER_HELPERS_BASE 0xffff0000
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
- AARCH32_VECTORS_BASE : STACK_TOP_MAX)
+ AARCH32_KUSER_HELPERS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
@@ -51,6 +51,9 @@ extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
#endif /* __KERNEL__ */
+extern unsigned int boot_reason;
+extern unsigned int cold_boot;
+
struct debug_info {
/* Have we suspended stepping by a debugger? */
int suspended_step;
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 7f94755089e2..1528d52eb8c0 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -121,6 +121,8 @@ struct pt_regs {
u64 unused; // maintain 16 byte alignment
};
+#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)
+
#define arch_has_single_step() (1)
#ifdef CONFIG_COMPAT
@@ -146,9 +148,57 @@ struct pt_regs {
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
-#define user_stack_pointer(regs) \
+#define GET_USP(regs) \
(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
+#define SET_USP(ptregs, value) \
+	(!compat_user_mode(ptregs) ? ((ptregs)->sp = value) : ((ptregs)->compat_sp = value))
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/**
+ * regs_get_register() - get a register value from its offset
+ * @regs:	pt_regs to read the register from
+ * @offset:	byte offset of the register within struct pt_regs
+ *
+ * regs_get_register() returns the value of the register stored at
+ * @offset within @regs. If @offset is larger than MAX_REG_OFFSET,
+ * this returns 0.
+ */
+static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+ u64 val = 0;
+
+ offset >>= 3;
+ switch (offset) {
+ case 0 ... 30:
+ val = regs->regs[offset];
+ break;
+ case offsetof(struct pt_regs, sp) >> 3:
+ val = regs->sp;
+ break;
+ case offsetof(struct pt_regs, pc) >> 3:
+ val = regs->pc;
+ break;
+ case offsetof(struct pt_regs, pstate) >> 3:
+ val = regs->pstate;
+ break;
+ default:
+ val = 0;
+ }
+
+ return val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->regs[0];
@@ -158,8 +208,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
-#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
+#define GET_IP(regs) ((unsigned long)(regs)->pc)
+#define SET_IP(regs, value) ((regs)->pc = ((u64) (value)))
+
+#define GET_FP(ptregs) ((unsigned long)(ptregs)->regs[29])
+#define SET_FP(ptregs, value) ((ptregs)->regs[29] = ((u64) (value)))
+
+#include <asm-generic/ptrace.h>
+#undef profile_pc
extern unsigned long profile_pc(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
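regs_get_register() relies on struct pt_regs laying out x0..x30, sp, pc and pstate as consecutive 64-bit slots, so a byte offset right-shifted by 3 becomes an array index. The companion lookup by name, regs_query_register_offset(), can be modelled like this (offsets follow that layout; the table is deliberately abbreviated):

	#include <errno.h>
	#include <string.h>

	struct model_reg_offset {
		const char *name;
		unsigned int offset;	/* byte offset into struct pt_regs */
	};

	static const struct model_reg_offset model_offsets[] = {
		{ "x0",  0 * 8 },
		{ "x29", 29 * 8 },	/* frame pointer */
		{ "sp",  31 * 8 },	/* follows regs[31] */
		{ "pc",  32 * 8 },
	};

	static int model_query_register_offset(const char *name)
	{
		unsigned int i;

		for (i = 0; i < sizeof(model_offsets) / sizeof(model_offsets[0]); i++)
			if (!strcmp(model_offsets[i].name, name))
				return model_offsets[i].offset;
		return -EINVAL;	/* unknown register name */
	}
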
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
new file mode 100644
index 000000000000..b865e83e57f5
--- /dev/null
+++ b/arch/arm64/include/asm/sections.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char __alt_instructions[], __alt_instructions_end[];
+extern char __exception_text_start[], __exception_text_end[];
+extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
+extern char __idmap_text_start[], __idmap_text_end[];
+extern char __irqentry_text_start[], __irqentry_text_end[];
+extern char __mmuoff_data_start[], __mmuoff_data_end[];
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 81abea0b7650..bcd0e139ee4a 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -20,7 +20,51 @@
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
+struct compat_sigcontext {
+ /* We always set these two fields to 0 */
+ compat_ulong_t trap_no;
+ compat_ulong_t error_code;
+
+ compat_ulong_t oldmask;
+ compat_ulong_t arm_r0;
+ compat_ulong_t arm_r1;
+ compat_ulong_t arm_r2;
+ compat_ulong_t arm_r3;
+ compat_ulong_t arm_r4;
+ compat_ulong_t arm_r5;
+ compat_ulong_t arm_r6;
+ compat_ulong_t arm_r7;
+ compat_ulong_t arm_r8;
+ compat_ulong_t arm_r9;
+ compat_ulong_t arm_r10;
+ compat_ulong_t arm_fp;
+ compat_ulong_t arm_ip;
+ compat_ulong_t arm_sp;
+ compat_ulong_t arm_lr;
+ compat_ulong_t arm_pc;
+ compat_ulong_t arm_cpsr;
+ compat_ulong_t fault_address;
+};
+
+struct compat_ucontext {
+ compat_ulong_t uc_flags;
+ compat_uptr_t uc_link;
+ compat_stack_t uc_stack;
+ struct compat_sigcontext uc_mcontext;
+ compat_sigset_t uc_sigmask;
+ int __unused[32 - (sizeof(compat_sigset_t) / sizeof(int))];
+ compat_ulong_t uc_regspace[128] __aligned(8);
+};
+
+struct compat_sigframe {
+ struct compat_ucontext uc;
+ compat_ulong_t retcode[2];
+};
+
+struct compat_rt_sigframe {
+ struct compat_siginfo info;
+ struct compat_sigframe sig;
+};
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 74a9d301819f..81c69fe1adc0 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -18,7 +18,11 @@
#ifdef CONFIG_SPARSEMEM
#define MAX_PHYSMEM_BITS 48
+#ifndef CONFIG_MEMORY_HOTPLUG
#define SECTION_SIZE_BITS 30
+#else
+#define SECTION_SIZE_BITS CONFIG_HOTPLUG_SIZE_BITS
+#endif
#endif
#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fc037c3e2c27..da7a921d88d5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -103,7 +103,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
- " prfm pstl1strm, %2\n"
"1: ldaxr %w0, %2\n"
" eor %w1, %w0, %w0, ror #16\n"
" cbnz %w1, 2f\n"
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 4d19a03d316e..92d6a628e478 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,8 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 13
+#define NR_CTX_REGS 12
+#define NR_CALLEE_SAVED_REGS 12
/*
* struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -16,11 +17,34 @@ struct cpu_suspend_ctx {
u64 sp;
} __aligned(16);
-struct sleep_save_sp {
- phys_addr_t *save_ptr_stash;
- phys_addr_t save_ptr_stash_phys;
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+ struct cpu_suspend_ctx system_regs;
+ unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS];
};
+extern unsigned long *sleep_save_stash;
+
extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
+void _cpu_resume(void);
+
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
#endif
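The comment above implies a specific calling pattern: the caller keeps the sleep_stack_data in its own frame, __cpu_suspend_enter() returns non-zero on the way down so the finisher runs, and resume comes back through cpu_resume() with a zero return. A simplified sketch of that shape (not the exact suspend.c code, which also manages debug and PM state around the call):

	static int model_cpu_suspend(unsigned long arg,
				     int (*fn)(unsigned long))
	{
		struct sleep_stack_data state;	/* must outlive the suspend */
		int ret = 0;

		if (__cpu_suspend_enter(&state))
			ret = fn(arg);		/* finisher powers the CPU down */
		else
			__cpu_suspend_exit();	/* resumed via cpu_resume() */

		return ret;
	}
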
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index accf6dc2dfe4..c768daa084ca 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -86,10 +86,21 @@
#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
(!!x)<<8 | 0x1f)
-/* SCTLR_EL1 */
-#define SCTLR_EL1_CP15BEN (0x1 << 5)
-#define SCTLR_EL1_SED (0x1 << 8)
-#define SCTLR_EL1_SPAN (0x1 << 23)
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M 1
+
+#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_SPAN (1 << 23)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_CP15BEN (1 << 5)
/* id_aa64isar0 */
@@ -101,6 +112,7 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 57f110bea6a8..2fbc254a8a37 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -44,6 +44,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+extern char* (*arch_read_hardware_id)(void);
#define show_unhandled_signals_ratelimited() \
({ \
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 67dd228c3f17..70c11c25d3e4 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -120,6 +120,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
+#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
+#define TIF_MM_RELEASED 24
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -140,10 +142,12 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ _TIF_FSCHECK)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index bbd362cd1ed1..7d35ea7b5b95 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -21,6 +21,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+unsigned long arch_get_cpu_efficiency(int cpu);
struct sched_domain;
#ifdef CONFIG_CPU_FREQ
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 8f05d3b21418..7fe6a2e1c93f 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -19,6 +19,7 @@
#define __ASM_TRAP_H
#include <linux/list.h>
+#include <asm/sections.h>
struct pt_regs;
@@ -36,17 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook);
static inline int __in_irqentry_text(unsigned long ptr)
{
- extern char __irqentry_text_start[];
- extern char __irqentry_text_end[];
-
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
static inline int in_exception_text(unsigned long ptr)
{
- extern char __exception_text_start[];
- extern char __exception_text_end[];
int in;
in = ptr >= (unsigned long)&__exception_text_start &&
@@ -55,4 +51,5 @@ static inline int in_exception_text(unsigned long ptr)
return in ? : __in_irqentry_text(ptr);
}
+static inline void get_pct_hook_init(void) {}
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index d39d8bde42d7..d0919bcb1953 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -73,6 +73,9 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+ /* On user-mode return, check fs is correct */
+ set_thread_flag(TIF_FSCHECK);
+
/*
* Enable/disable UAO so that copy_to_user() etc can access
* kernel memory with the unprivileged instructions.
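The TIF_FSCHECK flag set in this hunk lets the return-to-user path verify that addr_limit was put back to USER_DS, catching an unbalanced set_fs(KERNEL_DS). A hypothetical model of that check (the kernel's real helper and field accessors may differ):

	/* Model only: fail loudly if a kernel path leaked KERNEL_DS. */
	static inline void model_addr_limit_user_check(void)
	{
		if (!test_thread_flag(TIF_FSCHECK))
			return;

		BUG_ON(current_thread_info()->addr_limit != USER_DS);
		clear_thread_flag(TIF_FSCHECK);
	}
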
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 839ce0031bd5..f2a952338f1e 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -28,6 +28,9 @@
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
+#ifdef CONFIG_VDSO32
+#include <generated/vdso32-offsets.h>
+#endif
#define VDSO_SYMBOL(base, name) \
({ \
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a63771eda..348b9be9efe7 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -20,16 +20,33 @@
#ifndef __ASSEMBLY__
+#ifndef _VDSO_WTM_CLOCK_NSEC_T
+#define _VDSO_WTM_CLOCK_NSEC_T
+typedef __u64 vdso_wtm_clock_nsec_t;
+#endif
+
+#ifndef _VDSO_XTIME_CLOCK_SEC_T
+#define _VDSO_XTIME_CLOCK_SEC_T
+typedef __u64 vdso_xtime_clock_sec_t;
+#endif
+
+#ifndef _VDSO_RAW_TIME_SEC_T
+#define _VDSO_RAW_TIME_SEC_T
+typedef __u64 vdso_raw_time_sec_t;
+#endif
+
struct vdso_data {
__u64 cs_cycle_last; /* Timebase at clocksource init */
- __u64 raw_time_sec; /* Raw time */
+ vdso_raw_time_sec_t raw_time_sec; /* Raw time */
__u64 raw_time_nsec;
- __u64 xtime_clock_sec; /* Kernel time */
- __u64 xtime_clock_nsec;
+ vdso_xtime_clock_sec_t xtime_clock_sec; /* Kernel time */
+ __u64 xtime_clock_snsec;
__u64 xtime_coarse_sec; /* Coarse time */
__u64 xtime_coarse_nsec;
__u64 wtm_clock_sec; /* Wall to monotonic time */
- __u64 wtm_clock_nsec;
+ vdso_wtm_clock_nsec_t wtm_clock_nsec;
+ __u32 btm_sec; /* monotonic to boot time */
+ __u32 btm_nsec;
__u32 tb_seq_count; /* Timebase sequence counter */
/* cs_* members must be adjacent and in this order (ldp accesses) */
__u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
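tb_seq_count is the data page's sequence counter: the update side makes it odd while writing and even again afterwards, so userspace readers retry instead of locking. A simplified reader loop over a model data page (the real vDSO code adds acquire barriers that plain volatile loads do not provide):

	#include <stdint.h>

	struct model_datapage {
		volatile uint32_t tb_seq_count;
		volatile uint64_t xtime_coarse_sec;
		volatile uint64_t xtime_coarse_nsec;
	};

	static void read_coarse(const struct model_datapage *dp,
				uint64_t *sec, uint64_t *nsec)
	{
		uint32_t seq;

		do {
			do {
				seq = dp->tb_seq_count;
			} while (seq & 1);	/* writer in progress */

			*sec  = dp->xtime_coarse_sec;
			*nsec = dp->xtime_coarse_nsec;
		} while (dp->tb_seq_count != seq);	/* retry if it moved */
	}
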
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df5252dd7..46e0bbddee94 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,11 +18,30 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
+/*
+ * The arm64 hcall implementation uses x0 to specify the hcall type. A value
+ * less than 0xfff indicates a special hcall, such as get/set vector.
+ * Any other value is used as a pointer to the function to call.
+ */
+
+/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
+#define HVC_GET_VECTORS 0
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x1: Physical address of the new vector table.
+ */
+#define HVC_SET_VECTORS 1
+
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+
/*
* __boot_cpu_mode records what mode CPUs were booted in.
* A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +69,14 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ u64 el;
+
+ asm("mrs %0, CurrentEL" : "=r" (el));
+ return el == CurrentEL_EL2;
+}
+
/* The section containing the hypervisor text */
extern char __hyp_text_start[];
extern char __hyp_text_end[];
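The hcall convention described at the top of this header can be pictured as a dispatcher in the EL2 vector: small x0 values select built-in operations, anything else is branched to as a function pointer. A C model with stand-ins for the vbar_el2 accesses (the real dispatch is assembly in the hyp stub):

	typedef unsigned long (*hyp_fn_t)(unsigned long, unsigned long);

	static unsigned long model_vbar_el2;	/* register stand-in */

	static unsigned long model_handle_hcall(unsigned long x0,
						unsigned long x1,
						unsigned long x2)
	{
		if (x0 < 0xfff) {		/* special hcalls */
			switch (x0) {
			case HVC_GET_VECTORS:
				return model_vbar_el2;
			case HVC_SET_VECTORS:
				model_vbar_el2 = x1;
				return 0;
			}
			return ~0UL;		/* unknown special hcall */
		}

		return ((hyp_fn_t)x0)(x1, x2);	/* function-call hcall */
	}
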
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 825b0fe51c2b..13a97aa2285f 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -2,21 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += kvm_para.h
-header-y += perf_regs.h
-header-y += param.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index ee469be1ae1d..c731ca011ca3 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -16,6 +16,7 @@
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H
+#ifdef CONFIG_64BIT
#include <linux/types.h>
/*
@@ -61,4 +62,35 @@ struct esr_context {
__u64 esr;
};
+#else /* CONFIG_64BIT */
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long arm_r0;
+ unsigned long arm_r1;
+ unsigned long arm_r2;
+ unsigned long arm_r3;
+ unsigned long arm_r4;
+ unsigned long arm_r5;
+ unsigned long arm_r6;
+ unsigned long arm_r7;
+ unsigned long arm_r8;
+ unsigned long arm_r9;
+ unsigned long arm_r10;
+ unsigned long arm_fp;
+ unsigned long arm_ip;
+ unsigned long arm_sp;
+ unsigned long arm_lr;
+ unsigned long arm_pc;
+ unsigned long arm_cpsr;
+ unsigned long fault_address;
+};
+#endif /* CONFIG_64BIT */
#endif /* _UAPI__ASM_SIGCONTEXT_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index d10034327423..18938199a838 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -25,14 +25,18 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
-arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o entry32.o \
- ../../arm/kernel/opcodes.o
+arm64-obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
+ sys_compat.o entry32.o
+ifneq ($(CONFIG_VDSO32),y)
+arm64-obj-$(CONFIG_COMPAT) += sigreturn32.o
+endif
+arm64-obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_debug.o perf_trace_counters.o \
+ perf_trace_user.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
@@ -42,10 +46,19 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI) += acpi.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+arm64-obj-$(CONFIG_MSM_APP_API) += app_api.o
+arm64-obj-$(CONFIG_MSM_APP_SETTINGS) += app_setting.o
+arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
+arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
+
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
+endif
-obj-y += $(arm64-obj-y) vdso/
+obj-y += $(arm64-obj-y) vdso/ probes/
+obj-$(CONFIG_VDSO32) += vdso32/
obj-m += $(arm64-obj-m)
head-y := head.o
extra-y += $(head-y) vmlinux.lds
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index d2ee1b21a10d..737481c8e918 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -25,14 +25,13 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
+#include <asm/sections.h>
#include <linux/stop_machine.h>
#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
@@ -108,7 +107,7 @@ static void __apply_alternatives(void *alt_region)
for (i = 0; i < nr_inst; i++) {
insn = get_alt_insn(alt, origptr + i, replptr + i);
- *(origptr + i) = cpu_to_le32(insn);
+ BUG_ON(aarch64_insn_patch_text_nosync(origptr + i, insn));
}
flush_icache_range((uintptr_t)origptr,
@@ -124,8 +123,8 @@ static int __apply_alternatives_multi_stop(void *unused)
{
static int patched = 0;
struct alt_region region = {
- .begin = __alt_instructions,
- .end = __alt_instructions_end,
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
};
/* We always have a CPU 0 at this point (__init) */
diff --git a/arch/arm64/kernel/app_api.c b/arch/arm64/kernel/app_api.c
new file mode 100644
index 000000000000..e995bbf3c7b4
--- /dev/null
+++ b/arch/arm64/kernel/app_api.c
@@ -0,0 +1,135 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/export.h>
+
+#include <asm/app_api.h>
+
+static spinlock_t spinlock;
+static spinlock_t spinlock_32bit_app;
+static DEFINE_PER_CPU(int, app_config_applied);
+static unsigned long app_config_set[NR_CPUS];
+static unsigned long app_config_clear[NR_CPUS];
+
+void set_app_setting_bit(uint32_t bit)
+{
+ unsigned long flags;
+ uint64_t reg;
+ int cpu;
+
+ spin_lock_irqsave(&spinlock, flags);
+ asm volatile("mrs %0, S3_1_C15_C15_0" : "=r" (reg));
+ reg = reg | BIT(bit);
+ isb();
+ asm volatile("msr S3_1_C15_C15_0, %0" : : "r" (reg));
+ isb();
+ if (bit == APP_SETTING_BIT) {
+ cpu = raw_smp_processor_id();
+ app_config_set[cpu]++;
+
+ this_cpu_write(app_config_applied, 1);
+ }
+ spin_unlock_irqrestore(&spinlock, flags);
+
+}
+EXPORT_SYMBOL(set_app_setting_bit);
+
+void clear_app_setting_bit(uint32_t bit)
+{
+ unsigned long flags;
+ uint64_t reg;
+ int cpu;
+
+ spin_lock_irqsave(&spinlock, flags);
+ asm volatile("mrs %0, S3_1_C15_C15_0" : "=r" (reg));
+ reg = reg & ~BIT(bit);
+ isb();
+ asm volatile("msr S3_1_C15_C15_0, %0" : : "r" (reg));
+ isb();
+ if (bit == APP_SETTING_BIT) {
+ cpu = raw_smp_processor_id();
+ app_config_clear[cpu]++;
+
+ this_cpu_write(app_config_applied, 0);
+ }
+ spin_unlock_irqrestore(&spinlock, flags);
+}
+EXPORT_SYMBOL(clear_app_setting_bit);
+
+void set_app_setting_bit_for_32bit_apps(void)
+{
+ unsigned long flags;
+ uint64_t reg;
+
+ spin_lock_irqsave(&spinlock_32bit_app, flags);
+ if (use_32bit_app_setting) {
+ asm volatile("mrs %0, S3_0_c15_c15_0 " : "=r" (reg));
+ reg = reg | BIT(24);
+ isb();
+ asm volatile("msr S3_0_c15_c15_0, %0" : : "r" (reg));
+ isb();
+ asm volatile("mrs %0, S3_0_c15_c15_1 " : "=r" (reg));
+ reg = reg | BIT(18) | BIT(2) | BIT(0);
+ isb();
+ asm volatile("msr S3_0_c15_c15_1, %0" : : "r" (reg));
+ isb();
+ } else if (use_32bit_app_setting_pro) {
+ asm volatile("mrs %0, S3_0_c15_c15_1 " : "=r" (reg));
+ reg = reg | BIT(18);
+ isb();
+ asm volatile("msr S3_0_c15_c15_1, %0" : : "r" (reg));
+ isb();
+ }
+ spin_unlock_irqrestore(&spinlock_32bit_app, flags);
+}
+EXPORT_SYMBOL(set_app_setting_bit_for_32bit_apps);
+
+void clear_app_setting_bit_for_32bit_apps(void)
+{
+ unsigned long flags;
+ uint64_t reg;
+
+ spin_lock_irqsave(&spinlock_32bit_app, flags);
+ if (use_32bit_app_setting) {
+ asm volatile("mrs %0, S3_0_c15_c15_0 " : "=r" (reg));
+ reg = reg & ~BIT(24);
+ isb();
+ asm volatile("msr S3_0_c15_c15_0, %0" : : "r" (reg));
+ isb();
+ asm volatile("mrs %0, S3_0_c15_c15_1 " : "=r" (reg));
+ reg = reg & ~BIT(18);
+ reg = reg & ~BIT(2);
+ reg = reg & ~BIT(0);
+ isb();
+ asm volatile("msr S3_0_c15_c15_1, %0" : : "r" (reg));
+ isb();
+ } else if (use_32bit_app_setting_pro) {
+ asm volatile("mrs %0, S3_0_c15_c15_1 " : "=r" (reg));
+ reg = reg & ~BIT(18);
+ isb();
+ asm volatile("msr S3_0_c15_c15_1, %0" : : "r" (reg));
+ isb();
+ }
+ spin_unlock_irqrestore(&spinlock_32bit_app, flags);
+}
+EXPORT_SYMBOL(clear_app_setting_bit_for_32bit_apps);
+
+static int __init init_app_api(void)
+{
+ spin_lock_init(&spinlock);
+ spin_lock_init(&spinlock_32bit_app);
+ return 0;
+}
+early_initcall(init_app_api);
diff --git a/arch/arm64/kernel/app_setting.c b/arch/arm64/kernel/app_setting.c
new file mode 100644
index 000000000000..0c6b00317645
--- /dev/null
+++ b/arch/arm64/kernel/app_setting.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+
+#include <asm/app_api.h>
+
+#define MAX_LEN 100
+
+static char *lib_names[MAX_ENTRIES];
+static unsigned int count;
+static struct mutex mutex;
+
+static char lib_str[MAX_LEN] = "";
+static struct kparam_string kps = {
+ .string = lib_str,
+ .maxlen = MAX_LEN,
+};
+static int set_name(const char *str, struct kernel_param *kp);
+module_param_call(lib_name, set_name, param_get_string, &kps, S_IWUSR);
+
+bool use_app_setting = true;
+module_param(use_app_setting, bool, 0644);
+MODULE_PARM_DESC(use_app_setting, "control use of app specific settings");
+
+bool use_32bit_app_setting = true;
+module_param(use_32bit_app_setting, bool, 0644);
+MODULE_PARM_DESC(use_32bit_app_setting, "control use of 32 bit app specific settings");
+
+bool use_32bit_app_setting_pro;
+module_param(use_32bit_app_setting_pro, bool, 0644);
+MODULE_PARM_DESC(use_32bit_app_setting_pro, "control use of 32 bit app specific settings");
+
+static int set_name(const char *str, struct kernel_param *kp)
+{
+ int len = strlen(str);
+ char *name;
+
+ if (len >= MAX_LEN) {
+ pr_err("app_setting: name string too long\n");
+ return -ENOSPC;
+ }
+
+ /*
+ * echo adds '\n' which we need to chop off later
+ */
+ name = kzalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strlcpy(name, str, len + 1);
+
+ if (name[len - 1] == '\n')
+ name[len - 1] = '\0';
+
+ mutex_lock(&mutex);
+ if (count < MAX_ENTRIES) {
+ lib_names[count] = name;
+ /*
+ * mb to ensure that the new lib_names entry is present
+ * before updating the view presented by get_lib_names
+ */
+ mb();
+ count++;
+ } else {
+ pr_err("app_setting: set name failed. Max entries reached\n");
+ kfree(name);
+ mutex_unlock(&mutex);
+ return -EPERM;
+ }
+ mutex_unlock(&mutex);
+
+ return 0;
+}
+
+void switch_app_setting_bit(struct task_struct *prev, struct task_struct *next)
+{
+ if (prev->mm && unlikely(prev->mm->app_setting))
+ clear_app_setting_bit(APP_SETTING_BIT);
+
+ if (next->mm && unlikely(next->mm->app_setting))
+ set_app_setting_bit(APP_SETTING_BIT);
+}
+EXPORT_SYMBOL(switch_app_setting_bit);
+
+void switch_32bit_app_setting_bit(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if (prev->mm && unlikely(is_compat_thread(task_thread_info(prev))))
+ clear_app_setting_bit_for_32bit_apps();
+
+ if (next->mm && unlikely(is_compat_thread(task_thread_info(next))))
+ set_app_setting_bit_for_32bit_apps();
+}
+EXPORT_SYMBOL(switch_32bit_app_setting_bit);
+
+void apply_app_setting_bit(struct file *file)
+{
+ bool found = false;
+ int i;
+
+ if (file && file->f_path.dentry) {
+ const char *name = file->f_path.dentry->d_name.name;
+
+ for (i = 0; i < count; i++) {
+ if (unlikely(!strcmp(name, lib_names[i]))) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ preempt_disable();
+ set_app_setting_bit(APP_SETTING_BIT);
+ /* This will take care of child processes as well */
+ current->mm->app_setting = 1;
+ preempt_enable();
+ }
+ }
+}
+EXPORT_SYMBOL(apply_app_setting_bit);
+
+static int __init app_setting_init(void)
+{
+ mutex_init(&mutex);
+ return 0;
+}
+module_init(app_setting_init);
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 2dc44406a7ad..546ce8979b3a 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -26,8 +26,10 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/kprobes.h>
#include <linux/arm-smccc.h>
+#include <asm/cacheflush.h>
#include <asm/checksum.h>
EXPORT_SYMBOL(copy_page);
@@ -68,7 +70,12 @@ EXPORT_SYMBOL(test_and_change_bit);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
+NOKPROBE_SYMBOL(_mcount);
#endif
+ /* caching functions */
+EXPORT_SYMBOL(__dma_inv_range);
+EXPORT_SYMBOL(__dma_clean_range);
+EXPORT_SYMBOL(__dma_flush_range);
/* arm-smccc */
EXPORT_SYMBOL(arm_smccc_smc);
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index fae82901ae81..e012ecd018ee 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -366,6 +366,21 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
return res;
}
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
+{
+ u32 cc_bits = opcode >> 28;
+
+ if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+ if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
+ return ARM_OPCODE_CONDTEST_PASS;
+ else
+ return ARM_OPCODE_CONDTEST_FAIL;
+ }
+ return ARM_OPCODE_CONDTEST_UNCOND;
+}
+
/*
* swp_handler logs the id of calling process, dissects the instruction, sanity
* checks the memory location, calls emulate_swpX for the actual operation and
@@ -380,7 +395,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
type = instr & TYPE_SWPB;
- switch (arm_check_condition(instr, regs->pstate)) {
+ switch (aarch32_check_condition(instr, regs->pstate)) {
case ARM_OPCODE_CONDTEST_PASS:
break;
case ARM_OPCODE_CONDTEST_FAIL:
@@ -461,7 +476,7 @@ static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
- switch (arm_check_condition(instr, regs->pstate)) {
+ switch (aarch32_check_condition(instr, regs->pstate)) {
case ARM_OPCODE_CONDTEST_PASS:
break;
case ARM_OPCODE_CONDTEST_FAIL:
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 3fa949f04ce4..737eb8b80485 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -22,9 +22,11 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
+#include <linux/suspend.h>
#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/signal32.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
@@ -59,6 +61,17 @@ int main(void)
DEFINE(S_X5, offsetof(struct pt_regs, regs[5]));
DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
DEFINE(S_X7, offsetof(struct pt_regs, regs[7]));
+ DEFINE(S_X8, offsetof(struct pt_regs, regs[8]));
+ DEFINE(S_X10, offsetof(struct pt_regs, regs[10]));
+ DEFINE(S_X12, offsetof(struct pt_regs, regs[12]));
+ DEFINE(S_X14, offsetof(struct pt_regs, regs[14]));
+ DEFINE(S_X16, offsetof(struct pt_regs, regs[16]));
+ DEFINE(S_X18, offsetof(struct pt_regs, regs[18]));
+ DEFINE(S_X20, offsetof(struct pt_regs, regs[20]));
+ DEFINE(S_X22, offsetof(struct pt_regs, regs[22]));
+ DEFINE(S_X24, offsetof(struct pt_regs, regs[24]));
+ DEFINE(S_X26, offsetof(struct pt_regs, regs[26]));
+ DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
#ifdef CONFIG_COMPAT
@@ -71,6 +84,18 @@ int main(void)
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
+#ifdef CONFIG_COMPAT
+ DEFINE(COMPAT_SIGFRAME_REGS_OFFSET,
+ offsetof(struct compat_sigframe, uc) +
+ offsetof(struct compat_ucontext, uc_mcontext) +
+ offsetof(struct compat_sigcontext, arm_r0));
+ DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET,
+ offsetof(struct compat_rt_sigframe, sig) +
+ offsetof(struct compat_sigframe, uc) +
+ offsetof(struct compat_ucontext, uc_mcontext) +
+ offsetof(struct compat_sigcontext, arm_r0));
+ BLANK();
+#endif
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
BLANK();
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
@@ -84,40 +109,6 @@ int main(void)
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
- DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
- DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
- DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
- DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
- DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
- DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
- DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
- BLANK();
- DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
- DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec));
- DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec));
- DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
- DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
- DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
- DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
- DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
- DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
- DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
- DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult));
- DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult));
- DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
- DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
- DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
- DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall));
- BLANK();
- DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec));
- DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec));
- DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec));
- DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec));
- BLANK();
- DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
- DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
- BLANK();
#ifdef CONFIG_THREAD_INFO_IN_TASK
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
@@ -128,62 +119,27 @@ int main(void)
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
- DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
- DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
- DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
- DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
+ DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
- DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
- DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr));
- DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr));
- DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr));
- DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr));
- DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr));
- DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
- DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2));
- DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
- DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
- DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
- DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
- DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
- DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
- DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
- DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
- DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
- DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
- DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
- DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
- DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
- DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
- DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
- DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
- DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
- DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
- DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
- DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
- DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
- DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
- DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
- DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
- DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
- DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
- DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
- DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
- DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
+ DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
+ DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
#endif
DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
BLANK();
+ DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
+ DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
+ DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
+ BLANK();
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
#endif
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 000000000000..76225c2611ea
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,87 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
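+/*
+ * Each architectural vector entry is 0x80 bytes (32 instructions) apart:
+ * 31 NOPs plus the final branch pad every ventry slot to exactly that
+ * size, and the vectors macro emits all 16 entries of a full vector page.
+ */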
+.macro ventry target
+ .rept 31
+ nop
+ .endr
+ b \target
+.endm
+
+.macro vectors target
+ ventry \target + 0x000
+ ventry \target + 0x080
+ ventry \target + 0x100
+ ventry \target + 0x180
+
+ ventry \target + 0x200
+ ventry \target + 0x280
+ ventry \target + 0x300
+ ventry \target + 0x380
+
+ ventry \target + 0x400
+ ventry \target + 0x480
+ ventry \target + 0x500
+ ventry \target + 0x580
+
+ ventry \target + 0x600
+ ventry \target + 0x680
+ ventry \target + 0x700
+ ventry \target + 0x780
+.endm
+
+ .align 11
+ENTRY(__bp_harden_hyp_vecs_start)
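+	/*
+	 * Four 2K slots, each a complete copy of the hyp vectors;
+	 * cpu_errata.c patches one slot per distinct hardening callback.
+	 */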
+ .rept 4
+ vectors __kvm_hyp_vector
+ .endr
+ENTRY(__bp_harden_hyp_vecs_end)
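+
+/*
+ * Invalidate the branch predictor via firmware: 0x84000000 is the PSCI 0.2
+ * PSCI_VERSION function ID, and updated firmware invalidates the predictor
+ * as a side effect of the call. The caller-saved registers x0-x17 are
+ * preserved around the SMC.
+ */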
+ENTRY(__psci_hyp_bp_inval_start)
+ sub sp, sp, #(8 * 18)
+ stp x16, x17, [sp, #(16 * 0)]
+ stp x14, x15, [sp, #(16 * 1)]
+ stp x12, x13, [sp, #(16 * 2)]
+ stp x10, x11, [sp, #(16 * 3)]
+ stp x8, x9, [sp, #(16 * 4)]
+ stp x6, x7, [sp, #(16 * 5)]
+ stp x4, x5, [sp, #(16 * 6)]
+ stp x2, x3, [sp, #(16 * 7)]
+ stp x0, x1, [sp, #(16 * 8)]
+ mov x0, #0x84000000
+ smc #0
+ ldp x16, x17, [sp, #(16 * 0)]
+ ldp x14, x15, [sp, #(16 * 1)]
+ ldp x12, x13, [sp, #(16 * 2)]
+ ldp x10, x11, [sp, #(16 * 3)]
+ ldp x8, x9, [sp, #(16 * 4)]
+ ldp x6, x7, [sp, #(16 * 5)]
+ ldp x4, x5, [sp, #(16 * 6)]
+ ldp x2, x3, [sp, #(16 * 7)]
+ ldp x0, x1, [sp, #(16 * 8)]
+ add sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
+
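+/*
+ * Overwrite the CPU's return-address (link) stack: each 'bl . + 4' call
+ * pushes a benign entry, and sixteen of them displace any prior state.
+ */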
+ENTRY(__qcom_hyp_sanitize_link_stack_start)
+ stp x29, x30, [sp, #-16]!
+ .rept 16
+ bl . + 4
+ .endr
+ ldp x29, x30, [sp], #16
+ENTRY(__qcom_hyp_sanitize_link_stack_end)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 06afd04e02c0..e857248dd980 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -29,12 +29,160 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
entry->midr_range_max);
}
+static bool __maybe_unused
+is_kryo_midr(const struct arm64_cpu_capabilities *entry)
+{
+ u32 model;
+
+ model = read_cpuid_id();
+ model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+ MIDR_ARCHITECTURE_MASK;
+
+ return model == entry->midr_model;
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+extern char __qcom_hyp_sanitize_link_stack_start[];
+extern char __qcom_hyp_sanitize_link_stack_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ static int last_slot = -1;
+ static DEFINE_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+
+ spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ last_slot++;
+ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+ / SZ_2K) <= last_slot);
+ slot = last_slot;
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ spin_unlock(&bp_lock);
+}
+#else
+#define __psci_hyp_bp_inval_start NULL
+#define __psci_hyp_bp_inval_end NULL
+#define __qcom_hyp_sanitize_link_stack_start NULL
+#define __qcom_hyp_sanitize_link_stack_end NULL
+
+static void __maybe_unused __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void __maybe_unused install_bp_hardening_cb(
+ const struct arm64_cpu_capabilities *entry,
+ bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ u64 pfr0;
+
+ if (!entry->matches(entry))
+ return;
+
+ pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return;
+
+ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+
+ if (psci_ops.get_version)
+ install_bp_hardening_cb(entry,
+ (bp_hardening_cb_t)psci_ops.get_version,
+ __psci_hyp_bp_inval_start,
+ __psci_hyp_bp_inval_end);
+
+ return 0;
+}
+
+static void __maybe_unused qcom_link_stack_sanitization(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
+static void __maybe_unused qcom_bp_hardening(void)
+{
+ qcom_link_stack_sanitization();
+ if (psci_ops.get_version)
+ psci_ops.get_version();
+}
+
+static int __maybe_unused enable_qcom_bp_hardening(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+
+ install_bp_hardening_cb(entry,
+ (bp_hardening_cb_t)qcom_bp_hardening,
+ __psci_hyp_bp_inval_start,
+ __psci_hyp_bp_inval_end);
+ return 0;
+}
+
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
#define MIDR_RANGE(model, min, max) \
.matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = min, \
.midr_range_max = max
+#define MIDR_ALL_VERSIONS(model) \
+ .matches = is_affected_midr_range, \
+ .midr_model = model, \
+ .midr_range_min = 0, \
+ .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -79,6 +227,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_WORKAROUND_845719,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
},
+ {
+ /* Kryo2xx Silver rAp4 */
+ .desc = "Kryo2xx Silver erratum 845719",
+ .capability = ARM64_WORKAROUND_845719,
+ MIDR_RANGE(MIDR_KRYO2XX_SILVER, 0xA00004, 0xA00004),
+ },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
{
@@ -97,6 +251,39 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
(1 << MIDR_VARIANT_SHIFT) | 1),
},
#endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_KRYO2XX_GOLD),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ .midr_model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ .enable = enable_qcom_bp_hardening,
+ },
+#endif
{
}
};
@@ -105,3 +292,8 @@ void check_local_cpu_errata(void)
{
update_cpu_capabilities(arm64_errata, "enabling workaround for");
}
+
+void __init enable_errata_workarounds(void)
+{
+ enable_cpu_capabilities(arm64_errata);
+}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aee34290cd09..2a2bf5231f6a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -29,6 +29,7 @@
#include <asm/cpu_ops.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
+#include <asm/virt.h>
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -91,6 +92,7 @@ static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
+ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
@@ -656,6 +658,11 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
}
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+{
+ return is_kernel_in_hyp_mode();
+}
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
@@ -744,6 +751,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = cpufeature_pan_not_uao,
},
#endif /* CONFIG_ARM64_PAN */
+ {
+ .desc = "Virtualization Host Extensions",
+ .capability = ARM64_HAS_VIRT_HOST_EXTN,
+ .matches = runs_at_el2,
+ },
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
{
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -867,8 +879,7 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
int i;
@@ -880,7 +891,8 @@ enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* uses an IPI, giving us a PSTATE that disappears when
* we return.
*/
- stop_machine(caps[i].enable, NULL, cpu_online_mask);
+ stop_machine(caps[i].enable, (void *)&caps[i],
+ cpu_online_mask);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -995,7 +1007,7 @@ void verify_local_cpu_capabilities(void)
if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
fail_incapable_cpu("arm64_features", &caps[i]);
if (caps[i].enable)
- caps[i].enable(NULL);
+ caps[i].enable((void *)&caps[i]);
}
for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
@@ -1027,6 +1039,7 @@ void __init setup_cpu_features(void)
/* Set the CPU feature capabilities */
setup_feature_capabilities();
+ enable_errata_workarounds();
setup_cpu_hwcaps();
/* Advertise that we have computed the system capabilities */
@@ -1040,9 +1053,9 @@ void __init setup_cpu_features(void)
if (!cwg)
pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
cls);
- if (L1_CACHE_BYTES < cls)
- pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
- L1_CACHE_BYTES, cls);
+ if (ARCH_DMA_MINALIGN < cls)
+ pr_warn("ARCH_DMA_MINALIGN smaller than the Cache Writeback Granule (%d < %d)\n",
+ ARCH_DMA_MINALIGN, cls);
}
static bool __maybe_unused
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 95a6fae54740..4b2caefd3a8f 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/elf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -33,6 +34,10 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
+#include <linux/of_fdt.h>
+
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
/*
* In case the boot CPU is hotpluggable, we record its initial state and
@@ -106,7 +111,9 @@ static int c_show(struct seq_file *m, void *v)
int i, j;
bool compat = personality(current->personality) == PER_LINUX32;
- for_each_online_cpu(i) {
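+	/* MIDR_EL1 bits [3:0] hold the revision field */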
+ seq_printf(m, "Processor\t: AArch64 Processor rev %d (%s)\n",
+ read_cpuid_id() & 15, ELF_PLATFORM);
+ for_each_present_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
u32 midr = cpuinfo->reg_midr;
@@ -156,6 +163,11 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
}
+ if (!arch_read_hardware_id)
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+ else
+ seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
+
return 0;
}
@@ -201,7 +213,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
if (l1ip == ICACHE_POLICY_AIVIVT)
set_bit(ICACHEF_AIVIVT, &__icache_flags);
- pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+ pr_debug("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
@@ -215,7 +227,12 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1);
info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1);
info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1);
- info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1);
+ /*
+ * Explicitly mask out the 16KB granule since we do not
+ * want to support it
+ */
+ info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1) &
+ (~MMFR0_EL1_16KGRAN_MASK);
info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1);
info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1);
info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 77fbcabcd9e3..50392fe170a9 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -23,6 +23,7 @@
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
+#include <linux/kprobes.h>
#include <linux/stat.h>
#include <linux/uaccess.h>
@@ -48,6 +49,7 @@ static void mdscr_write(u32 mdscr)
asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
local_dbg_restore(flags);
}
+NOKPROBE_SYMBOL(mdscr_write);
static u32 mdscr_read(void)
{
@@ -55,6 +57,7 @@ static u32 mdscr_read(void)
asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
return mdscr;
}
+NOKPROBE_SYMBOL(mdscr_read);
/*
* Allow root to disable self-hosted debug from userspace.
@@ -103,6 +106,7 @@ void enable_debug_monitors(enum dbg_active_el el)
mdscr_write(mdscr);
}
}
+NOKPROBE_SYMBOL(enable_debug_monitors);
void disable_debug_monitors(enum dbg_active_el el)
{
@@ -123,6 +127,7 @@ void disable_debug_monitors(enum dbg_active_el el)
mdscr_write(mdscr);
}
}
+NOKPROBE_SYMBOL(disable_debug_monitors);
/*
* OS lock clearing.
@@ -173,6 +178,7 @@ static void set_regs_spsr_ss(struct pt_regs *regs)
spsr |= DBG_SPSR_SS;
regs->pstate = spsr;
}
+NOKPROBE_SYMBOL(set_regs_spsr_ss);
static void clear_regs_spsr_ss(struct pt_regs *regs)
{
@@ -182,6 +188,7 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
spsr &= ~DBG_SPSR_SS;
regs->pstate = spsr;
}
+NOKPROBE_SYMBOL(clear_regs_spsr_ss);
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
@@ -225,6 +232,7 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
return retval;
}
+NOKPROBE_SYMBOL(call_step_hook);
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
@@ -253,6 +261,10 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
*/
user_rewind_single_step(current);
} else {
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+#endif
if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
return 0;
@@ -266,6 +278,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
return 0;
}
+NOKPROBE_SYMBOL(single_step_handler);
/*
* Breakpoint handler is re-entrant as another breakpoint can
@@ -303,6 +316,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
+NOKPROBE_SYMBOL(call_break_hook);
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
@@ -318,13 +332,21 @@ static int brk_handler(unsigned long addr, unsigned int esr,
};
force_sig_info(SIGTRAP, &info, current);
- } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
- pr_warning("Unexpected kernel BRK exception at EL1\n");
+ }
+#ifdef CONFIG_KPROBES
+ else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+ if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
+ return -EFAULT;
+ }
+#endif
+ else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+ pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
return 0;
}
+NOKPROBE_SYMBOL(brk_handler);
int aarch32_break_handler(struct pt_regs *regs)
{
@@ -369,6 +391,7 @@ int aarch32_break_handler(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
return 0;
}
+NOKPROBE_SYMBOL(aarch32_break_handler);
static int __init debug_traps_init(void)
{
@@ -390,6 +413,7 @@ void user_rewind_single_step(struct task_struct *task)
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
+NOKPROBE_SYMBOL(user_rewind_single_step);
void user_fastforward_single_step(struct task_struct *task)
{
@@ -405,6 +429,7 @@ void kernel_enable_single_step(struct pt_regs *regs)
mdscr_write(mdscr_read() | DBG_MDSCR_SS);
enable_debug_monitors(DBG_ACTIVE_EL1);
}
+NOKPROBE_SYMBOL(kernel_enable_single_step);
void kernel_disable_single_step(void)
{
@@ -412,12 +437,14 @@ void kernel_disable_single_step(void)
mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
disable_debug_monitors(DBG_ACTIVE_EL1);
}
+NOKPROBE_SYMBOL(kernel_disable_single_step);
int kernel_active_single_step(void)
{
WARN_ON(!irqs_disabled());
return mdscr_read() & DBG_MDSCR_SS;
}
+NOKPROBE_SYMBOL(kernel_active_single_step);
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
@@ -427,8 +454,10 @@ void user_enable_single_step(struct task_struct *task)
if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
+NOKPROBE_SYMBOL(user_enable_single_step);
void user_disable_single_step(struct task_struct *task)
{
clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
+NOKPROBE_SYMBOL(user_disable_single_step);
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index f82036e02485..936022f0655e 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,7 @@ ENTRY(entry)
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
- movz x21, #:abs_g0:stext_offset
+ ldr w21, =stext_offset
add x21, x0, x21
/*
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index c44a82f146b1..1ffe15459c92 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -64,4 +64,20 @@ ENTRY(fpsimd_load_partial_state)
ret
ENDPROC(fpsimd_load_partial_state)
+#ifdef CONFIG_ENABLE_FP_SIMD_SETTINGS
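+/*
+ * CPACR_EL1.FPEN is bits [21:20]: 0b01 traps EL0 FP/SIMD accesses while
+ * leaving EL1 untrapped; 0b11 disables the trap entirely.
+ */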
+ENTRY(fpsimd_enable_trap)
+ mrs x0, cpacr_el1
+ bic x0, x0, #(3 << 20)
+ orr x0, x0, #(1 << 20)
+ msr cpacr_el1, x0
+ ret
+ENDPROC(fpsimd_enable_trap)
+ENTRY(fpsimd_disable_trap)
+ mrs x0, cpacr_el1
+ orr x0, x0, #(3 << 20)
+ msr cpacr_el1, x0
+ ret
+ENDPROC(fpsimd_disable_trap)
+#endif
+
#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 4ed652aa62fd..f0ca0eb3b077 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -376,6 +376,7 @@ tsk .req x28 // current thread_info
/*
* Exception vectors.
*/
+ .pushsection ".entry.text", "ax"
.align 11
ENTRY(vectors)
@@ -465,6 +466,9 @@ ENDPROC(el1_error_invalid)
*/
.align 6
el1_sync:
+#ifdef CONFIG_QCOM_TLB_EL2_HANDLER
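+	/* Vendor hook: issue an SMC for the EL2 TLB handler to intercept
+	 * before the exception is handled at EL1. */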
+ smc #0xffff
+#endif
kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
@@ -548,6 +552,7 @@ el1_irq:
bl trace_hardirqs_off
#endif
+ get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
@@ -628,7 +633,7 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
- b.eq el0_fpsimd_acc
+ b.eq el0_fpsimd_acc_compat
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
@@ -681,13 +686,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
- bl do_mem_abort
+ bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
@@ -699,6 +706,17 @@ el0_fpsimd_acc:
mov x1, sp
bl do_fpsimd_acc
b ret_to_user
+el0_fpsimd_acc_compat:
+ /*
+ * Floating Point or Advanced SIMD access
+ */
+ enable_dbg
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_fpsimd_acc_compat
+ b ret_to_user
+
el0_fpsimd_exc:
/*
* Floating Point or Advanced SIMD exception
@@ -809,6 +827,33 @@ ENTRY(cpu_switch_to)
ldp x27, x28, [x8], #16
ldp x29, x9, [x8], #16
ldr lr, [x8]
+#ifdef CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW
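+	/*
+	 * Each instruction below rewrites a register with its own value,
+	 * architecturally a NOP; per the config name, this rebalances the
+	 * register file across the context switch on parts that want it.
+	 */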
+ orr x13, x13, x13
+ orr x14, x14, x14
+ orr x15, x15, x15
+ orr x16, x16, x16
+ orr x17, x17, x17
+ orr x18, x18, x18
+ orr x19, x19, x19
+ orr x20, x20, x20
+ orr x21, x21, x21
+ mov v0.16b, v0.16b
+ mov v1.16b, v1.16b
+ mov v2.16b, v2.16b
+ mov v3.16b, v3.16b
+ mov v4.16b, v4.16b
+ mov v5.16b, v5.16b
+ mov v6.16b, v6.16b
+ mov v7.16b, v7.16b
+ mov v8.16b, v8.16b
+ mov v9.16b, v9.16b
+ mov v10.16b, v10.16b
+ mov v11.16b, v11.16b
+ mov v12.16b, v12.16b
+ mov v13.16b, v13.16b
+ mov v14.16b, v14.16b
+ mov v15.16b, v15.16b
+#endif
mov sp, x9
#ifdef CONFIG_THREAD_INFO_IN_TASK
msr sp_el0, x1
@@ -953,6 +998,7 @@ __ni_sys_trace:
bl do_ni_syscall
b __sys_trace_return
+ .popsection // .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
* Exception vectors trampoline.
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f995dae1c8fd..7950df171d86 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -20,6 +20,7 @@
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
@@ -27,6 +28,7 @@
#include <asm/fpsimd.h>
#include <asm/cputype.h>
+#include <asm/app_api.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -35,6 +37,8 @@
#define FPEXC_IXF (1 << 4)
#define FPEXC_IDF (1 << 7)
+#define FP_SIMD_BIT 31
+
/*
* In order to reduce the number of times the FPSIMD state is needlessly saved
* and restored, we need to keep track of two things:
@@ -88,14 +92,42 @@
* whatever is in the FPSIMD registers is not saved to memory, but discarded.
*/
static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+static DEFINE_PER_CPU(int, fpsimd_stg_enable);
+
+static int fpsimd_settings = 0x1; /* default = 0x1 */
+module_param(fpsimd_settings, int, 0644);
+
+void fpsimd_settings_enable(void)
+{
+ set_app_setting_bit(FP_SIMD_BIT);
+}
+
+void fpsimd_settings_disable(void)
+{
+ clear_app_setting_bit(FP_SIMD_BIT);
+}
/*
* Trapped FP/ASIMD access.
*/
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
- /* TODO: implement lazy context saving/restoring */
- WARN_ON(1);
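+	/*
+	 * A 64-bit task took the FP/SIMD trap: drop the EL0 trap and clear
+	 * the FP/SIMD app-setting bit for this CPU.
+	 */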
+ if (!fpsimd_settings)
+ return;
+
+ fpsimd_disable_trap();
+ fpsimd_settings_disable();
+ this_cpu_write(fpsimd_stg_enable, 0);
+}
+
+void do_fpsimd_acc_compat(unsigned int esr, struct pt_regs *regs)
+{
+ if (!fpsimd_settings)
+ return;
+
+ fpsimd_disable_trap();
+ fpsimd_settings_enable();
+ this_cpu_write(fpsimd_stg_enable, 1);
}
/*
@@ -135,6 +167,11 @@ void fpsimd_thread_switch(struct task_struct *next)
if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
+ if (fpsimd_settings && __this_cpu_read(fpsimd_stg_enable)) {
+ fpsimd_settings_disable();
+ this_cpu_write(fpsimd_stg_enable, 0);
+ }
+
if (next->mm) {
/*
* If we are switching to a task whose most recent userland
@@ -152,6 +189,14 @@ void fpsimd_thread_switch(struct task_struct *next)
else
set_ti_thread_flag(task_thread_info(next),
TIF_FOREIGN_FPSTATE);
+
+ if (!fpsimd_settings)
+ return;
+
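+	/* Arm the EL0 trap only for 32-bit tasks so that their first
+	 * FP/SIMD use re-applies the app-specific settings. */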
+ if (test_ti_thread_flag(task_thread_info(next), TIF_32BIT))
+ fpsimd_enable_trap();
+ else
+ fpsimd_disable_trap();
}
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8168277277dc..87c56a9bb41c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -51,9 +51,6 @@
#error TEXT_OFFSET must be less than 2MB
#endif
-#define KERNEL_START _text
-#define KERNEL_END _end
-
/*
* Kernel startup entry point.
* ---------------------------
@@ -102,8 +99,6 @@ _head:
#endif
#ifdef CONFIG_EFI
- .globl __efistub_stext_offset
- .set __efistub_stext_offset, stext - _head
.align 3
pe_header:
.ascii "PE"
@@ -123,11 +118,11 @@ optional_header:
.short 0x20b // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
- .long _end - stext // SizeOfCode
+ .long _end - efi_header_end // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
.long __efistub_entry - _head // AddressOfEntryPoint
- .long __efistub_stext_offset // BaseOfCode
+ .long efi_header_end - _head // BaseOfCode
extra_header_fields:
.quad 0 // ImageBase
@@ -144,7 +139,7 @@ extra_header_fields:
.long _end - _head // SizeOfImage
// Everything before the kernel image is considered part of the header
- .long __efistub_stext_offset // SizeOfHeaders
+ .long efi_header_end - _head // SizeOfHeaders
.long 0 // CheckSum
.short 0xa // Subsystem (EFI application)
.short 0 // DllCharacteristics
@@ -188,10 +183,10 @@ section_table:
.byte 0
.byte 0
.byte 0 // end of 0 padding of section name
- .long _end - stext // VirtualSize
- .long __efistub_stext_offset // VirtualAddress
- .long _edata - stext // SizeOfRawData
- .long __efistub_stext_offset // PointerToRawData
+ .long _end - efi_header_end // VirtualSize
+ .long efi_header_end - _head // VirtualAddress
+ .long _edata - efi_header_end // SizeOfRawData
+ .long efi_header_end - _head // PointerToRawData
.long 0 // PointerToRelocations (0 for executables)
.long 0 // PointerToLineNumbers (0 for executables)
@@ -200,15 +195,18 @@ section_table:
.long 0xe0500020 // Characteristics (section flags)
/*
- * EFI will load stext onwards at the 4k section alignment
+ * EFI will load .text onwards at the 4k section alignment
* described in the PE/COFF header. To ensure that instruction
* sequences using an adrp and a :lo12: immediate will function
- * correctly at this alignment, we must ensure that stext is
+ * correctly at this alignment, we must ensure that .text is
* placed at a 4k boundary in the Image to begin with.
*/
.align 12
+efi_header_end:
#endif
+ __INIT
+
ENTRY(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
@@ -222,13 +220,11 @@ ENTRY(stext)
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
- ldr x27, 0f // address to jump to after
+ bl __cpu_setup // initialise processor
+ adr_l x27, __primary_switch // address to jump to after
// MMU has been enabled
- adr_l lr, __enable_mmu // return (PIC) address
- b __cpu_setup // initialise processor
+ b __enable_mmu
ENDPROC(stext)
- .align 3
-0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
@@ -338,7 +334,7 @@ __create_page_tables:
cmp x0, x6
b.lo 1b
- ldr x7, =SWAPPER_MM_MMUFLAGS
+ mov x7, SWAPPER_MM_MMUFLAGS
/*
* Create the identity mapping.
@@ -394,12 +390,13 @@ __create_page_tables:
* Map the kernel image (starting with PHYS_OFFSET).
*/
mov x0, x26 // swapper_pg_dir
- ldr x5, =KIMAGE_VADDR
+ mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
create_pgd_entry x0, x5, x3, x6
- ldr w6, kernel_img_size
- add x6, x6, x5
- mov x3, x24 // phys offset
+ adrp x6, _end // runtime __pa(_end)
+ adrp x3, _text // runtime __pa(_text)
+ sub x6, x6, x3 // _end - _text
+ add x6, x6, x5 // runtime __va(_end)
create_block_map x0, x7, x3, x5, x6
/*
@@ -414,16 +411,13 @@ __create_page_tables:
ret x28
ENDPROC(__create_page_tables)
-
-kernel_img_size:
- .long _end - (_head - TEXT_OFFSET)
.ltorg
/*
* The following fragment of code is executed with the MMU enabled.
*/
.set initial_sp, init_thread_union + THREAD_START_SP
-__mmap_switched:
+__primary_switched:
mov x28, lr // preserve LR
adr_l x8, vectors // load VBAR_EL1 with virtual
@@ -438,44 +432,6 @@ __mmap_switched:
bl __pi_memset
dsb ishst // Make zero page visible to PTW
-#ifdef CONFIG_RELOCATABLE
-
- /*
- * Iterate over each entry in the relocation table, and apply the
- * relocations in place.
- */
- adr_l x8, __dynsym_start // start of symbol table
- adr_l x9, __reloc_start // start of reloc table
- adr_l x10, __reloc_end // end of reloc table
-
-0: cmp x9, x10
- b.hs 2f
- ldp x11, x12, [x9], #24
- ldr x13, [x9, #-8]
- cmp w12, #R_AARCH64_RELATIVE
- b.ne 1f
- add x13, x13, x23 // relocate
- str x13, [x11, x23]
- b 0b
-
-1: cmp w12, #R_AARCH64_ABS64
- b.ne 0b
- add x12, x12, x12, lsl #1 // symtab offset: 24x top word
- add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
- ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
- ldr x15, [x12, #8] // Elf64_Sym::st_value
- cmp w14, #-0xf // SHN_ABS (0xfff1) ?
- add x14, x15, x23 // relocate
- csel x15, x14, x15, ne
- add x15, x13, x15
- str x15, [x11, x23]
- b 0b
-
-2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr
- dc cvac, x8 // value visible to secondaries
- dsb sy // with MMU off
-#endif
-
#ifdef CONFIG_THREAD_INFO_IN_TASK
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
@@ -512,13 +468,13 @@ __mmap_switched:
0:
#endif
b start_kernel
-ENDPROC(__mmap_switched)
+ENDPROC(__primary_switched)
/*
* end early head section, begin head code that is also used for
* hotplug and needs to have the same protections as the text region
*/
- .section ".text","ax"
+ .section ".idmap.text","ax"
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
@@ -628,7 +584,7 @@ ENDPROC(el2_setup)
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
* in x20. See arch/arm64/include/asm/virt.h for more info.
*/
-ENTRY(set_cpu_boot_mode_flag)
+set_cpu_boot_mode_flag:
adr_l x1, __boot_cpu_mode
cmp w20, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -640,17 +596,29 @@ ENTRY(set_cpu_boot_mode_flag)
ENDPROC(set_cpu_boot_mode_flag)
/*
+ * These values are written with the MMU off, but read with the MMU on.
+ * Writers will invalidate the corresponding address, discarding up to a
+ * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
+ * sufficient alignment that the CWG doesn't overlap another section.
+ */
+ .pushsection ".mmuoff.data.write", "aw"
+/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
- .pushsection .data..cacheline_aligned
- .align L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
.long BOOT_CPU_MODE_EL1
+/*
+ * The booting CPU updates the failed status @__early_cpu_boot_status,
+ * with MMU turned off.
+ */
+ENTRY(__early_cpu_boot_status)
+ .long 0
+
.popsection
/*
@@ -661,7 +629,7 @@ ENTRY(secondary_holding_pen)
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
- ldr x1, =MPIDR_HWID_BITMASK
+ mov_q x1, MPIDR_HWID_BITMASK
and x0, x0, x1
adr_l x3, secondary_holding_pen_release
pen: ldr x4, [x3]
@@ -681,7 +649,7 @@ ENTRY(secondary_entry)
b secondary_startup
ENDPROC(secondary_entry)
-ENTRY(secondary_startup)
+secondary_startup:
/*
* Common entry point for secondary CPUs.
*/
@@ -689,14 +657,11 @@ ENTRY(secondary_startup)
adrp x26, swapper_pg_dir
bl __cpu_setup // initialise processor
- ldr x8, kimage_vaddr
- ldr w9, 0f
- sub x27, x8, w9, sxtw // address to jump to after enabling the MMU
+ adr_l x27, __secondary_switch // address to jump to after enabling the MMU
b __enable_mmu
ENDPROC(secondary_startup)
-0: .long (_text - TEXT_OFFSET) - __secondary_switched
-ENTRY(__secondary_switched)
+__secondary_switched:
adr_l x5, vectors
msr vbar_el1, x5
isb
@@ -727,9 +692,8 @@ ENDPROC(__secondary_switched)
* Checks if the selected granule size is supported by the CPU.
* If it isn't, park the CPU
*/
- .section ".idmap.text", "ax"
-__enable_mmu:
- mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value
+ENTRY(__enable_mmu)
+ mrs x22, sctlr_el1 // preserve old SCTLR_EL1 value
mrs x1, ID_AA64MMFR0_EL1
ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -756,7 +720,7 @@ __enable_mmu:
* to take into account by discarding the current kernel mapping and
* creating a new one.
*/
- msr sctlr_el1, x18 // disable the MMU
+ msr sctlr_el1, x22 // disable the MMU
isb
bl __create_page_tables // recreate kernel mapping
@@ -768,7 +732,6 @@ __enable_mmu:
ic iallu // flush instructions fetched
dsb nsh // via old mapping
isb
- add x27, x27, x23 // relocated __mmap_switched
#endif
br x27
ENDPROC(__enable_mmu)
@@ -777,3 +740,38 @@ __no_granule_support:
wfe
b __no_granule_support
ENDPROC(__no_granule_support)
+
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * Iterate over each entry in the relocation table, and apply the
+ * relocations in place.
+ */
+ ldr w9, =__rela_offset // offset to reloc table
+ ldr w10, =__rela_size // size of reloc table
+
+ mov_q x11, KIMAGE_VADDR // default virtual offset
+ add x11, x11, x23 // actual virtual offset
+ add x9, x9, x11 // __va(.rela)
+ add x10, x9, x10 // __va(.rela) + sizeof(.rela)
+
+0: cmp x9, x10
+ b.hs 1f
+ ldp x11, x12, [x9], #24
+ ldr x13, [x9, #-8]
+ cmp w12, #R_AARCH64_RELATIVE
+ b.ne 0b
+ add x13, x13, x23 // relocate
+ str x13, [x11, x23]
+ b 0b
+
+1:
+#endif
+ ldr x8, =__primary_switched
+ br x8
+ENDPROC(__primary_switch)
+
+__secondary_switch:
+ ldr x8, =__secondary_switched
+ br x8
+ENDPROC(__secondary_switch)
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
new file mode 100644
index 000000000000..46f29b6560ec
--- /dev/null
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -0,0 +1,176 @@
+/*
+ * Hibernate low-level support
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ * Author: James Morse <james.morse@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/linkage.h>
+#include <linux/errno.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/cputype.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/virt.h>
+
+/*
+ * To prevent the possibility of old and new partial table walks being visible
+ * in the tlb, switch the ttbr to a zero page when we invalidate the old
+ * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
+ * Even switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+.macro break_before_make_ttbr_switch zero_page, page_table
+ msr ttbr1_el1, \zero_page
+ isb
+ tlbi vmalle1is
+ dsb ish
+ msr ttbr1_el1, \page_table
+ isb
+.endm
+
+
+/*
+ * Resume from hibernate
+ *
+ * Loads temporary page tables then restores the memory image.
+ * Finally branches to cpu_resume() to restore the state saved by
+ * swsusp_arch_suspend().
+ *
+ * Because this code has to be copied to a 'safe' page, it can't call out to
+ * other functions by PC-relative address. Also remember that it may be
+ * mid-way through over-writing other functions. For this reason it contains
+ * code from flush_icache_range() and uses the copy_page() macro.
+ *
+ * This 'safe' page is mapped via ttbr0, and executed from there. This function
+ * switches to a copy of the linear map in ttbr1, performs the restore, then
+ * switches ttbr1 to the original kernel's swapper_pg_dir.
+ *
+ * All of memory gets written to, including code. We need to clean the kernel
+ * text to the Point of Coherence (PoC) before secondary cores can be booted.
+ * Because the kernel modules and executable pages mapped to user space are
+ * also written as data, we clean all pages we touch to the Point of
+ * Unification (PoU).
+ *
+ * x0: physical address of temporary page tables
+ * x1: physical address of swapper page tables
+ * x2: address of cpu_resume
+ * x3: linear map address of restore_pblist in the current kernel
+ * x4: physical address of __hyp_stub_vectors, or 0
+ * x5: physical address of a zero page that remains zero after resume
+ */
+.pushsection ".hibernate_exit.text", "ax"
+ENTRY(swsusp_arch_suspend_exit)
+ /*
+ * We execute from ttbr0, change ttbr1 to our copied linear map tables
+ * with a break-before-make via the zero page
+ */
+ break_before_make_ttbr_switch x5, x0
+
+ mov x21, x1
+ mov x30, x2
+ mov x24, x4
+ mov x25, x5
+
+ /* walk the restore_pblist and use copy_page() to over-write memory */
+ mov x19, x3
+
+1: ldr x10, [x19, #HIBERN_PBE_ORIG]
+ mov x0, x10
+ ldr x1, [x19, #HIBERN_PBE_ADDR]
+
+ copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
+
+ add x1, x10, #PAGE_SIZE
+ /* Clean the copied page to PoU - based on flush_icache_range() */
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x4, x10, x3
+2: dc cvau, x4 /* clean D line / unified line */
+ add x4, x4, x2
+ cmp x4, x1
+ b.lo 2b
+
+ ldr x19, [x19, #HIBERN_PBE_NEXT]
+ cbnz x19, 1b
+ dsb ish /* wait for PoU cleaning to finish */
+
+ /* switch to the restored kernel's page tables */
+ break_before_make_ttbr_switch x25, x21
+
+ ic ialluis
+ dsb ish
+ isb
+
+ cbz x24, 3f /* Do we need to re-initialise EL2? */
+ hvc #0
+3: ret
+
+ .ltorg
+ENDPROC(swsusp_arch_suspend_exit)
+
+/*
+ * Restore the hyp stub.
+ * This must be done before the hibernate page is unmapped by _cpu_resume(),
+ * but happens before any of the hyp-stub's code is cleaned to PoC.
+ *
+ * x24: The physical address of __hyp_stub_vectors
+ */
+el1_sync:
+ msr vbar_el2, x24
+ eret
+ENDPROC(el1_sync)
+
+.macro invalid_vector label
+\label:
+ b \label
+ENDPROC(\label)
+.endm
+
+ invalid_vector el2_sync_invalid
+ invalid_vector el2_irq_invalid
+ invalid_vector el2_fiq_invalid
+ invalid_vector el2_error_invalid
+ invalid_vector el1_sync_invalid
+ invalid_vector el1_irq_invalid
+ invalid_vector el1_fiq_invalid
+ invalid_vector el1_error_invalid
+
+/* el2 vectors - switch el2 here while we restore the memory image. */
+ .align 11
+ENTRY(hibernate_el2_vectors)
+ ventry el2_sync_invalid // Synchronous EL2t
+ ventry el2_irq_invalid // IRQ EL2t
+ ventry el2_fiq_invalid // FIQ EL2t
+ ventry el2_error_invalid // Error EL2t
+
+ ventry el2_sync_invalid // Synchronous EL2h
+ ventry el2_irq_invalid // IRQ EL2h
+ ventry el2_fiq_invalid // FIQ EL2h
+ ventry el2_error_invalid // Error EL2h
+
+ ventry el1_sync // Synchronous 64-bit EL1
+ ventry el1_irq_invalid // IRQ 64-bit EL1
+ ventry el1_fiq_invalid // FIQ 64-bit EL1
+ ventry el1_error_invalid // Error 64-bit EL1
+
+ ventry el1_sync_invalid // Synchronous 32-bit EL1
+ ventry el1_irq_invalid // IRQ 32-bit EL1
+ ventry el1_fiq_invalid // FIQ 32-bit EL1
+ ventry el1_error_invalid // Error 32-bit EL1
+END(hibernate_el2_vectors)
+
+.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
new file mode 100644
index 000000000000..8761eb95ed27
--- /dev/null
+++ b/arch/arm64/kernel/hibernate.c
@@ -0,0 +1,520 @@
+/*
+ * Hibernate support specific for ARM64
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(x) "hibernate: " x
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/irqflags.h>
+#include <asm/memory.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/sections.h>
+#include <asm/suspend.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+/*
+ * Hibernate core relies on this value being 0 on resume, and marks it
+ * __nosavedata assuming it will keep the resume kernel's '0' value. This
+ * doesn't happen when KASLR is in use.
+ *
+ * defined as "__visible int in_suspend __nosavedata" in
+ * kernel/power/hibernate.c
+ */
+extern int in_suspend;
+
+/* Find a symbol's alias in the linear map */
+#define LMADDR(x) phys_to_virt(virt_to_phys(x))
+
+/* Do we need to reset el2? */
+#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
+
+/* temporary el2 vectors in the __hibernate_exit_text section. */
+extern char hibernate_el2_vectors[];
+
+/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
+extern char __hyp_stub_vectors[];
+
+/*
+ * Values that may not change over hibernate/resume. We put the build number
+ * and date in here so that we guarantee not to resume with a different
+ * kernel.
+ */
+struct arch_hibernate_hdr_invariants {
+ char uts_version[__NEW_UTS_LEN + 1];
+};
+
+/* These values need to be known across a hibernate/restore. */
+static struct arch_hibernate_hdr {
+ struct arch_hibernate_hdr_invariants invariants;
+
+ /* These are needed to find the relocated kernel if built with kaslr */
+ phys_addr_t ttbr1_el1;
+ void (*reenter_kernel)(void);
+
+ /*
+ * We need to know where the __hyp_stub_vectors are after restore to
+ * re-configure el2.
+ */
+ phys_addr_t __hyp_stub_vectors;
+} resume_hdr;
+
+static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
+{
+ memset(i, 0, sizeof(*i));
+ memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
+}
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
+ unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+
+ return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+}
+
+void notrace restore_processor_state(void)
+{
+}
+
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+ struct arch_hibernate_hdr *hdr = addr;
+
+ if (max_size < sizeof(*hdr))
+ return -EOVERFLOW;
+
+ arch_hdr_invariants(&hdr->invariants);
+ hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
+ hdr->reenter_kernel = _cpu_resume;
+
+ /* We can't use __hyp_get_vectors() because kvm may still be loaded */
+ if (el2_reset_needed())
+ hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+ else
+ hdr->__hyp_stub_vectors = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_save);
+
+int arch_hibernation_header_restore(void *addr)
+{
+ struct arch_hibernate_hdr_invariants invariants;
+ struct arch_hibernate_hdr *hdr = addr;
+
+ arch_hdr_invariants(&invariants);
+ if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
+ pr_crit("Hibernate image not generated by this kernel!\n");
+ return -EINVAL;
+ }
+
+ resume_hdr = *hdr;
+
+ return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_restore);
+
+/*
+ * Copies length bytes, starting at src_start, into a new page, performs
+ * cache maintenance, then maps it at the specified low address as
+ * executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+ unsigned long dst_addr,
+ phys_addr_t *phys_dst_addr,
+ void *(*allocator)(gfp_t mask),
+ gfp_t mask)
+{
+ int rc = 0;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long dst = (unsigned long)allocator(mask);
+
+ if (!dst) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memcpy((void *)dst, src_start, length);
+ flush_icache_range(dst, dst + length);
+
+ pgd = pgd_offset_raw(allocator(mask), dst_addr);
+ if (pgd_none(*pgd)) {
+ pud = allocator(mask);
+ if (!pud) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ pgd_populate(&init_mm, pgd, pud);
+ }
+
+ pud = pud_offset(pgd, dst_addr);
+ if (pud_none(*pud)) {
+ pmd = allocator(mask);
+ if (!pmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ pud_populate(&init_mm, pud, pmd);
+ }
+
+ pmd = pmd_offset(pud, dst_addr);
+ if (pmd_none(*pmd)) {
+ pte = allocator(mask);
+ if (!pte) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ }
+
+ pte = pte_offset_kernel(pmd, dst_addr);
+ set_pte(pte, __pte(virt_to_phys((void *)dst) |
+ pgprot_val(PAGE_KERNEL_EXEC)));
+
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+ * ensure that TLBs are free of any entries that may overlap with the
+ * global mappings we are about to install.
+ *
+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+ * runtime services), while for a userspace-driven test_resume cycle it
+ * points to userspace page tables (and we must point it at a zero page
+ * ourselves). Elsewhere we only (un)install the idmap with preemption
+ * disabled, so T0SZ should be as required regardless.
+ */
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ isb();
+
+ *phys_dst_addr = virt_to_phys((void *)dst);
+
+out:
+ return rc;
+}
+
+#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
+
+int swsusp_arch_suspend(void)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct sleep_stack_data state;
+
+ local_dbg_save(flags);
+
+ if (__cpu_suspend_enter(&state)) {
+ ret = swsusp_save();
+ } else {
+ /* Clean kernel core startup/idle code to PoC */
+ dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
+ dcache_clean_range(__idmap_text_start, __idmap_text_end);
+
+ /*
+ * Tell the hibernation core that we've just restored
+ * the memory
+ */
+ in_suspend = 0;
+
+ __cpu_suspend_exit();
+ }
+
+ local_dbg_restore(flags);
+
+ return ret;
+}
+
+static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+{
+ pte_t pte = *src_pte;
+
+ if (pte_valid(pte)) {
+ /*
+ * Resume will overwrite areas that may be marked
+ * read only (code, rodata). Clear the RDONLY bit from
+ * the temporary mappings we use during restore.
+ */
+ set_pte(dst_pte, pte_clear_rdonly(pte));
+ } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+ /*
+ * debug_pagealloc will have removed the PTE_VALID bit if
+ * the page isn't in use by the resume kernel. It may have
+ * been in use by the original kernel, in which case we need
+ * to put it back in our copy to do the restore.
+ *
+ * Before marking this entry valid, check that the pfn should
+ * be mapped.
+ */
+ BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
+ }
+}
+
+static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+ unsigned long end)
+{
+ pte_t *src_pte;
+ pte_t *dst_pte;
+ unsigned long addr = start;
+
+ dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pte)
+ return -ENOMEM;
+ pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
+ dst_pte = pte_offset_kernel(dst_pmd, start);
+
+ src_pte = pte_offset_kernel(src_pmd, start);
+ do {
+ _copy_pte(dst_pte, src_pte, addr);
+ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+ return 0;
+}
+
+static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+ unsigned long end)
+{
+ pmd_t *src_pmd;
+ pmd_t *dst_pmd;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (pud_none(*dst_pud)) {
+ dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pmd)
+ return -ENOMEM;
+ pud_populate(&init_mm, dst_pud, dst_pmd);
+ }
+ dst_pmd = pmd_offset(dst_pud, start);
+
+ src_pmd = pmd_offset(src_pud, start);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(*src_pmd))
+ continue;
+ if (pmd_table(*src_pmd)) {
+ if (copy_pte(dst_pmd, src_pmd, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pmd(dst_pmd,
+ __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+ }
+ } while (dst_pmd++, src_pmd++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+ unsigned long end)
+{
+ pud_t *dst_pud;
+ pud_t *src_pud;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (pgd_none(*dst_pgd)) {
+ dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pud)
+ return -ENOMEM;
+ pgd_populate(&init_mm, dst_pgd, dst_pud);
+ }
+ dst_pud = pud_offset(dst_pgd, start);
+
+ src_pud = pud_offset(src_pgd, start);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none(*src_pud))
+ continue;
+ if (pud_table(*(src_pud))) {
+ if (copy_pmd(dst_pud, src_pud, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pud(dst_pud,
+ __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+ }
+ } while (dst_pud++, src_pud++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+ unsigned long end)
+{
+ unsigned long next;
+ unsigned long addr = start;
+ pgd_t *src_pgd = pgd_offset_k(start);
+
+ dst_pgd = pgd_offset_raw(dst_pgd, start);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(*src_pgd))
+ continue;
+ if (copy_pud(dst_pgd, src_pgd, addr, next))
+ return -ENOMEM;
+ } while (dst_pgd++, src_pgd++, addr = next, addr != end);
+
+ return 0;
+}
+
+/*
+ * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
+ *
+ * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
+ * so we don't need to free it here.
+ */
+int swsusp_arch_resume(void)
+{
+ int rc = 0;
+ void *zero_page;
+ size_t exit_size;
+ pgd_t *tmp_pg_dir;
+ void *lm_restore_pblist;
+ phys_addr_t phys_hibernate_exit;
+ void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
+ void *, phys_addr_t, phys_addr_t);
+
+ /*
+ * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when
+ * restoring.
+ */
+ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!tmp_pg_dir) {
+ pr_err("Failed to allocate memory for temporary page tables.");
+ rc = -ENOMEM;
+ goto out;
+ }
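+	/* An end address of 0 makes the walk wrap, copying everything from
+	 * PAGE_OFFSET to the top of the address space. */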
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ if (rc)
+ goto out;
+
+ /*
+ * Since we only copied the linear map, we need to find restore_pblist's
+ * linear map address.
+ */
+ lm_restore_pblist = LMADDR(restore_pblist);
+
+ /*
+ * We need a zero page that is zero before & after resume in order to
+ * break-before-make on the ttbr1 page tables.
+ */
+ zero_page = (void *)get_safe_page(GFP_ATOMIC);
+ if (!zero_page) {
+ pr_err("Failed to allocate zero page.");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Locate the exit code in the bottom-but-one page, so that *NULL
+ * still has disastrous effects.
+ */
+ hibernate_exit = (void *)PAGE_SIZE;
+ exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
+ /*
+ * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
+ * a new set of ttbr0 page tables and load them.
+ */
+ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+ (unsigned long)hibernate_exit,
+ &phys_hibernate_exit,
+ (void *)get_safe_page, GFP_ATOMIC);
+ if (rc) {
+ pr_err("Failed to create safe executable page for hibernate_exit code.");
+ goto out;
+ }
+
+ /*
+ * The hibernate exit text contains a set of el2 vectors, that will
+ * be executed at el2 with the mmu off in order to reload hyp-stub.
+ */
+ __flush_dcache_area(hibernate_exit, exit_size);
+
+ /*
+ * KASLR will cause the el2 vectors to be in a different location in
+ * the resumed kernel. Load hibernate's temporary copy into el2.
+ *
+ * We can skip this step if we booted at EL1, or are running with VHE.
+ */
+ if (el2_reset_needed()) {
+ phys_addr_t el2_vectors = phys_hibernate_exit; /* base */
+ el2_vectors += hibernate_el2_vectors -
+ __hibernate_exit_text_start; /* offset */
+
+ __hyp_set_vectors(el2_vectors);
+ }
+
+ hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
+ resume_hdr.reenter_kernel, lm_restore_pblist,
+ resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
+
+out:
+ return rc;
+}
+
+static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ if (action == PM_HIBERNATION_PREPARE &&
+ cpumask_first(cpu_online_mask) != 0) {
+ pr_warn("CPU0 is offline.\n");
+ return notifier_from_errno(-ENODEV);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int __init check_boot_cpu_online_init(void)
+{
+ /*
+ * Set this pm_notifier callback with a lower priority than
+ * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
+ * called earlier to disable cpu hotplug before the cpu online check.
+ */
+ pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
+
+ return 0;
+}
+core_initcall(check_boot_cpu_online_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 9e7228bda4a1..bef4b659d816 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -24,6 +24,7 @@
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
@@ -128,6 +129,7 @@ static u64 read_wb_reg(int reg, int n)
return val;
}
+NOKPROBE_SYMBOL(read_wb_reg);
static void write_wb_reg(int reg, int n, u64 val)
{
@@ -141,6 +143,7 @@ static void write_wb_reg(int reg, int n, u64 val)
}
isb();
}
+NOKPROBE_SYMBOL(write_wb_reg);
/*
* Convert a breakpoint privilege level to the corresponding exception
@@ -158,6 +161,7 @@ static enum dbg_active_el debug_exception_level(int privilege)
return -EINVAL;
}
}
+NOKPROBE_SYMBOL(debug_exception_level);
enum hw_breakpoint_ops {
HW_BREAKPOINT_INSTALL,
@@ -616,6 +620,7 @@ static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
write_wb_reg(reg, i, ctrl);
}
}
+NOKPROBE_SYMBOL(toggle_bp_registers);
/*
* Debug exception handlers.
@@ -695,6 +700,7 @@ unlock:
return 0;
}
+NOKPROBE_SYMBOL(breakpoint_handler);
/*
* Arm64 hardware does not always report a watchpoint hit address that matches
@@ -835,6 +841,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
return 0;
}
+NOKPROBE_SYMBOL(watchpoint_handler);
/*
* Handle single-step exception.
@@ -892,6 +899,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
return !handled_exception;
}
+NOKPROBE_SYMBOL(reinstall_suspended_bps);
/*
* Context-switcher for restoring suspended breakpoints.
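
Every function annotated with NOKPROBE_SYMBOL() above is reachable from the debug exception path; planting a kprobe (itself a brk-based mechanism) inside one would recurse through the handler. A minimal sketch of the annotation on a hypothetical helper:

#include <linux/kprobes.h>

/* Hypothetical helper called from the hw_breakpoint exception path. */
static int debug_path_helper(unsigned long addr)
{
	return 0;
}
/* Blacklist it: probing here would re-enter the debug handler. */
NOKPROBE_SYMBOL(debug_path_helper);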
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 096e957aecb0..ba0127e31b1a 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -22,6 +22,8 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
@@ -55,15 +57,26 @@ ENDPROC(__hyp_stub_vectors)
.align 11
el1_sync:
- mrs x1, esr_el2
- lsr x1, x1, #26
- cmp x1, #0x16
- b.ne 2f // Not an HVC trap
- cbz x0, 1f
- msr vbar_el2, x0 // Set vbar_el2
- b 2f
-1: mrs x0, vbar_el2 // Return vbar_el2
-2: eret
+ mrs x30, esr_el2
+ lsr x30, x30, #ESR_ELx_EC_SHIFT
+
+ cmp x30, #ESR_ELx_EC_HVC64
+ b.ne 9f // Not an HVC trap
+
+ cmp x0, #HVC_GET_VECTORS
+ b.ne 1f
+ mrs x0, vbar_el2
+ b 9f
+
+1: cmp x0, #HVC_SET_VECTORS
+ b.ne 2f
+ msr vbar_el2, x1
+ b 9f
+
+ /* Someone called kvm_call_hyp() against the hyp-stub... */
+2: mov x0, #ARM_EXCEPTION_HYP_GONE
+
+9: eret
ENDPROC(el1_sync)
.macro invalid_vector label
@@ -103,10 +116,18 @@ ENDPROC(\label)
*/
ENTRY(__hyp_get_vectors)
- mov x0, xzr
- // fall through
-ENTRY(__hyp_set_vectors)
+ str lr, [sp, #-16]!
+ mov x0, #HVC_GET_VECTORS
hvc #0
+ ldr lr, [sp], #16
ret
ENDPROC(__hyp_get_vectors)
+
+ENTRY(__hyp_set_vectors)
+ str lr, [sp, #-16]!
+ mov x1, x0
+ mov x0, #HVC_SET_VECTORS
+ hvc #0
+ ldr lr, [sp], #16
+ ret
ENDPROC(__hyp_set_vectors)
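
The reworked stub turns x0 into a function identifier: HVC_GET_VECTORS returns vbar_el2 in x0, HVC_SET_VECTORS installs the address passed in x1, and anything else (e.g. a stray kvm_call_hyp()) yields ARM_EXCEPTION_HYP_GONE. From C, the wrappers are used as in this sketch (swap_hyp_vectors is a hypothetical caller):

#include <asm/virt.h>

static phys_addr_t saved_vectors;

/* Hypothetical caller: swap in a private EL2 vector table. */
static void swap_hyp_vectors(phys_addr_t new_vectors)
{
	if (!is_hyp_mode_available())
		return;				/* booted at EL1: nothing to do */

	saved_vectors = __hyp_get_vectors();	/* HVC_GET_VECTORS */
	__hyp_set_vectors(new_vectors);		/* HVC_SET_VECTORS */
}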
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 5e360ce88f10..86d444f9c2c1 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -73,6 +73,8 @@
#ifdef CONFIG_EFI
+__efistub_stext_offset = stext - _text;
+
/*
* Prevent the symbol aliases below from being emitted into the kallsyms
* table, by forcing them to be absolute symbols (which are conveniently
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 67d3100369c6..a3f8f8bbfc92 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,6 +30,7 @@
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
+#include <asm/opcodes.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
@@ -95,7 +96,8 @@ static void __kprobes *patch_map(void *addr, int fixmap)
if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
page = vmalloc_to_page(addr);
- else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+ else if (!module && (IS_ENABLED(CONFIG_DEBUG_RODATA)
+ || IS_ENABLED(CONFIG_KERNEL_TEXT_RDONLY)))
page = phys_to_page(__pa_symbol(addr));
else
return addr;
@@ -162,6 +164,32 @@ static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
aarch64_insn_is_nop(insn);
}
+bool __kprobes aarch64_insn_uses_literal(u32 insn)
+{
+ /* ldr/ldrsw (literal), prfm */
+
+ return aarch64_insn_is_ldr_lit(insn) ||
+ aarch64_insn_is_ldrsw_lit(insn) ||
+ aarch64_insn_is_adr_adrp(insn) ||
+ aarch64_insn_is_prfm_lit(insn);
+}
+
+bool __kprobes aarch64_insn_is_branch(u32 insn)
+{
+ /* b, bl, cb*, tb*, b.cond, br, blr */
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_ret(insn) ||
+ aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_bcond(insn);
+}
+
/*
* ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
* Section B2.6.5 "Concurrent modification and execution of instructions":
@@ -363,6 +391,9 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 immlo, immhi, mask;
int shift;
+ if (insn == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
switch (type) {
case AARCH64_INSN_IMM_ADR:
shift = 0;
@@ -377,7 +408,7 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
type);
- return 0;
+ return AARCH64_BREAK_FAULT;
}
}
@@ -394,9 +425,12 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
{
int shift;
+ if (insn == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
pr_err("%s: unknown register encoding %d\n", __func__, reg);
- return 0;
+ return AARCH64_BREAK_FAULT;
}
switch (type) {
@@ -417,7 +451,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
default:
pr_err("%s: unknown register type encoding %d\n", __func__,
type);
- return 0;
+ return AARCH64_BREAK_FAULT;
}
insn &= ~(GENMASK(4, 0) << shift);
@@ -446,7 +480,7 @@ static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
break;
default:
pr_err("%s: unknown size encoding %d\n", __func__, type);
- return 0;
+ return AARCH64_BREAK_FAULT;
}
insn &= ~GENMASK(31, 30);
@@ -460,14 +494,17 @@ static inline long branch_imm_common(unsigned long pc, unsigned long addr,
{
long offset;
- /*
- * PC: A 64-bit Program Counter holding the address of the current
- * instruction. A64 instructions must be word-aligned.
- */
- BUG_ON((pc & 0x3) || (addr & 0x3));
+ if ((pc & 0x3) || (addr & 0x3)) {
+ pr_err("%s: A64 instructions must be word aligned\n", __func__);
+ return range;
+ }
offset = ((long)addr - (long)pc);
- BUG_ON(offset < -range || offset >= range);
+
+ if (offset < -range || offset >= range) {
+ pr_err("%s: offset out of range\n", __func__);
+ return range;
+ }
return offset;
}
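
Rather than BUG_ON(), branch_imm_common() now reports failure by returning the exclusive bound `range` itself, which the callers that follow convert into AARCH64_BREAK_FAULT. A hedged caller-side sketch (emit_branch is hypothetical):

#include <linux/errno.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>

/* Hypothetical patch site: emit "b target" at pc. */
static int emit_branch(void *pc, unsigned long target)
{
	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)pc, target,
					       AARCH64_INSN_BRANCH_NOLINK);

	if (insn == AARCH64_BREAK_FAULT)
		return -ERANGE;		/* misaligned or beyond +/-128M */

	return aarch64_insn_patch_text_nosync(pc, insn);
}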
@@ -484,6 +521,8 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
* texts are within +/-128M.
*/
offset = branch_imm_common(pc, addr, SZ_128M);
+ if (offset >= SZ_128M)
+ return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_BRANCH_LINK:
@@ -493,7 +532,7 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
insn = aarch64_insn_get_b_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -510,6 +549,8 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
long offset;
offset = branch_imm_common(pc, addr, SZ_1M);
+ if (offset >= SZ_1M)
+ return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_BRANCH_COMP_ZERO:
@@ -519,7 +560,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
insn = aarch64_insn_get_cbnz_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -530,7 +571,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -550,7 +591,10 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
insn = aarch64_insn_get_bcond_value();
- BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
+ if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
+ pr_err("%s: unknown condition encoding %d\n", __func__, cond);
+ return AARCH64_BREAK_FAULT;
+ }
insn |= cond;
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
@@ -583,7 +627,7 @@ u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
insn = aarch64_insn_get_ret_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -606,7 +650,7 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
insn = aarch64_insn_get_str_reg_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown load/store encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -645,26 +689,30 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
insn = aarch64_insn_get_stp_post_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown load/store encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
- /* offset must be multiples of 4 in the range [-256, 252] */
- BUG_ON(offset & 0x3);
- BUG_ON(offset < -256 || offset > 252);
+ if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
+ pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
+ __func__, offset);
+ return AARCH64_BREAK_FAULT;
+ }
shift = 2;
break;
case AARCH64_INSN_VARIANT_64BIT:
- /* offset must be multiples of 8 in the range [-512, 504] */
- BUG_ON(offset & 0x7);
- BUG_ON(offset < -512 || offset > 504);
+ if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
+ pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
+ __func__, offset);
+ return AARCH64_BREAK_FAULT;
+ }
shift = 3;
insn |= AARCH64_INSN_SF_BIT;
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -702,7 +750,7 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
insn = aarch64_insn_get_subs_imm_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -713,11 +761,14 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
- BUG_ON(imm & ~(SZ_4K - 1));
+ if (imm & ~(SZ_4K - 1)) {
+ pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
+ return AARCH64_BREAK_FAULT;
+ }
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
@@ -746,7 +797,7 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
insn = aarch64_insn_get_sbfm_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -759,12 +810,18 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
mask = GENMASK(5, 0);
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
- BUG_ON(immr & ~mask);
- BUG_ON(imms & ~mask);
+ if (immr & ~mask) {
+ pr_err("%s: invalid immr encoding %d\n", __func__, immr);
+ return AARCH64_BREAK_FAULT;
+ }
+ if (imms & ~mask) {
+ pr_err("%s: invalid imms encoding %d\n", __func__, imms);
+ return AARCH64_BREAK_FAULT;
+ }
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
@@ -793,23 +850,33 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
insn = aarch64_insn_get_movn_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown movewide encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
- BUG_ON(imm & ~(SZ_64K - 1));
+ if (imm & ~(SZ_64K - 1)) {
+ pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
+ return AARCH64_BREAK_FAULT;
+ }
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
- BUG_ON(shift != 0 && shift != 16);
+ if (shift != 0 && shift != 16) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
- BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
- shift != 48);
+ if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -843,20 +910,28 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
insn = aarch64_insn_get_subs_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
- BUG_ON(shift & ~(SZ_32 - 1));
+ if (shift & ~(SZ_32 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
- BUG_ON(shift & ~(SZ_64 - 1));
+ if (shift & ~(SZ_64 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -885,11 +960,15 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
insn = aarch64_insn_get_rev32_value();
break;
case AARCH64_INSN_DATA1_REVERSE_64:
- BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
+ if (variant != AARCH64_INSN_VARIANT_64BIT) {
+ pr_err("%s: invalid variant for reverse64 %d\n",
+ __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
insn = aarch64_insn_get_rev64_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown data1 encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -900,7 +979,7 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -937,7 +1016,7 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
insn = aarch64_insn_get_rorv_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown data2 encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -948,7 +1027,7 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -976,7 +1055,7 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
insn = aarch64_insn_get_msub_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown data3 encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -987,7 +1066,7 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -1037,20 +1116,28 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
insn = aarch64_insn_get_bics_value();
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown logical encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
- BUG_ON(shift & ~(SZ_32 - 1));
+ if (shift & ~(SZ_32 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
- BUG_ON(shift & ~(SZ_64 - 1));
+ if (shift & ~(SZ_64 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
break;
default:
- BUG_ON(1);
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -1116,6 +1203,14 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset)
BUG();
}
+/*
+ * Extract the Op/CR fields from an msr/mrs instruction.
+ */
+u32 aarch64_insn_extract_system_reg(u32 insn)
+{
+ return (insn & 0x1FFFE0) >> 5;
+}
+
bool aarch32_insn_is_wide(u32 insn)
{
return insn >= 0xe800;
@@ -1141,3 +1236,101 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
return insn & CRM_MASK;
}
+
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+ return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ __check_eq, __check_ne, __check_cs, __check_cc,
+ __check_mi, __check_pl, __check_vs, __check_vc,
+ __check_hi, __check_ls, __check_ge, __check_lt,
+ __check_gt, __check_le, __check_al, __check_al
+};
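
The __check_* helpers avoid testing flags individually by shifting one PSTATE bit onto another: with N, Z, C, V at bits 31..28, `pstate >> 1` folds Z onto C (hi = C && !Z) and `pstate << 3` folds V onto N (ge = N == V). Indexing the table with the 4-bit AArch32 condition field then reduces condition evaluation to one call; a hedged sketch (cond_passes is hypothetical, assuming the extern declared in <asm/insn.h> by this series):

#include <asm/insn.h>
#include <asm/ptrace.h>

/* Would an AArch32 instruction with this condition field execute? */
static bool cond_passes(u32 cond, unsigned long pstate)
{
	/* cond 0x0 is EQ: cond_passes(0x0, PSR_Z_BIT) returns true */
	return aarch32_opcode_cond_checks[cond & 0xf](pstate);
}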
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 79b17384effa..d43ea93dc68d 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -19,6 +19,7 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/msm_rtb.h>
/*
* Copy data from IO memory space to "real" memory space.
@@ -26,21 +27,21 @@
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
while (count && !IS_ALIGNED((unsigned long)from, 8)) {
- *(u8 *)to = __raw_readb(from);
+ *(u8 *)to = __raw_readb_no_log(from);
from++;
to++;
count--;
}
while (count >= 8) {
- *(u64 *)to = __raw_readq(from);
+ *(u64 *)to = __raw_readq_no_log(from);
from += 8;
to += 8;
count -= 8;
}
while (count) {
- *(u8 *)to = __raw_readb(from);
+ *(u8 *)to = __raw_readb_no_log(from);
from++;
to++;
count--;
@@ -54,21 +55,21 @@ EXPORT_SYMBOL(__memcpy_fromio);
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
while (count && !IS_ALIGNED((unsigned long)to, 8)) {
- __raw_writeb(*(u8 *)from, to);
+ __raw_writeb_no_log(*(u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
- __raw_writeq(*(u64 *)from, to);
+ __raw_writeq_no_log(*(u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
- __raw_writeb(*(u8 *)from, to);
+ __raw_writeb_no_log(*(u8 *)from, to);
from++;
to++;
count--;
@@ -88,19 +89,19 @@ void __memset_io(volatile void __iomem *dst, int c, size_t count)
qc |= qc << 32;
while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
- __raw_writeb(c, dst);
+ __raw_writeb_no_log(c, dst);
dst++;
count--;
}
while (count >= 8) {
- __raw_writeq(qc, dst);
+ __raw_writeq_no_log(qc, dst);
dst += 8;
count -= 8;
}
while (count) {
- __raw_writeb(c, dst);
+ __raw_writeb_no_log(c, dst);
dst++;
count--;
}
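
The loops keep the usual head/bulk/tail shape -- byte accesses until the __iomem pointer is 8-byte aligned, 64-bit accesses for the bulk, bytes for the remainder -- but switch to the MSM-specific *_no_log accessors so the register trace buffer is not flooded with one entry per 8-byte chunk. Callers are unchanged, e.g. (read_pkt and the 64-byte window are hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: drain a 64-byte packet from a device SRAM window. */
static void read_pkt(void __iomem *win, u8 *buf)
{
	memcpy_fromio(buf, win, 64);
}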
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 606c21760f23..49e6de3812d9 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/kprobes.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -221,6 +222,7 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
+NOKPROBE_SYMBOL(kgdb_brk_fn)
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
{
@@ -232,6 +234,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_HANDLED;
}
+NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
@@ -241,6 +244,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
kgdb_handle_exception(0, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
+NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 997e6b27ff6a..d15b5c2935b3 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -20,16 +20,13 @@
*
* AArch32 user helpers.
*
- * Each segment is 32-byte aligned and will be moved to the top of the high
- * vector page. New segments (if ever needed) must be added in front of
- * existing ones. This mechanism should be used only for things that are
- * really small and justified, and not be abused freely.
+ * These helpers are provided for compatibility with AArch32 binaries that
+ * still need them. They are installed at a fixed address by
+ * aarch32_setup_additional_pages().
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
-#include <asm/unistd.h>
-
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -77,42 +74,3 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
-
-/*
- * AArch32 sigreturn code
- *
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- *
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
- .globl __aarch32_sigret_code_start
-__aarch32_sigret_code_start:
-
- /*
- * ARM Code
- */
- .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
-
- /*
- * ARM code
- */
- .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
-
- .globl __aarch32_sigret_code_end
-__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 7f316982ce00..093c13541efb 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
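
__GFP_NOWARN is only set when module PLTs are available because the (truncated) condition above retries the allocation elsewhere, making the first failure expected rather than alarming. The idiom in isolation, with a hypothetical alloc_with_fallback():

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical: attempt quietly, then retry with the warning enabled. */
static void *alloc_with_fallback(unsigned long size)
{
	void *p = __vmalloc(size, GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);

	if (!p)
		p = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
	return p;
}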
diff --git a/arch/arm64/kernel/perf_debug.c b/arch/arm64/kernel/perf_debug.c
new file mode 100644
index 000000000000..ef3313fd16c6
--- /dev/null
+++ b/arch/arm64/kernel/perf_debug.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+/*
+ * Subsequent patches should add an entry to the end of this string.
+ * Format is incrementing sequence number followed by text of
+ * patch commit title with newline.
+ * Note trailing ';' is on its own line to simplify addition of
+ * future strings.
+ */
+static char *descriptions =
+ " 0 arm64: perf: add debug patch logging framework\n"
+ " 1 Perf: arm64: Add L1 counters to tracepoints\n"
+ " 5 Perf: arm64: add perf user-mode permissions\n"
+ " 6 Perf: arm64: Add debugfs node to clear PMU\n"
+ " 7 Perf: arm64: Update PMU force reset\n"
+ "10 Perf: arm64: tracectr: initialize counts after hotplug\n"
+ "11 Perf: arm64: Refine disable/enable in tracecounters\n"
+ "15 Perf: arm64: make debug dir handle exportable\n"
+ "16 Perf: arm64: add perf trace user\n"
+ "17 Perf: arm64: add support for kryo pmu\n"
+;
+
+static ssize_t desc_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return simple_read_from_buffer(buf, count, pos, descriptions,
+ strlen(descriptions));
+}
+
+static const struct file_operations perf_debug_desc_fops = {
+ .read = desc_read,
+};
+
+static int perf_debugfs_init(void)
+{
+ int ret = 0;
+ struct dentry *dir;
+ struct dentry *file;
+
+ dir = debugfs_create_dir("msm-perf-patches", NULL);
+ if (IS_ERR_OR_NULL(dir)) {
+ pr_err("failed to create msm-perf-patches dir in debugfs\n");
+ ret = dir ? PTR_ERR(dir) : -ENOMEM;
+ goto init_exit;
+ }
+
+ file = debugfs_create_file("descriptions", 0444, dir, NULL,
+ &perf_debug_desc_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ debugfs_remove(dir);
+ pr_err("failed to create descriptions file for msm-perf-patches\n");
+ ret = file ? PTR_ERR(file) : -ENOMEM;
+ goto init_exit;
+ }
+
+init_exit:
+ return ret;
+}
+late_initcall(perf_debugfs_init);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
deleted file mode 100644
index e99a0ed7e66b..000000000000
--- a/arch/arm64/kernel/perf_event.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * PMU support
- *
- * Copyright (C) 2012 ARM Limited
- * Author: Will Deacon <will.deacon@arm.com>
- *
- * This code is based heavily on the ARMv7 perf event code.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <asm/irq_regs.h>
-
-#include <linux/of.h>
-#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
-
-/*
- * ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
- */
-enum armv8_pmuv3_perf_types {
- /* Required events. */
- ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
- ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
- ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
- ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
- ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
- ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,
-
- /* At least one of the following is required. */
- ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
- ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,
-
- /* Common architectural events. */
- ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
- ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
- ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
- ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
- ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
- ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
- ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
- ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
- ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
- ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,
-
- /* Common microarchitectural events. */
- ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
- ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
- ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
- ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
- ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
- ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
- ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
- ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
- ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
- ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
- ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
- ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
-};
-
-/* ARMv8 Cortex-A53 specific event types. */
-enum armv8_a53_pmu_perf_types {
- ARMV8_A53_PERFCTR_PREFETCH_LINEFILL = 0xC2,
-};
-
-/* ARMv8 Cortex-A57 specific event types. */
-enum armv8_a57_perf_types {
- ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD = 0x40,
- ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST = 0x41,
- ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD = 0x42,
- ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST = 0x43,
- ARMV8_A57_PERFCTR_DTLB_REFILL_LD = 0x4c,
- ARMV8_A57_PERFCTR_DTLB_REFILL_ST = 0x4d,
-};
-
-/* PMUv3 HW events mapping. */
-static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/* ARM Cortex-A53 HW events mapping. */
-static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
-};
-
-static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
-};
-
-static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_ST,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-
-/*
- * Perf Events' indices
- */
-#define ARMV8_IDX_CYCLE_COUNTER 0
-#define ARMV8_IDX_COUNTER0 1
-#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
- (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
-
-#define ARMV8_MAX_COUNTERS 32
-#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
-
-/*
- * ARMv8 low level PMU access
- */
-
-/*
- * Perf Event to low level counters mapping
- */
-#define ARMV8_IDX_TO_COUNTER(x) \
- (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMCR_N_MASK 0x1f
-#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_EXCLUDE_EL1 (1 << 31)
-#define ARMV8_EXCLUDE_EL0 (1 << 30)
-#define ARMV8_INCLUDE_EL2 (1 << 27)
-
-static inline u32 armv8pmu_pmcr_read(void)
-{
- u32 val;
- asm volatile("mrs %0, pmcr_el0" : "=r" (val));
- return val;
-}
-
-static inline void armv8pmu_pmcr_write(u32 val)
-{
- val &= ARMV8_PMCR_MASK;
- isb();
- asm volatile("msr pmcr_el0, %0" :: "r" (val));
-}
-
-static inline int armv8pmu_has_overflowed(u32 pmovsr)
-{
- return pmovsr & ARMV8_OVERFLOWED_MASK;
-}
-
-static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
-{
- return idx >= ARMV8_IDX_CYCLE_COUNTER &&
- idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
-}
-
-static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
-{
- return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
-}
-
-static inline int armv8pmu_select_counter(int idx)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- asm volatile("msr pmselr_el0, %0" :: "r" (counter));
- isb();
-
- return idx;
-}
-
-static inline u32 armv8pmu_read_counter(struct perf_event *event)
-{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- u32 value = 0;
-
- if (!armv8pmu_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u reading wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER)
- asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
- else if (armv8pmu_select_counter(idx) == idx)
- asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
-
- return value;
-}
-
-static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
-{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- if (!armv8pmu_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u writing wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER)
- asm volatile("msr pmccntr_el0, %0" :: "r" (value));
- else if (armv8pmu_select_counter(idx) == idx)
- asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
-}
-
-static inline void armv8pmu_write_evtype(int idx, u32 val)
-{
- if (armv8pmu_select_counter(idx) == idx) {
- val &= ARMV8_EVTYPE_MASK;
- asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
- }
-}
-
-static inline int armv8pmu_enable_counter(int idx)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
- return idx;
-}
-
-static inline int armv8pmu_disable_counter(int idx)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
- return idx;
-}
-
-static inline int armv8pmu_enable_intens(int idx)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
- return idx;
-}
-
-static inline int armv8pmu_disable_intens(int idx)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
- isb();
- /* Clear the overflow flag in case an interrupt is pending. */
- asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
- isb();
-
- return idx;
-}
-
-static inline u32 armv8pmu_getreset_flags(void)
-{
- u32 value;
-
- /* Read */
- asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
-
- /* Write to clear flags */
- value &= ARMV8_OVSR_MASK;
- asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
-
- return value;
-}
-
-static void armv8pmu_enable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /*
- * Disable counter
- */
- armv8pmu_disable_counter(idx);
-
- /*
- * Set event (if destined for PMNx counters).
- */
- armv8pmu_write_evtype(idx, hwc->config_base);
-
- /*
- * Enable interrupt for this counter
- */
- armv8pmu_enable_intens(idx);
-
- /*
- * Enable counter
- */
- armv8pmu_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv8pmu_disable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- /*
- * Disable counter and interrupt
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /*
- * Disable counter
- */
- armv8pmu_disable_counter(idx);
-
- /*
- * Disable interrupt for this counter
- */
- armv8pmu_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
-{
- u32 pmovsr;
- struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- struct pt_regs *regs;
- int idx;
-
- /*
- * Get and reset the IRQ flags
- */
- pmovsr = armv8pmu_getreset_flags();
-
- /*
- * Did an overflow occur?
- */
- if (!armv8pmu_has_overflowed(pmovsr))
- return IRQ_NONE;
-
- /*
- * Handle the counter(s) overflow(s)
- */
- regs = get_irq_regs();
-
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
-
- /* Ignore if we don't have an event. */
- if (!event)
- continue;
-
- /*
- * We have a single interrupt for all counters. Check that
- * each counter has overflowed before we process it.
- */
- if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
- continue;
-
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
-
- /*
- * Handle the pending perf events.
- *
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
- * will not work.
- */
- irq_work_run();
-
- return IRQ_HANDLED;
-}
-
-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Enable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
-
- /* Always place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
- if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return -EAGAIN;
-
- return ARMV8_IDX_CYCLE_COUNTER;
- }
-
- /*
- * For anything other than a cycle counter, try and use
- * the events counters
- */
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
-
- /* The counters are all in use. */
- return -EAGAIN;
-}
-
-/*
- * Add an event filter to a given event. This will only work for PMUv2 PMUs.
- */
-static int armv8pmu_set_event_filter(struct hw_perf_event *event,
- struct perf_event_attr *attr)
-{
- unsigned long config_base = 0;
-
- if (attr->exclude_idle)
- return -EPERM;
- if (attr->exclude_user)
- config_base |= ARMV8_EXCLUDE_EL0;
- if (attr->exclude_kernel)
- config_base |= ARMV8_EXCLUDE_EL1;
- if (!attr->exclude_hv)
- config_base |= ARMV8_INCLUDE_EL2;
-
- /*
- * Install the filter into config_base as this is used to
- * construct the event type.
- */
- event->config_base = config_base;
-
- return 0;
-}
-
-static void armv8pmu_reset(void *info)
-{
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
-
- /* The counter and interrupt enable registers are unknown at reset. */
- for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv8pmu_disable_counter(idx);
- armv8pmu_disable_intens(idx);
- }
-
- /* Initialize & Reset PMNC: C and P bits. */
- armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
-}
-
-static int armv8_pmuv3_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
-}
-
-static int armv8_a53_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv8_a53_perf_map,
- &armv8_a53_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
-}
-
-static int armv8_a57_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv8_a57_perf_map,
- &armv8_a57_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
-}
-
-static void armv8pmu_read_num_pmnc_events(void *info)
-{
- int *nb_cnt = info;
-
- /* Read the nb of CNTx counters supported from PMNC */
- *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
-
- /* Add the CPU cycles counter */
- *nb_cnt += 1;
-}
-
-static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
-{
- return smp_call_function_any(&arm_pmu->supported_cpus,
- armv8pmu_read_num_pmnc_events,
- &arm_pmu->num_events, 1);
-}
-
-static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
-{
- cpu_pmu->handle_irq = armv8pmu_handle_irq,
- cpu_pmu->enable = armv8pmu_enable_event,
- cpu_pmu->disable = armv8pmu_disable_event,
- cpu_pmu->read_counter = armv8pmu_read_counter,
- cpu_pmu->write_counter = armv8pmu_write_counter,
- cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
- cpu_pmu->start = armv8pmu_start,
- cpu_pmu->stop = armv8pmu_stop,
- cpu_pmu->reset = armv8pmu_reset,
- cpu_pmu->max_period = (1LLU << 32) - 1,
- cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
-}
-
-static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
-{
- armv8_pmu_init(cpu_pmu);
- cpu_pmu->name = "armv8_pmuv3";
- cpu_pmu->map_event = armv8_pmuv3_map_event;
- return armv8pmu_probe_num_events(cpu_pmu);
-}
-
-static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv8_pmu_init(cpu_pmu);
- cpu_pmu->name = "armv8_cortex_a53";
- cpu_pmu->map_event = armv8_a53_map_event;
- return armv8pmu_probe_num_events(cpu_pmu);
-}
-
-static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv8_pmu_init(cpu_pmu);
- cpu_pmu->name = "armv8_cortex_a57";
- cpu_pmu->map_event = armv8_a57_map_event;
- return armv8pmu_probe_num_events(cpu_pmu);
-}
-
-static const struct of_device_id armv8_pmu_of_device_ids[] = {
- {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
- {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
- {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
- {},
-};
-
-static int armv8_pmu_device_probe(struct platform_device *pdev)
-{
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
-}
-
-static struct platform_driver armv8_pmu_driver = {
- .driver = {
- .name = "armv8-pmu",
- .of_match_table = armv8_pmu_of_device_ids,
- .suppress_bind_attrs = true,
- },
- .probe = armv8_pmu_device_probe,
-};
-
-static int __init register_armv8_pmu_driver(void)
-{
- return platform_driver_register(&armv8_pmu_driver);
-}
-device_initcall(register_armv8_pmu_driver);
diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c
new file mode 100644
index 000000000000..7b852e36eaa2
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_counters.c
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cpu.h>
+#include <linux/tracepoint.h>
+#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include "perf_trace_counters.h"
+
+static unsigned int tp_pid_state;
+
+DEFINE_PER_CPU(u32, cntenset_val);
+DEFINE_PER_CPU(u32, previous_ccnt);
+DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+DEFINE_PER_CPU(u32, old_pid);
+DEFINE_PER_CPU(u32, hotplug_flag);
+
+static int tracectr_cpu_hotplug_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+
+ if ((action & (~CPU_TASKS_FROZEN)) == CPU_STARTING)
+ per_cpu(hotplug_flag, cpu) = 1;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tracectr_cpu_hotplug_notifier_block = {
+ .notifier_call = tracectr_cpu_hotplug_notifier,
+};
+
+static void setup_prev_cnts(u32 cpu, u32 cnten_val)
+{
+ int i;
+
+ if (cnten_val & CC)
+ asm volatile("mrs %0, pmccntr_el0"
+ : "=r"(per_cpu(previous_ccnt, cpu)));
+
+ for (i = 0; i < NUM_L1_CTRS; i++) {
+ if (cnten_val & (1 << i)) {
+ /* Select */
+ asm volatile("msr pmselr_el0, %0" : : "r"(i));
+ isb();
+ /* Read value */
+ asm volatile("mrs %0, pmxevcntr_el0"
+ : "=r"(per_cpu(previous_l1_cnts[i], cpu)));
+ }
+ }
+}
+
+void tracectr_notifier(void *ignore, bool preempt,
+ struct task_struct *prev, struct task_struct *next)
+{
+ u32 cnten_val;
+ int current_pid;
+ u32 cpu = task_cpu(next);
+
+ if (tp_pid_state != 1)
+ return;
+ current_pid = next->pid;
+ if (per_cpu(old_pid, cpu) != -1) {
+ asm volatile("mrs %0, pmcntenset_el0" : "=r" (cnten_val));
+ per_cpu(cntenset_val, cpu) = cnten_val;
+ /* Disable all the counters that were enabled */
+ asm volatile("msr pmcntenclr_el0, %0" : : "r" (cnten_val));
+
+ if (per_cpu(hotplug_flag, cpu) == 1) {
+ per_cpu(hotplug_flag, cpu) = 0;
+ setup_prev_cnts(cpu, cnten_val);
+ } else {
+ trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+ current_pid);
+ }
+
+ /* Enable all the counters that were disabled */
+ asm volatile("msr pmcntenset_el0, %0" : : "r" (cnten_val));
+ }
+ per_cpu(old_pid, cpu) = current_pid;
+}
+
+static void enable_tp_pid(void)
+{
+ if (tp_pid_state == 0) {
+ tp_pid_state = 1;
+ register_trace_sched_switch(tracectr_notifier, NULL);
+ }
+}
+
+static void disable_tp_pid(void)
+{
+ if (tp_pid_state == 1) {
+ tp_pid_state = 0;
+ unregister_trace_sched_switch(tracectr_notifier, NULL);
+ }
+}
+
+static ssize_t read_enabled_perftp_file_bool(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[2];
+ buf[1] = '\n';
+ if (tp_pid_state == 0)
+ buf[0] = '0';
+ else
+ buf[0] = '1';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_perftp_file_bool(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ size_t buf_size;
+
+ buf[0] = 0;
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ enable_tp_pid();
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ disable_tp_pid();
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_perftp = {
+ .read = read_enabled_perftp_file_bool,
+ .write = write_enabled_perftp_file_bool,
+ .llseek = default_llseek,
+};
+
+int __init init_tracecounters(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+ unsigned int value = 1;
+ int cpu;
+
+ dir = debugfs_create_dir("perf_debug_tp", NULL);
+ if (!dir)
+ return -ENOMEM;
+ file = debugfs_create_file("enabled", 0660, dir,
+ &value, &fops_perftp);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+ for_each_possible_cpu(cpu)
+ per_cpu(old_pid, cpu) = -1;
+ register_cpu_notifier(&tracectr_cpu_hotplug_notifier_block);
+ return 0;
+}
+
+int __exit exit_tracecounters(void)
+{
+ unregister_cpu_notifier(&tracectr_cpu_hotplug_notifier_block);
+ return 0;
+}
+late_initcall(init_tracecounters);
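
Reading an event counter is a select-then-read pair through pmselr_el0/pmxevcntr_el0 with an isb() in between so the selection takes effect before the read; deltas are computed in u32 arithmetic, which remains correct across a single counter wrap. The pattern in isolation (read_evcntr/ctr_delta are hypothetical helpers):

#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical: read event counter i via the banked register pair. */
static u32 read_evcntr(u32 i)
{
	u32 val;

	asm volatile("msr pmselr_el0, %0" : : "r" (i));
	isb();			/* make the selection visible to the read */
	asm volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
	return val;
}

/* u32 subtraction gives the right delta even across one counter wrap. */
static u32 ctr_delta(u32 now, u32 prev)
{
	return now - prev;
}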
diff --git a/arch/arm64/kernel/perf_trace_counters.h b/arch/arm64/kernel/perf_trace_counters.h
new file mode 100644
index 000000000000..ff3bd371791d
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_counters.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_COUNTERS_H_
+
+/* Ctr index for PMCNTENSET/CLR */
+#define CC 0x80000000
+#define C0 0x1
+#define C1 0x2
+#define C2 0x4
+#define C3 0x8
+#define C4 0x10
+#define C5 0x20
+#define C_ALL (CC | C0 | C1 | C2 | C3 | C4 | C5)
+#define NUM_L1_CTRS 6
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/tracepoint.h>
+
+DECLARE_PER_CPU(u32, cntenset_val);
+DECLARE_PER_CPU(u32, previous_ccnt);
+DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+TRACE_EVENT(sched_switch_with_ctrs,
+
+ TP_PROTO(pid_t prev, pid_t next),
+
+ TP_ARGS(prev, next),
+
+ TP_STRUCT__entry(
+ __field(pid_t, old_pid)
+ __field(pid_t, new_pid)
+ __field(u32, cctr)
+ __field(u32, ctr0)
+ __field(u32, ctr1)
+ __field(u32, ctr2)
+ __field(u32, ctr3)
+ __field(u32, ctr4)
+ __field(u32, ctr5)
+ ),
+
+ TP_fast_assign(
+ u32 cpu = smp_processor_id();
+ u32 i;
+ u32 cnten_val;
+ u32 total_ccnt = 0;
+ u32 total_cnt = 0;
+ u32 delta_l1_cnts[NUM_L1_CTRS];
+ __entry->old_pid = prev;
+ __entry->new_pid = next;
+
+ cnten_val = per_cpu(cntenset_val, cpu);
+
+ if (cnten_val & CC) {
+ asm volatile("mrs %0, pmccntr_el0"
+ : "=r" (total_ccnt));
+ /* Read value */
+ __entry->cctr = total_ccnt -
+ per_cpu(previous_ccnt, cpu);
+ per_cpu(previous_ccnt, cpu) = total_ccnt;
+ }
+ for (i = 0; i < NUM_L1_CTRS; i++) {
+ if (cnten_val & (1 << i)) {
+ /* Select */
+ asm volatile("msr pmselr_el0, %0"
+ : : "r" (i));
+ isb();
+ asm volatile("mrs %0, pmxevcntr_el0"
+ : "=r" (total_cnt));
+ /* Read value */
+ delta_l1_cnts[i] = total_cnt -
+ per_cpu(previous_l1_cnts[i], cpu);
+ per_cpu(previous_l1_cnts[i], cpu) =
+ total_cnt;
+ } else
+ delta_l1_cnts[i] = 0;
+ }
+
+ __entry->ctr0 = delta_l1_cnts[0];
+ __entry->ctr1 = delta_l1_cnts[1];
+ __entry->ctr2 = delta_l1_cnts[2];
+ __entry->ctr3 = delta_l1_cnts[3];
+ __entry->ctr4 = delta_l1_cnts[4];
+ __entry->ctr5 = delta_l1_cnts[5];
+ ),
+
+ TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u",
+ __entry->old_pid, __entry->new_pid,
+ __entry->cctr, __entry->ctr0, __entry->ctr1,
+ __entry->ctr2, __entry->ctr3,
+ __entry->ctr4, __entry->ctr5)
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_counters
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kernel/perf_trace_user.c b/arch/arm64/kernel/perf_trace_user.c
new file mode 100644
index 000000000000..98bbb2045265
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_user.c
@@ -0,0 +1,96 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/preempt.h>
+#include <linux/stat.h>
+#include <asm/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include "perf_trace_user.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#define TRACE_USER_MAX_BUF_SIZE 100
+
+static ssize_t perf_trace_write(struct file *file,
+ const char __user *user_string_in,
+ size_t len, loff_t *ppos)
+{
+ u32 cnten_val;
+ int rc;
+ char buf[TRACE_USER_MAX_BUF_SIZE + 1];
+ ssize_t length;
+
+ if (len == 0)
+ return 0;
+
+ length = len > TRACE_USER_MAX_BUF_SIZE ? TRACE_USER_MAX_BUF_SIZE : len;
+
+ rc = copy_from_user(buf, user_string_in, length);
+ if (rc) {
+ pr_err("%s copy_from_user failed, rc=%d\n", __func__, rc);
+		return -EFAULT;
+ }
+
+ /* Remove any trailing newline and make sure string is terminated */
+ if (buf[length - 1] == '\n')
+ buf[length - 1] = '\0';
+ else
+ buf[length] = '\0';
+
+ /*
+ * Disable preemption to ensure that all the performance counter
+ * accesses happen on the same cpu
+ */
+ preempt_disable();
+ /* stop counters, call the trace function, restart them */
+
+ asm volatile("mrs %0, pmcntenset_el0" : "=r" (cnten_val));
+ /* Disable all the counters that were enabled */
+ asm volatile("msr pmcntenclr_el0, %0" : : "r" (cnten_val));
+
+ trace_perf_trace_user(buf, cnten_val);
+
+ /* Enable all the counters that were disabled */
+ asm volatile("msr pmcntenset_el0, %0" : : "r" (cnten_val));
+ preempt_enable();
+
+ return length;
+}
+
+static const struct file_operations perf_trace_fops = {
+ .write = perf_trace_write
+};
+
+static int __init init_perf_trace(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+ unsigned int value = 1;
+
+ dir = perf_create_debug_dir();
+ if (!dir)
+ return -ENOMEM;
+ file = debugfs_create_file("trace_marker", S_IWUSR | S_IWGRP, dir,
+ &value, &perf_trace_fops);
+ if (!file)
+ return -ENOMEM;
+
+ return 0;
+}
+
+late_initcall(init_perf_trace);
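
A hypothetical userspace counterpart to perf_trace_write() (the debugfs mount point and directory name are assumptions; the directory comes from perf_create_debug_dir()):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Write a marker string; the kernel stops the PMU counters,
	 * emits the perf_trace_user tracepoint, and restarts them. */
	static int emit_perf_marker(const char *msg)
	{
		int fd = open("/sys/kernel/debug/perf/trace_marker", O_WRONLY);

		if (fd < 0)
			return -1;
		(void)write(fd, msg, strlen(msg));
		close(fd);
		return 0;
	}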
diff --git a/arch/arm64/kernel/perf_trace_user.h b/arch/arm64/kernel/perf_trace_user.h
new file mode 100644
index 000000000000..e5f7336029af
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_user.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_PERF_TRACE_USER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_USER_H_
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#include <linux/tracepoint.h>
+
+#define CNTENSET_CC 0x80000000
+#define NUM_L1_CTRS 4
+
+TRACE_EVENT(perf_trace_user,
+ TP_PROTO(char *string, u32 cnten_val),
+ TP_ARGS(string, cnten_val),
+
+ TP_STRUCT__entry(
+ __field(u32, cctr)
+ __field(u32, ctr0)
+ __field(u32, ctr1)
+ __field(u32, ctr2)
+ __field(u32, ctr3)
+ __field(u32, lctr0)
+ __field(u32, lctr1)
+ __string(user_string, string)
+ ),
+
+ TP_fast_assign(
+ u32 cnt;
+ u32 l1_cnts[NUM_L1_CTRS];
+ int i;
+
+ if (cnten_val & CNTENSET_CC) {
+ /* Read value */
+ asm volatile("mrs %0, pmccntr_el0" : "=r" (cnt));
+ __entry->cctr = cnt;
+ } else
+ __entry->cctr = 0;
+ for (i = 0; i < NUM_L1_CTRS; i++) {
+ if (cnten_val & (1 << i)) {
+ /* Select */
+ asm volatile("msr pmselr_el0, %0"
+ : : "r" (i));
+ isb();
+ /* Read value */
+ asm volatile("mrs %0, pmxevcntr_el0"
+ : "=r" (cnt));
+ l1_cnts[i] = cnt;
+ } else {
+ l1_cnts[i] = 0;
+ }
+ }
+
+ __entry->ctr0 = l1_cnts[0];
+ __entry->ctr1 = l1_cnts[1];
+ __entry->ctr2 = l1_cnts[2];
+ __entry->ctr3 = l1_cnts[3];
+ __entry->lctr0 = 0;
+ __entry->lctr1 = 0;
+ __assign_str(user_string, string);
+ ),
+
+ TP_printk("CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, L2CTR0: %u, L2CTR1: %u, MSG=%s",
+ __entry->cctr, __entry->ctr0, __entry->ctr1,
+ __entry->ctr2, __entry->ctr3,
+ __entry->lctr0, __entry->lctr1,
+ __get_str(user_string)
+ )
+ );
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_user
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
new file mode 100644
index 000000000000..ce06312e3d34
--- /dev/null
+++ b/arch/arm64/kernel/probes/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
+ kprobes_trampoline.o \
+ simulate-insn.o
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
new file mode 100644
index 000000000000..f7931d900bca
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -0,0 +1,174 @@
+/*
+ * arch/arm64/kernel/probes/decode-insn.c
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <asm/kprobes.h>
+#include <asm/insn.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+static bool __kprobes aarch64_insn_is_steppable(u32 insn)
+{
+ /*
+ * Branch instructions will write a new value into the PC which is
+ * likely to be relative to the XOL address and therefore invalid.
+ * Deliberate generation of an exception during stepping is also not
+ * currently safe. Lastly, MSR instructions can do any number of nasty
+ * things we can't handle during single-stepping.
+ */
+ if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
+ if (aarch64_insn_is_branch(insn) ||
+ aarch64_insn_is_msr_imm(insn) ||
+ aarch64_insn_is_msr_reg(insn) ||
+ aarch64_insn_is_exception(insn) ||
+ aarch64_insn_is_eret(insn))
+ return false;
+
+ /*
+ * The MRS instruction may not return a correct value when
+ * executing in the single-stepping environment. We do make one
+ * exception, for reading the DAIF bits.
+ */
+ if (aarch64_insn_is_mrs(insn))
+ return aarch64_insn_extract_system_reg(insn)
+ != AARCH64_INSN_SPCLREG_DAIF;
+
+ /*
+		 * The HINT instruction is problematic when single-stepping,
+ * except for the NOP case.
+ */
+ if (aarch64_insn_is_hint(insn))
+ return aarch64_insn_is_nop(insn);
+
+ return true;
+ }
+
+ /*
+ * Instructions which load PC relative literals are not going to work
+ * when executed from an XOL slot. Instructions doing an exclusive
+ * load/store are not going to complete successfully when single-step
+ * exception handling happens in the middle of the sequence.
+ */
+ if (aarch64_insn_uses_literal(insn) ||
+ aarch64_insn_is_exclusive(insn))
+ return false;
+
+ return true;
+}
+
+/* Return:
+ * INSN_REJECTED	If the instruction cannot be kprobed,
+ * INSN_GOOD		If the instruction is supported and uses its slot,
+ * INSN_GOOD_NO_SLOT	If the instruction is supported but doesn't use its slot.
+ */
+static enum kprobe_insn __kprobes
+arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /*
+ * Instructions reading or modifying the PC won't work from the XOL
+ * slot.
+ */
+ if (aarch64_insn_is_steppable(insn))
+ return INSN_GOOD;
+
+ if (aarch64_insn_is_bcond(insn)) {
+ asi->handler = simulate_b_cond;
+ } else if (aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn)) {
+ asi->handler = simulate_cbz_cbnz;
+ } else if (aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn)) {
+ asi->handler = simulate_tbz_tbnz;
+ } else if (aarch64_insn_is_adr_adrp(insn)) {
+ asi->handler = simulate_adr_adrp;
+ } else if (aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn)) {
+ asi->handler = simulate_b_bl;
+ } else if (aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_ret(insn)) {
+ asi->handler = simulate_br_blr_ret;
+ } else if (aarch64_insn_is_ldr_lit(insn)) {
+ asi->handler = simulate_ldr_literal;
+ } else if (aarch64_insn_is_ldrsw_lit(insn)) {
+ asi->handler = simulate_ldrsw_literal;
+ } else {
+ /*
+ * Instruction cannot be stepped out-of-line and we don't
+ * (yet) simulate it.
+ */
+ return INSN_REJECTED;
+ }
+
+ return INSN_GOOD_NO_SLOT;
+}
+
+static bool __kprobes
+is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
+{
+ while (scan_start > scan_end) {
+ /*
+ * atomic region starts from exclusive load and ends with
+		 * An atomic region starts with an exclusive load and
+		 * ends with an exclusive store.
+ if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
+ return false;
+ else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
+ return true;
+ scan_start--;
+ }
+
+ return false;
+}
+
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+{
+ enum kprobe_insn decoded;
+ kprobe_opcode_t insn = le32_to_cpu(*addr);
+ kprobe_opcode_t *scan_start = addr - 1;
+ kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
+#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+ struct module *mod;
+#endif
+
+ if (addr >= (kprobe_opcode_t *)_text &&
+ scan_end < (kprobe_opcode_t *)_text)
+ scan_end = (kprobe_opcode_t *)_text;
+#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+ else {
+ preempt_disable();
+ mod = __module_address((unsigned long)addr);
+ if (mod && within_module_init((unsigned long)addr, mod) &&
+ !within_module_init((unsigned long)scan_end, mod))
+ scan_end = (kprobe_opcode_t *)mod->module_init;
+ else if (mod && within_module_core((unsigned long)addr, mod) &&
+ !within_module_core((unsigned long)scan_end, mod))
+ scan_end = (kprobe_opcode_t *)mod->module_core;
+ preempt_enable();
+ }
+#endif
+ decoded = arm_probe_decode_insn(insn, asi);
+
+ if (decoded == INSN_REJECTED ||
+ is_probed_address_atomic(scan_start, scan_end))
+ return INSN_REJECTED;
+
+ return decoded;
+}
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
new file mode 100644
index 000000000000..d438289646a6
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm64/kernel/probes/decode-insn.h
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_ARM64_H
+#define _ARM_KERNEL_KPROBES_ARM64_H
+
+/*
+ * ARM strongly recommends a limit of 128 bytes between LoadExcl and
+ * StoreExcl instructions in a single thread of execution. So keep the
+ * max atomic context size at 32 instructions (128 / 4 bytes per insn).
+ */
+#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t))
+
+enum kprobe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+
+#endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
new file mode 100644
index 000000000000..4ea8433011d0
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -0,0 +1,657 @@
+/*
+ * arch/arm64/kernel/probes/kprobes.c
+ *
+ * Kprobes support for ARM64
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/stringify.h>
+#include <asm/traps.h>
+#include <asm/ptrace.h>
+#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
+#include <asm/system_misc.h>
+#include <asm/insn.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ /* prepare insn slot */
+ p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+
+ flush_icache_range((uintptr_t) (p->ainsn.insn),
+ (uintptr_t) (p->ainsn.insn) +
+ MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+
+ /*
+ * Needs restoring of return address after stepping xol.
+ */
+ p->ainsn.restore = (unsigned long) p->addr +
+ sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+	/* This instruction is not executed out of line. No need to adjust the PC */
+ p->ainsn.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.handler)
+ p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+
+ /* single step simulated, now go for post processing */
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+ extern char __start_rodata[];
+ extern char __end_rodata[];
+
+ if (probe_addr & 0x3)
+ return -EINVAL;
+
+ /* copy instruction */
+ p->opcode = le32_to_cpu(*p->addr);
+
+ if (in_exception_text(probe_addr))
+ return -EINVAL;
+ if (probe_addr >= (unsigned long) __start_rodata &&
+ probe_addr <= (unsigned long) __end_rodata)
+ return -EINVAL;
+
+ /* decode instruction */
+ switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn need simulation */
+ p->ainsn.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+ break;
+	}
+
+ /* prepare the instruction */
+ if (p->ainsn.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+ void *addrs[1];
+ u32 insns[1];
+
+ addrs[0] = (void *)addr;
+ insns[0] = (u32)opcode;
+
+ return aarch64_insn_patch_text(addrs, insns, 1);
+}
+
+/* arm kprobe: install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, BRK64_OPCODE_KPROBES);
+}
+
+/* disarm kprobe: remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * The D-flag (Debug mask) is set (masked) upon debug exception entry.
+ * Kprobes needs to clear (unmask) the D-flag ONLY for a recursive
+ * probe, i.e. when a probe is hit from within a kprobe handler while
+ * executing the pre/post handlers. In that case we return with the
+ * D-flag clear so that single-stepping can be carried out.
+ *
+ * Leave D-flag set in all other cases.
+ */
+static void __kprobes
+spsr_set_debug_flag(struct pt_regs *regs, int mask)
+{
+ unsigned long spsr = regs->pstate;
+
+ if (mask)
+ spsr |= PSR_D_BIT;
+ else
+ spsr &= ~PSR_D_BIT;
+
+ regs->pstate = spsr;
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, an interrupt could
+ * occur between the exception return and the start of the out-of-line
+ * single-step, which would result in wrongly single-stepping into the
+ * interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_irqflag = regs->pstate;
+ regs->pstate |= PSR_I_BIT;
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ if (kcb->saved_irqflag & PSR_I_BIT)
+ regs->pstate |= PSR_I_BIT;
+ else
+ regs->pstate &= ~PSR_I_BIT;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.insn;
+
+ set_ss_context(kcb, slot); /* mark pending ss */
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 0);
+ else
+ WARN_ON(regs->pstate & PSR_D_BIT);
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ kernel_enable_single_step(regs);
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.restore);
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler) {
+ /* post_handler can hit breakpoint and single step
+ * again, so we enable D-flag for recursive exception.
+ */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+		 * We are here because the instruction being single-
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the ip back to the probe address,
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+ */
+ instruction_pointer_set(regs, (unsigned long) cur->addr);
+ if (!instruction_pointer(regs))
+ BUG();
+
+ kernel_disable_single_step();
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 1);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+		 * We increment the nmissed count for accounting;
+		 * the npre/npostfault counts could also be used to
+		 * account for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static void __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it prepped
+ * for calling the break_handler below on re-entry,
+ * so get out doing nothing more here.
+ *
+			 * The pre_handler can itself hit a breakpoint and
+			 * single-step before returning, so keep the PSTATE
+			 * D-flag enabled until the pre_handler returns.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs, kcb, 0);
+ return;
+ }
+ }
+ } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
+ BRK64_OPCODE_KPROBES) && cur_kprobe) {
+ /* We probably hit a jprobe. Call its break handler. */
+ if (cur_kprobe->break_handler &&
+ cur_kprobe->break_handler(cur_kprobe, regs)) {
+ setup_singlestep(cur_kprobe, regs, kcb, 0);
+ return;
+ }
+ }
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+	 * Return to the original instruction and continue.
+ */
+}
+
+static int __kprobes
+kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == addr)) {
+ clear_ss_context(kcb); /* clear pending ss */
+ return DBG_HOOK_HANDLED;
+ }
+ /* not ours, kprobes should ignore it */
+ return DBG_HOOK_ERROR;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ int retval;
+
+ /* return error if this is not our step */
+ retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
+
+ if (retval == DBG_HOOK_HANDLED) {
+ kprobes_restore_local_irqflag(kcb, regs);
+ kernel_disable_single_step();
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 1);
+
+ post_kprobe_handler(kcb, regs);
+ }
+
+ return retval;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
+{
+ kprobe_handler(regs);
+ return DBG_HOOK_HANDLED;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ /*
+	 * Since we can't be sure where in the stack frame "stacked"
+	 * pass-by-value arguments are stored, we just don't try to
+	 * duplicate any of the stack. Do not use jprobes on functions that
+ * use more than 64 bytes (after padding each to an 8 byte boundary)
+ * of arguments, or pass individual arguments larger than 16 bytes.
+ */
+
+ instruction_pointer_set(regs, (unsigned long) jp->entry);
+ preempt_disable();
+ pause_graph_tracing();
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ /*
+	 * A jprobe handler returns by entering a break exception,
+	 * encoded the same as a kprobe, but with two differences:
+	 *  - a special PC to distinguish it from other kprobes,
+	 *  - the stack pointer restored from the saved pt_regs.
+ */
+ asm volatile(" mov sp, %0 \n"
+ "jprobe_return_break: brk %1 \n"
+ :
+ : "r" (kcb->jprobe_saved_regs.sp),
+ "I" (BRK64_ESR_KPROBES)
+ : "memory");
+
+ unreachable();
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ long stack_addr = kcb->jprobe_saved_regs.sp;
+ long orig_sp = kernel_stack_pointer(regs);
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ extern const char jprobe_return_break[];
+
+ if (instruction_pointer(regs) != (u64) jprobe_return_break)
+ return 0;
+
+ if (orig_sp != stack_addr) {
+ struct pt_regs *saved_regs =
+ (struct pt_regs *)kcb->jprobe_saved_regs.sp;
+ pr_err("current sp %lx does not match saved sp %lx\n",
+ orig_sp, stack_addr);
+ pr_err("Saved registers for jprobe %p\n", jp);
+ show_regs(saved_regs);
+ pr_err("Current registers\n");
+ show_regs(regs);
+ BUG();
+ }
+ unpause_graph_tracing();
+ *regs = kcb->jprobe_saved_regs;
+ preempt_enable_no_resched();
+ return 1;
+}
+
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+ extern char __idmap_text_start[], __idmap_text_end[];
+
+ if ((addr >= (unsigned long)__kprobes_text_start &&
+ addr < (unsigned long)__kprobes_text_end) ||
+ (addr >= (unsigned long)__entry_text_start &&
+ addr < (unsigned long)__entry_text_end) ||
+ (addr >= (unsigned long)__idmap_text_start &&
+ addr < (unsigned long)__idmap_text_end) ||
+ !!search_exception_tables(addr))
+ return true;
+
+ return false;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address =
+ (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path have
+ * return probes installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always pushed into the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the (chronologically) first instance's ret_addr
+ * will be the real return address, and all the rest will
+ * point to kretprobe_trampoline.
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_hash_unlock(current, &flags);
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ return (void *)orig_ret_address;
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
+
+ /* replace return addr (x30) with trampoline */
+ regs->regs[30] = (long)&kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
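
These hooks plug into the generic kprobes core. A hedged sketch of a client module exercising them (the probed symbol is an assumption; any non-blacklisted kernel symbol works):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int example_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("kprobe at %p, pc=0x%lx\n",
			p->addr, instruction_pointer(regs));
		return 0;	/* 0: continue to single-step/simulation */
	}

	static struct kprobe example_kp = {
		.symbol_name	= "do_fork",	/* assumed target */
		.pre_handler	= example_pre,
	};

	static int __init example_init(void)
	{
		/* register_kprobe() ends up in arch_prepare_kprobe() above */
		return register_kprobe(&example_kp);
	}

	static void __exit example_exit(void)
	{
		unregister_kprobe(&example_kp);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");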
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000000..5d6e7f14638c
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,81 @@
+/*
+ * trampoline entry and return code for kretprobes.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .macro save_all_base_regs
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ stp x28, x29, [sp, #S_X28]
+ add x0, sp, #S_FRAME_SIZE
+ stp lr, x0, [sp, #S_LR]
+ /*
+ * Construct a useful saved PSTATE
+ */
+ mrs x0, nzcv
+ mrs x1, daif
+ orr x0, x0, x1
+ mrs x1, CurrentEL
+ orr x0, x0, x1
+ mrs x1, SPSel
+ orr x0, x0, x1
+ stp xzr, x0, [sp, #S_PC]
+ .endm
+
+ .macro restore_all_base_regs
+ ldr x0, [sp, #S_PSTATE]
+ and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
+ msr nzcv, x0
+ ldp x0, x1, [sp, #S_X0]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldp x8, x9, [sp, #S_X8]
+ ldp x10, x11, [sp, #S_X10]
+ ldp x12, x13, [sp, #S_X12]
+ ldp x14, x15, [sp, #S_X14]
+ ldp x16, x17, [sp, #S_X16]
+ ldp x18, x19, [sp, #S_X18]
+ ldp x20, x21, [sp, #S_X20]
+ ldp x22, x23, [sp, #S_X22]
+ ldp x24, x25, [sp, #S_X24]
+ ldp x26, x27, [sp, #S_X26]
+ ldp x28, x29, [sp, #S_X28]
+ .endm
+
+ENTRY(kretprobe_trampoline)
+ sub sp, sp, #S_FRAME_SIZE
+
+ save_all_base_regs
+
+ mov x0, sp
+ bl trampoline_probe_handler
+ /*
+ * Replace trampoline address in lr with actual orig_ret_addr return
+ * address.
+ */
+ mov lr, x0
+
+ restore_all_base_regs
+
+ add sp, sp, #S_FRAME_SIZE
+ ret
+
+ENDPROC(kretprobe_trampoline)
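
The trampoline pairs with arch_prepare_kretprobe() in kprobes.c, which rewrites x30 so that returns from the probed function land here. A hedged kretprobe client sketch (the target symbol is an assumption):

	#include <linux/kprobes.h>

	static int example_ret(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
	{
		/* On arm64 the return value is in x0. */
		pr_info("returned %ld\n", (long)regs->regs[0]);
		return 0;
	}

	static struct kretprobe example_krp = {
		.kp.symbol_name	= "do_fork",	/* assumed target */
		.handler	= example_ret,
	};

	/* register_kretprobe(&example_krp) arms it; trampoline_probe_handler()
	 * above then recovers the real return address on each return. */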
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000000..8977ce9d009d
--- /dev/null
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -0,0 +1,217 @@
+/*
+ * arch/arm64/kernel/probes/simulate-insn.c
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "simulate-insn.h"
+
+#define sign_extend(x, signbit) \
+ ((x) | (0 - ((x) & (1 << (signbit)))))
+
+#define bbl_displacement(insn) \
+ sign_extend(((insn) & 0x3ffffff) << 2, 27)
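+
+/*
+ * Worked example: for B/BL, imm26 occupies bits [25:0] and is scaled
+ * by 4. An all-ones imm26 (0x3ffffff) shifted left by 2 gives
+ * 0xffffffc; bit 27 of that value is set, so sign_extend() yields -4,
+ * i.e. a branch back to the previous instruction.
+ */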
+
+#define bcond_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+
+#define cbz_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+
+#define tbz_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x3fff) << 2, 15)
+
+#define ldr_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+
+static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
+{
+ if (reg < 31)
+ regs->regs[reg] = val;
+}
+
+static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val)
+{
+ if (reg < 31)
+ regs->regs[reg] = lower_32_bits(val);
+}
+
+static inline u64 get_x_reg(struct pt_regs *regs, int reg)
+{
+ if (reg < 31)
+ return regs->regs[reg];
+ else
+ return 0;
+}
+
+static inline u32 get_w_reg(struct pt_regs *regs, int reg)
+{
+ if (reg < 31)
+ return lower_32_bits(regs->regs[reg]);
+ else
+ return 0;
+}
+
+static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+
+ return (opcode & (1 << 31)) ?
+ (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0);
+}
+
+static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+
+ return (opcode & (1 << 31)) ?
+ (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0);
+}
+
+static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+ int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);
+
+ return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0;
+}
+
+static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+ int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);
+
+ return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0;
+}
+
+/*
+ * instruction simulation functions
+ */
+void __kprobes
+simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
+{
+ long imm, xn, val;
+
+ xn = opcode & 0x1f;
+ imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
+ imm = sign_extend(imm, 20);
+ if (opcode & 0x80000000)
+ val = (imm<<12) + (addr & 0xfffffffffffff000);
+ else
+ val = imm + addr;
+
+ set_x_reg(regs, xn, val);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
+
+void __kprobes
+simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = bbl_displacement(opcode);
+
+ /* Link register is x30 */
+ if (opcode & (1 << 31))
+ set_x_reg(regs, 30, addr + 4);
+
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff))
+ disp = bcond_displacement(opcode);
+
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int xn = (opcode >> 5) & 0x1f;
+
+ /* update pc first in case we're doing a "blr lr" */
+ instruction_pointer_set(regs, get_x_reg(regs, xn));
+
+ /* Link register is x30 */
+ if (((opcode >> 21) & 0x3) == 1)
+ set_x_reg(regs, 30, addr + 4);
+}
+
+void __kprobes
+simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (opcode & (1 << 24)) {
+ if (check_cbnz(opcode, regs))
+ disp = cbz_displacement(opcode);
+ } else {
+ if (check_cbz(opcode, regs))
+ disp = cbz_displacement(opcode);
+ }
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (opcode & (1 << 24)) {
+ if (check_tbnz(opcode, regs))
+ disp = tbz_displacement(opcode);
+ } else {
+ if (check_tbz(opcode, regs))
+ disp = tbz_displacement(opcode);
+ }
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+{
+ u64 *load_addr;
+ int xn = opcode & 0x1f;
+ int disp;
+
+ disp = ldr_displacement(opcode);
+ load_addr = (u64 *) (addr + disp);
+
+ if (opcode & (1 << 30)) /* x0-x30 */
+ set_x_reg(regs, xn, *load_addr);
+ else /* w0-w30 */
+ set_w_reg(regs, xn, *load_addr);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
+
+void __kprobes
+simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
+{
+ s32 *load_addr;
+ int xn = opcode & 0x1f;
+ int disp;
+
+ disp = ldr_displacement(opcode);
+ load_addr = (s32 *) (addr + disp);
+
+ set_x_reg(regs, xn, *load_addr);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000000..050bde683c2d
--- /dev/null
+++ b/arch/arm64/kernel/probes/simulate-insn.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm64/kernel/probes/simulate-insn.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
+#define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
+
+void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs);
+
+#endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 236120fc4dbc..5dd9b572259f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -87,6 +87,16 @@ void arch_cpu_idle(void)
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
+void arch_cpu_idle_enter(void)
+{
+ idle_notifier_call_chain(IDLE_START);
+}
+
+void arch_cpu_idle_exit(void)
+{
+ idle_notifier_call_chain(IDLE_END);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -182,7 +192,7 @@ static void show_data(unsigned long addr, int nbytes, const char *name)
* don't attempt to dump non-kernel addresses or
* values that are probably just small negative numbers
*/
- if (addr < PAGE_OFFSET || addr > -256UL)
+ if (addr < KIMAGE_VADDR || addr > -256UL)
return;
printk("\n%s: %#lx:\n", name, addr);
@@ -207,29 +217,23 @@ static void show_data(unsigned long addr, int nbytes, const char *name)
if (probe_kernel_address(p, data)) {
printk(" ********");
} else {
- printk(" %08x", data);
+ pr_cont(" %08x", data);
}
++p;
}
- printk("\n");
+ pr_cont("\n");
}
}
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
mm_segment_t fs;
- unsigned int i;
fs = get_fs();
set_fs(KERNEL_DS);
show_data(regs->pc - nbytes, nbytes * 2, "PC");
show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
show_data(regs->sp - nbytes, nbytes * 2, "SP");
- for (i = 0; i < 30; i++) {
- char name[4];
- snprintf(name, sizeof(name), "X%u", i);
- show_data(regs->regs[i] - nbytes, nbytes * 2, name);
- }
set_fs(fs);
}
@@ -260,7 +264,7 @@ void __show_regs(struct pt_regs *regs)
printk("\n");
}
if (!user_mode(regs))
- show_extra_register_data(regs, 128);
+ show_extra_register_data(regs, 64);
printk("\n");
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index dbff1ab8e92f..2f8094ab1cef 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -21,7 +21,6 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/psci.h>
-#include <linux/slab.h>
#include <uapi/linux/psci.h>
@@ -29,76 +28,10 @@
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
-{
- int i, ret, count = 0;
- u32 *psci_states;
- struct device_node *state_node, *cpu_node;
-
- cpu_node = of_get_cpu_node(cpu, NULL);
- if (!cpu_node)
- return -ENODEV;
-
- /*
- * If the PSCI cpu_suspend function hook has not been initialized
- * idle states must not be enabled, so bail out
- */
- if (!psci_ops.cpu_suspend)
- return -EOPNOTSUPP;
-
- /* Count idle states */
- while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
- count))) {
- count++;
- of_node_put(state_node);
- }
-
- if (!count)
- return -ENODEV;
-
- psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
- if (!psci_states)
- return -ENOMEM;
-
- for (i = 0; i < count; i++) {
- u32 state;
-
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
- ret = of_property_read_u32(state_node,
- "arm,psci-suspend-param",
- &state);
- if (ret) {
- pr_warn(" * %s missing arm,psci-suspend-param property\n",
- state_node->full_name);
- of_node_put(state_node);
- goto free_mem;
- }
-
- of_node_put(state_node);
- pr_debug("psci-power-state %#x index %d\n", state, i);
- if (!psci_power_state_is_valid(state)) {
- pr_warn("Invalid PSCI power state %#x\n", state);
- ret = -EINVAL;
- goto free_mem;
- }
- psci_states[i] = state;
- }
- /* Idle states parsed correctly, initialize per-cpu pointer */
- per_cpu(psci_power_state, cpu) = psci_states;
- return 0;
-
-free_mem:
- kfree(psci_states);
- return ret;
-}
static int __init cpu_psci_cpu_init(unsigned int cpu)
{
+ pr_info("Initializing psci_cpu_init\n");
return 0;
}
@@ -166,7 +99,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
do {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
- pr_info("CPU%d killed (polled %d ms)\n", cpu,
+ pr_debug("CPU%d killed (polled %d ms)\n", cpu,
jiffies_to_msecs(jiffies - start));
return 0;
}
@@ -180,38 +113,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
}
#endif
-static int psci_suspend_finisher(unsigned long index)
-{
- u32 *state = __this_cpu_read(psci_power_state);
-
- return psci_ops.cpu_suspend(state[index - 1],
- virt_to_phys(cpu_resume));
-}
-
-static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
-{
- int ret;
- u32 *state = __this_cpu_read(psci_power_state);
- /*
- * idle state index 0 corresponds to wfi, should never be called
- * from the cpu_suspend operations
- */
- if (WARN_ON_ONCE(!index))
- return -EINVAL;
-
- if (!psci_power_state_loses_context(state[index - 1]))
- ret = psci_ops.cpu_suspend(state[index - 1], 0);
- else
- ret = cpu_suspend(index, psci_suspend_finisher);
-
- return ret;
-}
-
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
- .cpu_init_idle = cpu_psci_cpu_init_idle,
- .cpu_suspend = cpu_psci_cpu_suspend,
+ .cpu_init_idle = psci_cpu_init_idle,
+ .cpu_suspend = psci_cpu_suspend_enter,
#endif
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index beff0fb11b6b..c67bd311e815 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -49,6 +49,106 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+#define GPR_OFFSET_NAME(r) \
+ {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(pstate),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
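+
+/*
+ * Usage sketch (hypothetical, not part of this patch): kprobe-based
+ * event tracing resolves register names and stack slots through the
+ * helpers above, e.g.
+ *
+ *	regs_query_register_offset("x2");	// offset of x2 in pt_regs
+ *	regs_get_kernel_stack_nth(regs, 2);	// 2nd word up from the sp
+ */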
+
/*
* TODO: does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
@@ -1254,13 +1354,13 @@ static void tracehook_report_syscall(struct pt_regs *regs,
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
- /* Do the secure computing check first; failures should be fast. */
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+ /* Do the secure computing after ptrace; failures should be fast. */
+ if (secure_computing(NULL) == -1)
+ return -1;
+
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 4bc5bc9463b8..01f259ec5700 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -44,6 +44,7 @@
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/psci.h>
+#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <asm/acpi.h>
@@ -65,8 +66,15 @@
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
+unsigned int boot_reason;
+EXPORT_SYMBOL(boot_reason);
+
+unsigned int cold_boot;
+EXPORT_SYMBOL(cold_boot);
+
phys_addr_t __fdt_pointer __initdata;
+const char *machine_name;
/*
* Standard memory resources
*/
@@ -176,7 +184,6 @@ static void __init smp_build_mpidr_hash(void)
*/
if (mpidr_hash_size() > 4 * num_possible_cpus())
pr_warn("Large number of MPIDR hash buckets detected\n");
- __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -194,7 +201,11 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
cpu_relax();
}
- dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
+ machine_name = of_flat_dt_get_machine_name();
+ if (machine_name) {
+ dump_stack_set_arch_desc("%s (DT)", machine_name);
+ pr_info("Machine: %s\n", machine_name);
+ }
}
static void __init request_standard_resources(void)
@@ -290,6 +301,8 @@ static inline void __init relocate_initrd(void)
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+void __init __weak init_random_pool(void) { }
+
void __init setup_arch(char **cmdline_p)
{
pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
@@ -368,6 +381,7 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
+ init_random_pool();
if (boot_args[1] || boot_args[2] || boot_args[3]) {
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
@@ -401,7 +415,13 @@ static int __init topology_init(void)
return 0;
}
-subsys_initcall(topology_init);
+postcore_initcall(topology_init);
+
+void arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+ pdev->archdata.dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+}
/*
* Dump out kernel offset information on panic.
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index a8eafdbc7cb8..0bed9a899850 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -25,6 +25,7 @@
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
@@ -402,6 +403,9 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned int thread_flags)
{
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
if (thread_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 107335637390..666363d127e5 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -28,42 +28,7 @@
#include <asm/signal32.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
-
-struct compat_sigcontext {
- /* We always set these two fields to 0 */
- compat_ulong_t trap_no;
- compat_ulong_t error_code;
-
- compat_ulong_t oldmask;
- compat_ulong_t arm_r0;
- compat_ulong_t arm_r1;
- compat_ulong_t arm_r2;
- compat_ulong_t arm_r3;
- compat_ulong_t arm_r4;
- compat_ulong_t arm_r5;
- compat_ulong_t arm_r6;
- compat_ulong_t arm_r7;
- compat_ulong_t arm_r8;
- compat_ulong_t arm_r9;
- compat_ulong_t arm_r10;
- compat_ulong_t arm_fp;
- compat_ulong_t arm_ip;
- compat_ulong_t arm_sp;
- compat_ulong_t arm_lr;
- compat_ulong_t arm_pc;
- compat_ulong_t arm_cpsr;
- compat_ulong_t fault_address;
-};
-
-struct compat_ucontext {
- compat_ulong_t uc_flags;
- compat_uptr_t uc_link;
- compat_stack_t uc_stack;
- struct compat_sigcontext uc_mcontext;
- compat_sigset_t uc_sigmask;
- int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
- compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8)));
-};
+#include <asm/vdso.h>
struct compat_vfp_sigframe {
compat_ulong_t magic;
@@ -91,16 +56,6 @@ struct compat_aux_sigframe {
unsigned long end_magic;
} __attribute__((__aligned__(8)));
-struct compat_sigframe {
- struct compat_ucontext uc;
- compat_ulong_t retcode[2];
-};
-
-struct compat_rt_sigframe {
- struct compat_siginfo info;
- struct compat_sigframe sig;
-};
-
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
@@ -484,14 +439,27 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
/* Set up sigreturn pointer */
+#ifdef CONFIG_VDSO32
+ void *vdso_base = current->mm->context.vdso;
+ void *trampoline =
+ (ka->sa.sa_flags & SA_SIGINFO
+ ? (thumb
+ ? VDSO_SYMBOL(vdso_base, compat_rt_sigreturn_thumb)
+ : VDSO_SYMBOL(vdso_base, compat_rt_sigreturn_arm))
+ : (thumb
+ ? VDSO_SYMBOL(vdso_base, compat_sigreturn_thumb)
+ : VDSO_SYMBOL(vdso_base, compat_sigreturn_arm)));
+
+ retcode = ptr_to_compat(trampoline) + thumb;
+#else
+ void *sigreturn_base = current->mm->context.vdso;
unsigned int idx = thumb << 1;
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
- retcode = AARCH32_VECTORS_BASE +
- AARCH32_KERN_SIGRET_CODE_OFFSET +
- (idx << 2) + thumb;
+ retcode = ptr_to_compat(sigreturn_base) + (idx << 2) + thumb;
+#endif
}
regs->regs[0] = usig;
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S
new file mode 100644
index 000000000000..6ecda4d84cd5
--- /dev/null
+++ b/arch/arm64/kernel/sigreturn32.S
@@ -0,0 +1,67 @@
+/*
+ * sigreturn trampolines for AArch32.
+ *
+ * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+
+#include <asm/unistd.h>
+
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3
+ // svc #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef
+
+ /*
+ * Thumb code
+ */
+	// mov r7, #__NR_compat_sigreturn
+	.byte __NR_compat_sigreturn, 0x27
+	// svc #__NR_compat_sigreturn
+	.byte __NR_compat_sigreturn, 0xdf
+
+ /*
+ * ARM code
+ */
+ // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3
+ // svc #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef
+
+ /*
+ * Thumb code
+ */
+	// mov r7, #__NR_compat_rt_sigreturn
+	.byte __NR_compat_rt_sigreturn, 0x27
+	// svc #__NR_compat_rt_sigreturn
+	.byte __NR_compat_rt_sigreturn, 0xdf
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index f586f7c875e2..100f92f13113 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,39 +49,32 @@
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
.endm
/*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
*
- * x0 = suspend finisher argument
- * x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ * x0 = struct sleep_stack_data area
*/
ENTRY(__cpu_suspend_enter)
- stp x29, lr, [sp, #-96]!
- stp x19, x20, [sp,#16]
- stp x21, x22, [sp,#32]
- stp x23, x24, [sp,#48]
- stp x25, x26, [sp,#64]
- stp x27, x28, [sp,#80]
- /*
- * Stash suspend finisher and its argument in x20 and x19
- */
- mov x19, x0
- mov x20, x1
+ stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+ stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
+ stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
+ stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
+ stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
+ stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+ /* save the sp in cpu_suspend_ctx */
mov x2, sp
- sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
- mov x0, sp
- /*
- * x0 now points to struct cpu_suspend_ctx allocated on the stack
- */
- str x2, [x0, #CPU_CTX_SP]
- ldr x1, =sleep_save_sp
- ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
+ str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+ /* find the mpidr_hash */
+ ldr x1, =sleep_save_stash
+ ldr x1, [x1]
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
ldr x10, [x9, #MPIDR_HASH_MASK]
@@ -93,70 +86,36 @@ ENTRY(__cpu_suspend_enter)
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
add x1, x1, x8, lsl #3
- bl __cpu_suspend_save
- /*
- * Grab suspend finisher in x20 and its argument in x19
- */
- mov x0, x19
- mov x1, x20
- /*
- * We are ready for power down, fire off the suspend finisher
- * in x1, with argument in x0
- */
- blr x1
- /*
- * Never gets here, unless suspend finisher fails.
- * Successful cpu_suspend should return from cpu_resume, returning
- * through this code path is considered an error
- * If the return value is set to 0 force x0 = -EOPNOTSUPP
- * to make sure a proper error condition is propagated
- */
- cmp x0, #0
- mov x3, #-EOPNOTSUPP
- csel x0, x3, x0, eq
- add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
- ldp x19, x20, [sp, #16]
- ldp x21, x22, [sp, #32]
- ldp x23, x24, [sp, #48]
- ldp x25, x26, [sp, #64]
- ldp x27, x28, [sp, #80]
- ldp x29, lr, [sp], #96
+
+ str x0, [x1]
+ add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
+ stp x29, lr, [sp, #-16]!
+ bl cpu_do_suspend
+ ldp x29, lr, [sp], #16
+ mov x0, #1
ret
ENDPROC(__cpu_suspend_enter)
.ltorg
-/*
- * x0 must contain the sctlr value retrieved from restored context
- */
- .pushsection ".idmap.text", "ax"
-ENTRY(cpu_resume_mmu)
- ldr x3, =cpu_resume_after_mmu
- msr sctlr_el1, x0 // restore sctlr_el1
- isb
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
- br x3 // global jump to virtual address
-ENDPROC(cpu_resume_mmu)
- .popsection
-cpu_resume_after_mmu:
- mov x0, #0 // return zero on success
- ldp x19, x20, [sp, #16]
- ldp x21, x22, [sp, #32]
- ldp x23, x24, [sp, #48]
- ldp x25, x26, [sp, #64]
- ldp x27, x28, [sp, #80]
- ldp x29, lr, [sp], #96
- ret
-ENDPROC(cpu_resume_after_mmu)
-
+ .pushsection ".idmap.text", "ax"
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
+ /* enable the MMU early - so we can access sleep_save_stash by va */
+ adr_l lr, __enable_mmu /* __cpu_setup will return here */
+ adr_l x27, _resume_switched /* __enable_mmu will branch here */
+ adrp x25, idmap_pg_dir
+ adrp x26, swapper_pg_dir
+ b __cpu_setup
+ENDPROC(cpu_resume)
+
+_resume_switched:
+ ldr x8, =_cpu_resume
+ br x8
+ENDPROC(_resume_switched)
+ .ltorg
+ .popsection
+
+ENTRY(_cpu_resume)
mrs x1, mpidr_el1
adrp x8, mpidr_hash
add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -166,17 +125,29 @@ ENTRY(cpu_resume)
ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
/* x7 contains hash index, let's use it to grab context pointer */
- ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
+ ldr_l x0, sleep_save_stash
ldr x0, [x0, x7, lsl #3]
+ add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+ add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
- /* load physical address of identity map page table in x1 */
- adrp x1, idmap_pg_dir
mov sp, x2
/*
- * cpu_do_resume expects x0 to contain context physical address
- * pointer and x1 to contain physical address of 1:1 page tables
+ * cpu_do_resume expects x0 to contain context address pointer
*/
- bl cpu_do_resume // PC relative jump, MMU off
- b cpu_resume_mmu // Resume MMU, never returns
-ENDPROC(cpu_resume)
+ bl cpu_do_resume
+
+#ifdef CONFIG_KASAN
+ mov x0, sp
+ bl kasan_unpoison_task_stack_below
+#endif
+
+ ldp x19, x20, [x29, #16]
+ ldp x21, x22, [x29, #32]
+ ldp x23, x24, [x29, #48]
+ ldp x25, x26, [x29, #64]
+ ldp x27, x28, [x29, #80]
+ ldp x29, lr, [x29]
+ mov x0, #0
+ ret
+ENDPROC(_cpu_resume)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a90c1f184792..16f97cdaaeae 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -53,6 +53,8 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
+#include <asm/edac.h>
+#include <soc/qcom/minidump.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
@@ -73,7 +75,8 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP
+ IPI_WAKEUP,
+ IPI_CPU_BACKTRACE,
};
/*
@@ -150,6 +153,8 @@ asmlinkage notrace void secondary_start_kernel(void)
cpu = task_cpu(current);
set_my_cpu_offset(per_cpu_offset(cpu));
+ pr_debug("CPU%u: Booted secondary processor\n", cpu);
+
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -184,16 +189,16 @@ asmlinkage notrace void secondary_start_kernel(void)
/*
* Enable GIC and timers.
*/
- notify_cpu_starting(cpu);
-
smp_store_cpu_info(cpu);
+ notify_cpu_starting(cpu);
+
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
* before we continue.
*/
- pr_info("CPU%u: Booted secondary processor [%08x]\n",
+ pr_debug("CPU%u: Booted secondary processor [%08x]\n",
cpu, read_cpuid_id());
set_cpu_online(cpu, true);
complete(&cpu_running);
@@ -278,7 +283,7 @@ void __cpu_die(unsigned int cpu)
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
@@ -300,7 +305,7 @@ void __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void cpu_die(void)
+void __ref cpu_die(void)
{
unsigned int cpu = smp_processor_id();
@@ -318,7 +323,16 @@ void cpu_die(void)
*/
cpu_ops[cpu]->cpu_die(cpu);
- BUG();
+ /*
+ * Do not return to the idle loop - jump back to the secondary
+ * cpu initialisation. There's some initialisation which needs
+ * to be repeated to undo the effects of taking the CPU offline.
+ */
+
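+ /*
+ * A sketch of the intent of the asm below: point SP at the top of the
+ * idle task's stack, zero the frame pointer (x29) so that unwinding
+ * terminates here, then branch back into secondary_start_kernel().
+ */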
+ asm volatile("mov sp, %0\n"
+ "mov x29, #0\n"
+ "b secondary_start_kernel"
+ : : "r" (task_stack_page(current) + THREAD_START_SP));
}
#endif
@@ -487,6 +501,18 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
#else
#define acpi_table_parse_madt(...) do { } while (0)
#endif
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+
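+/*
+ * Flag an IPI as pending for each target CPU before kicking the
+ * registered cross-call backend; handle_IPI() clears pending_ipi once
+ * the IPI has been serviced, so other code can tell whether an IPI is
+ * still in flight for a given CPU.
+ */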
+void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cpumask)
+ per_cpu(pending_ipi, cpu) = true;
+
+ __smp_cross_call(cpumask, func);
+}
/*
* Enumerate the possible CPU set from the device tree and build the
@@ -635,8 +661,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
__smp_cross_call = fn;
@@ -649,11 +673,17 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_WAKEUP, "CPU wake-up interrupts"),
+ S(IPI_WAKEUP, "CPU wakeup interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, target)
+ per_cpu(pending_ipi, cpu) = true;
+
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -685,12 +715,12 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- smp_cross_call(mask, IPI_CALL_FUNC);
+ smp_cross_call_common(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+ smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
@@ -710,27 +740,100 @@ void arch_irq_work_raise(void)
static DEFINE_RAW_SPINLOCK(stop_lock);
+DEFINE_PER_CPU(struct pt_regs, regs_before_stop);
+
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
*/
-static void ipi_cpu_stop(unsigned int cpu)
+static void ipi_cpu_stop(unsigned int cpu, struct pt_regs *regs)
{
if (system_state == SYSTEM_BOOTING ||
system_state == SYSTEM_RUNNING) {
+ per_cpu(regs_before_stop, cpu) = *regs;
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
+ show_regs(regs);
dump_stack();
+ dump_stack_minidump(regs->sp);
+ arm64_check_cache_ecc(NULL);
raw_spin_unlock(&stop_lock);
}
set_cpu_online(cpu, false);
+ flush_cache_all();
local_irq_disable();
while (1)
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag already set), don't print duplicate CPU dump info.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpumask_clear_cpu(this_cpu, &backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ if (!cpumask_empty(&backtrace_mask))
+ smp_cross_call_common(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpumask_test_cpu(cpu, &backtrace_mask)) {
+ raw_spin_lock(&backtrace_lock);
+ pr_warn("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpumask_clear_cpu(cpu, &backtrace_mask);
+ }
+}
+
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+ smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+ dump_stack();
+}
+#endif
+
+
/*
* Main handler for inter-processor interrupts
*/
@@ -757,7 +860,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
case IPI_CPU_STOP:
irq_enter();
- ipi_cpu_stop(cpu);
+ ipi_cpu_stop(cpu, regs);
irq_exit();
break;
@@ -777,6 +880,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
#endif
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
+ break;
+
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
case IPI_WAKEUP:
WARN_ONCE(!acpi_parking_protocol_valid(cpu),
@@ -792,18 +899,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+ per_cpu(pending_ipi, cpu) = false;
set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
- smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+ BUG_ON(cpu_is_offline(cpu));
+ smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
- smp_cross_call(mask, IPI_TIMER);
+ smp_cross_call_common(mask, IPI_TIMER);
}
#endif
@@ -828,7 +938,7 @@ void smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
- smp_cross_call(&mask, IPI_CPU_STOP);
+ smp_cross_call_common(&mask, IPI_CPU_STOP);
}
/* Wait up to one second for other CPUs to stop */
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 2ccb883353d9..303d571702ea 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -30,7 +30,8 @@
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
+volatile unsigned long __section(".mmuoff.data.read")
+secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 75a856568813..0edcd34b45d2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -15,6 +15,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
@@ -63,10 +64,14 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (fp < low || fp > high || fp & 0xf)
return -EINVAL;
+ kasan_disable_current();
+
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
+ kasan_enable_current();
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk && tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
@@ -157,7 +162,8 @@ static int save_trace(struct stackframe *frame, void *d)
return trace->nr_entries >= trace->max_entries;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
@@ -167,17 +173,18 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- data.no_sched_functions = 0;
+ /* We don't want this function nor the caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = tsk->curr_ret_stack;
@@ -189,10 +196,17 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
put_task_stack(tsk);
}
+EXPORT_SYMBOL(save_stack_trace_tsk);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
+
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index e7a96462ca2d..468b939f3471 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -13,30 +13,11 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
- * This is called by __cpu_suspend_enter() to save the state, and do whatever
- * flushing is required to ensure that when the CPU goes to sleep we have
- * the necessary data available when the caches are not searched.
- *
- * ptr: CPU context virtual address
- * save_ptr: address of the location where the context physical address
- * must be saved
+ * This is allocated by cpu_suspend_init(), and used to store a pointer to
+ * the 'struct sleep_stack_data' that contains a particular CPU's state.
*/
-void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
- phys_addr_t *save_ptr)
-{
- *save_ptr = virt_to_phys(ptr);
-
- cpu_do_suspend(ptr);
- /*
- * Only flush the context that must be retrieved with the MMU
- * off. VA primitives ensure the flush is applied to all
- * cache levels so context is pushed to DRAM.
- */
- __flush_dcache_area(ptr, sizeof(*ptr));
- __flush_dcache_area(save_ptr, sizeof(*save_ptr));
-}
+unsigned long *sleep_save_stash;
/*
* This hook is provided so that cpu_suspend code can restore HW
@@ -54,6 +35,24 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
hw_breakpoint_restore = hw_bp_restore;
}
+void notrace __cpu_suspend_exit(void)
+{
+ /*
+ * We are resuming from reset with the idmap active in TTBR0_EL1.
+ * We must uninstall the idmap and restore the expected MMU
+ * state before we can possibly return to userspace.
+ */
+ cpu_uninstall_idmap();
+
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(NULL);
+}
+
/*
* cpu_suspend
*
@@ -63,8 +62,9 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
*/
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
- int ret;
+ int ret = 0;
unsigned long flags;
+ struct sleep_stack_data state;
/*
* From this point debug exceptions are disabled to prevent
@@ -80,20 +80,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
pause_graph_tracing();
- /*
- * mm context saved on the stack, it will be restored when
- * the cpu comes out of reset through the identity mapped
- * page tables, so that the thread address space is properly
- * set-up on function return.
- */
- ret = __cpu_suspend_enter(arg, fn);
- if (ret == 0) {
- /*
- * We are resuming from reset with the idmap active in TTBR0_EL1.
- * We must uninstall the idmap and restore the expected MMU
- * state before we can possibly return to userspace.
- */
- cpu_uninstall_idmap();
+ if (__cpu_suspend_enter(&state)) {
+ /* Call the suspend finisher */
+ ret = fn(arg);
/*
* PSTATE was not saved over suspend/resume, re-enable any
@@ -108,8 +97,10 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* before debug exceptions are possibly reenabled
* through local_dbg_restore.
*/
- if (hw_breakpoint_restore)
- hw_breakpoint_restore(NULL);
+ if (!ret)
+ ret = -EOPNOTSUPP;
+ } else {
+ __cpu_suspend_exit();
}
unpause_graph_tracing();
@@ -124,22 +115,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret;
}
-struct sleep_save_sp sleep_save_sp;
-
static int __init cpu_suspend_init(void)
{
- void *ctx_ptr;
-
/* ctx_ptr is an array of physical addresses */
- ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+ sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
+ GFP_KERNEL);
- if (WARN_ON(!ctx_ptr))
+ if (WARN_ON(!sleep_save_stash))
return -ENOMEM;
- sleep_save_sp.save_ptr_stash = ctx_ptr;
- sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
- __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
-
return 0;
}
early_initcall(cpu_suspend_init);
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 7758f7ff131b..d0e5fe5fbf22 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -19,14 +19,36 @@
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched_energy.h>
#include <asm/cputype.h>
#include <asm/topology.h>
+/*
+ * CPU power table
+ * This per-CPU data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity,
+ * and we reflect that difference in the cpu_power field so the scheduler
+ * can take it into account when load balancing. A per-CPU structure is
+ * preferred because each CPU updates its own cpu_power field during load
+ * balancing, except for idle cores: one idle core is selected to run
+ * rebalance_domains for all idle cores, and their cpu_power can be updated
+ * during this sequence.
+ */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
#ifdef CONFIG_CPU_FREQ
@@ -181,6 +203,46 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
return 0;
}
+struct cpu_efficiency {
+ const char *compatible;
+ unsigned long efficiency;
+};
+
+/*
+ * Table of the relative efficiency of each processor.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not listed in the table use the
+ * default SCHED_CAPACITY_SCALE value for cpu_scale.
+ */
+static const struct cpu_efficiency table_efficiency[] = {
+ { NULL, },
+};
+
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu) __cpu_capacity[cpu]
+
+static unsigned long middle_capacity = 1;
+
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+ return per_cpu(cpu_efficiency, cpu);
+}
+EXPORT_SYMBOL(arch_get_cpu_efficiency);
+
+/*
+ * Iterate over all CPUs' descriptors in the DT and compute each CPU's
+ * efficiency (as per table_efficiency). Also calculate a middle
+ * efficiency, as close as possible to (max{eff_i} + min{eff_i}) / 2.
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
static int __init parse_dt_topology(void)
{
struct device_node *cn, *map;
@@ -220,6 +282,107 @@ out:
return ret;
}
+static void __init parse_dt_cpu_power(void)
+{
+ const struct cpu_efficiency *cpu_eff;
+ struct device_node *cn;
+ unsigned long min_capacity = ULONG_MAX;
+ unsigned long max_capacity = 0;
+ unsigned long capacity = 0;
+ int cpu;
+
+ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+ GFP_NOWAIT);
+
+ for_each_possible_cpu(cpu) {
+ const u32 *rate;
+ int len;
+ u32 efficiency;
+
+ /* Too early to use cpu->of_node */
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_err("Missing device node for CPU %d\n", cpu);
+ continue;
+ }
+
+ /*
+ * The CPU efficiency value passed from the device tree
+ * overrides the value defined in the table_efficiency[]
+ */
+ if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
+
+ for (cpu_eff = table_efficiency;
+ cpu_eff->compatible; cpu_eff++)
+
+ if (of_device_is_compatible(cn,
+ cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL) {
+ pr_warn("%s: Unknown CPU type\n",
+ cn->full_name);
+ continue;
+ }
+
+ efficiency = cpu_eff->efficiency;
+ }
+
+ per_cpu(cpu_efficiency, cpu) = efficiency;
+
+ rate = of_get_property(cn, "clock-frequency", &len);
+ if (!rate || len != 4) {
+ pr_err("%s: Missing clock-frequency property\n",
+ cn->full_name);
+ continue;
+ }
+
+ capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
+
+ /* Save min capacity of the system */
+ if (capacity < min_capacity)
+ min_capacity = capacity;
+
+ /* Save max capacity of the system */
+ if (capacity > max_capacity)
+ max_capacity = capacity;
+
+ cpu_capacity(cpu) = capacity;
+ }
+
+ /* If min and max capacities are equal we bypass the update of the
+ * cpu_scale because all CPUs have the same capacity. Otherwise, we
+ * compute a middle_capacity factor that will ensure that the capacity
+ * of an 'average' CPU of the system will be as close as possible to
+ * SCHED_CAPACITY_SCALE, which is the default value, but with the
+ * constraint explained near table_efficiency[].
+ */
+ if (min_capacity == max_capacity)
+ return;
+ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
+ middle_capacity = (min_capacity + max_capacity)
+ >> (SCHED_CAPACITY_SHIFT+1);
+ else
+ middle_capacity = ((max_capacity / 3)
+ >> (SCHED_CAPACITY_SHIFT-1)) + 1;
+}
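+
+/*
+ * Worked example for the computation above (hypothetical numbers): two
+ * clusters at 1.0 GHz with efficiency 2048 and 1.8 GHz with efficiency
+ * 3072 give capacities of 953 * 2048 = ~1952k and 1716 * 3072 = ~5272k.
+ * Since 4 * max < 3 * (max + min), middle_capacity becomes
+ * (min + max) >> (SCHED_CAPACITY_SHIFT + 1) = 3527, and update_cpu_power()
+ * later yields cpu_scale values of ~553 and ~1494, i.e. ~1024 on average.
+ */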
+
+/*
+ * Look for a custom capacity of a CPU in the cpu_topo_data table during
+ * boot. The update of all CPUs is O(n^2) on a heterogeneous system, but
+ * the function returns directly for SMP systems.
+ */
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
/*
* cpu topology table
*/
@@ -281,14 +444,14 @@ static void update_cpu_capacity(unsigned int cpu)
{
unsigned long capacity = SCHED_CAPACITY_SCALE;
- if (cpu_core_energy(cpu)) {
+ if (sched_energy_aware && cpu_core_energy(cpu)) {
int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
}
set_capacity_scale(cpu, capacity);
- pr_info("CPU%d: update cpu_capacity %lu\n",
+ pr_debug("CPU%d: update cpu_capacity %lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu));
}
@@ -353,6 +516,7 @@ void store_cpu_topology(unsigned int cpuid)
topology_populated:
update_siblings_masks(cpuid);
+ update_cpu_power(cpuid);
update_cpu_capacity(cpuid);
}
@@ -374,18 +538,33 @@ static void __init reset_cpu_topology(void)
}
}
+static void __init reset_cpu_power(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ set_power_scale(cpu, SCHED_CAPACITY_SCALE);
+}
+
void __init init_cpu_topology(void)
{
+ int cpu;
+
reset_cpu_topology();
/*
* Discard anything that was parsed if we hit an error so we
* don't use partial information.
*/
- if (of_have_populated_dt() && parse_dt_topology())
+ if (of_have_populated_dt() && parse_dt_topology()) {
reset_cpu_topology();
- else
+ } else {
set_sched_topology(arm64_topology);
+ for_each_possible_cpu(cpu)
+ update_siblings_masks(cpu);
+ }
+ reset_cpu_power();
+ parse_dt_cpu_power();
init_sched_energy_costs();
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 47454ee143df..416aea2c6719 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -43,6 +43,10 @@
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
+#include <asm/esr.h>
+#include <asm/edac.h>
+
+#include <trace/events/exception.h>
static const char *handler[]= {
"Synchronous Abort",
@@ -151,7 +155,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
unsigned long irq_stack_ptr;
int skip;
- pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+ pr_debug("%s(regs = %pK tsk = %pK)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
@@ -267,9 +271,6 @@ static int __die(const char *str, int err, struct pt_regs *regs)
end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
- dump_mem(KERN_EMERG, "Stack: ", regs->sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk),
- compat_user_mode(regs));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
@@ -277,40 +278,73 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
}
-static DEFINE_RAW_SPINLOCK(die_lock);
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
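+
+/*
+ * Nested-oops handling: the CPU that already owns die_lock may re-enter
+ * die() (the die_owner check in oops_begin()); die_nest_count tracks the
+ * depth, and oops_end() only releases the lock once the count drains
+ * back to zero.
+ */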
-/*
- * This function is protected against re-entrancy.
- */
-void die(const char *str, struct pt_regs *regs, int err)
+static unsigned long oops_begin(void)
{
- int ret;
+ int cpu;
unsigned long flags;
- raw_spin_lock_irqsave(&die_lock, flags);
-
oops_enter();
+ /* racy, but better than risking deadlock. */
+ raw_local_irq_save(flags);
+ cpu = smp_processor_id();
+ if (!arch_spin_trylock(&die_lock)) {
+ if (cpu == die_owner)
+ /* nested oops. should stop eventually */;
+ else
+ arch_spin_lock(&die_lock);
+ }
+ die_nest_count++;
+ die_owner = cpu;
console_verbose();
bust_spinlocks(1);
- ret = __die(str, err, regs);
+ return flags;
+}
+static void oops_end(unsigned long flags, struct pt_regs *regs, int notify)
+{
if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
+ die_owner = -1;
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ die_nest_count--;
+ if (!die_nest_count)
+ /* Nest count reaches zero, release the lock. */
+ arch_spin_unlock(&die_lock);
+ raw_local_irq_restore(flags);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
+ if (notify != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+}
- raw_spin_unlock_irqrestore(&die_lock, flags);
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+ unsigned long flags = oops_begin();
+ int ret;
- if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ if (!user_mode(regs))
+ bug_type = report_bug(regs->pc, regs);
+ if (bug_type != BUG_TRAP_TYPE_NONE)
+ str = "Oops - BUG";
+
+ ret = __die(str, err, regs);
+
+ oops_end(flags, regs, ret);
}
void arm64_notify_die(const char *str, struct pt_regs *regs,
@@ -400,6 +434,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
+ trace_undef_instr(regs, (void *)pc);
+
if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
@@ -518,6 +554,12 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
handler[reason], esr, esr_get_class_string(esr));
+ if (esr >> ESR_ELx_EC_SHIFT == ESR_ELx_EC_SERROR) {
+ pr_crit("System error detected. ESR.ISS = %08x\n",
+ esr & 0xffffff);
+ arm64_check_cache_ecc(NULL);
+ }
+
local_irq_disable();
panic("bad mode");
}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a6f1df69c0c3..91f541a1bdb1 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -1,5 +1,7 @@
/*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * Additional userspace pages setup for AArch64 and AArch32.
+ * - AArch64: vDSO pages setup, vDSO data page update.
+ * - AArch32: sigreturn and kuser helpers pages setup.
*
* Copyright (C) 2012 ARM Limited
*
@@ -36,9 +38,11 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-extern char vdso_start[], vdso_end[];
-static unsigned long vdso_pages;
-static struct page **vdso_pagelist;
+struct vdso_mappings {
+ unsigned long num_code_pages;
+ struct vm_special_mapping data_mapping;
+ struct vm_special_mapping code_mapping;
+};
/*
* The vDSO data page.
@@ -53,149 +57,258 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
/*
* Create and map the vectors page for AArch32 tasks.
*/
-static struct page *vectors_page[1];
+#if !defined(CONFIG_VDSO32) || defined(CONFIG_KUSER_HELPERS)
+static struct page *vectors_page[] __ro_after_init;
+static const struct vm_special_mapping compat_vdso_spec[] = {
+ {
+ /* Must be named [sigpage] for compatibility with arm. */
+ .name = "[sigpage]",
+ .pages = &vectors_page[0],
+ },
+#ifdef CONFIG_KUSER_HELPERS
+ {
+ .name = "[kuserhelpers]",
+ .pages = &vectors_page[1],
+ },
+#endif
+};
+static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
+#endif
static int __init alloc_vectors_page(void)
{
+#ifdef CONFIG_KUSER_HELPERS
extern char __kuser_helper_start[], __kuser_helper_end[];
- extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long vpage;
+ size_t kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned long kuser_vpage;
+#endif
- vpage = get_zeroed_page(GFP_ATOMIC);
+#ifndef CONFIG_VDSO32
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+ size_t sigret_sz =
+ __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+ unsigned long sigret_vpage;
- if (!vpage)
+ sigret_vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!sigret_vpage)
return -ENOMEM;
+#endif
+
+#ifdef CONFIG_KUSER_HELPERS
+ kuser_vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!kuser_vpage) {
+#ifndef CONFIG_VDSO32
+ free_page(sigret_vpage);
+#endif
+ return -ENOMEM;
+ }
+#endif
- /* kuser helpers */
- memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
- kuser_sz);
-
+#ifndef CONFIG_VDSO32
/* sigreturn code */
- memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- __aarch32_sigret_code_start, sigret_sz);
+ memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
+ flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
+ vectors_page[0] = virt_to_page(sigret_vpage);
+#endif
- flush_icache_range(vpage, vpage + PAGE_SIZE);
- vectors_page[0] = virt_to_page(vpage);
+#ifdef CONFIG_KUSER_HELPERS
+ /* kuser helpers */
+ memcpy((void *)kuser_vpage + 0x1000 - kuser_sz, __kuser_helper_start,
+ kuser_sz);
+ flush_icache_range(kuser_vpage, kuser_vpage + PAGE_SIZE);
+ vectors_page[1] = virt_to_page(kuser_vpage);
+#endif
return 0;
}
arch_initcall(alloc_vectors_page);
+#ifndef CONFIG_VDSO32
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
- unsigned long addr = AARCH32_VECTORS_BASE;
- static const struct vm_special_mapping spec = {
- .name = "[vectors]",
- .pages = vectors_page,
-
- };
+ unsigned long addr;
void *ret;
down_write(&mm->mmap_sem);
- current->mm->context.vdso = (void *)addr;
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = ERR_PTR(addr);
+ goto out;
+ }
- /* Map vectors page at the high address. */
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
- &spec);
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ &compat_vdso_spec[0]);
+ if (IS_ERR(ret))
+ goto out;
+
+ current->mm->context.vdso = (void *)addr;
+#ifdef CONFIG_KUSER_HELPERS
+ /* Map the kuser helpers at the ABI-defined high address. */
+ ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+ PAGE_SIZE,
+ VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+ &compat_vdso_spec[1]);
+#endif
+out:
up_write(&mm->mmap_sem);
return PTR_ERR_OR_ZERO(ret);
}
+#endif /* !CONFIG_VDSO32 */
#endif /* CONFIG_COMPAT */
-static struct vm_special_mapping vdso_spec[2];
-
-static int __init vdso_init(void)
+static int __init vdso_mappings_init(const char *name,
+ const char *code_start,
+ const char *code_end,
+ struct vdso_mappings *mappings)
{
- int i;
+ unsigned long i, vdso_pages;
+ struct page **vdso_pagelist;
unsigned long pfn;
- if (memcmp(vdso_start, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
+ if (memcmp(code_start, "\177ELF", 4)) {
+ pr_err("%s is not a valid ELF object!\n", name);
return -EINVAL;
}
- vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
-
- /* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
- GFP_KERNEL);
+ vdso_pages = (code_end - code_start) >> PAGE_SHIFT;
+ pr_info("%s: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+ name, vdso_pages + 1, vdso_pages, code_start, 1L,
+ vdso_data);
+
+ /*
+ * Allocate space for storing pointers to the vDSO code pages + the
+ * data page. The pointers must have the same lifetime as the mappings,
+ * which are static, so there is no need to keep track of the pointer
+ * array to free it.
+ */
+ vdso_pagelist = kmalloc_array(vdso_pages + 1, sizeof(struct page *),
+ GFP_KERNEL);
if (vdso_pagelist == NULL)
return -ENOMEM;
/* Grab the vDSO data page. */
vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
/* Grab the vDSO code pages. */
- pfn = sym_to_pfn(vdso_start);
+ pfn = sym_to_pfn(code_start);
for (i = 0; i < vdso_pages; i++)
vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
/* Populate the special mapping structures */
- vdso_spec[0] = (struct vm_special_mapping) {
+ mappings->data_mapping = (struct vm_special_mapping) {
.name = "[vvar]",
- .pages = vdso_pagelist,
+ .pages = &vdso_pagelist[0],
};
- vdso_spec[1] = (struct vm_special_mapping) {
+ mappings->code_mapping = (struct vm_special_mapping) {
.name = "[vdso]",
.pages = &vdso_pagelist[1],
};
+ mappings->num_code_pages = vdso_pages;
return 0;
}
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_VDSO32
+
+static struct vdso_mappings vdso32_mappings __ro_after_init;
+
+static int __init vdso32_init(void)
+{
+ extern char vdso32_start[], vdso32_end[];
+
+ return vdso_mappings_init("vdso32", vdso32_start, vdso32_end,
+ &vdso32_mappings);
+}
+arch_initcall(vdso32_init);
+
+#endif /* CONFIG_VDSO32 */
+#endif /* CONFIG_COMPAT */
+
+static struct vdso_mappings vdso_mappings __ro_after_init;
+
+static int __init vdso_init(void)
+{
+ extern char vdso_start[], vdso_end[];
+
+ return vdso_mappings_init("vdso", vdso_start, vdso_end,
+ &vdso_mappings);
+}
+
arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
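+/*
+ * Mapping layout installed by vdso_setup() (data page first, then code):
+ *
+ *	vdso_base		: [vvar] data page, VM_READ
+ *	vdso_base + PAGE_SIZE	: [vdso] code pages, VM_READ|VM_EXEC
+ *
+ * mm->context.vdso is only set once the code mapping succeeds.
+ */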
+static int vdso_setup(struct mm_struct *mm,
+ const struct vdso_mappings *mappings)
{
- struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
void *ret;
- vdso_text_len = vdso_pages << PAGE_SHIFT;
+ vdso_text_len = mappings->num_code_pages << PAGE_SHIFT;
/* Be sure to map the data page */
vdso_mapping_len = vdso_text_len + PAGE_SIZE;
- down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- ret = ERR_PTR(vdso_base);
- goto up_fail;
- }
+ if (IS_ERR_VALUE(vdso_base))
+ return PTR_ERR_OR_ZERO(ERR_PTR(vdso_base));
ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_MAYREAD,
- &vdso_spec[0]);
+ &mappings->data_mapping);
if (IS_ERR(ret))
- goto up_fail;
+ return PTR_ERR_OR_ZERO(ret);
vdso_base += PAGE_SIZE;
- mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_spec[1]);
- if (IS_ERR(ret))
- goto up_fail;
+ &mappings->code_mapping);
+ if (!IS_ERR(ret))
+ mm->context.vdso = (void *)vdso_base;
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_VDSO32
+int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ void *ret;
+ down_write(&mm->mmap_sem);
+
+ ret = ERR_PTR(vdso_setup(mm, &vdso32_mappings));
+#ifdef CONFIG_KUSER_HELPERS
+ if (!IS_ERR(ret))
+ /* Map the kuser helpers at the ABI-defined high address. */
+ ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+ PAGE_SIZE,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYEXEC,
+ &compat_vdso_spec[1]);
+#endif
up_write(&mm->mmap_sem);
- return 0;
-up_fail:
- mm->context.vdso = NULL;
+ return PTR_ERR_OR_ZERO(ret);
+}
+#endif /* CONFIG_VDSO32 */
+#endif /* CONFIG_COMPAT */
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+ ret = vdso_setup(mm, &vdso_mappings);
up_write(&mm->mmap_sem);
- return PTR_ERR(ret);
+ return ret;
}
/*
@@ -216,16 +329,20 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+ vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 62c84f7cb01b..4adcb532ac6a 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -5,18 +5,32 @@
# Heavily based on the vDSO Makefiles for other archs.
#
-obj-vdso := gettimeofday.o note.o sigreturn.o
+obj-vdso-s := note.o sigreturn.o
+obj-vdso-c := vgettimeofday.o
# Build rules
-targets := $(obj-vdso) vdso.so vdso.so.dbg
-obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+targets := $(obj-vdso-s) $(obj-vdso-c) vdso.so vdso.so.dbg
+obj-vdso-s := $(addprefix $(obj)/, $(obj-vdso-s))
+obj-vdso-c := $(addprefix $(obj)/, $(obj-vdso-c))
+obj-vdso := $(obj-vdso-c) $(obj-vdso-s)
-ccflags-y := -shared -fno-common -fno-builtin
+ccflags-y := -shared -fno-common -fno-builtin -fno-stack-protector
+ccflags-y += -DDISABLE_BRANCH_PROFILING -ffixed-x18
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+# Force -O2 to avoid libgcc dependencies
+CFLAGS_REMOVE_vgettimeofday.o = -pg -Os
+CFLAGS_vgettimeofday.o = -O2 -fPIC
+ifneq ($(cc-name),clang)
+CFLAGS_vgettimeofday.o += -mcmodel=tiny
+endif
+
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
# down to collect2, resulting in silent corruption of the vDSO image.
@@ -49,12 +63,17 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
# Assembly rules for the .S files
-$(obj-vdso): %.o: %.S FORCE
+$(obj-vdso-s): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
+$(obj-vdso-c): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc)
+
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+quiet_cmd_vdsocc = VDSOC $@
+ cmd_vdsocc = ${CC} $(c_flags) -c -o $@ $<
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/compiler.h b/arch/arm64/kernel/vdso/compiler.h
new file mode 100644
index 000000000000..fb27545640f2
--- /dev/null
+++ b/arch/arm64/kernel/vdso/compiler.h
@@ -0,0 +1,70 @@
+/*
+ * Userspace implementations of fallback calls
+ *
+ * Copyright (C) 2017 Cavium, Inc.
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Rewritten into C by: Andrew Pinski <apinski@cavium.com>
+ */
+
+#ifndef __VDSO_COMPILER_H
+#define __VDSO_COMPILER_H
+
+#include <asm/processor.h> /* for cpu_relax() */
+#include <asm/sysreg.h> /* for read_sysreg() */
+#include <asm/unistd.h>
+#include <linux/compiler.h>
+#include <linux/hrtimer.h> /* for LOW_RES_NSEC and MONOTONIC_RES_NSEC */
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+#define ARCH_PROVIDES_TIMER
+#endif
+
+#define DEFINE_FALLBACK(name, type_arg1, name_arg1, type_arg2, name_arg2) \
+static notrace long name##_fallback(type_arg1 _##name_arg1, \
+ type_arg2 _##name_arg2) \
+{ \
+ register type_arg1 name_arg1 asm("x0") = _##name_arg1; \
+ register type_arg2 name_arg2 asm("x1") = _##name_arg2; \
+ register long ret asm ("x0"); \
+ register long nr asm("x8") = __NR_##name; \
+ \
+ asm volatile( \
+ " svc #0\n" \
+ : "=r" (ret) \
+ : "r" (name_arg1), "r" (name_arg2), "r" (nr) \
+ : "memory"); \
+ \
+ return ret; \
+}
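+
+/*
+ * Usage sketch (this instantiation is illustrative; the real callers
+ * live in the shared lib/vdso/vgettimeofday.c):
+ *
+ *	DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
+ *
+ * expands to a clock_gettime_fallback(clock, ts) helper that loads
+ * __NR_clock_gettime into x8 and traps with "svc #0".
+ */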
+
+/*
+ * AArch64 implementation of arch_counter_get_cntvct() suitable for vdso
+ */
+static __always_inline notrace u64 arch_vdso_read_counter(void)
+{
+ /* Read the virtual counter. */
+ isb();
+ return read_sysreg(cntvct_el0);
+}
+
+/* Rename exported vdso functions */
+#define __vdso_clock_gettime __kernel_clock_gettime
+#define __vdso_gettimeofday __kernel_gettimeofday
+#define __vdso_clock_getres __kernel_clock_getres
+#define __vdso_time __kernel_time
+
+#endif /* __VDSO_COMPILER_H */
diff --git a/arch/arm64/kernel/vdso/datapage.h b/arch/arm64/kernel/vdso/datapage.h
new file mode 100644
index 000000000000..be86a6074cf8
--- /dev/null
+++ b/arch/arm64/kernel/vdso/datapage.h
@@ -0,0 +1,59 @@
+/*
+ * Userspace implementations of __get_datapage
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __VDSO_DATAPAGE_H
+#define __VDSO_DATAPAGE_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <asm/vdso_datapage.h>
+
+/*
+ * We use the hidden visibility to prevent the compiler from generating a GOT
+ * relocation. Not only is going through a GOT useless (the entry couldn't and
+ * mustn't be overridden by another library), it does not even work: the linker
+ * cannot generate an absolute address to the data page.
+ *
+ * With the hidden visibility, the compiler simply generates a PC-relative
+ * relocation (R_ARM_REL32), and this is what we need.
+ */
+extern const struct vdso_data _vdso_data __attribute__((visibility("hidden")));
+
+static inline const struct vdso_data *__get_datapage(void)
+{
+ const struct vdso_data *ret;
+ /*
+ * This simply puts &_vdso_data into ret. The reason why we don't use
+ * `ret = &_vdso_data` is that the compiler tends to optimise this in a
+ * very suboptimal way: instead of keeping &_vdso_data in a register,
+ * it goes through a relocation almost every time _vdso_data must be
+ * accessed (even in subfunctions). This is both time and space
+ * consuming: each relocation uses a word in the code section, and it
+ * has to be loaded at runtime.
+ *
+ * This trick hides the assignment from the compiler. Since it cannot
+ * track where the pointer comes from, it will only use one relocation
+ * where __get_datapage() is called, and then keep the result in a
+ * register.
+ */
+ asm("" : "=r"(ret) : "0"(&_vdso_data));
+ return ret;
+}
+
+/* We can only guarantee 56 bits of precision. */
+#define ARCH_CLOCK_FIXED_MASK GENMASK_ULL(55, 0)
+
+#endif /* __VDSO_DATAPAGE_H */
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index b3e6c4d5b75c..3dc1198b5ec9 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -94,6 +94,7 @@ VERSION
__kernel_gettimeofday;
__kernel_clock_gettime;
__kernel_clock_getres;
+ __kernel_time;
local: *;
};
}
diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..b73d4011993d
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,3 @@
+#include "compiler.h"
+#include "datapage.h"
+#include "../../../../lib/vdso/vgettimeofday.c"
diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore
new file mode 100644
index 000000000000..4fea950fa5ed
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/.gitignore
@@ -0,0 +1,2 @@
+vdso.lds
+vdso.so.raw
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
new file mode 100644
index 000000000000..807d08e28c27
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -0,0 +1,178 @@
+#
+# Building a vDSO image for AArch32.
+#
+# Author: Kevin Brodsky <kevin.brodsky@arm.com>
+# A mix between the arm64 and arm vDSO Makefiles.
+
+ifeq ($(cc-name),clang)
+ CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32) -no-integrated-as
+ GCC_ARM32_TC := $(realpath $(dir $(shell which $(CROSS_COMPILE_ARM32)ld))/..)
+ifneq ($(GCC_ARM32_TC),)
+ CC_ARM32 += --gcc-toolchain=$(GCC_ARM32_TC)
+endif
+else
+ CC_ARM32 := $(CROSS_COMPILE_ARM32)$(cc-name)
+endif
+
+# Same as cc-*option, but using CC_ARM32 instead of CC
+cc32-option = $(call try-run,\
+ $(CC_ARM32) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-disable-warning = $(call try-run,\
+ $(CC_ARM32) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+cc32-ldoption = $(call try-run,\
+ $(CC_ARM32) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
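+# Example: $(call cc32-option,-fno-PIE) adds -fno-PIE only if the 32-bit
+# compiler accepts it (see its use in VDSO_CAFLAGS below).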
+
+# We cannot use the global flags to compile the vDSO files, the main reason
+# being that the 32-bit compiler may be older than the main (64-bit) compiler
+# and therefore may not understand flags set using $(cc-option ...). Besides,
+# arch-specific options should be taken from the arm Makefile instead of the
+# arm64 one.
+# As a result we set our own flags here.
+
+# From top-level Makefile
+# NOSTDINC_FLAGS
+VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(CC_ARM32) -print-file-name=include)
+VDSO_CPPFLAGS += $(LINUXINCLUDE)
+VDSO_CPPFLAGS += -D__KERNEL__
+VDSO_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+VDSO_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
+
+# Common C and assembly flags
+# From top-level Makefile
+VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
+ifdef CONFIG_DEBUG_INFO
+VDSO_CAFLAGS += -g
+endif
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC_ARM32)), y)
+VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
+# From arm Makefile
+VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
+VDSO_CAFLAGS += -mabi=aapcs-linux -mfloat-abi=soft
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+VDSO_CAFLAGS += -mbig-endian
+else
+VDSO_CAFLAGS += -mlittle-endian
+endif
+
+# From arm vDSO Makefile
+VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
+VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
+
+# Try to compile for ARMv8. If the compiler is too old and doesn't support it,
+# fall back to v7. There is no easy way to check which architecture the
+# code is being compiled for, so define a macro specifying it (as done in
+# arch/arm/Makefile).
+VDSO_CAFLAGS += $(call cc32-option,-march=armv8-a -D__LINUX_ARM_ARCH__=8,\
+ -march=armv7-a -D__LINUX_ARM_ARCH__=7)
+
+VDSO_CFLAGS := $(VDSO_CAFLAGS)
+# KBUILD_CFLAGS from top-level Makefile
+VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+ -std=gnu89
+VDSO_CFLAGS += -O2
+# Some useful compiler-dependent flags from top-level Makefile
+VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,)
+VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+VDSO_CFLAGS += $(call cc32-option,-fno-strict-overflow)
+VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
+VDSO_CFLAGS += $(call cc32-option,-Werror=date-time)
+VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types)
+
+# The 32-bit compiler does not provide 128-bit integers, which are used in
+# some headers that are indirectly included from the vDSO code.
+# This hack makes the compiler happy and should trigger a warning/error if
+# variables of such type are referenced.
+VDSO_CFLAGS += -D__uint128_t='void*'
+# Silence some warnings coming from headers that operate on longs
+# (on GCC 4.8 or older, there is unfortunately no way to silence this warning)
+VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
+VDSO_CFLAGS += -Wno-int-to-pointer-cast
+
+VDSO_AFLAGS := $(VDSO_CAFLAGS)
+VDSO_AFLAGS += -D__ASSEMBLY__
+
+VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
+# From arm vDSO Makefile
+VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
+VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft
+VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--build-id)
+VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd)
+
+
+# Borrow vdsomunge.c from the arm vDSO
+# We have to use a relative path because scripts/Makefile.host prefixes
+# $(hostprogs-y) with $(obj)
+munge := ../../../arm/vdso/vdsomunge
+hostprogs-y := $(munge)
+
+c-obj-vdso := vgettimeofday.o
+asm-obj-vdso := sigreturn.o
+
+# Build rules
+targets := $(c-obj-vdso) $(asm-obj-vdso) vdso.so vdso.so.dbg vdso.so.raw
+c-obj-vdso := $(addprefix $(obj)/, $(c-obj-vdso))
+asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso))
+obj-vdso := $(c-obj-vdso) $(asm-obj-vdso)
+
+obj-y += vdso.o
+extra-y += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (vdso.s includes vdso.so through incbin)
+$(obj)/vdso.o: $(obj)/vdso.so
+
+include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+# Strip rule for vdso.so
+$(obj)/vdso.so: OBJCOPYFLAGS := -S
+$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
+ $(call if_changed,vdsomunge)
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+
+# Compilation rules for the vDSO sources
+$(filter-out vgettimeofday.o, $(c-obj-vdso)): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc)
+$(asm-obj-vdso): %.o: %.S FORCE
+ $(call if_changed_dep,vdsoas)
+
+# Actual build commands
+quiet_cmd_vdsold = VDSOL32 $@
+ cmd_vdsold = $(CC_ARM32) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
+ -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+quiet_cmd_vdsocc = VDSOC32 $@
+ cmd_vdsocc = $(CC_ARM32) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+quiet_cmd_vdsoas = VDSOA32 $@
+ cmd_vdsoas = $(CC_ARM32) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+
+quiet_cmd_vdsomunge = MUNGE $@
+ cmd_vdsomunge = $(obj)/$(munge) $< $@
+
+# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
+gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+# The AArch64 nm should be able to read an AArch32 binary
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso32/compiler.h b/arch/arm64/kernel/vdso32/compiler.h
new file mode 100644
index 000000000000..19a43fc37bb9
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/compiler.h
@@ -0,0 +1,122 @@
+/*
+ * Userspace implementations of fallback calls
+ *
+ * Copyright (C) 2017 Cavium, Inc.
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Rewritten into C by: Andrew Pinski <apinski@cavium.com>
+ */
+
+#ifndef __VDSO_COMPILER_H
+#define __VDSO_COMPILER_H
+
+#include <generated/autoconf.h>
+#undef CONFIG_64BIT
+#include <asm/barrier.h> /* for isb() & dmb() */
+#include <asm/param.h> /* for HZ */
+#include <asm/unistd32.h>
+#include <linux/compiler.h>
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+#define ARCH_PROVIDES_TIMER
+#endif
+
+/* cannot include linux/time.h because of too much architectural cruft */
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
+/* cannot include linux/jiffies.h because of too much architectural cruft */
+#ifndef TICK_NSEC
+#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+#endif
+
+/* cannot include linux/hrtimer.h because of too much architectural cruft */
+#ifndef LOW_RES_NSEC
+#define LOW_RES_NSEC TICK_NSEC
+#ifdef ARCH_PROVIDES_TIMER
+#ifdef CONFIG_HIGH_RES_TIMERS
+# define HIGH_RES_NSEC 1
+# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+#else
+# define MONOTONIC_RES_NSEC LOW_RES_NSEC
+#endif
+#endif
+#endif
+
+#define DEFINE_FALLBACK(name, type_arg1, name_arg1, type_arg2, name_arg2) \
+static notrace long name##_fallback(type_arg1 _##name_arg1, \
+ type_arg2 _##name_arg2) \
+{ \
+ register type_arg1 name_arg1 asm("r0") = _##name_arg1; \
+ register type_arg2 name_arg2 asm("r1") = _##name_arg2; \
+ register long ret asm ("r0"); \
+ register long nr asm("r7") = __NR_##name; \
+ \
+ asm volatile( \
+ " swi #0\n" \
+ : "=r" (ret) \
+ : "r" (name_arg1), "r" (name_arg2), "r" (nr) \
+ : "memory"); \
+ \
+ return ret; \
+}
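+
+/*
+ * Same contract as the AArch64 variant in vdso/compiler.h, but following
+ * the AArch32 EABI: syscall number in r7, arguments in r0/r1, and the
+ * trap is "swi #0" (the pre-UAL mnemonic for svc, matching the sigreturn
+ * trampolines in sigreturn.S).
+ */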
+
+/*
+ * AArch32 implementation of arch_counter_get_cntvct() suitable for vdso
+ */
+static __always_inline notrace u64 arch_vdso_read_counter(void)
+{
+ u64 res;
+
+ /* Read the virtual counter. */
+ isb();
+ asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
+
+ return res;
+}
+
+/*
+ * We cannot include asm/processor.h to pick this up, because of all the
+ * other architectural components it drags in, so we open-code a copy here.
+ */
+static inline void cpu_relax(void)
+{
+ asm volatile("yield" ::: "memory");
+}
+
+#undef smp_rmb
+#if __LINUX_ARM_ARCH__ >= 8
+#define smp_rmb() dmb(ishld) /* ok on ARMv8 */
+#else
+#define smp_rmb() dmb(ish) /* ishld does not exist on ARMv7 */
+#endif
+
+/* Avoid unresolved references emitted by GCC */
+
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
+
+#endif /* __VDSO_COMPILER_H */
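DEFINE_FALLBACK above open-codes the AArch32 EABI syscall convention (number in r7, arguments in r0/r1, result in r0) so the vDSO can fall back to a real system call when the fast path cannot be used. As a rough illustration, an instantiation such as DEFINE_FALLBACK(clock_gettime, clockid_t, clkid, struct timespec *, ts) — the actual instantiations live in the shared lib/vdso/vgettimeofday.c, which this patch does not show — would expand to roughly:

	static notrace long clock_gettime_fallback(clockid_t _clkid,
						   struct timespec *_ts)
	{
		register clockid_t clkid asm("r0") = _clkid;
		register struct timespec *ts asm("r1") = _ts;
		register long ret asm("r0");
		register long nr asm("r7") = __NR_clock_gettime;

		/* AArch32 supervisor call; the kernel takes the slow path. */
		asm volatile("	swi	#0\n"
			     : "=r" (ret)
			     : "r" (clkid), "r" (ts), "r" (nr)
			     : "memory");

		return ret;
	}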
diff --git a/arch/arm64/kernel/vdso32/datapage.h b/arch/arm64/kernel/vdso32/datapage.h
new file mode 100644
index 000000000000..fe3e216d94d1
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/datapage.h
@@ -0,0 +1 @@
+#include "../vdso/datapage.h"
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
new file mode 100644
index 000000000000..14e5f9ca34f9
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/sigreturn.S
@@ -0,0 +1,76 @@
+/*
+ * Sigreturn trampolines for returning from a signal when the SA_RESTORER
+ * flag is not set.
+ *
+ * Copyright (C) 2016 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on glibc's arm sa_restorer. While this is not strictly necessary, we
+ * provide both A32 and T32 versions, in accordance with the arm sigreturn
+ * code.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd32.h>
+
+.macro sigreturn_trampoline name, syscall, regs_offset
+ /*
+ * We provide directives for enabling stack unwinding through the
+ * trampoline. On arm, CFI directives are only used for debugging (and
+ * the vDSO is stripped of debug information), so only the arm-specific
+ * unwinding directives are useful here.
+ */
+ .fnstart
+ .save {r0-r15}
+ .pad #\regs_offset
+ /*
+	 * The unwind tables must start at least one instruction before the
+	 * trampoline, because the unwinder assumes the signal handler was
+	 * called from the trampoline, i.e. from just before the point it
+	 * returns to (the mov r7, ... below).
+ */
+ nop
+ENTRY(\name)
+ mov r7, #\syscall
+ svc #0
+ .fnend
+ /*
+ * We would like to use ENDPROC, but the macro uses @ which is a
+ * comment symbol for arm assemblers, so directly use .type with %
+ * instead.
+ */
+ .type \name, %function
+END(\name)
+.endm
+
+ .text
+
+ .arm
+ sigreturn_trampoline __kernel_sigreturn_arm, \
+ __NR_sigreturn, \
+ COMPAT_SIGFRAME_REGS_OFFSET
+
+ sigreturn_trampoline __kernel_rt_sigreturn_arm, \
+ __NR_rt_sigreturn, \
+ COMPAT_RT_SIGFRAME_REGS_OFFSET
+
+ .thumb
+ sigreturn_trampoline __kernel_sigreturn_thumb, \
+ __NR_sigreturn, \
+ COMPAT_SIGFRAME_REGS_OFFSET
+
+ sigreturn_trampoline __kernel_rt_sigreturn_thumb, \
+ __NR_rt_sigreturn, \
+ COMPAT_RT_SIGFRAME_REGS_OFFSET
diff --git a/arch/arm64/kernel/vdso32/vdso.S b/arch/arm64/kernel/vdso32/vdso.S
new file mode 100644
index 000000000000..fe19ff70eb76
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vdso.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+ .globl vdso32_start, vdso32_end
+ .section .rodata
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/arm64/kernel/vdso32/vdso.so"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
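The .incbin above embeds the compiled 32-bit vDSO as page-aligned rodata, so kernel code can hand its pages straight to a userspace mapping. A hedged sketch of the consuming side, modelled on the existing 64-bit vdso.c pattern (vdso32_pagelist and vdso32_init are illustrative names, not part of this hunk):

	extern char vdso32_start[], vdso32_end[];

	static struct page **vdso32_pagelist;

	static int __init vdso32_init(void)
	{
		unsigned long i, pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
		unsigned long pfn = __phys_to_pfn(__pa(vdso32_start));

		vdso32_pagelist = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
		if (!vdso32_pagelist)
			return -ENOMEM;

		/* The blob is page-aligned (see the .balign above), so each
		 * PAGE_SIZE chunk corresponds to exactly one struct page. */
		for (i = 0; i < pages; i++)
			vdso32_pagelist[i] = pfn_to_page(pfn + i);

		return 0;
	}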
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
new file mode 100644
index 000000000000..f95cb1c431fb
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -0,0 +1,95 @@
+/*
+ * Adapted from arm64 version.
+ *
+ * GNU linker script for the VDSO library.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Heavily based on the vDSO linker scripts for other archs.
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+
+SECTIONS
+{
+ PROVIDE_HIDDEN(_vdso_data = . - PAGE_SIZE);
+ . = VDSO_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ .text : { *(.text*) } :text =0xe7f001f2
+
+ .got : { *(.got) }
+ .rel.plt : { *(.rel.plt) }
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+}
+
+VERSION
+{
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ __vdso_time;
+ __kernel_sigreturn_arm;
+ __kernel_sigreturn_thumb;
+ __kernel_rt_sigreturn_arm;
+ __kernel_rt_sigreturn_thumb;
+ local: *;
+ };
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm;
+VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb;
+VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm;
+VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c
new file mode 100644
index 000000000000..b73d4011993d
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vgettimeofday.c
@@ -0,0 +1,3 @@
+#include "compiler.h"
+#include "datapage.h"
+#include "../../../../lib/vdso/vgettimeofday.c"
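The three includes above splice the architecture hooks (compiler.h) and the data page (datapage.h) into the generic C implementation. The core of that implementation is a lock-free seqcount read loop; the sketch below shows its shape with a placeholder vdso_data layout — the real field names come from the shared lib/vdso code, not from this patch:

	struct vdso_data {			/* placeholder layout */
		u32 seq;			/* odd while an update is in flight */
		u64 cycle_last, base_ns, mult;
		u32 shift;
	};

	static notrace u64 vdso_get_ns(const struct vdso_data *vd)
	{
		u32 seq;
		u64 cycles, ns;

		do {
			seq = READ_ONCE(vd->seq);
			smp_rmb();		/* counter read after seq read */
			cycles = arch_vdso_read_counter();
			ns = vd->base_ns +
			     (((cycles - vd->cycle_last) * vd->mult) >> vd->shift);
			smp_rmb();		/* re-check seq after the reads */
		} while ((seq & 1) || READ_ONCE(vd->seq) != seq);

		return ns;
	}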
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6e4832def254..71c8076bbc60 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -11,6 +11,7 @@
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/cache.h>
#include "image.h"
@@ -46,6 +47,16 @@ jiffies = jiffies_64;
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .;
+#ifdef CONFIG_HIBERNATION
+#define HIBERNATE_TEXT \
+ . = ALIGN(SZ_4K); \
+ VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
+ *(.hibernate_exit.text) \
+ VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
+#else
+#define HIBERNATE_TEXT
+#endif
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT \
. = ALIGN(PAGE_SIZE); \
@@ -74,14 +85,19 @@ PECOFF_FILE_ALIGNMENT = 0x200;
#endif
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
-#define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT);
-#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
-#elif defined(CONFIG_DEBUG_RODATA)
-#define ALIGN_DEBUG_RO . = ALIGN(1<<PAGE_SHIFT);
-#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
+/*
+ * 4 KB granule: 1 level 2 entry
+ * 16 KB granule: 128 level 3 entries, with contiguous bit
+ * 64 KB granule: 32 level 3 entries, with contiguous bit
+ */
+#define SEGMENT_ALIGN SZ_2M
#else
-#define ALIGN_DEBUG_RO
-#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
+/*
+ * 4 KB granule: 16 level 3 entries, with contiguous bit
+ * 16 KB granule: 4 level 3 entries, without contiguous bit
+ * 64 KB granule: 1 level 3 entry
+ */
+#define SEGMENT_ALIGN SZ_64K
#endif
SECTIONS
@@ -99,6 +115,7 @@ SECTIONS
*(.discard)
*(.discard.*)
*(.interp .dynamic)
+ *(.dynsym .dynstr .hash)
}
. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -107,19 +124,21 @@ SECTIONS
_text = .;
HEAD_TEXT
}
- ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
IRQENTRY_TEXT
+ ENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
HYPERVISOR_TEXT
IDMAP_TEXT
+ HIBERNATE_TEXT
TRAMP_TEXT
*(.fixup)
*(.gnu.warning)
@@ -127,14 +146,14 @@ SECTIONS
*(.got) /* Global offset table */
}
- ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+ . = ALIGN(SEGMENT_ALIGN);
_etext = .; /* End of text section */
RO_DATA(PAGE_SIZE) /* everything from this point to */
EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
NOTES
- ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+ . = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
INIT_TEXT_SECTION(8)
@@ -166,27 +185,37 @@ SECTIONS
*(.altinstr_replacement)
}
.rela : ALIGN(8) {
- __reloc_start = .;
*(.rela .rela*)
- __reloc_end = .;
- }
- .dynsym : ALIGN(8) {
- __dynsym_start = .;
- *(.dynsym)
- }
- .dynstr : {
- *(.dynstr)
- }
- .hash : {
- *(.hash)
}
- . = ALIGN(PAGE_SIZE);
+ __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
+ __rela_size = SIZEOF(.rela);
+
+ . = ALIGN(SEGMENT_ALIGN);
__init_end = .;
_data = .;
_sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ /*
+ * Data written with the MMU off but read with the MMU on requires
+ * cache lines to be invalidated, discarding up to a Cache Writeback
+	 * Granule (CWG) of data from the cache. Keep the section that
+	 * requires this type of maintenance in its own CWG area so the
+	 * cache maintenance operations don't interfere with adjacent data.
+ */
+ .mmuoff.data.write : ALIGN(SZ_2K) {
+ __mmuoff_data_start = .;
+ *(.mmuoff.data.write)
+ }
+ . = ALIGN(SZ_2K);
+ .mmuoff.data.read : {
+ *(.mmuoff.data.read)
+ __mmuoff_data_end = .;
+ }
+
PECOFF_EDATA_PADDING
_edata = .;
@@ -223,6 +252,10 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"ID map text too big or misaligned")
+#ifdef CONFIG_HIBERNATION
+ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+ <= SZ_4K, "Hibernate exit text too big or misaligned")
+#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
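Replacing the __reloc_start/__reloc_end (and .dynsym/.dynstr/.hash) bookkeeping with __rela_offset/__rela_size means the boot code only needs a base-relative offset and a byte count to self-relocate. The real consumer is assembly in head.S; the C rendering below is a simplified sketch (it ignores the link-time vs run-time address slide and assumes image-relative r_offset values):

	#include <elf.h>
	#include <stdint.h>

	static void apply_relative_relocs(uintptr_t base, uint64_t rela_offset,
					  uint64_t rela_size)
	{
		Elf64_Rela *rela = (Elf64_Rela *)(base + rela_offset);
		Elf64_Rela *end  = (Elf64_Rela *)((char *)rela + rela_size);

		for (; rela < end; rela++) {
			if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE)
				continue;
			/* Patch each 64-bit slot with its run-time address. */
			*(uint64_t *)(base + rela->r_offset) =
				base + rela->r_addend;
		}
	}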
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 1949fe5f5424..caee9ee8e12a 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -10,6 +10,7 @@ KVM=../../../virt/kvm
ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
@@ -22,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 79705fde8cc8..fe0ecc3d0c12 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -28,7 +28,6 @@
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5295aef7c8f0..51abbd1d98e3 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -23,6 +23,7 @@
#include <linux/kvm_host.h>
#include <asm/esr.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
@@ -187,6 +188,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
+ case ARM_EXCEPTION_HYP_GONE:
+ /*
+ * EL2 has been reset to the hyp-stub. This happens when a guest
+ * is pre-empted by kvm_reboot()'s shutdown call.
+ */
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ return 0;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 84c338f017b2..d87635e678b7 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -21,6 +21,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
+#include <asm/sysreg.h>
.text
.pushsection .hyp.idmap.text, "ax"
@@ -96,6 +97,14 @@ __do_hyp_init:
ldr x4, =VTCR_EL2_FLAGS
bfi x4, x5, #16, #3
+ /*
+	 * Read the VMIDBits field from ID_AA64MMFR1_EL1 and set the VS bit
+	 * in VTCR_EL2 accordingly.
+ */
+ mrs x5, ID_AA64MMFR1_EL1
+ ubfx x5, x5, #5, #1
+ lsl x5, x5, #VTCR_EL2_VS
+ orr x4, x4, x5
msr vtcr_el2, x4
@@ -108,8 +117,8 @@ __do_hyp_init:
dsb sy
mrs x4, sctlr_el2
- and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
- ldr x5, =SCTLR_EL2_FLAGS
+ and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
+ ldr x5, =SCTLR_ELx_FLAGS
orr x4, x4, x5
msr sctlr_el2, x4
isb
@@ -143,6 +152,44 @@ merged:
eret
ENDPROC(__kvm_hyp_init)
+ /*
+ * x0: HYP boot pgd
+ * x1: HYP phys_idmap_start
+ */
+ENTRY(__kvm_hyp_reset)
+ /* We're in trampoline code in VA, switch back to boot page tables */
+ msr ttbr0_el2, x0
+ isb
+
+ /* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
+ ic iallu
+ tlbi alle2
+ dsb sy
+ isb
+
+ /* Branch into PA space */
+ adr x0, 1f
+ bfi x1, x0, #0, #PAGE_SHIFT
+ br x1
+
+ /* We're now in idmap, disable MMU */
+1: mrs x0, sctlr_el2
+ ldr x1, =SCTLR_ELx_FLAGS
+	bic	x0, x0, x1		// Clear SCTLR_ELx.M, etc.
+ msr sctlr_el2, x0
+ isb
+
+ /* Invalidate the old TLBs */
+ tlbi alle2
+ dsb sy
+
+ /* Install stub vectors */
+ adr_l x0, __hyp_stub_vectors
+ msr vbar_el2, x0
+
+ eret
+ENDPROC(__kvm_hyp_reset)
+
.ltorg
.popsection
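The new mrs/ubfx/lsl/orr sequence in __do_hyp_init reads bit 5 of ID_AA64MMFR1_EL1 — the high bit of the 4-bit VMIDBits field, set when the CPU supports 16-bit VMIDs — and copies it into VTCR_EL2.VS. In C it would look roughly like this (vtcr_set_vs is an illustrative helper name, not from the patch):

	static inline u64 vtcr_set_vs(u64 vtcr)
	{
		u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1);

		/* VMIDBits == 2 means 16-bit VMIDs; bit 5 distinguishes it. */
		vtcr |= ((mmfr1 >> 5) & 1) << VTCR_EL2_VS;
		return vtcr;
	}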
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 844c4d73335a..7ce931565151 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -18,909 +18,8 @@
#include <linux/linkage.h>
#include <asm/alternative.h>
-#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
-#include <asm/debug-monitors.h>
-#include <asm/esr.h>
-#include <asm/fpsimdmacros.h>
-#include <asm/kvm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/memory.h>
-
-#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
-#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
-#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
-
- .text
- .pushsection .hyp.text, "ax"
- .align PAGE_SHIFT
-
-.macro save_common_regs
- // x2: base address for cpu context
- // x3: tmp register
-
- add x3, x2, #CPU_XREG_OFFSET(19)
- stp x19, x20, [x3]
- stp x21, x22, [x3, #16]
- stp x23, x24, [x3, #32]
- stp x25, x26, [x3, #48]
- stp x27, x28, [x3, #64]
- stp x29, lr, [x3, #80]
-
- mrs x19, sp_el0
- mrs x20, elr_el2 // pc before entering el2
- mrs x21, spsr_el2 // pstate before entering el2
-
- stp x19, x20, [x3, #96]
- str x21, [x3, #112]
-
- mrs x22, sp_el1
- mrs x23, elr_el1
- mrs x24, spsr_el1
-
- str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
- str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
- str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-.endm
-
-.macro restore_common_regs
- // x2: base address for cpu context
- // x3: tmp register
-
- ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
- ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
- ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-
- msr sp_el1, x22
- msr elr_el1, x23
- msr spsr_el1, x24
-
- add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
- ldp x19, x20, [x3]
- ldr x21, [x3, #16]
-
- msr sp_el0, x19
- msr elr_el2, x20 // pc on return from el2
- msr spsr_el2, x21 // pstate on return from el2
-
- add x3, x2, #CPU_XREG_OFFSET(19)
- ldp x19, x20, [x3]
- ldp x21, x22, [x3, #16]
- ldp x23, x24, [x3, #32]
- ldp x25, x26, [x3, #48]
- ldp x27, x28, [x3, #64]
- ldp x29, lr, [x3, #80]
-.endm
-
-.macro save_host_regs
- save_common_regs
-.endm
-
-.macro restore_host_regs
- restore_common_regs
-.endm
-
-.macro save_fpsimd
- // x2: cpu context address
- // x3, x4: tmp regs
- add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
- fpsimd_save x3, 4
-.endm
-
-.macro restore_fpsimd
- // x2: cpu context address
- // x3, x4: tmp regs
- add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
- fpsimd_restore x3, 4
-.endm
-
-.macro save_guest_regs
- // x0 is the vcpu address
- // x1 is the return code, do not corrupt!
- // x2 is the cpu context
- // x3 is a tmp register
- // Guest's x0-x3 are on the stack
-
- // Compute base to save registers
- add x3, x2, #CPU_XREG_OFFSET(4)
- stp x4, x5, [x3]
- stp x6, x7, [x3, #16]
- stp x8, x9, [x3, #32]
- stp x10, x11, [x3, #48]
- stp x12, x13, [x3, #64]
- stp x14, x15, [x3, #80]
- stp x16, x17, [x3, #96]
- str x18, [x3, #112]
-
- pop x6, x7 // x2, x3
- pop x4, x5 // x0, x1
-
- add x3, x2, #CPU_XREG_OFFSET(0)
- stp x4, x5, [x3]
- stp x6, x7, [x3, #16]
-
- save_common_regs
-.endm
-
-.macro restore_guest_regs
- // x0 is the vcpu address.
- // x2 is the cpu context
- // x3 is a tmp register
-
- // Prepare x0-x3 for later restore
- add x3, x2, #CPU_XREG_OFFSET(0)
- ldp x4, x5, [x3]
- ldp x6, x7, [x3, #16]
- push x4, x5 // Push x0-x3 on the stack
- push x6, x7
-
- // x4-x18
- ldp x4, x5, [x3, #32]
- ldp x6, x7, [x3, #48]
- ldp x8, x9, [x3, #64]
- ldp x10, x11, [x3, #80]
- ldp x12, x13, [x3, #96]
- ldp x14, x15, [x3, #112]
- ldp x16, x17, [x3, #128]
- ldr x18, [x3, #144]
-
- // x19-x29, lr, sp*, elr*, spsr*
- restore_common_regs
-
- // Last bits of the 64bit state
- pop x2, x3
- pop x0, x1
-
- // Do not touch any register after this!
-.endm
-
-/*
- * Macros to perform system register save/restore.
- *
- * Ordering here is absolutely critical, and must be kept consistent
- * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
- * and in kvm_asm.h.
- *
- * In other words, don't touch any of these unless you know what
- * you are doing.
- */
-.macro save_sysregs
- // x2: base address for cpu context
- // x3: tmp register
-
- add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
-
- mrs x4, vmpidr_el2
- mrs x5, csselr_el1
- mrs x6, sctlr_el1
- mrs x7, actlr_el1
- mrs x8, cpacr_el1
- mrs x9, ttbr0_el1
- mrs x10, ttbr1_el1
- mrs x11, tcr_el1
- mrs x12, esr_el1
- mrs x13, afsr0_el1
- mrs x14, afsr1_el1
- mrs x15, far_el1
- mrs x16, mair_el1
- mrs x17, vbar_el1
- mrs x18, contextidr_el1
- mrs x19, tpidr_el0
- mrs x20, tpidrro_el0
- mrs x21, tpidr_el1
- mrs x22, amair_el1
- mrs x23, cntkctl_el1
- mrs x24, par_el1
- mrs x25, mdscr_el1
-
- stp x4, x5, [x3]
- stp x6, x7, [x3, #16]
- stp x8, x9, [x3, #32]
- stp x10, x11, [x3, #48]
- stp x12, x13, [x3, #64]
- stp x14, x15, [x3, #80]
- stp x16, x17, [x3, #96]
- stp x18, x19, [x3, #112]
- stp x20, x21, [x3, #128]
- stp x22, x23, [x3, #144]
- stp x24, x25, [x3, #160]
-.endm
-
-.macro save_debug type
- // x4: pointer to register set
- // x5: number of registers to skip
- // x6..x22 trashed
-
- adr x22, 1f
- add x22, x22, x5, lsl #2
- br x22
-1:
- mrs x21, \type\()15_el1
- mrs x20, \type\()14_el1
- mrs x19, \type\()13_el1
- mrs x18, \type\()12_el1
- mrs x17, \type\()11_el1
- mrs x16, \type\()10_el1
- mrs x15, \type\()9_el1
- mrs x14, \type\()8_el1
- mrs x13, \type\()7_el1
- mrs x12, \type\()6_el1
- mrs x11, \type\()5_el1
- mrs x10, \type\()4_el1
- mrs x9, \type\()3_el1
- mrs x8, \type\()2_el1
- mrs x7, \type\()1_el1
- mrs x6, \type\()0_el1
-
- adr x22, 1f
- add x22, x22, x5, lsl #2
- br x22
-1:
- str x21, [x4, #(15 * 8)]
- str x20, [x4, #(14 * 8)]
- str x19, [x4, #(13 * 8)]
- str x18, [x4, #(12 * 8)]
- str x17, [x4, #(11 * 8)]
- str x16, [x4, #(10 * 8)]
- str x15, [x4, #(9 * 8)]
- str x14, [x4, #(8 * 8)]
- str x13, [x4, #(7 * 8)]
- str x12, [x4, #(6 * 8)]
- str x11, [x4, #(5 * 8)]
- str x10, [x4, #(4 * 8)]
- str x9, [x4, #(3 * 8)]
- str x8, [x4, #(2 * 8)]
- str x7, [x4, #(1 * 8)]
- str x6, [x4, #(0 * 8)]
-.endm
-
-.macro restore_sysregs
- // x2: base address for cpu context
- // x3: tmp register
-
- add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
-
- ldp x4, x5, [x3]
- ldp x6, x7, [x3, #16]
- ldp x8, x9, [x3, #32]
- ldp x10, x11, [x3, #48]
- ldp x12, x13, [x3, #64]
- ldp x14, x15, [x3, #80]
- ldp x16, x17, [x3, #96]
- ldp x18, x19, [x3, #112]
- ldp x20, x21, [x3, #128]
- ldp x22, x23, [x3, #144]
- ldp x24, x25, [x3, #160]
-
- msr vmpidr_el2, x4
- msr csselr_el1, x5
- msr sctlr_el1, x6
- msr actlr_el1, x7
- msr cpacr_el1, x8
- msr ttbr0_el1, x9
- msr ttbr1_el1, x10
- msr tcr_el1, x11
- msr esr_el1, x12
- msr afsr0_el1, x13
- msr afsr1_el1, x14
- msr far_el1, x15
- msr mair_el1, x16
- msr vbar_el1, x17
- msr contextidr_el1, x18
- msr tpidr_el0, x19
- msr tpidrro_el0, x20
- msr tpidr_el1, x21
- msr amair_el1, x22
- msr cntkctl_el1, x23
- msr par_el1, x24
- msr mdscr_el1, x25
-.endm
-
-.macro restore_debug type
- // x4: pointer to register set
- // x5: number of registers to skip
- // x6..x22 trashed
-
- adr x22, 1f
- add x22, x22, x5, lsl #2
- br x22
-1:
- ldr x21, [x4, #(15 * 8)]
- ldr x20, [x4, #(14 * 8)]
- ldr x19, [x4, #(13 * 8)]
- ldr x18, [x4, #(12 * 8)]
- ldr x17, [x4, #(11 * 8)]
- ldr x16, [x4, #(10 * 8)]
- ldr x15, [x4, #(9 * 8)]
- ldr x14, [x4, #(8 * 8)]
- ldr x13, [x4, #(7 * 8)]
- ldr x12, [x4, #(6 * 8)]
- ldr x11, [x4, #(5 * 8)]
- ldr x10, [x4, #(4 * 8)]
- ldr x9, [x4, #(3 * 8)]
- ldr x8, [x4, #(2 * 8)]
- ldr x7, [x4, #(1 * 8)]
- ldr x6, [x4, #(0 * 8)]
-
- adr x22, 1f
- add x22, x22, x5, lsl #2
- br x22
-1:
- msr \type\()15_el1, x21
- msr \type\()14_el1, x20
- msr \type\()13_el1, x19
- msr \type\()12_el1, x18
- msr \type\()11_el1, x17
- msr \type\()10_el1, x16
- msr \type\()9_el1, x15
- msr \type\()8_el1, x14
- msr \type\()7_el1, x13
- msr \type\()6_el1, x12
- msr \type\()5_el1, x11
- msr \type\()4_el1, x10
- msr \type\()3_el1, x9
- msr \type\()2_el1, x8
- msr \type\()1_el1, x7
- msr \type\()0_el1, x6
-.endm
-
-.macro skip_32bit_state tmp, target
- // Skip 32bit state if not needed
- mrs \tmp, hcr_el2
- tbnz \tmp, #HCR_RW_SHIFT, \target
-.endm
-
-.macro skip_tee_state tmp, target
- // Skip ThumbEE state if not needed
- mrs \tmp, id_pfr0_el1
- tbz \tmp, #12, \target
-.endm
-
-.macro skip_debug_state tmp, target
- ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
- tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
-.endm
-
-/*
- * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
- */
-.macro skip_fpsimd_state tmp, target
- mrs \tmp, cptr_el2
- tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target
-.endm
-
-.macro compute_debug_state target
- // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
- // is set, we do a full save/restore cycle and disable trapping.
- add x25, x0, #VCPU_CONTEXT
-
- // Check the state of MDSCR_EL1
- ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
- and x26, x25, #DBG_MDSCR_KDE
- and x25, x25, #DBG_MDSCR_MDE
- adds xzr, x25, x26
- b.eq 9998f // Nothing to see there
-
- // If any interesting bits was set, we must set the flag
- mov x26, #KVM_ARM64_DEBUG_DIRTY
- str x26, [x0, #VCPU_DEBUG_FLAGS]
- b 9999f // Don't skip restore
-
-9998:
- // Otherwise load the flags from memory in case we recently
- // trapped
- skip_debug_state x25, \target
-9999:
-.endm
-
-.macro save_guest_32bit_state
- skip_32bit_state x3, 1f
-
- add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
- mrs x4, spsr_abt
- mrs x5, spsr_und
- mrs x6, spsr_irq
- mrs x7, spsr_fiq
- stp x4, x5, [x3]
- stp x6, x7, [x3, #16]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
- mrs x4, dacr32_el2
- mrs x5, ifsr32_el2
- stp x4, x5, [x3]
-
- skip_fpsimd_state x8, 2f
- mrs x6, fpexc32_el2
- str x6, [x3, #16]
-2:
- skip_debug_state x8, 1f
- mrs x7, dbgvcr32_el2
- str x7, [x3, #24]
-1:
-.endm
-
-.macro restore_guest_32bit_state
- skip_32bit_state x3, 1f
-
- add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
- ldp x4, x5, [x3]
- ldp x6, x7, [x3, #16]
- msr spsr_abt, x4
- msr spsr_und, x5
- msr spsr_irq, x6
- msr spsr_fiq, x7
-
- add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
- ldp x4, x5, [x3]
- msr dacr32_el2, x4
- msr ifsr32_el2, x5
-
- skip_debug_state x8, 1f
- ldr x7, [x3, #24]
- msr dbgvcr32_el2, x7
-1:
-.endm
-
-.macro activate_traps
- ldr x2, [x0, #VCPU_HCR_EL2]
-
- /*
- * We are about to set CPTR_EL2.TFP to trap all floating point
- * register accesses to EL2, however, the ARM ARM clearly states that
- * traps are only taken to EL2 if the operation would not otherwise
- * trap to EL1. Therefore, always make sure that for 32-bit guests,
- * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
- */
- tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
- mov x3, #(1 << 30)
- msr fpexc32_el2, x3
- isb
-99:
- msr hcr_el2, x2
- mov x2, #CPTR_EL2_TTA
- orr x2, x2, #CPTR_EL2_TFP
- msr cptr_el2, x2
-
- mov x2, #(1 << 15) // Trap CP15 Cr=15
- msr hstr_el2, x2
-
- // Monitor Debug Config - see kvm_arm_setup_debug()
- ldr x2, [x0, #VCPU_MDCR_EL2]
- msr mdcr_el2, x2
-.endm
-
-.macro deactivate_traps
- mov_q x2, HCR_HOST_NVHE_FLAGS
- msr hcr_el2, x2
- msr hstr_el2, xzr
-
- mrs x2, mdcr_el2
- and x2, x2, #MDCR_EL2_HPMN_MASK
- msr mdcr_el2, x2
-.endm
-
-.macro activate_vm
- ldr x1, [x0, #VCPU_KVM]
- kern_hyp_va x1
- ldr x2, [x1, #KVM_VTTBR]
- msr vttbr_el2, x2
-.endm
-
-.macro deactivate_vm
- msr vttbr_el2, xzr
-.endm
-
-/*
- * Call into the vgic backend for state saving
- */
-.macro save_vgic_state
-alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
- bl __save_vgic_v2_state
-alternative_else
- bl __save_vgic_v3_state
-alternative_endif
- mrs x24, hcr_el2
- mov x25, #HCR_INT_OVERRIDE
- neg x25, x25
- and x24, x24, x25
- msr hcr_el2, x24
-.endm
-
-/*
- * Call into the vgic backend for state restoring
- */
-.macro restore_vgic_state
- mrs x24, hcr_el2
- ldr x25, [x0, #VCPU_IRQ_LINES]
- orr x24, x24, #HCR_INT_OVERRIDE
- orr x24, x24, x25
- msr hcr_el2, x24
-alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
- bl __restore_vgic_v2_state
-alternative_else
- bl __restore_vgic_v3_state
-alternative_endif
-.endm
-
-.macro save_timer_state
- // x0: vcpu pointer
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr w3, [x2, #KVM_TIMER_ENABLED]
- cbz w3, 1f
-
- mrs x3, cntv_ctl_el0
- and x3, x3, #3
- str w3, [x0, #VCPU_TIMER_CNTV_CTL]
-
- isb
-
- mrs x3, cntv_cval_el0
- str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
-
-1:
- // Disable the virtual timer
- msr cntv_ctl_el0, xzr
-
- // Allow physical timer/counter access for the host
- mrs x2, cnthctl_el2
- orr x2, x2, #3
- msr cnthctl_el2, x2
-
- // Clear cntvoff for the host
- msr cntvoff_el2, xzr
-.endm
-
-.macro restore_timer_state
- // x0: vcpu pointer
- // Disallow physical timer access for the guest
- // Physical counter access is allowed
- mrs x2, cnthctl_el2
- orr x2, x2, #1
- bic x2, x2, #2
- msr cnthctl_el2, x2
-
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr w3, [x2, #KVM_TIMER_ENABLED]
- cbz w3, 1f
-
- ldr x3, [x2, #KVM_TIMER_CNTVOFF]
- msr cntvoff_el2, x3
- ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
- msr cntv_cval_el0, x2
- isb
-
- ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
- and x2, x2, #3
- msr cntv_ctl_el0, x2
-1:
-.endm
-
-__save_sysregs:
- save_sysregs
- ret
-
-__restore_sysregs:
- restore_sysregs
- ret
-
-/* Save debug state */
-__save_debug:
- // x2: ptr to CPU context
- // x3: ptr to debug reg struct
- // x4/x5/x6-22/x24-26: trashed
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- mov x5, x24
- add x4, x3, #DEBUG_BCR
- save_debug dbgbcr
- add x4, x3, #DEBUG_BVR
- save_debug dbgbvr
-
- mov x5, x25
- add x4, x3, #DEBUG_WCR
- save_debug dbgwcr
- add x4, x3, #DEBUG_WVR
- save_debug dbgwvr
-
- mrs x21, mdccint_el1
- str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
- ret
-
-/* Restore debug state */
-__restore_debug:
- // x2: ptr to CPU context
- // x3: ptr to debug reg struct
- // x4/x5/x6-22/x24-26: trashed
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- mov x5, x24
- add x4, x3, #DEBUG_BCR
- restore_debug dbgbcr
- add x4, x3, #DEBUG_BVR
- restore_debug dbgbvr
-
- mov x5, x25
- add x4, x3, #DEBUG_WCR
- restore_debug dbgwcr
- add x4, x3, #DEBUG_WVR
- restore_debug dbgwvr
-
- ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
- msr mdccint_el1, x21
-
- ret
-
-__save_fpsimd:
- skip_fpsimd_state x3, 1f
- save_fpsimd
-1: ret
-
-__restore_fpsimd:
- skip_fpsimd_state x3, 1f
- restore_fpsimd
-1: ret
-
-switch_to_guest_fpsimd:
- push x4, lr
-
- mrs x2, cptr_el2
- bic x2, x2, #CPTR_EL2_TFP
- msr cptr_el2, x2
- isb
-
- mrs x0, tpidr_el2
-
- ldr x2, [x0, #VCPU_HOST_CONTEXT]
- kern_hyp_va x2
- bl __save_fpsimd
-
- add x2, x0, #VCPU_CONTEXT
- bl __restore_fpsimd
-
- skip_32bit_state x3, 1f
- ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
- msr fpexc32_el2, x4
-1:
- pop x4, lr
- pop x2, x3
- pop x0, x1
-
- eret
-
-/*
- * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
- *
- * This is the world switch. The first half of the function
- * deals with entering the guest, and anything from __kvm_vcpu_return
- * to the end of the function deals with reentering the host.
- * On the enter path, only x0 (vcpu pointer) must be preserved until
- * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
- * code) must both be preserved until the epilogue.
- * In both cases, x2 points to the CPU context we're saving/restoring from/to.
- */
-ENTRY(__kvm_vcpu_run)
- kern_hyp_va x0
- msr tpidr_el2, x0 // Save the vcpu register
-
- // Host context
- ldr x2, [x0, #VCPU_HOST_CONTEXT]
- kern_hyp_va x2
-
- save_host_regs
- bl __save_sysregs
-
- compute_debug_state 1f
- add x3, x0, #VCPU_HOST_DEBUG_STATE
- bl __save_debug
-1:
- activate_traps
- activate_vm
-
- restore_vgic_state
- restore_timer_state
-
- // Guest context
- add x2, x0, #VCPU_CONTEXT
-
- // We must restore the 32-bit state before the sysregs, thanks
- // to Cortex-A57 erratum #852523.
- restore_guest_32bit_state
- bl __restore_sysregs
-
- skip_debug_state x3, 1f
- ldr x3, [x0, #VCPU_DEBUG_PTR]
- kern_hyp_va x3
- bl __restore_debug
-1:
- restore_guest_regs
-
- // That's it, no more messing around.
- eret
-
-__kvm_vcpu_return:
- // Assume x0 is the vcpu pointer, x1 the return code
- // Guest's x0-x3 are on the stack
-
- // Guest context
- add x2, x0, #VCPU_CONTEXT
-
- save_guest_regs
- bl __save_fpsimd
- bl __save_sysregs
-
- skip_debug_state x3, 1f
- ldr x3, [x0, #VCPU_DEBUG_PTR]
- kern_hyp_va x3
- bl __save_debug
-1:
- save_guest_32bit_state
-
- save_timer_state
- save_vgic_state
-
- deactivate_traps
- deactivate_vm
-
- // Host context
- ldr x2, [x0, #VCPU_HOST_CONTEXT]
- kern_hyp_va x2
-
- bl __restore_sysregs
- bl __restore_fpsimd
- /* Clear FPSIMD and Trace trapping */
- msr cptr_el2, xzr
-
- skip_debug_state x3, 1f
- // Clear the dirty flag for the next run, as all the state has
- // already been saved. Note that we nuke the whole 64bit word.
- // If we ever add more flags, we'll have to be more careful...
- str xzr, [x0, #VCPU_DEBUG_FLAGS]
- add x3, x0, #VCPU_HOST_DEBUG_STATE
- bl __restore_debug
-1:
- restore_host_regs
-
- mov x0, x1
- ret
-END(__kvm_vcpu_run)
-
-// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-ENTRY(__kvm_tlb_flush_vmid_ipa)
- dsb ishst
-
- kern_hyp_va x0
- ldr x2, [x0, #KVM_VTTBR]
- msr vttbr_el2, x2
- isb
-
- /*
- * We could do so much better if we had the VA as well.
- * Instead, we invalidate Stage-2 for this IPA, and the
- * whole of Stage-1. Weep...
- */
- lsr x1, x1, #12
- tlbi ipas2e1is, x1
- /*
- * We have to ensure completion of the invalidation at Stage-2,
- * since a table walk on another CPU could refill a TLB with a
- * complete (S1 + S2) walk based on the old Stage-2 mapping if
- * the Stage-1 invalidation happened first.
- */
- dsb ish
- tlbi vmalle1is
- dsb ish
- isb
-
- msr vttbr_el2, xzr
- ret
-ENDPROC(__kvm_tlb_flush_vmid_ipa)
-
-/**
- * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
- * @struct kvm *kvm - pointer to kvm structure
- *
- * Invalidates all Stage 1 and 2 TLB entries for current VMID.
- */
-ENTRY(__kvm_tlb_flush_vmid)
- dsb ishst
-
- kern_hyp_va x0
- ldr x2, [x0, #KVM_VTTBR]
- msr vttbr_el2, x2
- isb
-
- tlbi vmalls12e1is
- dsb ish
- isb
-
- msr vttbr_el2, xzr
- ret
-ENDPROC(__kvm_tlb_flush_vmid)
-
-ENTRY(__kvm_flush_vm_context)
- dsb ishst
- tlbi alle1is
- ic ialluis
- dsb ish
- ret
-ENDPROC(__kvm_flush_vm_context)
-
-__kvm_hyp_panic:
- // Stash PAR_EL1 before corrupting it in __restore_sysregs
- mrs x0, par_el1
- push x0, xzr
-
- // Guess the context by looking at VTTBR:
- // If zero, then we're already a host.
- // Otherwise restore a minimal host context before panicing.
- mrs x0, vttbr_el2
- cbz x0, 1f
-
- mrs x0, tpidr_el2
-
- deactivate_traps
- deactivate_vm
-
- ldr x2, [x0, #VCPU_HOST_CONTEXT]
- kern_hyp_va x2
-
- bl __restore_sysregs
-
- /*
- * Make sure we have a valid host stack, and don't leave junk in the
- * frame pointer that will give us a misleading host stack unwinding.
- */
- ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
- msr sp_el1, x22
- mov x29, xzr
-
-1: adr x0, __hyp_panic_str
- adr x1, 2f
- ldp x2, x3, [x1]
- sub x0, x0, x2
- add x0, x0, x3
- mrs x1, spsr_el2
- mrs x2, elr_el2
- mrs x3, esr_el2
- mrs x4, far_el2
- mrs x5, hpfar_el2
- pop x6, xzr // active context PAR_EL1
- mrs x7, tpidr_el2
-
- mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
- PSR_MODE_EL1h)
- msr spsr_el2, lr
- ldr lr, =panic
- msr elr_el2, lr
- eret
-
- .align 3
-2: .quad HYP_PAGE_OFFSET
- .quad PAGE_OFFSET
-ENDPROC(__kvm_hyp_panic)
-
-__hyp_panic_str:
- .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
-
- .align 2
/*
* u64 __kvm_call_hyp(void *hypfn, ...);
@@ -934,189 +33,23 @@ __hyp_panic_str:
* passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
* function pointer can be passed). The function being called must be mapped
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
- * passed in r0 and r1.
+ * passed in x0.
*
- * A function pointer with a value of 0 has a special meaning, and is
- * used to implement __hyp_get_vectors in the same way as in
+ * A function pointer with a value less than 0xfff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
* arch/arm64/kernel/hyp_stub.S.
+ * HVC behaves as a 'bl' call and will clobber lr.
*/
ENTRY(__kvm_call_hyp)
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ str lr, [sp, #-16]!
hvc #0
+ ldr lr, [sp], #16
ret
-ENDPROC(__kvm_call_hyp)
-
-.macro invalid_vector label, target
- .align 2
-\label:
- b \target
-ENDPROC(\label)
-.endm
-
- /* None of these should ever happen */
- invalid_vector el2t_sync_invalid, __kvm_hyp_panic
- invalid_vector el2t_irq_invalid, __kvm_hyp_panic
- invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
- invalid_vector el2t_error_invalid, __kvm_hyp_panic
- invalid_vector el2h_sync_invalid, __kvm_hyp_panic
- invalid_vector el2h_irq_invalid, __kvm_hyp_panic
- invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
- invalid_vector el2h_error_invalid, __kvm_hyp_panic
- invalid_vector el1_sync_invalid, __kvm_hyp_panic
- invalid_vector el1_irq_invalid, __kvm_hyp_panic
- invalid_vector el1_fiq_invalid, __kvm_hyp_panic
- invalid_vector el1_error_invalid, __kvm_hyp_panic
-
-el1_sync: // Guest trapped into EL2
- push x0, x1
- push x2, x3
-
- mrs x1, esr_el2
- lsr x2, x1, #ESR_ELx_EC_SHIFT
-
- cmp x2, #ESR_ELx_EC_HVC64
- b.ne el1_trap
-
- mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
- cbnz x3, el1_trap // called HVC
-
- /* Here, we're pretty sure the host called HVC. */
- pop x2, x3
- pop x0, x1
-
- /* Check for __hyp_get_vectors */
- cbnz x0, 1f
- mrs x0, vbar_el2
- b 2f
-
-1: push lr, xzr
-
- /*
- * Compute the function address in EL2, and shuffle the parameters.
- */
- kern_hyp_va x0
- mov lr, x0
- mov x0, x1
- mov x1, x2
- mov x2, x3
- blr lr
-
- pop lr, xzr
-2: eret
-
-el1_trap:
- /*
- * x1: ESR
- * x2: ESR_EC
- */
-
- /* Guest accessed VFP/SIMD registers, save host, restore Guest */
- cmp x2, #ESR_ELx_EC_FP_ASIMD
- b.eq switch_to_guest_fpsimd
-
- cmp x2, #ESR_ELx_EC_DABT_LOW
- mov x0, #ESR_ELx_EC_IABT_LOW
- ccmp x2, x0, #4, ne
- b.ne 1f // Not an abort we care about
-
- /* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
- and x2, x1, #ESR_ELx_FSC_TYPE
- cmp x2, #FSC_PERM
- b.ne 1f // Not a permission fault
alternative_else
- nop // Use the permission fault path to
- nop // check for a valid S1 translation,
- nop // regardless of the ESR value.
+ b __vhe_hyp_call
+ nop
+ nop
+ nop
alternative_endif
-
- /*
- * Check for Stage-1 page table walk, which is guaranteed
- * to give a valid HPFAR_EL2.
- */
- tbnz x1, #7, 1f // S1PTW is set
-
- /* Preserve PAR_EL1 */
- mrs x3, par_el1
- push x3, xzr
-
- /*
- * Permission fault, HPFAR_EL2 is invalid.
- * Resolve the IPA the hard way using the guest VA.
- * Stage-1 translation already validated the memory access rights.
- * As such, we can use the EL1 translation regime, and don't have
- * to distinguish between EL0 and EL1 access.
- */
- mrs x2, far_el2
- at s1e1r, x2
- isb
-
- /* Read result */
- mrs x3, par_el1
- pop x0, xzr // Restore PAR_EL1 from the stack
- msr par_el1, x0
- tbnz x3, #0, 3f // Bail out if we failed the translation
- ubfx x3, x3, #12, #36 // Extract IPA
- lsl x3, x3, #4 // and present it like HPFAR
- b 2f
-
-1: mrs x3, hpfar_el2
- mrs x2, far_el2
-
-2: mrs x0, tpidr_el2
- str w1, [x0, #VCPU_ESR_EL2]
- str x2, [x0, #VCPU_FAR_EL2]
- str x3, [x0, #VCPU_HPFAR_EL2]
-
- mov x1, #ARM_EXCEPTION_TRAP
- b __kvm_vcpu_return
-
- /*
- * Translation failed. Just return to the guest and
- * let it fault again. Another CPU is probably playing
- * behind our back.
- */
-3: pop x2, x3
- pop x0, x1
-
- eret
-
-el1_irq:
- push x0, x1
- push x2, x3
- mrs x0, tpidr_el2
- mov x1, #ARM_EXCEPTION_IRQ
- b __kvm_vcpu_return
-
- .ltorg
-
- .align 11
-
-ENTRY(__kvm_hyp_vector)
- ventry el2t_sync_invalid // Synchronous EL2t
- ventry el2t_irq_invalid // IRQ EL2t
- ventry el2t_fiq_invalid // FIQ EL2t
- ventry el2t_error_invalid // Error EL2t
-
- ventry el2h_sync_invalid // Synchronous EL2h
- ventry el2h_irq_invalid // IRQ EL2h
- ventry el2h_fiq_invalid // FIQ EL2h
- ventry el2h_error_invalid // Error EL2h
-
- ventry el1_sync // Synchronous 64-bit EL1
- ventry el1_irq // IRQ 64-bit EL1
- ventry el1_fiq_invalid // FIQ 64-bit EL1
- ventry el1_error_invalid // Error 64-bit EL1
-
- ventry el1_sync // Synchronous 32-bit EL1
- ventry el1_irq // IRQ 32-bit EL1
- ventry el1_fiq_invalid // FIQ 32-bit EL1
- ventry el1_error_invalid // Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
-
-
-ENTRY(__kvm_get_mdcr_el2)
- mrs x0, mdcr_el2
- ret
-ENDPROC(__kvm_get_mdcr_el2)
-
- .popsection
+ENDPROC(__kvm_call_hyp)
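From the caller's side the convention documented above is unchanged: a HYP-mapped function pointer plus up to three arguments, with the result in x0. A sketch of a typical call site follows (flush_guest_tlbs is an illustrative wrapper; the kvm_call_hyp declaration and the TLB-flush hypercall mirror existing arm64 KVM callers):

	u64 kvm_call_hyp(void *hypfn, ...);
	extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);

	static void flush_guest_tlbs(struct kvm *kvm, phys_addr_t ipa)
	{
		/*
		 * Compiles down to an HVC with the function pointer in x0;
		 * since HVC now behaves like a 'bl', lr is saved and restored
		 * around it (see the non-VHE path above).
		 */
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
	}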
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
new file mode 100644
index 000000000000..826032bc3945
--- /dev/null
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
new file mode 100644
index 000000000000..2f8bca8af295
--- /dev/null
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+#define read_debug(r,n) read_sysreg(r##n##_el1)
+#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
+
+#define save_debug(ptr,reg,nr) \
+ switch (nr) { \
+ case 15: ptr[15] = read_debug(reg, 15); \
+ case 14: ptr[14] = read_debug(reg, 14); \
+ case 13: ptr[13] = read_debug(reg, 13); \
+ case 12: ptr[12] = read_debug(reg, 12); \
+ case 11: ptr[11] = read_debug(reg, 11); \
+ case 10: ptr[10] = read_debug(reg, 10); \
+ case 9: ptr[9] = read_debug(reg, 9); \
+ case 8: ptr[8] = read_debug(reg, 8); \
+ case 7: ptr[7] = read_debug(reg, 7); \
+ case 6: ptr[6] = read_debug(reg, 6); \
+ case 5: ptr[5] = read_debug(reg, 5); \
+ case 4: ptr[4] = read_debug(reg, 4); \
+ case 3: ptr[3] = read_debug(reg, 3); \
+ case 2: ptr[2] = read_debug(reg, 2); \
+ case 1: ptr[1] = read_debug(reg, 1); \
+ default: ptr[0] = read_debug(reg, 0); \
+ }
+
+#define restore_debug(ptr,reg,nr) \
+ switch (nr) { \
+ case 15: write_debug(ptr[15], reg, 15); \
+ case 14: write_debug(ptr[14], reg, 14); \
+ case 13: write_debug(ptr[13], reg, 13); \
+ case 12: write_debug(ptr[12], reg, 12); \
+ case 11: write_debug(ptr[11], reg, 11); \
+ case 10: write_debug(ptr[10], reg, 10); \
+ case 9: write_debug(ptr[9], reg, 9); \
+ case 8: write_debug(ptr[8], reg, 8); \
+ case 7: write_debug(ptr[7], reg, 7); \
+ case 6: write_debug(ptr[6], reg, 6); \
+ case 5: write_debug(ptr[5], reg, 5); \
+ case 4: write_debug(ptr[4], reg, 4); \
+ case 3: write_debug(ptr[3], reg, 3); \
+ case 2: write_debug(ptr[2], reg, 2); \
+ case 1: write_debug(ptr[1], reg, 1); \
+ default: write_debug(ptr[0], reg, 0); \
+ }
+
+void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt)
+{
+ u64 aa64dfr0;
+ int brps, wrps;
+
+ if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+ return;
+
+ aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+ brps = (aa64dfr0 >> 12) & 0xf;
+ wrps = (aa64dfr0 >> 20) & 0xf;
+
+ save_debug(dbg->dbg_bcr, dbgbcr, brps);
+ save_debug(dbg->dbg_bvr, dbgbvr, brps);
+ save_debug(dbg->dbg_wcr, dbgwcr, wrps);
+ save_debug(dbg->dbg_wvr, dbgwvr, wrps);
+
+ ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+}
+
+void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt)
+{
+ u64 aa64dfr0;
+ int brps, wrps;
+
+ if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+ return;
+
+ aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+ brps = (aa64dfr0 >> 12) & 0xf;
+ wrps = (aa64dfr0 >> 20) & 0xf;
+
+ restore_debug(dbg->dbg_bcr, dbgbcr, brps);
+ restore_debug(dbg->dbg_bvr, dbgbvr, brps);
+ restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
+ restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
+
+ write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+}
+
+void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
+{
+ /* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
+ * a full save/restore cycle. */
+ if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
+ (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+
+ __debug_save_state(vcpu, &vcpu->arch.host_debug_state,
+ kern_hyp_va(vcpu->arch.host_cpu_context));
+}
+
+void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
+{
+ __debug_restore_state(vcpu, &vcpu->arch.host_debug_state,
+ kern_hyp_va(vcpu->arch.host_cpu_context));
+
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
+}
+
+static u32 __hyp_text __debug_read_mdcr_el2(void)
+{
+ return read_sysreg(mdcr_el2);
+}
+
+__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
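The save_debug()/restore_debug() macros above rely on deliberate switch fall-through: entering the switch at case `nr` touches registers nr down to 0 in one pass, because debug register numbers must be immediates and cannot be indexed at run time. A standalone demonstration of the pattern (illustrative only, not kernel code):

	#include <stdio.h>

	static void save_down_from(int *dst, const int *src, int nr)
	{
		switch (nr) {		/* no breaks: each case falls through */
		case 3: dst[3] = src[3];
		case 2: dst[2] = src[2];
		case 1: dst[1] = src[1];
		default: dst[0] = src[0];
		}
	}

	int main(void)
	{
		int src[4] = { 10, 11, 12, 13 }, dst[4] = { 0 };

		save_down_from(dst, src, 2);	/* copies indices 2, 1 and 0 */
		printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
		return 0;
	}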
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
new file mode 100644
index 000000000000..fd0fbe9b7e6a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/fpsimdmacros.h>
+#include <asm/kvm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+.macro save_callee_saved_regs ctxt
+ stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+ stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+ stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+ stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+ stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+ stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_callee_saved_regs ctxt
+ ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+ ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+ ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+ ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+ ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+ ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+/*
+ * u64 __guest_enter(struct kvm_vcpu *vcpu,
+ * struct kvm_cpu_context *host_ctxt);
+ */
+ENTRY(__guest_enter)
+ // x0: vcpu
+ // x1: host/guest context
+ // x2-x18: clobbered by macros
+
+ // Store the host regs
+ save_callee_saved_regs x1
+
+ // Preserve vcpu & host_ctxt for use at exit time
+ stp x0, x1, [sp, #-16]!
+
+ add x1, x0, #VCPU_CONTEXT
+
+ // Prepare x0-x1 for later restore by pushing them onto the stack
+ ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
+ stp x2, x3, [sp, #-16]!
+
+ // x2-x18
+ ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
+ ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
+ ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
+ ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
+ ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
+ ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
+ ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
+ ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
+ ldr x18, [x1, #CPU_XREG_OFFSET(18)]
+
+ // x19-x29, lr
+ restore_callee_saved_regs x1
+
+ // Last bits of the 64bit state
+ ldp x0, x1, [sp], #16
+
+ // Do not touch any register after this!
+ eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+ // x0: vcpu
+ // x1: return code
+ // x2-x3: free
+ // x4-x29,lr: vcpu regs
+ // vcpu x0-x3 on the stack
+
+ add x2, x0, #VCPU_CONTEXT
+
+ stp x4, x5, [x2, #CPU_XREG_OFFSET(4)]
+ stp x6, x7, [x2, #CPU_XREG_OFFSET(6)]
+ stp x8, x9, [x2, #CPU_XREG_OFFSET(8)]
+ stp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+ stp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+ stp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+ stp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+ str x18, [x2, #CPU_XREG_OFFSET(18)]
+
+ ldp x6, x7, [sp], #16 // x2, x3
+ ldp x4, x5, [sp], #16 // x0, x1
+
+ stp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+ stp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+
+ save_callee_saved_regs x2
+
+ // Restore vcpu & host_ctxt from the stack
+ // (preserving return code in x1)
+ ldp x0, x2, [sp], #16
+ // Now restore the host regs
+ restore_callee_saved_regs x2
+
+ mov x0, x1
+ ret
+ENDPROC(__guest_exit)
+
+ENTRY(__fpsimd_guest_restore)
+ stp x4, lr, [sp, #-16]!
+
+ mrs x2, cptr_el2
+ bic x2, x2, #CPTR_EL2_TFP
+ msr cptr_el2, x2
+ isb
+
+ mrs x3, tpidr_el2
+
+ ldr x0, [x3, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x0
+ add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+ bl __fpsimd_save_state
+
+ add x2, x3, #VCPU_CONTEXT
+ add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+ bl __fpsimd_restore_state
+
+ // Skip restoring fpexc32 for AArch64 guests
+ mrs x1, hcr_el2
+ tbnz x1, #HCR_RW_SHIFT, 1f
+ ldr x4, [x3, #VCPU_FPEXC32_EL2]
+ msr fpexc32_el2, x4
+1:
+ ldp x4, lr, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+
+ eret
+ENDPROC(__fpsimd_guest_restore)
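__guest_enter saves the host's callee-saved registers into host_ctxt, loads the guest's full register state and erets into the guest; control comes back through __guest_exit with the exception code as the return value. A simplified sketch of how a world-switch loop consumes the pair — it mirrors the shape of the new hyp/switch.c (whose diff is truncated below) but is not the patch's exact code:

	static u64 __hyp_text world_switch(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *host_ctxt =
			kern_hyp_va(vcpu->arch.host_cpu_context);
		u64 exit_code;

		/* Save host GPRs, restore guest GPRs, eret into the guest. */
		exit_code = __guest_enter(vcpu, host_ctxt);
		/* ...we land back here via __guest_exit on a trap or IRQ. */

		return exit_code;
	}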
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
new file mode 100644
index 000000000000..da3f22c7f14a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/fpsimdmacros.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+ENTRY(__fpsimd_save_state)
+ fpsimd_save x0, 1
+ ret
+ENDPROC(__fpsimd_save_state)
+
+ENTRY(__fpsimd_restore_state)
+ fpsimd_restore x0, 1
+ ret
+ENDPROC(__fpsimd_restore_state)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
new file mode 100644
index 000000000000..44c79fd81ad1
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+.macro save_x0_to_x3
+ stp x0, x1, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+.endm
+
+.macro restore_x0_to_x3
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
+.macro do_el2_call
+ /*
+ * Shuffle the parameters before calling the function
+ * pointed to in x0. Assumes parameters in x[1,2,3].
+ */
+ mov lr, x0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ blr lr
+.endm
+
+ENTRY(__vhe_hyp_call)
+ str lr, [sp, #-16]!
+ do_el2_call
+ ldr lr, [sp], #16
+ /*
+ * We used to rely on having an exception return to get
+ * an implicit isb. In the E2H case, we don't have it anymore.
+	 * Rather than changing all the leaf functions, just do it here
+ * before returning to the rest of the kernel.
+ */
+ isb
+ ret
+ENDPROC(__vhe_hyp_call)
+
+el1_sync: // Guest trapped into EL2
+ save_x0_to_x3
+
+ mrs x1, esr_el2
+ lsr x2, x1, #ESR_ELx_EC_SHIFT
+
+ cmp x2, #ESR_ELx_EC_HVC64
+ b.ne el1_trap
+
+ mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
+ cbnz x3, el1_trap // called HVC
+
+ /* Here, we're pretty sure the host called HVC. */
+ restore_x0_to_x3
+
+ cmp x0, #HVC_GET_VECTORS
+ b.ne 1f
+ mrs x0, vbar_el2
+ b 2f
+
+1:
+ /*
+ * Perform the EL2 call
+ */
+ kern_hyp_va x0
+ do_el2_call
+
+2: eret
+
+el1_trap:
+ /*
+ * x1: ESR
+ * x2: ESR_EC
+ */
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x2, #ESR_ELx_EC_FP_ASIMD
+ b.eq __fpsimd_guest_restore
+
+ cmp x2, #ESR_ELx_EC_DABT_LOW
+ mov x0, #ESR_ELx_EC_IABT_LOW
+ ccmp x2, x0, #4, ne
+ b.ne 1f // Not an abort we care about
+
+ /* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
+ and x2, x1, #ESR_ELx_FSC_TYPE
+ cmp x2, #FSC_PERM
+ b.ne 1f // Not a permission fault
+alternative_else
+ nop // Use the permission fault path to
+ nop // check for a valid S1 translation,
+ nop // regardless of the ESR value.
+alternative_endif
+
+ /*
+ * Check for Stage-1 page table walk, which is guaranteed
+ * to give a valid HPFAR_EL2.
+ */
+ tbnz x1, #7, 1f // S1PTW is set
+
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ stp x3, xzr, [sp, #-16]!
+
+ /*
+ * Permission fault, HPFAR_EL2 is invalid.
+ * Resolve the IPA the hard way using the guest VA.
+ * Stage-1 translation already validated the memory access rights.
+ * As such, we can use the EL1 translation regime, and don't have
+ * to distinguish between EL0 and EL1 access.
+ */
+ mrs x2, far_el2
+ at s1e1r, x2
+ isb
+
+ /* Read result */
+ mrs x3, par_el1
+ ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack
+ msr par_el1, x0
+ tbnz x3, #0, 3f // Bail out if we failed the translation
+ ubfx x3, x3, #12, #36 // Extract IPA
+ lsl x3, x3, #4 // and present it like HPFAR
+ b 2f
+
+1: mrs x3, hpfar_el2
+ mrs x2, far_el2
+
+2: mrs x0, tpidr_el2
+ str w1, [x0, #VCPU_ESR_EL2]
+ str x2, [x0, #VCPU_FAR_EL2]
+ str x3, [x0, #VCPU_HPFAR_EL2]
+
+ mov x1, #ARM_EXCEPTION_TRAP
+ b __guest_exit
+
+ /*
+ * Translation failed. Just return to the guest and
+ * let it fault again. Another CPU is probably playing
+ * behind our back.
+ */
+3: restore_x0_to_x3
+
+ eret
+
+el1_irq:
+ save_x0_to_x3
+ mrs x0, tpidr_el2
+ mov x1, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+ENTRY(__hyp_do_panic)
+ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+ PSR_MODE_EL1h)
+ msr spsr_el2, lr
+ ldr lr, =panic
+ msr elr_el2, lr
+ eret
+ENDPROC(__hyp_do_panic)
+
+.macro invalid_vector label, target = __hyp_panic
+ .align 2
+\label:
+ b \target
+ENDPROC(\label)
+.endm
+
+ /* None of these should ever happen */
+ invalid_vector el2t_sync_invalid
+ invalid_vector el2t_irq_invalid
+ invalid_vector el2t_fiq_invalid
+ invalid_vector el2t_error_invalid
+ invalid_vector el2h_sync_invalid
+ invalid_vector el2h_irq_invalid
+ invalid_vector el2h_fiq_invalid
+ invalid_vector el2h_error_invalid
+ invalid_vector el1_sync_invalid
+ invalid_vector el1_irq_invalid
+ invalid_vector el1_fiq_invalid
+ invalid_vector el1_error_invalid
+
+ .ltorg
+
+ .align 11
+
+ENTRY(__kvm_hyp_vector)
+ ventry el2t_sync_invalid // Synchronous EL2t
+ ventry el2t_irq_invalid // IRQ EL2t
+ ventry el2t_fiq_invalid // FIQ EL2t
+ ventry el2t_error_invalid // Error EL2t
+
+ ventry el2h_sync_invalid // Synchronous EL2h
+ ventry el2h_irq_invalid // IRQ EL2h
+ ventry el2h_fiq_invalid // FIQ EL2h
+ ventry el2h_error_invalid // Error EL2h
+
+ ventry el1_sync // Synchronous 64-bit EL1
+ ventry el1_irq // IRQ 64-bit EL1
+ ventry el1_fiq_invalid // FIQ 64-bit EL1
+ ventry el1_error_invalid // Error 64-bit EL1
+
+ ventry el1_sync // Synchronous 32-bit EL1
+ ventry el1_irq // IRQ 32-bit EL1
+ ventry el1_fiq_invalid // FIQ 32-bit EL1
+ ventry el1_error_invalid // Error 32-bit EL1
+ENDPROC(__kvm_hyp_vector)
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
new file mode 100644
index 000000000000..fb275178b6af
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
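+/* Translate a kernel linear-map VA to its HYP alias, and back */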
+#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
+#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
+ + PAGE_OFFSET)
+
+/**
+ * hyp_alternate_select - Generates patchable code sequences that are
+ * used to switch between two implementations of a function, depending
+ * on the availability of a feature.
+ *
+ * @fname: a symbol name that will be defined as a function returning a
+ * function pointer whose type will match @orig and @alt
+ * @orig: A pointer to the default function, as returned by @fname when
+ * @cond doesn't hold
+ * @alt: A pointer to the alternate function, as returned by @fname
+ * when @cond holds
+ * @cond: a CPU feature (as described in asm/cpufeature.h)
+ */
+#define hyp_alternate_select(fname, orig, alt, cond) \
+typeof(orig) * __hyp_text fname(void) \
+{ \
+ typeof(alt) *val = orig; \
+ asm volatile(ALTERNATIVE("nop \n", \
+ "mov %0, %1 \n", \
+ cond) \
+ : "+r" (val) : "r" (alt)); \
+ return val; \
+}
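+
+/*
+ * Example use (see switch.c below): the returned function pointer is
+ * patched at boot based on the CPU feature:
+ *
+ *   static hyp_alternate_select(__vgic_call_save_state,
+ *                               __vgic_v2_save_state, __vgic_v3_save_state,
+ *                               ARM64_HAS_SYSREG_GIC_CPUIF);
+ *   ...
+ *   __vgic_call_save_state()(vcpu);
+ */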
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+
+void __debug_save_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt);
+void __debug_restore_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt);
+void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
+void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
+
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+static inline bool __fpsimd_enabled(void)
+{
+ return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+}
+
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
+
+#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
new file mode 100644
index 000000000000..ca8f5a5e2f96
--- /dev/null
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ /*
+ * We are about to set CPTR_EL2.TFP to trap all floating point
+ * register accesses to EL2, however, the ARM ARM clearly states that
+ * traps are only taken to EL2 if the operation would not otherwise
+ * trap to EL1. Therefore, always make sure that for 32-bit guests,
+ * we set FPEXC.EN to prevent traps to EL1 when setting the TFP bit.
+ */
+ val = vcpu->arch.hcr_el2;
+ if (!(val & HCR_RW)) {
+ write_sysreg(1 << 30, fpexc32_el2);
+ isb();
+ }
+ write_sysreg(val, hcr_el2);
+ /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
+ write_sysreg(1 << 15, hstr_el2);
+ write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+ write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+}
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+ write_sysreg(HCR_RW, hcr_el2);
+ write_sysreg(0, hstr_el2);
+ write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
+ write_sysreg(0, cptr_el2);
+}
+
+static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+}
+
+static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+{
+ write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__vgic_call_save_state,
+ __vgic_v2_save_state, __vgic_v3_save_state,
+ ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static hyp_alternate_select(__vgic_call_restore_state,
+ __vgic_v2_restore_state, __vgic_v3_restore_state,
+ ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+{
+ __vgic_call_save_state()(vcpu);
+ write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
+}
+
+static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ val = read_sysreg(hcr_el2);
+ val |= HCR_INT_OVERRIDE;
+ val |= vcpu->arch.irq_lines;
+ write_sysreg(val, hcr_el2);
+
+ __vgic_call_restore_state()(vcpu);
+}
+
+static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_cpu_context *guest_ctxt;
+ bool fp_enabled;
+ u64 exit_code;
+
+ vcpu = kern_hyp_va(vcpu);
+ write_sysreg(vcpu, tpidr_el2);
+
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ guest_ctxt = &vcpu->arch.ctxt;
+
+ __sysreg_save_state(host_ctxt);
+ __debug_cond_save_host_state(vcpu);
+
+ __activate_traps(vcpu);
+ __activate_vm(vcpu);
+
+ __vgic_restore_state(vcpu);
+ __timer_restore_state(vcpu);
+
+ /*
+ * We must restore the 32-bit state before the sysregs, thanks
+ * to Cortex-A57 erratum #852523.
+ */
+ __sysreg32_restore_state(vcpu);
+ __sysreg_restore_state(guest_ctxt);
+ __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+
+ /* Jump in the fire! */
+ exit_code = __guest_enter(vcpu, host_ctxt);
+ /* And we're baaack! */
+
+ fp_enabled = __fpsimd_enabled();
+
+ __sysreg_save_state(guest_ctxt);
+ __sysreg32_save_state(vcpu);
+ __timer_save_state(vcpu);
+ __vgic_save_state(vcpu);
+
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+
+ __sysreg_restore_state(host_ctxt);
+
+ if (fp_enabled) {
+ __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
+ __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+ }
+
+ __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+ __debug_cond_restore_host_state(vcpu);
+
+ return exit_code;
+}
+
+__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+ unsigned long str_va = (unsigned long)__hyp_panic_string;
+ u64 spsr = read_sysreg(spsr_el2);
+ u64 elr = read_sysreg(elr_el2);
+ u64 par = read_sysreg(par_el1);
+
+ if (read_sysreg(vttbr_el2)) {
+ struct kvm_vcpu *vcpu;
+ struct kvm_cpu_context *host_ctxt;
+
+ vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+ __sysreg_restore_state(host_ctxt);
+ }
+
+ /* Call panic for real */
+ __hyp_do_panic(hyp_kern_va(str_va),
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg(far_el2),
+ read_sysreg(hpfar_el2), par,
+ (void *)read_sysreg(tpidr_el2));
+
+ unreachable();
+}
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
new file mode 100644
index 000000000000..425630980229
--- /dev/null
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* ctxt is already in the HYP VA space */
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+ ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
+ ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
+ ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1);
+ ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
+ ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1);
+ ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1);
+ ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1);
+ ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1);
+ ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1);
+ ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1);
+ ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1);
+ ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1);
+ ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1);
+ ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1);
+ ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1);
+ ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
+ ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+ ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1);
+ ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
+ ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
+
+ ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
+ ctxt->gp_regs.regs.pc = read_sysreg(elr_el2);
+ ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2);
+ ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
+ ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg(spsr_el1);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+ write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
+ write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
+ write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1);
+ write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
+ write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1);
+ write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1);
+ write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1);
+ write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1);
+ write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1);
+ write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1);
+ write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1);
+ write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1);
+ write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1);
+ write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1);
+ write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
+ write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1);
+ write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
+ write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+
+ write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
+ write_sysreg(ctxt->gp_regs.regs.pc, elr_el2);
+ write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2);
+ write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
+ write_sysreg(ctxt->gp_regs.elr_el1, elr_el1);
+ write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
+}
+
+void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
+{
+ u64 *spsr, *sysreg;
+
+ if (read_sysreg(hcr_el2) & HCR_RW)
+ return;
+
+ spsr = vcpu->arch.ctxt.gp_regs.spsr;
+ sysreg = vcpu->arch.ctxt.sys_regs;
+
+ spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
+ spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
+ spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
+ spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
+
+ sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
+ sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+
+ if (__fpsimd_enabled())
+ sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+}
+
+void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+{
+ u64 *spsr, *sysreg;
+
+ if (read_sysreg(hcr_el2) & HCR_RW)
+ return;
+
+ spsr = vcpu->arch.ctxt.gp_regs.spsr;
+ sysreg = vcpu->arch.ctxt.sys_regs;
+
+ write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
+ write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
+ write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
+ write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
+
+ write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
+ write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+}
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c
new file mode 100644
index 000000000000..1051e5d7320f
--- /dev/null
+++ b/arch/arm64/kvm/hyp/timer-sr.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <clocksource/arm_arch_timer.h>
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 val;
+
+ if (kvm->arch.timer.enabled) {
+ timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
+ timer->cntv_cval = read_sysreg(cntv_cval_el0);
+ }
+
+ /* Disable the virtual timer */
+ write_sysreg(0, cntv_ctl_el0);
+
+ /* Allow physical timer/counter access for the host */
+ val = read_sysreg(cnthctl_el2);
+ val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ write_sysreg(val, cnthctl_el2);
+
+ /* Clear cntvoff for the host */
+ write_sysreg(0, cntvoff_el2);
+}
+
+void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 val;
+
+ /*
+ * Disallow physical timer access for the guest,
+ * but allow physical counter access.
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~CNTHCTL_EL1PCEN;
+ val |= CNTHCTL_EL1PCTEN;
+ write_sysreg(val, cnthctl_el2);
+
+ if (kvm->arch.timer.enabled) {
+ write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
+ write_sysreg(timer->cntv_cval, cntv_cval_el0);
+ isb();
+ write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
+ }
+}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
new file mode 100644
index 000000000000..2a7e0d838698
--- /dev/null
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ /*
+ * We could do so much better if we had the VA as well.
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
+ ipa >>= 12;
+ asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));
+
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb(ish);
+ asm volatile("tlbi vmalle1is" : : );
+ dsb(ish);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+ phys_addr_t ipa);
+
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ asm volatile("tlbi vmalls12e1is" : : );
+ dsb(ish);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+static void __hyp_text __tlb_flush_vm_context(void)
+{
+ dsb(ishst);
+ asm volatile("tlbi alle1is \n"
+ "ic ialluis ": : );
+ dsb(ish);
+}
+
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
new file mode 100644
index 000000000000..e71761238cfc
--- /dev/null
+++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+ u32 eisr0, eisr1, elrsr0, elrsr1;
+ int i, nr_lr;
+
+ if (!base)
+ return;
+
+ nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+ cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+ eisr0 = readl_relaxed(base + GICH_EISR0);
+ elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+ if (unlikely(nr_lr > 32)) {
+ eisr1 = readl_relaxed(base + GICH_EISR1);
+ elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+ } else {
+ eisr1 = elrsr1 = 0;
+ }
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
+ cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+ cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
+ cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+ cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+
+ writel_relaxed(0, base + GICH_HCR);
+
+ for (i = 0; i < nr_lr; i++)
+ cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+ int i, nr_lr;
+
+ if (!base)
+ return;
+
+ writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+ writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+ writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+
+ nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ for (i = 0; i < nr_lr; i++)
+ writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
+}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
new file mode 100644
index 000000000000..9142e082f5f3
--- /dev/null
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
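+/* ICH_VTR_EL2: bits [3:0] hold (number of LRs - 1), bits [31:29] hold (PRIbits - 1) */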
+#define vtr_to_max_lr_idx(v) ((v) & 0xf)
+#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
+
+#define read_gicreg(r) \
+ ({ \
+ u64 reg; \
+ asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
+ reg; \
+ })
+
+#define write_gicreg(v,r) \
+ do { \
+ u64 __val = (v); \
+ asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
+ } while (0)
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 val;
+ u32 max_lr_idx, nr_pri_bits;
+
+ /*
+ * Make sure stores to the GIC via the memory mapped interface
+ * are now visible to the system register interface.
+ */
+ dsb(st);
+
+ cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+ cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
+ cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
+ cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
+
+ write_gicreg(0, ICH_HCR_EL2);
+ val = read_gicreg(ICH_VTR_EL2);
+ max_lr_idx = vtr_to_max_lr_idx(val);
+ nr_pri_bits = vtr_to_nr_pri_bits(val);
+
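+ /* Deliberate fallthrough: save from the highest implemented LR down to LR0 */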
+ switch (max_lr_idx) {
+ case 15:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
+ case 14:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
+ case 13:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
+ case 12:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
+ case 11:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
+ case 10:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
+ case 9:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
+ case 8:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
+ case 7:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
+ case 6:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
+ case 5:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
+ case 4:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
+ case 3:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
+ case 2:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
+ case 1:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
+ case 0:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
+ cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+ case 6:
+ cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+ default:
+ cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
+ cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+ case 6:
+ cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+ default:
+ cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+ }
+
+ val = read_gicreg(ICC_SRE_EL2);
+ write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+ isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+ write_gicreg(1, ICC_SRE_EL1);
+}
+
+void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 val;
+ u32 max_lr_idx, nr_pri_bits;
+
+ /*
+ * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
+ * Group0 interrupt (as generated in GICv2 mode) to be
+ * delivered as a FIQ to the guest, with potentially fatal
+ * consequences. So we must make sure that ICC_SRE_EL1 has
+ * been actually programmed with the value we want before
+ * starting to mess with the rest of the GIC.
+ */
+ write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
+ isb();
+
+ write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+ write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
+
+ val = read_gicreg(ICH_VTR_EL2);
+ max_lr_idx = vtr_to_max_lr_idx(val);
+ nr_pri_bits = vtr_to_nr_pri_bits(val);
+
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+ write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
+ write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+ }
+
+ switch (max_lr_idx) {
+ case 15:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
+ case 14:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
+ case 13:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
+ case 12:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
+ case 11:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
+ case 10:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
+ case 9:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
+ case 8:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
+ case 7:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
+ case 5:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
+ case 4:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
+ case 3:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
+ case 2:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
+ case 1:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
+ case 0:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
+ }
+
+ /*
+ * Ensure that the above writes have reached the
+ * (re)distributors. This ensures the guest will read the
+ * correct values from the memory-mapped interface.
+ */
+ isb();
+ dsb(sy);
+
+ /*
+ * Prevent the guest from touching the GIC system registers if
+ * SRE isn't enabled for GICv3 emulation.
+ */
+ if (!cpu_if->vgic_sre) {
+ write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+ ICC_SRE_EL2);
+ }
+}
+
+static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
+{
+ return read_gicreg(ICH_VTR_EL2);
+}
+
+__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f34745cb3d23..d6e155a212dc 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -29,7 +29,9 @@
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
/*
* ARMv8 Reset Values
@@ -123,3 +125,15 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
+
+extern char __hyp_idmap_text_start[];
+
+phys_addr_t kvm_hyp_reset_entry(void)
+{
+ unsigned long offset;
+
+ offset = (unsigned long)__kvm_hyp_reset
+ - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
+
+ return TRAMPOLINE_VA + offset;
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0a587e7b9b6e..2127fdf74f20 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -29,6 +29,7 @@
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
@@ -219,9 +220,9 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
* All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
* hyp.S code switches between host and guest values in future.
*/
-static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- u64 *dbg_reg)
+static void reg_to_dbg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ u64 *dbg_reg)
{
u64 val = p->regval;
@@ -234,18 +235,18 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}
-static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- u64 *dbg_reg)
+static void dbg_to_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ u64 *dbg_reg)
{
p->regval = *dbg_reg;
if (p->is_32bit)
p->regval &= 0xffffffffUL;
}
-static inline bool trap_bvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static bool trap_bvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -279,15 +280,15 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static inline void reset_bvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static void reset_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
-static inline bool trap_bcr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static bool trap_bcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -322,15 +323,15 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static inline void reset_bcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static void reset_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}
-static inline bool trap_wvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static bool trap_wvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -365,15 +366,15 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static inline void reset_wvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static void reset_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}
-static inline bool trap_wcr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static bool trap_wcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -407,8 +408,8 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static inline void reset_wcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static void reset_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
@@ -722,9 +723,9 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
* system is in.
*/
-static inline bool trap_xvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static bool trap_xvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
deleted file mode 100644
index 3f000712a85d..000000000000
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/assembler.h>
-#include <asm/memory.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
- .text
- .pushsection .hyp.text, "ax"
-
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-ENTRY(__save_vgic_v2_state)
-__save_vgic_v2_state:
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* Save all interesting registers */
- ldr w5, [x2, #GICH_VMCR]
- ldr w6, [x2, #GICH_MISR]
- ldr w7, [x2, #GICH_EISR0]
- ldr w8, [x2, #GICH_EISR1]
- ldr w9, [x2, #GICH_ELRSR0]
- ldr w10, [x2, #GICH_ELRSR1]
- ldr w11, [x2, #GICH_APR]
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
-CPU_BE( rev w9, w9 )
-CPU_BE( rev w10, w10 )
-CPU_BE( rev w11, w11 )
-
- str w5, [x3, #VGIC_V2_CPU_VMCR]
- str w6, [x3, #VGIC_V2_CPU_MISR]
-CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
-CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
-CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] )
-CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
-CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
-CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] )
-CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
-CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] )
- str w11, [x3, #VGIC_V2_CPU_APR]
-
- /* Clear GICH_HCR */
- str wzr, [x2, #GICH_HCR]
-
- /* Save list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_V2_CPU_LR
-1: ldr w5, [x2], #4
-CPU_BE( rev w5, w5 )
- str w5, [x3], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
- ret
-ENDPROC(__save_vgic_v2_state)
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-ENTRY(__restore_vgic_v2_state)
-__restore_vgic_v2_state:
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* We only restore a minimal set of registers */
- ldr w4, [x3, #VGIC_V2_CPU_HCR]
- ldr w5, [x3, #VGIC_V2_CPU_VMCR]
- ldr w6, [x3, #VGIC_V2_CPU_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-
- str w4, [x2, #GICH_HCR]
- str w5, [x2, #GICH_VMCR]
- str w6, [x2, #GICH_APR]
-
- /* Restore list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_V2_CPU_LR
-1: ldr w5, [x3], #4
-CPU_BE( rev w5, w5 )
- str w5, [x2], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
- ret
-ENDPROC(__restore_vgic_v2_state)
-
- .popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
deleted file mode 100644
index 3c20730ddff5..000000000000
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic-v3.h>
-
-#include <asm/assembler.h>
-#include <asm/memory.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
- .text
- .pushsection .hyp.text, "ax"
-
-/*
- * We store LRs in reverse order to let the CPU deal with streaming
- * access. Use this macro to make it look saner...
- */
-#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
-
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-.macro save_vgic_v3_state
- // Compute the address of struct vgic_cpu
- add x3, x0, #VCPU_VGIC_CPU
-
- // Make sure stores to the GIC via the memory mapped interface
- // are now visible to the system register interface
- dsb st
-
- // Save all interesting registers
- mrs_s x5, ICH_VMCR_EL2
- mrs_s x6, ICH_MISR_EL2
- mrs_s x7, ICH_EISR_EL2
- mrs_s x8, ICH_ELSR_EL2
-
- str w5, [x3, #VGIC_V3_CPU_VMCR]
- str w6, [x3, #VGIC_V3_CPU_MISR]
- str w7, [x3, #VGIC_V3_CPU_EISR]
- str w8, [x3, #VGIC_V3_CPU_ELRSR]
-
- msr_s ICH_HCR_EL2, xzr
-
- mrs_s x21, ICH_VTR_EL2
- mvn w22, w21
- ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
-
- adr x24, 1f
- add x24, x24, x23
- br x24
-
-1:
- mrs_s x20, ICH_LR15_EL2
- mrs_s x19, ICH_LR14_EL2
- mrs_s x18, ICH_LR13_EL2
- mrs_s x17, ICH_LR12_EL2
- mrs_s x16, ICH_LR11_EL2
- mrs_s x15, ICH_LR10_EL2
- mrs_s x14, ICH_LR9_EL2
- mrs_s x13, ICH_LR8_EL2
- mrs_s x12, ICH_LR7_EL2
- mrs_s x11, ICH_LR6_EL2
- mrs_s x10, ICH_LR5_EL2
- mrs_s x9, ICH_LR4_EL2
- mrs_s x8, ICH_LR3_EL2
- mrs_s x7, ICH_LR2_EL2
- mrs_s x6, ICH_LR1_EL2
- mrs_s x5, ICH_LR0_EL2
-
- adr x24, 1f
- add x24, x24, x23
- br x24
-
-1:
- str x20, [x3, #LR_OFFSET(15)]
- str x19, [x3, #LR_OFFSET(14)]
- str x18, [x3, #LR_OFFSET(13)]
- str x17, [x3, #LR_OFFSET(12)]
- str x16, [x3, #LR_OFFSET(11)]
- str x15, [x3, #LR_OFFSET(10)]
- str x14, [x3, #LR_OFFSET(9)]
- str x13, [x3, #LR_OFFSET(8)]
- str x12, [x3, #LR_OFFSET(7)]
- str x11, [x3, #LR_OFFSET(6)]
- str x10, [x3, #LR_OFFSET(5)]
- str x9, [x3, #LR_OFFSET(4)]
- str x8, [x3, #LR_OFFSET(3)]
- str x7, [x3, #LR_OFFSET(2)]
- str x6, [x3, #LR_OFFSET(1)]
- str x5, [x3, #LR_OFFSET(0)]
-
- tbnz w21, #29, 6f // 6 bits
- tbz w21, #30, 5f // 5 bits
- // 7 bits
- mrs_s x20, ICH_AP0R3_EL2
- str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
- mrs_s x19, ICH_AP0R2_EL2
- str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
-6: mrs_s x18, ICH_AP0R1_EL2
- str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
-5: mrs_s x17, ICH_AP0R0_EL2
- str w17, [x3, #VGIC_V3_CPU_AP0R]
-
- tbnz w21, #29, 6f // 6 bits
- tbz w21, #30, 5f // 5 bits
- // 7 bits
- mrs_s x20, ICH_AP1R3_EL2
- str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
- mrs_s x19, ICH_AP1R2_EL2
- str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
-6: mrs_s x18, ICH_AP1R1_EL2
- str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
-5: mrs_s x17, ICH_AP1R0_EL2
- str w17, [x3, #VGIC_V3_CPU_AP1R]
-
- // Restore SRE_EL1 access and re-enable SRE at EL1.
- mrs_s x5, ICC_SRE_EL2
- orr x5, x5, #ICC_SRE_EL2_ENABLE
- msr_s ICC_SRE_EL2, x5
- isb
- mov x5, #1
- msr_s ICC_SRE_EL1, x5
-.endm
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-.macro restore_vgic_v3_state
- // Compute the address of struct vgic_cpu
- add x3, x0, #VCPU_VGIC_CPU
-
- // Restore all interesting registers
- ldr w4, [x3, #VGIC_V3_CPU_HCR]
- ldr w5, [x3, #VGIC_V3_CPU_VMCR]
- ldr w25, [x3, #VGIC_V3_CPU_SRE]
-
- msr_s ICC_SRE_EL1, x25
-
- // make sure SRE is valid before writing the other registers
- isb
-
- msr_s ICH_HCR_EL2, x4
- msr_s ICH_VMCR_EL2, x5
-
- mrs_s x21, ICH_VTR_EL2
-
- tbnz w21, #29, 6f // 6 bits
- tbz w21, #30, 5f // 5 bits
- // 7 bits
- ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
- msr_s ICH_AP1R3_EL2, x20
- ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
- msr_s ICH_AP1R2_EL2, x19
-6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
- msr_s ICH_AP1R1_EL2, x18
-5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
- msr_s ICH_AP1R0_EL2, x17
-
- tbnz w21, #29, 6f // 6 bits
- tbz w21, #30, 5f // 5 bits
- // 7 bits
- ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
- msr_s ICH_AP0R3_EL2, x20
- ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
- msr_s ICH_AP0R2_EL2, x19
-6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
- msr_s ICH_AP0R1_EL2, x18
-5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
- msr_s ICH_AP0R0_EL2, x17
-
- and w22, w21, #0xf
- mvn w22, w21
- ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
-
- adr x24, 1f
- add x24, x24, x23
- br x24
-
-1:
- ldr x20, [x3, #LR_OFFSET(15)]
- ldr x19, [x3, #LR_OFFSET(14)]
- ldr x18, [x3, #LR_OFFSET(13)]
- ldr x17, [x3, #LR_OFFSET(12)]
- ldr x16, [x3, #LR_OFFSET(11)]
- ldr x15, [x3, #LR_OFFSET(10)]
- ldr x14, [x3, #LR_OFFSET(9)]
- ldr x13, [x3, #LR_OFFSET(8)]
- ldr x12, [x3, #LR_OFFSET(7)]
- ldr x11, [x3, #LR_OFFSET(6)]
- ldr x10, [x3, #LR_OFFSET(5)]
- ldr x9, [x3, #LR_OFFSET(4)]
- ldr x8, [x3, #LR_OFFSET(3)]
- ldr x7, [x3, #LR_OFFSET(2)]
- ldr x6, [x3, #LR_OFFSET(1)]
- ldr x5, [x3, #LR_OFFSET(0)]
-
- adr x24, 1f
- add x24, x24, x23
- br x24
-
-1:
- msr_s ICH_LR15_EL2, x20
- msr_s ICH_LR14_EL2, x19
- msr_s ICH_LR13_EL2, x18
- msr_s ICH_LR12_EL2, x17
- msr_s ICH_LR11_EL2, x16
- msr_s ICH_LR10_EL2, x15
- msr_s ICH_LR9_EL2, x14
- msr_s ICH_LR8_EL2, x13
- msr_s ICH_LR7_EL2, x12
- msr_s ICH_LR6_EL2, x11
- msr_s ICH_LR5_EL2, x10
- msr_s ICH_LR4_EL2, x9
- msr_s ICH_LR3_EL2, x8
- msr_s ICH_LR2_EL2, x7
- msr_s ICH_LR1_EL2, x6
- msr_s ICH_LR0_EL2, x5
-
- // Ensure that the above will have reached the
- // (re)distributors. This ensure the guest will read
- // the correct values from the memory-mapped interface.
- isb
- dsb sy
-
- // Prevent the guest from touching the GIC system registers
- // if SRE isn't enabled for GICv3 emulation
- cbnz x25, 1f
- mrs_s x5, ICC_SRE_EL2
- and x5, x5, #~ICC_SRE_EL2_ENABLE
- msr_s ICC_SRE_EL2, x5
-1:
-.endm
-
-ENTRY(__save_vgic_v3_state)
- save_vgic_v3_state
- ret
-ENDPROC(__save_vgic_v3_state)
-
-ENTRY(__restore_vgic_v3_state)
- restore_vgic_v3_state
- ret
-ENDPROC(__restore_vgic_v3_state)
-
-ENTRY(__vgic_v3_get_ich_vtr_el2)
- mrs_s x0, ICH_VTR_EL2
- ret
-ENDPROC(__vgic_v3_get_ich_vtr_el2)
-
- .popsection
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index e5091d9cceb6..f14be126fb6a 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -26,6 +26,79 @@
#include <asm/uaccess.h>
/*
+ * __flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ *
+ * Corrupted registers: x0-x7, x9-x11
+ */
+__flush_dcache_all:
+ dmb sy // ensure ordering with previous memory accesses
+ mrs x0, clidr_el1 // read clidr
+ and x3, x0, #0x7000000 // extract loc from clidr
+ lsr x3, x3, #23 // move loc to bits [3:1] (level * 2)
+ cbz x3, finished // if loc is 0, then no need to clean
+ mov x10, #0 // start clean at cache level 0
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask off the bits for the current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt skip // skip if no cache, or just i-cache
+ save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ restore_irqs x9
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ mov x4, #0x3ff
+ and x4, x4, x1, lsr #3 // find the maximum way number (associativity - 1)
+ clz w5, w4 // find bit position of way size increment
+ mov x7, #0x7fff
+ and x7, x7, x1, lsr #13 // extract the maximum set (index) number
+loop2:
+ mov x9, x4 // create working copy of max way size
+loop3:
+ lsl x6, x9, x5
+ orr x11, x10, x6 // factor way and cache number into x11
+ lsl x6, x7, x2
+ orr x11, x11, x6 // factor index number into x11
+ dc cisw, x11 // clean & invalidate by set/way
+ subs x9, x9, #1 // decrement the way
+ b.ge loop3
+ subs x7, x7, #1 // decrement the index
+ b.ge loop2
+skip:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+finished:
+ mov x10, #0 // switch back to cache level 0
+ msr csselr_el1, x10 // select current cache level in csselr
+ dsb sy
+ isb
+ ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ * flush_cache_all()
+ *
+ * Flush the entire cache system. The data cache flush is now achieved
+ * using atomic clean / invalidates working outwards from L1 cache. This
+ * is done using Set/Way based cache maintenance instructions. The
+ * instruction cache can still be invalidated back to the point of
+ * unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+ mov x12, lr
+ bl __flush_dcache_all
+ mov x0, #0
+ ic ialluis // I+BTB cache invalidate
+ ret x12
+ENDPROC(flush_cache_all)
+
+/*
* flush_icache_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
@@ -121,7 +194,7 @@ ENTRY(__inval_cache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_inv_range:
+ENTRY(__dma_inv_range)
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -147,7 +220,7 @@ ENDPROC(__dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_clean_range:
+ENTRY(__dma_clean_range)
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index d4bf895b47cf..1e31cd3871ee 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -191,6 +191,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
+
+ arm64_apply_bp_hardening();
+
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
* emulating PAN.
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 925b2b3a06f8..159c79612e63 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -24,21 +25,49 @@
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <linux/io.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-mapping-fast.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include "mm.h"
+
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+ return pgprot_noncached(prot);
+ else if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
return pgprot_writecombine(prot);
return prot;
}
-static struct gen_pool *atomic_pool;
+static bool is_dma_coherent(struct device *dev, struct dma_attrs *attrs)
+{
+ bool is_coherent;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_COHERENT, attrs))
+ is_coherent = true;
+ else if (dma_get_attr(DMA_ATTR_FORCE_NON_COHERENT, attrs))
+ is_coherent = false;
+ else if (is_device_dma_coherent(dev))
+ is_coherent = true;
+ else
+ is_coherent = false;
+
+ return is_coherent;
+}
+static struct gen_pool *atomic_pool;
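+/* Dummy cookie handed out when DMA_ATTR_NO_KERNEL_MAPPING leaves no kernel VA */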
+#define NO_KERNEL_MAPPING_DUMMY 0x2222
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
@@ -86,10 +115,47 @@ static int __free_from_pool(void *start, size_t size)
return 1;
}
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page *page = virt_to_page(addr);
+ pgprot_t prot = *(pgprot_t *)data;
+
+ set_pte(pte, mk_pte(page, prot));
+ return 0;
+}
+
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ pte_clear(&init_mm, addr, pte);
+ return 0;
+}
+
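+/* Rewrite (or clear) the kernel linear-map PTEs covering a DMA buffer */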
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+ bool no_kernel_map)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned long end = start + size;
+ int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
+
+ if (no_kernel_map)
+ func = __dma_clear_pte;
+ else
+ func = __dma_update_pte;
+
+ apply_to_page_range(&init_mm, start, size, func, &prot);
+ mb();
+ flush_tlb_kernel_range(start, end);
+}
+
static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
+ void *addr;
+
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
return NULL;
@@ -100,7 +166,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
flags |= GFP_DMA;
if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
struct page *page;
- void *addr;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
@@ -110,10 +175,20 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = phys_to_dma(dev, page_to_phys(page));
addr = page_address(page);
memset(addr, 0, size);
- return addr;
} else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ addr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
+
+ if (addr && (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
+ dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))) {
+ /*
+ * Flush the caches now; once the kernel mapping is removed
+ * we can no longer do so.
+ */
+ __dma_flush_range(addr, addr + size);
+ __dma_remap(virt_to_page(addr), size, 0, true);
}
+
+ return addr;
}
static void __dma_free_coherent(struct device *dev, size_t size,
@@ -123,11 +198,16 @@ static void __dma_free_coherent(struct device *dev, size_t size,
bool freed;
phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+ size = PAGE_ALIGN(size);
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
return;
}
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
+ dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+ __dma_remap(phys_to_page(paddr), size, PAGE_KERNEL, false);
+
freed = dma_release_from_contiguous(dev,
phys_to_page(paddr),
size >> PAGE_SHIFT);
@@ -141,8 +221,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
{
struct page *page;
void *ptr, *coherent_ptr;
- bool coherent = is_device_dma_coherent(dev);
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+ bool coherent = is_dma_coherent(dev, attrs);
size = PAGE_ALIGN(size);
@@ -164,16 +243,22 @@ static void *__dma_alloc(struct device *dev, size_t size,
if (coherent)
return ptr;
- /* remove any dirty cache lines on the kernel alias */
- __dma_flush_range(ptr, ptr + size);
-
- /* create a coherent mapping */
- page = virt_to_page(ptr);
- coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, __builtin_return_address(0));
- if (!coherent_ptr)
- goto no_map;
-
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ coherent_ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
+ } else {
+ if (!dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+ /* remove any dirty cache lines on the kernel alias */
+ __dma_flush_range(ptr, ptr + size);
+
+ /* create a coherent mapping */
+ page = virt_to_page(ptr);
+ coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ __get_dma_pgprot(attrs,
+ __pgprot(PROT_NORMAL_NC), false),
+ NULL);
+ if (!coherent_ptr)
+ goto no_map;
+ }
return coherent_ptr;
no_map:
@@ -191,10 +276,11 @@ static void __dma_free(struct device *dev, size_t size,
size = PAGE_ALIGN(size);
- if (!is_device_dma_coherent(dev)) {
+ if (!is_dma_coherent(dev, attrs)) {
if (__free_from_pool(vaddr, size))
return;
- vunmap(vaddr);
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ vunmap(vaddr);
}
__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
@@ -207,7 +293,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
dma_addr_t dev_addr;
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- if (!is_device_dma_coherent(dev))
+ if (!is_dma_coherent(dev, attrs))
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
return dev_addr;
@@ -218,7 +304,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- if (!is_device_dma_coherent(dev))
+ if (!is_dma_coherent(dev, attrs))
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
@@ -231,7 +317,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int i, ret;
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- if (!is_device_dma_coherent(dev))
+ if (!is_dma_coherent(dev, attrs))
for_each_sg(sgl, sg, ret, i)
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
@@ -247,7 +333,7 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sg;
int i;
- if (!is_device_dma_coherent(dev))
+ if (!is_dma_coherent(dev, attrs))
for_each_sg(sgl, sg, nelems, i)
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
@@ -313,7 +399,7 @@ static int __swiotlb_mmap(struct device *dev,
unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
+ is_dma_coherent(dev, attrs));
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -341,6 +427,55 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
}
+static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct page *page = phys_to_page(dma_to_phys(dev, handle));
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+ unsigned long offset = handle & ~PAGE_MASK;
+ struct vm_struct *area;
+ unsigned long addr;
+
+ size = PAGE_ALIGN(size + offset);
+
+ /*
+ * The DMA allocation can be mapped to user space, so let's
+ * set the VM_USERMAP flag too.
+ */
+ area = get_vm_area(size, VM_USERMAP);
+ if (!area)
+ return NULL;
+
+ addr = (unsigned long)area->addr;
+ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+ if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+ return (void *)addr + offset;
+}
+
+static void arm64_dma_unremap(struct device *dev, void *remapped_addr,
+ size_t size)
+{
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
+
+ area = find_vm_area(remapped_addr);
+ if (!area) {
+ WARN(1, "trying to free invalid coherent area: %p\n",
+ remapped_addr);
+ return;
+ }
+ vunmap(remapped_addr);
+ flush_tlb_kernel_range((unsigned long)remapped_addr,
+ (unsigned long)(remapped_addr + size));
+}
+
static struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
@@ -356,6 +491,8 @@ static struct dma_map_ops swiotlb_dma_ops = {
.sync_sg_for_device = __swiotlb_sync_sg_for_device,
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
+ .remap = arm64_dma_remap,
+ .unremap = arm64_dma_unremap,
};
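For context, a minimal sketch of how a driver on this tree might exercise the DMA_ATTR_NO_KERNEL_MAPPING path wired up above. The device, size, and helper name are hypothetical; the allocation returns an opaque cookie rather than a usable kernel virtual address. This tree still uses the struct dma_attrs API:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, not part of the patch */
static void *example_alloc_no_kmap(struct device *dev, size_t size,
				   dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	/* Dispatches into __dma_alloc(); the kernel alias is skipped */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}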
static int __init atomic_pool_init(void)
@@ -406,7 +543,7 @@ static int __init atomic_pool_init(void)
goto out;
remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+ dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, true);
destroy_genpool:
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
@@ -427,6 +564,7 @@ static void *__dummy_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
+ WARN(1, "dma alloc failure, device may be missing a call to arch_setup_dma_ops");
return NULL;
}
@@ -542,7 +680,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
struct dma_attrs *attrs)
{
- bool coherent = is_device_dma_coherent(dev);
+ bool coherent = is_dma_coherent(dev, attrs);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
size_t iosize = size;
void *addr;
@@ -624,7 +762,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
if (WARN_ON(!area || !area->pages))
return;
iommu_dma_free(dev, area->pages, iosize, &handle);
- dma_common_free_remap(cpu_addr, size, VM_USERMAP);
+ dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);
} else {
iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
__free_pages(virt_to_page(cpu_addr), get_order(size));
@@ -639,7 +777,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
int ret;
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
+ is_dma_coherent(dev, attrs));
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -696,7 +834,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- bool coherent = is_device_dma_coherent(dev);
+ bool coherent = is_dma_coherent(dev, attrs);
int prot = dma_direction_to_prot(dir, coherent);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
@@ -749,7 +887,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- bool coherent = is_device_dma_coherent(dev);
+ bool coherent = is_dma_coherent(dev, attrs);
if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
@@ -782,7 +920,6 @@ static struct dma_map_ops iommu_dma_ops = {
.sync_single_for_device = __iommu_sync_single_for_device,
.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
.sync_sg_for_device = __iommu_sync_sg_for_device,
- .dma_supported = iommu_dma_supported,
.mapping_error = iommu_dma_mapping_error,
};
@@ -993,3 +1130,922 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot,
+ bool coherent)
+{
+ if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+ prot |= IOMMU_NOEXEC;
+ if (coherent)
+ prot |= IOMMU_CACHE;
+
+ return prot;
+}
+
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_unmap_area(page_address(page) + off, size, dir);
+
+ /*
+ * Mark the D-cache clean for this page to avoid extra flushing.
+ */
+ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+ set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+/* IOMMU */
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+ struct dma_attrs *attrs, bool is_coherent)
+{
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ void *ptr = page_address(page);
+ if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+ memset(ptr, 0, size);
+ if (!is_coherent)
+ dmac_flush_range(ptr, ptr + size);
+}
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
+ order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
+
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ align = (1 << order) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << PAGE_SHIFT);
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >> PAGE_SHIFT;
+ unsigned int count = size >> PAGE_SHIFT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, struct dma_attrs *attrs)
+{
+ struct page **pages;
+ size_t count = size >> PAGE_SHIFT;
+ size_t array_size = count * sizeof(struct page *);
+ int i = 0;
+ bool is_coherent = is_dma_coherent(dev, attrs);
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ unsigned long order = get_order(size);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ goto error;
+
+ __dma_clear_buffer(page, size, attrs, is_coherent);
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ return pages;
+ }
+
+ /*
+ * The IOMMU can map any pages, so highmem can also be used here
+ */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs,
+ is_coherent);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+ size_t size, struct dma_attrs *attrs)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ dma_release_from_contiguous(dev, pages[0], count);
+ } else {
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ }
+
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
+{
+ return dma_common_pages_remap(pages, size, VM_USERMAP, prot, caller);
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t __iommu_create_mapping(struct device *dev,
+ struct page **pages, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+ prot = __get_iommu_pgprot(attrs, prot,
+ is_dma_coherent(dev, attrs));
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len, prot);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
+ size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static struct page **__atomic_get_pages(void *addr)
+{
+ struct page *page;
+ phys_addr_t phys;
+
+ phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+ page = phys_to_page(phys);
+
+ return (struct page **)page;
+}
+
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+ struct vm_struct *area;
+
+ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+ return __atomic_get_pages(cpu_addr);
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return cpu_addr;
+
+ area = find_vm_area(cpu_addr);
+ if (area)
+ return area->pages;
+ return NULL;
+}
+
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ struct page *page;
+ struct page **pages;
+ size_t count = size >> PAGE_SHIFT;
+ size_t array_size = count * sizeof(struct page *);
+ int i;
+ void *addr;
+ bool coherent = is_dma_coherent(dev, attrs);
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+
+ if (!pages)
+ return NULL;
+
+ if (coherent) {
+ page = alloc_pages(gfp, get_order(size));
+ addr = page ? page_address(page) : NULL;
+ } else {
+ addr = __alloc_from_pool(size, &page, gfp);
+ }
+
+ if (!addr)
+ goto err_free;
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ *handle = __iommu_create_mapping(dev, pages, size, attrs);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ kvfree(pages);
+ return addr;
+
+err_mapping:
+ if (coherent)
+ __free_pages(page, get_order(size));
+ else
+ __free_from_pool(addr, size);
+err_free:
+ kvfree(pages);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(cpu_addr, size);
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ bool coherent = is_dma_coherent(dev, attrs);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ if (!gfpflags_allow_blocking(gfp))
+ return __iommu_alloc_atomic(dev, size, handle, gfp, attrs);
+
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size, attrs);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return pages;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ __builtin_return_address(0));
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size, attrs);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+ bool coherent = is_dma_coherent(dev, attrs);
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ coherent);
+
+ if (!pages)
+ return -ENXIO;
+
+ do {
+ int ret = vm_insert_page(vma, uaddr, *pages++);
+ if (ret) {
+ pr_err("Remapping memory failed: %d\n", ret);
+ return ret;
+ }
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+ return 0;
+}
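A hedged sketch of the caller side: a driver's mmap file operation forwarding to dma_mmap_attrs(), which reaches arm_iommu_mmap_attrs() once the device has been switched to iommu_ops. The example_buf structure and its fields are assumptions for illustration only:

/* Hypothetical driver bookkeeping, not part of the patch */
struct example_buf {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t handle;
	size_t size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_buf *buf = file->private_data;
	DEFINE_DMA_ATTRS(attrs);

	/* vm_page_prot is fixed up by arm_iommu_mmap_attrs() */
	return dma_mmap_attrs(buf->dev, vma, buf->cpu_addr,
			      buf->handle, buf->size, &attrs);
}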
+
+/*
+ * Free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct page **pages;
+ size = PAGE_ALIGN(size);
+
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
+ return;
+ }
+
+ pages = __iommu_get_pages(cpu_addr, attrs);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
+ }
+
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);
+
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
+}
+
+int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ if (!pages)
+ return -ENXIO;
+
+ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+ GFP_KERNEL);
+}
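A brief, hedged usage sketch: exporting an IOMMU-backed allocation as a scatter-gather table (for example for dma-buf sharing) goes through dma_get_sgtable(), which lands in arm_iommu_get_sgtable() above. Parameter names are illustrative:

static int example_export(struct device *dev, void *cpu_addr,
			  dma_addr_t handle, size_t size,
			  struct sg_table *sgt)
{
	/* Fills sgt from the pages backing cpu_addr; caller frees it */
	return dma_get_sgtable(dev, sgt, cpu_addr, handle, size);
}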
+
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+ int prot;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ prot = IOMMU_READ | IOMMU_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ prot = IOMMU_READ;
+ break;
+ case DMA_FROM_DEVICE:
+ prot = IOMMU_WRITE;
+ break;
+ default:
+ prot = 0;
+ }
+
+ return prot;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int ret, i;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int total_length = 0, current_offset = 0;
+ dma_addr_t iova;
+ int prot = __dma_direction_to_prot(dir);
+
+ for_each_sg(sg, s, nents, i)
+ total_length += s->length;
+
+ iova = __alloc_iova(mapping, total_length);
+ if (iova == DMA_ERROR_CODE) {
+ dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
+ return 0;
+ }
+ prot = __get_iommu_pgprot(attrs, prot,
+ is_dma_coherent(dev, attrs));
+
+ ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+ if (ret != total_length) {
+ __free_iova(mapping, iova, total_length);
+ return 0;
+ }
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = iova + current_offset;
+ s->dma_length = total_length - current_offset;
+ current_offset += s->length;
+ }
+
+ return nents;
+}
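To illustrate the contract documented above (one contiguous IOVA range covering the whole list), a hedged sketch of a two-page dma_map_sg() round trip; the pages and transfer direction are arbitrary example choices:

#include <linux/scatterlist.h>

static int example_map_two_pages(struct device *dev,
				 struct page *p0, struct page *p1)
{
	struct scatterlist sgl[2];
	int mapped;

	sg_init_table(sgl, 2);
	sg_set_page(&sgl[0], p0, PAGE_SIZE, 0);
	sg_set_page(&sgl[1], p1, PAGE_SIZE, 0);

	mapped = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Program the device with sg_dma_address()/sg_dma_len() here */

	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
	return 0;
}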
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int total_length = sg_dma_len(sg);
+ dma_addr_t iova = sg_dma_address(sg);
+
+ total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, total_length);
+ __free_iova(mapping, iova, total_length);
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = sg_dma_address(sg);
+ bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+ if (iova_coherent)
+ return;
+
+ for_each_sg(sg, s, nents, i)
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = sg_dma_address(sg);
+ bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+ if (iova_coherent)
+ return;
+
+ for_each_sg(sg, s, nents, i)
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_coherent_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, prot, len, start_offset, map_offset;
+
+ map_offset = offset & ~PAGE_MASK;
+ start_offset = offset & PAGE_MASK;
+ len = PAGE_ALIGN(map_offset + size);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ prot = __dma_direction_to_prot(dir);
+ prot = __get_iommu_pgprot(attrs, prot,
+ is_dma_coherent(dev, attrs));
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+ start_offset, len, prot);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + map_offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!is_dma_coherent(dev, attrs) &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!(is_dma_coherent(dev, attrs) ||
+ dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)))
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+ bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
+
+ if (!iova_coherent)
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+ bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
+
+ if (!iova_coherent)
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_iommu_dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ if (!mapping) {
+ dev_warn(dev, "No IOMMU mapping for device\n");
+ return 0;
+ }
+
+ return iommu_dma_supported(mapping->domain, dev, mask);
+}
+
+static int arm_iommu_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return dma_addr == DMA_ERROR_CODE;
+}
+
+const struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .set_dma_mask = arm_dma_set_mask,
+ .dma_supported = arm_iommu_dma_supported,
+ .mapping_error = arm_iommu_mapping_error,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: maximum size of the valid IO address space
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+ unsigned int bits = size >> PAGE_SHIFT;
+ unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!bitmap_size)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
+ if (!mapping->bitmap)
+ mapping->bitmap = vzalloc(bitmap_size);
+
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = bits;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kvfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kvfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+EXPORT_SYMBOL(arm_iommu_release_mapping);
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+ int s1_bypass = 0, is_fast = 0;
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+ if (is_fast)
+ return fast_smmu_attach_device(dev, mapping);
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ if (!s1_bypass)
+ set_dma_ops(dev, &iommu_ops);
+
+ pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+EXPORT_SYMBOL(arm_iommu_attach_device);
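Putting the exported helpers together, a hedged sketch of the expected probe-time flow; the IOVA window base and size are arbitrary example values, and error handling is abbreviated:

static int example_probe_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x10000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Replaces dev's dma_map_ops with iommu_ops on success */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		arm_iommu_release_mapping(mapping);
	return ret;
}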
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping;
+ int is_fast, s1_bypass = 0;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+ if (is_fast) {
+ fast_smmu_detach_device(dev, mapping);
+ return;
+ }
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+
+ if (msm_dma_unmap_all_for_dev(dev))
+ dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+
+ iommu_detach_device(mapping->domain, dev);
+ kref_put(&mapping->kref, release_iommu_mapping);
+ dev->archdata.mapping = NULL;
+ if (!s1_bypass)
+ set_dma_ops(dev, NULL);
+
+ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL(arm_iommu_detach_device);
+
+#endif
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5a3117c287ca..4970252612c2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,9 +40,35 @@
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include <asm/edac.h>
+#include <soc/qcom/scm.h>
+
+#include <trace/events/exception.h>
static const char *fault_name(unsigned int esr);
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+{
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, esr))
+ ret = 1;
+ preempt_enable();
+ }
+
+ return ret;
+}
+#else
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+{
+ return 0;
+}
+#endif
+
/*
* Dump out the page tables associated with 'addr' in mm 'mm'.
*/
@@ -176,6 +202,8 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
+ trace_user_fault(tsk, addr, esr);
+
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
@@ -278,6 +306,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (notify_page_fault(regs, esr))
+ return 0;
+
tsk = current;
mm = tsk->mm;
@@ -297,16 +328,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (is_el0_instruction_abort(esr)) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
+ } else if (((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) ||
+ ((esr & ESR_ELx_CM) && !(mm_flags & FAULT_FLAG_USER))) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
if (addr < USER_DS && is_permission_fault(esr, regs)) {
- /* regs->orig_addr_limit may be 0 if we entered from EL0 */
- if (regs->orig_addr_limit == KERNEL_DS)
- die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
-
if (is_el1_instruction_abort(esr))
die("Attempting to execute userspace memory", regs, esr);
@@ -429,6 +457,19 @@ no_context:
}
/*
+ * A TLB conflict is already handled in EL2. This routine should return
+ * zero so that do_mem_abort() does not crash the kernel thinking the
+ * TLB conflict was not handled.
+ */
+#ifdef CONFIG_QCOM_TLB_EL2_HANDLER
+static int do_tlb_conf_fault(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+#endif
+/*
* First Level Translation Fault Handler
*
* We enter here because the first level page table doesn't contain a valid
@@ -461,6 +502,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
+ arm64_check_cache_ecc(NULL);
return 1;
}
@@ -518,7 +560,11 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 45" },
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
+#ifdef CONFIG_QCOM_TLB_EL2_HANDLER
+ { do_tlb_conf_fault, SIGBUS, 0, "TLB conflict abort" },
+#else
{ do_bad, SIGBUS, 0, "TLB conflict abort" },
+#endif
{ do_bad, SIGBUS, 0, "unknown 49" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
@@ -564,6 +610,22 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (addr > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
+ local_irq_enable();
+ do_mem_abort(addr, esr, regs);
+}
+
/*
* Handle stack alignment exceptions.
*/
@@ -653,6 +715,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
return rv;
}
+NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
int cpu_enable_pan(void *__unused)
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index d9c664ed6104..298db9789fc7 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
+EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 60e113a7c5db..2e7702bd7e57 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -155,6 +155,7 @@ static void __init arm64_memory_present(void)
#endif
static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
+static phys_addr_t bootloader_memory_limit;
/*
* Limit the memory size that was specified via FDT.
@@ -208,6 +209,11 @@ void __init arm64_memblock_init(void)
* via the linear mapping.
*/
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+ /*
+ * Save the bootloader-imposed memory limit before we overwrite
+ * memblock.
+ */
+ bootloader_memory_limit = memblock_end_of_DRAM();
memblock_enforce_memory_limit(memory_limit);
memblock_add(__pa_symbol(_text), (u64)(_end - _text));
}
@@ -223,7 +229,7 @@ void __init arm64_memblock_init(void)
* memory spans, randomize the linear region as well.
*/
if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
- range = range / ARM64_MEMSTART_ALIGN + 1;
+ range /= ARM64_MEMSTART_ALIGN;
memstart_addr -= ARM64_MEMSTART_ALIGN *
((range * memstart_offset_seed) >> 16);
}
@@ -432,6 +438,11 @@ void __init mem_init(void)
}
}
+static inline void poison_init_mem(void *s, size_t count)
+{
+ memset(s, 0, count);
+}
+
void free_initmem(void)
{
free_initmem_default(0);
@@ -457,6 +468,18 @@ static int __init keepinitrd_setup(char *__unused)
__setup("keepinitrd", keepinitrd_setup);
#endif
+#ifdef CONFIG_KERNEL_TEXT_RDONLY
+void set_kernel_text_ro(void)
+{
+ unsigned long start = PFN_ALIGN(_stext);
+ unsigned long end = PFN_ALIGN(_etext);
+
+ /*
+ * Set the kernel identity mapping for text RO.
+ */
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+}
+#endif
/*
* Dump out memory limit information on panic.
*/
@@ -481,3 +504,137 @@ static int __init register_mem_limit_dumper(void)
return 0;
}
__initcall(register_mem_limit_dumper);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+{
+ pg_data_t *pgdat;
+ struct zone *zone;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long end_pfn = start_pfn + nr_pages;
+ unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
+ int ret;
+
+ if (end_pfn > max_sparsemem_pfn) {
+ pr_err("end_pfn too big");
+ return -1;
+ }
+ hotplug_paging(start, size);
+
+ /*
+ * Mark the first page in the range as unusable. This is needed
+ * because __add_section (within __add_pages) wants pfn_valid
+ * of it to be false, and on arm64 pfn_valid is implemented by
+ * just checking the nomap flag for existing blocks.
+ *
+ * A small trick here is that __add_section() requires only
+ * phys_start_pfn (that is, the first pfn of a section) to be
+ * invalid. Regardless of whether it was assumed (by the function
+ * author) that all pfns within a section are either all valid
+ * or all invalid, it allows us to avoid looping twice (once here,
+ * and again when memblock_clear_nomap() is called) through all
+ * pfns of the section, modifying only one pfn. Thanks to that,
+ * further, in __add_zone() only this very first pfn is skipped,
+ * and the corresponding page is not flagged reserved. Therefore
+ * it is enough to correct this setup only for it.
+ *
+ * When arch_add_memory() returns, the walk_memory_range() function
+ * is called and passed the online_memory_block() callback, whose
+ * execution finally reaches the memory_block_action() function,
+ * where again only the first pfn of a memory block is checked to
+ * be reserved. Above, it was the first pfn of a section; here it
+ * is the first pfn of a block, but
+ * (drivers/base/memory.c):
+ *	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+ * (include/linux/memory.h):
+ *	#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+ * so we can treat blocks and sections equivalently.
+ */
+ memblock_mark_nomap(start, 1 << PAGE_SHIFT);
+
+ pgdat = NODE_DATA(nid);
+
+ zone = pgdat->node_zones +
+ zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
+ ret = __add_pages(nid, zone, start_pfn, nr_pages);
+
+ /*
+ * Make the pages usable after they have been added.
+ * This will make pfn_valid return true
+ */
+ memblock_clear_nomap(start, 1 << PAGE_SHIFT);
+
+ /*
+ * This is a hack to avoid having to mix arch specific code
+ * into arch independent code. SetPageReserved is supposed
+ * to be called by __add_zone (within __add_section, within
+ * __add_pages). However, when it is called there, it assumes that
+ * pfn_valid returns true. For the way pfn_valid is implemented
+ * in arm64 (a check on the nomap flag), the only way to make
+ * this evaluate true inside __add_zone is to clear the nomap
+ * flags of blocks in architecture independent code.
+ *
+ * To avoid this, we set the Reserved flag here after we cleared
+ * the nomap flag in the line above.
+ */
+ SetPageReserved(pfn_to_page(start_pfn));
+
+ if (ret)
+ pr_warn("%s: Problem encountered in __add_pages() ret=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static void kernel_physical_mapping_remove(unsigned long start,
+ unsigned long end)
+{
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);
+
+ remove_pagetable(start, end, true);
+}
+
+int arch_remove_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(start_pfn);
+ struct zone *zone;
+ int ret = 0;
+
+ zone = page_zone(page);
+ ret = __remove_pages(zone, start_pfn, nr_pages);
+ WARN_ON_ONCE(ret);
+
+ kernel_physical_mapping_remove(start, start + size);
+
+ return ret;
+}
+
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+static int arm64_online_page(struct page *page)
+{
+ unsigned long target_pfn = page_to_pfn(page);
+ unsigned long limit = __phys_to_pfn(bootloader_memory_limit);
+
+ if (target_pfn >= limit)
+ return -EINVAL;
+
+ __online_page_set_limits(page);
+ __online_page_increment_counters(page);
+ __online_page_free(page);
+
+ return 0;
+}
+
+static int __init arm64_memory_hotplug_init(void)
+{
+ set_online_page_callback(&arm64_online_page);
+ return 0;
+}
+subsys_initcall(arm64_memory_hotplug_init);
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ddfe90299048..e8b8590f553a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -29,6 +29,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
#include <linux/mm.h>
#include <asm/barrier.h>
@@ -61,6 +63,8 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+static bool dma_overlap(phys_addr_t start, phys_addr_t end);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -210,7 +214,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- block_mappings_allowed(pgtable_alloc)) {
+ block_mappings_allowed(pgtable_alloc) &&
+ !dma_overlap(phys, phys + next - addr)) {
pmd_t old_pmd =*pmd;
pmd_set_huge(pmd, phys, prot);
/*
@@ -270,7 +275,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
* For 4K granule only, attempt to put down a 1GB block
*/
if (use_1G_block(addr, next, phys) &&
- block_mappings_allowed(pgtable_alloc)) {
+ block_mappings_allowed(pgtable_alloc) &&
+ !dma_overlap(phys, phys + next - addr)) {
pud_t old_pud = *pud;
pud_set_huge(pud, phys, prot);
@@ -386,7 +392,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
- unsigned long kernel_start = __pa_symbol(_stext);
+ unsigned long kernel_start = __pa_symbol(_text);
unsigned long kernel_end = __pa_symbol(__init_begin);
/*
@@ -403,7 +409,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
}
/*
- * This block overlaps the kernel text/rodata mapping.
+ * This block overlaps the kernel text/rodata mappings.
* Map the portion(s) which don't overlap.
*/
if (start < kernel_start)
@@ -418,7 +424,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
early_pgtable_alloc);
/*
- * Map the linear alias of the [_stext, __init_begin) interval as
+ * Map the linear alias of the [_text, __init_begin) interval as
* read-only/non-executable. This makes the contents of the
* region accessible to subsystems such as hibernate, but
* protects it from inadvertent modification or execution.
@@ -450,8 +456,8 @@ void mark_rodata_ro(void)
{
unsigned long section_size;
- section_size = (unsigned long)_etext - (unsigned long)_stext;
- create_mapping_late(__pa_symbol(_stext), (unsigned long)_stext,
+ section_size = (unsigned long)_etext - (unsigned long)_text;
+ create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
section_size, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
@@ -473,8 +479,8 @@ void fixup_init(void)
unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
-static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
- pgprot_t prot, struct vm_struct *vma)
+static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
+ pgprot_t prot, struct vm_struct *vma)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
@@ -532,11 +538,11 @@ static void __init map_kernel(pgd_t *pgd)
{
static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
- map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
- map_kernel_chunk(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
- map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
- &vmlinux_init);
- map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+ map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+ map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
+ map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+ &vmlinux_init);
+ map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
/*
@@ -564,6 +570,37 @@ static void __init map_kernel(pgd_t *pgd)
kasan_copy_shadow(pgd);
}
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
+
+static int dma_mmu_remap_num;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
+
+static bool dma_overlap(phys_addr_t start, phys_addr_t end)
+{
+ int i;
+
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t dma_base = dma_mmu_remap[i].base;
+ phys_addr_t dma_end = dma_mmu_remap[i].base +
+ dma_mmu_remap[i].size;
+
+ if ((dma_base < end) && (dma_end > start))
+ return true;
+ }
+ return false;
+}
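For reference, a hedged sketch of the early-boot producer side of dma_mmu_remap[]: on this tree dma_contiguous_reserve_area() is expected to invoke the arch fixup for each reserved CMA region (via the new asm/dma-contiguous.h), so any such range later fails the dma_overlap() check and is mapped with base pages instead of sections. The base address below is a made-up example:

static void __init example_cma_fixup(void)
{
	phys_addr_t base = 0x90000000;	/* hypothetical CMA placement */
	unsigned long size = SZ_64M;

	/* Recorded in dma_mmu_remap[], consulted by dma_overlap() */
	dma_contiguous_early_fixup(base, size);
}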
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps and sets up the zero page.
@@ -591,9 +628,6 @@ void __init paging_init(void)
pgd_clear_fixmap();
memblock_free(pgd_phys, PAGE_SIZE);
- /* Ensure the zero page is visible to the page table walker */
- dsb(ishst);
-
/*
* We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
* allocated with it.
@@ -604,6 +638,439 @@ void __init paging_init(void)
bootmem_init();
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+static phys_addr_t pgd_pgtable_alloc(void)
+{
+ void *ptr = (void *)__get_free_page(PGALLOC_GFP);
+
+ if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+ BUG();
+
+ /* Ensure the zeroed page is visible to the page table walker */
+ dsb(ishst);
+ return __pa(ptr);
+}
+
+/*
+ * hotplug_paging() is used by memory hotplug to build new page tables
+ * for hot-added memory.
+ */
+void hotplug_paging(phys_addr_t start, phys_addr_t size)
+{
+ struct page *pg;
+ phys_addr_t pgd_phys;
+ pgd_t *pgd;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ if (current->cpu != cpu)
+ sched_isolate_cpu(cpu);
+ pgd_phys = pgd_pgtable_alloc();
+ pgd = pgd_set_fixmap(pgd_phys);
+
+ memcpy(pgd, swapper_pg_dir, PAGE_SIZE);
+
+ __create_pgd_mapping(pgd, start, __phys_to_virt(start), size,
+ PAGE_KERNEL, pgd_pgtable_alloc);
+
+ cpu_replace_ttbr1(__va(pgd_phys));
+ memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+ cpu_replace_ttbr1(swapper_pg_dir);
+
+ pgd_clear_fixmap();
+
+ pg = phys_to_page(pgd_phys);
+ pgtable_page_dtor(pg);
+ __free_pages(pg, 0);
+ for_each_possible_cpu(cpu)
+ if (current->cpu != cpu)
+ sched_unisolate_cpu_unlocked(cpu);
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+#define PAGE_INUSE 0xFD
+
+static void free_pagetable(struct page *page, int order, bool direct)
+{
+ unsigned long magic;
+ unsigned int nr_pages = 1 << order;
+
+ /* bootmem page has reserved flag */
+ if (PageReserved(page)) {
+ __ClearPageReserved(page);
+
+ magic = (unsigned long)page->lru.next;
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
+ while (nr_pages--)
+ put_page_bootmem(page++);
+ } else {
+ while (nr_pages--)
+ free_reserved_page(page++);
+ }
+ } else {
+ /*
+ * Only direct pagetable allocations (those made via
+ * hotplug) call pgtable_page_ctor(); vmemmap pgtable
+ * allocations don't.
+ */
+ if (direct)
+ pgtable_page_dtor(page);
+
+ free_pages((unsigned long)page_address(page), order);
+ }
+}
+
+static void free_pte_table(pmd_t *pmd, bool direct)
+{
+ pte_t *pte_start, *pte;
+ struct page *page;
+ int i;
+
+ pte_start = (pte_t *) pmd_page_vaddr(*pmd);
+ /* Check if there is no valid entry left in the PTE table */
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ pte = pte_start + i;
+ if (!pte_none(*pte))
+ return;
+ }
+
+ page = pmd_page(*pmd);
+
+ free_pagetable(page, 0, direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pmd_clear(pmd);
+ spin_unlock(&init_mm.page_table_lock);
+}
+
+static void free_pmd_table(pud_t *pud, bool direct)
+{
+ pmd_t *pmd_start, *pmd;
+ struct page *page;
+ int i;
+
+ pmd_start = (pmd_t *) pud_page_vaddr(*pud);
+ /* Check if there is no valid entry in the PMD */
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd = pmd_start + i;
+ if (!pmd_none(*pmd))
+ return;
+ }
+
+ page = pud_page(*pud);
+
+ free_pagetable(page, 0, direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pud_clear(pud);
+ spin_unlock(&init_mm.page_table_lock);
+}
+
+/*
+ * When the PUD is folded on the PGD (three levels of paging),
+ * there's no need to free PUDs
+ */
+#if CONFIG_PGTABLE_LEVELS > 3
+static void free_pud_table(pgd_t *pgd, bool direct)
+{
+ pud_t *pud_start, *pud;
+ struct page *page;
+ int i;
+
+ pud_start = (pud_t *) pgd_page_vaddr(*pgd);
+ /* Check if there is no valid entry in the PUD */
+ for (i = 0; i < PTRS_PER_PUD; i++) {
+ pud = pud_start + i;
+ if (!pud_none(*pud))
+ return;
+ }
+
+ page = pgd_page(*pgd);
+
+ free_pagetable(page, 0, direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pgd_clear(pgd);
+ spin_unlock(&init_mm.page_table_lock);
+}
+#endif
+
+static void remove_pte_table(pte_t *pte, unsigned long addr,
+ unsigned long end, bool direct)
+{
+ unsigned long next;
+ void *page_addr;
+
+ for (; addr < end; addr = next, pte++) {
+ next = (addr + PAGE_SIZE) & PAGE_MASK;
+ if (next > end)
+ next = end;
+
+ if (!pte_present(*pte))
+ continue;
+
+ if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
+ /*
+ * Do not free direct mapping pages since they were
+ * freed when offlining, or are simply not in use.
+ */
+ if (!direct)
+ free_pagetable(pte_page(*pte), 0, direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pte_clear(&init_mm, addr, pte);
+ spin_unlock(&init_mm.page_table_lock);
+ } else {
+ /*
+ * If we are here, we are freeing vmemmap pages since
+ * direct mapped memory ranges to be freed are aligned.
+ *
+ * If we are not removing the whole page, it means
+ * other page structs in this page are being used and
+ * we cannot remove them. So fill the unused page structs
+ * with 0xFD, and remove the page when it is wholly
+ * filled with 0xFD.
+ */
+ memset((void *)addr, PAGE_INUSE, next - addr);
+
+ page_addr = page_address(pte_page(*pte));
+ if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
+ free_pagetable(pte_page(*pte), 0, direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pte_clear(&init_mm, addr, pte);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ }
+ }
+
+ /*
+ * I am adding this flush here in symmetry with the x86 code.
+ * Why do I need to call it here and not in remove_p[mu]d?
+ */
+ flush_tlb_all();
+}
+
+static void remove_pmd_table(pmd_t *pmd, unsigned long addr,
+ unsigned long end, bool direct)
+{
+ unsigned long next;
+ void *page_addr;
+ pte_t *pte;
+
+ for (; addr < end; addr = next, pmd++) {
+ next = pmd_addr_end(addr, end);
+
+ if (!pmd_present(*pmd))
+ continue;
+
+ /* Check if we are using 2MB section mappings */
+ if (pmd_sect(*pmd)) {
+ if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
+ if (!direct) {
+ free_pagetable(pmd_page(*pmd),
+ get_order(PMD_SIZE), direct);
+ }
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pmd_clear(pmd);
+ spin_unlock(&init_mm.page_table_lock);
+ } else {
+ /* If here, we are freeing vmemmap pages. */
+ memset((void *)addr, PAGE_INUSE, next - addr);
+
+ page_addr = page_address(pmd_page(*pmd));
+ if (!memchr_inv(page_addr, PAGE_INUSE,
+ PMD_SIZE)) {
+ free_pagetable(pmd_page(*pmd),
+ get_order(PMD_SIZE), direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pmd_clear(pmd);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ }
+ continue;
+ }
+
+ BUG_ON(!pmd_table(*pmd));
+
+ pte = pte_offset_map(pmd, addr);
+ remove_pte_table(pte, addr, next, direct);
+ free_pte_table(pmd, direct);
+ }
+}
+
+static void remove_pud_table(pud_t *pud, unsigned long addr,
+ unsigned long end, bool direct)
+{
+ unsigned long next;
+ pmd_t *pmd;
+ void *page_addr;
+
+ for (; addr < end; addr = next, pud++) {
+ next = pud_addr_end(addr, end);
+ if (!pud_present(*pud))
+ continue;
+ /*
+ * If we are using 4K granules, check if we are using
+ * 1GB section mapping.
+ */
+ if (pud_sect(*pud)) {
+ if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
+ if (!direct) {
+ free_pagetable(pud_page(*pud),
+ get_order(PUD_SIZE), direct);
+ }
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pud_clear(pud);
+ spin_unlock(&init_mm.page_table_lock);
+ } else {
+ /* If here, we are freeing vmemmap pages. */
+ memset((void *)addr, PAGE_INUSE, next - addr);
+
+ page_addr = page_address(pud_page(*pud));
+ if (!memchr_inv(page_addr, PAGE_INUSE,
+ PUD_SIZE)) {
+
+ free_pagetable(pud_page(*pud),
+ get_order(PUD_SIZE), direct);
+
+ /*
+ * This spin lock could only be taken in __pte_alloc_kernel
+ * in mm/memory.c and nowhere else (for arm64). Not sure if
+ * the function above can be called concurrently. When in doubt,
+ * I am leaving it here for now, but it probably can be removed.
+ */
+ spin_lock(&init_mm.page_table_lock);
+ pud_clear(pud);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ }
+ continue;
+ }
+
+ BUG_ON(!pud_table(*pud));
+
+ pmd = pmd_offset(pud, addr);
+ remove_pmd_table(pmd, addr, next, direct);
+ free_pmd_table(pud, direct);
+ }
+}
+
+void remove_pagetable(unsigned long start, unsigned long end, bool direct)
+{
+ unsigned long next;
+ unsigned long addr;
+ pgd_t *pgd;
+ pud_t *pud;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ if (current->cpu != cpu)
+ sched_isolate_cpu(cpu);
+ for (addr = start; addr < end; addr = next) {
+ next = pgd_addr_end(addr, end);
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ continue;
+
+ pud = pud_offset(pgd, addr);
+ remove_pud_table(pud, addr, next, direct);
+ /*
+ * When the PUD is folded on the PGD (three levels of paging),
+ * free_pmd_table() already cleared the PMD page and reset
+ * the corresponding PGD==PUD entry.
+ */
+#if CONFIG_PGTABLE_LEVELS > 3
+ free_pud_table(pgd, direct);
+#endif
+ }
+
+ flush_tlb_all();
+ for_each_possible_cpu(cpu)
+ if (current->cpu != cpu)
+ sched_unisolate_cpu_unlocked(cpu);
+}
+
+
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
/*
* Check whether a kernel address is valid (derived from arch/x86/).
*/
@@ -655,6 +1122,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
+ int ret = 0;
do {
next = pmd_addr_end(addr, end);
@@ -672,19 +1140,30 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
void *p = NULL;
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
- if (!p)
- return -ENOMEM;
+ if (!p) {
+#ifdef CONFIG_MEMORY_HOTPLUG
+ vmemmap_free(start, end);
+#endif
+ ret = -ENOMEM;
+ break;
+ }
set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
- return 0;
+ if (ret)
+ return vmemmap_populate_basepages(start, end, node);
+
+ return ret;
}
#endif /* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ remove_pagetable(start, end, false);
+#endif
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index ca6d268e3313..6ea71387ee12 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -70,6 +70,13 @@ static int change_memory_common(unsigned long addr, int numpages,
WARN_ON_ONCE(1);
}
+ if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
+ if (start < MODULES_VADDR || start >= MODULES_END)
+ return -EINVAL;
+
+ if (end < MODULES_VADDR || end >= MODULES_END)
+ return -EINVAL;
+ }
/*
* Kernel VA mappings are always live, and splitting live section
* mappings into page mappings may cause TLB conflicts. This means
@@ -139,4 +146,43 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
__pgprot(0),
__pgprot(PTE_VALID));
}
-#endif
+#ifdef CONFIG_HIBERNATION
+/*
+ * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
+ * is used to determine if a linear map page has been marked as not-valid by
+ * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
+ * This is based on kern_addr_valid(), which almost does what we need.
+ *
+ * Because this is only called on the kernel linear map, p?d_sect() implies
+ * p?d_present(). When debug_pagealloc is enabled, section mappings are
+ * disabled.
+ */
+bool kernel_page_present(struct page *page)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long addr = (unsigned long)page_address(page);
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ return false;
+
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ return false;
+ if (pud_sect(*pud))
+ return true;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return false;
+ if (pmd_sect(*pmd))
+ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_valid(*pte);
+}
+#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
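
kernel_page_present() exists for the hibernation core: with CONFIG_DEBUG_PAGEALLOC, pages may be unmapped from the linear map, and the snapshot code must map a page back in before copying it. The consumer looks roughly like this (abridged from kernel/power/snapshot.c of this era, not part of this patch):

    static void safe_copy_page(void *dst, struct page *s_page)
    {
    	if (kernel_page_present(s_page)) {
    		do_copy_page(dst, page_address(s_page));
    	} else {
    		kernel_map_pages(s_page, 1, 1);	/* map it back in */
    		do_copy_page(dst, page_address(s_page));
    		kernel_map_pages(s_page, 1, 0);	/* and hide it again */
    	}
    }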
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index bc0f0a6c9c23..b78688806652 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -24,6 +24,7 @@
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
@@ -43,6 +44,52 @@
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
+ * cpu_cache_off()
+ *
+ * Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+ mrs x0, sctlr_el1
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el1, x0
+ isb
+ ret
+ENDPROC(cpu_cache_off)
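
cpu_cache_off() is a read-modify-write of SCTLR_EL1 with bit 2 (the C bit) cleared, followed by an ISB so the change takes effect before the caller proceeds. The same operation in C, using the read_sysreg()/write_sysreg() helpers that later kernels provide (a sketch, not code from this tree):

    static inline void cpu_cache_off_c(void)
    {
    	u64 sctlr = read_sysreg(sctlr_el1);

    	sctlr &= ~BIT(2);		/* SCTLR_EL1.C: data cache enable */
    	write_sysreg(sctlr, sctlr_el1);
    	isb();				/* synchronize the context change */
    }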
+
+/*
+ * cpu_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the same state
+ * as it would be if it had been reset, and branch to what would be the
+ * reset vector. It must be executed with the flat identity mapping.
+ *
+ * - loc - location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_reset)
+ mrs x1, sctlr_el1
+ bic x1, x1, #1
+ msr sctlr_el1, x1 // disable the MMU
+ isb
+ ret x0
+ENDPROC(cpu_reset)
+
+ENTRY(cpu_soft_restart)
+ /* Save address of cpu_reset() and reset address */
+ mov x19, x0
+ mov x20, x1
+
+ /* Turn D-cache off */
+ bl cpu_cache_off
+
+ /* Push out all dirty data, and ensure cache is empty */
+ bl flush_cache_all
+
+ mov x0, x20
+ ret x19
+ENDPROC(cpu_soft_restart)
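
cpu_soft_restart() stashes its two arguments in callee-saved registers because cpu_cache_off() and flush_cache_all() clobber the argument registers; it then tail-calls cpu_reset() with the target address back in x0. In trees where this helper backs soft_restart(), the caller side has historically looked like the following (a sketch of the pre-kexec pattern, not part of this diff):

    void soft_restart(unsigned long addr)
    {
    	setup_mm_for_reboot();		/* switch to the idmap tables */
    	cpu_soft_restart(virt_to_phys(cpu_reset), addr);

    	/* cpu_reset() never returns */
    	BUG();
    }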
+
+/*
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).
@@ -63,58 +110,49 @@ ENTRY(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
- mrs x5, mair_el1
- mrs x6, cpacr_el1
- mrs x7, ttbr1_el1
- mrs x8, tcr_el1
- mrs x9, vbar_el1
- mrs x10, mdscr_el1
- mrs x11, oslsr_el1
- mrs x12, sctlr_el1
- mrs x13, tpidr_el1
- mrs x14, sp_el0
+ mrs x5, cpacr_el1
+ mrs x6, tcr_el1
+ mrs x7, vbar_el1
+ mrs x8, mdscr_el1
+ mrs x9, oslsr_el1
+ mrs x10, sctlr_el1
+ mrs x11, tpidr_el1
+ mrs x12, sp_el0
stp x2, x3, [x0]
- stp x4, x5, [x0, #16]
- stp x6, x7, [x0, #32]
- stp x8, x9, [x0, #48]
- stp x10, x11, [x0, #64]
- stp x12, x13, [x0, #80]
- str x14, [x0, #96]
+ stp x4, xzr, [x0, #16]
+ stp x5, x6, [x0, #32]
+ stp x7, x8, [x0, #48]
+ stp x9, x10, [x0, #64]
+ stp x11, x12, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
/**
* cpu_do_resume - restore CPU register context
*
- * x0: Physical address of context pointer
- * x1: ttbr0_el1 to be restored
- *
- * Returns:
- * sctlr_el1 value in x0
+ * x0: Address of context pointer
*/
+ .pushsection ".idmap.text", "ax"
ENTRY(cpu_do_resume)
- /*
- * Invalidate local tlb entries before turning on MMU
- */
- tlbi vmalle1
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
- ldp x6, x7, [x0, #32]
- ldp x8, x9, [x0, #48]
- ldp x10, x11, [x0, #64]
- ldp x12, x13, [x0, #80]
- ldr x14, [x0, #96]
+ ldp x6, x8, [x0, #32]
+ ldp x9, x10, [x0, #48]
+ ldp x11, x12, [x0, #64]
+ ldp x13, x14, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
- msr mair_el1, x5
msr cpacr_el1, x6
- msr ttbr0_el1, x1
- msr ttbr1_el1, x7
- tcr_set_idmap_t0sz x8, x7
+
+ /* Don't change t0sz here, mask those bits when restoring */
+ mrs x5, tcr_el1
+ bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+
msr tcr_el1, x8
msr vbar_el1, x9
msr mdscr_el1, x10
+ msr sctlr_el1, x12
msr tpidr_el1, x13
msr sp_el0, x14
/*
@@ -123,11 +161,10 @@ ENTRY(cpu_do_resume)
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
- mov x0, x12
- dsb nsh // Make sure local tlb invalidation completed
isb
ret
ENDPROC(cpu_do_resume)
+ .popsection
#endif
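
After this change the suspend context is twelve 8-byte slots (96 bytes); mair_el1, ttbr0_el1 and ttbr1_el1 are no longer saved because the resume path re-derives them, and one slot holds xzr padding. Assuming asm/suspend.h in this tree keeps NR_CTX_REGS in sync (an assumption, not shown in the diff), the layout the stp/ldp pairs imply is:

    /* Mirrors the stp/ldp layout in cpu_do_suspend()/cpu_do_resume(). */
    #define NR_CTX_REGS 12

    struct cpu_suspend_ctx {
    	u64 ctx_regs[NR_CTX_REGS];	/* [0]  tpidr_el0
    					 * [1]  tpidrro_el0
    					 * [2]  contextidr_el1
    					 * [3]  zero (xzr padding)
    					 * [4]  cpacr_el1
    					 * [5]  tcr_el1
    					 * [6]  vbar_el1
    					 * [7]  mdscr_el1
    					 * [8]  oslsr_el1
    					 * [9]  sctlr_el1
    					 * [10] tpidr_el1
    					 * [11] sp_el0 */
    	u64 sp;
    } __aligned(16);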
/*
@@ -185,6 +222,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
* Initialise the processor for turning the MMU on. Return in x0 the
* value of the SCTLR_EL1 register.
*/
+ .pushsection ".idmap.text", "ax"
ENTRY(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
@@ -273,5 +311,17 @@ ENDPROC(__cpu_setup)
*/
.type crval, #object
crval:
+#ifdef CONFIG_ARM64_ICACHE_DISABLE
+#define CR_IBIT 0
+#else
+#define CR_IBIT 0x1000
+#endif
+
+#ifdef CONFIG_ARM64_DCACHE_DISABLE
+#define CR_CBIT 0
+#else
+#define CR_CBIT 0x4
+#endif
.word 0xfcffffff // clear
- .word 0x34d5d91d // set
+	.word	0x34d5c919 | CR_IBIT | CR_CBIT	// set (I/C bits kept out of the base)
+ .popsection
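
crval is consumed by __cpu_setup() as a clear/set mask pair applied to SCTLR_EL1: the "clear" word selects the bits the kernel owns, the "set" word supplies their values. For the new CONFIG_ARM64_ICACHE_DISABLE/CONFIG_ARM64_DCACHE_DISABLE options to have any effect, the base "set" word must leave bit 12 (I) and bit 2 (C) clear so that CR_IBIT and CR_CBIT alone decide them; 0x34d5c919 is exactly 0x34d5d91d with those two bits removed, so the default (both caches enabled) is unchanged. The transformation, in C for reference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint32_t clear = 0xfcffffff;
    	const uint32_t set   = 0x34d5c919 | 0x1000 /* I */ | 0x4 /* C */;
    	uint32_t sctlr = 0;			/* pretend reset value */

    	/* What __cpu_setup() does with the crval pair. */
    	sctlr = (sctlr & ~clear) | set;
    	printf("SCTLR_EL1 = %#x\n", sctlr);	/* prints 0x34d5d91d */
    	return 0;
    }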