Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/Kbuild | 3
-rw-r--r--  arch/arm64/include/asm/app_api.h | 50
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 13
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 46
-rw-r--r--  arch/arm64/include/asm/assembler.h | 12
-rw-r--r--  arch/arm64/include/asm/cache.h | 12
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 27
-rw-r--r--  arch/arm64/include/asm/checksum.h | 51
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 15
-rw-r--r--  arch/arm64/include/asm/cputype.h | 21
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 5
-rw-r--r--  arch/arm64/include/asm/debugv8.h | 229
-rw-r--r--  arch/arm64/include/asm/device.h | 12
-rw-r--r--  arch/arm64/include/asm/dma-contiguous.h | 24
-rw-r--r--  arch/arm64/include/asm/dma-iommu.h | 64
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/arm64/include/asm/edac.h | 28
-rw-r--r--  arch/arm64/include/asm/elf.h | 16
-rw-r--r--  arch/arm64/include/asm/etmv4x.h | 385
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 8
-rw-r--r--  arch/arm64/include/asm/gpio.h | 32
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/insn.h | 41
-rw-r--r--  arch/arm64/include/asm/io.h | 86
-rw-r--r--  arch/arm64/include/asm/irq.h | 3
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 60
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 14
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 94
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 98
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 10
-rw-r--r--  arch/arm64/include/asm/memory.h | 3
-rw-r--r--  arch/arm64/include/asm/mmu.h | 48
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 9
-rw-r--r--  arch/arm64/include/asm/pci.h | 5
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 87
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 55
-rw-r--r--  arch/arm64/include/asm/probes.h | 35
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 4
-rw-r--r--  arch/arm64/include/asm/processor.h | 7
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 61
-rw-r--r--  arch/arm64/include/asm/sections.h | 28
-rw-r--r--  arch/arm64/include/asm/signal32.h | 46
-rw-r--r--  arch/arm64/include/asm/sparsemem.h | 4
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 1
-rw-r--r--  arch/arm64/include/asm/suspend.h | 32
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 20
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 1
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 6
-rw-r--r--  arch/arm64/include/asm/topology.h | 1
-rw-r--r--  arch/arm64/include/asm/traps.h | 7
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 3
-rw-r--r--  arch/arm64/include/asm/vdso.h | 3
-rw-r--r--  arch/arm64/include/asm/vdso_datapage.h | 25
-rw-r--r--  arch/arm64/include/asm/virt.h | 27
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 18
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 32
58 files changed, 1839 insertions(+), 202 deletions(-)
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 70fd9ffb58cf..213c78f84e56 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += bug.h
generic-y += bugs.h
-generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
@@ -14,6 +13,7 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += ftrace.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -36,7 +36,6 @@ generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
-generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/app_api.h b/arch/arm64/include/asm/app_api.h
new file mode 100644
index 000000000000..0e6a469cd683
--- /dev/null
+++ b/arch/arm64/include/asm/app_api.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_APP_API_H
+#define __ASM_APP_API_H
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+
+#define APP_SETTING_BIT 30
+#define MAX_ENTRIES 10
+
+/*
+ * APIs to set / clear the app setting bits
+ * in the register.
+ */
+#ifdef CONFIG_MSM_APP_API
+extern void set_app_setting_bit(uint32_t bit);
+extern void clear_app_setting_bit(uint32_t bit);
+extern void set_app_setting_bit_for_32bit_apps(void);
+extern void clear_app_setting_bit_for_32bit_apps(void);
+#else
+static inline void set_app_setting_bit(uint32_t bit) {}
+static inline void clear_app_setting_bit(uint32_t bit) {}
+static inline void set_app_setting_bit_for_32bit_apps(void) {}
+static inline void clear_app_setting_bit_for_32bit_apps(void) {}
+#endif
+
+#ifdef CONFIG_MSM_APP_SETTINGS
+extern void switch_app_setting_bit(struct task_struct *prev,
+ struct task_struct *next);
+extern void switch_32bit_app_setting_bit(struct task_struct *prev,
+ struct task_struct *next);
+extern void apply_app_setting_bit(struct file *file);
+extern bool use_app_setting;
+extern bool use_32bit_app_setting;
+extern bool use_32bit_app_setting_pro;
+#endif
+
+#endif
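
A minimal sketch of how scheduler code might consume the hooks declared above, assuming CONFIG_MSM_APP_SETTINGS so that the switch helpers and the use_* flags exist; the wrapper name prepare_app_setting_switch() is hypothetical and only illustrates the call pattern.

/* Hedged sketch: a hypothetical context-switch hook built on asm/app_api.h
 * (requires CONFIG_MSM_APP_SETTINGS for the symbols used below). */
#include <asm/app_api.h>

static inline void prepare_app_setting_switch(struct task_struct *prev,
					      struct task_struct *next)
{
	/* Only touch the implementation-defined setting when it has been
	 * enabled at runtime. */
	if (use_app_setting)
		switch_app_setting_bit(prev, next);

	if (use_32bit_app_setting || use_32bit_app_setting_pro)
		switch_32bit_app_setting_bit(prev, next);
}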
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8ec88e5b290f..30cf6f5961ef 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -103,7 +103,8 @@ static inline u64 gic_read_iar_common(void)
u64 irqstat;
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
- dsb(sy);
+ /* As per the architecture specification */
+ mb();
return irqstat;
}
@@ -132,6 +133,9 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
static inline void gic_write_pmr(u32 val)
{
asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+ /* As per the architecture specification */
+ isb();
+ mb();
}
static inline void gic_write_ctlr(u32 val)
@@ -149,6 +153,9 @@ static inline void gic_write_grpen1(u32 val)
static inline void gic_write_sgi1r(u64 val)
{
asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+ /* As per the architecture specification */
+ isb();
+ mb();
}
static inline u32 gic_read_sre(void)
@@ -165,8 +172,8 @@ static inline void gic_write_sre(u32 val)
isb();
}
-#define gic_read_typer(c) readq_relaxed(c)
-#define gic_write_irouter(v, c) writeq_relaxed(v, c)
+#define gic_read_typer(c) readq_relaxed_no_log(c)
+#define gic_write_irouter(v, c) writeq_relaxed_no_log(v, c)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index fbe0ca31a99c..902db125d994 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -20,6 +20,7 @@
#define __ASM_ARCH_TIMER_H
#include <asm/barrier.h>
+#include <asm/sysreg.h>
#include <linux/bug.h>
#include <linux/init.h>
@@ -38,19 +39,19 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
+ write_sysreg(val, cntp_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
- asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
+ write_sysreg(val, cntp_tval_el0);
break;
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
+ write_sysreg(val, cntv_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
- asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
+ write_sysreg(val, cntv_tval_el0);
break;
}
}
@@ -61,48 +62,38 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
- u32 val;
-
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
- break;
+ return read_sysreg(cntp_ctl_el0);
case ARCH_TIMER_REG_TVAL:
- asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
- break;
+ return read_sysreg(cntp_tval_el0);
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val));
- break;
+ return read_sysreg(cntv_ctl_el0);
case ARCH_TIMER_REG_TVAL:
- asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
- break;
+ return read_sysreg(cntv_tval_el0);
}
}
- return val;
+ BUG();
}
static inline u32 arch_timer_get_cntfrq(void)
{
- u32 val;
- asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
- return val;
+ return read_sysreg(cntfrq_el0);
}
static inline u32 arch_timer_get_cntkctl(void)
{
- u32 cntkctl;
- asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- return cntkctl;
+ return read_sysreg(cntkctl_el1);
}
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
- asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
+ write_sysreg(cntkctl, cntkctl_el1);
}
static inline u64 arch_counter_get_cntpct(void)
@@ -117,10 +108,15 @@ static inline u64 arch_counter_get_cntpct(void)
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
-
isb();
- asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
-
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define L32_BITS 0x00000000FFFFFFFF
+ do {
+ cval = read_sysreg(cntvct_el0);
+ } while ((cval & L32_BITS) == L32_BITS);
+#else
+ cval = read_sysreg(cntvct_el0);
+#endif
return cval;
}
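
The open-coded mrs/msr sequences above are replaced by the read_sysreg()/write_sysreg() helpers pulled in through the new <asm/sysreg.h> include, and the CONFIG_MSM_TIMER_LEAP branch simply re-reads CNTVCT_EL0 whenever its low 32 bits read back as all ones. Roughly, and only as a sketch rather than the kernel's exact definitions, the helpers expand to:

/* Approximate shape of the sysreg accessors; the real definitions live in
 * asm/sysreg.h and may differ in detail. */
#define sketch_read_sysreg(r) ({					\
	u64 __val;							\
	asm volatile("mrs %0, " #r : "=r" (__val));			\
	__val;								\
})

#define sketch_write_sysreg(v, r) do {					\
	u64 __val = (u64)(v);						\
	asm volatile("msr " #r ", %0" : : "r" (__val));			\
} while (0)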
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1a1516fabd07..7dcfd83ff5e8 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -64,6 +64,18 @@
.endm
/*
+ * Save/disable and restore interrupts.
+ */
+ .macro save_and_disable_irqs, olddaif
+ mrs \olddaif, daif
+ disable_irq
+ .endm
+
+ .macro restore_irqs, olddaif
+ msr daif, \olddaif
+ .endm
+
+/*
* Enable and disable debug exceptions.
*/
.macro disable_dbg
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5082b30bc2c0..f9359d32fae5 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -18,17 +18,17 @@
#include <asm/cachetype.h>
-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT 6
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
- * sure that all such allocations are cache aligned. Otherwise,
- * unrelated code may cause parts of the buffer to be read into the
- * cache before the transfer is done, causing old data to be seen by
- * the CPU.
+ * sure that all such allocations are aligned to the maximum *known*
+ * cache line size on ARMv8 systems. Otherwise, unrelated code may
+ * cause parts of the buffer to be read into the cache before the
+ * transfer is done, causing old data to be seen by the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 22dda613f9c9..df06d37374cc 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,6 +40,10 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT or ASID-tagged VIVT I-cache.
*
+ * flush_cache_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
+ *
* flush_cache_mm(mm)
*
* Clean and invalidate all user space cache entries
@@ -65,6 +69,7 @@
* - kaddr - page address
* - size - region size
*/
+extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
@@ -86,6 +91,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
/*
* Copy user data from/to a page which is mapped into a different
@@ -155,5 +166,21 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_KERNEL_TEXT_RDONLY
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_ro(void) { }
+#endif
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
+#ifdef CONFIG_FREE_PAGES_RDONLY
+#define mark_addr_rdonly(a) set_memory_ro((unsigned long)a, 1);
+#define mark_addr_rdwrite(a) set_memory_rw((unsigned long)a, 1);
+#else
+#define mark_addr_rdonly(a)
+#define mark_addr_rdwrite(a)
+#endif
#endif
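
A hedged sketch of how the newly exported dmac_clean_range()/dmac_inv_range() aliases are typically used around a transfer with a non-coherent device; the two example_ functions, buffer and length are placeholders for driver-specific code.

/* Sketch only: manual cache maintenance around non-coherent DMA. */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_dma_to_device(void *buf, size_t len)
{
	/* CPU filled the buffer: clean it so the device sees the data. */
	dmac_clean_range(buf, buf + len);
	/* ... kick off the memory-to-device transfer here ... */
}

static void example_dma_from_device(void *buf, size_t len)
{
	/* Device will write the buffer: invalidate stale lines first so the
	 * CPU does not later read old data from the cache. */
	dmac_inv_range(buf, buf + len);
	/* ... kick off the device-to-memory transfer, then read buf ... */
}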
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
new file mode 100644
index 000000000000..09f65339d66d
--- /dev/null
+++ b/arch/arm64/include/asm/checksum.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CHECKSUM_H
+#define __ASM_CHECKSUM_H
+
+#include <linux/types.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum += (sum >> 16) | (sum << 16);
+ return ~(__force __sum16)(sum >> 16);
+}
+#define csum_fold csum_fold
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ __uint128_t tmp;
+ u64 sum;
+
+ tmp = *(const __uint128_t *)iph;
+ iph += 16;
+ ihl -= 4;
+ tmp += ((tmp >> 64) | (tmp << 64));
+ sum = tmp >> 64;
+ do {
+ sum += *(const u32 *)iph;
+ iph += 4;
+ } while (--ihl);
+
+ sum += ((sum >> 32) | (sum << 32));
+ return csum_fold(sum >> 32);
+}
+#define ip_fast_csum ip_fast_csum
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CHECKSUM_H */
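
As a quick sanity check of the csum_fold() added above: folding the 32-bit partial sum 0x12345678 adds the rotated halves (0x1234 + 0x5678 = 0x68ac) and returns their one's complement, 0x9753. A standalone user-space sketch of the same arithmetic:

/* User-space sketch mirroring the csum_fold() arithmetic above. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	sum += (sum >> 16) | (sum << 16);	/* add the rotated halves */
	return (uint16_t)~(sum >> 16);		/* one's complement of the folded high half */
}

int main(void)
{
	printf("0x%04x\n", fold(0x12345678));	/* prints 0x9753 */
	return 0;
}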
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 67fff4a9212c..2c94aecc8992 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -33,17 +33,20 @@
#define ARM64_HAS_NO_HW_PREFETCH 8
#define ARM64_HAS_UAO 9
#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_NCAPS 24
+#define ARM64_WORKAROUND_CAVIUM_27456 11
+#define ARM64_HAS_VIRT_HOST_EXTN 12
+#define ARM64_HARDEN_BRANCH_PREDICTOR 13
+#define ARM64_UNMAP_KERNEL_AT_EL0 14
+#define ARM64_HAS_32BIT_EL0 15
+#define ARM64_NCAPS 16
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+extern const char *machine_name;
+
/* CPU feature register tracking */
enum ftr_type {
FTR_EXACT, /* Use a predefined safe value */
@@ -177,7 +180,9 @@ void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_errata(void);
+void __init enable_errata_workarounds(void);
#ifdef CONFIG_HOTPLUG_CPU
void verify_local_cpu_capabilities(void);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 0a23fb0600c8..f857adc51b0f 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -32,6 +32,10 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+#define MMFR0_16KGRAN_SIZE 15
+#define MMFR0_16KGRAN_SHFT 20
+#define MMFR0_EL1_16KGRAN_MASK (MMFR0_16KGRAN_SIZE << MMFR0_16KGRAN_SHFT)
+
#define MIDR_REVISION_MASK 0xf
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
#define MIDR_PARTNUM_SHIFT 4
@@ -78,12 +82,19 @@
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
+#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
-#define ARM_CPU_PART_CORTEX_A57 0xD07
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A55 0xD05
+#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A72 0xD08
+#define ARM_CPU_PART_CORTEX_A73 0xD09
+#define ARM_CPU_PART_CORTEX_A75 0xD0A
+#define ARM_CPU_PART_KRYO2XX_GOLD 0x800
+#define ARM_CPU_PART_KRYO2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO 0x200
#define APM_CPU_PART_POTENZA 0x000
@@ -92,7 +103,15 @@
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_KRYO2XX_SILVER \
+ MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
+#define MIDR_KRYO2XX_GOLD \
+ MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_GOLD)
+#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#ifndef __ASSEMBLY__
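
A hedged sketch of how the Qualcomm/Kryo MIDR values added above are typically matched at runtime, using the read_cpuid_id(), MIDR_IMPLEMENTOR() and MIDR_PARTNUM() helpers that already live in this header; the wrapper function is hypothetical.

/* Sketch only: runtime check for the Qualcomm Kryo parts defined above. */
#include <linux/types.h>
#include <asm/cputype.h>

static bool cpu_is_qcom_kryo(void)
{
	u32 midr = read_cpuid_id();

	if (MIDR_IMPLEMENTOR(midr) != ARM_CPU_IMP_QCOM)
		return false;

	switch (MIDR_PARTNUM(midr)) {
	case QCOM_CPU_PART_KRYO:
	case ARM_CPU_PART_KRYO2XX_GOLD:
	case ARM_CPU_PART_KRYO2XX_SILVER:
		return true;
	default:
		return false;
	}
}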
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7c876c..4b6b3f72a215 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -66,6 +66,11 @@
#define CACHE_FLUSH_IS_SAFE 1
+/* kprobes BRK opcodes with ESR encoding */
+#define BRK64_ESR_MASK 0xFFFF
+#define BRK64_ESR_KPROBES 0x0004
+#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
#define DBG_ESR_EVT_VECC 0x5
diff --git a/arch/arm64/include/asm/debugv8.h b/arch/arm64/include/asm/debugv8.h
new file mode 100644
index 000000000000..6a2538279f39
--- /dev/null
+++ b/arch/arm64/include/asm/debugv8.h
@@ -0,0 +1,229 @@
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_DEBUGV8_H
+#define __ASM_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for AArch64 */
+#define dbg_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for AArch64 */
+#define dbg_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for AArch64 */
+#define dbg_write(val, reg) WSYS_##reg(val)
+
+#define MRSL(reg) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+#define MRSQ(reg) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+#define MSR(val, reg) \
+({ \
+asm volatile("msr "#reg", %0" : : "r" (val)); \
+})
+
+/*
+ * Debug Feature Register
+ *
+ * Read only
+ */
+#define RSYSQ_ID_AA64DFR0_EL1() MRSQ(ID_AA64DFR0_EL1)
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv8
+ *
+ * Read only
+ * MDCCSR_EL0, MDRAR_EL1, OSLSR_EL1, DBGDTRRX_EL0, DBGAUTHSTATUS_EL1
+ *
+ * Write only
+ * DBGDTRTX_EL0, OSLAR_EL1
+ */
+/* 32 bit registers */
+#define RSYSL_DBGDTRRX_EL0() MRSL(DBGDTRRX_EL0)
+#define RSYSL_MDCCSR_EL0() MRSL(MDCCSR_EL0)
+#define RSYSL_MDSCR_EL1() MRSL(MDSCR_EL1)
+#define RSYSL_OSDTRRX_EL1() MRSL(OSDTRRX_EL1)
+#define RSYSL_OSDTRTX_EL1() MRSL(OSDTRTX_EL1)
+#define RSYSL_OSDLR_EL1() MRSL(OSDLR_EL1)
+#define RSYSL_OSLSR_EL1() MRSL(OSLSR_EL1)
+#define RSYSL_MDCCINT_EL1() MRSL(MDCCINT_EL1)
+#define RSYSL_OSECCR_EL1() MRSL(OSECCR_EL1)
+#define RSYSL_DBGPRCR_EL1() MRSL(DBGPRCR_EL1)
+#define RSYSL_DBGBCR0_EL1() MRSL(DBGBCR0_EL1)
+#define RSYSL_DBGBCR1_EL1() MRSL(DBGBCR1_EL1)
+#define RSYSL_DBGBCR2_EL1() MRSL(DBGBCR2_EL1)
+#define RSYSL_DBGBCR3_EL1() MRSL(DBGBCR3_EL1)
+#define RSYSL_DBGBCR4_EL1() MRSL(DBGBCR4_EL1)
+#define RSYSL_DBGBCR5_EL1() MRSL(DBGBCR5_EL1)
+#define RSYSL_DBGBCR6_EL1() MRSL(DBGBCR6_EL1)
+#define RSYSL_DBGBCR7_EL1() MRSL(DBGBCR7_EL1)
+#define RSYSL_DBGBCR8_EL1() MRSL(DBGBCR8_EL1)
+#define RSYSL_DBGBCR9_EL1() MRSL(DBGBCR9_EL1)
+#define RSYSL_DBGBCR10_EL1() MRSL(DBGBCR10_EL1)
+#define RSYSL_DBGBCR11_EL1() MRSL(DBGBCR11_EL1)
+#define RSYSL_DBGBCR12_EL1() MRSL(DBGBCR12_EL1)
+#define RSYSL_DBGBCR13_EL1() MRSL(DBGBCR13_EL1)
+#define RSYSL_DBGBCR14_EL1() MRSL(DBGBCR14_EL1)
+#define RSYSL_DBGBCR15_EL1() MRSL(DBGBCR15_EL1)
+#define RSYSL_DBGWCR0_EL1() MRSL(DBGWCR0_EL1)
+#define RSYSL_DBGWCR1_EL1() MRSL(DBGWCR1_EL1)
+#define RSYSL_DBGWCR2_EL1() MRSL(DBGWCR2_EL1)
+#define RSYSL_DBGWCR3_EL1() MRSL(DBGWCR3_EL1)
+#define RSYSL_DBGWCR4_EL1() MRSL(DBGWCR4_EL1)
+#define RSYSL_DBGWCR5_EL1() MRSL(DBGWCR5_EL1)
+#define RSYSL_DBGWCR6_EL1() MRSL(DBGWCR6_EL1)
+#define RSYSL_DBGWCR7_EL1() MRSL(DBGWCR7_EL1)
+#define RSYSL_DBGWCR8_EL1() MRSL(DBGWCR8_EL1)
+#define RSYSL_DBGWCR9_EL1() MRSL(DBGWCR9_EL1)
+#define RSYSL_DBGWCR10_EL1() MRSL(DBGWCR10_EL1)
+#define RSYSL_DBGWCR11_EL1() MRSL(DBGWCR11_EL1)
+#define RSYSL_DBGWCR12_EL1() MRSL(DBGWCR12_EL1)
+#define RSYSL_DBGWCR13_EL1() MRSL(DBGWCR13_EL1)
+#define RSYSL_DBGWCR14_EL1() MRSL(DBGWCR14_EL1)
+#define RSYSL_DBGWCR15_EL1() MRSL(DBGWCR15_EL1)
+#define RSYSL_DBGCLAIMSET_EL1() MRSL(DBGCLAIMSET_EL1)
+#define RSYSL_DBGCLAIMCLR_EL1() MRSL(DBGCLAIMCLR_EL1)
+#define RSYSL_DBGAUTHSTATUS_EL1() MRSL(DBGAUTHSTATUS_EL1)
+#define RSYSL_DBGVCR32_EL2() MRSL(DBGVCR32_EL2)
+#define RSYSL_MDCR_EL2() MRSL(MDCR_EL2)
+#define RSYSL_MDCR_EL3() MRSL(MDCR_EL3)
+/* 64 bit registers */
+#define RSYSQ_DBGDTR_EL0() MRSQ(DBGDTR_EL0)
+#define RSYSQ_MDRAR_EL1() MRSQ(MDRAR_EL1)
+#define RSYSQ_DBGBVR0_EL1() MRSQ(DBGBVR0_EL1)
+#define RSYSQ_DBGBVR1_EL1() MRSQ(DBGBVR1_EL1)
+#define RSYSQ_DBGBVR2_EL1() MRSQ(DBGBVR2_EL1)
+#define RSYSQ_DBGBVR3_EL1() MRSQ(DBGBVR3_EL1)
+#define RSYSQ_DBGBVR4_EL1() MRSQ(DBGBVR4_EL1)
+#define RSYSQ_DBGBVR5_EL1() MRSQ(DBGBVR5_EL1)
+#define RSYSQ_DBGBVR6_EL1() MRSQ(DBGBVR6_EL1)
+#define RSYSQ_DBGBVR7_EL1() MRSQ(DBGBVR7_EL1)
+#define RSYSQ_DBGBVR8_EL1() MRSQ(DBGBVR8_EL1)
+#define RSYSQ_DBGBVR9_EL1() MRSQ(DBGBVR9_EL1)
+#define RSYSQ_DBGBVR10_EL1() MRSQ(DBGBVR10_EL1)
+#define RSYSQ_DBGBVR11_EL1() MRSQ(DBGBVR11_EL1)
+#define RSYSQ_DBGBVR12_EL1() MRSQ(DBGBVR12_EL1)
+#define RSYSQ_DBGBVR13_EL1() MRSQ(DBGBVR13_EL1)
+#define RSYSQ_DBGBVR14_EL1() MRSQ(DBGBVR14_EL1)
+#define RSYSQ_DBGBVR15_EL1() MRSQ(DBGBVR15_EL1)
+#define RSYSQ_DBGWVR0_EL1() MRSQ(DBGWVR0_EL1)
+#define RSYSQ_DBGWVR1_EL1() MRSQ(DBGWVR1_EL1)
+#define RSYSQ_DBGWVR2_EL1() MRSQ(DBGWVR2_EL1)
+#define RSYSQ_DBGWVR3_EL1() MRSQ(DBGWVR3_EL1)
+#define RSYSQ_DBGWVR4_EL1() MRSQ(DBGWVR4_EL1)
+#define RSYSQ_DBGWVR5_EL1() MRSQ(DBGWVR5_EL1)
+#define RSYSQ_DBGWVR6_EL1() MRSQ(DBGWVR6_EL1)
+#define RSYSQ_DBGWVR7_EL1() MRSQ(DBGWVR7_EL1)
+#define RSYSQ_DBGWVR8_EL1() MRSQ(DBGWVR8_EL1)
+#define RSYSQ_DBGWVR9_EL1() MRSQ(DBGWVR9_EL1)
+#define RSYSQ_DBGWVR10_EL1() MRSQ(DBGWVR10_EL1)
+#define RSYSQ_DBGWVR11_EL1() MRSQ(DBGWVR11_EL1)
+#define RSYSQ_DBGWVR12_EL1() MRSQ(DBGWVR12_EL1)
+#define RSYSQ_DBGWVR13_EL1() MRSQ(DBGWVR13_EL1)
+#define RSYSQ_DBGWVR14_EL1() MRSQ(DBGWVR14_EL1)
+#define RSYSQ_DBGWVR15_EL1() MRSQ(DBGWVR15_EL1)
+
+/* 32 bit registers */
+#define WSYS_DBGDTRTX_EL0(val) MSR(val, DBGDTRTX_EL0)
+#define WSYS_MDCCINT_EL1(val) MSR(val, MDCCINT_EL1)
+#define WSYS_MDSCR_EL1(val) MSR(val, MDSCR_EL1)
+#define WSYS_OSDTRRX_EL1(val) MSR(val, OSDTRRX_EL1)
+#define WSYS_OSDTRTX_EL1(val) MSR(val, OSDTRTX_EL1)
+#define WSYS_OSDLR_EL1(val) MSR(val, OSDLR_EL1)
+#define WSYS_OSECCR_EL1(val) MSR(val, OSECCR_EL1)
+#define WSYS_DBGPRCR_EL1(val) MSR(val, DBGPRCR_EL1)
+#define WSYS_DBGBCR0_EL1(val) MSR(val, DBGBCR0_EL1)
+#define WSYS_DBGBCR1_EL1(val) MSR(val, DBGBCR1_EL1)
+#define WSYS_DBGBCR2_EL1(val) MSR(val, DBGBCR2_EL1)
+#define WSYS_DBGBCR3_EL1(val) MSR(val, DBGBCR3_EL1)
+#define WSYS_DBGBCR4_EL1(val) MSR(val, DBGBCR4_EL1)
+#define WSYS_DBGBCR5_EL1(val) MSR(val, DBGBCR5_EL1)
+#define WSYS_DBGBCR6_EL1(val) MSR(val, DBGBCR6_EL1)
+#define WSYS_DBGBCR7_EL1(val) MSR(val, DBGBCR7_EL1)
+#define WSYS_DBGBCR8_EL1(val) MSR(val, DBGBCR8_EL1)
+#define WSYS_DBGBCR9_EL1(val) MSR(val, DBGBCR9_EL1)
+#define WSYS_DBGBCR10_EL1(val) MSR(val, DBGBCR10_EL1)
+#define WSYS_DBGBCR11_EL1(val) MSR(val, DBGBCR11_EL1)
+#define WSYS_DBGBCR12_EL1(val) MSR(val, DBGBCR12_EL1)
+#define WSYS_DBGBCR13_EL1(val) MSR(val, DBGBCR13_EL1)
+#define WSYS_DBGBCR14_EL1(val) MSR(val, DBGBCR14_EL1)
+#define WSYS_DBGBCR15_EL1(val) MSR(val, DBGBCR15_EL1)
+#define WSYS_DBGWCR0_EL1(val) MSR(val, DBGWCR0_EL1)
+#define WSYS_DBGWCR1_EL1(val) MSR(val, DBGWCR1_EL1)
+#define WSYS_DBGWCR2_EL1(val) MSR(val, DBGWCR2_EL1)
+#define WSYS_DBGWCR3_EL1(val) MSR(val, DBGWCR3_EL1)
+#define WSYS_DBGWCR4_EL1(val) MSR(val, DBGWCR4_EL1)
+#define WSYS_DBGWCR5_EL1(val) MSR(val, DBGWCR5_EL1)
+#define WSYS_DBGWCR6_EL1(val) MSR(val, DBGWCR6_EL1)
+#define WSYS_DBGWCR7_EL1(val) MSR(val, DBGWCR7_EL1)
+#define WSYS_DBGWCR8_EL1(val) MSR(val, DBGWCR8_EL1)
+#define WSYS_DBGWCR9_EL1(val) MSR(val, DBGWCR9_EL1)
+#define WSYS_DBGWCR10_EL1(val) MSR(val, DBGWCR10_EL1)
+#define WSYS_DBGWCR11_EL1(val) MSR(val, DBGWCR11_EL1)
+#define WSYS_DBGWCR12_EL1(val) MSR(val, DBGWCR12_EL1)
+#define WSYS_DBGWCR13_EL1(val) MSR(val, DBGWCR13_EL1)
+#define WSYS_DBGWCR14_EL1(val) MSR(val, DBGWCR14_EL1)
+#define WSYS_DBGWCR15_EL1(val) MSR(val, DBGWCR15_EL1)
+#define WSYS_DBGCLAIMSET_EL1(val) MSR(val, DBGCLAIMSET_EL1)
+#define WSYS_DBGCLAIMCLR_EL1(val) MSR(val, DBGCLAIMCLR_EL1)
+#define WSYS_OSLAR_EL1(val) MSR(val, OSLAR_EL1)
+#define WSYS_DBGVCR32_EL2(val) MSR(val, DBGVCR32_EL2)
+#define WSYS_MDCR_EL2(val) MSR(val, MDCR_EL2)
+#define WSYS_MDCR_EL3(val) MSR(val, MDCR_EL3)
+/* 64 bit registers */
+#define WSYS_DBGDTR_EL0(val) MSR(val, DBGDTR_EL0)
+#define WSYS_DBGBVR0_EL1(val) MSR(val, DBGBVR0_EL1)
+#define WSYS_DBGBVR1_EL1(val) MSR(val, DBGBVR1_EL1)
+#define WSYS_DBGBVR2_EL1(val) MSR(val, DBGBVR2_EL1)
+#define WSYS_DBGBVR3_EL1(val) MSR(val, DBGBVR3_EL1)
+#define WSYS_DBGBVR4_EL1(val) MSR(val, DBGBVR4_EL1)
+#define WSYS_DBGBVR5_EL1(val) MSR(val, DBGBVR5_EL1)
+#define WSYS_DBGBVR6_EL1(val) MSR(val, DBGBVR6_EL1)
+#define WSYS_DBGBVR7_EL1(val) MSR(val, DBGBVR7_EL1)
+#define WSYS_DBGBVR8_EL1(val) MSR(val, DBGBVR8_EL1)
+#define WSYS_DBGBVR9_EL1(val) MSR(val, DBGBVR9_EL1)
+#define WSYS_DBGBVR10_EL1(val) MSR(val, DBGBVR10_EL1)
+#define WSYS_DBGBVR11_EL1(val) MSR(val, DBGBVR11_EL1)
+#define WSYS_DBGBVR12_EL1(val) MSR(val, DBGBVR12_EL1)
+#define WSYS_DBGBVR13_EL1(val) MSR(val, DBGBVR13_EL1)
+#define WSYS_DBGBVR14_EL1(val) MSR(val, DBGBVR14_EL1)
+#define WSYS_DBGBVR15_EL1(val) MSR(val, DBGBVR15_EL1)
+#define WSYS_DBGWVR0_EL1(val) MSR(val, DBGWVR0_EL1)
+#define WSYS_DBGWVR1_EL1(val) MSR(val, DBGWVR1_EL1)
+#define WSYS_DBGWVR2_EL1(val) MSR(val, DBGWVR2_EL1)
+#define WSYS_DBGWVR3_EL1(val) MSR(val, DBGWVR3_EL1)
+#define WSYS_DBGWVR4_EL1(val) MSR(val, DBGWVR4_EL1)
+#define WSYS_DBGWVR5_EL1(val) MSR(val, DBGWVR5_EL1)
+#define WSYS_DBGWVR6_EL1(val) MSR(val, DBGWVR6_EL1)
+#define WSYS_DBGWVR7_EL1(val) MSR(val, DBGWVR7_EL1)
+#define WSYS_DBGWVR8_EL1(val) MSR(val, DBGWVR8_EL1)
+#define WSYS_DBGWVR9_EL1(val) MSR(val, DBGWVR9_EL1)
+#define WSYS_DBGWVR10_EL1(val) MSR(val, DBGWVR10_EL1)
+#define WSYS_DBGWVR11_EL1(val) MSR(val, DBGWVR11_EL1)
+#define WSYS_DBGWVR12_EL1(val) MSR(val, DBGWVR12_EL1)
+#define WSYS_DBGWVR13_EL1(val) MSR(val, DBGWVR13_EL1)
+#define WSYS_DBGWVR14_EL1(val) MSR(val, DBGWVR14_EL1)
+#define WSYS_DBGWVR15_EL1(val) MSR(val, DBGWVR15_EL1)
+
+#endif
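
A hedged sketch of the accessor convention this header sets up: dbg_readl()/dbg_readq()/dbg_write() paste the register name onto the RSYSL_/RSYSQ_/WSYS_ wrappers, so the name must be one of the macros defined above. The save/restore pair below is hypothetical.

/* Sketch only: saving and restoring breakpoint 0 with the debugv8 accessors. */
#include <asm/debugv8.h>

struct bp0_state {
	uint64_t bvr;
	uint32_t bcr;
};

static void save_bp0(struct bp0_state *s)
{
	s->bvr = dbg_readq(DBGBVR0_EL1);	/* -> RSYSQ_DBGBVR0_EL1() */
	s->bcr = dbg_readl(DBGBCR0_EL1);	/* -> RSYSL_DBGBCR0_EL1() */
}

static void restore_bp0(const struct bp0_state *s)
{
	dbg_write(s->bvr, DBGBVR0_EL1);		/* -> WSYS_DBGBVR0_EL1(val) */
	dbg_write(s->bcr, DBGBCR0_EL1);		/* -> WSYS_DBGBCR0_EL1(val) */
}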
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 243ef256b8c9..ffa5af4fcb7e 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -17,14 +17,24 @@
#define __ASM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
bool dma_coherent;
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct pdev_archdata {
+ u64 dma_mask;
};
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..e77da2002bc9
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013,2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..c16cf151f689
--- /dev/null
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -0,0 +1,64 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/err.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kref.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+ struct dma_fast_smmu_mapping *fast;
+#endif
+};
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#else /* !CONFIG_ARM64_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+ return ERR_PTR(-ENOMEM);
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif /* CONFIG_ARM64_DMA_USE_IOMMU */
+
+#endif /* __KERNEL__ */
+#endif
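
A hedged sketch of the driver-side flow the mapping API above is meant for, mirroring the long-standing 32-bit ARM arm_iommu_* usage; the IOVA base, the window size and the probe context are placeholders.

/* Sketch only: create an IOVA window and attach a device to it. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_attach_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* 128 MB of IOVA space at an arbitrary base address. */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x40000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}
	return 0;
}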
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 61e08f360e31..b002c5e3809c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -27,7 +27,7 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops dummy_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
@@ -39,7 +39,7 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
return &dummy_dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
@@ -47,6 +47,12 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->archdata.dma_ops = dma_ops;
+}
+
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
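
The new set_dma_ops() simply stores the ops pointer in dev->archdata, pairing with the now-const get_dma_ops(); a minimal hedged sketch of bus or IOMMU glue installing custom ops (my_bus_dma_ops is hypothetical):

/* Sketch only: routing a device's DMA API calls through custom ops. */
#include <asm/dma-mapping.h>

extern const struct dma_map_ops my_bus_dma_ops;	/* hypothetical ops table */

static void example_setup_dev_dma(struct device *dev)
{
	set_dma_ops(dev, &my_bus_dma_ops);
	/* dma_map_single(), dma_alloc_coherent(), etc. on this device now
	 * resolve through my_bus_dma_ops via get_dma_ops(dev). */
}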
diff --git a/arch/arm64/include/asm/edac.h b/arch/arm64/include/asm/edac.h
new file mode 100644
index 000000000000..1bea82beb4f7
--- /dev/null
+++ b/arch/arm64/include/asm/edac.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+#if defined(CONFIG_EDAC_CORTEX_ARM64) && \
+ !defined(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)
+void arm64_check_cache_ecc(void *info);
+#else
+static inline void arm64_check_cache_ecc(void *info) { }
+#endif
+
+static inline void atomic_scrub(void *addr, int size)
+{
+ return;
+}
+
+#endif
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index f9d64ed4fd2b..b9876364de1a 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -23,6 +23,7 @@
*/
#include <asm/ptrace.h>
#include <asm/user.h>
+#include <asm/fpsimd.h>
/*
* AArch64 static relocation types.
@@ -140,11 +141,12 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
-#define ARCH_DLINFO \
+#define _SET_AUX_ENT_VDSO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
- (elf_addr_t)current->mm->context.vdso); \
+ (Elf64_Off)current->mm->context.vdso); \
} while (0)
+#define ARCH_DLINFO _SET_AUX_ENT_VDSO
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
@@ -182,8 +184,16 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
((x)->e_flags & EF_ARM_EABI_MASK))
#define compat_start_thread compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex) set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex) \
+do { \
+ set_thread_flag(TIF_32BIT); \
+} while (0)
+
+#ifdef CONFIG_VDSO32
+#define COMPAT_ARCH_DLINFO _SET_AUX_ENT_VDSO
+#else
#define COMPAT_ARCH_DLINFO
+#endif
extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages \
diff --git a/arch/arm64/include/asm/etmv4x.h b/arch/arm64/include/asm/etmv4x.h
new file mode 100644
index 000000000000..91239f779587
--- /dev/null
+++ b/arch/arm64/include/asm/etmv4x.h
@@ -0,0 +1,385 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for AArch64 */
+#define trc_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for AArch64 */
+#define trc_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for AArch64 */
+#define trc_write(val, reg) WSYS_##reg(val)
+
+#define MRSL(op0, op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+#define MRSQ(op0, op1, crn, crm, op2) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+#define MSR(val, op0, op1, crn, crm, op2) \
+({ \
+asm volatile("msr S"#op0"_"#op1"_"#crn"_"#crm"_"#op2", %0" : : "r" (val)); \
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1() MRSL(3, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val) MSR(val, 3, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS() MRSL(2, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR() MRSL(2, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR() MRSL(2, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0() MRSL(2, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0() MRSL(2, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1() MRSL(2, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2() MRSL(2, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3() MRSL(2, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0() MRSL(2, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1() MRSL(2, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2() MRSL(2, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3() MRSL(2, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0() MRSL(2, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1() MRSL(2, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2() MRSL(2, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3() MRSL(2, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR() MRSL(2, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH() MRSL(2, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID() MRSL(2, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R() MRSL(2, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R() MRSL(2, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR() MRSL(2, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0() MRSL(2, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1() MRSL(2, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10() MRSL(2, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11() MRSL(2, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12() MRSL(2, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13() MRSL(2, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2() MRSL(2, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3() MRSL(2, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4() MRSL(2, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5() MRSL(2, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6() MRSL(2, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7() MRSL(2, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8() MRSL(2, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9() MRSL(2, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0() MRSL(2, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR() MRSL(2, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR() MRSL(2, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10() MRSL(2, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11() MRSL(2, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12() MRSL(2, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13() MRSL(2, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14() MRSL(2, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15() MRSL(2, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2() MRSL(2, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3() MRSL(2, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4() MRSL(2, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5() MRSL(2, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6() MRSL(2, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7() MRSL(2, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8() MRSL(2, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9() MRSL(2, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16() MRSL(2, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17() MRSL(2, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18() MRSL(2, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19() MRSL(2, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20() MRSL(2, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21() MRSL(2, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22() MRSL(2, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23() MRSL(2, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24() MRSL(2, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25() MRSL(2, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26() MRSL(2, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27() MRSL(2, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28() MRSL(2, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29() MRSL(2, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30() MRSL(2, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31() MRSL(2, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0() MRSL(2, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1() MRSL(2, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2() MRSL(2, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR() MRSL(2, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR() MRSL(2, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR() MRSL(2, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR() MRSL(2, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR() MRSL(2, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR() MRSL(2, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR() MRSL(2, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR() MRSL(2, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR() MRSL(2, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR() MRSL(2, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0() MRSL(2, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1() MRSL(2, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2() MRSL(2, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3() MRSL(2, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4() MRSL(2, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5() MRSL(2, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6() MRSL(2, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7() MRSL(2, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0() MRSL(2, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1() MRSL(2, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2() MRSL(2, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3() MRSL(2, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4() MRSL(2, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5() MRSL(2, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6() MRSL(2, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7() MRSL(2, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0() MRSL(2, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1() MRSL(2, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2() MRSL(2, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3() MRSL(2, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4() MRSL(2, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5() MRSL(2, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6() MRSL(2, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7() MRSL(2, 1, c1, c7, 3)
+
+/* 64 bit registers */
+#define RSYSQ_ETMACATR0() MRSQ(2, 1, c2, c0, 2)
+#define RSYSQ_ETMACATR1() MRSQ(2, 1, c2, c2, 2)
+#define RSYSQ_ETMACATR2() MRSQ(2, 1, c2, c4, 2)
+#define RSYSQ_ETMACATR3() MRSQ(2, 1, c2, c6, 2)
+#define RSYSQ_ETMACATR4() MRSQ(2, 1, c2, c8, 2)
+#define RSYSQ_ETMACATR5() MRSQ(2, 1, c2, c10, 2)
+#define RSYSQ_ETMACATR6() MRSQ(2, 1, c2, c12, 2)
+#define RSYSQ_ETMACATR7() MRSQ(2, 1, c2, c14, 2)
+#define RSYSQ_ETMACATR8() MRSQ(2, 1, c2, c0, 3)
+#define RSYSQ_ETMACATR9() MRSQ(2, 1, c2, c2, 3)
+#define RSYSQ_ETMACATR10() MRSQ(2, 1, c2, c4, 3)
+#define RSYSQ_ETMACATR11() MRSQ(2, 1, c2, c6, 3)
+#define RSYSQ_ETMACATR12() MRSQ(2, 1, c2, c8, 3)
+#define RSYSQ_ETMACATR13() MRSQ(2, 1, c2, c10, 3)
+#define RSYSQ_ETMACATR14() MRSQ(2, 1, c2, c12, 3)
+#define RSYSQ_ETMACATR15() MRSQ(2, 1, c2, c14, 3)
+#define RSYSQ_ETMCIDCVR0() MRSQ(2, 1, c3, c0, 0)
+#define RSYSQ_ETMCIDCVR1() MRSQ(2, 1, c3, c2, 0)
+#define RSYSQ_ETMCIDCVR2() MRSQ(2, 1, c3, c4, 0)
+#define RSYSQ_ETMCIDCVR3() MRSQ(2, 1, c3, c6, 0)
+#define RSYSQ_ETMCIDCVR4() MRSQ(2, 1, c3, c8, 0)
+#define RSYSQ_ETMCIDCVR5() MRSQ(2, 1, c3, c10, 0)
+#define RSYSQ_ETMCIDCVR6() MRSQ(2, 1, c3, c12, 0)
+#define RSYSQ_ETMCIDCVR7() MRSQ(2, 1, c3, c14, 0)
+#define RSYSQ_ETMACVR0() MRSQ(2, 1, c2, c0, 0)
+#define RSYSQ_ETMACVR1() MRSQ(2, 1, c2, c2, 0)
+#define RSYSQ_ETMACVR2() MRSQ(2, 1, c2, c4, 0)
+#define RSYSQ_ETMACVR3() MRSQ(2, 1, c2, c6, 0)
+#define RSYSQ_ETMACVR4() MRSQ(2, 1, c2, c8, 0)
+#define RSYSQ_ETMACVR5() MRSQ(2, 1, c2, c10, 0)
+#define RSYSQ_ETMACVR6() MRSQ(2, 1, c2, c12, 0)
+#define RSYSQ_ETMACVR7() MRSQ(2, 1, c2, c14, 0)
+#define RSYSQ_ETMACVR8() MRSQ(2, 1, c2, c0, 1)
+#define RSYSQ_ETMACVR9() MRSQ(2, 1, c2, c2, 1)
+#define RSYSQ_ETMACVR10() MRSQ(2, 1, c2, c4, 1)
+#define RSYSQ_ETMACVR11() MRSQ(2, 1, c2, c6, 1)
+#define RSYSQ_ETMACVR12() MRSQ(2, 1, c2, c8, 1)
+#define RSYSQ_ETMACVR13() MRSQ(2, 1, c2, c10, 1)
+#define RSYSQ_ETMACVR14() MRSQ(2, 1, c2, c12, 1)
+#define RSYSQ_ETMACVR15() MRSQ(2, 1, c2, c14, 1)
+#define RSYSQ_ETMVMIDCVR0() MRSQ(2, 1, c3, c0, 1)
+#define RSYSQ_ETMVMIDCVR1() MRSQ(2, 1, c3, c2, 1)
+#define RSYSQ_ETMVMIDCVR2() MRSQ(2, 1, c3, c4, 1)
+#define RSYSQ_ETMVMIDCVR3() MRSQ(2, 1, c3, c6, 1)
+#define RSYSQ_ETMVMIDCVR4() MRSQ(2, 1, c3, c8, 1)
+#define RSYSQ_ETMVMIDCVR5() MRSQ(2, 1, c3, c10, 1)
+#define RSYSQ_ETMVMIDCVR6() MRSQ(2, 1, c3, c12, 1)
+#define RSYSQ_ETMVMIDCVR7() MRSQ(2, 1, c3, c14, 1)
+#define RSYSQ_ETMDVCVR0() MRSQ(2, 1, c2, c0, 4)
+#define RSYSQ_ETMDVCVR1() MRSQ(2, 1, c2, c4, 4)
+#define RSYSQ_ETMDVCVR2() MRSQ(2, 1, c2, c8, 4)
+#define RSYSQ_ETMDVCVR3() MRSQ(2, 1, c2, c12, 4)
+#define RSYSQ_ETMDVCVR4() MRSQ(2, 1, c2, c0, 5)
+#define RSYSQ_ETMDVCVR5() MRSQ(2, 1, c2, c4, 5)
+#define RSYSQ_ETMDVCVR6() MRSQ(2, 1, c2, c8, 5)
+#define RSYSQ_ETMDVCVR7() MRSQ(2, 1, c2, c12, 5)
+#define RSYSQ_ETMDVCMR0() MRSQ(2, 1, c2, c0, 6)
+#define RSYSQ_ETMDVCMR1() MRSQ(2, 1, c2, c4, 6)
+#define RSYSQ_ETMDVCMR2() MRSQ(2, 1, c2, c8, 6)
+#define RSYSQ_ETMDVCMR3() MRSQ(2, 1, c2, c12, 6)
+#define RSYSQ_ETMDVCMR4() MRSQ(2, 1, c2, c0, 7)
+#define RSYSQ_ETMDVCMR5() MRSQ(2, 1, c2, c4, 7)
+#define RSYSQ_ETMDVCMR6() MRSQ(2, 1, c2, c8, 7)
+#define RSYSQ_ETMDVCMR7() MRSQ(2, 1, c2, c12, 7)
+
+/* 32 and 64 bit registers */
+#define WSYS_ETMAUXCTLR(val) MSR(val, 2, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val) MSR(val, 2, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val) MSR(val, 2, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val) MSR(val, 2, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val) MSR(val, 2, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val) MSR(val, 2, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val) MSR(val, 2, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val) MSR(val, 2, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val) MSR(val, 2, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val) MSR(val, 2, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val) MSR(val, 2, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val) MSR(val, 2, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val) MSR(val, 2, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val) MSR(val, 2, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val) MSR(val, 2, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val) MSR(val, 2, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val) MSR(val, 2, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val) MSR(val, 2, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val) MSR(val, 2, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val) MSR(val, 2, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val) MSR(val, 2, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val) MSR(val, 2, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val) MSR(val, 2, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val) MSR(val, 2, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val) MSR(val, 2, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val) MSR(val, 2, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val) MSR(val, 2, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val) MSR(val, 2, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val) MSR(val, 2, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val) MSR(val, 2, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val) MSR(val, 2, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val) MSR(val, 2, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val) MSR(val, 2, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val) MSR(val, 2, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val) MSR(val, 2, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val) MSR(val, 2, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val) MSR(val, 2, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val) MSR(val, 2, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val) MSR(val, 2, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val) MSR(val, 2, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val) MSR(val, 2, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val) MSR(val, 2, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val) MSR(val, 2, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val) MSR(val, 2, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val) MSR(val, 2, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val) MSR(val, 2, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val) MSR(val, 2, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val) MSR(val, 2, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val) MSR(val, 2, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val) MSR(val, 2, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val) MSR(val, 2, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val) MSR(val, 2, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val) MSR(val, 2, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val) MSR(val, 2, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val) MSR(val, 2, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val) MSR(val, 2, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val) MSR(val, 2, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val) MSR(val, 2, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val) MSR(val, 2, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val) MSR(val, 2, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val) MSR(val, 2, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val) MSR(val, 2, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val) MSR(val, 2, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val) MSR(val, 2, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val) MSR(val, 2, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val) MSR(val, 2, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val) MSR(val, 2, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val) MSR(val, 2, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val) MSR(val, 2, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val) MSR(val, 2, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val) MSR(val, 2, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val) MSR(val, 2, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val) MSR(val, 2, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val) MSR(val, 2, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val) MSR(val, 2, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val) MSR(val, 2, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val) MSR(val, 2, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val) MSR(val, 2, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val) MSR(val, 2, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val) MSR(val, 2, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val) MSR(val, 2, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val) MSR(val, 2, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val) MSR(val, 2, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val) MSR(val, 2, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val) MSR(val, 2, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val) MSR(val, 2, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val) MSR(val, 2, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val) MSR(val, 2, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val) MSR(val, 2, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val) MSR(val, 2, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val) MSR(val, 2, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val) MSR(val, 2, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val) MSR(val, 2, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val) MSR(val, 2, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val) MSR(val, 2, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val) MSR(val, 2, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val) MSR(val, 2, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val) MSR(val, 2, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val) MSR(val, 2, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val) MSR(val, 2, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val) MSR(val, 2, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val) MSR(val, 2, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val) MSR(val, 2, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val) MSR(val, 2, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val) MSR(val, 2, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val) MSR(val, 2, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val) MSR(val, 2, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val) MSR(val, 2, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val) MSR(val, 2, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val) MSR(val, 2, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val) MSR(val, 2, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val) MSR(val, 2, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val) MSR(val, 2, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val) MSR(val, 2, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val) MSR(val, 2, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val) MSR(val, 2, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val) MSR(val, 2, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val) MSR(val, 2, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val) MSR(val, 2, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val) MSR(val, 2, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val) MSR(val, 2, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val) MSR(val, 2, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val) MSR(val, 2, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val) MSR(val, 2, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val) MSR(val, 2, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val) MSR(val, 2, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val) MSR(val, 2, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val) MSR(val, 2, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val) MSR(val, 2, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val) MSR(val, 2, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val) MSR(val, 2, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val) MSR(val, 2, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val) MSR(val, 2, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val) MSR(val, 2, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val) MSR(val, 2, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val) MSR(val, 2, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val) MSR(val, 2, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val) MSR(val, 2, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val) MSR(val, 2, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val) MSR(val, 2, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val) MSR(val, 2, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val) MSR(val, 2, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val) MSR(val, 2, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val) MSR(val, 2, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val) MSR(val, 2, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val) MSR(val, 2, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val) MSR(val, 2, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val) MSR(val, 2, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val) MSR(val, 2, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val) MSR(val, 2, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val) MSR(val, 2, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val) MSR(val, 2, 1, c1, c7, 3)
+
+#endif
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50f559f574fe..3efaa5cebc03 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -81,6 +81,14 @@ extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
u32 num_regs);
extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+#ifdef CONFIG_ENABLE_FP_SIMD_SETTINGS
+extern void fpsimd_disable_trap(void);
+extern void fpsimd_enable_trap(void);
+#else
+static inline void fpsimd_disable_trap(void) {}
+static inline void fpsimd_enable_trap(void) {}
+#endif
+
#endif
#endif
diff --git a/arch/arm64/include/asm/gpio.h b/arch/arm64/include/asm/gpio.h
new file mode 100644
index 000000000000..9019bc99ed47
--- /dev/null
+++ b/arch/arm64/include/asm/gpio.h
@@ -0,0 +1,32 @@
+#ifndef _ARCH_ARM64_GPIO_H
+#define _ARCH_ARM64_GPIO_H
+
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#endif
+
+/* not all ARM64 platforms necessarily support this API ... */
+#ifdef CONFIG_NEED_MACH_GPIO_H
+#include <mach/gpio.h>
+#endif
+
+#ifndef __ARM64_GPIOLIB_COMPLEX
+/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
+#include <asm-generic/gpio.h>
+
+/* The trivial gpiolib dispatchers */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#endif
+
+/*
+ * Provide a default gpio_to_irq() which should satisfy every case.
+ * However, some platforms want to do this differently, so allow them
+ * to override it.
+ */
+#ifndef gpio_to_irq
+#define gpio_to_irq __gpio_to_irq
+#endif
+
+#endif /* _ARCH_ARM64_GPIO_H */
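
A hedged sketch of the legacy integer-GPIO calls this header dispatches to gpiolib; the GPIO number is a placeholder.

/* Sketch only: the legacy integer-GPIO API resolved through gpiolib. */
#include <linux/gpio.h>

static int example_read_line(void)
{
	int gpio = 42;				/* placeholder GPIO number */
	int irq  = gpio_to_irq(gpio);		/* __gpio_to_irq() unless overridden */
	int val  = gpio_get_value(gpio);	/* __gpio_get_value() */

	return irq < 0 ? irq : val;
}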
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 8740297dac77..1473fc2f7ab7 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 30e50eb54a67..1dbaa901d7e5 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -120,6 +120,29 @@ enum aarch64_insn_register {
AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
};
+enum aarch64_insn_special_register {
+ AARCH64_INSN_SPCLREG_SPSR_EL1 = 0xC200,
+ AARCH64_INSN_SPCLREG_ELR_EL1 = 0xC201,
+ AARCH64_INSN_SPCLREG_SP_EL0 = 0xC208,
+ AARCH64_INSN_SPCLREG_SPSEL = 0xC210,
+ AARCH64_INSN_SPCLREG_CURRENTEL = 0xC212,
+ AARCH64_INSN_SPCLREG_DAIF = 0xDA11,
+ AARCH64_INSN_SPCLREG_NZCV = 0xDA10,
+ AARCH64_INSN_SPCLREG_FPCR = 0xDA20,
+ AARCH64_INSN_SPCLREG_DSPSR_EL0 = 0xDA28,
+ AARCH64_INSN_SPCLREG_DLR_EL0 = 0xDA29,
+ AARCH64_INSN_SPCLREG_SPSR_EL2 = 0xE200,
+ AARCH64_INSN_SPCLREG_ELR_EL2 = 0xE201,
+ AARCH64_INSN_SPCLREG_SP_EL1 = 0xE208,
+ AARCH64_INSN_SPCLREG_SPSR_INQ = 0xE218,
+ AARCH64_INSN_SPCLREG_SPSR_ABT = 0xE219,
+ AARCH64_INSN_SPCLREG_SPSR_UND = 0xE21A,
+ AARCH64_INSN_SPCLREG_SPSR_FIQ = 0xE21B,
+ AARCH64_INSN_SPCLREG_SPSR_EL3 = 0xF200,
+ AARCH64_INSN_SPCLREG_ELR_EL3 = 0xF201,
+ AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210
+};
+
enum aarch64_insn_variant {
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_VARIANT_64BIT
@@ -223,8 +246,15 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
+__AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000)
+__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
+__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
+__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
+__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
+__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
@@ -273,10 +303,15 @@ __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
+__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
+__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
+__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
+__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
#undef __AARCH64_INSN_FUNCS
@@ -286,6 +321,8 @@ bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+bool aarch64_insn_uses_literal(u32 insn);
+bool aarch64_insn_is_branch(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
@@ -367,9 +404,13 @@ bool aarch32_insn_is_wide(u32 insn);
#define A32_RT_OFFSET 12
#define A32_RT2_OFFSET 0
+u32 aarch64_insn_extract_system_reg(u32 insn);
u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
u32 aarch32_insn_mcr_extract_opc2(u32 insn);
u32 aarch32_insn_mcr_extract_crm(u32 insn);
+
+typedef bool (pstate_check_t)(unsigned long);
+extern pstate_check_t * const aarch32_opcode_cond_checks[16];
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
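The __AARCH64_INSN_FUNCS() entries added above pair a bit mask with an expected value: an instruction word belongs to a class when (insn & mask) == value. Below is a minimal, hedged userspace sketch of that decode test; the ERET and BRK encodings are taken from the table above, everything else (names, main()) is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mask/value pairs copied from the table above. */
#define INSN_MASK_ERET	0xFFFFFFFFu
#define INSN_VAL_ERET	0xD69F03E0u
#define INSN_MASK_BRK	0xFFE0001Fu
#define INSN_VAL_BRK	0xD4200000u

/* An instruction matches a class when the fixed bits selected by the
 * mask equal the class value. */
static bool insn_matches(uint32_t insn, uint32_t mask, uint32_t val)
{
	return (insn & mask) == val;
}

int main(void)
{
	uint32_t eret = 0xD69F03E0;	/* ERET */
	uint32_t brk1 = 0xD4200020;	/* BRK #1 (imm16 in bits [20:5]) */

	printf("eret is ERET: %d\n", insn_matches(eret, INSN_MASK_ERET, INSN_VAL_ERET));
	printf("brk1 is BRK:  %d\n", insn_matches(brk1, INSN_MASK_BRK, INSN_VAL_BRK));
	return 0;
}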
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 44be1e03ed65..3112c2a9d96f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -31,38 +31,35 @@
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
+#include <linux/msm_rtb.h>
#include <xen/xen.h>
/*
* Generic IO read/write. These perform native-endian accesses.
+ * Note that some architectures will want to re-define __raw_{read,write}w.
*/
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
{
asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_writeq __raw_writeq
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
{
asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
}
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
{
u8 val;
asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
@@ -72,8 +69,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
return val;
}
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
{
u16 val;
@@ -84,8 +80,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
return val;
}
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
{
u32 val;
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
@@ -95,8 +90,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return val;
}
-#define __raw_readq __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
{
u64 val;
asm volatile(ALTERNATIVE("ldr %0, [%1]",
@@ -106,6 +100,46 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
return val;
}
+/*
+ * There may be cases where clients don't want to, or can't, support the
+ * logging. The appropriate _no_log functions can be used instead, but clients
+ * should carefully consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t) ({ \
+ int _ret; \
+ volatile void __iomem *_a = (a); \
+ void *_addr = (void __force *)(_a); \
+ _ret = uncached_logk(LOGK_WRITEL, _addr); \
+ ETB_WAYPOINT; \
+ __raw_write##_t##_no_log((v), _a); \
+ if (_ret) \
+ LOG_BARRIER; \
+ })
+
+#define __raw_writeb(v, a) __raw_write_logged((v), a, b)
+#define __raw_writew(v, a) __raw_write_logged((v), a, w)
+#define __raw_writel(v, a) __raw_write_logged((v), a, l)
+#define __raw_writeq(v, a) __raw_write_logged((v), a, q)
+
+#define __raw_read_logged(a, _l, _t) ({ \
+ _t __a; \
+ const volatile void __iomem *_a = (const volatile void __iomem *)(a); \
+ void *_addr = (void __force *)(_a); \
+ int _ret; \
+ _ret = uncached_logk(LOGK_READL, _addr); \
+ ETB_WAYPOINT; \
+ __a = __raw_read##_l##_no_log(_a); \
+ if (_ret) \
+ LOG_BARRIER; \
+ __a; \
+ })
+
+#define __raw_readb(a) __raw_read_logged((a), b, u8)
+#define __raw_readw(a) __raw_read_logged((a), w, u16)
+#define __raw_readl(a) __raw_read_logged((a), l, u32)
+#define __raw_readq(a) __raw_read_logged((a), q, u64)
+
/* IO barriers */
#define __iormb() rmb()
#define __iowmb() wmb()
@@ -127,6 +161,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
+#define readb_relaxed_no_log(c) ({ u8 __v = __raw_readb_no_log(c); __v; })
+#define readw_relaxed_no_log(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; })
+#define readl_relaxed_no_log(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; })
+#define readq_relaxed_no_log(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
+
+#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c) ((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
+#define writel_relaxed_no_log(v, c) ((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c) ((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
+
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
@@ -142,6 +186,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
+#define readb_no_log(c) ({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; })
+#define readw_no_log(c) ({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+#define readl_no_log(c) ({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+#define readq_no_log(c) ({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+
+#define writeb_no_log(v, c) ({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
+#define writew_no_log(v, c) ({ __iowmb(); writew_relaxed_no_log((v), (c)); })
+#define writel_no_log(v, c) ({ __iowmb(); writel_relaxed_no_log((v), (c)); })
+#define writeq_no_log(v, c) ({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
+
/*
* I/O port access primitives.
*/
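The logged accessors above wrap the _no_log primitives in a GNU statement-expression macro: record the access, perform the raw read/write, and (for reads) yield the value as the expression's result. The sketch below models only that wrapping pattern in plain userspace C; log_access(), my_read32() and the fake register window are hypothetical stand-ins, not part of the patch.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[4];	/* stand-in for a device register window */

static void log_access(const char *what, const volatile void *addr)
{
	printf("%s at %p\n", what, (void *)(uintptr_t)addr);
}

static inline uint32_t my_read32_no_log(const volatile uint32_t *addr)
{
	return *addr;
}

/* Statement-expression wrapper: log first, then perform the raw access,
 * and yield the value read - mirroring the shape of __raw_read_logged(). */
#define my_read32(a) ({						\
	const volatile uint32_t *_a = (a);			\
	log_access("read", _a);					\
	my_read32_no_log(_a);					\
})

int main(void)
{
	fake_mmio[0] = 0xabcd;
	printf("value: 0x%x\n", my_read32(&fake_mmio[0]));
	return 0;
}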
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b77197d941fc..3e1c0e7ef082 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -47,6 +47,9 @@ static inline int nr_legacy_irqs(void)
return 0;
}
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
static inline bool on_irq_stack(unsigned long sp, int cpu)
{
/* variable names the same as kernel/stacktrace.c */
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
new file mode 100644
index 000000000000..1737aecfcc5e
--- /dev/null
+++ b/arch/arm64/include/asm/kprobes.h
@@ -0,0 +1,60 @@
+/*
+ * arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KPROBES_H
+#define _ARM_KPROBES_H
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_irqflag;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+ struct pt_regs jprobe_saved_regs;
+};
+
+void arch_remove_kprobe(struct kprobe *);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
+int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
+void kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index d7e7cf56e8d6..5385adcd157d 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -86,17 +86,6 @@
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
-/* Hyp System Control Register (SCTLR_EL2) bits */
-#define SCTLR_EL2_EE (1 << 25)
-#define SCTLR_EL2_WXN (1 << 19)
-#define SCTLR_EL2_I (1 << 12)
-#define SCTLR_EL2_SA (1 << 3)
-#define SCTLR_EL2_C (1 << 2)
-#define SCTLR_EL2_A (1 << 1)
-#define SCTLR_EL2_M 1
-#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
- SCTLR_EL2_SA | SCTLR_EL2_I)
-
/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
@@ -126,6 +115,7 @@
#define VTCR_EL2_SL0_LVL1 (1 << 6)
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
+#define VTCR_EL2_VS 19
/*
* We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -169,7 +159,7 @@
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT (UL(48))
-#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
#define HSTR_EL2_T(x) (1 << x)
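VTTBR_VMID_MASK becomes parameterised by the VMID width so that both 8-bit and 16-bit VMIDs can be masked into the VMID field starting at bit 48 of VTTBR_EL2. A quick, hedged check of the arithmetic with plain C types (the macro shape mirrors the definition above):

#include <stdint.h>
#include <stdio.h>

#define VTTBR_VMID_SHIFT	48
/* Same shape as the kernel macro, spelled with uint64_t instead of _AT(). */
#define VTTBR_VMID_MASK(size)	((((uint64_t)1 << (size)) - 1) << VTTBR_VMID_SHIFT)

int main(void)
{
	/* 8-bit VMIDs cover bits [55:48]; 16-bit VMIDs cover bits [63:48]. */
	printf("8-bit  mask: 0x%016llx\n", (unsigned long long)VTTBR_VMID_MASK(8));
	printf("16-bit mask: 0x%016llx\n", (unsigned long long)VTTBR_VMID_MASK(16));
	return 0;
}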
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 419bc6661b5c..36a30c80032d 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,84 +20,10 @@
#include <asm/virt.h>
-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
-#define CSSELR_EL1 2 /* Cache Size Selection Register */
-#define SCTLR_EL1 3 /* System Control Register */
-#define ACTLR_EL1 4 /* Auxiliary Control Register */
-#define CPACR_EL1 5 /* Coprocessor Access Control */
-#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
-#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
-#define TCR_EL1 8 /* Translation Control Register */
-#define ESR_EL1 9 /* Exception Syndrome Register */
-#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */
-#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */
-#define FAR_EL1 12 /* Fault Address Register */
-#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
-#define VBAR_EL1 14 /* Vector Base Address Register */
-#define CONTEXTIDR_EL1 15 /* Context ID Register */
-#define TPIDR_EL0 16 /* Thread ID, User R/W */
-#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
-#define TPIDR_EL1 18 /* Thread ID, Privileged */
-#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
-#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
-#define PAR_EL1 21 /* Physical Address Register */
-#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
-#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 24 /* Domain Access Control Register */
-#define IFSR32_EL2 25 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
-#define NR_SYS_REGS 28
-
-/* 32bit mapping */
-#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
-#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
-#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
-#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
-#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
-#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
-#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
-#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
-#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
-#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
-#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
-#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
-#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
-#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
-#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
-#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
-#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
-#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
-#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
-#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
-#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
-#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
-#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
-#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
-#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
-#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
-#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
-#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
-#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-
-#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
-#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
-#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
-#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
-#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
-#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
-#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
-
-#define NR_COPRO_REGS (NR_SYS_REGS * 2)
-
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE 2
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -105,11 +31,27 @@
#define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset)
#ifndef __ASSEMBLY__
+#if __GNUC__ > 4
+#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR)
+#else
+/*
+ * GCC versions 4.9 and older will fold the constant below into the addend of
+ * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
+ * constant is used directly. However, since we use the small code model for
+ * the core kernel, the reference to 'sym' will be emitted as an adrp/add pair,
+ * with a +/- 4 GB range, resulting in linker relocation errors if the shift
+ * is sufficiently large. So prevent the compiler from folding the shift into
+ * the addend, by making the shift a variable with external linkage.
+ */
+__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
+#endif
+
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 0729a2f94482..9917b55148d8 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -26,7 +26,6 @@
#include <asm/esr.h>
#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d2166bd67cd0..e875deff69f1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,6 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -45,6 +44,7 @@
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
+phys_addr_t kvm_hyp_reset_entry(void);
struct kvm_arch {
/* The VMID generation used for the virt. memory system */
@@ -85,6 +85,86 @@ struct kvm_vcpu_fault_info {
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
};
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+ __INVALID_SYSREG__,
+ MPIDR_EL1, /* MultiProcessor Affinity Register */
+ CSSELR_EL1, /* Cache Size Selection Register */
+ SCTLR_EL1, /* System Control Register */
+ ACTLR_EL1, /* Auxiliary Control Register */
+ CPACR_EL1, /* Coprocessor Access Control */
+ TTBR0_EL1, /* Translation Table Base Register 0 */
+ TTBR1_EL1, /* Translation Table Base Register 1 */
+ TCR_EL1, /* Translation Control Register */
+ ESR_EL1, /* Exception Syndrome Register */
+ AFSR0_EL1, /* Auxiliary Fault Status Register 0 */
+ AFSR1_EL1, /* Auxiliary Fault Status Register 1 */
+ FAR_EL1, /* Fault Address Register */
+ MAIR_EL1, /* Memory Attribute Indirection Register */
+ VBAR_EL1, /* Vector Base Address Register */
+ CONTEXTIDR_EL1, /* Context ID Register */
+ TPIDR_EL0, /* Thread ID, User R/W */
+ TPIDRRO_EL0, /* Thread ID, User R/O */
+ TPIDR_EL1, /* Thread ID, Privileged */
+ AMAIR_EL1, /* Aux Memory Attribute Indirection Register */
+ CNTKCTL_EL1, /* Timer Control Register (EL1) */
+ PAR_EL1, /* Physical Address Register */
+ MDSCR_EL1, /* Monitor Debug System Control Register */
+ MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+ /* 32bit specific registers. Keep them at the end of the range */
+ DACR32_EL2, /* Domain Access Control Register */
+ IFSR32_EL2, /* Instruction Fault Status Register */
+ FPEXC32_EL2, /* Floating-Point Exception Control Register */
+ DBGVCR32_EL2, /* Debug Vector Catch Register */
+
+ NR_SYS_REGS /* Nothing after this line! */
+};
+
+/* 32bit mapping */
+#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
+#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
+#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
+#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
+#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
+#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
+#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
+#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
+#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
+#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
+#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
+#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
+#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
+#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
+#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
+#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
+#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
+#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
+#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS (NR_SYS_REGS * 2)
+
struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
@@ -249,7 +329,21 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
-static inline void kvm_arch_hardware_disable(void) {}
+static inline void __cpu_init_stage2(void)
+{
+}
+
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t phys_idmap_start)
+{
+ /*
+ * Call reset code, and switch back to stub hyp vectors.
+ * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
+ */
+ __kvm_call_hyp((void *)kvm_hyp_reset_entry(),
+ boot_pgd_ptr, phys_idmap_start);
+}
+
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
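Moving the sysreg numbering into enum vcpu_sysreg keeps the 32-bit coprocessor view in step automatically: each 64-bit system register occupies two consecutive 32-bit slots, so the AArch32 index is the enum value times two, plus one for the upper half. A hedged sketch of that indexing over a flat array (the real code overlays the two views in a union, and half ordering additionally depends on endianness):

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down version of the enum above - only what the demo needs. */
enum vcpu_sysreg { INVALID_SYSREG, MPIDR_EL1, CSSELR_EL1, TTBR0_EL1 = 6, NR_SYS_REGS = 28 };

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

int main(void)
{
	uint32_t copro[NR_COPRO_REGS] = { 0 };
	uint64_t ttbr0 = 0x1122334455667788ULL;

	/* Store the 64-bit value as two 32-bit halves, low word first. */
	copro[TTBR0_EL1 * 2]     = (uint32_t)ttbr0;
	copro[TTBR0_EL1 * 2 + 1] = (uint32_t)(ttbr0 >> 32);

	printf("c2_TTBR0      = 0x%08x\n", copro[TTBR0_EL1 * 2]);
	printf("c2_TTBR0_high = 0x%08x\n", copro[TTBR0_EL1 * 2 + 1]);
	return 0;
}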
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 889c908ee631..fe612a962576 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -19,7 +19,6 @@
#define __ARM64_KVM_MMIO_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
/*
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 2b1020a056ad..c6aae0b85cef 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/cpufeature.h>
/*
* As we only have the TTBR0_EL2 register, we cannot express
@@ -98,6 +99,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -158,7 +160,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
@@ -301,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+ int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+ return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
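kvm_get_vmid_bits() reads ID_AA64MMFR1_EL1 and reports 16-bit VMID support when the 4-bit VMIDBits field (bits [7:4]) equals 2, otherwise 8-bit. A hedged userspace model of that extraction follows; the kernel's cpuid_feature_extract_field() sign-extends the field, which makes no difference for the values checked here, and the register values fed to main() are illustrative.

#include <stdint.h>
#include <stdio.h>

#define ID_AA64MMFR1_VMIDBITS_SHIFT	4	/* field position, as in the kernel headers */

/* Extract an unsigned 4-bit ID register field. */
static unsigned int feature_extract_field(uint64_t reg, int shift)
{
	return (reg >> shift) & 0xf;
}

static unsigned int get_vmid_bits(uint64_t id_aa64mmfr1)
{
	/* VMIDBits == 2 means 16-bit VMIDs are implemented; otherwise 8 bits. */
	return feature_extract_field(id_aa64mmfr1, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2 ? 16 : 8;
}

int main(void)
{
	printf("VMIDBits=0 -> %u-bit VMID\n", get_vmid_bits(0x0));
	printf("VMIDBits=2 -> %u-bit VMID\n",
	       get_vmid_bits(0x2ULL << ID_AA64MMFR1_VMIDBITS_SHIFT));
	return 0;
}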
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6b17d08845c3..1d870666b8d9 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -73,6 +73,9 @@
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+#define KERNEL_START _text
+#define KERNEL_END _end
+
/*
* The size of the KASAN shadow region. This should be 1/8th of the
* size of the entire kernel virtual address space.
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 39e502f0ab80..67e8c0c5e3cc 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -20,6 +20,10 @@
#define TTBR_ASID_MASK (UL(0xffff) << 48)
#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+
+#include <asm/cpufeature.h>
+#include <asm/percpu.h>
typedef struct {
atomic64_t id;
@@ -39,6 +43,43 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+ int hyp_vectors_slot;
+ bp_hardening_cb_t fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+ struct bp_hardening_data *d;
+
+ if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ return;
+
+ d = arm64_get_bp_hardening_data();
+ if (d->fn)
+ d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void) { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
extern void paging_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
@@ -46,6 +87,13 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void hotplug_paging(phys_addr_t start, phys_addr_t size);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern void remove_pagetable(unsigned long start,
+ unsigned long end, bool direct);
+#endif
+#endif
#endif /* !__ASSEMBLY__ */
#endif
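arm64_apply_bp_hardening() above is a per-CPU indirection: if the ARM64_HARDEN_BRANCH_PREDICTOR capability is set and the per-CPU slot has a callback installed, invoke it; otherwise do nothing. A hedged, single-threaded sketch of the same pattern, with per-CPU storage reduced to one global slot and a dummy callback standing in for the real hardening sequence:

#include <stdbool.h>
#include <stdio.h>

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int hyp_vectors_slot;
	bp_hardening_cb_t fn;
};

/* One slot stands in for the per-CPU variable used by the kernel. */
static struct bp_hardening_data this_cpu_data;
static bool harden_bp_capable;

static void dummy_hardening(void)
{
	puts("branch predictor hardening callback invoked");
}

static void apply_bp_hardening(void)
{
	/* Nothing to do when the CPU doesn't advertise the capability. */
	if (!harden_bp_capable)
		return;
	if (this_cpu_data.fn)
		this_cpu_data.fn();
}

int main(void)
{
	apply_bp_hardening();		/* capability off: no-op */

	harden_bp_capable = true;
	this_cpu_data.fn = dummy_hardening;
	apply_bp_hardening();		/* capability on: callback runs */
	return 0;
}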
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e0d53cfca847..17d0ada5b473 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -28,20 +28,25 @@
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
+#include <linux/msm_rtb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ pid_t pid = task_pid_nr(next);
asm(
" msr contextidr_el1, %0\n"
" isb"
:
- : "r" (task_pid_nr(next)));
+ : "r" (pid));
+ uncached_logk(LOGK_CTXID, (void *)(u64)pid);
+
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ uncached_logk(LOGK_CTXID, (void *)(u64)task_pid_nr(next));
}
#endif
@@ -181,7 +186,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
- task_thread_info(tsk)->ttbr0 = ttbr;
+ WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b008a72f8bc0..ed8f4351cc2d 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -41,3 +41,8 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#endif /* __KERNEL__ */
#endif /* __ASM_PCI_H */
+
+#ifdef CONFIG_PCI_MSM
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+#endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 91b6be092ce2..0c38c189fb3b 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -33,4 +33,91 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
(regs)->pstate = PSR_MODE_EL1h; \
}
+static inline u32 armv8pmu_pmcr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmccntr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmxevcntr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmovsclr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrs %0, pmovsclr_el0" : "=r" (val));
+ return val;
+}
+
+static inline void armv8pmu_pmcr_write_reg(u32 val)
+{
+ asm volatile("msr pmcr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmselr_write_reg(u32 val)
+{
+ asm volatile("msr pmselr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmccntr_write_reg(u32 val)
+{
+ asm volatile("msr pmccntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevcntr_write_reg(u32 val)
+{
+ asm volatile("msr pmxevcntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevtyper_write_reg(u32 val)
+{
+ asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenset_write_reg(u32 val)
+{
+ asm volatile("msr pmcntenset_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenclr_write_reg(u32 val)
+{
+ asm volatile("msr pmcntenclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenset_write_reg(u32 val)
+{
+ asm volatile("msr pmintenset_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenclr_write_reg(u32 val)
+{
+ asm volatile("msr pmintenclr_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmovsclr_write_reg(u32 val)
+{
+ asm volatile("msr pmovsclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmuserenr_write_reg(u32 val)
+{
+ asm volatile("msr pmuserenr_el0, %0" :: "r" (val));
+}
+
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 81eee57ad519..13d6b496de92 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -19,6 +19,7 @@
#include <asm/bug.h>
#include <asm/proc-fns.h>
+#include <asm/bug.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -231,6 +232,16 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
+static inline pte_t pte_clear_rdonly(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_VALID));
+}
+
static inline pmd_t pmd_mkcont(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
@@ -238,6 +249,34 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+ pteval_t old = pte_val(*ptep);
+ pteval_t new = pte_val(pte);
+
+ /* Only problematic if valid -> valid */
+ if (!(old & new & PTE_VALID))
+ goto pte_ok;
+
+ /* Changing attributes should go via an invalid entry */
+ if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+ goto pte_bad;
+
+ /* Change of OA is only an issue if one mapping is writable */
+ if (!(old & new & PTE_RDONLY) &&
+ WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+ goto pte_bad;
+
+ goto pte_ok;
+
+pte_bad:
+ *ptep = __pte(0);
+ dsb(ishst);
+ asm("tlbi vmalle1is");
+ dsb(ish);
+ isb();
+pte_ok:
+#endif
+
*ptep = pte;
/*
@@ -364,6 +403,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
@@ -440,6 +480,11 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long) __va(pmd_page_paddr(pmd));
+}
+
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -491,6 +536,11 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long) __va(pud_page_paddr(pud));
+}
+
/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
@@ -543,6 +593,11 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return (unsigned long) __va(pgd_page_paddr(pgd));
+}
+
/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
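With CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE, set_pte() above rejects a live valid-to-valid update that changes the memory attributes, or that changes the output address while either mapping is writable, by first installing an invalid entry and flushing the TLB. Below is a hedged userspace model of just the decision logic; the bit positions are illustrative constants, not the real arm64 descriptor layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only - not the real arm64 descriptor format. */
#define DEMO_PTE_VALID		(1ULL << 0)
#define DEMO_PTE_RDONLY		(1ULL << 7)
#define DEMO_PTE_ATTRINDX_MASK	(7ULL << 2)
#define DEMO_PTE_PFN_MASK	(~0xfffULL)

/* Return true when the old->new transition needs break-before-make. */
static bool needs_break_before_make(uint64_t old, uint64_t new)
{
	if (!(old & new & DEMO_PTE_VALID))
		return false;		/* not a live valid->valid change */
	if ((old & DEMO_PTE_ATTRINDX_MASK) != (new & DEMO_PTE_ATTRINDX_MASK))
		return true;		/* attribute change must go via an invalid entry */
	if (!(old & new & DEMO_PTE_RDONLY) &&
	    (old & DEMO_PTE_PFN_MASK) != (new & DEMO_PTE_PFN_MASK))
		return true;		/* OA change while a mapping is writable */
	return false;
}

int main(void)
{
	uint64_t old = DEMO_PTE_VALID | 0x1000;
	uint64_t new = DEMO_PTE_VALID | 0x2000;	/* new output address, both writable */

	printf("needs break-before-make: %d\n", needs_break_before_make(old, new));
	return 0;
}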
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
new file mode 100644
index 000000000000..5af574d632fa
--- /dev/null
+++ b/arch/arm64/include/asm/probes.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm64/include/asm/probes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _ARM_PROBES_H
+#define _ARM_PROBES_H
+
+#include <asm/opcodes.h>
+
+struct kprobe;
+struct arch_specific_insn;
+
+typedef u32 kprobe_opcode_t;
+typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_specific_insn {
+ kprobe_opcode_t *insn;
+ pstate_check_t *pstate_cc;
+ kprobes_handler_t *handler;
+ /* restore address after step xol */
+ unsigned long restore;
+};
+
+#endif
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 16cef2e8449e..9da52c2c6c5c 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,8 +28,12 @@
struct mm_struct;
struct cpu_suspend_ctx;
+extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+ unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 4be934fde409..7766635158df 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -40,9 +40,9 @@
#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
-#define AARCH32_VECTORS_BASE 0xffff0000
+#define AARCH32_KUSER_HELPERS_BASE 0xffff0000
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
- AARCH32_VECTORS_BASE : STACK_TOP_MAX)
+ AARCH32_KUSER_HELPERS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
@@ -51,6 +51,9 @@ extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
#endif /* __KERNEL__ */
+extern unsigned int boot_reason;
+extern unsigned int cold_boot;
+
struct debug_info {
/* Have we suspended stepping by a debugger? */
int suspended_step;
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 7f94755089e2..1528d52eb8c0 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -121,6 +121,8 @@ struct pt_regs {
u64 unused; // maintain 16 byte alignment
};
+#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)
+
#define arch_has_single_step() (1)
#ifdef CONFIG_COMPAT
@@ -146,9 +148,57 @@ struct pt_regs {
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
-#define user_stack_pointer(regs) \
+#define GET_USP(regs) \
(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
+#define SET_USP(ptregs, value) \
+ (!compat_user_mode(regs) ? ((regs)->sp = value) : ((regs)->compat_sp = value))
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which the register value is read
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register located at @offset
+ * within @regs; @offset is the byte offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+ u64 val = 0;
+
+ offset >>= 3;
+ switch (offset) {
+ case 0 ... 30:
+ val = regs->regs[offset];
+ break;
+ case offsetof(struct pt_regs, sp) >> 3:
+ val = regs->sp;
+ break;
+ case offsetof(struct pt_regs, pc) >> 3:
+ val = regs->pc;
+ break;
+ case offsetof(struct pt_regs, pstate) >> 3:
+ val = regs->pstate;
+ break;
+ default:
+ val = 0;
+ }
+
+ return val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->regs[0];
@@ -158,8 +208,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
-#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
+#define GET_IP(regs) ((unsigned long)(regs)->pc)
+#define SET_IP(regs, value) ((regs)->pc = ((u64) (value)))
+
+#define GET_FP(ptregs) ((unsigned long)(ptregs)->regs[29])
+#define SET_FP(ptregs, value) ((ptregs)->regs[29] = ((u64) (value)))
+
+#include <asm-generic/ptrace.h>
+#undef profile_pc
extern unsigned long profile_pc(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
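regs_get_register() indexes pt_regs by byte offset: every tracked field is 64 bits wide, so offsets landing inside regs[0..30] select a general-purpose register and the sp/pc/pstate offsets select those fields. A hedged userspace analogue of the same idea, using offsetof() on a hypothetical, simplified register file rather than the kernel's pt_regs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified register file - not the kernel's pt_regs. */
struct fake_regs {
	uint64_t regs[31];
	uint64_t sp;
	uint64_t pc;
	uint64_t pstate;
};

#define DEMO_MAX_REG_OFFSET offsetof(struct fake_regs, pstate)

static uint64_t regs_get(const struct fake_regs *r, size_t offset)
{
	if (offset > DEMO_MAX_REG_OFFSET || offset % sizeof(uint64_t))
		return 0;		/* out of range or misaligned */
	/* Every tracked field is a uint64_t, so a byte offset maps cleanly. */
	return *(const uint64_t *)((const char *)r + offset);
}

int main(void)
{
	struct fake_regs r = { .regs = { [5] = 0x1234 }, .sp = 0xffff0000 };

	printf("x5 = 0x%llx\n", (unsigned long long)regs_get(&r, 5 * sizeof(uint64_t)));
	printf("sp = 0x%llx\n", (unsigned long long)regs_get(&r, offsetof(struct fake_regs, sp)));
	return 0;
}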
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
new file mode 100644
index 000000000000..b865e83e57f5
--- /dev/null
+++ b/arch/arm64/include/asm/sections.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char __alt_instructions[], __alt_instructions_end[];
+extern char __exception_text_start[], __exception_text_end[];
+extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
+extern char __idmap_text_start[], __idmap_text_end[];
+extern char __irqentry_text_start[], __irqentry_text_end[];
+extern char __mmuoff_data_start[], __mmuoff_data_end[];
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 81abea0b7650..bcd0e139ee4a 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -20,7 +20,51 @@
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
+struct compat_sigcontext {
+ /* We always set these two fields to 0 */
+ compat_ulong_t trap_no;
+ compat_ulong_t error_code;
+
+ compat_ulong_t oldmask;
+ compat_ulong_t arm_r0;
+ compat_ulong_t arm_r1;
+ compat_ulong_t arm_r2;
+ compat_ulong_t arm_r3;
+ compat_ulong_t arm_r4;
+ compat_ulong_t arm_r5;
+ compat_ulong_t arm_r6;
+ compat_ulong_t arm_r7;
+ compat_ulong_t arm_r8;
+ compat_ulong_t arm_r9;
+ compat_ulong_t arm_r10;
+ compat_ulong_t arm_fp;
+ compat_ulong_t arm_ip;
+ compat_ulong_t arm_sp;
+ compat_ulong_t arm_lr;
+ compat_ulong_t arm_pc;
+ compat_ulong_t arm_cpsr;
+ compat_ulong_t fault_address;
+};
+
+struct compat_ucontext {
+ compat_ulong_t uc_flags;
+ compat_uptr_t uc_link;
+ compat_stack_t uc_stack;
+ struct compat_sigcontext uc_mcontext;
+ compat_sigset_t uc_sigmask;
+ int __unused[32 - (sizeof(compat_sigset_t) / sizeof(int))];
+ compat_ulong_t uc_regspace[128] __aligned(8);
+};
+
+struct compat_sigframe {
+ struct compat_ucontext uc;
+ compat_ulong_t retcode[2];
+};
+
+struct compat_rt_sigframe {
+ struct compat_siginfo info;
+ struct compat_sigframe sig;
+};
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 74a9d301819f..81c69fe1adc0 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -18,7 +18,11 @@
#ifdef CONFIG_SPARSEMEM
#define MAX_PHYSMEM_BITS 48
+#ifndef CONFIG_MEMORY_HOTPLUG
#define SECTION_SIZE_BITS 30
+#else
+#define SECTION_SIZE_BITS CONFIG_HOTPLUG_SIZE_BITS
+#endif
#endif
#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fc037c3e2c27..da7a921d88d5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -103,7 +103,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
- " prfm pstl1strm, %2\n"
"1: ldaxr %w0, %2\n"
" eor %w1, %w0, %w0, ror #16\n"
" cbnz %w1, 2f\n"
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 4d19a03d316e..92d6a628e478 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,8 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 13
+#define NR_CTX_REGS 12
+#define NR_CALLEE_SAVED_REGS 12
/*
* struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -16,11 +17,34 @@ struct cpu_suspend_ctx {
u64 sp;
} __aligned(16);
-struct sleep_save_sp {
- phys_addr_t *save_ptr_stash;
- phys_addr_t save_ptr_stash_phys;
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+ struct cpu_suspend_ctx system_regs;
+ unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS];
};
+extern unsigned long *sleep_save_stash;
+
extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
+void _cpu_resume(void);
+
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
#endif
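The comment above explains why the sleep_stack_data buffer has to live in the caller's frame: __cpu_suspend_enter()'s own stack frame disappears when it returns, so any state saved there would be clobbered before cpu_resume() could use it. A hedged sketch of the caller-allocates pattern, with an illustrative save_state() standing in for the real save path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CALLEE_SAVED_REGS 12

/* Simplified stand-in for struct sleep_stack_data. */
struct sleep_state {
	uint64_t sp;
	uint64_t callee_saved_regs[NR_CALLEE_SAVED_REGS];
};

/* The callee only fills in the buffer; it never owns the storage, so the
 * data stays valid after this function's frame is torn down. */
static void save_state(struct sleep_state *state)
{
	memset(state, 0, sizeof(*state));
	state->sp = 0xdeadbeef;		/* illustrative value */
}

int main(void)
{
	struct sleep_state state;	/* allocated in the caller's frame */

	save_state(&state);
	printf("saved sp: 0x%llx\n", (unsigned long long)state.sp);
	return 0;
}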
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index accf6dc2dfe4..c768daa084ca 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -86,10 +86,21 @@
#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
(!!x)<<8 | 0x1f)
-/* SCTLR_EL1 */
-#define SCTLR_EL1_CP15BEN (0x1 << 5)
-#define SCTLR_EL1_SED (0x1 << 8)
-#define SCTLR_EL1_SPAN (0x1 << 23)
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M 1
+
+#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_SPAN (1 << 23)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_CP15BEN (1 << 5)
/* id_aa64isar0 */
@@ -101,6 +112,7 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 57f110bea6a8..2fbc254a8a37 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -44,6 +44,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+extern char* (*arch_read_hardware_id)(void);
#define show_unhandled_signals_ratelimited() \
({ \
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 67dd228c3f17..70c11c25d3e4 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -120,6 +120,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
+#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
+#define TIF_MM_RELEASED 24
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -140,10 +142,12 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ _TIF_FSCHECK)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index bbd362cd1ed1..7d35ea7b5b95 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -21,6 +21,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+unsigned long arch_get_cpu_efficiency(int cpu);
struct sched_domain;
#ifdef CONFIG_CPU_FREQ
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 8f05d3b21418..7fe6a2e1c93f 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -19,6 +19,7 @@
#define __ASM_TRAP_H
#include <linux/list.h>
+#include <asm/sections.h>
struct pt_regs;
@@ -36,17 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook);
static inline int __in_irqentry_text(unsigned long ptr)
{
- extern char __irqentry_text_start[];
- extern char __irqentry_text_end[];
-
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
static inline int in_exception_text(unsigned long ptr)
{
- extern char __exception_text_start[];
- extern char __exception_text_end[];
int in;
in = ptr >= (unsigned long)&__exception_text_start &&
@@ -55,4 +51,5 @@ static inline int in_exception_text(unsigned long ptr)
return in ? : __in_irqentry_text(ptr);
}
+static inline void get_pct_hook_init(void) {}
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index d39d8bde42d7..d0919bcb1953 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -73,6 +73,9 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+ /* On user-mode return, check fs is correct */
+ set_thread_flag(TIF_FSCHECK);
+
/*
* Enable/disable UAO so that copy_to_user() etc can access
* kernel memory with the unprivileged instructions.
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 839ce0031bd5..f2a952338f1e 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -28,6 +28,9 @@
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
+#ifdef CONFIG_VDSO32
+#include <generated/vdso32-offsets.h>
+#endif
#define VDSO_SYMBOL(base, name) \
({ \
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a63771eda..348b9be9efe7 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -20,16 +20,33 @@
#ifndef __ASSEMBLY__
+#ifndef _VDSO_WTM_CLOCK_SEC_T
+#define _VDSO_WTM_CLOCK_SEC_T
+typedef __u64 vdso_wtm_clock_nsec_t;
+#endif
+
+#ifndef _VDSO_XTIME_CLOCK_SEC_T
+#define _VDSO_XTIME_CLOCK_SEC_T
+typedef __u64 vdso_xtime_clock_sec_t;
+#endif
+
+#ifndef _VDSO_RAW_TIME_SEC_T
+#define _VDSO_RAW_TIME_SEC_T
+typedef __u64 vdso_raw_time_sec_t;
+#endif
+
struct vdso_data {
__u64 cs_cycle_last; /* Timebase at clocksource init */
- __u64 raw_time_sec; /* Raw time */
+ vdso_raw_time_sec_t raw_time_sec; /* Raw time */
__u64 raw_time_nsec;
- __u64 xtime_clock_sec; /* Kernel time */
- __u64 xtime_clock_nsec;
+ vdso_xtime_clock_sec_t xtime_clock_sec; /* Kernel time */
+ __u64 xtime_clock_snsec;
__u64 xtime_coarse_sec; /* Coarse time */
__u64 xtime_coarse_nsec;
__u64 wtm_clock_sec; /* Wall to monotonic time */
- __u64 wtm_clock_nsec;
+ vdso_wtm_clock_nsec_t wtm_clock_nsec;
+ __u32 btm_sec; /* monotonic to boot time */
+ __u32 btm_nsec;
__u32 tb_seq_count; /* Timebase sequence counter */
/* cs_* members must be adjacent and in this order (ldp accesses) */
__u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df5252dd7..46e0bbddee94 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,11 +18,30 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
+/*
+ * The arm64 hcall implementation uses x0 to specify the hcall type. A value
+ * less than 0xfff indicates a special hcall, such as get/set vector.
+ * Any other value is used as a pointer to the function to call.
+ */
+
+/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
+#define HVC_GET_VECTORS 0
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x1: Physical address of the new vector table.
+ */
+#define HVC_SET_VECTORS 1
+
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+
/*
* __boot_cpu_mode records what mode CPUs were booted in.
* A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +69,14 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ u64 el;
+
+ asm("mrs %0, CurrentEL" : "=r" (el));
+ return el == CurrentEL_EL2;
+}
+
/* The section containing the hypervisor text */
extern char __hyp_text_start[];
extern char __hyp_text_end[];
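The hcall convention documented above dispatches on x0: values below 0xfff select a built-in operation such as HVC_GET_VECTORS, anything else is treated as the address of a function to branch to. A hedged userspace sketch of that dispatch rule (converting a function pointer through uintptr_t is implementation-defined but fine on the usual platforms; the handler and main() are illustrative):

#include <stdint.h>
#include <stdio.h>

#define HVC_GET_VECTORS	0
#define HVC_SET_VECTORS	1
#define HVC_NR_SPECIAL	0xfff

static void reset_handler(void)
{
	puts("jumped to function passed in x0");
}

/* Model of the dispatch rule: values below 0xfff are special hcalls,
 * anything else is a code pointer to call. */
static void do_hcall(uintptr_t x0)
{
	if (x0 < HVC_NR_SPECIAL) {
		printf("special hcall %lu\n", (unsigned long)x0);
		return;
	}
	((void (*)(void))x0)();
}

int main(void)
{
	do_hcall(HVC_GET_VECTORS);
	do_hcall((uintptr_t)reset_handler);
	return 0;
}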
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 825b0fe51c2b..13a97aa2285f 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -2,21 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += kvm_para.h
-header-y += perf_regs.h
-header-y += param.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index ee469be1ae1d..c731ca011ca3 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -16,6 +16,7 @@
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H
+#ifdef CONFIG_64BIT
#include <linux/types.h>
/*
@@ -61,4 +62,35 @@ struct esr_context {
__u64 esr;
};
+#else /* CONFIG_64BIT */
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long arm_r0;
+ unsigned long arm_r1;
+ unsigned long arm_r2;
+ unsigned long arm_r3;
+ unsigned long arm_r4;
+ unsigned long arm_r5;
+ unsigned long arm_r6;
+ unsigned long arm_r7;
+ unsigned long arm_r8;
+ unsigned long arm_r9;
+ unsigned long arm_r10;
+ unsigned long arm_fp;
+ unsigned long arm_ip;
+ unsigned long arm_sp;
+ unsigned long arm_lr;
+ unsigned long arm_pc;
+ unsigned long arm_cpsr;
+ unsigned long fault_address;
+};
+#endif /* CONFIG_64BIT */
#endif /* _UAPI__ASM_SIGCONTEXT_H */