Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild              |   4
-rw-r--r--  arch/arm/include/asm/cacheflush.h        |  33
-rw-r--r--  arch/arm/include/asm/cpuidle.h           |   2
-rw-r--r--  arch/arm/include/asm/device.h            |   2
-rw-r--r--  arch/arm/include/asm/dma-contiguous.h    |  15
-rw-r--r--  arch/arm/include/asm/dma-iommu.h         |  29
-rw-r--r--  arch/arm/include/asm/dma-mapping.h       |   7
-rw-r--r--  arch/arm/include/asm/etmv4x.h            | 387
-rw-r--r--  arch/arm/include/asm/glue-cache.h        |   8
-rw-r--r--  arch/arm/include/asm/hardware/debugv8.h  | 247
-rw-r--r--  arch/arm/include/asm/io.h                | 126
-rw-r--r--  arch/arm/include/asm/kvm_arm.h           |  34
-rw-r--r--  arch/arm/include/asm/kvm_host.h          |  14
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h           |   6
-rw-r--r--  arch/arm/include/asm/mach-types.h        |   1
-rw-r--r--  arch/arm/include/asm/perf_event.h        |  87
-rw-r--r--  arch/arm/include/asm/processor.h         |   3
-rw-r--r--  arch/arm/include/asm/ptrace.h            |   1
-rw-r--r--  arch/arm/include/asm/system_misc.h       |   1
-rw-r--r--  arch/arm/include/asm/thread_info.h       |   1
-rw-r--r--  arch/arm/include/asm/topology.h          |   4
-rw-r--r--  arch/arm/include/asm/traps.h             |   1
-rw-r--r--  arch/arm/include/asm/types.h             |  40
-rw-r--r--  arch/arm/include/asm/unistd.h            |  26
-rw-r--r--  arch/arm/include/asm/vdso_datapage.h     |  37
-rw-r--r--  arch/arm/include/asm/virt.h              |   9
26 files changed, 1026 insertions(+), 99 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 628a38a11a70..730b4e850f6f 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += current.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
@@ -36,3 +37,6 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
+
+generated-y += mach-types.h
+generated-y += unistd-nr.h
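
(Annotation, not part of the patch: a "generic-y" entry asks Kbuild to emit a
one-line wrapper header under include/generated/asm/, while "generated-y"
marks headers the build itself produces, here the mach-types and
syscall-count headers. A sketch of the wrapper Kbuild typically generates for
the new hash.h entry:)

	/* include/generated/asm/hash.h, generated wrapper (illustrative) */
	#include <asm-generic/hash.h>
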
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 9156fc303afd..012a3aafcf33 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -94,6 +94,21 @@
* DMA Cache Coherency
* ===================
*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -115,6 +130,8 @@ struct cpu_cache_fns {
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
};
@@ -140,6 +157,8 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -159,6 +178,11 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -518,4 +542,13 @@ static inline void secure_flush_area(const void *addr, size_t size)
outer_flush_range(phys, phys + size);
}
+#ifdef CONFIG_FREE_PAGES_RDONLY
+#define mark_addr_rdonly(a) set_memory_ro((unsigned long)a, 1)
+#define mark_addr_rdwrite(a) set_memory_rw((unsigned long)a, 1)
+#else
+#define mark_addr_rdonly(a)
+#define mark_addr_rdwrite(a)
+#endif
+
+
#endif
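
A minimal usage sketch (hypothetical helpers, not part of the patch): the new
entry points pair up around a streaming DMA transfer. 'buf' and 'len' are
assumed cache-line aligned; misaligned edge lines would be written back by
dmac_inv_range() as documented above.

	#include <asm/cacheflush.h>

	static void example_cpu_to_device(void *buf, size_t len)
	{
		/* Write dirty lines back so the device observes current data. */
		dmac_clean_range(buf, buf + len);
	}

	static void example_device_to_cpu(void *buf, size_t len)
	{
		/* Discard stale lines so the CPU re-reads what the device wrote. */
		dmac_inv_range(buf, buf + len);
	}
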
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0f8424924902..3848259bebf8 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -30,7 +30,7 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct device_node;
struct cpuidle_ops {
- int (*suspend)(int cpu, unsigned long arg);
+ int (*suspend)(unsigned long arg);
int (*init)(struct device_node *, int cpu);
};
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 4111592f0130..d8a572f9c187 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,7 +7,7 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
+ const struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 4f8e9e5514b1..d54f8feec78f 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -1,14 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef ASMARM_DMA_CONTIGUOUS_H
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
#include <linux/types.h>
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#endif
-#endif
#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f96651..74643f5b41c4 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -8,6 +8,7 @@
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
#include <linux/kref.h>
+#include <linux/dma-mapping-fast.h>
struct dma_iommu_mapping {
/* iommu specific data */
@@ -22,8 +23,12 @@ struct dma_iommu_mapping {
spinlock_t lock;
struct kref kref;
+
+ struct dma_fast_smmu_mapping *fast;
};
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
@@ -33,5 +38,29 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
+#else /* !CONFIG_ARM_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+ return NULL;
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif /* CONFIG_ARM_DMA_USE_IOMMU */
+
#endif /* __KERNEL__ */
#endif
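
A hedged usage sketch (hypothetical probe path): with the stubs above, the
same caller builds whether or not CONFIG_ARM_DMA_USE_IOMMU is set, and the
attach degrades cleanly to -ENODEV when it is not.

	static int example_iommu_setup(struct device *dev)
	{
		struct dma_iommu_mapping *mapping;
		int ret;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x80000000, SZ_128M);
		if (IS_ERR_OR_NULL(mapping))
			return mapping ? PTR_ERR(mapping) : -ENODEV;

		ret = arm_iommu_attach_device(dev, mapping);
		if (ret)
			arm_iommu_release_mapping(mapping);
		return ret;
	}
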
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b91a2d17a521..c8bfa1aabd6a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -17,14 +17,14 @@
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
@@ -32,7 +32,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *ops)
{
BUG_ON(!dev);
dev->archdata.dma_ops = ops;
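
Since the accessors are now const-qualified, a per-device ops table can live
in rodata. A minimal sketch (my_dma_ops is hypothetical):

	static const struct dma_map_ops my_dma_ops;	/* hypothetical table */

	static void example_install_ops(struct device *dev)
	{
		const struct dma_map_ops *ops;

		set_dma_ops(dev, &my_dma_ops);
		ops = get_dma_ops(dev);	/* callers now get a const pointer */
		(void)ops;
	}
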
diff --git a/arch/arm/include/asm/etmv4x.h b/arch/arm/include/asm/etmv4x.h
new file mode 100644
index 000000000000..5251d55df3b3
--- /dev/null
+++ b/arch/arm/include/asm/etmv4x.h
@@ -0,0 +1,387 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+
+/* 32 bit register read for AArch32 */
+#define trc_readl(reg) RSYSL_##reg()
+#define trc_readq(reg) RSYSL_##reg()
+
+/* 32 bit register write for AArch32 */
+#define trc_write(val, reg) WSYS_##reg(val)
+
+#define MRC(op0, op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrc p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+#define MCR(val, op0, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1() MRC(15, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val) MCR(val, 15, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS() MRC(14, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR() MRC(14, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR() MRC(14, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0() MRC(14, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0() MRC(14, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1() MRC(14, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2() MRC(14, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3() MRC(14, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0() MRC(14, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1() MRC(14, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2() MRC(14, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3() MRC(14, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0() MRC(14, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1() MRC(14, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2() MRC(14, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3() MRC(14, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR() MRC(14, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH() MRC(14, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID() MRC(14, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R() MRC(14, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R() MRC(14, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR() MRC(14, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0() MRC(14, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1() MRC(14, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10() MRC(14, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11() MRC(14, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12() MRC(14, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13() MRC(14, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2() MRC(14, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3() MRC(14, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4() MRC(14, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5() MRC(14, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6() MRC(14, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7() MRC(14, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8() MRC(14, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9() MRC(14, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0() MRC(14, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR() MRC(14, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR() MRC(14, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10() MRC(14, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11() MRC(14, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12() MRC(14, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13() MRC(14, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14() MRC(14, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15() MRC(14, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2() MRC(14, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3() MRC(14, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4() MRC(14, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5() MRC(14, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6() MRC(14, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7() MRC(14, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8() MRC(14, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9() MRC(14, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16() MRC(14, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17() MRC(14, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18() MRC(14, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19() MRC(14, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20() MRC(14, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21() MRC(14, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22() MRC(14, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23() MRC(14, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24() MRC(14, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25() MRC(14, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26() MRC(14, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27() MRC(14, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28() MRC(14, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29() MRC(14, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30() MRC(14, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31() MRC(14, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0() MRC(14, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1() MRC(14, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2() MRC(14, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR() MRC(14, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR() MRC(14, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR() MRC(14, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR() MRC(14, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR() MRC(14, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR() MRC(14, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR() MRC(14, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR() MRC(14, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR() MRC(14, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR() MRC(14, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0() MRC(14, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1() MRC(14, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2() MRC(14, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3() MRC(14, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4() MRC(14, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5() MRC(14, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6() MRC(14, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7() MRC(14, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0() MRC(14, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1() MRC(14, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2() MRC(14, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3() MRC(14, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4() MRC(14, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5() MRC(14, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6() MRC(14, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7() MRC(14, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0() MRC(14, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1() MRC(14, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2() MRC(14, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3() MRC(14, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4() MRC(14, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5() MRC(14, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6() MRC(14, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7() MRC(14, 1, c1, c7, 3)
+
+/*
+ * 64-bit registers, accessed here as 32-bit (the upper 32 bits are
+ * ignored). A read from a 32-bit register location using a 64-bit
+ * access results in the upper 32 bits being returned as RES0.
+ */
+#define RSYSL_ETMACATR0() MRC(14, 1, c2, c0, 2)
+#define RSYSL_ETMACATR1() MRC(14, 1, c2, c2, 2)
+#define RSYSL_ETMACATR2() MRC(14, 1, c2, c4, 2)
+#define RSYSL_ETMACATR3() MRC(14, 1, c2, c6, 2)
+#define RSYSL_ETMACATR4() MRC(14, 1, c2, c8, 2)
+#define RSYSL_ETMACATR5() MRC(14, 1, c2, c10, 2)
+#define RSYSL_ETMACATR6() MRC(14, 1, c2, c12, 2)
+#define RSYSL_ETMACATR7() MRC(14, 1, c2, c14, 2)
+#define RSYSL_ETMACATR8() MRC(14, 1, c2, c0, 3)
+#define RSYSL_ETMACATR9() MRC(14, 1, c2, c2, 3)
+#define RSYSL_ETMACATR10() MRC(14, 1, c2, c4, 3)
+#define RSYSL_ETMACATR11() MRC(14, 1, c2, c6, 3)
+#define RSYSL_ETMACATR12() MRC(14, 1, c2, c8, 3)
+#define RSYSL_ETMACATR13() MRC(14, 1, c2, c10, 3)
+#define RSYSL_ETMACATR14() MRC(14, 1, c2, c12, 3)
+#define RSYSL_ETMACATR15() MRC(14, 1, c2, c14, 3)
+#define RSYSL_ETMCIDCVR0() MRC(14, 1, c3, c0, 0)
+#define RSYSL_ETMCIDCVR1() MRC(14, 1, c3, c2, 0)
+#define RSYSL_ETMCIDCVR2() MRC(14, 1, c3, c4, 0)
+#define RSYSL_ETMCIDCVR3() MRC(14, 1, c3, c6, 0)
+#define RSYSL_ETMCIDCVR4() MRC(14, 1, c3, c8, 0)
+#define RSYSL_ETMCIDCVR5() MRC(14, 1, c3, c10, 0)
+#define RSYSL_ETMCIDCVR6() MRC(14, 1, c3, c12, 0)
+#define RSYSL_ETMCIDCVR7() MRC(14, 1, c3, c14, 0)
+#define RSYSL_ETMACVR0() MRC(14, 1, c2, c0, 0)
+#define RSYSL_ETMACVR1() MRC(14, 1, c2, c2, 0)
+#define RSYSL_ETMACVR2() MRC(14, 1, c2, c4, 0)
+#define RSYSL_ETMACVR3() MRC(14, 1, c2, c6, 0)
+#define RSYSL_ETMACVR4() MRC(14, 1, c2, c8, 0)
+#define RSYSL_ETMACVR5() MRC(14, 1, c2, c10, 0)
+#define RSYSL_ETMACVR6() MRC(14, 1, c2, c12, 0)
+#define RSYSL_ETMACVR7() MRC(14, 1, c2, c14, 0)
+#define RSYSL_ETMACVR8() MRC(14, 1, c2, c0, 1)
+#define RSYSL_ETMACVR9() MRC(14, 1, c2, c2, 1)
+#define RSYSL_ETMACVR10() MRC(14, 1, c2, c4, 1)
+#define RSYSL_ETMACVR11() MRC(14, 1, c2, c6, 1)
+#define RSYSL_ETMACVR12() MRC(14, 1, c2, c8, 1)
+#define RSYSL_ETMACVR13() MRC(14, 1, c2, c10, 1)
+#define RSYSL_ETMACVR14() MRC(14, 1, c2, c12, 1)
+#define RSYSL_ETMACVR15() MRC(14, 1, c2, c14, 1)
+#define RSYSL_ETMVMIDCVR0() MRC(14, 1, c3, c0, 1)
+#define RSYSL_ETMVMIDCVR1() MRC(14, 1, c3, c2, 1)
+#define RSYSL_ETMVMIDCVR2() MRC(14, 1, c3, c4, 1)
+#define RSYSL_ETMVMIDCVR3() MRC(14, 1, c3, c6, 1)
+#define RSYSL_ETMVMIDCVR4() MRC(14, 1, c3, c8, 1)
+#define RSYSL_ETMVMIDCVR5() MRC(14, 1, c3, c10, 1)
+#define RSYSL_ETMVMIDCVR6() MRC(14, 1, c3, c12, 1)
+#define RSYSL_ETMVMIDCVR7() MRC(14, 1, c3, c14, 1)
+#define RSYSL_ETMDVCVR0() MRC(14, 1, c2, c0, 4)
+#define RSYSL_ETMDVCVR1() MRC(14, 1, c2, c4, 4)
+#define RSYSL_ETMDVCVR2() MRC(14, 1, c2, c8, 4)
+#define RSYSL_ETMDVCVR3() MRC(14, 1, c2, c12, 4)
+#define RSYSL_ETMDVCVR4() MRC(14, 1, c2, c0, 5)
+#define RSYSL_ETMDVCVR5() MRC(14, 1, c2, c4, 5)
+#define RSYSL_ETMDVCVR6() MRC(14, 1, c2, c8, 5)
+#define RSYSL_ETMDVCVR7() MRC(14, 1, c2, c12, 5)
+#define RSYSL_ETMDVCMR0() MRC(14, 1, c2, c0, 6)
+#define RSYSL_ETMDVCMR1() MRC(14, 1, c2, c4, 6)
+#define RSYSL_ETMDVCMR2() MRC(14, 1, c2, c8, 6)
+#define RSYSL_ETMDVCMR3() MRC(14, 1, c2, c12, 6)
+#define RSYSL_ETMDVCMR4() MRC(14, 1, c2, c0, 7)
+#define RSYSL_ETMDVCMR5() MRC(14, 1, c2, c4, 7)
+#define RSYSL_ETMDVCMR6() MRC(14, 1, c2, c8, 7)
+#define RSYSL_ETMDVCMR7() MRC(14, 1, c2, c12, 7)
+
+/*
+ * 32 and 64 bit registers
+ * A write to a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits of the access being ignored.
+ */
+#define WSYS_ETMAUXCTLR(val) MCR(val, 14, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val) MCR(val, 14, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val) MCR(val, 14, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val) MCR(val, 14, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val) MCR(val, 14, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val) MCR(val, 14, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val) MCR(val, 14, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val) MCR(val, 14, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val) MCR(val, 14, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val) MCR(val, 14, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val) MCR(val, 14, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val) MCR(val, 14, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val) MCR(val, 14, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val) MCR(val, 14, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val) MCR(val, 14, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val) MCR(val, 14, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val) MCR(val, 14, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val) MCR(val, 14, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val) MCR(val, 14, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val) MCR(val, 14, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val) MCR(val, 14, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val) MCR(val, 14, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val) MCR(val, 14, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val) MCR(val, 14, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val) MCR(val, 14, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val) MCR(val, 14, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val) MCR(val, 14, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val) MCR(val, 14, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val) MCR(val, 14, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val) MCR(val, 14, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val) MCR(val, 14, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val) MCR(val, 14, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val) MCR(val, 14, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val) MCR(val, 14, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val) MCR(val, 14, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val) MCR(val, 14, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val) MCR(val, 14, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val) MCR(val, 14, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val) MCR(val, 14, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val) MCR(val, 14, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val) MCR(val, 14, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val) MCR(val, 14, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val) MCR(val, 14, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val) MCR(val, 14, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val) MCR(val, 14, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val) MCR(val, 14, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val) MCR(val, 14, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val) MCR(val, 14, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val) MCR(val, 14, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val) MCR(val, 14, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val) MCR(val, 14, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val) MCR(val, 14, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val) MCR(val, 14, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val) MCR(val, 14, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val) MCR(val, 14, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val) MCR(val, 14, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val) MCR(val, 14, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val) MCR(val, 14, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val) MCR(val, 14, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val) MCR(val, 14, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val) MCR(val, 14, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val) MCR(val, 14, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val) MCR(val, 14, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val) MCR(val, 14, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val) MCR(val, 14, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val) MCR(val, 14, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val) MCR(val, 14, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val) MCR(val, 14, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val) MCR(val, 14, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val) MCR(val, 14, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val) MCR(val, 14, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val) MCR(val, 14, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val) MCR(val, 14, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val) MCR(val, 14, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val) MCR(val, 14, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val) MCR(val, 14, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val) MCR(val, 14, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val) MCR(val, 14, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val) MCR(val, 14, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val) MCR(val, 14, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val) MCR(val, 14, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val) MCR(val, 14, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val) MCR(val, 14, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val) MCR(val, 14, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val) MCR(val, 14, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val) MCR(val, 14, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val) MCR(val, 14, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val) MCR(val, 14, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val) MCR(val, 14, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val) MCR(val, 14, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val) MCR(val, 14, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val) MCR(val, 14, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val) MCR(val, 14, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val) MCR(val, 14, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val) MCR(val, 14, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val) MCR(val, 14, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val) MCR(val, 14, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val) MCR(val, 14, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val) MCR(val, 14, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val) MCR(val, 14, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val) MCR(val, 14, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val) MCR(val, 14, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val) MCR(val, 14, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val) MCR(val, 14, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val) MCR(val, 14, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val) MCR(val, 14, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val) MCR(val, 14, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val) MCR(val, 14, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val) MCR(val, 14, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val) MCR(val, 14, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val) MCR(val, 14, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val) MCR(val, 14, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val) MCR(val, 14, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val) MCR(val, 14, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val) MCR(val, 14, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val) MCR(val, 14, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val) MCR(val, 14, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val) MCR(val, 14, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val) MCR(val, 14, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val) MCR(val, 14, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val) MCR(val, 14, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val) MCR(val, 14, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val) MCR(val, 14, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val) MCR(val, 14, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val) MCR(val, 14, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val) MCR(val, 14, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val) MCR(val, 14, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val) MCR(val, 14, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val) MCR(val, 14, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val) MCR(val, 14, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val) MCR(val, 14, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val) MCR(val, 14, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val) MCR(val, 14, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val) MCR(val, 14, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val) MCR(val, 14, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val) MCR(val, 14, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val) MCR(val, 14, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val) MCR(val, 14, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val) MCR(val, 14, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val) MCR(val, 14, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val) MCR(val, 14, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val) MCR(val, 14, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val) MCR(val, 14, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val) MCR(val, 14, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val) MCR(val, 14, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val) MCR(val, 14, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val) MCR(val, 14, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val) MCR(val, 14, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val) MCR(val, 14, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val) MCR(val, 14, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val) MCR(val, 14, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val) MCR(val, 14, 1, c1, c7, 3)
+
+#endif
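
The trc_readl()/trc_write() wrappers paste the register name into the
matching RSYSL_*() or WSYS_*() accessor, so the name must match one of the
#defines above exactly. A short sketch, assuming the OS lock is cleared
before the unit is touched:

	static u32 example_etm_trace_id(void)
	{
		trc_write(0x0, ETMOSLAR);	/* clear the OS lock */
		isb();
		return trc_readl(ETMTRACEIDR);
	}
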
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cab07f69382d..7a4893e61866 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -159,6 +159,14 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range __glue(_CACHE, _dma_inv_range)
+#define dmac_clean_range __glue(_CACHE, _dma_clean_range)
+#define dmac_map_area __glue(_CACHE, _dma_map_area)
+#define dmac_unmap_area __glue(_CACHE, _dma_unmap_area)
+
+#define __dma_map_area dmac_map_area
+#define __dma_unmap_area dmac_unmap_area
+#define __dma_flush_range dmac_flush_range
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/debugv8.h b/arch/arm/include/asm/hardware/debugv8.h
new file mode 100644
index 000000000000..054226cbe7ce
--- /dev/null
+++ b/arch/arm/include/asm/hardware/debugv8.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_DEBUGV8_H
+#define __ASM_HARDWARE_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg) RCP14_##reg()
+#define dbg_write(val, reg) WCP14_##reg(val)
+
+/* MRC14 registers */
+#define MRC14(op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+/* MCR14 registers */
+#define MCR14(val, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGDSAR,
+ * DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0)
+#define RCP14_DBGDCCINT() MRC14(0, c0, c2, 0)
+#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR() MRC14(0, c0, c7, 0)
+#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2)
+#define RCP14_DBGOSECCR() MRC14(0, c0, c6, 2)
+#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDCCINT(val) MCR14(val, 0, c0, c2, 0)
+#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGOSECCR(val) MCR14(val, 0, c0, c6, 2)
+#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6)
+
+#endif
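
Likewise, dbg_read()/dbg_write() paste the register name into the RCP14_*()
and WCP14_*() forms. A sketch of saving and restoring one breakpoint register
pair, for example across a power collapse (hypothetical caller):

	static void example_save_bp0(u32 *vr, u32 *cr)
	{
		*vr = dbg_read(DBGBVR0);
		*cr = dbg_read(DBGBCR0);
	}

	static void example_restore_bp0(u32 vr, u32 cr)
	{
		dbg_write(vr, DBGBVR0);
		dbg_write(cr, DBGBCR0);
		isb();
	}
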
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 485982084fe9..c5d7c8b995eb 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -29,6 +29,7 @@
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
+#include <linux/msm_rtb.h>
#include <xen/xen.h>
/*
@@ -62,23 +63,21 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
* the bus. Rather than special-case the machine, just let the compiler
* generate the access for CPUs prior to ARMv6.
*/
-#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_readw_no_log(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew_no_log(v, a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
* When running under a hypervisor, we want to avoid I/O accesses with
* writeback addressing modes as these incur a significant performance
* overhead (the address generation must be emulated in software).
*/
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
: : "Q" (*(volatile u16 __force *)addr), "r" (val));
}
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %0, %1"
@@ -88,22 +87,30 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
}
#endif
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
: : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
: : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
+{
+ register u64 v asm ("r2");
+
+ v = val;
+
+ asm volatile("strd %1, %0"
+ : "+Qo" (*(volatile u64 __force *)addr)
+ : "r" (v));
+}
+
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
{
u8 val;
asm volatile("ldrb %0, %1"
@@ -112,8 +119,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
return val;
}
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
{
u32 val;
asm volatile("ldr %0, %1"
@@ -122,6 +128,58 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return val;
}
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
+{
+ register u64 val asm ("r2");
+
+ asm volatile("ldrd %1, %0"
+ : "+Qo" (*(volatile u64 __force *)addr),
+ "=r" (val));
+ return val;
+}
+
+/*
+ * Some clients cannot, or do not want to, support logging of their
+ * register accesses. The *_no_log variants below exist for that case,
+ * but clients should carefully consider why logging is not viable.
+ */
+
+#define __raw_write_logged(v, a, _t) ({ \
+ int _ret; \
+ volatile void __iomem *_a = (a); \
+ void *_addr = (void __force *)(_a); \
+ _ret = uncached_logk(LOGK_WRITEL, _addr); \
+ ETB_WAYPOINT; \
+ __raw_write##_t##_no_log((v), _a); \
+ if (_ret) \
+ LOG_BARRIER; \
+ })
+
+
+#define __raw_writeb(v, a) __raw_write_logged((v), (a), b)
+#define __raw_writew(v, a) __raw_write_logged((v), (a), w)
+#define __raw_writel(v, a) __raw_write_logged((v), (a), l)
+#define __raw_writeq(v, a) __raw_write_logged((v), (a), q)
+
+#define __raw_read_logged(a, _l, _t) ({ \
+ unsigned _t __a; \
+ const volatile void __iomem *_a = (a); \
+ void *_addr = (void __force *)(_a); \
+ int _ret; \
+ _ret = uncached_logk(LOGK_READL, _addr); \
+ ETB_WAYPOINT; \
+ __a = __raw_read##_l##_no_log(_a);\
+ if (_ret) \
+ LOG_BARRIER; \
+ __a; \
+ })
+
+
+#define __raw_readb(a) __raw_read_logged((a), b, char)
+#define __raw_readw(a) __raw_read_logged((a), w, short)
+#define __raw_readl(a) __raw_read_logged((a), l, int)
+#define __raw_readq(a) __raw_read_logged((a), q, long long)
+
/*
* Architecture ioremap implementation.
*/
@@ -291,18 +349,32 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl(c)); __r; })
-
-#define writeb_relaxed(v,c) __raw_writeb(v,c)
-#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
-#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+ __raw_readq(c)); __r; })
+#define readb_relaxed_no_log(c) ({ u8 __r = __raw_readb_no_log(c); __r; })
+#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl_no_log(c)); __r; })
+#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+ __raw_readq_no_log(c)); __r; })
+
+
+#define writeb_relaxed(v, c) __raw_writeb(v, c)
+#define writew_relaxed(v, c) __raw_writew((__force u16) cpu_to_le16(v), c)
+#define writel_relaxed(v, c) __raw_writel((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed(v, c) __raw_writeq((__force u64) cpu_to_le64(v), c)
+#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
+#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) cpu_to_le64(v), c)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writeq(v, c) ({ __iowmb(); writeq_relaxed(v, c); })
#define readsb(p,d,l) __raw_readsb(p,d,l)
#define readsw(p,d,l) __raw_readsw(p,d,l)
@@ -401,6 +473,23 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
+/*
+ * io{read,write}{8,16,32,64} macros
+ */
+#ifndef ioread8
+#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+#define ioread64(p) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(p)); __iormb(); __v; })
+
+#define ioread64be(p) ({ u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+
+#define iowrite8(v, p) ({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v, p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v, p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite64(v, p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_le64(v), p); })
+
+#define iowrite64be(v, p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
/*
* io{read,write}{16,32}be() macros
@@ -419,6 +508,7 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif
+#endif
struct pci_dev;
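
A sketch of the intended split between logged and unlogged accessors (the
device base and register offsets are hypothetical): configuration writes go
through the default, RTB-logged path, while a hot status poll opts out.

	static void example_kick_and_poll(void __iomem *base)
	{
		writel_relaxed(0x1, base + 0x00);	/* logged via RTB */

		while (!(readl_relaxed_no_log(base + 0x04) & 0x1))
			cpu_relax();			/* unlogged poll */
	}
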
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 14602e883509..98d6de177b7a 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -19,6 +19,7 @@
#ifndef __ARM_KVM_ARM_H__
#define __ARM_KVM_ARM_H__
+#include <linux/const.h>
#include <linux/types.h>
/* Hyp Configuration Register (HCR) bits */
@@ -132,10 +133,9 @@
* space.
*/
#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL))
+#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
/* Virtualization Translation Control Register (VTCR) bits */
#define VTCR_SH0 (3 << 12)
@@ -161,17 +161,17 @@
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_X)
-#define VTTBR_VMID_SHIFT (48LLU)
-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
+#define VTTBR_VMID_SHIFT _AC(48, ULL)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << (size)) - 1) << VTTBR_VMID_SHIFT)
/* Hyp Syndrome Register (HSR) bits */
#define HSR_EC_SHIFT (26)
-#define HSR_EC (0x3fU << HSR_EC_SHIFT)
-#define HSR_IL (1U << 25)
+#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
+#define HSR_IL (_AC(1, UL) << 25)
#define HSR_ISS (HSR_IL - 1)
#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (1U << HSR_ISV_SHIFT)
+#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
#define HSR_FSC (0x3f)
@@ -179,9 +179,9 @@
#define HSR_SSE (1 << 21)
#define HSR_WNR (1 << 6)
#define HSR_CV_SHIFT (24)
-#define HSR_CV (1U << HSR_CV_SHIFT)
+#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
#define HSR_COND_SHIFT (20)
-#define HSR_COND (0xfU << HSR_COND_SHIFT)
+#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_ACCESS (0x08)
@@ -210,13 +210,13 @@
#define HSR_EC_DABT_HYP (0x25)
#define HSR_EC_MAX (0x3f)
-#define HSR_WFI_IS_WFE (1U << 0)
+#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-#define HSR_DABT_S1PTW (1U << 7)
-#define HSR_DABT_CM (1U << 8)
-#define HSR_DABT_EA (1U << 9)
+#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
+#define HSR_DABT_CM (_AC(1, UL) << 8)
+#define HSR_DABT_EA (_AC(1, UL) << 9)
#define kvm_arm_exception_type \
{0, "RESET" }, \
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 6692982c9b57..bedaf65c0ff9 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -214,6 +214,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
+static inline void __cpu_init_stage2(void)
+{
+}
+
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t phys_idmap_start)
+{
+ /*
+ * TODO
+ * kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
+ */
+}
+
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
@@ -226,7 +239,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 23d5cad56ddc..ebf866a3a8c8 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -66,6 +66,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -272,6 +273,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start) { }
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+ return 8;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h
deleted file mode 100644
index 948178cc6ba8..000000000000
--- a/arch/arm/include/asm/mach-types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/mach-types.h>
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 4f9dec489931..306c4f4e778e 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -26,4 +26,91 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
(regs)->ARM_cpsr = SVC_MODE; \
}
+static inline u32 armv8pmu_pmcr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmccntr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmxevcntr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+ return val;
+}
+
+static inline u32 armv8pmu_pmovsclr_read_reg(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+ return val;
+}
+
+static inline void armv8pmu_pmcr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+static inline void armv8pmu_pmselr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+}
+
+static inline void armv8pmu_pmccntr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
+}
+
+static inline void armv8pmu_pmxevcntr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
+}
+
+static inline void armv8pmu_pmxevtyper_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+}
+
+static inline void armv8pmu_pmcntenset_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+}
+
+static inline void armv8pmu_pmcntenclr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+}
+
+static inline void armv8pmu_pmintenset_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
+}
+
+static inline void armv8pmu_pmintenclr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+}
+
+static inline void armv8pmu_pmovsclr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+}
+
+static inline void armv8pmu_pmuserenr_write_reg(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (val));
+}
+
#endif /* __ARM_PERF_EVENT_H__ */
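
A sketch using the accessors above to read the cycle counter. Bit positions
follow the ARMv8 PMUv3 layout: E is bit 0 and C (cycle counter reset) is
bit 2 of PMCR, and bit 31 of PMCNTENSET selects the cycle counter.

	static u32 example_read_cycles(void)
	{
		/* Enable the PMU and reset the cycle counter (PMCR.E | PMCR.C). */
		armv8pmu_pmcr_write_reg(armv8pmu_pmcr_read_reg() | 0x5);
		armv8pmu_pmcntenset_write_reg(1U << 31);
		isb();
		return armv8pmu_pmccntr_read_reg();
	}
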
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 08509183c7df..f59a19607cb0 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -30,6 +30,9 @@
#define STACK_TOP_MAX TASK_SIZE
#endif
+extern unsigned int boot_reason;
+extern unsigned int cold_boot;
+
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 51622ba7c4a6..d3c0c23703b6 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -121,7 +121,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
extern int regs_query_register_offset(const char *name);
-extern const char *regs_query_register_name(unsigned int offset);
extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 1fed41440af9..84e65cb22c4b 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -36,6 +36,7 @@ static inline void harden_branch_predictor(void)
#define UDBG_BUS (1 << 4)
extern unsigned int user_debug;
+extern char* (*arch_read_hardware_id)(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index df8420672c7e..cfbf32bb9fc4 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
+#define TIF_MM_RELEASED 21 /* task MM has been released */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index d06064120694..5bf87c62a418 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -9,14 +9,14 @@
struct cputopo_arm {
int thread_id;
int core_id;
- int socket_id;
+ int cluster_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
};
extern struct cputopo_arm cpu_topology[NR_CPUS];
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 683d9230984a..24a47af4d05f 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -39,6 +39,7 @@ static inline int in_exception_text(unsigned long ptr)
return in ? : __in_irqentry_text(ptr);
}
+extern void get_pct_hook_init(void);
extern void __init early_trap_init(void *);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
deleted file mode 100644
index a53cdb8f068c..000000000000
--- a/arch/arm/include/asm/types.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
-
-#include <asm-generic/int-ll64.h>
-
-/*
- * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
- * unambiguous on ARM as you would expect. For the types below, there is a
- * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
- * and the kernel itself, which results in build errors if you try to build with
- * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
- * in order to use NEON intrinsics)
- *
- * As the typedefs for these types in 'stdint.h' are based on builtin defines
- * supplied by GCC, we can tweak these to align with the kernel's idea of those
- * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
- * source file (provided that -ffreestanding is used).
- *
- * int32_t uint32_t uintptr_t
- * bare metal GCC long unsigned long unsigned int
- * glibc GCC int unsigned int unsigned int
- * kernel int unsigned int unsigned long
- */
-
-#ifdef __INT32_TYPE__
-#undef __INT32_TYPE__
-#define __INT32_TYPE__ int
-#endif
-
-#ifdef __UINT32_TYPE__
-#undef __UINT32_TYPE__
-#define __UINT32_TYPE__ unsigned int
-#endif
-
-#ifdef __UINTPTR_TYPE__
-#undef __UINTPTR_TYPE__
-#define __UINTPTR_TYPE__ unsigned long
-#endif
-
-#endif /* _ASM_TYPES_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7b84657fba35..076090d2dbf5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -14,12 +14,7 @@
#define __ASM_ARM_UNISTD_H
#include <uapi/asm/unistd.h>
-
-/*
- * This may need to be greater than __NR_last_syscall+1 in order to
- * account for the padding in the syscall table
- */
-#define __NR_syscalls (392)
+#include <asm/unistd-nr.h>
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -52,4 +47,23 @@
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
+#ifdef __ARM_EABI__
+/*
+ * The following syscalls are obsolete and no longer available for EABI:
+ * __NR_time
+ * __NR_umount
+ * __NR_stime
+ * __NR_alarm
+ * __NR_utime
+ * __NR_getrlimit
+ * __NR_select
+ * __NR_readdir
+ * __NR_mmap
+ * __NR_socketcall
+ * __NR_syscall
+ * __NR_ipc
+ */
+#define __IGNORE_getrlimit
+#endif
+
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
index 9be259442fca..0120852b6b12 100644
--- a/arch/arm/include/asm/vdso_datapage.h
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -24,21 +24,38 @@
#include <asm/page.h>
+#ifndef _VDSO_WTM_CLOCK_NSEC_T
+#define _VDSO_WTM_CLOCK_NSEC_T
+typedef u32 vdso_wtm_clock_nsec_t;
+#endif
+
+#ifndef _VDSO_XTIME_CLOCK_SEC_T
+#define _VDSO_XTIME_CLOCK_SEC_T
+typedef u32 vdso_xtime_clock_sec_t;
+#endif
+
+#ifndef _VDSO_RAW_TIME_SEC_T
+#define _VDSO_RAW_TIME_SEC_T
+typedef u32 vdso_raw_time_sec_t;
+#endif
+
/* Try to be cache-friendly on systems that don't implement the
* generic timer: fit the unconditionally updated fields in the first
* 32 bytes.
*/
struct vdso_data {
- u32 seq_count; /* sequence count - odd during updates */
- u16 tk_is_cntvct; /* fall back to syscall if false */
+ u32 tb_seq_count; /* sequence count - odd during updates */
+ u16 use_syscall; /* fall back to syscall if true */
u16 cs_shift; /* clocksource shift */
u32 xtime_coarse_sec; /* coarse time */
u32 xtime_coarse_nsec;
- u32 wtm_clock_sec; /* wall to monotonic offset */
- u32 wtm_clock_nsec;
- u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
- u32 cs_mult; /* clocksource multiplier */
+ /* wall to monotonic offset */
+ u32 wtm_clock_sec;
+ vdso_wtm_clock_nsec_t wtm_clock_nsec;
+ /* CLOCK_REALTIME - seconds */
+ vdso_xtime_clock_sec_t xtime_clock_sec;
+ u32 cs_mono_mult; /* clocksource multiplier */
u64 cs_cycle_last; /* last cycle value */
u64 cs_mask; /* clocksource mask */
@@ -46,6 +63,14 @@ struct vdso_data {
u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
u32 tz_dsttime;
+
+ u32 btm_sec; /* monotonic to boot time */
+ u32 btm_nsec;
+ /* Raw clocksource multiplier */
+ u32 cs_raw_mult;
+ /* Raw time */
+ vdso_raw_time_sec_t raw_time_sec;
+ u32 raw_time_nsec;
};
union vdso_data_store {
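
The renamed tb_seq_count keeps the usual vDSO seqcount protocol: readers spin
while the count is odd (an update is in flight) and retry if it changed
across their reads. A minimal reader sketch:

	static notrace u32 example_read_coarse_sec(const struct vdso_data *vd)
	{
		u32 seq, sec;

		do {
			while ((seq = READ_ONCE(vd->tb_seq_count)) & 1)
				cpu_relax();	/* update in progress */
			smp_rmb();
			sec = vd->xtime_coarse_sec;
			smp_rmb();
		} while (READ_ONCE(vd->tb_seq_count) != seq);

		return sec;
	}
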
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 4371f45c5784..d4ceaf5f299b 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void)
{
return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ return false;
+}
+
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
#endif
#endif /* __ASSEMBLY__ */