diff options
| author | Josh Cartwright <joshc@codeaurora.org> | 2014-06-27 00:30:04 -0500 |
|---|---|---|
| committer | Kyle Yan <kyan@codeaurora.org> | 2016-06-15 16:14:30 -0700 |
| commit | f6280bbae3570bd15e358a7f8026c5980ab64d76 (patch) | |
| tree | dac7980517919302e4d8d8bd7df6d598f8ca12a5 | |
| parent | d6118c0a9f7ab2b131ca36dd3dbd5634603d14fe (diff) | |
ARM: msm: add support for logged IO accessors
This is a snapshot of the logged IO accessor functions as of the following
commit in the msm-3.10 tree:
'commit acdce027751d5a ("Merge defconfig: arm64: Enable
ONESHOT_SYNC for msm8994")'
Change-Id: I4d52ad23cc40d03d2bae1d3942c8d35543a0d461
Signed-off-by: Josh Cartwright <joshc@codeaurora.org>
[sramana: Fixed merge conflicts]
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
| -rw-r--r-- | arch/arm/include/asm/io.h | 69 |
1 file changed, 55 insertions, 14 deletions
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 485982084fe9..2fd57366bb9b 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -29,6 +29,7 @@ #include <asm/byteorder.h> #include <asm/memory.h> #include <asm-generic/pci_iomap.h> +#include <linux/msm_rtb.h> #include <xen/xen.h> /* @@ -62,23 +63,21 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen); * the bus. Rather than special-case the machine, just let the compiler * generate the access for CPUs prior to ARMv6. */ -#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) -#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) +#define __raw_readw_no_log(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) +#define __raw_writew_no_log(v, a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) #else /* * When running under a hypervisor, we want to avoid I/O accesses with * writeback addressing modes as these incur a significant performance * overhead (the address generation must be emulated in software). 
*/ -#define __raw_writew __raw_writew -static inline void __raw_writew(u16 val, volatile void __iomem *addr) +static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" : : "Q" (*(volatile u16 __force *)addr), "r" (val)); } -#define __raw_readw __raw_readw -static inline u16 __raw_readw(const volatile void __iomem *addr) +static inline u16 __raw_readw_no_log(const volatile void __iomem *addr) { u16 val; asm volatile("ldrh %0, %1" @@ -88,22 +87,19 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) } #endif -#define __raw_writeb __raw_writeb -static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr) { asm volatile("strb %1, %0" : : "Qo" (*(volatile u8 __force *)addr), "r" (val)); } -#define __raw_writel __raw_writel -static inline void __raw_writel(u32 val, volatile void __iomem *addr) +static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr) { asm volatile("str %1, %0" : : "Qo" (*(volatile u32 __force *)addr), "r" (val)); } -#define __raw_readb __raw_readb -static inline u8 __raw_readb(const volatile void __iomem *addr) +static inline u8 __raw_readb_no_log(const volatile void __iomem *addr) { u8 val; asm volatile("ldrb %0, %1" @@ -112,8 +108,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr) return val; } -#define __raw_readl __raw_readl -static inline u32 __raw_readl(const volatile void __iomem *addr) +static inline u32 __raw_readl_no_log(const volatile void __iomem *addr) { u32 val; asm volatile("ldr %0, %1" @@ -122,6 +117,49 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) return val; } + +/* + * There may be cases when clients don't want to support or can't support the + * logging. The appropriate functions can be used but clients should carefully + * consider why they can't support the logging. 
+ */ + +#define __raw_write_logged(v, a, _t) ({ \ + int _ret; \ + volatile void __iomem *_a = (a); \ + void *_addr = (void __force *)(_a); \ + _ret = uncached_logk(LOGK_WRITEL, _addr); \ + ETB_WAYPOINT; \ + __raw_write##_t##_no_log((v), _a); \ + if (_ret) \ + LOG_BARRIER; \ + }) + + +#define __raw_writeb(v, a) __raw_write_logged((v), (a), b) +#define __raw_writew(v, a) __raw_write_logged((v), (a), w) +#define __raw_writel(v, a) __raw_write_logged((v), (a), l) +#define __raw_writeq(v, a) __raw_write_logged((v), (a), q) + +#define __raw_read_logged(a, _l, _t) ({ \ + unsigned _t __a; \ + const volatile void __iomem *_a = (a); \ + void *_addr = (void __force *)(_a); \ + int _ret; \ + _ret = uncached_logk(LOGK_READL, _addr); \ + ETB_WAYPOINT; \ + __a = __raw_read##_l##_no_log(_a);\ + if (_ret) \ + LOG_BARRIER; \ + __a; \ + }) + + +#define __raw_readb(a) __raw_read_logged((a), b, char) +#define __raw_readw(a) __raw_read_logged((a), w, short) +#define __raw_readl(a) __raw_read_logged((a), l, int) +#define __raw_readq(a) __raw_read_logged((a), q, long long) + /* * Architecture ioremap implementation. */ @@ -291,10 +329,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t); __raw_readw(c)); __r; }) #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ __raw_readl(c)); __r; }) +#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \ + __raw_readl_no_log(c)); __r; }) #define writeb_relaxed(v,c) __raw_writeb(v,c) #define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) +#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) cpu_to_le32(v), c) #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) |
