| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 15:48:00 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 15:48:00 +0200 |
| commit | dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch) | |
| tree | 9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/sh/include/asm/atomic-llsc.h | |
| parent | d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff) | |
| parent | 2291059c852706c6f5ffb400366042b7625066cd (diff) | |
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
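The last item describes the most far-reaching change: once an architecture supplies cmpxchg(), every other atomic op can be generated from it with a compare-and-swap retry loop. As a rough illustration only (a hypothetical, simplified sketch; the macro name GEN_ATOMIC_OP_RETURN is made up here, and the kernel's real generic code in include/asm-generic/atomic.h differs in detail):

```c
/*
 * Hypothetical sketch of generating atomic ops from cmpxchg().
 * Assumes the usual kernel atomic_t, atomic_read() and
 * atomic_cmpxchg() are already provided by the architecture.
 */
#define GEN_ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int old, new;							\
									\
	do {								\
		old = atomic_read(v);	/* snapshot current value */	\
		new = old c_op i;	/* compute desired value */	\
	} while (atomic_cmpxchg(v, old, new) != old);			\
									\
	return new;							\
}

GEN_ATOMIC_OP_RETURN(add, +)	/* generates atomic_add_return() */
GEN_ATOMIC_OP_RETURN(sub, -)	/* generates atomic_sub_return() */
```

The loop retries only when another CPU raced in between the read and the cmpxchg, which is the same forward-progress property the LL/SC loops in the sh diff below provide in hardware.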
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/sh/include/asm/atomic-llsc.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/sh/include/asm/atomic-llsc.h | 101 |

1 file changed, 41 insertions, 60 deletions
```diff
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f..8575dccb9ef7 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -2,39 +2,6 @@
 #define __ASM_SH_ATOMIC_LLSC_H
 
 /*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_add	\n"
-"	add	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-	: "=&z" (tmp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_sub	\n"
-"	sub	%1, %0				\n"
-"	movco.l %0, @%2				\n"
-"	bf	1b				\n"
-	: "=&z" (tmp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-}
-
-/*
  * SH-4A note:
  *
  * We basically get atomic_xxx_return() for free compared with
@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v)
  * encoding, so the retval is automatically set without having to
  * do any special work.
  */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long temp;
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
 
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_add_return	\n"
-"	add	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-"	synco						\n"
-	: "=&z" (temp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
+#define ATOMIC_OP(op)						\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	unsigned long tmp;					\
+								\
+	__asm__ __volatile__ (					\
+"1:	movli.l @%2, %0		! atomic_" #op "\n"		\
+"	" #op "	%1, %0				\n"		\
+"	movco.l	%0, @%2				\n"		\
+"	bf	1b				\n"		\
+	: "=&z" (tmp)						\
+	: "r" (i), "r" (&v->counter)				\
+	: "t");							\
+}
 
-	return temp;
+#define ATOMIC_OP_RETURN(op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	unsigned long temp;					\
+								\
+	__asm__ __volatile__ (					\
+"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"	\
+"	" #op "	%1, %0					\n"	\
+"	movco.l	%0, @%2					\n"	\
+"	bf	1b					\n"	\
+"	synco						\n"	\
+	: "=&z" (temp)						\
+	: "r" (i), "r" (&v->counter)				\
+	: "t");							\
+								\
+	return temp;						\
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long temp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_sub_return	\n"
-"	sub	%1, %0					\n"
-"	movco.l %0, @%2					\n"
-"	bf	1b					\n"
-"	synco						\n"
-	: "=&z" (temp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-	return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
```
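The fold is purely mechanical: token pasting (##op) rebuilds the old function names, and stringification (#op) drops the opcode back into the inline assembly, so ATOMIC_OP(add) expands, whitespace aside, to the pre-patch atomic_add(); the _return variants additionally keep the trailing synco barrier. An approximate expansion, with comments added here for orientation:

```c
/* Approximate expansion of ATOMIC_OP(add); formatting differs from
 * real preprocessor output. */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0	! atomic_add\n"	/* load-locked v->counter */
"	add	%1, %0		\n"	/* tmp += i */
"	movco.l	%0, @%2		\n"	/* store-conditional */
"	bf	1b		\n"	/* T clear: store failed, retry */
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}
```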
