| author | Ingo Molnar <mingo@kernel.org> | 2012-04-14 13:18:27 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2012-04-14 13:19:04 +0200 |
| commit | 6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch) | |
| tree | 021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /arch/hexagon/include/asm/system.h | |
| parent | 682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff) | |
| parent | a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff) | |
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree),
to prepare for tooling changes, and also to pick up v3.4 MM
changes that the uprobes code needs to take care of.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/hexagon/include/asm/system.h')
| -rw-r--r-- | arch/hexagon/include/asm/system.h | 126 |
1 file changed, 0 insertions, 126 deletions
```diff
diff --git a/arch/hexagon/include/asm/system.h b/arch/hexagon/include/asm/system.h
deleted file mode 100644
index 323ed1dd65e2..000000000000
--- a/arch/hexagon/include/asm/system.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * System level definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_SYSTEM_H
-#define _ASM_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-#include <asm/atomic.h>
-#include <asm/hexagon_vm.h>
-
-struct thread_struct;
-
-extern struct task_struct *__switch_to(struct task_struct *,
-	struct task_struct *,
-	struct task_struct *);
-
-#define switch_to(p, n, r) do {\
-	r = __switch_to((p), (n), (r));\
-} while (0)
-
-
-#define rmb()				barrier()
-#define read_barrier_depends()		barrier()
-#define wmb()				barrier()
-#define mb()				barrier()
-#define smp_rmb()			barrier()
-#define smp_read_barrier_depends()	barrier()
-#define smp_wmb()			barrier()
-#define smp_mb()			barrier()
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-/*
- * __xchg - atomically exchange a register and a memory location
- * @x: value to swap
- * @ptr: pointer to memory
- * @size: size of the value
- *
- * Only 4 bytes supported currently.
- *
- * Note:  there was an errata for V2 about .new's and memw_locked.
- *
- */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	unsigned long retval;
-
-	/*  Can't seem to use printk or panic here, so just stop  */
-	if (size != 4) do { asm volatile("brkpt;\n"); } while (1);
-
-	__asm__ __volatile__ (
-	"1:	%0 = memw_locked(%1);\n"    /*  load into retval */
-	"	memw_locked(%1,P0) = %2;\n" /*  store into memory */
-	"	if !P0 jump 1b;\n"
-	: "=&r" (retval)
-	: "r" (ptr), "r" (x)
-	: "memory", "p0"
-	);
-	return retval;
-}
-
-/*
- * Atomically swap the contents of a register with memory.  Should be atomic
- * between multiple CPU's and within interrupts on the same CPU.
- */
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
-	sizeof(*(ptr))))
-
-/*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
-#define set_mb(var, value) \
-	do { var = value; mb(); } while (0)
-
-/*
- * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
- * looks just like atomic_cmpxchg on our arch currently with a bunch of
- * variable casting.
- */
-#define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, old, new)					\
-({								\
-	__typeof__(ptr) __ptr = (ptr);				\
-	__typeof__(*(ptr)) __old = (old);			\
-	__typeof__(*(ptr)) __new = (new);			\
-	__typeof__(*(ptr)) __oldval = 0;			\
-								\
-	asm volatile(						\
-		"1:	%0 = memw_locked(%1);\n"		\
-		"	{ P0 = cmp.eq(%0,%2);\n"		\
-		"	  if (!P0.new) jump:nt 2f; }\n"		\
-		"	memw_locked(%1,p0) = %3;\n"		\
-		"	if (!P0) jump 1b;\n"			\
-		"2:\n"						\
-		: "=&r" (__oldval)				\
-		: "r" (__ptr), "r" (__old), "r" (__new)		\
-		: "memory", "p0"				\
-	);							\
-	__oldval;						\
-})
-
-/*  Should probably shoot for an 8-byte aligned stack pointer  */
-#define STACK_MASK (~7)
-#define arch_align_stack(x) (x & STACK_MASK)
-
-#endif
```
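For context, not part of the commit itself: the deleted header carried Hexagon's memory-barrier macros plus the xchg()/cmpxchg() primitives built on the memw_locked load-locked/store-conditional pair, and that functionality lives on in the split-out headers (asm/barrier.h, asm/cmpxchg.h and friends) rather than disappearing. The sketch below only illustrates the compare-and-swap retry pattern that callers of cmpxchg() rely on, rewritten with C11 atomics so it builds outside the kernel tree; the helper name atomic_add_return_cas is invented for this example and is not a kernel API.

```c
/*
 * Minimal userspace analogue of the retry loop callers build on top of
 * cmpxchg(); uses C11 atomics instead of the kernel macro.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long counter;

/* Add @delta to @v without locks: reread and retry if another thread won. */
static unsigned long atomic_add_return_cas(_Atomic unsigned long *v,
					   unsigned long delta)
{
	unsigned long old = atomic_load(v);

	/*
	 * Morally "while (cmpxchg(v, old, old + delta) != old)": a failed
	 * CAS refreshes 'old' with the value another thread stored first,
	 * and the loop recomputes old + delta and tries again.
	 */
	while (!atomic_compare_exchange_weak(v, &old, old + delta))
		;

	return old + delta;
}

int main(void)
{
	printf("%lu\n", atomic_add_return_cas(&counter, 5));	/* 5 */
	printf("%lu\n", atomic_add_return_cas(&counter, 3));	/* 8 */
	return 0;
}
```

The weak compare-exchange may fail spuriously, which is harmless here because the loop simply retries, mirroring the "if (!P0) jump 1b" retry in the deleted assembly.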
