Diffstat (limited to 'arch/mips')
-rw-r--r-- | arch/mips/include/asm/atomic.h                        | 39 |
-rw-r--r-- | arch/mips/include/asm/bitops.h                        | 35 |
-rw-r--r-- | arch/mips/include/asm/cmpxchg.h                       | 27 |
-rw-r--r-- | arch/mips/include/asm/compiler.h                      |  8 |
-rw-r--r-- | arch/mips/include/asm/edac.h                          |  6 |
-rw-r--r-- | arch/mips/include/asm/futex.h                         | 23 |
-rw-r--r-- | arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h  | 25 |
-rw-r--r-- | arch/mips/include/asm/octeon/cvmx-cmd-queue.h         |  4 |
-rw-r--r-- | arch/mips/include/asm/spinlock.h                      | 50 |
9 files changed, 126 insertions, 91 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 6dd6bfc607e9..ec4b4d658bc4 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -17,6 +17,7 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
@@ -53,7 +54,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 " sc %0, %1 \n" \
 " beqzl %0, 1b \n" \
 " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } else if (kernel_uses_llsc) { \
 int temp; \
@@ -65,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 " " #asm_op " %0, %2 \n" \
 " sc %0, %1 \n" \
 " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } while (unlikely(!temp)); \
 } else { \
@@ -95,7 +96,8 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
 " beqzl %0, 1b \n" \
 " " #asm_op " %0, %1, %3 \n" \
 " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } else if (kernel_uses_llsc) { \
 int temp; \
@@ -107,7 +109,8 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
 " " #asm_op " %0, %1, %3 \n" \
 " sc %0, %2 \n" \
 " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } while (unlikely(!result)); \
 \
@@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 " .set reorder \n"
 "1: \n"
 " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i), "m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter)
 : "memory");
 } else if (kernel_uses_llsc) {
 int temp;
@@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 " .set reorder \n"
 "1: \n"
 " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
 : "Ir" (i));
 } else {
 unsigned long flags;
@@ -328,7 +333,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 " scd %0, %1 \n" \
 " beqzl %0, 1b \n" \
 " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } else if (kernel_uses_llsc) { \
 long temp; \
@@ -340,7 +345,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 " " #asm_op " %0, %2 \n" \
 " scd %0, %1 \n" \
 " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } while (unlikely(!temp)); \
 } else { \
@@ -370,7 +375,8 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 " beqzl %0, 1b \n" \
 " " #asm_op " %0, %1, %3 \n" \
 " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
 : "Ir" (i)); \
 } else if (kernel_uses_llsc) { \
 long temp; \
@@ -382,8 +388,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 " " #asm_op " %0, %1, %3 \n" \
 " scd %0, %2 \n" \
 " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
- : "Ir" (i), "m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "=" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
 : "memory"); \
 } while (unlikely(!result)); \
 \
@@ -443,8 +450,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 " .set reorder \n"
 "1: \n"
 " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "=" GCC_OFF12_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter)
 : "memory");
 } else if (kernel_uses_llsc) {
 long temp;
@@ -461,7 +469,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 " .set reorder \n"
 "1: \n"
 " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
 : "Ir" (i));
 } else {
 unsigned long flags;
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index bae6b0fa8ab5..6663bcca9d0c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h> /* sigh ... */
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/sgidefs.h>
 #include <asm/war.h>
@@ -78,8 +79,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 " " __SC "%0, %1 \n"
 " beqzl %0, 1b \n"
 " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
+ : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
 #ifdef CONFIG_CPU_MIPSR2
 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 do {
@@ -87,7 +88,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 " " __LL "%0, %1 # set_bit \n"
 " " __INS "%0, %3, %2, 1 \n"
 " " __SC "%0, %1 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (bit), "r" (~0));
 } while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 */
@@ -99,7 +100,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 " or %0, %2 \n"
 " " __SC "%0, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (1UL << bit));
 } while (unlikely(!temp));
 } else
@@ -130,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 " " __SC "%0, %1 \n"
 " beqzl %0, 1b \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (~(1UL << bit)));
 #ifdef CONFIG_CPU_MIPSR2
 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
@@ -139,7 +140,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 " " __LL "%0, %1 # clear_bit \n"
 " " __INS "%0, $0, %2, 1 \n"
 " " __SC "%0, %1 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (bit));
 } while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 */
@@ -151,7 +152,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 " and %0, %2 \n"
 " " __SC "%0, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (~(1UL << bit)));
 } while (unlikely(!temp));
 } else
@@ -196,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 " " __SC "%0, %1 \n"
 " beqzl %0, 1b \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (1UL << bit));
 } else if (kernel_uses_llsc) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -209,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 " xor %0, %2 \n"
 " " __SC "%0, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
 : "ir" (1UL << bit));
 } while (unlikely(!temp));
 } else
@@ -244,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 } else if (kernel_uses_llsc) {
@@ -258,7 +259,7 @@ static inline int test_and_set_bit(unsigned long nr,
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 } while (unlikely(!res));
@@ -312,7 +313,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 } while (unlikely(!res));
@@ -354,7 +355,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 #ifdef CONFIG_CPU_MIPSR2
@@ -368,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 " " __EXT "%2, %0, %3, 1 \n"
 " " __INS "%0, $0, %3, 1 \n"
 " " __SC "%0, %1 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "ir" (bit)
 : "memory");
 } while (unlikely(!temp));
@@ -385,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 " xor %2, %3 \n"
 " " __SC "%2, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 } while (unlikely(!res));
@@ -427,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 } else if (kernel_uses_llsc) {
@@ -441,7 +442,7 @@ static inline int test_and_change_bit(unsigned long nr,
 " xor %2, %0, %3 \n"
 " " __SC "\t%2, %1 \n"
 " .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
 : "r" (1UL << bit)
 : "memory");
 } while (unlikely(!res));
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index eefcaa363a87..28b1edf19501 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -10,6 +10,7 @@
 #include <linux/bug.h>
 #include <linux/irqflags.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
@@ -30,8 +31,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 " sc %2, %1 \n"
 " beqzl %2, 1b \n"
 " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
 : "memory");
 } else if (kernel_uses_llsc) {
 unsigned long dummy;
@@ -45,8 +46,9 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 " .set arch=r4000 \n"
 " sc %2, %1 \n"
 " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+ "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
 : "memory");
 } while (unlikely(!dummy));
 } else {
@@ -80,8 +82,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 " scd %2, %1 \n"
 " beqzl %2, 1b \n"
 " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
 : "memory");
 } else if (kernel_uses_llsc) {
 unsigned long dummy;
@@ -93,8 +95,9 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 " move %2, %z4 \n"
 " scd %2, %1 \n"
 " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+ "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
 : "memory");
 } while (unlikely(!dummy));
 } else {
@@ -155,8 +158,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 " beqzl $1, 1b \n" \
 "2: \n" \
 " .set pop \n" \
- : "=&r" (__ret), "=R" (*m) \
- : "R" (*m), "Jr" (old), "Jr" (new) \
+ : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
+ : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
 : "memory"); \
 } else if (kernel_uses_llsc) { \
 __asm__ __volatile__( \
@@ -172,8 +175,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 " beqz $1, 1b \n" \
 " .set pop \n" \
 "2: \n" \
- : "=&r" (__ret), "=R" (*m) \
- : "R" (*m), "Jr" (old), "Jr" (new) \
+ : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
+ : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
 : "memory"); \
 } else { \
 unsigned long __flags; \
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index 71f5c5cfc58a..c73815e0123a 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,4 +16,12 @@
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifndef CONFIG_CPU_MICROMIPS
+#define GCC_OFF12_ASM() "R"
+#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
+#define GCC_OFF12_ASM() "ZC"
+#else
+#error "microMIPS compilation unsupported with GCC older than 4.9"
+#endif
+
 #endif /* _ASM_COMPILER_H */
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index 4da0c1fe30d9..ae6fedcb0060 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -1,6 +1,8 @@
 #ifndef ASM_EDAC_H
 #define ASM_EDAC_H
 
+#include <asm/compiler.h>
+
 /* ECC atomic, DMA, SMP and interrupt safe scrub function */
 
 static inline void atomic_scrub(void *va, u32 size)
@@ -24,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
 " sc %0, %1 \n"
 " beqz %0, 1b \n"
 " .set mips0 \n"
- : "=&r" (temp), "=m" (*virt_addr)
- : "m" (*virt_addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
+ : GCC_OFF12_ASM() (*virt_addr));
 
 virt_addr++;
 }
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 194cda0396a3..d0177bf915bb 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <asm/asm-eva.h>
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/errno.h>
 #include <asm/war.h>
 
@@ -42,8 +43,10 @@
 " "__UA_ADDR "\t1b, 4b \n" \
 " "__UA_ADDR "\t2b, 4b \n" \
 " .previous \n" \
- : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
- : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "=r" (ret), "=&r" (oldval), \
+ "=" GCC_OFF12_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
 : "memory"); \
 } else if (cpu_has_llsc) { \
 __asm__ __volatile__( \
@@ -68,8 +71,10 @@
 " "__UA_ADDR "\t1b, 4b \n" \
 " "__UA_ADDR "\t2b, 4b \n" \
 " .previous \n" \
- : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
- : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "=r" (ret), "=&r" (oldval), \
+ "=" GCC_OFF12_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
 : "memory"); \
 } else \
 ret = -ENOSYS; \
@@ -166,8 +171,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 " "__UA_ADDR "\t1b, 4b \n"
 " "__UA_ADDR "\t2b, 4b \n"
 " .previous \n"
- : "+r" (ret), "=&r" (val), "=R" (*uaddr)
- : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
+ : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
 : "memory");
 } else if (cpu_has_llsc) {
 __asm__ __volatile__(
@@ -193,8 +199,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 " "__UA_ADDR "\t1b, 4b \n"
 " "__UA_ADDR "\t2b, 4b \n"
 " .previous \n"
- : "+r" (ret), "=&r" (val), "=R" (*uaddr)
- : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
+ : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
 : "memory");
 } else
 return -ENOSYS;
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index fc946c835995..2e54b4bff5cf 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -49,6 +49,7 @@
 #include <linux/types.h>
 
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 #ifndef R10000_LLSC_WAR
@@ -84,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
 " "__beqz"%0, 1b \n"
 " nop \n"
 " .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (~mask), "ir" (value), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -105,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
 " "__beqz"%0, 1b \n"
 " nop \n"
 " .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (mask), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (mask), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -126,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
 " "__beqz"%0, 1b \n"
 " nop \n"
 " .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (~mask), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (~mask), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -147,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
 " "__beqz"%0, 1b \n"
 " nop \n"
 " .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (mask), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (mask), GCC_OFF12_ASM() (*addr));
 }
 
 /*
@@ -219,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 " .set arch=r4000 \n" \
 "1: ll %0, %1 #custom_read_reg32 \n" \
 " .set pop \n" \
- : "=r" (tmp), "=m" (*address) \
- : "m" (*address))
+ : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \
+ : GCC_OFF12_ASM() (*address))
 
 #define custom_write_reg32(address, tmp) \
 __asm__ __volatile__( \
@@ -230,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 " "__beqz"%0, 1b \n" \
 " nop \n" \
 " .set pop \n" \
- : "=&r" (tmp), "=m" (*address) \
- : "0" (tmp), "m" (*address))
+ : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \
+ : "0" (tmp), GCC_OFF12_ASM() (*address))
 
 #endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 024a71b2bff9..75739c83f07e 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -76,6 +76,8 @@
 
 #include <linux/prefetch.h>
 
+#include <asm/compiler.h>
+
 #include <asm/octeon/cvmx-fpa.h>
 /**
  * By default we disable the max depth support. Most programs
@@ -273,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
 " lbu %[ticket], %[now_serving]\n"
 "4:\n"
 ".set pop\n" :
-[ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+[ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
 [my_ticket] "=r"(my_ticket)
 );
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 78d201fb6c87..c6d06d383ef9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 /*
@@ -88,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 " subu %[ticket], %[ticket], 1 \n"
 " .previous \n"
 " .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
 [serving_now_ptr] "+m" (lock->h.serving_now),
 [ticket] "=&r" (tmp),
 [my_ticket] "=&r" (my_ticket)
@@ -121,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 " subu %[ticket], %[ticket], 1 \n"
 " .previous \n"
 " .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
 [serving_now_ptr] "+m" (lock->h.serving_now),
 [ticket] "=&r" (tmp),
 [my_ticket] "=&r" (my_ticket)
@@ -163,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 " li %[ticket], 0 \n"
 " .previous \n"
 " .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
 [ticket] "=&r" (tmp),
 [my_ticket] "=&r" (tmp2),
 [now_serving] "=&r" (tmp3)
@@ -187,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 " li %[ticket], 0 \n"
 " .previous \n"
 " .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
 [ticket] "=&r" (tmp),
 [my_ticket] "=&r" (tmp2),
 [now_serving] "=&r" (tmp3)
@@ -234,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 " beqzl %1, 1b \n"
 " nop \n"
 " .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } else {
 do {
@@ -244,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 " bltz %1, 1b \n"
 " addu %1, 1 \n"
 "2: sc %1, %0 \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } while (unlikely(!tmp));
 }
@@ -268,8 +269,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 " sub %1, 1 \n"
 " sc %1, %0 \n"
 " beqzl %1, 1b \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } else {
 do {
@@ -277,8 +278,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "1: ll %1, %2 # arch_read_unlock \n"
 " sub %1, 1 \n"
 " sc %1, %0 \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } while (unlikely(!tmp));
 }
@@ -298,8 +299,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 " beqzl %1, 1b \n"
 " nop \n"
 " .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } else {
 do {
@@ -308,8 +309,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 " bnez %1, 1b \n"
 " lui %1, 0x8000 \n"
 "2: sc %1, %0 \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } while (unlikely(!tmp));
 }
@@ -348,8 +349,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 __WEAK_LLSC_MB
 " li %2, 1 \n"
 "2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } else {
 __asm__ __volatile__(
@@ -365,8 +366,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 __WEAK_LLSC_MB
 " li %2, 1 \n"
 "2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 }
 
@@ -392,8 +393,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 " li %2, 1 \n"
 " .set reorder \n"
 "2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } else {
 do {
@@ -405,8 +406,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 " sc %1, %0 \n"
 " li %2, 1 \n"
 "2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+ "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
 : "memory");
 } while (unlikely(!tmp));
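
The compiler.h hunk is the core of the change: the bare "m" constraint lets GCC pick any addressing mode, including offsets that the microMIPS LL/SC encodings, whose offset field is only 12 bits wide, cannot express. GCC_OFF12_ASM() therefore expands to "R" (an address usable in a non-macro load or store) on classic MIPS, and to "ZC" (an address suitable for ll/sc, available from GCC 4.9) on microMIPS. Below is a minimal out-of-kernel sketch of the resulting idiom, using a hypothetical atomic_add_sketch() helper that is not part of the patch; it should build with a MIPS cross compiler, e.g. mips-linux-gnu-gcc -O2 -c:

/*
 * Sketch only, not from the patch: how GCC_OFF12_ASM() is meant to be
 * used in an LL/SC retry loop. String pasting turns
 * "+" GCC_OFF12_ASM() into the single constraint "+R" or "+ZC".
 */
#ifndef CONFIG_CPU_MICROMIPS
#define GCC_OFF12_ASM() "R"	/* classic MIPS: 16-bit ll/sc offsets are fine */
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
#define GCC_OFF12_ASM() "ZC"	/* microMIPS: offset must fit in 12 bits */
#else
#error "microMIPS compilation unsupported with GCC older than 4.9"
#endif

/* Hypothetical helper, named for illustration only. */
static inline void atomic_add_sketch(int i, volatile int *counter)
{
	int temp;

	do {
		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"	ll	%0, %1		\n"	/* load-linked */
		"	addu	%0, %2		\n"
		"	sc	%0, %1		\n"	/* store-conditional */
		"	.set	mips0		\n"
		: "=&r" (temp), "+" GCC_OFF12_ASM() (*counter)
		: "Ir" (i));
	} while (__builtin_expect(!temp, 0));	/* retry if sc failed */
}

The design point is that constraining the memory operand is cheaper than the alternatives: with plain "m" on microMIPS the compiler may legalise the address into a form the assembler then rejects for ll/sc, while forcing the base address into a register by hand would pessimise classic-MIPS builds as well.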