Diffstat (limited to 'arch/xtensa')
-rw-r--r-- | arch/xtensa/include/asm/atomic.h | 235
1 file changed, 83 insertions, 152 deletions
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e5103b47a8ce..00b7d46b35b8 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -47,7 +47,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -58,165 +58,96 @@
  */
 #define atomic_set(v,i)		((v)->counter = (i))
 
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
 #if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                "1:     l32i    %1, %3, 0\n"
-                "       wsr     %1, scompare1\n"
-                "       add     %0, %1, %2\n"
-                "       s32c1i  %0, %3, 0\n"
-                "       bne     %0, %1, 1b\n"
-                : "=&a" (result), "=&a" (tmp)
-                : "a" (i), "a" (v)
-                : "memory"
-                );
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-                "       l32i    %0, %2, 0\n"
-                "       add     %0, %0, %1\n"
-                "       s32i    %0, %2, 0\n"
-                "       wsr     a15, ps\n"
-                "       rsync\n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
-#endif
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                "1:     l32i    %1, %3, 0\n"
-                "       wsr     %1, scompare1\n"
-                "       sub     %0, %1, %2\n"
-                "       s32c1i  %0, %3, 0\n"
-                "       bne     %0, %1, 1b\n"
-                : "=&a" (result), "=&a" (tmp)
-                : "a" (i), "a" (v)
-                : "memory"
-                );
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-                "       l32i    %0, %2, 0\n"
-                "       sub     %0, %0, %1\n"
-                "       s32i    %0, %2, 0\n"
-                "       wsr     a15, ps\n"
-                "       rsync\n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
-#endif
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                "1:     l32i    %1, %3, 0\n" \
+                "       wsr     %1, scompare1\n" \
+                "       " #op " %0, %1, %2\n" \
+                "       s32c1i  %0, %3, 0\n" \
+                "       bne     %0, %1, 1b\n" \
+                : "=&a" (result), "=&a" (tmp) \
+                : "a" (i), "a" (v) \
+                : "memory" \
+                ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                "1:     l32i    %1, %3, 0\n" \
+                "       wsr     %1, scompare1\n" \
+                "       " #op " %0, %1, %2\n" \
+                "       s32c1i  %0, %3, 0\n" \
+                "       bne     %0, %1, 1b\n" \
+                "       " #op " %0, %0, %2\n" \
+                : "=&a" (result), "=&a" (tmp) \
+                : "a" (i), "a" (v) \
+                : "memory" \
+                ); \
+ \
+        return result; \
 }
 
-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                "1:     l32i    %1, %3, 0\n"
-                "       wsr     %1, scompare1\n"
-                "       add     %0, %1, %2\n"
-                "       s32c1i  %0, %3, 0\n"
-                "       bne     %0, %1, 1b\n"
-                "       add     %0, %0, %2\n"
-                : "=&a" (result), "=&a" (tmp)
-                : "a" (i), "a" (v)
-                : "memory"
-                );
-
-        return result;
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                "       l32i    %0, %2, 0\n"
-                "       add     %0, %0, %1\n"
-                "       s32i    %0, %2, 0\n"
-                "       wsr     a15, ps\n"
-                "       rsync\n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
-
-        return vval;
-#endif
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+        unsigned int vval; \
+ \
+        __asm__ __volatile__( \
+                "       rsil    a15, "__stringify(LOCKLEVEL)"\n"\
+                "       l32i    %0, %2, 0\n" \
+                "       " #op " %0, %0, %1\n" \
+                "       s32i    %0, %2, 0\n" \
+                "       wsr     a15, ps\n" \
+                "       rsync\n" \
+                : "=&a" (vval) \
+                : "a" (i), "a" (v) \
+                : "a15", "memory" \
+                ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+        unsigned int vval; \
+ \
+        __asm__ __volatile__( \
+                "       rsil    a15,"__stringify(LOCKLEVEL)"\n" \
+                "       l32i    %0, %2, 0\n" \
+                "       " #op " %0, %0, %1\n" \
+                "       s32i    %0, %2, 0\n" \
+                "       wsr     a15, ps\n" \
+                "       rsync\n" \
+                : "=&a" (vval) \
+                : "a" (i), "a" (v) \
+                : "a15", "memory" \
+                ); \
+ \
+        return vval; \
 }
 
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
+#endif /* XCHAL_HAVE_S32C1I */
 
-        __asm__ __volatile__(
-                "1:     l32i    %1, %3, 0\n"
-                "       wsr     %1, scompare1\n"
-                "       sub     %0, %1, %2\n"
-                "       s32c1i  %0, %3, 0\n"
-                "       bne     %0, %1, 1b\n"
-                "       sub     %0, %0, %2\n"
-                : "=&a" (result), "=&a" (tmp)
-                : "a" (i), "a" (v)
-                : "memory"
-                );
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-        return result;
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                "       l32i    %0, %2, 0\n"
-                "       sub     %0, %0, %1\n"
-                "       s32i    %0, %2, 0\n"
-                "       wsr     a15, ps\n"
-                "       rsync\n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-        return vval;
-#endif
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * atomic_sub_and_test - subtract value from variable and test result
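
To make the macro indirection concrete, the sketch below shows roughly what ATOMIC_OPS(add) expands to when XCHAL_HAVE_S32C1I is non-zero, i.e. on cores that implement the s32c1i conditional store. This is only an illustrative expansion of the macros in the patch, with comments added and whitespace simplified; atomic_t and the asm environment come from the surrounding kernel header.

static inline void atomic_add(int i, atomic_t * v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                "1:     l32i    %1, %3, 0\n"    /* tmp = v->counter */
                "       wsr     %1, scompare1\n"/* expected value for s32c1i */
                "       add     %0, %1, %2\n"   /* result = tmp + i */
                "       s32c1i  %0, %3, 0\n"    /* store result if counter == scompare1;
                                                   result gets the old memory value */
                "       bne     %0, %1, 1b\n"   /* counter changed under us: retry */
                : "=&a" (result), "=&a" (tmp)
                : "a" (i), "a" (v)
                : "memory"
                );
}

static inline int atomic_add_return(int i, atomic_t * v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                "1:     l32i    %1, %3, 0\n"
                "       wsr     %1, scompare1\n"
                "       add     %0, %1, %2\n"
                "       s32c1i  %0, %3, 0\n"
                "       bne     %0, %1, 1b\n"
                "       add     %0, %0, %2\n"   /* loop exits with result == old value,
                                                   so recompute old + i for the return */
                : "=&a" (result), "=&a" (tmp)
                : "a" (i), "a" (v)
                : "memory"
                );

        return result;
}

The generated code is the same as the open-coded atomic_add()/atomic_add_return() that the patch removes; the ATOMIC_OP/ATOMIC_OP_RETURN macros only fold the duplication between the add and sub variants (and between the s32c1i and interrupt-disabling fallback paths) into a single template.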