author     Linus Torvalds <torvalds@linux-foundation.org>   2015-09-03 15:46:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-09-03 15:46:07 -0700
commit     ca520cab25e0e8da717c596ccaa2c2b3650cfa09
tree       883eb497642d98635817f9cf954ac98e043fb573 /arch/tile
parent     4c12ab7e5e2e892fa94df500f96001837918a281
parent     d420acd816c07c7be31bd19d09cbcb16e5572fa6
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
"Main changes in this cycle are:
- Extend the atomic primitives with coherent logic op primitives
(atomic_{or,and,xor}()) and deprecate the old, partial APIs
(atomic_{set,clear}_mask()).
The old ops were incoherent, with incompatible signatures across
architectures and incomplete support. Now every architecture
supports the new primitives consistently (by Peter Zijlstra)
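For illustration, a caller that previously set and cleared flag bits
with the deprecated helpers converts roughly as follows (a sketch;
the 'flags' word and the IRQ_PENDING mask are hypothetical):

    static atomic_t flags = ATOMIC_INIT(0);    /* hypothetical flag word */

    /* Old, now-deprecated API: */
    atomic_set_mask(IRQ_PENDING, &flags);      /* set bits   */
    atomic_clear_mask(IRQ_PENDING, &flags);    /* clear bits */

    /* New coherent API: */
    atomic_or(IRQ_PENDING, &flags);            /* set bits    */
    atomic_and(~IRQ_PENDING, &flags);          /* clear bits  */
    atomic_xor(IRQ_PENDING, &flags);           /* toggle bits */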
- Generic support for 'relaxed atomics':
  - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and {add,sub}_return()
  - atomic_read_acquire()
  - atomic_set_release()
This came out of porting qrwlock code to arm64 (by Will Deacon)
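A sketch of how the new flavours are meant to be used (the lock word
is hypothetical; _acquire orders the critical section after the
acquisition, _release orders it before the store, and neither imposes
a full barrier):

    static atomic_t lock = ATOMIC_INIT(0);    /* hypothetical lock word */

    /* Lock: a successful cmpxchg_acquire() orders all later
     * memory accesses after it. */
    while (atomic_cmpxchg_acquire(&lock, 0, 1) != 0)
            cpu_relax();

    /* ... critical section ... */

    /* Unlock: set_release() orders all earlier accesses before
     * the store; nothing afterwards needs ordering. */
    atomic_set_release(&lock, 0);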
- Clean up the fragile static_key APIs that were causing repeat bugs
by introducing a new interface:
    DEFINE_STATIC_KEY_TRUE(name);
    DEFINE_STATIC_KEY_FALSE(name);
which defines a key of one of two distinct types with an initial
true/false value.
Then allow:
    static_branch_likely()
    static_branch_unlikely()
to take a key of either type and emit the right instruction for the
case. To know the 'type' of the static key at patch time, it is
encoded in the jump entry (by Peter Zijlstra)
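In use, the new interface looks roughly like this (a sketch; the key
name and the slow-path function are hypothetical, while
static_branch_enable()/disable() are the runtime toggles added by the
same series):

    DEFINE_STATIC_KEY_FALSE(net_debug_key);    /* hypothetical key */

    /* Fast path: compiles to a single no-op while the key is
     * disabled, so the disabled branch costs nothing. */
    if (static_branch_unlikely(&net_debug_key))
            do_debug_accounting();             /* hypothetical slow path */

    /* Runtime toggles, e.g. from a sysctl handler: */
    static_branch_enable(&net_debug_key);
    static_branch_disable(&net_debug_key);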
- Static key self-tests (by Jason Baron)
- qrwlock optimizations (by Waiman Long)
- small futex enhancements (by Davidlohr Bueso)
- ... and misc other changes"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
jump_label/x86: Work around asm build bug on older/backported GCCs
locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
locking/static_keys: Make verify_keys() static
jump label, locking/static_keys: Update docs
locking/static_keys: Provide a selftest
jump_label: Provide a self-test
s390/uaccess, locking/static_keys: employ static_branch_likely()
x86, tsc, locking/static_keys: Employ static_branch_likely()
locking/static_keys: Add selftest
locking/static_keys: Add a new static_key interface
locking/static_keys: Rework update logic
locking/static_keys: Add static_key_{en,dis}able() helpers
...
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/atomic_32.h  | 28
-rw-r--r--  arch/tile/include/asm/atomic_64.h  | 40
-rw-r--r--  arch/tile/lib/atomic_32.c          | 23
-rw-r--r--  arch/tile/lib/atomic_asm_32.S      |  4
4 files changed, 95 insertions(+), 0 deletions(-)
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fad9fff..d320ce253d86 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op)							\
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	_atomic_##op((unsigned long *)&v->counter, i);			\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op)						\
+long long _atomic64_##op(long long *v, long long n);		\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	_atomic64_##op(&v->counter, i);				\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970cef82..096a56d6ead4 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211fd9e7..298df1e9912a 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda3132cd61..f611265633d6 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp     lr              /* happy backtracer */
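
The tilegx atomic_xor() above is a compare-exchange retry loop: read
the old value, publish it as the expected value via SPR_CMPEXCH_VALUE,
attempt to store old ^ i, and retry until the cmpexch instruction
observes the value that was read. The same pattern expressed against
the generic kernel atomic API instead of tile builtins (a sketch, not
code from this merge):

	static inline void generic_atomic_xor(int i, atomic_t *v)
	{
		int guess, old = atomic_read(v);

		do {
			guess = old;
			/* atomic_cmpxchg() returns the value it found at
			 * v->counter; our update took effect only if that
			 * value equals 'guess'. */
			old = atomic_cmpxchg(v, guess, guess ^ i);
		} while (old != guess);
	}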