Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug |  8
-rw-r--r--   lib/atomic64.c    | 32
2 files changed, 20 insertions, 20 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6629cab453e8..06d9c9d70385 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1095,7 +1095,7 @@ config PROVE_LOCKING
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
-	select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+	select DEBUG_RWSEMS
 	select DEBUG_WW_MUTEX_SLOWPATH
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
@@ -1199,10 +1199,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
 
 config DEBUG_RWSEMS
 	bool "RW Semaphore debugging: basic checks"
-	depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+	depends on DEBUG_KERNEL
 	help
-	  This debugging feature allows mismatched rw semaphore locks and unlocks
-	  to be detected and reported.
+	  This debugging feature allows mismatched rw semaphore locks
+	  and unlocks to be detected and reported.
 
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 7e6905751522..e98c85a99787 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,11 +42,11 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-long long atomic64_read(const atomic64_t *v)
+s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -55,7 +55,7 @@ long long atomic64_read(const atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_read);
 
-void atomic64_set(atomic64_t *v, long long i)
+void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -67,7 +67,7 @@ void atomic64_set(atomic64_t *v, long long i)
 EXPORT_SYMBOL(atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
-void atomic64_##op(long long a, atomic64_t *v)				\
+void atomic64_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -79,11 +79,11 @@ void atomic64_##op(long long a, atomic64_t *v)		\
 EXPORT_SYMBOL(atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
-long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = (v->counter c_op a);					\
@@ -93,11 +93,11 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
 EXPORT_SYMBOL(atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
+s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = v->counter;						\
@@ -130,11 +130,11 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-long long atomic64_dec_if_positive(atomic64_t *v)
+s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
@@ -145,11 +145,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -160,11 +160,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
 
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -174,11 +174,11 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
+s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
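
The atomic64.c hunks above are mechanical, but the file they touch is worth a note: lib/atomic64.c implements 64-bit atomics for architectures that lack native 64-bit atomic instructions, by hashing each atomic64_t's address into a small array of spinlocks so that unrelated variables rarely contend on the same lock. Below is a minimal userspace sketch of that lock-striping idea, assuming POSIX mutexes as stand-ins for the kernel's raw_spinlock_t array; NR_LOCKS and lock_addr() mirror the kernel's names, but the hash constants and the atomic64_add_return() helper are illustrative, not the kernel's exact code.

#include <pthread.h>
#include <stdint.h>

#define NR_LOCKS 16                     /* power of two, as in the kernel */

typedef struct { int64_t counter; } atomic64_t;

/* One statically initialized mutex per slot (GCC range-designator extension) */
static pthread_mutex_t atomic64_lock[NR_LOCKS] = {
        [0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER,
};

/* Hash the variable's address into the lock array (illustrative hash) */
static pthread_mutex_t *lock_addr(const atomic64_t *v)
{
        uintptr_t addr = (uintptr_t)v;

        addr >>= 5;                     /* drop low, cache-line-ish bits */
        addr ^= (addr >> 8) ^ (addr >> 16);
        return &atomic64_lock[addr & (NR_LOCKS - 1)];
}

/* Same shape as the kernel's ATOMIC64_OP_RETURN(add, +=) expansion */
static int64_t atomic64_add_return(int64_t a, atomic64_t *v)
{
        pthread_mutex_t *lock = lock_addr(v);
        int64_t val;

        pthread_mutex_lock(lock);
        val = (v->counter += a);
        pthread_mutex_unlock(lock);
        return val;
}

The long long to s64 conversion in the diff changes no generated code: in the kernel, s64 is always a 64-bit signed integer, so the switch only makes these definitions consistent with the s64-based prototypes used elsewhere in the atomic64 API.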