author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 09:50:09 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 09:50:09 -0700
commit     b68e7e952f24527de62f4768b1cead91f92f5f6e
tree       c9c1dbc333becac5396eaef4d5971d3f4ca337e3 /arch/s390/lib
parent     d3b5d35290d729a2518af00feca867385a1b08fa
parent     d0790fb6e5bc5a6bb923de9c2be7fc210d6b689b
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
- three merges for KVM/s390 with changes for vfio-ccw and cpacf. The patches are included in the KVM tree as well; let git sort it out.
- add the new 'trng' random number generator
- provide the secure key verification API for the pkey interface
- introduce the z13 cpu counters to perf
- add a new system call to set up the guarded storage facility
- simplify TASK_SIZE and arch_get_unmapped_area
- export the raw STSI data related to CPU topology to user space
- ... and the usual churn of bug-fixes and cleanups.
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (74 commits)
s390/crypt: use the correct module alias for paes_s390.
s390/cpacf: Introduce kma instruction
s390/cpacf: query instructions use unique parameters for compatibility with KMA
s390/trng: Introduce s390 TRNG device driver.
s390/crypto: Provide s390 specific arch random functionality.
s390/crypto: Add new subfunctions to the cpacf PRNO function.
s390/crypto: Renaming PPNO to PRNO.
s390/pageattr: avoid unnecessary page table splitting
s390/mm: simplify arch_get_unmapped_area[_topdown]
s390/mm: make TASK_SIZE independent from the number of page table levels
s390/gs: add regset for the guarded storage broadcast control block
s390/kvm: Add use_cmma field to mm_context_t
s390/kvm: Add PGSTE manipulation functions
vfio: ccw: improve error handling for vfio_ccw_mdev_remove
vfio: ccw: remove unnecessary NULL checks of a pointer
s390/spinlock: remove compare and delay instruction
s390/spinlock: use atomic primitives for spinlocks
s390/cpumf: simplify detection of guest samples
s390/pci: remove forward declaration
s390/pci: increase the PCI_NR_FUNCTIONS default
...
Diffstat (limited to 'arch/s390/lib')
-rw-r--r-- | arch/s390/lib/spinlock.c | 84
1 file changed, 29 insertions, 55 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index ba427eb6f14c..ffb15bd4c593 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -17,7 +17,7 @@ int spin_retry = -1;
 static int __init spin_retry_init(void)
 {
 	if (spin_retry < 0)
-		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
+		spin_retry = 1000;
 	return 0;
 }
 early_initcall(spin_retry_init);
@@ -32,23 +32,17 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
-{
-	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count, first_diag;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count, first_diag;
 
 	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return;
 			continue;
 		}
@@ -61,8 +55,6 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		/* Loop for a while on the lock value. */
 		count = spin_retry;
 		do {
-			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&lp->lock, owner);
 			owner = ACCESS_ONCE(lp->lock);
 		} while (owner && count-- > 0);
 		if (!owner)
@@ -82,9 +74,8 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count, first_diag;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count, first_diag;
 
 	local_irq_restore(flags);
 	first_diag = 1;
@@ -93,7 +84,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		/* Try to get the lock if it is free. */
 		if (!owner) {
 			local_irq_disable();
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return;
 			local_irq_restore(flags);
 			continue;
@@ -107,8 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		/* Loop for a while on the lock value. */
 		count = spin_retry;
 		do {
-			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&lp->lock, owner);
 			owner = ACCESS_ONCE(lp->lock);
 		} while (owner && count-- > 0);
 		if (!owner)
@@ -128,18 +117,16 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count;
 
 	for (count = spin_retry; count > 0; count--) {
 		owner = READ_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return 1;
-		} else if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&lp->lock, owner);
+		}
 	}
 	return 0;
 }
@@ -147,8 +134,8 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int owner, old;
 	int count = spin_retry;
+	int owner, old;
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
@@ -162,12 +149,9 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old < 0) {
-			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+		if (old < 0)
 			continue;
-		}
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -175,17 +159,14 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int _raw_read_trylock_retry(arch_rwlock_t *rw)
 {
-	unsigned int old;
 	int count = spin_retry;
+	int old;
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
-		if ((int) old < 0) {
-			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+		if (old < 0)
 			continue;
-		}
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -194,10 +175,10 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
-void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
 {
-	unsigned int owner, old;
 	int count = spin_retry;
+	int owner, old;
 
 	owner = 0;
 	while (1) {
@@ -209,14 +190,12 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
 		smp_mb();
-		if ((int) old >= 0) {
+		if (old >= 0) {
 			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
 			old = prev;
 		}
-		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+		if ((old & 0x7fffffff) == 0 && prev >= 0)
 			break;
-		if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -225,8 +204,8 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int owner, old, prev;
 	int count = spin_retry;
+	int owner, old, prev;
 
 	prev = 0x80000000;
 	owner = 0;
@@ -238,15 +217,13 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old >= 0 &&
-		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+		if (old >= 0 &&
+		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
 			prev = old;
 		else
 			smp_mb();
-		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+		if ((old & 0x7fffffff) == 0 && prev >= 0)
 			break;
-		if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -255,24 +232,21 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
-	unsigned int old;
 	int count = spin_retry;
+	int old;
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
-		if (old) {
-			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+		if (old)
 			continue;
-		}
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
+		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
 
-void arch_lock_relax(unsigned int cpu)
+void arch_lock_relax(int cpu)
 {
 	if (!cpu)
 		return;
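The substance of the change is visible above: the compare-and-delay (CAD) helper and its MACHINE_HAS_CAD guards are removed, the open-coded _raw_compare_and_swap calls are replaced by the boolean cmpxchg primitive __atomic_cmpxchg_bool, and the lock word is handled as a plain int. As a rough, self-contained illustration of the same bounded-retry lock pattern (not the kernel implementation; the lock layout, retry limit, and helper names below are simplified stand-ins), a userspace sketch using GCC's __atomic builtins might look like this:

```c
/*
 * Illustrative sketch only: a bounded-retry spinlock in the spirit of the
 * patched arch_spin_lock_wait()/arch_spin_trylock_retry() loops above.
 * The struct, SPIN_RETRY value, and helper names are simplified stand-ins,
 * not kernel APIs.
 */
#include <sched.h>

#define SPIN_RETRY 1000			/* mirrors the new fixed spin_retry default */

struct toy_spinlock {
	int lock;			/* 0 = free, non-zero = owner id */
};

/* Boolean compare-and-swap, analogous in spirit to __atomic_cmpxchg_bool(). */
static inline int toy_cmpxchg_bool(int *ptr, int old, int new)
{
	return __atomic_compare_exchange_n(ptr, &old, new, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

void toy_spin_lock(struct toy_spinlock *lp, int self)
{
	int owner, count;

	while (1) {
		owner = __atomic_load_n(&lp->lock, __ATOMIC_RELAXED);
		/* Try to take the lock if it is free. */
		if (!owner) {
			if (toy_cmpxchg_bool(&lp->lock, 0, self))
				return;
			continue;
		}
		/* Spin on the lock word for a bounded number of reads. */
		count = SPIN_RETRY;
		do {
			owner = __atomic_load_n(&lp->lock, __ATOMIC_RELAXED);
		} while (owner && count-- > 0);
		if (owner)
			sched_yield();	/* stand-in for the kernel's directed yield */
	}
}
```

The kernel version additionally encodes the owning CPU in the lock word so a waiter can direct its yield at the lock holder; the sketch only preserves the overall shape of the retry loop.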