author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-11-12 12:51:17 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-11-27 09:24:15 +0100
commit	db1c45154a82195ad0b4d45d0e330ebac6883b70 (patch)
tree	4d7d411fcda98f9f0e4299c4578f64147696f35d /arch/s390
parent	1a2c5840acf9f657c9b580d4ee12a4c9db3429cb (diff)
s390/spinlock: avoid diagnose loop
The spinlock implementation calls diagnose 0x9c / 0x44 immediately if SIGP sense running reports the target CPU as not running. Diagnose 0x9c is a hint to the hypervisor to schedule the target CPU in preference to the source CPU that issued the diagnose. It can happen that on return from the diagnose the target CPU has not been scheduled yet, e.g. if the target logical CPU is on another physical CPU and the hypervisor did not want to migrate the logical CPU. Avoid an immediate repeat of the diagnose instruction; instead, run the retry loop on the lock value before the next invocation of diagnose 0x9c.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
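To make the new control flow easier to follow before reading the diff, here is a minimal, standalone C sketch of the reworked wait loop. It is an illustration under assumed names, not the kernel code: lock_owner_running(), yield_to_owner(), machine_is_lpar() and SPIN_RETRY are hypothetical stand-ins for smp_vcpu_scheduled(), smp_yield_cpu() (the diagnose 0x9c hint), MACHINE_IS_LPAR and spin_retry.

/*
 * Illustrative sketch only -- not the kernel implementation in the diff below.
 * The helpers are trivial placeholders for the real s390 primitives.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SPIN_RETRY 1000	/* stands in for spin_retry */

static bool lock_owner_running(unsigned int owner) { (void)owner; return true; }
static void yield_to_owner(unsigned int owner) { (void)owner; /* hypervisor hint */ }
static bool machine_is_lpar(void) { return false; }

void spin_lock_wait_sketch(atomic_uint *lock, unsigned int cpu)
{
	unsigned int owner, expected;
	int count, first_diag = 1;

	for (;;) {
		owner = atomic_load(lock);
		/* Try to take the lock if it is free. */
		if (!owner) {
			expected = 0;
			if (atomic_compare_exchange_strong(lock, &expected, cpu))
				return;
			continue;
		}
		/* Only the very first iteration yields right away. */
		if (first_diag && !lock_owner_running(owner)) {
			yield_to_owner(owner);
			first_diag = 0;
			continue;
		}
		/* Spin on the lock value before hinting the hypervisor again. */
		count = SPIN_RETRY;
		do {
			owner = atomic_load(lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * Nested hypervisors: yield unconditionally; on LPAR only
		 * if the owner is really not running.
		 */
		if (!machine_is_lpar() || !lock_owner_running(owner)) {
			yield_to_owner(owner);
			first_diag = 0;
		}
	}
}

The essential point mirrored from the patch is that first_diag gates the immediate yield: after the first diagnose the loop always spins for SPIN_RETRY iterations on the lock value before another hint is issued.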
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/lib/spinlock.c | 28
1 file changed, 19 insertions, 9 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 427aa44b3505..0a68fe04a9e1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -41,8 +41,9 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
-	int count;
+	int count, first_diag;
 
+	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
@@ -51,9 +52,10 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 				return;
 			continue;
 		}
-		/* Check if the lock owner is running. */
-		if (!smp_vcpu_scheduled(~owner)) {
+		/* First iteration: check if the lock owner is running. */
+		if (first_diag && !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
 			continue;
 		}
 		/* Loop for a while on the lock value. */
@@ -67,10 +69,13 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		/*
 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
-		 * yield the CPU if the lock is still unavailable.
+		 * yield the CPU unconditionally. For LPAR rely on the
+		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR)
+		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
+		}
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -79,9 +84,10 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
-	int count;
+	int count, first_diag;
 
 	local_irq_restore(flags);
+	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
@@ -92,8 +98,9 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			local_irq_restore(flags);
 		}
 		/* Check if the lock owner is running. */
-		if (!smp_vcpu_scheduled(~owner)) {
+		if (first_diag && !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
 			continue;
 		}
 		/* Loop for a while on the lock value. */
@@ -107,10 +114,13 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			continue;
 		/*
 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
-		 * yield the CPU if the lock is still unavailable.
+		 * yield the CPU unconditionally. For LPAR rely on the
+		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR)
+		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
+		}
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);