 arch/s390/lib/spinlock.c | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 427aa44b3505..0a68fe04a9e1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -41,8 +41,9 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
-	int count;
+	int count, first_diag;
 
+	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
@@ -51,9 +52,10 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 				return;
 			continue;
 		}
-		/* Check if the lock owner is running. */
-		if (!smp_vcpu_scheduled(~owner)) {
+		/* First iteration: check if the lock owner is running. */
+		if (first_diag && !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
 			continue;
 		}
 		/* Loop for a while on the lock value. */
@@ -67,10 +69,13 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		/*
 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
-		 * yield the CPU if the lock is still unavailable.
+		 * yield the CPU unconditionally. For LPAR rely on the
+		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR)
+		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
+		}
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -79,9 +84,10 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
-	int count;
+	int count, first_diag;
 
 	local_irq_restore(flags);
+	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
@@ -92,8 +98,9 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			local_irq_restore(flags);
 		}
 		/* Check if the lock owner is running. */
-		if (!smp_vcpu_scheduled(~owner)) {
+		if (first_diag && !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
 			continue;
 		}
 		/* Loop for a while on the lock value. */
@@ -107,10 +114,13 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			continue;
 		/*
 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
-		 * yield the CPU if the lock is still unavailable.
+		 * yield the CPU unconditionally. For LPAR rely on the
+		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR)
+		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
+		}
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
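
The change has the same shape in both wait loops: a per-acquisition flag, first_diag, limits the directed-yield diagnose to the first time the lock owner is observed as not scheduled, and on LPAR the final yield is gated on the sense-running status instead of being skipped outright. What follows is a minimal user-space sketch of that control flow, not the kernel code itself: vcpu_scheduled(), yield_to(), try_lock_once() and SPIN_RETRY are hypothetical stand-ins for smp_vcpu_scheduled(), smp_yield_cpu(), the compare-and-swap acquire and spin_retry, and the sketch omits the inverted-CPU encoding of the lock word that explains the ~owner in the real code.

#include <stdatomic.h>
#include <stdbool.h>

#define SPIN_RETRY 1000

static atomic_uint lock_word;        /* 0 = free, else owner's CPU id */
static bool machine_is_lpar;         /* stand-in for MACHINE_IS_LPAR */

/* Stubs standing in for the hypervisor interfaces. */
static bool vcpu_scheduled(unsigned int owner) { (void)owner; return true; }
static void yield_to(unsigned int owner) { (void)owner; }

static bool try_lock_once(unsigned int cpu)
{
	unsigned int free = 0;
	return atomic_compare_exchange_strong(&lock_word, &free, cpu);
}

void spin_lock_wait_sketch(unsigned int cpu)
{
	unsigned int owner;
	int count, first_diag = 1;

	while (1) {
		owner = atomic_load(&lock_word);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (try_lock_once(cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && !vcpu_scheduled(owner)) {
			yield_to(owner);
			first_diag = 0;	/* don't repeat the diagnose each pass */
			continue;
		}
		/* Loop for a while on the lock value. */
		count = SPIN_RETRY;
		do {
			owner = atomic_load(&lock_word);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield unconditionally; on LPAR rely on sense running.
		 */
		if (!machine_is_lpar || !vcpu_scheduled(owner)) {
			yield_to(owner);
			first_diag = 0;
		}
	}
}

The effect of the flag is that the yield hypercall, which is comparatively expensive, is issued at most once up front per acquisition attempt; later passes only reach it through the tail path after a full spin-retry round has failed.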