path: root/arch/arm64/include/asm/spinlock.h
author      Will Deacon <will.deacon@arm.com>    2015-07-22 18:25:52 +0100
committer   Will Deacon <will.deacon@arm.com>    2015-07-27 14:26:34 +0100
commit      9511ca19dafbd503fb467d451fe331a6008f08cf (patch)
tree        315f497d0d6f578a7e400c7834560164bd097097 /arch/arm64/include/asm/spinlock.h
parent      fc9eb93cd4c5d819e9a68a7906d78ce37f42d8cd (diff)
arm64: rwlocks: don't fail trylock purely due to contention
STXR can fail for a number of reasons, so don't fail an rwlock trylock
operation simply because the STXR reported failure.

I'm not aware of any issues with the current code, but this makes it
consistent with spin_trylock and also other architectures (e.g. arch/arm).

Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
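For illustration only, not part of the patch: the retry-on-spurious-failure pattern adopted here can be sketched in portable C using the GCC/Clang __atomic builtins, where a weak compare-exchange plays the role of STXR in that it may fail for reasons other than contention. The function name write_trylock_sketch and the builtin-based formulation are assumptions of this sketch rather than kernel code; the 0x80000000 writer bit matches the constant in the diff below.

    /* Hypothetical sketch, not the kernel implementation: a weak
     * compare-exchange may fail spuriously (like STXR), so only a
     * non-zero lock word makes the trylock give up. */
    static inline int write_trylock_sketch(unsigned int *lock)
    {
            unsigned int expected;

            do {
                    if (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
                            return 0;       /* genuinely contended: fail */
                    expected = 0;
            } while (!__atomic_compare_exchange_n(lock, &expected, 0x80000000U,
                                                  1 /* weak */, __ATOMIC_ACQUIRE,
                                                  __ATOMIC_RELAXED));

            return 1;
    }

The point mirrored from the patch is that a failed store-exclusive loops back and re-reads the lock word instead of reporting trylock failure.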
Diffstat (limited to 'arch/arm64/include/asm/spinlock.h')
-rw-r--r--    arch/arm64/include/asm/spinlock.h    14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee128732435..0f08ba5cfb33 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -140,10 +140,11 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
unsigned int tmp;
asm volatile(
- " ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
+ "1: ldaxr %w0, %1\n"
+ " cbnz %w0, 2f\n"
" stxr %w0, %w2, %1\n"
- "1:\n"
+ " cbnz %w0, 1b\n"
+ "2:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
: "memory");
@@ -209,11 +210,12 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
unsigned int tmp, tmp2 = 1;
asm volatile(
- " ldaxr %w0, %2\n"
+ "1: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
- " tbnz %w0, #31, 1f\n"
+ " tbnz %w0, #31, 2f\n"
" stxr %w1, %w0, %2\n"
- "1:\n"
+ " cbnz %w1, 1b\n"
+ "2:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
: "memory");