author     Paul Burton <paul.burton@imgtec.com>       2017-06-09 17:26:40 -0700
committer  Ralf Baechle <ralf@linux-mips.org>         2017-06-29 02:42:25 +0200
commit     3ba7f44d2b19166b34031db48ce613d1bddbd384 (patch)
tree       973693babaf2dd5eee8646c084e155e7b30f1adb /arch/mips/kernel
parent     b70eb30056dc84568f3d32440d9be6a558025843 (diff)
MIPS: cmpxchg: Implement 1 byte & 2 byte cmpxchg()
Implement support for 1 & 2 byte cmpxchg() using read-modify-write atop
a 4 byte cmpxchg(). This allows us to support these atomic operations
despite the MIPS ISA only providing 4 & 8 byte atomic operations.
This is required in order to support queued rwlocks (qrwlock) in a later
patch, since these make use of a 1 byte cmpxchg() in their slow path.
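For illustration, a minimal caller-side sketch of what this enables (the try_set_flag() helper and the flag variable are hypothetical, and it assumes the header-side dispatch elsewhere in this series that routes 1 & 2 byte sizes to __cmpxchg_small()):

	/* Hypothetical caller-side sketch, not part of this patch. */
	#include <linux/atomic.h>
	#include <linux/types.h>

	static u8 flag;

	/*
	 * Atomically transition 'flag' from 0 to 1, returning true on success.
	 * With this patch the 1 byte access is emulated by a 4 byte cmpxchg()
	 * on the naturally aligned word containing 'flag', so the LL/SC
	 * instructions provided by the MIPS ISA are sufficient.
	 */
	static bool try_set_flag(void)
	{
		return cmpxchg(&flag, 0, 1) == 0;
	}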
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16355/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/cmpxchg.c   57
1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 5acfbf9fb2c5..7730f1d3434f 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -50,3 +50,60 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
 
 	return (load32 & mask) >> shift;
 }
+
+unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+			      unsigned long new, unsigned int size)
+{
+	u32 mask, old32, new32, load32;
+	volatile u32 *ptr32;
+	unsigned int shift;
+	u8 load;
+
+	/* Check that ptr is naturally aligned */
+	WARN_ON((unsigned long)ptr & (size - 1));
+
+	/* Mask inputs to the correct size. */
+	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+	old &= mask;
+	new &= mask;
+
+	/*
+	 * Calculate a shift & mask that correspond to the value we wish to
+	 * compare & exchange within the naturally aligned 4 byte integer
+	 * that includes it.
+	 */
+	shift = (unsigned long)ptr & 0x3;
+	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		shift ^= sizeof(u32) - size;
+	shift *= BITS_PER_BYTE;
+	mask <<= shift;
+
+	/*
+	 * Calculate a pointer to the naturally aligned 4 byte integer that
+	 * includes our byte of interest, and load its value.
+	 */
+	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+	load32 = *ptr32;
+
+	while (true) {
+		/*
+		 * Ensure the byte we want to exchange matches the expected
+		 * old value, and if not then bail.
+		 */
+		load = (load32 & mask) >> shift;
+		if (load != old)
+			return load;
+
+		/*
+		 * Calculate the old & new values of the naturally aligned
+		 * 4 byte integer that include the byte we want to exchange.
+		 * Attempt to exchange the old value for the new value, and
+		 * return if we succeed.
+		 */
+		old32 = (load32 & ~mask) | (old << shift);
+		new32 = (load32 & ~mask) | (new << shift);
+		load32 = cmpxchg(ptr32, old32, new32);
+		if (load32 == old32)
+			return old;
+	}
+}
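To sanity-check the shift & mask arithmetic used above, here is a small standalone userspace sketch (not kernel code; BITS_PER_BYTE and GENMASK are open-coded stand-ins and calc() is a made-up helper) that reproduces the calculation for both endiannesses:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel helpers used in the patch. */
	#define BITS_PER_BYTE	8
	#define GENMASK(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))

	/*
	 * Reproduce the shift/mask computation from __cmpxchg_small() for a
	 * value of 'size' bytes located 'offset' bytes into its naturally
	 * aligned 32-bit container word.
	 */
	static void calc(unsigned int offset, unsigned int size, int big_endian,
			 unsigned int *shift_out, uint32_t *mask_out)
	{
		uint32_t mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
		unsigned int shift = offset;

		if (big_endian)
			shift ^= sizeof(uint32_t) - size;
		shift *= BITS_PER_BYTE;

		*shift_out = shift;
		*mask_out = mask << shift;
	}

	int main(void)
	{
		unsigned int shift;
		uint32_t mask;

		/* 1 byte value at offset 1: bits 8-15 on LE, bits 16-23 on BE. */
		calc(1, 1, 0, &shift, &mask);
		assert(shift == 8 && mask == 0x0000ff00);
		calc(1, 1, 1, &shift, &mask);
		assert(shift == 16 && mask == 0x00ff0000);

		/* 2 byte value at offset 2: high half on LE, low half on BE. */
		calc(2, 2, 0, &shift, &mask);
		assert(shift == 16 && mask == 0xffff0000);
		calc(2, 2, 1, &shift, &mask);
		assert(shift == 0 && mask == 0x0000ffff);

		printf("shift/mask checks passed\n");
		return 0;
	}

The XOR against sizeof(u32) - size is what keeps the code endian-agnostic: on big-endian configurations the byte at a given address sits at the opposite end of the containing word, so the shift is mirrored rather than recomputed with a separate code path.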