author     Will Deacon <will.deacon@arm.com>    2019-02-22 13:37:21 +0000
committer  Will Deacon <will.deacon@arm.com>    2019-04-08 12:00:28 +0100
commit     346e91ee090b07da8d15e36bc3169ddea6968713
tree       8ec14892fbf565049c4ffa4a23d628514da4e991 /arch/mips/include/asm
parent     e9e8543fecd2e1ca53616ba82fbd55a25cd2ab8a
mips/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()
The mmiowb() macro is horribly difficult to use and drivers will continue to
work most of the time if they omit a call when it is required.

Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for mips. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide the
barrier in cases where no I/O writes were performed inside the critical
section.

Acked-by: Paul Burton <paul.burton@mips.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
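To see what this buys driver authors, compare the two fragments below. This is
a hypothetical driver critical section (the device structure, register offset
and variable names are made up for illustration), not code from this patch:

	/* Before: the driver must order its posted MMIO writes itself. */
	spin_lock(&dev->lock);
	writel(cmd, dev->regs + REG_CTRL);
	mmiowb();		/* easy to omit; things usually still "work" */
	spin_unlock(&dev->lock);

	/* After this patch on MIPS: arch_spin_unlock() issues the barrier,
	 * so a plain unlock is enough to keep writes from different CPUs'
	 * critical sections ordered at the device.
	 */
	spin_lock(&dev->lock);
	writel(cmd, dev->regs + REG_CTRL);
	spin_unlock(&dev->lock);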
Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/Kbuild     |  1 -
-rw-r--r--  arch/mips/include/asm/io.h       |  3 ---
-rw-r--r--  arch/mips/include/asm/mmiowb.h   | 11 +++++++++++
-rw-r--r--  arch/mips/include/asm/spinlock.h | 15 +++++++++++++++
4 files changed, 26 insertions(+), 4 deletions(-)
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index bf39c2253ec8..87b86cdf126a 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 845fbbc7a2e3..29997e42480e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
-/* Some callers use this older API instead. */
-#define mmiowb() iobarrier_w()
-
/*
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 000000000000..a40824e3ef8e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb() iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
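Callers that still invoke mmiowb() directly keep compiling and keep their
barrier: the new header maps the macro to iobarrier_w(), which io.h defines
as wmb(). A hypothetical caller, for illustration only:

	/* On MIPS this expands as: mmiowb() -> iobarrier_w() -> wmb() */
	writel(val, ioaddr + REG_DOORBELL);	/* posted MMIO write */
	mmiowb();				/* order it ahead of the next lock release */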
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ee81297d9117..8a88eb265516 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -11,6 +11,21 @@
#include <asm/processor.h>
#include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
+ smp_store_release(&lock->locked, 0);
+}
+
#include <asm/qspinlock.h>
#endif /* _ASM_SPINLOCK_H */
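The comment above queued_spin_unlock() points at the follow-up optimisation
the commit message mentions: if MIPS selected ARCH_HAS_MMIOWB, the generic
mmiowb tracking could pay for the barrier only when an I/O write actually
happened inside the critical section. The sketch below is illustrative only;
it does not reproduce the kernel's generic implementation, and the names and
state layout are shorthand:

	/* Illustrative sketch of ARCH_HAS_MMIOWB-style elision, not the
	 * kernel's actual generic code: the I/O accessors mark per-CPU
	 * state as pending, and unlock only emits mmiowb() if it is set.
	 */
	struct mmiowb_state {
		u16	nesting_count;		/* spinlock nesting depth      */
		u16	mmiowb_pending;		/* MMIO write since last lock? */
	};

	static inline void mmiowb_spin_unlock(struct mmiowb_state *ms)
	{
		if (ms->mmiowb_pending) {
			ms->mmiowb_pending = 0;
			mmiowb();	/* only when an I/O write occurred */
		}
		ms->nesting_count--;
	}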