author     Russell King <rmk+kernel@arm.linux.org.uk>  2011-01-15 16:22:12 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2011-02-02 21:23:25 +0000
commit     000d9c78eb5cd7f18e3d6a381d66e606bc9b8196 (patch)
tree       4a142d5c4d4b589ac696f3a6a932ba1965d29ed1
parent     6323f0ccedf756dfe5f46549cec69a2d6d97937b (diff)
ARM: v6k: remove CPU_32v6K dependencies in asm/spinlock.h
SMP requires at least the ARMv6K extensions to be present, so if we're
running on SMP, the WFE and SEV instructions must be available. However,
when we run on UP, the v6K extensions may not be available, and so we
don't want WFE/SEV to be in the instruction stream. Use the SMP
alternatives infrastructure to replace these instructions with NOPs if
we build for SMP but run on UP.

Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/include/asm/spinlock.h | 37
1 file changed, 25 insertions(+), 12 deletions(-)
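The "SMP alternatives" mechanism the commit message refers to works as
follows: each ALT_SMP() site (added below) emits the SMP instruction into
the regular text section and records, in the linker section .alt.smp.init,
the address of that instruction together with the UP replacement encoding.
If an SMP kernel then boots on a uniprocessor system, early boot code walks
the section and overwrites each recorded site before it can execute. The C
sketch below models that fixup pass; it is illustrative only (the kernel's
actual fixup runs in early-boot assembly), and the function name and
section-boundary symbols used here are hypothetical.

    #include <stdint.h>

    /* One record per ALT_SMP() site: the address of the live SMP
     * instruction (captured by ".long 9998b") followed by the UP
     * replacement encoding (a NOP for WFE/SEV). */
    struct alt_smp_entry {
            uint32_t *insn;    /* address of the SMP instruction */
            uint32_t  up_insn; /* encoding to patch in when running UP */
    };

    /* Hypothetical linker-provided bounds of the .alt.smp.init section. */
    extern struct alt_smp_entry __alt_smp_init_begin[];
    extern struct alt_smp_entry __alt_smp_init_end[];

    /* Called once during early boot if an SMP kernel detects it is
     * running on a uniprocessor: replace every recorded WFE/SEV with
     * its NOP before any of them can execute. */
    static void fixup_smp_on_up(void)
    {
            struct alt_smp_entry *e;

            for (e = __alt_smp_init_begin; e < __alt_smp_init_end; e++)
                    *e->insn = e->up_insn;

            /* A real implementation must also flush the caches so the
             * instruction stream observes the patched text. */
    }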
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707dd..da1af5240159 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,36 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)                                  \
+        "9998:  " smp "\n"                                \
+        "       .pushsection \".alt.smp.init\", \"a\"\n"  \
+        "       .long   9998b\n"                          \
+        "       " up "\n"                                 \
+        "       .popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV             ALT_SMP("sev.w", "nop.w")
+#define WFE(cond)       ALT_SMP("wfe" cond ".w", "nop.w")
+#else
+#define SEV             ALT_SMP("sev", "nop")
+#define WFE(cond)       ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
         __asm__ __volatile__ (
                 "dsb\n"
-                "sev"
+                SEV
         );
-#elif defined(CONFIG_CPU_32v6K)
+#else
         __asm__ __volatile__ (
                 "mcr p15, 0, %0, c7, c10, 4\n"
-                "sev"
+                SEV
                 : : "r" (0)
         );
 #endif
@@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
         __asm__ __volatile__(
 "1:     ldrex   %0, [%1]\n"
 "       teq     %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"       wfene\n"
-#endif
+        WFE("ne")
 "       strexeq %0, %2, [%1]\n"
 "       teqeq   %0, #0\n"
 "       bne     1b"
@@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
         __asm__ __volatile__(
 "1:     ldrex   %0, [%1]\n"
 "       teq     %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"       wfene\n"
-#endif
+        WFE("ne")
 "       strexeq %0, %2, [%1]\n"
 "       teq     %0, #0\n"
 "       bne     1b"
@@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 "1:     ldrex   %0, [%2]\n"
 "       adds    %0, %0, #1\n"
 "       strexpl %1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-"       wfemi\n"
-#endif
+        WFE("mi")
 "       rsbpls  %0, %1, #0\n"
 "       bmi     1b"
 : "=&r" (tmp), "=&r" (tmp2)