author    Heiko Carstens <heiko.carstens@de.ibm.com>   2013-10-21 07:44:08 +0200
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2013-10-24 17:17:13 +0200
commit    0702fbf572ac5e513873628bf534da4a8a2025b4 (patch)
tree      29a64064d299f04f7fc22e65c5a5c1ecc828ea60 /arch/s390/include
parent    f26946d7ecad0afdd85e6ae56663d0fe26676b34 (diff)
s390/percpu: use generic percpu ops for CONFIG_32BIT
Remove the special cases for the this_cpu_* functions for 32 bit in order
to make it easier to add additional code for 64 bit. 32 bit will use the
generic implementation.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/percpu.h  26
1 file changed, 7 insertions(+), 19 deletions(-)
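For illustration, here is a minimal userspace analogue of the cmpxchg retry
loop that the arch_this_cpu_to_op() macro boils down to after this patch:
read the old value, compute the new one, and retry until no concurrent
update raced in between. This is a sketch, not the kernel code; GCC's
__sync_val_compare_and_swap stands in for the kernel's cmpxchg(), and the
per-cpu pointer lookup and preemption handling are left out.

/* Sketch of the cmpxchg retry loop used by the 64 bit per-cpu ops. */
#include <stdio.h>

static unsigned long counter;

static unsigned long cmpxchg_add(unsigned long *ptr, unsigned long val)
{
	unsigned long old, new, prev = *ptr;

	do {
		old = prev;
		new = old + val;
		/* Stand-in for the kernel's cmpxchg(): returns the value
		 * that was in *ptr; equal to old iff the swap happened. */
		prev = __sync_val_compare_and_swap(ptr, old, new);
	} while (prev != old);

	return new;
}

int main(void)
{
	printf("%lu\n", cmpxchg_add(&counter, 5));	/* prints 5 */
	return 0;
}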
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 86fe0ee2cee5..41baca870d0c 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,12 +10,14 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
+#ifdef CONFIG_64BIT
+
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
* generate external references.
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
@@ -30,13 +32,7 @@
do { \
old__ = prev__; \
new__ = old__ op (val); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- prev__ = cmpxchg64(ptr__, old__, new__); \
- break; \
- default: \
- prev__ = cmpxchg(ptr__, old__, new__); \
- } \
+ prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
preempt_enable(); \
new__; \
@@ -74,13 +70,7 @@
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- ret__ = cmpxchg64(ptr__, oval, nval); \
- break; \
- default: \
- ret__ = cmpxchg(ptr__, oval, nval); \
- } \
+ ret__ = cmpxchg(ptr__, oval, nval); \
preempt_enable(); \
ret__; \
})
@@ -104,9 +94,7 @@
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
@@ -124,9 +112,9 @@
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
#include <asm-generic/percpu.h>
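As a hedged usage sketch (kernel context, not standalone userspace): the
macros above are what the generic this_cpu_*() interfaces in
<linux/percpu.h> expand to on s390. DEFINE_PER_CPU, this_cpu_add(), and
this_cpu_xchg() are the real generic interfaces; the per-cpu variable and
function names below are invented for illustration.

#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, demo_hits);

static void record_hit(void)
{
	/* On 64 bit this lands in the cmpxchg()-based
	 * arch_this_cpu_to_op() loop; on 32 bit it now falls through
	 * to the generic asm-generic/percpu.h implementation. */
	this_cpu_add(demo_hits, 1);
}

static u64 reset_hits(void)
{
	/* this_cpu_xchg_8() is defined for 64 bit only; 32 bit again
	 * uses the generic fallback. */
	return this_cpu_xchg(demo_hits, 0);
}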