Diffstat (limited to 'arch/sparc64/lib')
-rw-r--r--  arch/sparc64/lib/Makefile       2
-rw-r--r--  arch/sparc64/lib/debuglocks.c  56
-rw-r--r--  arch/sparc64/lib/mb.S          73
3 files changed, 96 insertions, 35 deletions
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d6..6201f1040982 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
copy_in_user.o user_fixup.o memmove.o \
- mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o mb.o
lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
index f03344cf784e..f5f0b5586f01 100644
--- a/arch/sparc64/lib/debuglocks.c
+++ b/arch/sparc64/lib/debuglocks.c
@@ -12,8 +12,6 @@
#ifdef CONFIG_SMP
-#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
-
static inline void show (char *str, spinlock_t *lock, unsigned long caller)
{
int cpu = smp_processor_id();
@@ -51,20 +49,19 @@ static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
#undef INIT_STUCK
#define INIT_STUCK 100000000
-void _do_spin_lock(spinlock_t *lock, char *str)
+void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
again:
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (val) {
while (lock->lock) {
if (!--stuck) {
@@ -72,7 +69,7 @@ again:
show(str, lock, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
goto again;
}
@@ -84,17 +81,16 @@ again:
put_cpu();
}
-int _do_spin_trylock(spinlock_t *lock)
+int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
{
- unsigned long val, caller;
+ unsigned long val;
int cpu = get_cpu();
- GET_CALLER(caller);
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (!val) {
lock->owner_pc = ((unsigned int)caller);
lock->owner_cpu = cpu;
@@ -111,21 +107,20 @@ void _do_spin_unlock(spinlock_t *lock)
{
lock->owner_pc = 0;
lock->owner_cpu = NO_PROC_ID;
- membar("#StoreStore | #LoadStore");
+ membar_storestore_loadstore();
lock->lock = 0;
current->thread.smp_lock_count--;
}
/* Keep INIT_STUCK the same... */
-void _do_read_lock (rwlock_t *rw, char *str)
+void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
wlock_again:
/* Wait for any writer to go away. */
while (((long)(rw->lock)) < 0) {
@@ -134,7 +129,7 @@ wlock_again:
show_read(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
/* Try once to increment the counter. */
__asm__ __volatile__(
@@ -147,7 +142,7 @@ wlock_again:
"2:" : "=r" (val)
: "0" (&(rw->lock))
: "g1", "g7", "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (val)
goto wlock_again;
rw->reader_pc[cpu] = ((unsigned int)caller);
@@ -157,15 +152,13 @@ wlock_again:
put_cpu();
}
-void _do_read_unlock (rwlock_t *rw, char *str)
+void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
-
/* Drop our identity _first_. */
rw->reader_pc[cpu] = 0;
current->thread.smp_lock_count--;
@@ -193,14 +186,13 @@ runlock_again:
put_cpu();
}
-void _do_write_lock (rwlock_t *rw, char *str)
+void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
wlock_again:
/* Spin while there is another writer. */
while (((long)rw->lock) < 0) {
@@ -209,7 +201,7 @@ wlock_again:
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
/* Try to acquire the write bit. */
@@ -264,7 +256,7 @@ wlock_again:
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
goto wlock_again;
}
@@ -278,14 +270,12 @@ wlock_again:
put_cpu();
}
-void _do_write_unlock(rwlock_t *rw)
+void _do_write_unlock(rwlock_t *rw, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int shown = 0;
- GET_CALLER(caller);
-
/* Drop our identity _first_ */
rw->writer_pc = 0;
rw->writer_cpu = NO_PROC_ID;
@@ -313,13 +303,11 @@ wlock_again:
}
}
-int _do_write_trylock (rwlock_t *rw, char *str)
+int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int cpu = get_cpu();
- GET_CALLER(caller);
-
/* Try to acquire the write bit. */
__asm__ __volatile__(
" mov 1, %%g3\n"
diff --git a/arch/sparc64/lib/mb.S b/arch/sparc64/lib/mb.S
new file mode 100644
index 000000000000..4004f748619f
--- /dev/null
+++ b/arch/sparc64/lib/mb.S
@@ -0,0 +1,73 @@
+/* mb.S: Out of line memory barriers.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+ /* These are here in an effort to more fully work around
+ * Spitfire Errata #51. Essentially, if a memory barrier
+ * occurs soon after a mispredicted branch, the chip can stop
+ * executing instructions until a trap occurs. Therefore, if
+ * interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be
+ * right in the delay slot, but a case has been traced
+ * recently wherein the memory barrier was one instruction
+ * after the branch delay slot and the chip still hung. The
+ * offending sequence was the following in sym_wakeup_done()
+ * of the sym53c8xx_2 driver:
+ *
+ * call sym_ccb_from_dsa, 0
+ * movge %icc, 0, %l0
+ * brz,pn %o0, .LL1303
+ * mov %o0, %l2
+ * membar #LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.
+ * Therefore, we put the memory barrier explicitly into a
+ * "branch always, predicted taken" delay slot to avoid the
+ * problem case.
+ */
+
+ .text
+
+99: retl
+ nop
+
+ .globl mb
+mb: ba,pt %xcc, 99b
+ membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad
+ .size mb, .-mb
+
+ .globl rmb
+rmb: ba,pt %xcc, 99b
+ membar #LoadLoad
+ .size rmb, .-rmb
+
+ .globl wmb
+wmb: ba,pt %xcc, 99b
+ membar #StoreStore
+ .size wmb, .-wmb
+
+ .globl membar_storeload
+membar_storeload:
+ ba,pt %xcc, 99b
+ membar #StoreLoad
+ .size membar_storeload, .-membar_storeload
+
+ .globl membar_storeload_storestore
+membar_storeload_storestore:
+ ba,pt %xcc, 99b
+ membar #StoreLoad | #StoreStore
+ .size membar_storeload_storestore, .-membar_storeload_storestore
+
+ .globl membar_storeload_loadload
+membar_storeload_loadload:
+ ba,pt %xcc, 99b
+ membar #StoreLoad | #LoadLoad
+ .size membar_storeload_loadload, .-membar_storeload_loadload
+
+ .globl membar_storestore_loadstore
+membar_storestore_loadstore:
+ ba,pt %xcc, 99b
+ membar #StoreStore | #LoadStore
+ .size membar_storestore_loadstore, .-membar_storestore_loadstore
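
The new mb.S turns each barrier into a tiny out-of-line routine whose membar always sits in the delay slot of a "ba,pt" (branch always, predicted taken), so the Spitfire Errata #51 problem case, a membar shortly after a mispredicted branch, cannot arise at the call sites. C code reaches these routines as ordinary function calls; the exact header wiring is not shown in this diff, so treat the prototypes below as an assumption about the C-side view of the symbols exported above.

/* Assumed C declarations matching the globals defined in mb.S. */
extern void mb(void);				/* #LoadLoad | #LoadStore | #StoreStore | #StoreLoad */
extern void rmb(void);				/* #LoadLoad */
extern void wmb(void);				/* #StoreStore */
extern void membar_storeload(void);		/* #StoreLoad */
extern void membar_storeload_storestore(void);	/* #StoreLoad | #StoreStore */
extern void membar_storeload_loadload(void);	/* #StoreLoad | #LoadLoad */
extern void membar_storestore_loadstore(void);	/* #StoreStore | #LoadStore */

This is what lets debuglocks.c replace the old membar("#StoreLoad | #StoreStore") style inline asm with calls such as membar_storeload_storestore() and rmb() in the hunks above.
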