author     Solomon Peachy <pizza@shaftnet.org>    2020-06-29 19:23:03 -0400
committer  Solomon Peachy <pizza@shaftnet.org>    2020-07-03 21:36:41 +0000
commit     905e19905b8601b672ed9a9b7121dfdb2444602c (patch)
tree       851998a51631946a860ed678c518f32ec92affb4 /firmware/asm
parent     9cf2492407af6fae5928685205e7fc17032ac253 (diff)
ARM: Rejigger the asm corelock functions
This appears to solve _some_ of the crashes experienced when using gcc494 on the multicore PP targets (e.g. most older ipods). (With this change, the asm and plain-C versions behave identically.)

corelock_lock(), corelock_unlock(), and corelock_try_lock() were declared with the 'naked' attribute. However, naked functions may only contain 'Basic Asm'; these used extended asm without declaring clobbered registers, made assumptions about which registers held their arguments, and returned directly to the caller from asm.

This is what the GCC docs have to say about this:

"While using extended asm or a mixture of basic asm and C code may appear to work, they cannot be depended upon to work reliably and are not supported."

Change-Id: I79a9c4895584f9af365e6c2387595e9c45d89c7d
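For context, here is a minimal sketch of the two patterns (hypothetical example, not from the Rockbox tree): GCC supports only basic asm inside a naked function, so extended asm with operands and clobbers belongs in an ordinary function, where the compiler manages the frame and the return.

/* The unsupported pattern this commit removes: 'naked' plus extended
 * asm, assuming the argument arrived in r0 and returning from asm
 * behind GCC's back. */
void __attribute__((naked)) bad_set_flag(volatile unsigned char *p)
{
    asm volatile (
        "mov  r1, #1   \n"
        "strb r1, [r0] \n"  /* assumes p is still in r0 */
        "bx   lr       \n"  /* manual return */
        : : "r" (p)         /* extended asm: not allowed when naked */
    );
}

/* The supported pattern: a normal function with named operands and an
 * explicit clobber list, so GCC knows exactly what the asm touches. */
void good_set_flag(volatile unsigned char *p)
{
    asm volatile (
        "mov  r1, #1       \n"
        "strb r1, [%[ptr]] \n"
        :
        : [ptr] "r" (p)
        : "r1", "memory"
    );
}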
Diffstat (limited to 'firmware/asm')
-rw-r--r--    firmware/asm/arm/corelock.c    71
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c
index 713164e49b..b36a40b45b 100644
--- a/firmware/asm/arm/corelock.c
+++ b/firmware/asm/arm/corelock.c
@@ -28,69 +28,72 @@
* Wait for the corelock to become free and acquire it when it does.
*---------------------------------------------------------------------------
*/
-void __attribute__((naked)) corelock_lock(struct corelock *cl)
+void corelock_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
- "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+ "strb r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
- "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+ "strb r2, [%[cl], #2] \n" /* cl->turn = othercore */
"1: \n"
- "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
- "cmp r3, #0 \n" /* yes? lock acquired */
- "bxeq lr \n"
- "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
- "cmp r3, r1 \n"
- "bxeq lr \n" /* yes? lock acquired */
- "b 1b \n" /* keep trying */
- : : "i"(&PROCESSOR_ID)
+ "ldrb r2, [%[cl], r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+ "cmp r2, #0 \n" /* yes? lock acquired */
+ "beq 2f \n"
+ "ldrb r2, [%[cl], #2] \n" /* || cl->turn == core ? */
+ "cmp r2, r1 \n"
+ "bne 1b \n" /* no? try again */
+ "2: \n" /* Done */
+ :
+ : [id] "i"(&PROCESSOR_ID), [cl] "r" (cl)
+ : "r1","r2","cc"
);
- (void)cl;
}
/*---------------------------------------------------------------------------
* Try to acquire the corelock. If free, the caller gets it; otherwise return 0.
*---------------------------------------------------------------------------
*/
-int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
+int corelock_try_lock(struct corelock *cl)
{
+ int rval = 0;
+
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
- "mov r3, r0 \n"
- "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+ "strb r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
- "strb r2, [r0, #2] \n" /* cl->turn = othercore */
- "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
- "eors r0, r0, r2 \n" /* yes? lock acquired */
- "bxne lr \n"
- "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
- "ands r0, r0, r1 \n"
- "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
- "bx lr \n" /* return result */
- : : "i"(&PROCESSOR_ID)
+ "strb r2, [%[cl], #2] \n" /* cl->turn = othercore */
+ "ldrb %[rv], [%[cl], r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+ "eors %[rv], %[rv], r2 \n"
+ "bne 1f \n" /* yes? lock acquired */
+ "ldrb %[rv], [%[cl], #2] \n" /* || cl->turn == core? */
+ "ands %[rv], %[rv], r1 \n"
+ "streqb %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
+ "1: \n" /* Done */
+ : [rv] "=r"(rval)
+ : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
+ : "r1","r2","cc"
);
- return 0;
- (void)cl;
+ return rval;
}
/*---------------------------------------------------------------------------
* Release ownership of the corelock
*---------------------------------------------------------------------------
*/
-void __attribute__((naked)) corelock_unlock(struct corelock *cl)
+void corelock_unlock(struct corelock *cl)
{
asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"mov r2, #0 \n" /* cl->myl[core] = 0 */
- "strb r2, [r0, r1, lsr #7] \n"
- "bx lr \n"
- : : "i"(&PROCESSOR_ID)
+ "strb r2, [%[cl], r1, lsr #7] \n"
+ :
+ : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
+ : "r1","r2"
);
- (void)cl;
}
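The commit message notes that the asm and plain-C versions now behave identically. As a reference point, a rough C rendering of the same Peterson-style logic (a sketch only; the field layout is inferred from the asm above, with myl[] at offset 0 and turn at offset 2, and the PROCESSOR_ID address is an assumption, not copied from the PP port headers):

/* Assumed core-ID register; the real definition lives in the PP port
 * headers.  It reads 0x55 or 0xaa, so (id >> 7) yields 0 or 1. */
#define PROCESSOR_ID (*(volatile unsigned char *)0x60000000)

struct corelock
{
    volatile unsigned char myl[2];
    volatile unsigned char turn;
};

void corelock_lock_c(struct corelock *cl)
{
    unsigned int core  = PROCESSOR_ID;  /* 0x55 or 0xaa */
    unsigned int other = core ^ 0xff;   /* the complementary mask */

    cl->myl[core >> 7] = core;          /* declare interest */
    cl->turn = other;                   /* give the tie-break away */

    /* Spin until the other core isn't interested, or it's our turn. */
    while (cl->myl[other >> 7] != 0 && cl->turn != core)
        ;
}

void corelock_unlock_c(struct corelock *cl)
{
    cl->myl[PROCESSOR_ID >> 7] = 0;     /* withdraw interest */
}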