summary | refs | log | tree | commitdiff
diff options
context:
space:
mode:
author    Jens Arnold <amiconn@rockbox.org>  2008-10-07 19:40:17 +0000
committer Jens Arnold <amiconn@rockbox.org>  2008-10-07 19:40:17 +0000
commit6b84f600466ab02f5a671d5004cc5b13f18460af (patch)
tree2dddfb3838ec570cd415e10f0251c3d3f0d0cf10
parent46573019a53dca411f754d40d0f21c1e4eafaedf (diff)
APE: Further ARMv6 filter optimisations: Save 4 'ror's per round by utilising the shift feature of the 'pack halfword' instructions in the unaligned vector addition/ subtraction, better pipelining in the aligned scalarproduct(), and a new method to calculate the unaligned scalarproduct().
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@18736 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  apps/codecs/demac/libdemac/vector_math16_armv6.h  147
1 file changed, 66 insertions(+), 81 deletions(-)
diff --git a/apps/codecs/demac/libdemac/vector_math16_armv6.h b/apps/codecs/demac/libdemac/vector_math16_armv6.h
index bf50d9cabd..e180429193 100644
--- a/apps/codecs/demac/libdemac/vector_math16_armv6.h
+++ b/apps/codecs/demac/libdemac/vector_math16_armv6.h
@@ -39,36 +39,33 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
"10: \n"
"ldrh r4, [%[v2]], #2 \n"
+ "mov r4, r4, lsl #16 \n"
"1: \n"
"ldmia %[v2]!, {r5-r8} \n"
"ldmia %[v1], {r0-r3} \n"
"mov r5, r5, ror #16 \n"
- "pkhbt r4, r4, r5 \n"
+ "pkhtb r4, r5, r4, asr #16 \n"
"sadd16 r0, r0, r4 \n"
- "mov r6, r6, ror #16 \n"
- "pkhbt r5, r5, r6 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
"sadd16 r1, r1, r5 \n"
"mov r7, r7, ror #16 \n"
- "pkhbt r6, r6, r7 \n"
+ "pkhtb r6, r7, r6, asr #16 \n"
"sadd16 r2, r2, r6 \n"
- "mov r8, r8, ror #16 \n"
- "pkhbt r7, r7, r8 \n"
+ "pkhbt r7, r7, r8, lsl #16 \n"
"sadd16 r3, r3, r7 \n"
"stmia %[v1]!, {r0-r3} \n"
"mov r4, r8 \n"
"ldmia %[v2]!, {r5-r8} \n"
"ldmia %[v1], {r0-r3} \n"
"mov r5, r5, ror #16 \n"
- "pkhbt r4, r4, r5 \n"
+ "pkhtb r4, r5, r4, asr #16 \n"
"sadd16 r0, r0, r4 \n"
- "mov r6, r6, ror #16 \n"
- "pkhbt r5, r5, r6 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
"sadd16 r1, r1, r5 \n"
"mov r7, r7, ror #16 \n"
- "pkhbt r6, r6, r7 \n"
+ "pkhtb r6, r7, r6, asr #16 \n"
"sadd16 r2, r2, r6 \n"
- "mov r8, r8, ror #16 \n"
- "pkhbt r7, r7, r8 \n"
+ "pkhbt r7, r7, r8, lsl #16 \n"
"sadd16 r3, r3, r7 \n"
"stmia %[v1]!, {r0-r3} \n"
#if ORDER > 16
@@ -128,36 +125,33 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
"10: \n"
"ldrh r4, [%[v2]], #2 \n"
+ "mov r4, r4, lsl #16 \n"
"1: \n"
"ldmia %[v2]!, {r5-r8} \n"
"ldmia %[v1], {r0-r3} \n"
"mov r5, r5, ror #16 \n"
- "pkhbt r4, r4, r5 \n"
+ "pkhtb r4, r5, r4, asr #16 \n"
"ssub16 r0, r0, r4 \n"
- "mov r6, r6, ror #16 \n"
- "pkhbt r5, r5, r6 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
"ssub16 r1, r1, r5 \n"
"mov r7, r7, ror #16 \n"
- "pkhbt r6, r6, r7 \n"
+ "pkhtb r6, r7, r6, asr #16 \n"
"ssub16 r2, r2, r6 \n"
- "mov r8, r8, ror #16 \n"
- "pkhbt r7, r7, r8 \n"
+ "pkhbt r7, r7, r8, lsl #16 \n"
"ssub16 r3, r3, r7 \n"
"stmia %[v1]!, {r0-r3} \n"
"mov r4, r8 \n"
"ldmia %[v2]!, {r5-r8} \n"
"ldmia %[v1], {r0-r3} \n"
"mov r5, r5, ror #16 \n"
- "pkhbt r4, r4, r5 \n"
+ "pkhtb r4, r5, r4, asr #16 \n"
"ssub16 r0, r0, r4 \n"
- "mov r6, r6, ror #16 \n"
- "pkhbt r5, r5, r6 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
"ssub16 r1, r1, r5 \n"
"mov r7, r7, ror #16 \n"
- "pkhbt r6, r6, r7 \n"
+ "pkhtb r6, r7, r6, asr #16 \n"
"ssub16 r2, r2, r6 \n"
- "mov r8, r8, ror #16 \n"
- "pkhbt r7, r7, r8 \n"
+ "pkhbt r7, r7, r8, lsl #16 \n"
"ssub16 r3, r3, r7 \n"
"stmia %[v1]!, {r0-r3} \n"
#if ORDER > 16
@@ -217,80 +211,71 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
"beq 20f \n"
"10: \n"
- "ldrh r2, [%[v2]], #2 \n"
- "ldr r0, [%[v1]], #4 \n"
- "ldr r3, [%[v2]], #4 \n"
- "mov r2, r2, lsl #16 \n"
+ "ldrh r7, [%[v2]], #2 \n"
+ "ldmia %[v2]!, {r4-r5} \n"
+ "ldmia %[v1]!, {r0-r1} \n"
+ "mov r7, r7, lsl #16 \n"
"1: \n"
- "ldr r1, [%[v1]], #4 \n"
- "smlabt %[res], r0, r2, %[res] \n"
- "ldr r4, [%[v2]], #4 \n"
- "smlatb %[res], r0, r3, %[res] \n"
- "ldr r0, [%[v1]], #4 \n"
- "smlabt %[res], r1, r3, %[res] \n"
- "ldr r5, [%[v2]], #4 \n"
- "smlatb %[res], r1, r4, %[res] \n"
- "ldr r1, [%[v1]], #4 \n"
- "smlabt %[res], r0, r4, %[res] \n"
- "ldr r6, [%[v2]], #4 \n"
- "smlatb %[res], r0, r5, %[res] \n"
- "ldr r0, [%[v1]], #4 \n"
- "smlabt %[res], r1, r5, %[res] \n"
- "ldr r3, [%[v2]], #4 \n"
- "smlatb %[res], r1, r6, %[res] \n"
- "mov r2, r6 \n"
- "ldr r1, [%[v1]], #4 \n"
- "smlabt %[res], r0, r2, %[res] \n"
- "ldr r4, [%[v2]], #4 \n"
- "smlatb %[res], r0, r3, %[res] \n"
- "ldr r0, [%[v1]], #4 \n"
- "smlabt %[res], r1, r3, %[res] \n"
- "ldr r5, [%[v2]], #4 \n"
- "smlatb %[res], r1, r4, %[res] \n"
- "ldr r1, [%[v1]], #4 \n"
- "smlabt %[res], r0, r4, %[res] \n"
- "ldr r6, [%[v2]], #4 \n"
- "smlatb %[res], r0, r5, %[res] \n"
+ "pkhbt r8, r4, r7 \n"
+ "ldmia %[v2]!, {r6-r7} \n"
+ "smladx %[res], r0, r8, %[res] \n"
+ "pkhbt r8, r5, r4 \n"
+ "ldmia %[v1]!, {r2-r3} \n"
+ "smladx %[res], r1, r8, %[res] \n"
+ "pkhbt r8, r6, r5 \n"
+ "ldmia %[v2]!, {r4-r5} \n"
+ "smladx %[res], r2, r8, %[res] \n"
+ "pkhbt r8, r7, r6 \n"
+ "ldmia %[v1]!, {r0-r1} \n"
+ "smladx %[res], r3, r8, %[res] \n"
+ "pkhbt r8, r4, r7 \n"
+ "ldmia %[v2]!, {r6-r7} \n"
+ "smladx %[res], r0, r8, %[res] \n"
+ "pkhbt r8, r5, r4 \n"
+ "ldmia %[v1]!, {r2-r3} \n"
+ "smladx %[res], r1, r8, %[res] \n"
+ "pkhbt r8, r6, r5 \n"
#if ORDER > 16
"subs %[cnt], %[cnt], #1 \n"
- "ldrne r0, [%[v1]], #4 \n"
- "smlabt %[res], r1, r5, %[res] \n"
- "ldrne r3, [%[v2]], #4 \n"
- "smlatb %[res], r1, r6, %[res] \n"
- "mov r2, r6 \n"
+ "ldmneia %[v2]!, {r4-r5} \n"
+ "smladx %[res], r2, r8, %[res] \n"
+ "pkhbt r8, r7, r6 \n"
+ "ldmneia %[v1]!, {r0-r1} \n"
+ "smladx %[res], r3, r8, %[res] \n"
"bne 1b \n"
#else
- "smlabt %[res], r1, r5, %[res] \n"
- "smlatb %[res], r1, r6, %[res] \n"
+ "pkhbt r7, r7, r6 \n"
+ "smladx %[res], r2, r8, %[res] \n"
+ "smladx %[res], r3, r7, %[res] \n"
#endif
"b 99f \n"
"20: \n"
"ldmia %[v1]!, {r0-r1} \n"
- "ldmia %[v2]!, {r4-r5} \n"
+ "ldmia %[v2]!, {r5-r7} \n"
"1: \n"
"ldmia %[v1]!, {r2-r3} \n"
- "smlad %[res], r0, r4, %[res] \n"
- "ldmia %[v2]!, {r6-r7} \n"
- "smlad %[res], r1, r5, %[res] \n"
- "ldmia %[v1]!, {r0-r1} \n"
- "smlad %[res], r2, r6, %[res] \n"
+ "smlad %[res], r0, r5, %[res] \n"
"ldmia %[v2]!, {r4-r5} \n"
- "smlad %[res], r3, r7, %[res] \n"
- "ldmia %[v1]!, {r2-r3} \n"
- "smlad %[res], r0, r4, %[res] \n"
+ "smlad %[res], r1, r6, %[res] \n"
+ "ldmia %[v1]!, {r0-r1} \n"
+ "smlad %[res], r2, r7, %[res] \n"
"ldmia %[v2]!, {r6-r7} \n"
- "smlad %[res], r1, r5, %[res] \n"
+ "smlad %[res], r3, r4, %[res] \n"
+ "ldmia %[v1]!, {r2-r3} \n"
+ "smlad %[res], r0, r5, %[res] \n"
+ "ldmia %[v2]!, {r4-r5} \n"
+ "smlad %[res], r1, r6, %[res] \n"
#if ORDER > 16
"subs %[cnt], %[cnt], #1 \n"
"ldmneia %[v1]!, {r0-r1} \n"
- "smlad %[res], r2, r6, %[res] \n"
- "ldmneia %[v2]!, {r4-r5} \n"
- "smlad %[res], r3, r7, %[res] \n"
+ "smlad %[res], r2, r7, %[res] \n"
+ "ldmneia %[v2]!, {r6-r7} \n"
+ "smlad %[res], r3, r4, %[res] \n"
"bne 1b \n"
#else
- "smlad %[res], r2, r6, %[res] \n"
- "smlad %[res], r3, r7, %[res] \n"
+ "smlad %[res], r2, r7, %[res] \n"
+ "smlad %[res], r3, r4, %[res] \n"
#endif
"99: \n"
@@ -303,8 +288,8 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
[res]"+r"(res)
: /* inputs */
: /* clobbers */
- "r0", "r1", "r2", "r3",
- "r4", "r5", "r6", "r7"
+ "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8"
);
return res;
}