| author | Nils Wallménius <nils@rockbox.org> | 2011-09-13 16:53:13 +0000 |
| --- | --- | --- |
| committer | Nils Wallménius <nils@rockbox.org> | 2011-09-13 16:53:13 +0000 |
| commit | 85ef10826f7ebf742349acfc22cdd279db29db7c (patch) | |
| tree | bbbbe21d2c113dc404f8df15decc47e2668c1653 | |
| parent | b6271900b00548d98a121ded09ee51645b2d0f9b (diff) | |
codeclib: tweak the coldfire mdct asm a bit more; flip mac/msac usage so the negation is folded into the accumulation instead of done explicitly. This changes the output slightly, but it stays as close to the C version as before. Gives about a 0.4MHz speedup of vorbis on coldfire.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@30516 a1c6a512-1295-4272-9138-f99709370657
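Editor's note: the mac/msac flip described in the commit message is easiest to see in plain C. The sketch below is illustrative only and not part of the commit; `mult31` is a simplified, truncating stand-in for the EMAC fractional multiply, so it does not reproduce the hardware's exact low-bit behaviour. The idea: instead of summing the two products (mac.l, mac.l) and negating the result with neg.l afterwards, the new code subtracts each product from the accumulator (msac.l, msac.l), so the negation comes for free.

```c
#include <stdint.h>
#include <stdio.h>

typedef int32_t fixed32;

/* Q31-style multiply with truncation; simplified stand-in for the EMAC
 * fractional multiply (real hardware rounding may differ in the low bit). */
static fixed32 mult31(fixed32 x, fixed32 y)
{
    return (fixed32)(((int64_t)x * y) >> 31);
}

/* Old scheme: accumulate both products, then negate the sum.        */
/* Corresponds to mac.l, mac.l, ..., movclr.l, neg.l in the old asm. */
static fixed32 negate_after(fixed32 a, fixed32 b, fixed32 c, fixed32 d)
{
    fixed32 acc = mult31(a, b) + mult31(c, d);
    return -acc;
}

/* New scheme: subtract each product as it is accumulated, so no      */
/* separate negation is needed.  Corresponds to msac.l, msac.l.       */
static fixed32 negate_in_accumulate(fixed32 a, fixed32 b, fixed32 c, fixed32 d)
{
    fixed32 acc = 0;
    acc -= mult31(a, b);
    acc -= mult31(c, d);
    return acc;
}

int main(void)
{
    fixed32 a = 0x12345678, b = 0x0fedcba9, c = 0x7fffffff, d = 0x40000000;
    printf("%ld %ld\n", (long)negate_after(a, b, c, d),
                        (long)negate_in_accumulate(a, b, c, d));
    return 0;
}
```

In exact arithmetic the two forms are identical; the commit notes that the real asm's output changes slightly but stays as close to the C version as before.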
| -rw-r--r-- | apps/codecs/lib/mdct.c | 95 |
| --- | --- | --- |

1 file changed, 65 insertions, 30 deletions
```diff
diff --git a/apps/codecs/lib/mdct.c b/apps/codecs/lib/mdct.c
index 621b9cbbb8..5524afbbc4 100644
--- a/apps/codecs/lib/mdct.c
+++ b/apps/codecs/lib/mdct.c
@@ -124,7 +124,6 @@ void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input)
         default:
         {
             fixed32 * z1 = (fixed32 *)(&z[0]);
-            fixed32 * z2 = (fixed32 *)(&z[n4-1]);
             int magic_step = step>>2;
             int newstep;
             if(n<=1024)
@@ -137,47 +136,84 @@ void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input)
                 T = sincos_lookup1;
                 newstep = 2;
             }
-
-            while(z1<z2)
-            {
+#ifdef CPU_COLDFIRE
+            fixed32 * z2 = (fixed32 *)(&z[n4]);
+            int c = n4;
+            if (newstep == 2)
+            {
                 asm volatile ("movem.l (%[z1]), %%d0-%%d1\n\t"
+                              "addq.l #8, %[z1]\n\t"
                               "movem.l (%[T]), %%d2-%%d3\n\t"
-                              "mac.l %%d1, %%d2, %%acc0\n\t"
-                              "msac.l %%d0, %%d3, %%acc0\n\t"
-                              "mac.l %%d0, %%d2, %%acc1\n\t"
-                              "mac.l %%d1, %%d3, %%acc1\n\t"
+                              "addq.l #8, %[T]\n\t"
+                              "bra.s 1f\n\t"
+                              "0:\n\t"
+                              "msac.l %%d1, %%d2, (%[T])+, %%a3, %%acc0\n\t"
+                              "mac.l %%d0, %%d3, (%[T])+, %%a4, %%acc0\n\t"
+
+                              "msac.l %%d1, %%d3, -(%[z2]), %%d1, %%acc1\n\t"
+                              "msac.l %%d0, %%d2, -(%[z2]), %%d0, %%acc1\n\t"
 
-                              "lea (%[newstep]*4, %[T]), %[T]\n\t"
+                              "msac.l %%d1, %%a4, (%[T])+, %%d2, %%acc2\n\t"
+                              "mac.l %%d0, %%a3, (%[T])+, %%d3, %%acc2\n\t"
+                              "msac.l %%d0, %%a4, (%[z1])+, %%d0, %%acc3\n\t"
+                              "msac.l %%d1, %%a3, (%[z1])+, %%d1, %%acc3\n\t"
+
+                              "movclr.l %%acc0, %%a3\n\t"
+                              "movclr.l %%acc3, %%a4\n\t"
+                              "movem.l %%a3-%%a4, (-16, %[z1])\n\t"
 
-                              "movem.l (%[z2]), %%d0-%%d1\n\t"
+                              "movclr.l %%acc1, %%a4\n\t"
+                              "movclr.l %%acc2, %%a3\n\t"
+                              "movem.l %%a3-%%a4, (%[z2])\n\t"
+
+                              "subq.l #2, %[n]\n\t"
+                              "1:\n\t"
+                              "bhi.s 0b\n\t"
+                              : [z1] "+a" (z1), [z2] "+a" (z2), [T] "+a" (T), [n] "+d" (c)
+                              :
+                              : "d0", "d1", "d2", "d3", "a3", "a4", "cc", "memory");
+            }
+            else
+            {
+                asm volatile ("movem.l (%[z1]), %%d0-%%d1\n\t"
+                              "addq.l #8, %[z1]\n\t"
                               "movem.l (%[T]), %%d2-%%d3\n\t"
-                              "mac.l %%d1, %%d3, %%acc2\n\t"
-                              "msac.l %%d0, %%d2, %%acc2\n\t"
-                              "mac.l %%d0, %%d3, %%acc3\n\t"
-                              "mac.l %%d1, %%d2, %%acc3\n\t"
+                              "lea (%[newstep]*4, %[T]), %[T]\n\t"
+                              "bra.s 1f\n\t"
+                              "0:\n\t"
+                              "msac.l %%d1, %%d2, (%[T]), %%a3, %%acc0\n\t"
+                              "mac.l %%d0, %%d3, (4, %[T]), %%a4, %%acc0\n\t"
+                              "msac.l %%d1, %%d3, -(%[z2]), %%d1, %%acc1\n\t"
+                              "msac.l %%d0, %%d2, -(%[z2]), %%d0, %%acc1\n\t"
 
                               "lea (%[newstep]*4, %[T]), %[T]\n\t"
 
+                              "msac.l %%d1, %%a4, (%[T]), %%d2, %%acc2\n\t"
+                              "mac.l %%d0, %%a3, (4, %[T]), %%d3, %%acc2\n\t"
+                              "msac.l %%d0, %%a4, (%[z1])+, %%d0, %%acc3\n\t"
+                              "msac.l %%d1, %%a3, (%[z1])+, %%d1, %%acc3\n\t"
-                              "movclr.l %%acc0, %%d0\n\t"
-                              "movclr.l %%acc1, %%d2\n\t"
-                              "movclr.l %%acc2, %%d1\n\t"
-                              "movclr.l %%acc3, %%d3\n\t"
+                              "lea (%[newstep]*4, %[T]), %[T]\n\t"
 
-                              "neg.l %%d0\n\t"
-                              "neg.l %%d1\n\t"
-                              "neg.l %%d2\n\t"
-                              "neg.l %%d3\n\t"
+                              "movclr.l %%acc0, %%a3\n\t"
+                              "movclr.l %%acc3, %%a4\n\t"
+                              "movem.l %%a3-%%a4, (-16, %[z1])\n\t"
 
-                              "movem.l %%d0/%%d3, (%[z1])\n\t"
-                              "movem.l %%d1/%%d2, (%[z2])\n\t"
+                              "movclr.l %%acc1, %%a4\n\t"
+                              "movclr.l %%acc2, %%a3\n\t"
+                              "movem.l %%a3-%%a4, (%[z2])\n\t"
 
-                              "addq.l #8, %[z1]\n\t"
-                              "subq.l #8, %[z2]\n\t"
-                              : [z1] "+a" (z1), [z2] "+a" (z2), [T] "+a" (T)
+                              "subq.l #2, %[n]\n\t"
+                              "1:\n\t"
+                              "bhi.s 0b\n\t"
+                              : [z1] "+a" (z1), [z2] "+a" (z2), [T] "+a" (T), [n] "+d" (c)
                               : [newstep] "d" (newstep)
-                              : "d0", "d1", "d2", "d3", "cc", "memory");
+                              : "d0", "d1", "d2", "d3", "a3", "a4", "cc", "memory");
+            }
 #else
+            fixed32 * z2 = (fixed32 *)(&z[n4-1]);
+            while(z1<z2)
+            {
                 fixed32 r0,i0,r1,i1;
                 XNPROD31_R(z1[1], z1[0], T[0], T[1], r0, i1 ); T+=newstep;
                 XNPROD31_R(z2[1], z2[0], T[1], T[0], r1, i0 ); T+=newstep;
@@ -187,9 +223,8 @@ void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input)
                 z2[1] = -i1;
 
                 z1+=2;
                 z2-=2;
-#endif
             }
-
+#endif
             break;
         }
```
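Editor's note: the other structural change visible in the diff is that, on ColdFire, the C-level `while(z1<z2)` loop around a single asm block is replaced by a counted loop inside the asm itself (`bra.s 1f`, `subq.l #2, %[n]`, `bhi.s 0b`). The C sketch below shows only that control-flow shape and is not from the commit: `process_pair` is a hypothetical stand-in for the EMAC butterfly, and the software pipelining of the loads in the real asm is not modelled.

```c
typedef int fixed32;  /* placeholder type, just for the sketch */

/* Hypothetical stand-in for the mac/msac work done on one pair of points
 * taken from each end of the buffer; the real work lives in the asm. */
static void process_pair(fixed32 *lo, fixed32 *hi, const fixed32 *t)
{
    (void)lo; (void)hi; (void)t;
}

/* Control-flow analog of the new asm loop: a down-counter starting at n4,
 * two fixed32 values consumed from each end per pass, with the test at the
 * bottom of the loop and entered via an initial jump. */
static void rotate_loop(fixed32 *z1, fixed32 *z2, const fixed32 *T,
                        int newstep, int n4)
{
    int c = n4;               /* [n] "+d" (c) in the asm              */
    goto test;                /* bra.s 1f                             */
loop:
    z2 -= 2;                  /* -(%[z2]) pre-decrement loads         */
    process_pair(z1, z2, T);
    z1 += 2;                  /* (%[z1])+ post-increment loads        */
    T  += 2 * newstep;        /* (%[T])+ or lea, depending on variant */
    c  -= 2;                  /* subq.l #2, %[n]                      */
test:
    if (c > 0)                /* bhi.s 0b                             */
        goto loop;
}

int main(void)
{
    fixed32 buf[16] = {0}, T[32] = {0};
    rotate_loop(&buf[0], &buf[16], T, 2, 8);  /* n4 = 8: four passes */
    return 0;
}
```

The counter replaces the old pointer comparison, and both pointers still meet in the middle of the buffer after n4/2 passes, exactly as the old `while(z1<z2)` loop did.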