| author    | Andree Buschmann <AndreeBuschmann@t-online.de> | 2010-02-21 22:05:48 +0000 |
|-----------|------------------------------------------------|---------------------------|
| committer | Andree Buschmann <AndreeBuschmann@t-online.de> | 2010-02-21 22:05:48 +0000 |
| commit    | 28bc321dbb20bc061c4e39b4eccce4c80fcac4a1 (patch) | |
| tree      | 2c60b2ca5d5ad4563da8b44cdd0949e4ba1eff49 | |
| parent    | 84a4a1d5cc214e7450b60c006bef24ae550a76f3 (diff) | |
Remove CLIP_TO_15 from codeclib. Remove tabs.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@24834 a1c6a512-1295-4272-9138-f99709370657
| Mode       | File                                | Lines changed |
|------------|-------------------------------------|---------------|
| -rw-r--r-- | apps/codecs/lib/asm_arm.h           | 103 |
| -rw-r--r-- | apps/codecs/lib/asm_mcf5249.h       | 6   |
| -rw-r--r-- | apps/codecs/lib/codeclib_misc.h     | 29  |
| -rw-r--r-- | apps/codecs/libcook/cook_fixpoint.h | 2   |

4 files changed, 72 insertions, 68 deletions
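For reference, CLIP_TO_15 simply clamps a value to the signed 16-bit range; the portable fallback that this commit comments out in asm_mcf5249.h (visible in the diff below) amounts to the following sketch. The lower-case name is used only for illustration and is not a codeclib symbol.

```c
#include <stdint.h>

/* Illustrative clamp to [-32768, 32767], equivalent to the portable
   CLIP_TO_15 fallback commented out by this commit. */
static inline int32_t clip_to_15(int32_t x)
{
    const int32_t hi = 32767, lo = -32768;
    return (x >= hi) ? hi : ((x <= lo) ? lo : x);
}
```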
diff --git a/apps/codecs/lib/asm_arm.h b/apps/codecs/lib/asm_arm.h
index 4f31f80c3e..627f4afd78 100644
--- a/apps/codecs/lib/asm_arm.h
+++ b/apps/codecs/lib/asm_arm.h
@@ -33,9 +33,9 @@ static inline int32_t MULT31(int32_t x, int32_t y) {
 
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   int lo,hi;
-  asm volatile("smull %0, %1, %2, %3\n\t"
-               "movs %0, %0, lsr #15\n\t"
-               "adc %1, %0, %1, lsl #17\n\t"
+  asm volatile("smull %0, %1, %2, %3\n\t"
+               "movs %0, %0, lsr #15\n\t"
+               "adc %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "r"(x),"r"(y)
               : "cc" );
@@ -45,43 +45,43 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
 #define XPROD32(a, b, t, v, x, y) \
 { \
     long l; \
-    asm( "smull %0, %1, %3, %5\n\t" \
-         "rsb %2, %6, #0\n\t" \
-         "smlal %0, %1, %4, %6\n\t" \
-         "smull %0, %2, %3, %2\n\t" \
-         "smlal %0, %2, %4, %5" \
-         : "=&r" (l), "=&r" (x), "=&r" (y) \
+    asm( "smull %0, %1, %3, %5\n\t" \
+         "rsb %2, %6, #0\n\t" \
+         "smlal %0, %1, %4, %6\n\t" \
+         "smull %0, %2, %3, %2\n\t" \
+         "smlal %0, %2, %4, %5" \
+         : "=&r" (l), "=&r" (x), "=&r" (y) \
          : "r" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
 }
 
 static inline void XPROD31(int32_t a, int32_t b,
-       int32_t t, int32_t v,
-       int32_t *x, int32_t *y)
+                           int32_t t, int32_t v,
+                           int32_t *x, int32_t *y)
 {
     int x1, y1, l;
-    asm( "smull %0, %1, %3, %5\n\t"
-         "rsb %2, %6, #0\n\t"
-         "smlal %0, %1, %4, %6\n\t"
-         "smull %0, %2, %3, %2\n\t"
-         "smlal %0, %2, %4, %5"
-         : "=&r" (l), "=&r" (x1), "=&r" (y1)
-         : "r" (a), "r" (b), "r" (t), "r" (v) );
+    asm( "smull %0, %1, %3, %5\n\t"
+         "rsb %2, %6, #0\n\t"
+         "smlal %0, %1, %4, %6\n\t"
+         "smull %0, %2, %3, %2\n\t"
+         "smlal %0, %2, %4, %5"
+         : "=&r" (l), "=&r" (x1), "=&r" (y1)
+         : "r" (a), "r" (b), "r" (t), "r" (v) );
     *x = x1 << 1;
     *y = y1 << 1;
 }
 
 static inline void XNPROD31(int32_t a, int32_t b,
-       int32_t t, int32_t v,
-       int32_t *x, int32_t *y)
+                            int32_t t, int32_t v,
+                            int32_t *x, int32_t *y)
 {
     int x1, y1, l;
-    asm( "smull %0, %1, %3, %5\n\t"
-         "rsb %2, %4, #0\n\t"
-         "smlal %0, %1, %2, %6\n\t"
-         "smull %0, %2, %4, %5\n\t"
-         "smlal %0, %2, %3, %6"
-         : "=&r" (l), "=&r" (x1), "=&r" (y1)
-         : "r" (a), "r" (b), "r" (t), "r" (v) );
+    asm( "smull %0, %1, %3, %5\n\t"
+         "rsb %2, %4, #0\n\t"
+         "smlal %0, %1, %2, %6\n\t"
+         "smull %0, %2, %4, %5\n\t"
+         "smlal %0, %2, %3, %6"
+         : "=&r" (l), "=&r" (x1), "=&r" (y1)
+         : "r" (a), "r" (b), "r" (t), "r" (v) );
     *x = x1 << 1;
     *y = y1 << 1;
 }
@@ -89,13 +89,13 @@ static inline void XNPROD31(int32_t a, int32_t b,
 #define XPROD31_R(_a, _b, _t, _v, _x, _y)\
 {\
     int x1, y1, l;\
-    asm( "smull %0, %1, %5, %3\n\t"\
-         "rsb %2, %3, #0\n\t"\
-         "smlal %0, %1, %6, %4\n\t"\
-         "smull %0, %2, %6, %2\n\t"\
-         "smlal %0, %2, %5, %4"\
-         : "=&r" (l), "=&r" (x1), "=&r" (y1)\
-         : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
+    asm( "smull %0, %1, %5, %3\n\t"\
+         "rsb %2, %3, #0\n\t"\
+         "smlal %0, %1, %6, %4\n\t"\
+         "smull %0, %2, %6, %2\n\t"\
+         "smlal %0, %2, %5, %4"\
+         : "=&r" (l), "=&r" (x1), "=&r" (y1)\
+         : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
     _x = x1 << 1;\
     _y = y1 << 1;\
 }
@@ -103,13 +103,13 @@ static inline void XNPROD31(int32_t a, int32_t b,
 #define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
 {\
     int x1, y1, l;\
-    asm( "smull %0, %1, %5, %3\n\t"\
-         "rsb %2, %4, #0\n\t"\
-         "smlal %0, %1, %6, %2\n\t"\
-         "smull %0, %2, %5, %4\n\t"\
-         "smlal %0, %2, %6, %3"\
-         : "=&r" (l), "=&r" (x1), "=&r" (y1)\
-         : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
+    asm( "smull %0, %1, %5, %3\n\t"\
+         "rsb %2, %4, #0\n\t"\
+         "smlal %0, %1, %6, %2\n\t"\
+         "smull %0, %2, %5, %4\n\t"\
+         "smlal %0, %2, %6, %3"\
+         : "=&r" (l), "=&r" (x1), "=&r" (y1)\
+         : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
     _x = x1 << 1;\
     _y = y1 << 1;\
 }
@@ -221,25 +221,26 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 #endif
 
 #endif
-
+/* not used anymore */
+/*
 #ifndef _V_CLIP_MATH
 #define _V_CLIP_MATH
 
 static inline int32_t CLIP_TO_15(int32_t x) {
   int tmp;
-  asm volatile("subs %1, %0, #32768\n\t"
-               "movpl %0, #0x7f00\n\t"
-               "orrpl %0, %0, #0xff\n"
-               "adds %1, %0, #32768\n\t"
-               "movmi %0, #0x8000"
-               : "+r"(x),"=r"(tmp)
-               :
-               : "cc");
+  asm volatile("subs %1, %0, #32768\n\t"
+               "movpl %0, #0x7f00\n\t"
+               "orrpl %0, %0, #0xff\n"
+               "adds %1, %0, #32768\n\t"
+               "movmi %0, #0x8000"
+               : "+r"(x),"=r"(tmp)
+               :
+               : "cc");
   return(x);
 }
 #endif
-
+*/
 
 #ifndef _V_LSP_MATH_ASM
 #define _V_LSP_MATH_ASM
diff --git a/apps/codecs/lib/asm_mcf5249.h b/apps/codecs/lib/asm_mcf5249.h
index 2888f6dc54..f103e78769 100644
--- a/apps/codecs/lib/asm_mcf5249.h
+++ b/apps/codecs/lib/asm_mcf5249.h
@@ -325,17 +325,19 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 #endif
 
 #endif
-
+/* not used anymore */
+/*
 #ifndef _V_CLIP_MATH
 #define _V_CLIP_MATH
 
-/* this is portable C and simple; why not use this as default? */
+* this is portable C and simple; why not use this as default?
 static inline int32_t CLIP_TO_15(register int32_t x) {
   register int32_t hi=32767, lo=-32768;
   return (x>=hi ? hi : (x<=lo ? lo : x));
 }
 
 #endif
+*/
 
 #else
 #define LINE_ATTR
 #endif
diff --git a/apps/codecs/lib/codeclib_misc.h b/apps/codecs/lib/codeclib_misc.h
index 6749231ebb..0d560755d7 100644
--- a/apps/codecs/lib/codeclib_misc.h
+++ b/apps/codecs/lib/codeclib_misc.h
@@ -116,18 +116,18 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
 
 /* replaced XPROD32 with a macro to avoid memory reference
    _x, _y are the results (must be l-values) */
-#define XPROD32(_a, _b, _t, _v, _x, _y) \
-  { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
+#define XPROD32(_a, _b, _t, _v, _x, _y) \
+  { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
     (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
 
 #ifdef __i386__
 
-#define XPROD31(_a, _b, _t, _v, _x, _y) \
-  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
+#define XPROD31(_a, _b, _t, _v, _x, _y) \
+  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
     *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
-#define XNPROD31(_a, _b, _t, _v, _x, _y) \
-  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
+#define XNPROD31(_a, _b, _t, _v, _x, _y) \
+  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
     *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
 
 #else
 
@@ -207,7 +207,8 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 #endif
 
 #endif
-
+/* not used anymore */
+/*
 #ifndef _V_CLIP_MATH
 #define _V_CLIP_MATH
 
@@ -219,10 +220,10 @@ static inline int32_t CLIP_TO_15(int32_t x) {
 }
 
 #endif
-
+*/
 static inline int32_t VFLOAT_MULT(int32_t a,int32_t ap,
-                      int32_t b,int32_t bp,
-                      int32_t *p){
+                                  int32_t b,int32_t bp,
+                                  int32_t *p){
   if(a && b){
 #ifndef _LOW_ACCURACY_
     *p=ap+bp+32;
@@ -236,16 +237,16 @@ static inline int32_t VFLOAT_MULT(int32_t a,int32_t ap,
 }
 
 /*static inline int32_t VFLOAT_MULTI(int32_t a,int32_t ap,
-                      int32_t i,
-                      int32_t *p){
+                                    int32_t i,
+                                    int32_t *p){
 
   int ip=_ilog(abs(i))-31;
   return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
 }
 */
 static inline int32_t VFLOAT_ADD(int32_t a,int32_t ap,
-                      int32_t b,int32_t bp,
-                      int32_t *p){
+                                 int32_t b,int32_t bp,
+                                 int32_t *p){
 
   if(!a){
     *p=bp;
diff --git a/apps/codecs/libcook/cook_fixpoint.h b/apps/codecs/libcook/cook_fixpoint.h
index 1d8b3ffa2f..faa86dc77a 100644
--- a/apps/codecs/libcook/cook_fixpoint.h
+++ b/apps/codecs/libcook/cook_fixpoint.h
@@ -36,7 +36,7 @@
  */
 
 #ifdef ROCKBOX
-/* get definitions of MULT31, MULT31_SHIFT15, CLIP_TO_15, vect_add, from codelib */
+/* get definitions of MULT31, MULT31_SHIFT15, vect_add, from codelib */
 #include "asm_arm.h"
 #include "asm_mcf5249.h"
 #include "codeclib_misc.h"
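Most of the remaining hunks are whitespace-only changes to the ARM inline-assembly cross-product helpers. What those helpers compute is spelled out by the portable __i386__ branch of codeclib_misc.h, visible in the diff above; below is a self-contained sketch of that formulation. The mult31() helper is a simplified stand-in for the platform MULT31 macro, an assumption made only for this illustration.

```c
#include <stdint.h>

/* Simplified Q31 fixed-point multiply, used only for this sketch:
   widen to 64 bits, take the product, and shift back into Q31 range. */
static inline int32_t mult31(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x * y) >> 31);
}

/* Portable form of XPROD31, mirroring the __i386__ macro above:
   *x = a*t + b*v and *y = b*t - a*v, all in Q31. */
static inline void xprod31(int32_t a, int32_t b, int32_t t, int32_t v,
                           int32_t *x, int32_t *y)
{
    *x = mult31(a, t) + mult31(b, v);
    *y = mult31(b, t) - mult31(a, v);
}

/* Portable form of XNPROD31: same cross product with the signs swapped. */
static inline void xnprod31(int32_t a, int32_t b, int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
{
    *x = mult31(a, t) - mult31(b, v);
    *y = mult31(b, t) + mult31(a, v);
}
```

The ARM versions in asm_arm.h arrive at the same results with smull/smlal pairs followed by a one-bit left shift of each accumulated high word.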