[xiph-commits] r17218 - branches/lowmem-branch/Tremolo

robin at svn.xiph.org robin at svn.xiph.org
Sat May 15 13:40:20 PDT 2010


Author: robin
Date: 2010-05-15 13:40:20 -0700 (Sat, 15 May 2010)
New Revision: 17218

Added:
   branches/lowmem-branch/Tremolo/mdctARM.s
   branches/lowmem-branch/Tremolo/mdctLARM.s
Modified:
   branches/lowmem-branch/Tremolo/Makefile.am
   branches/lowmem-branch/Tremolo/configure.in
   branches/lowmem-branch/Tremolo/mdct.c
   branches/lowmem-branch/Tremolo/mdct_lookup.h
Log:
ARM mdct implementations, and changes to the C code to call them.

Also changes to configure.in and Makefile.am so that the assembly files are
correctly included in the build on ARM targets.

This is 'my first autoconf' stuff, so any hints/pointers on how to do it
better would be gratefully accepted.
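
For reference, the C side picks between the two new assembly files with a
token-pasting macro (see the mdct.c hunk below). A minimal sketch of how the
dispatch resolves, for illustration only:

    #ifdef _LOW_ACCURACY_
    #define ARM_SUFFIX(A) A ## _arm_low   /* symbols exported by mdctLARM.s */
    #else
    #define ARM_SUFFIX(A) A ## _arm       /* symbols exported by mdctARM.s  */
    #endif

    /* e.g. ARM_SUFFIX(mdct_shift_right)(n, in, right) compiles to
     * mdct_shift_right_arm(n, in, right) in a normal build, or to
     * mdct_shift_right_arm_low(n, in, right) when _LOW_ACCURACY_ is set. */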




Modified: branches/lowmem-branch/Tremolo/Makefile.am
===================================================================
--- branches/lowmem-branch/Tremolo/Makefile.am	2010-05-15 16:17:50 UTC (rev 17217)
+++ branches/lowmem-branch/Tremolo/Makefile.am	2010-05-15 20:40:20 UTC (rev 17218)
@@ -7,6 +7,13 @@
 
 lib_LTLIBRARIES = libvorbisidec.la
 
+if ARM_TARGET
+# Build 'em both and let God sort 'em out
+TARGET_SPECIFIC_SOURCES = mdctARM.s mdctLARM.s
+else
+TARGET_SPECIFIC_SOURCES =
+endif
+
 libvorbisidec_la_SOURCES = mdct.c dsp.c info.c misc.c \
                         floor1.c floor0.c vorbisfile.c \
                         res012.c mapping0.c codebook.c \
@@ -15,6 +22,7 @@
                         os.h mdct.h ivorbisfile.h lsp_lookup.h\
                         window_lookup.h floor_lookup.c \
                         codec_internal.h ogg.h \
+			$(TARGET_SPECIFIC_SOURCES) \
 			asm_arm.h ivorbiscodec.h
 libvorbisidec_la_LDFLAGS = -version-info @V_LIB_CURRENT@:@V_LIB_REVISION@:@V_LIB_AGE@
 

Modified: branches/lowmem-branch/Tremolo/configure.in
===================================================================
--- branches/lowmem-branch/Tremolo/configure.in	2010-05-15 16:17:50 UTC (rev 17217)
+++ branches/lowmem-branch/Tremolo/configure.in	2010-05-15 20:40:20 UTC (rev 17218)
@@ -37,6 +37,7 @@
 CFLAGS="$cflags_save"
 
 AM_PROG_LIBTOOL
+AM_PROG_AS
 
 dnl --------------------------------------------------
 dnl Set build flags based on environment
@@ -44,11 +45,13 @@
 
 dnl Set some target options
 
+arm_target=0
 cflags_save="$CFLAGS"
 ldflags_save="$LDFLAGS"
 if test -z "$GCC"; then
         case $host in 
         arm-*-*)
+		arm_target=1
                 DEBUG="-g -D_ARM_ASSEM_" 
                 CFLAGS="-O -D_ARM_ASSEM_"
                 PROFILE="-p -g -O -D_ARM_ASSEM" ;;
@@ -61,6 +64,7 @@
 
         case $host in 
         arm-*-*)
+		arm_target=1
                 DEBUG="-g -Wall -W -D__NO_MATH_INLINES -fsigned-char -D_ARM_ASSEM_"
                 CFLAGS="-O2 -Wall -D_ARM_ASSEM_ -fsigned-char"
                 PROFILE="-Wall -pg -g -O2 -D_ARM_ASSEM_ -fsigned-char -fno-inline-functions";;
@@ -74,6 +78,7 @@
 CFLAGS="$CFLAGS $cflags_save -D_REENTRANT"
 LDFLAGS="$LDFLAGS $ldflags_save"
 
+AM_CONDITIONAL(ARM_TARGET, [test "x$arm_target" = "x1"])
 
 # Test whenever ld supports -version-script
 AC_PROG_LD

Modified: branches/lowmem-branch/Tremolo/mdct.c
===================================================================
--- branches/lowmem-branch/Tremolo/mdct.c	2010-05-15 16:17:50 UTC (rev 17217)
+++ branches/lowmem-branch/Tremolo/mdct.c	2010-05-15 20:40:20 UTC (rev 17218)
@@ -38,6 +38,43 @@
 #include "mdct.h"
 #include "mdct_lookup.h"
 
+#ifdef _ARM_ASSEM_
+
+/* We have two different variants of the ARM routines, depending on whether
+ * _LOW_ACCURACY_ is defined. We suffix the names of the routines we call
+ * so that the right version is used. */
+/* FIXME: This could be avoided by people being smarter with configure. */
+#ifdef _LOW_ACCURACY_
+#define ARM_SUFFIX(A) A ## _arm_low
+#else
+#define ARM_SUFFIX(A) A ## _arm
+#endif
+
+extern ogg_int16_t *ARM_SUFFIX(mdct_unroll_prelap)(ogg_int16_t *out,
+                                                   DATA_TYPE   *post,
+                                                   DATA_TYPE   *l,
+                                                   int          step);
+extern ogg_int16_t *ARM_SUFFIX(mdct_unroll_part2)(ogg_int16_t *out,
+                                                  DATA_TYPE   *post,
+                                                  DATA_TYPE   *l,
+                                                  DATA_TYPE   *r,
+                                                  int          step,
+                                                  LOOKUP_T    *wL,
+                                                  LOOKUP_T    *wR);
+extern ogg_int16_t *ARM_SUFFIX(mdct_unroll_part3)(ogg_int16_t *out,
+                                                  DATA_TYPE   *post,
+                                                  DATA_TYPE   *l,
+                                                  DATA_TYPE   *r,
+                                                  int          step,
+                                                  LOOKUP_T    *wL,
+                                                  LOOKUP_T    *wR);
+extern ogg_int16_t *ARM_SUFFIX(mdct_unroll_postlap)(ogg_int16_t *out,
+                                                    DATA_TYPE   *post,
+                                                    DATA_TYPE   *l,
+                                                    int          step);
+#endif
+
+#ifndef _ARM_ASSEM_
 STIN void presymmetry(DATA_TYPE *in,int n2,int step){
   DATA_TYPE *aX;
   DATA_TYPE *bX;
@@ -289,14 +326,15 @@
 	      w0    += 2;
   }while(w0<w1);
 }
+#endif
 
 STIN void mdct_step8(DATA_TYPE *x, int n, int step){
   LOOKUP_T *T;
   LOOKUP_T *V;
   DATA_TYPE *iX =x+(n>>1);
-  step>>=2;
 
   switch(step) {
+#ifndef _ARM_ASSEM_
   default: 
     T=(step>=4)?(sincos_lookup0+(step>>1)):sincos_lookup1;
     do{
@@ -306,6 +344,7 @@
                    x  +=2;
     }while(x<iX);
     break;
+#endif
   
   case 1: 
     {
@@ -377,11 +416,16 @@
   }
 }
 
+#ifdef _ARM_ASSEM_
+extern int ARM_SUFFIX(mdct_backward)(int n, DATA_TYPE *in); /* returns remaining step */
+#endif
+
 /* partial; doesn't perform last-step deinterleave/unrolling.  That
    can be done more efficiently during pcm output */
 void mdct_backward(int n, DATA_TYPE *in){
+  int step;
+#ifndef _ARM_ASSEM_
   int shift;
-  int step;
   
   for (shift=4;!(n&(1<<shift));shift++);
   shift=13-shift;
@@ -391,16 +435,25 @@
   mdct_butterflies(in,n>>1,shift);
   mdct_bitreverse(in,n,shift);
   mdct_step7(in,n,step);
-  mdct_step8(in,n,step);
+  mdct_step8(in,n,step>>2);
+#else
+  step = ARM_SUFFIX(mdct_backward)(n, in);
+  if (step < 2)   /* steps 0 and 1 are still handled by the C mdct_step8() */
+    mdct_step8(in,n,step);
+#endif
 }
 
-void mdct_shift_right(int n, DATA_TYPE *in, DATA_TYPE *right){
+void mdct_shift_right(int n, DATA_TYPE *in, DATA_TYPE *right) {
+#ifdef _ARM_ASSEM_
+  ARM_SUFFIX(mdct_shift_right)(n, in, right);
+#else
   int i;
   n>>=2;
   in+=1;
 
   for(i=0;i<n;i++)
     right[i]=in[i<<1];
+#endif
 }
 
 void mdct_unroll_lap(int n0,int n1,
@@ -433,10 +486,18 @@
     r     -= off;
     start -= off;
     end   -= n;
+#ifndef _ARM_ASSEM_
     while(r>post){
       *out = CLIP_TO_15((*--r)>>9);
       out+=step;
     }
+#else
+    out = ARM_SUFFIX(mdct_unroll_prelap)(out,post,r,step);
+    n -= off;
+    if (n < 0)
+      n = 0;
+    r -= n;
+#endif
   }
   
   /* cross-lap; two halves due to wrap-around */
@@ -449,11 +510,22 @@
   wR    -= off;
   wL    += off;
   end   -= n;
+#ifndef _ARM_ASSEM_
   while(r>post){
     l-=2;
     *out = CLIP_TO_15((MULT31(*--r,*--wR) + MULT31(*l,*wL++))>>9);
     out+=step;
   }
+#else
+  out = ARM_SUFFIX(mdct_unroll_part2)(out, post, l, r, step, wL, wR);
+  n -= off;
+  if (n < 0)
+      n = 0;
+  l -= 2*n;
+  r -= n;
+  wR -= n;
+  wL += n;
+#endif
 
   n      = (end<halfLap?end:halfLap);
   off    = (start<halfLap?start:halfLap);
@@ -464,11 +536,22 @@
   end   -= n;
   wR    -= off;
   wL    += off;
+#ifndef _ARM_ASSEM_
   while(r<post){
     *out = CLIP_TO_15((MULT31(*r++,*--wR) - MULT31(*l,*wL++))>>9);
     out+=step;
     l+=2;
   }
+#else
+  out = ARM_SUFFIX(mdct_unroll_part3)(out, post, l, r, step, wL, wR);
+  n -= off;
+  if (n < 0)
+      n = 0;
+  l += 2*n;
+  r += n;
+  wR -= n;
+  wL += n;
+#endif
 
  /* preceding direct-copy lapping from previous frame, if any */
   if(postLap){
@@ -476,11 +559,15 @@
     off    = (start<postLap?start:postLap);
     post   = l+n*2;
     l     += off*2;
+#ifndef _ARM_ASSEM_
     while(l<post){
       *out = CLIP_TO_15((-*l)>>9);
       out+=step;
       l+=2;
     }
+#else
+    out = ARM_SUFFIX(mdct_unroll_postlap)(out,post,l,step);
+#endif
   }
 }
 
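A note for readers of the assembly below: the recurring MVN r4,#0x8000 /
ASR #15 / TEQ / EORNE sequences are the hand-written equivalent of the
CLIP_TO_15() clamp used in the C loops above. Roughly, as a C sketch (the
real CLIP_TO_15 macro lives in the existing Tremolo headers and may be
written differently):

    /* Saturate a (>>9)-scaled sample to signed 16 bits, the way the
     * TEQ/EORNE idiom does with r4 = ~0x8000.  Illustrative only. */
    static inline ogg_int16_t clip_to_15(ogg_int32_t v){
      ogg_int32_t top = v >> 15;       /* everything above the result bits */
      if (top != (v >> 31))            /* neither 0 nor -1: out of range   */
        v = ~0x8000 ^ (top >> 31);     /* low half becomes 0x7FFF / 0x8000 */
      return (ogg_int16_t)v;           /* STRH keeps the low halfword      */
    }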

Added: branches/lowmem-branch/Tremolo/mdctARM.s
===================================================================
--- branches/lowmem-branch/Tremolo/mdctARM.s	                        (rev 0)
+++ branches/lowmem-branch/Tremolo/mdctARM.s	2010-05-15 20:40:20 UTC (rev 17218)
@@ -0,0 +1,1187 @@
+; Tremolo library
+; Copyright (C) 2009 Robin Watts for Pinknoise Productions Ltd
+
+	AREA	|.text|, CODE, READONLY
+
+	; full accuracy version
+
+	EXPORT mdct_backward_arm
+	EXPORT mdct_shift_right_arm
+	EXPORT mdct_unroll_prelap_arm
+	EXPORT mdct_unroll_part2_arm
+	EXPORT mdct_unroll_part3_arm
+	EXPORT mdct_unroll_postlap_arm
+
+	IMPORT	sincos_lookup0
+	IMPORT	sincos_lookup1
+
+mdct_unroll_prelap_arm
+	; r0 = out
+	; r1 = post
+	; r2 = r
+	; r3 = step
+	STMFD	r13!,{r4-r7,r14}
+	MVN	r4, #0x8000
+	MOV	r3, r3, LSL #1
+	SUB	r1, r2, r1		; r1 = r - post
+	SUBS	r1, r1, #16		; r1 = r - post - 16
+	BLT	unroll_over
+unroll_loop
+	LDMDB	r2!,{r5,r6,r7,r12}
+
+	MOV	r5, r5, ASR #9		; r5 = (*--r)>>9
+	MOV	r6, r6, ASR #9		; r6 = (*--r)>>9
+	MOV	r7, r7, ASR #9		; r7 = (*--r)>>9
+	MOV	r12,r12,ASR #9		; r12= (*--r)>>9
+
+	MOV	r14,r12,ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r12,r4, r14,ASR #31
+	STRH	r12,[r0], r3
+
+	MOV	r14,r7, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r7, r4, r14,ASR #31
+	STRH	r7, [r0], r3
+
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r3
+
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+
+	SUBS	r1, r1, #16
+	BGE	unroll_loop
+
+unroll_over
+	ADDS	r1, r1, #16
+	BLE	unroll_end
+unroll_loop2
+	LDR	r5,[r2,#-4]!
+	; stall
+	; stall (Xscale)
+	MOV	r5, r5, ASR #9		; r5 = (*--r)>>9
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+	SUBS	r1, r1, #4
+	BGT	unroll_loop2
+unroll_end
+	LDMFD	r13!,{r4-r7,PC}
+
+mdct_unroll_postlap_arm
+	; r0 = out
+	; r1 = post
+	; r2 = l
+	; r3 = step
+	STMFD	r13!,{r4-r7,r14}
+	MVN	r4, #0x8000
+	MOV	r3, r3, LSL #1
+	SUB	r1, r1, r2		; r1 = post - l
+	MOV	r1, r1, ASR #1		; r1 = (post - l)>>1
+	SUBS	r1, r1, #16		; r1 = ((post - l)>>1) - 16
+	BLT	unroll_over3
+unroll_loop3
+	LDR	r12,[r2],#8
+	LDR	r7, [r2],#8
+	LDR	r6, [r2],#8
+	LDR	r5, [r2],#8
+
+	RSB	r12,r12,#0
+	RSB	r5, r5, #0
+	RSB	r6, r6, #0
+	RSB	r7, r7, #0
+
+	MOV	r12, r12,ASR #9		; r12= (-*l)>>9
+	MOV	r5,  r5, ASR #9		; r5 = (-*l)>>9
+	MOV	r6,  r6, ASR #9		; r6 = (-*l)>>9
+	MOV	r7,  r7, ASR #9		; r7 = (-*l)>>9
+
+	MOV	r14,r12,ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r12,r4, r14,ASR #31
+	STRH	r12,[r0], r3
+
+	MOV	r14,r7, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r7, r4, r14,ASR #31
+	STRH	r7, [r0], r3
+
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r3
+
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+
+	SUBS	r1, r1, #16
+	BGE	unroll_loop3
+
+unroll_over3
+	ADDS	r1, r1, #16
+	BLE	unroll_over4
+unroll_loop4
+	LDR	r5,[r2], #8
+	; stall
+	; stall (Xscale)
+	RSB	r5, r5, #0
+	MOV	r5, r5, ASR #9		; r5 = (-*l)>>9
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+	SUBS	r1, r1, #4
+	BGT	unroll_loop4
+unroll_over4
+	LDMFD	r13!,{r4-r7,PC}
+
+mdct_unroll_part2_arm
+	; r0 = out
+	; r1 = post
+	; r2 = l
+	; r3 = r
+	; <> = step
+	; <> = wL
+	; <> = wR
+	MOV	r12,r13
+	STMFD	r13!,{r4,r6-r11,r14}
+	LDMFD	r12,{r8,r9,r10}		; r8 = step
+					; r9 = wL
+					; r10= wR
+	MVN	r4, #0x8000
+	MOV	r8, r8, LSL #1
+	SUBS	r1, r3, r1		; r1 = (r - post)
+	BLE	unroll_over5
+unroll_loop5
+	LDR	r12,[r2, #-8]!		; r12= *l       (but l -= 2 first)
+	LDR	r11,[r9],#4		; r11= *wL++
+	LDR	r7, [r3, #-4]!		; r7 = *--r
+	LDR	r6, [r10,#-4]!		; r6 = *--wR
+
+	; Can save a cycle here, at the cost of 1bit errors in rounding
+	SMULL	r14,r11,r12,r11		; (r14,r11)  = *l   * *wL++
+	SMULL	r14,r6, r7, r6		; (r14,r6)   = *--r * *--wR
+	ADD	r6, r6, r11
+	MOV	r6, r6, ASR #8
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r8
+
+	SUBS	r1, r1, #4
+	BGT	unroll_loop5
+
+unroll_over5
+	LDMFD	r13!,{r4,r6-r11,PC}
+
+mdct_unroll_part3_arm
+	; r0 = out
+	; r1 = post
+	; r2 = l
+	; r3 = r
+	; <> = step
+	; <> = wL
+	; <> = wR
+	MOV	r12,r13
+	STMFD	r13!,{r4,r6-r11,r14}
+	LDMFD	r12,{r8,r9,r10}		; r8 = step
+					; r9 = wL
+					; r10= wR
+	MVN	r4, #0x8000
+	MOV	r8, r8, LSL #1
+	SUBS	r1, r1, r3		; r1 = (post - r)
+	BLE	unroll_over6
+unroll_loop6
+	LDR	r12,[r2],#8		; r12= *l       (but l += 2 first)
+	LDR	r11,[r9],#4		; r11= *wL++
+	LDR	r7, [r3],#4		; r7 = *r++
+	LDR	r6, [r10,#-4]!		; r6 = *--wR
+
+	; Can save a cycle here, at the cost of 1bit errors in rounding
+	SMULL	r14,r11,r12,r11		; (r14,r11)  = *l   * *wL++
+	SMULL	r14,r6, r7, r6		; (r14,r6)   = *r++ * *--wR
+	SUB	r6, r6, r11
+	MOV	r6, r6, ASR #8
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r8
+
+	SUBS	r1, r1, #4
+	BGT	unroll_loop6
+
+unroll_over6
+	LDMFD	r13!,{r4,r6-r11,PC}
+
+mdct_shift_right_arm
+	; r0 = n
+	; r1 = in
+	; r2 = right
+	STMFD	r13!,{r4-r11,r14}
+
+	MOV	r0, r0, LSR #2		; n >>= 2
+	ADD	r1, r1, #4
+
+	SUBS	r0, r0,	#8
+	BLT	sr_less_than_8
+sr_loop
+	LDR	r3, [r1], #8
+	LDR	r4, [r1], #8
+	LDR	r5, [r1], #8
+	LDR	r6, [r1], #8
+	LDR	r7, [r1], #8
+	LDR	r8, [r1], #8
+	LDR	r12,[r1], #8
+	LDR	r14,[r1], #8
+	SUBS	r0, r0, #8
+	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
+	BGE	sr_loop
+sr_less_than_8
+	ADDS	r0, r0, #8
+	BEQ	sr_end
+sr_loop2
+	LDR	r3, [r1], #8
+	SUBS	r0, r0, #1
+	STR	r3, [r2], #4
+	BGT	sr_loop2
+sr_end
+	LDMFD	r13!,{r4-r11,PC}
+
+mdct_backward_arm
+	; r0 = n
+	; r1 = in
+	STMFD	r13!,{r4-r11,r14}
+
+	MOV	r2,#1<<4	; r2 = 1<<shift
+	MOV	r3,#13-4	; r3 = 13-shift
+find_shift_loop
+	TST	r0,r2		; if (n & (1<<shift)) == 0
+	MOV	r2,r2,LSL #1
+	SUBEQ	r3,r3,#1	; shift--
+	BEQ	find_shift_loop
+	MOV	r2,#2
+	MOV	r2,r2,LSL r3	; r2 = step = 2<<shift
+
+	; presymmetry
+	; r0 = n (a multiple of 4)
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+	ADD	r4, r1, r0, LSL #1	; r4 = aX = in+(n>>1)
+	ADD	r14,r1, r0		; r14= in+(n>>2)
+	SUB	r4, r4, #3*4		; r4 = aX = in+n2-3
+	LDR	r5, =sincos_lookup0	; r5 = T=sincos_lookup0
+
+presymmetry_loop1
+	LDR	r7, [r4,#8]		; r7 = s2 = aX[2]
+	LDR	r11,[r5,#4]		; r11= T[1]
+	LDR	r6, [r4]		; r6 = s0 = aX[0]
+	LDR	r10,[r5],r2,LSL #2	; r10= T[0]   T += step
+
+	; XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
+	SMULL	r8, r9, r7, r11		; (r8, r9)   = s2*T[1]
+	; stall
+	; stall ?
+	SMLAL	r8, r9, r6, r10		; (r8, r9)  += s0*T[0]
+	RSB	r6, r6, #0
+	; stall ?
+	SMULL	r8, r12,r7, r10		; (r8, r12)  = s2*T[0]
+	MOV	r9, r9, LSL #1
+	; stall ?
+	SMLAL	r8, r12,r6, r11		; (r8, r12) -= s0*T[1]
+	STR	r9, [r4],#-16		; aX[0] = r9
+	CMP	r4,r14
+	MOV	r12,r12,LSL #1
+	STR	r12,[r4,#8+16]		; aX[2] = r12
+
+	BGE	presymmetry_loop1	; while (aX >= in+n4)
+
+presymmetry_loop2
+	LDR	r6,[r4]			; r6 = s0 = aX[0]
+	LDR	r10,[r5,#4]		; r10= T[1]
+	LDR	r7,[r4,#8]		; r7 = s2 = aX[2]
+	LDR	r11,[r5],-r2,LSL #2	; r11= T[0]   T -= step
+
+	; XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
+	SMULL	r8, r9, r6, r10		; (r8, r9)   = s0*T[1]
+	; stall
+	; stall ?
+	SMLAL	r8, r9, r7, r11		; (r8, r9)  += s2*T[0]
+	RSB	r6, r6, #0
+	; stall ?
+	SMULL	r8, r12,r7, r10		; (r8, r12)  = s2*T[1]
+	MOV	r9, r9, LSL #1
+	; stall ?
+	SMLAL	r8, r12,r6, r11		; (r8, r12) -= s0*T[0]
+	STR	r9, [r4],#-16		; aX[0] = r9
+	CMP	r4,r1
+	MOV	r12,r12,LSL #1
+	STR	r12,[r4,#8+16]		; aX[2] = r12
+
+	BGE	presymmetry_loop2	; while (aX >= in)
+
+	; r0 = n
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+	STMFD	r13!,{r3}
+	LDR	r5, =sincos_lookup0	; r5 = T=sincos_lookup0
+	ADD	r4, r1, r0, LSL #1	; r4 = aX = in+(n>>1)
+	SUB	r4, r4, #4*4		; r4 = aX = in+(n>>1)-4
+	LDR	r11,[r5,#4]		; r11= T[1]
+	LDR	r10,[r5],r2, LSL #2	; r10= T[0]    T += step
+presymmetry_loop3
+	LDR	r8,[r1],#16 		; r8 = ro0 = bX[0]
+	LDR	r9,[r1,#8-16]		; r9 = ro2 = bX[2]
+	LDR	r6,[r4]			; r6 = ri0 = aX[0]
+
+	; XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
+	; aX[0] = (ro2*T[1] - ro0*T[0])>>31 aX[2] = (ro0*T[1] + ro2*T[0])>>31
+	SMULL	r14,r12,r8, r11		; (r14,r12)  = ro0*T[1]
+	RSB	r8,r8,#0		; r8 = -ro0
+	; Stall ?
+	SMLAL	r14,r12,r9, r10		; (r14,r12) += ro2*T[0]
+	LDR	r7,[r4,#8]		; r7 = ri2 = aX[2]
+	; Stall ?
+	SMULL	r14,r3, r9, r11		; (r14,r3)   = ro2*T[1]
+	MOV	r12,r12,LSL #1
+	LDR	r11,[r5,#4]		; r11= T[1]
+	SMLAL	r14,r3, r8, r10		; (r14,r3)  -= ro0*T[0]
+	LDR	r10,[r5],r2, LSL #2	; r10= T[0]    T += step
+	STR	r12,[r4,#8]
+	MOV	r3, r3, LSL #1
+	STR	r3, [r4],#-16
+
+	; XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
+	; bX[0] = (ri2*T[0] - ri0*T[1])>>31 bX[2] = (ri0*T[0] + ri2*T[1])>>31
+	SMULL	r14,r12,r6, r10		; (r14,r12)  = ri0*T[0]
+	RSB	r6,r6,#0		; r6 = -ri0
+	; stall ?
+	SMLAL	r14,r12,r7, r11		; (r14,r12) += ri2*T[1]
+	; stall ?
+	; stall ?
+	SMULL	r14,r3, r7, r10		; (r14,r3)   = ri2*T[0]
+	MOV	r12,r12,LSL #1
+	; stall ?
+	SMLAL	r14,r3, r6, r11		; (r14,r3)  -= ri0*T[1]
+	CMP	r4,r1
+	STR	r12,[r1,#8-16]
+	MOV	r3, r3, LSL #1
+	STR	r3, [r1,#-16]
+
+	BGE	presymmetry_loop3
+
+	SUB	r1,r1,r0		; r1 = in -= n>>2 (i.e. restore in)
+
+	LDR	r3,[r13]
+	STR	r2,[r13,#-4]!
+
+	; mdct_butterflies
+	; r0 = n  = (points * 2)
+	; r1 = in = x
+	; r2 = i
+	; r3 = shift
+	STMFD	r13!,{r0-r1}
+	RSBS	r4,r3,#6		; r4 = stages = 7-shift then --stages
+	LDR	r5,=sincos_lookup0
+	BLE	no_generics
+	MOV	r14,#4			; r14= 4               (i=0)
+	MOV	r6, r14,LSL r3		; r6 = (4<<i)<<shift
+mdct_butterflies_loop1
+	MOV	r0, r0, LSR #1		; r0 = points>>i = POINTS
+	MOV	r2, r14,LSR #2		; r2 = (1<<i)-j        (j=0)
+	STMFD	r13!,{r4,r14}
+mdct_butterflies_loop2
+
+	; mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
+	; mdct_butterfly_generic(r1, r0, r6)
+	; r0 = points
+	; r1 = x
+	; preserve r2 (external loop counter)
+	; preserve r3
+	; preserve r4 (external loop counter)
+	; r5 = T = sincos_lookup0
+	; r6 = step
+	; preserve r14
+
+	STR	r2,[r13,#-4]!		; stack r2
+	ADD	r1,r1,r0,LSL #1		; r1 = x2+4 = x + (POINTS>>1)
+	ADD	r7,r1,r0,LSL #1		; r7 = x1+4 = x + POINTS
+	ADD	r12,r5,#1024*4		; r12= sincos_lookup0+1024
+
+mdct_bufferfly_generic_loop1
+	LDMDB	r7!,{r2,r3,r8,r11}	; r2 = x1[0]
+					; r3 = x1[1]
+					; r8 = x1[2]
+					; r11= x1[3]    x1 -= 4
+	LDMDB	r1!,{r4,r9,r10,r14}	; r4 = x2[0]
+					; r9 = x2[1]
+					; r10= x2[2]
+					; r14= x2[3]    x2 -= 4
+
+	SUB	r2, r2, r3		; r2 = s0 = x1[0] - x1[1]
+	ADD	r3, r2, r3, LSL #1	; r3 =      x1[0] + x1[1] (-> x1[0])
+	SUB	r11,r11,r8		; r11= s1 = x1[3] - x1[2]
+	ADD	r8, r11,r8, LSL #1	; r8 =      x1[3] + x1[2] (-> x1[2])
+	SUB	r9, r9, r4		; r9 = s2 = x2[1] - x2[0]
+	ADD	r4, r9, r4, LSL #1	; r4 =      x2[1] + x2[0] (-> x1[1])
+	SUB	r14,r14,r10		; r14= s3 = x2[3] - x2[2]
+	ADD	r10,r14,r10,LSL #1	; r10=      x2[3] + x2[2] (-> x1[3])
+	STMIA	r7,{r3,r4,r8,r10}
+
+	; r0 = points
+	; r1 = x2
+	; r2 = s0
+	; r3 free
+	; r4 free
+	; r5 = T
+	; r6 = step
+	; r7 = x1
+	; r8 free
+	; r9 = s2
+	; r10 free
+	; r11= s1
+	; r12= limit
+	; r14= s3
+
+	LDR	r8, [r5,#4]		; r8 = T[1]
+	LDR	r10,[r5],r6,LSL #2	; r10= T[0]		T += step
+
+	; XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
+	; x2[0] = (s1*T[0] + s0*T[1])>>31     x2[2] = (s0*T[0] - s1*T[1])>>31
+	; stall Xscale
+	SMULL	r4, r3, r2, r8		; (r4, r3)   = s0*T[1]
+	SMLAL	r4, r3, r11,r10		; (r4, r3)  += s1*T[0]
+	RSB	r11,r11,#0
+	SMULL	r11,r4, r8, r11		; (r11,r4)   = -s1*T[1]
+	SMLAL	r11,r4, r2, r10		; (r11,r4)  += s0*T[0]
+	MOV	r2, r3, LSL #1		; r2 = r3<<1 = Value for x2[0]
+
+	; XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
+	; x2[1] = (s2*T[0] + s3*T[1])>>31     x2[3] = (s3*T[0] - s2*T[1])>>31
+	SMULL	r11,r3, r9, r10		; (r11,r3)   = s2*T[0]
+	MOV	r4, r4, LSL #1		; r4 = r4<<1 = Value for x2[2]
+	SMLAL	r11,r3, r14,r8		; (r11,r3)  += s3*T[1]
+	RSB	r9, r9, #0
+	SMULL	r10,r11,r14,r10		; (r10,r11)  = s3*T[0]
+	MOV	r3, r3, LSL #1		; r3 = r3<<1 = Value for x2[1]
+	SMLAL	r10,r11,r9,r8		; (r10,r11) -= s2*T[1]
+	CMP	r5, r12
+	MOV	r11,r11,LSL #1		; r11= r11<<1 = Value for x2[3]
+
+	STMIA	r1,{r2,r3,r4,r11}
+
+	BLT	mdct_bufferfly_generic_loop1
+
+	SUB	r12,r12,#1024*4
+mdct_bufferfly_generic_loop2
+	LDMDB	r7!,{r2,r3,r9,r10}	; r2 = x1[0]
+					; r3 = x1[1]
+					; r9 = x1[2]
+					; r10= x1[3]    x1 -= 4
+	LDMDB	r1!,{r4,r8,r11,r14}	; r4 = x2[0]
+					; r8 = x2[1]
+					; r11= x2[2]
+					; r14= x2[3]    x2 -= 4
+
+	SUB	r2, r2, r3		; r2 = s0 = x1[0] - x1[1]
+	ADD	r3, r2, r3, LSL #1	; r3 =      x1[0] + x1[1] (-> x1[0])
+	SUB	r9, r9,r10		; r9 = s1 = x1[2] - x1[3]
+	ADD	r10,r9,r10, LSL #1	; r10=      x1[2] + x1[3] (-> x1[2])
+	SUB	r4, r4, r8		; r4 = s2 = x2[0] - x2[1]
+	ADD	r8, r4, r8, LSL #1	; r8 =      x2[0] + x2[1] (-> x1[1])
+	SUB	r14,r14,r11		; r14= s3 = x2[3] - x2[2]
+	ADD	r11,r14,r11,LSL #1	; r11=      x2[3] + x2[2] (-> x1[3])
+	STMIA	r7,{r3,r8,r10,r11}
+
+	; r0 = points
+	; r1 = x2
+	; r2 = s0
+	; r3 free
+	; r4 = s2
+	; r5 = T
+	; r6 = step
+	; r7 = x1
+	; r8 free
+	; r9 = s1
+	; r10 free
+	; r11 free
+	; r12= limit
+	; r14= s3
+
+	LDR	r8, [r5,#4]		; r8 = T[1]
+	LDR	r10,[r5],-r6,LSL #2	; r10= T[0]		T -= step
+
+	; XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
+	; x2[0] = (s0*T[0] - s1*T[1])>>31     x2[2] = (s1*T[0] + s0*T[1])>>31
+	; stall Xscale
+	SMULL	r3, r11,r2, r8		; (r3, r11)  = s0*T[1]
+	SMLAL	r3, r11,r9, r10		; (r3, r11) += s1*T[0]
+	RSB	r9, r9, #0
+	SMULL	r3, r2, r10,r2		; (r3, r2)   = s0*T[0]
+	SMLAL	r3, r2, r9, r8		; (r3, r2)  += -s1*T[1]
+	MOV	r9, r11,LSL #1		; r9 = r11<<1 = Value for x2[2]
+
+	; XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
+	; x2[1] = (s3*T[0] - s2*T[1])>>31     x2[3] = (s2*T[0] + s3*T[1])>>31
+	SMULL	r3, r11,r4, r10		; (r3,r11)   = s2*T[0]
+	MOV	r2, r2, LSL #1		; r2 = r2<<1  = Value for x2[0]
+	SMLAL	r3, r11,r14,r8		; (r3,r11)  += s3*T[1]
+	RSB	r4, r4, #0
+	SMULL	r10,r3,r14,r10		; (r10,r3)   = s3*T[0]
+	MOV	r11,r11,LSL #1		; r11= r11<<1 = Value for x2[3]
+	SMLAL	r10,r3, r4, r8		; (r10,r3)  -= s2*T[1]
+	CMP	r5, r12
+	MOV	r3, r3, LSL #1		; r3 = r3<<1  = Value for x2[1]
+
+	STMIA	r1,{r2,r3,r9,r11}
+
+	BGT	mdct_bufferfly_generic_loop2
+
+	LDR	r2,[r13],#4		; unstack r2
+	ADD	r1, r1, r0, LSL #2	; r1 = x+POINTS*j
+	; stall Xscale
+	SUBS	r2, r2, #1		; r2--                 (j++)
+	BGT	mdct_butterflies_loop2
+
+	LDMFD	r13!,{r4,r14}
+
+	LDR	r1,[r13,#4]
+
+	SUBS	r4, r4, #1		; stages--
+	MOV	r14,r14,LSL #1		; r14= 4<<i            (i++)
+	MOV	r6, r6, LSL #1		; r6 = step <<= 1      (i++)
+	BGE	mdct_butterflies_loop1
+	LDMFD	r13,{r0-r1}
+no_generics
+	; mdct_butterflies part2 (loop around mdct_bufferfly_32)
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+mdct_bufferflies_loop3
+	; mdct_bufferfly_32
+
+	; block1
+	ADD	r4, r1, #16*4		; r4 = &in[16]
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[16]
+					; r6 = x[17]
+					; r9 = x[18]
+					; r10= x[19]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[0]
+					; r8 = x[1]
+					; r11= x[2]
+					; r12= x[3]
+	SUB	r5, r5, r6		; r5 = s0 = x[16] - x[17]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[16] + x[17]  -> x[16]
+	SUB	r9, r9, r10		; r9 = s1 = x[18] - x[19]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[18] + x[19]  -> x[18]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 1] - x[ 0]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 1] + x[ 0]  -> x[17]
+	SUB	r12,r12,r11		; r12= s3 = x[ 3] - x[ 2]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 3] + x[ 2]  -> x[19]
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	LDR	r6,cPI1_8
+	LDR	r7,cPI3_8
+
+	; XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
+	; x[0] = s0*cPI3_8 - s1*cPI1_8     x[2] = s1*cPI3_8 + s0*cPI1_8
+	; stall Xscale
+	SMULL	r14,r11,r5, r6		; (r14,r11)  = s0*cPI1_8
+	SMLAL	r14,r11,r9, r7		; (r14,r11) += s1*cPI3_8
+	RSB	r9, r9, #0
+	SMULL	r14,r5, r7, r5		; (r14,r5)   = s0*cPI3_8
+	SMLAL	r14,r5, r9, r6		; (r14,r5)  -= s1*cPI1_8
+	MOV	r11,r11,LSL #1
+	MOV	r5, r5, LSL #1
+
+	; XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
+	; x[1] = s2*cPI1_8 + s3*cPI3_8     x[3] = s3*cPI1_8 - s2*cPI3_8
+	SMULL	r14,r9, r8, r6		; (r14,r9)   = s2*cPI1_8
+	SMLAL	r14,r9, r12,r7		; (r14,r9)  += s3*cPI3_8
+	RSB	r8,r8,#0
+	SMULL	r14,r12,r6, r12		; (r14,r12)  = s3*cPI1_8
+	SMLAL	r14,r12,r8, r7		; (r14,r12) -= s2*cPI3_8
+	MOV	r9, r9, LSL #1
+	MOV	r12,r12,LSL #1
+	STMIA	r1!,{r5,r9,r11,r12}
+
+	; block2
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[20]
+					; r6 = x[21]
+					; r9 = x[22]
+					; r10= x[23]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[4]
+					; r8 = x[5]
+					; r11= x[6]
+					; r12= x[7]
+	SUB	r5, r5, r6		; r5 = s0 = x[20] - x[21]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[20] + x[21]  -> x[20]
+	SUB	r9, r9, r10		; r9 = s1 = x[22] - x[23]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[22] + x[23]  -> x[22]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 5] - x[ 4]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 5] + x[ 4]  -> x[21]
+	SUB	r12,r12,r11		; r12= s3 = x[ 7] - x[ 6]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 7] + x[ 6]  -> x[23]
+	LDR	r14,cPI2_8
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	SUB	r5, r5, r9		; r5 = s0 - s1
+	ADD	r9, r5, r9, LSL #1	; r9 = s0 + s1
+	SMULL	r6, r5, r14,r5		; (r6,r5)  = (s0-s1)*cPI2_8
+	SUB	r12,r12,r8		; r12= s3 - s2
+	ADD	r8, r12,r8, LSL #1	; r8 = s3 + s2
+
+	SMULL	r6, r8, r14,r8		; (r6,r8)  = (s3+s2)*cPI2_8
+	MOV	r5, r5, LSL #1
+	SMULL	r6, r9, r14,r9		; (r6,r9)  = (s0+s1)*cPI2_8
+	MOV	r8, r8, LSL #1
+	SMULL	r6, r12,r14,r12		; (r6,r12) = (s3-s2)*cPI2_8
+	MOV	r9, r9, LSL #1
+	MOV	r12,r12,LSL #1
+	STMIA	r1!,{r5,r8,r9,r12}
+
+	; block3
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[24]
+					; r6 = x[25]
+					; r9 = x[26]
+					; r10= x[27]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[8]
+					; r8 = x[9]
+					; r11= x[10]
+					; r12= x[11]
+	SUB	r5, r5, r6		; r5 = s0 = x[24] - x[25]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[24] + x[25]  -> x[24]
+	SUB	r9, r9, r10		; r9 = s1 = x[26] - x[27]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[26] + x[27]  -> x[26]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 9] - x[ 8]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 9] + x[ 8]  -> x[25]
+	SUB	r12,r12,r11		; r12= s3 = x[11] - x[10]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[11] + x[10]  -> x[27]
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	LDR	r6,cPI3_8
+	LDR	r7,cPI1_8
+
+	; XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
+	; x[8] = s0*cPI1_8 - s1*cPI3_8     x[10] = s1*cPI1_8 + s0*cPI3_8
+	; stall Xscale
+	SMULL	r14,r11,r5, r6		; (r14,r11)  = s0*cPI3_8
+	SMLAL	r14,r11,r9, r7		; (r14,r11) += s1*cPI1_8
+	RSB	r9, r9, #0
+	SMULL	r14,r5, r7, r5		; (r14,r5)   = s0*cPI1_8
+	SMLAL	r14,r5, r9, r6		; (r14,r5)  -= s1*cPI3_8
+	MOV	r11,r11,LSL #1
+	MOV	r5, r5, LSL #1
+
+	; XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
+	; x[9] = s2*cPI3_8 + s3*cPI1_8     x[11] = s3*cPI3_8 - s2*cPI1_8
+	SMULL	r14,r9, r8, r6		; (r14,r9)   = s2*cPI3_8
+	SMLAL	r14,r9, r12,r7		; (r14,r9)  += s3*cPI1_8
+	RSB	r8,r8,#0
+	SMULL	r14,r12,r6, r12		; (r14,r12)  = s3*cPI3_8
+	SMLAL	r14,r12,r8, r7		; (r14,r12) -= s2*cPI1_8
+	MOV	r9, r9, LSL #1
+	MOV	r12,r12,LSL #1
+	STMIA	r1!,{r5,r9,r11,r12}
+
+	; block4
+	LDMIA	r4,{r5,r6,r10,r11}	; r5 = x[28]
+					; r6 = x[29]
+					; r10= x[30]
+					; r11= x[31]
+	LDMIA	r1,{r8,r9,r12,r14}	; r8 = x[12]
+					; r9 = x[13]
+					; r12= x[14]
+					; r14= x[15]
+	SUB	r5, r5, r6		; r5 = s0 = x[28] - x[29]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[28] + x[29]  -> x[28]
+	SUB	r7, r14,r12		; r7 = s3 = x[15] - x[14]
+	ADD	r12,r7, r12, LSL #1	; r12=      x[15] + x[14]  -> x[31]
+	SUB	r10,r10,r11		; r10= s1 = x[30] - x[31]
+	ADD	r11,r10,r11,LSL #1	; r11=      x[30] + x[31]  -> x[30]
+	SUB	r14, r8, r9		; r14= s2 = x[12] - x[13]
+	ADD	r9, r14, r9, LSL #1	; r9 =      x[12] + x[13]  -> x[29]
+	STMIA	r4!,{r6,r9,r11,r12}
+	STMIA	r1!,{r5,r7,r10,r14}
+
+	; mdct_butterfly16 (1st version)
+	; block 1
+	SUB	r1,r1,#16*4
+	ADD	r4,r1,#8*4
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[ 8]
+					; r6 = x[ 9]
+					; r9 = x[10]
+					; r10= x[11]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[0]
+					; r8 = x[1]
+					; r11= x[2]
+					; r12= x[3]
+	SUB	r5, r5, r6		; r5 = s0 = x[ 8] - x[ 9]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[ 8] + x[ 9]  -> x[ 8]
+	SUB	r9, r9, r10		; r9 = s1 = x[10] - x[11]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[10] + x[11]  -> x[10]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 1] - x[ 0]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 1] + x[ 0]  -> x[ 9]
+	SUB	r12,r12,r11		; r12= s3 = x[ 3] - x[ 2]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 3] + x[ 2]  -> x[11]
+	LDR	r14,cPI2_8
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	SUB	r5, r5, r9		; r5 = s0 - s1
+	ADD	r9, r5, r9, LSL #1	; r9 = s0 + s1
+	SMULL	r6, r5, r14,r5		; (r6,r5)  = (s0-s1)*cPI2_8
+	SUB	r12,r12,r8		; r12= s3 - s2
+	ADD	r8, r12,r8, LSL #1	; r8 = s3 + s2
+
+	SMULL	r6, r8, r14,r8		; (r6,r8)  = (s3+s2)*cPI2_8
+	MOV	r5, r5, LSL #1
+	SMULL	r6, r9, r14,r9		; (r6,r9)  = (s0+s1)*cPI2_8
+	MOV	r8, r8, LSL #1
+	SMULL	r6, r12,r14,r12		; (r6,r12) = (s3-s2)*cPI2_8
+	MOV	r9, r9, LSL #1
+	MOV	r12,r12,LSL #1
+	STMIA	r1!,{r5,r8,r9,r12}
+
+	; block4
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[12]
+					; r6 = x[13]
+					; r9 = x[14]
+					; r10= x[15]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[ 4]
+					; r8 = x[ 5]
+					; r11= x[ 6]
+					; r12= x[ 7]
+	SUB	r14,r7, r8		; r14= s0 = x[ 4] - x[ 5]
+	ADD	r8, r14,r8, LSL #1	; r8 =      x[ 4] + x[ 5]  -> x[13]
+	SUB	r7, r12,r11		; r7 = s1 = x[ 7] - x[ 6]
+	ADD	r11,r7, r11, LSL #1	; r11=      x[ 7] + x[ 6]  -> x[15]
+	SUB	r5, r5, r6		; r5 = s2 = x[12] - x[13]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[12] + x[13]  -> x[12]
+	SUB	r12,r9, r10		; r12= s3 = x[14] - x[15]
+	ADD	r10,r12,r10,LSL #1	; r10=      x[14] + x[15]  -> x[14]
+	STMIA	r4!,{r6,r8,r10,r11}
+	STMIA	r1!,{r5,r7,r12,r14}
+
+	; mdct_butterfly_8
+	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	; mdct_butterfly_8
+	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	; block 2
+	ADD	r1,r1,#16*4-8*4
+	ADD	r4,r1,#8*4
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[ 8]
+					; r6 = x[ 9]
+					; r9 = x[10]
+					; r10= x[11]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[0]
+					; r8 = x[1]
+					; r11= x[2]
+					; r12= x[3]
+	SUB	r5, r5, r6		; r5 = s0 = x[ 8] - x[ 9]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[ 8] + x[ 9]  -> x[ 8]
+	SUB	r9, r9, r10		; r9 = s1 = x[10] - x[11]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[10] + x[11]  -> x[10]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 1] - x[ 0]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 1] + x[ 0]  -> x[ 9]
+	SUB	r12,r12,r11		; r12= s3 = x[ 3] - x[ 2]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 3] + x[ 2]  -> x[11]
+	LDR	r14,cPI2_8
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	SUB	r5, r5, r9		; r5 = s0 - s1
+	ADD	r9, r5, r9, LSL #1	; r9 = s0 + s1
+	SMULL	r6, r5, r14,r5		; (r6,r5)  = (s0-s1)*cPI2_8
+	SUB	r12,r12,r8		; r12= s3 - s2
+	ADD	r8, r12,r8, LSL #1	; r8 = s3 + s2
+
+	SMULL	r6, r8, r14,r8		; (r6,r8)  = (s3+s2)*cPI2_8
+	MOV	r5, r5, LSL #1
+	SMULL	r6, r9, r14,r9		; (r6,r9)  = (s0+s1)*cPI2_8
+	MOV	r8, r8, LSL #1
+	SMULL	r6, r12,r14,r12		; (r6,r12) = (s3-s2)*cPI2_8
+	MOV	r9, r9, LSL #1
+	MOV	r12,r12,LSL #1
+	STMIA	r1!,{r5,r8,r9,r12}
+
+	; block4
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[12]
+					; r6 = x[13]
+					; r9 = x[14]
+					; r10= x[15]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[ 4]
+					; r8 = x[ 5]
+					; r11= x[ 6]
+					; r12= x[ 7]
+	SUB	r5, r5, r6		; r5 = s2 = x[12] - x[13]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[12] + x[13]  -> x[12]
+	SUB	r9, r9, r10		; r9 = s3 = x[14] - x[15]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[14] + x[15]  -> x[14]
+	SUB	r14,r7, r8		; r14= s0 = x[ 4] - x[ 5]
+	ADD	r8, r14,r8, LSL #1	; r8 =      x[ 4] + x[ 5]  -> x[13]
+	SUB	r7, r12,r11		; r7 = s1 = x[ 7] - x[ 6]
+	ADD	r11,r7, r11, LSL #1	; r11=      x[ 7] + x[ 6]  -> x[15]
+	STMIA	r4!,{r6,r8,r10,r11}
+	STMIA	r1!,{r5,r7,r9,r14}
+
+	; mdct_butterfly_8
+	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	; mdct_butterfly_8
+	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	ADD	r1,r1,#8*4
+	SUBS	r0,r0,#64
+	BGT	mdct_bufferflies_loop3
+
+	LDMFD	r13,{r0-r3}
+
+mdct_bitreverse_arm
+	; r0 = points = n
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+	MOV	r4, #0			; r4 = bit = 0
+	ADD	r5, r1, r0, LSL #1	; r5 = w = x + (n>>1)
+	ADR	r6, bitrev
+	SUB	r5, r5, #8
+brev_lp
+	LDRB	r7, [r6, r4, LSR #6]
+	AND	r8, r4, #0x3f
+	LDRB	r8, [r6, r8]
+	ADD	r4, r4, #1		; bit++
+	; stall XScale
+	ORR	r7, r7, r8, LSL #6	; r7 = bitrev[bit]
+	MOV	r7, r7, LSR r3
+	ADD	r9, r1, r7, LSL #2	; r9 = xx = x + (b>>shift)
+	CMP	r5, r9			; if (w > xx)
+	LDR	r10,[r5],#-8		;   r10 = w[0]		w -= 2
+	LDRGT	r11,[r5,#12]		;   r11 = w[1]
+	LDRGT	r12,[r9]		;   r12 = xx[0]
+	LDRGT	r14,[r9,#4]		;   r14 = xx[1]
+	STRGT	r10,[r9]		;   xx[0]= w[0]
+	STRGT	r11,[r9,#4]		;   xx[1]= w[1]
+	STRGT	r12,[r5,#8]		;   w[0] = xx[0]
+	STRGT	r14,[r5,#12]		;   w[1] = xx[1]
+	CMP	r5,r1
+	BGT	brev_lp
+
+	; mdct_step7
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+	CMP	r2, #4			; r5 = T = (step>=4) ?
+	LDRGE	r5, =sincos_lookup0	;          sincos_lookup0 +
+	LDRLT	r5, =sincos_lookup1	;          sincos_lookup1
+	ADD	r7, r1, r0, LSL #1	; r7 = w1 = x + (n>>1)
+	ADDGE	r5, r5, r2, LSL #1	;		            (step>>1)
+	ADD	r8, r5, #1024*4		; r8 = Ttop
+step7_loop1
+	LDR	r6, [r1]		; r6 = w0[0]
+	LDR	r9, [r1,#4]		; r9 = w0[1]
+	LDR	r10,[r7,#-8]!		; r10= w1[0]	w1 -= 2
+	LDR	r11,[r7,#4]		; r11= w1[1]
+	LDR	r14,[r5,#4]		; r14= T[1]
+	LDR	r12,[r5],r2,LSL #2	; r12= T[0]	T += step
+
+	ADD	r6, r6, r10		; r6 = s0 = w0[0] + w1[0]
+	SUB	r10,r6, r10,LSL #1	; r10= s1b= w0[0] - w1[0]
+	SUB	r11,r11,r9		; r11= s1 = w1[1] - w0[1]
+	ADD	r9, r11,r9, LSL #1	; r9 = s0b= w1[1] + w0[1]
+
+	; Can save 1 cycle by using SMULL SMLAL - at the cost of being
+	; 1 off.
+	SMULL	r0, r3, r6, r14		; (r0,r3)   = s0*T[1]
+	SMULL	r0, r4, r11,r12		; (r0,r4)  += s1*T[0] = s2
+	ADD	r3, r3, r4
+	SMULL	r0, r14,r11,r14		; (r0,r14)  = s1*T[1]
+	SMULL	r0, r12,r6, r12		; (r0,r12) += s0*T[0] = s3
+	SUB	r14,r14,r12
+
+	; r9 = s0b<<1
+	; r10= s1b<<1
+	ADD	r9, r3, r9, ASR #1	; r9 = s0b + s2
+	SUB	r3, r9, r3, LSL #1	; r3 = s0b - s2
+
+	SUB	r12,r14,r10,ASR #1	; r12= s3  - s1b
+	ADD	r10,r14,r10,ASR #1	; r10= s3  + s1b
+	STR	r9, [r1],#4
+	STR	r10,[r1],#4		; w0 += 2
+	STR	r3, [r7]
+	STR	r12,[r7,#4]
+
+	CMP	r5,r8
+	BLT	step7_loop1
+
+step7_loop2
+	LDR	r6, [r1]		; r6 = w0[0]
+	LDR	r9, [r1,#4]		; r9 = w0[1]
+	LDR	r10,[r7,#-8]!		; r10= w1[0]	w1 -= 2
+	LDR	r11,[r7,#4]		; r11= w1[1]
+	LDR	r14,[r5,-r2,LSL #2]!	; r14= T[0]	T -= step
+	LDR	r12,[r5,#4]		; r12= T[1]
+
+	ADD	r6, r6, r10		; r6 = s0 = w0[0] + w1[0]
+	SUB	r10,r6, r10,LSL #1	; r10= s1b= w0[0] - w1[0]
+	SUB	r11,r11,r9		; r11= s1 = w1[1] - w0[1]
+	ADD	r9, r11,r9, LSL #1	; r9 = s0b= w1[1] + w0[1]
+
+	; Can save 1 cycle by using SMULL SMLAL - at the cost of being
+	; 1 off.
+	SMULL	r0, r3, r6, r14		; (r0,r3)   = s0*T[0]
+	SMULL	r0, r4, r11,r12		; (r0,r4)  += s1*T[1] = s2
+	ADD	r3, r3, r4
+	SMULL	r0, r14,r11,r14		; (r0,r14)  = s1*T[0]
+	SMULL	r0, r12,r6, r12		; (r0,r12) += s0*T[1] = s3
+	SUB	r14,r14,r12
+
+	; r9 = s0b<<1
+	; r10= s1b<<1
+	ADD	r9, r3, r9, ASR #1	; r9 = s0b + s2
+	SUB	r3, r9, r3, LSL #1	; r3 = s0b - s2
+
+	SUB	r12,r14,r10,ASR #1	; r12= s3  - s1b
+	ADD	r10,r14,r10,ASR #1	; r10= s3  + s1b
+	STR	r9, [r1],#4
+	STR	r10,[r1],#4		; w0 += 2
+	STR	r3, [r7]
+	STR	r12,[r7,#4]
+
+	CMP	r1,r7
+	BLT	step7_loop2
+
+	LDMFD	r13!,{r0-r3}
+
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+	MOV	r2, r2, ASR #2		; r2 = step >>= 2
+	CMP	r2, #0
+	CMPNE	r2, #1
+	BEQ	mdct_end
+
+	; step > 1 (default case)
+	CMP	r2, #4			; r5 = T = (step>=4) ?
+	LDRGE	r5, =sincos_lookup0	;          sincos_lookup0 +
+	LDRLT	r5, =sincos_lookup1	;          sincos_lookup1
+	ADD	r7, r1, r0, LSL #1	; r7 = iX = x + (n>>1)
+	ADDGE	r5, r5, r2, LSL #1	;		            (step>>1)
+mdct_step8_default
+	LDR	r6, [r1],#4		; r6 =  s0 = x[0]
+	LDR	r8, [r1],#4		; r8 = -s1 = x[1]
+	LDR	r12,[r5,#4]       	; r12= T[1]
+	LDR	r14,[r5],r2,LSL #2	; r14= T[0]	T += step
+	RSB	r8, r8, #0		; r8 = s1
+
+	; XPROD31(s0, s1, T[0], T[1], x, x+1)
+	; x[0] = s0 * T[0] + s1 * T[1]      x[1] = s1 * T[0] - s0 * T[1]
+	SMULL	r9, r10, r8, r12	; (r9,r10)  = s1 * T[1]
+	CMP	r1, r7
+	SMLAL	r9, r10, r6, r14	; (r9,r10) += s0 * T[0]
+	RSB	r6, r6, #0		; r6 = -s0
+	SMULL	r9, r11, r8, r14	; (r9,r11)  = s1 * T[0]
+	MOV	r10,r10,LSL #1
+	SMLAL	r9, r11, r6, r12	; (r9,r11) -= s0 * T[1]
+	STR	r10,[r1,#-8]
+	MOV	r11,r11,LSL #1
+	STR	r11,[r1,#-4]
+	BLT	mdct_step8_default
+
+mdct_end
+	MOV	r0, r2
+	LDMFD	r13!,{r4-r11,PC}
+
+cPI1_8
+	DCD	0x7641af3d
+cPI2_8
+	DCD	0x5a82799a
+cPI3_8
+	DCD	0x30fbc54d
+bitrev
+	DCB	0
+	DCB	32
+	DCB	16
+	DCB	48
+	DCB	8
+	DCB	40
+	DCB	24
+	DCB	56
+	DCB	4
+	DCB	36
+	DCB	20
+	DCB	52
+	DCB	12
+	DCB	44
+	DCB	28
+	DCB	60
+	DCB	2
+	DCB	34
+	DCB	18
+	DCB	50
+	DCB	10
+	DCB	42
+	DCB	26
+	DCB	58
+	DCB	6
+	DCB	38
+	DCB	22
+	DCB	54
+	DCB	14
+	DCB	46
+	DCB	30
+	DCB	62
+	DCB	1
+	DCB	33
+	DCB	17
+	DCB	49
+	DCB	9
+	DCB	41
+	DCB	25
+	DCB	57
+	DCB	5
+	DCB	37
+	DCB	21
+	DCB	53
+	DCB	13
+	DCB	45
+	DCB	29
+	DCB	61
+	DCB	3
+	DCB	35
+	DCB	19
+	DCB	51
+	DCB	11
+	DCB	43
+	DCB	27
+	DCB	59
+	DCB	7
+	DCB	39
+	DCB	23
+	DCB	55
+	DCB	15
+	DCB	47
+	DCB	31
+	DCB	63
+
+	END

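A reading aid for the XPROD31 / XNPROD31 names that appear in the comments
above (and again in mdctLARM.s below): they are Tremolo's Q31 fixed-point
rotation helpers, spelled out here as SMULL/SMLAL pairs.  As a C sketch of
what each pair of multiplies computes (the actual macros are defined in the
C headers and may differ in rounding details):

    /* (x, y) = (a, b) rotated by (t, v), all Q31 fixed point. */
    static inline void xprod31_sketch(ogg_int32_t a, ogg_int32_t b,
                                      ogg_int32_t t, ogg_int32_t v,
                                      ogg_int32_t *x, ogg_int32_t *y){
      *x = (ogg_int32_t)(((ogg_int64_t)a*t + (ogg_int64_t)b*v) >> 31);
      *y = (ogg_int32_t)(((ogg_int64_t)b*t - (ogg_int64_t)a*v) >> 31);
    }

    /* XNPROD31 is the same but with the signs of the v terms flipped. */
    static inline void xnprod31_sketch(ogg_int32_t a, ogg_int32_t b,
                                       ogg_int32_t t, ogg_int32_t v,
                                       ogg_int32_t *x, ogg_int32_t *y){
      *x = (ogg_int32_t)(((ogg_int64_t)a*t - (ogg_int64_t)b*v) >> 31);
      *y = (ogg_int32_t)(((ogg_int64_t)b*t + (ogg_int64_t)a*v) >> 31);
    }
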
Added: branches/lowmem-branch/Tremolo/mdctLARM.s
===================================================================
--- branches/lowmem-branch/Tremolo/mdctLARM.s	                        (rev 0)
+++ branches/lowmem-branch/Tremolo/mdctLARM.s	2010-05-15 20:40:20 UTC (rev 17218)
@@ -0,0 +1,1175 @@
+; Tremolo library
+; Copyright (C) 2009 Robin Watts for Pinknoise Productions Ltd
+
+	AREA	|.text|, CODE, READONLY
+
+	; low accuracy version
+
+	EXPORT mdct_backward_arm_low
+	EXPORT mdct_shift_right_arm_low
+	EXPORT mdct_unroll_prelap_arm_low
+	EXPORT mdct_unroll_part2_arm_low
+	EXPORT mdct_unroll_part3_arm_low
+	EXPORT mdct_unroll_postlap_arm_low
+
+	IMPORT	sincos_lookup0
+	IMPORT	sincos_lookup1
+
+mdct_unroll_prelap_arm_low
+	; r0 = out
+	; r1 = post
+	; r2 = r
+	; r3 = step
+	STMFD	r13!,{r4-r7,r14}
+	MVN	r4, #0x8000
+	MOV	r3, r3, LSL #1
+	SUB	r1, r2, r1		; r1 = r - post
+	SUBS	r1, r1, #16		; r1 = r - post - 16
+	BLT	unroll_over
+unroll_loop
+	LDMDB	r2!,{r5,r6,r7,r12}
+
+	MOV	r5, r5, ASR #9		; r5 = (*--r)>>9
+	MOV	r6, r6, ASR #9		; r6 = (*--r)>>9
+	MOV	r7, r7, ASR #9		; r7 = (*--r)>>9
+	MOV	r12,r12,ASR #9		; r12= (*--r)>>9
+
+	MOV	r14,r12,ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r12,r4, r14,ASR #31
+	STRH	r12,[r0], r3
+
+	MOV	r14,r7, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r7, r4, r14,ASR #31
+	STRH	r7, [r0], r3
+
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r3
+
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+
+	SUBS	r1, r1, #16
+	BGE	unroll_loop
+
+unroll_over
+	ADDS	r1, r1, #16
+	BLE	unroll_end
+unroll_loop2
+	LDR	r5,[r2,#-4]!
+	; stall
+	; stall (Xscale)
+	MOV	r5, r5, ASR #9		; r5 = (*--r)>>9
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+	SUBS	r1, r1, #4
+	BGT	unroll_loop2
+unroll_end
+	LDMFD	r13!,{r4-r7,PC}
+
+mdct_unroll_postlap_arm_low
+	; r0 = out
+	; r1 = post
+	; r2 = l
+	; r3 = step
+	STMFD	r13!,{r4-r7,r14}
+	MVN	r4, #0x8000
+	MOV	r3, r3, LSL #1
+	SUB	r1, r1, r2		; r1 = post - l
+	MOV	r1, r1, ASR #1		; r1 = (post - l)>>1
+	SUBS	r1, r1, #16		; r1 = ((post - l)>>1) - 16
+	BLT	unroll_over3
+unroll_loop3
+	LDR	r12,[r2],#8
+	LDR	r7, [r2],#8
+	LDR	r6, [r2],#8
+	LDR	r5, [r2],#8
+
+	RSB	r12,r12,#0
+	RSB	r5, r5, #0
+	RSB	r6, r6, #0
+	RSB	r7, r7, #0
+
+	MOV	r12, r12,ASR #9		; r12= (-*l)>>9
+	MOV	r5,  r5, ASR #9		; r5 = (-*l)>>9
+	MOV	r6,  r6, ASR #9		; r6 = (-*l)>>9
+	MOV	r7,  r7, ASR #9		; r7 = (-*l)>>9
+
+	MOV	r14,r12,ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r12,r4, r14,ASR #31
+	STRH	r12,[r0], r3
+
+	MOV	r14,r7, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r7, r4, r14,ASR #31
+	STRH	r7, [r0], r3
+
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r3
+
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+
+	SUBS	r1, r1, #16
+	BGE	unroll_loop3
+
+unroll_over3
+	ADDS	r1, r1, #16
+	BLE	unroll_over4
+unroll_loop4
+	LDR	r5,[r2], #8
+	; stall
+	; stall (Xscale)
+	RSB	r5, r5, #0
+	MOV	r5, r5, ASR #9		; r5 = (-*l)>>9
+	MOV	r14,r5, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r5, r4, r14,ASR #31
+	STRH	r5, [r0], r3
+	SUBS	r1, r1, #4
+	BGT	unroll_loop4
+unroll_over4
+	LDMFD	r13!,{r4-r7,PC}
+
+mdct_unroll_part2_arm_low
+	; r0 = out
+	; r1 = post
+	; r2 = l
+	; r3 = r
+	; <> = step
+	; <> = wL
+	; <> = wR
+	MOV	r12,r13
+	STMFD	r13!,{r4,r6-r11,r14}
+	LDMFD	r12,{r8,r9,r10}		; r8 = step
+					; r9 = wL
+					; r10= wR
+	MVN	r4, #0x8000
+	MOV	r8, r8, LSL #1
+	SUBS	r1, r3, r1		; r1 = (r - post)
+	BLE	unroll_over5
+unroll_loop5
+	LDR	r12,[r2, #-8]!		; r12= *l       (but l -= 2 first)
+	LDR	r7, [r3, #-4]!		; r7 = *--r
+	LDRB	r6, [r10,#-1]!		; r6 = *--wR
+	LDRB	r11,[r9],#1		; r11= *wL++
+
+	MOV	r12, r12, ASR #8
+	; Can save a cycle here, at the cost of 1bit errors in rounding
+	MUL	r11,r12,r11		; r11  = *l   * *wL++
+	MOV	r7, r7, ASR #8
+	MLA	r6, r7, r6, r11		; r6   = *--r * *--wR
+	MOV	r6, r6, ASR #9
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r8
+
+	SUBS	r1, r1, #4
+	BGT	unroll_loop5
+
+unroll_over5
+	LDMFD	r13!,{r4,r6-r11,PC}
+
+mdct_unroll_part3_arm_low
+	; r0 = out
+	; r1 = post
+	; r2 = l
+	; r3 = r
+	; <> = step
+	; <> = wL
+	; <> = wR
+	MOV	r12,r13
+	STMFD	r13!,{r4,r6-r11,r14}
+	LDMFD	r12,{r8,r9,r10}		; r8 = step
+					; r9 = wL
+					; r10= wR
+	MVN	r4, #0x8000
+	MOV	r8, r8, LSL #1
+	SUBS	r1, r1, r3		; r1 = (post - r)
+	BLE	unroll_over6
+unroll_loop6
+	LDR	r12,[r2],#8		; r12= *l       (but l += 2 first)
+	LDR	r7, [r3],#4		; r7 = *r++
+	LDRB	r11,[r9],#1		; r11= *wL++
+	LDRB	r6, [r10,#-1]!		; r6 = *--wR
+
+	; Can save a cycle here, at the cost of 1bit errors in rounding
+	MOV	r12,r12,ASR #8
+	MUL	r11,r12,r11		; r11 = *l   * *wL++
+	MOV	r7, r7, ASR #8
+	MUL	r6, r7, r6		; r6  = *r++ * *--wR
+	SUB	r6, r6, r11
+	MOV	r6, r6, ASR #9
+	MOV	r14,r6, ASR #15
+	TEQ	r14,r14,ASR #31		; if r14==0 || r14==-1 then in range
+	EORNE	r6, r4, r14,ASR #31
+	STRH	r6, [r0], r8
+
+	SUBS	r1, r1, #4
+	BGT	unroll_loop6
+
+unroll_over6
+	LDMFD	r13!,{r4,r6-r11,PC}
+
+mdct_shift_right_arm_low
+	; r0 = n
+	; r1 = in
+	; r2 = right
+	STMFD	r13!,{r4-r11,r14}
+
+	MOV	r0, r0, LSR #2		; n >>= 2
+	ADD	r1, r1, #4
+
+	SUBS	r0, r0,	#8
+	BLT	sr_less_than_8
+sr_loop
+	LDR	r3, [r1], #8
+	LDR	r4, [r1], #8
+	LDR	r5, [r1], #8
+	LDR	r6, [r1], #8
+	LDR	r7, [r1], #8
+	LDR	r8, [r1], #8
+	LDR	r12,[r1], #8
+	LDR	r14,[r1], #8
+	SUBS	r0, r0, #8
+	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
+	BGE	sr_loop
+sr_less_than_8
+	ADDS	r0, r0, #8
+	BEQ	sr_end
+sr_loop2
+	LDR	r3, [r1], #8
+	SUBS	r0, r0, #1
+	STR	r3, [r2], #4
+	BGT	sr_loop2
+sr_end
+	LDMFD	r13!,{r4-r11,PC}
+
+mdct_backward_arm_low
+	; r0 = n
+	; r1 = in
+	STMFD	r13!,{r4-r11,r14}
+
+	MOV	r2, #1<<4	; r2 = 1<<shift
+	MOV	r3, #13-4	; r3 = 13-shift
+find_shift_loop
+	TST	r0, r2		; if (n & (1<<shift)) == 0
+	MOV	r2, r2, LSL #1
+	SUBEQ	r3, r3, #1	; shift--
+	BEQ	find_shift_loop
+	MOV	r2, #2
+	MOV	r2, r2, LSL r3	; r2 = step = 2<<shift
+
+	; presymmetry
+	; r0 = n (a multiple of 4)
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+	ADD	r4, r1, r0, LSL #1	; r4 = aX = in+(n>>1)
+	ADD	r14,r1, r0		; r14= in+(n>>2)
+	SUB	r4, r4, #3*4		; r4 = aX = in+n2-3
+	LDR	r5, =sincos_lookup0	; r5 = T=sincos_lookup0
+
+presymmetry_loop1
+	LDR	r7, [r4,#8]		; r7 = s2 = aX[2]
+	LDRB	r11,[r5,#1]		; r11= T[1]
+	LDR	r6, [r4],#-16		; r6 = s0 = aX[0]
+	LDRB	r10,[r5],r2		; r10= T[0]   T += step
+	MOV	r6, r6, ASR #8
+	MOV	r7, r7, ASR #8
+
+	; XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
+	MUL	r9, r6, r10		; r9   = s0*T[0]
+	RSB	r6, r6, #0
+	MLA	r9, r7, r11,r9		; r9  += s2*T[1]
+	CMP	r4, r14
+	MUL	r12,r7, r10		; r12  = s2*T[0]
+	STR	r9, [r4,#16]		; aX[0] = r9
+	MLA	r12,r6, r11,r12		; r12 -= s0*T[1]
+	STR	r12,[r4,#8+16]		; aX[2] = r12
+
+	BGE	presymmetry_loop1	; while (aX >= in+n4)
+
+presymmetry_loop2
+	LDR	r6, [r4],#-16		; r6 = s0 = aX[0]
+	LDRB	r10,[r5,#1]		; r10= T[1]
+	LDR	r7, [r4,#16+8]		; r7 = s2 = aX[2]
+	LDRB	r11,[r5],-r2		; r11= T[0]   T -= step
+	MOV	r6, r6, ASR #8
+	MOV	r7, r7, ASR #8
+
+	; XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
+	MUL	r9, r6, r10		; r9   = s0*T[1]
+	RSB	r6, r6, #0
+	MLA	r9, r7, r11,r9		; r9  += s2*T[0]
+	CMP	r4, r1
+	MUL	r12,r7, r10		; r12  = s2*T[1]
+	STR	r9, [r4,#16]		; aX[0] = r9
+	MLA	r12,r6, r11,r12		; r12 -= s0*T[0]
+	STR	r12,[r4,#8+16]		; aX[2] = r12
+
+	BGE	presymmetry_loop2	; while (aX >= in)
+
+	; r0 = n
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+	STMFD	r13!,{r3}
+	LDR	r5, =sincos_lookup0	; r5 = T=sincos_lookup0
+	ADD	r4, r1, r0, LSL #1	; r4 = aX = in+(n>>1)
+	SUB	r4, r4, #4*4		; r4 = aX = in+(n>>1)-4
+	LDRB	r11,[r5,#1]		; r11= T[1]
+	LDRB	r10,[r5],r2		; r10= T[0]    T += step
+presymmetry_loop3
+	LDR	r8, [r1],#16 		; r8 = ro0 = bX[0]
+	LDR	r9, [r1,#8-16]		; r9 = ro2 = bX[2]
+	LDR	r6, [r4],#-16		; r6 = ri0 = aX[0]
+	LDR	r7, [r4,#8+16]		; r7 = ri2 = aX[2]
+	MOV	r8, r8, ASR #8
+	MOV	r9, r9, ASR #8
+	MOV	r6, r6, ASR #8
+
+	; XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
+	; aX[0] = (ro2*T[1] - ro0*T[0])>>31 aX[2] = (ro0*T[1] + ro2*T[0])>>31
+	MUL	r12,r8, r11		; r12  = ro0*T[1]
+	MOV	r7, r7, ASR #8
+	MLA	r12,r9, r10,r12		; r12 += ro2*T[0]
+	RSB	r8, r8, #0		; r8 = -ro0
+	MUL	r3, r9, r11		; r3   = ro2*T[1]
+	LDRB	r11,[r5,#1]		; r11= T[1]
+	MLA	r3, r8, r10,r3		; r3  -= ro0*T[0]
+	LDRB	r10,[r5],r2		; r10= T[0]    T += step
+	STR	r12,[r4,#16+8]
+	STR	r3, [r4,#16]
+
+	; XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
+	; bX[0] = (ri2*T[0] - ri0*T[1])>>31 bX[2] = (ri0*T[0] + ri2*T[1])>>31
+	MUL	r12,r6, r10		; r12  = ri0*T[0]
+	RSB	r6, r6, #0		; r6 = -ri0
+	MLA	r12,r7, r11,r12		; r12 += ri2*T[1]
+	CMP	r4, r1
+	MUL	r3, r7, r10		; r3   = ri2*T[0]
+	STR	r12,[r1,#8-16]
+	MLA	r3, r6, r11,r3		; r3  -= ri0*T[1]
+	STR	r3, [r1,#-16]
+
+	BGE	presymmetry_loop3
+
+	SUB	r1,r1,r0		; r1 = in -= n>>2 (i.e. restore in)
+
+	LDR	r3,[r13]
+	STR	r2,[r13,#-4]!
+
+	; mdct_butterflies
+	; r0 = n  = (points * 2)
+	; r1 = in = x
+	; r2 = i
+	; r3 = shift
+	STMFD	r13!,{r0-r1}
+	RSBS	r4,r3,#6		; r4 = stages = 7-shift then --stages
+	LDR	r5,=sincos_lookup0
+	BLE	no_generics
+	MOV	r14,#4			; r14= 4               (i=0)
+	MOV	r6, r14,LSL r3		; r6 = (4<<i)<<shift
+mdct_butterflies_loop1
+	MOV	r0, r0, LSR #1		; r0 = points>>i = POINTS
+	MOV	r2, r14,LSR #2		; r2 = (1<<i)-j        (j=0)
+	STMFD	r13!,{r4,r14}
+mdct_butterflies_loop2
+
+	; mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
+	; mdct_butterfly_generic(r1, r0, r6)
+	; r0 = points
+	; r1 = x
+	; preserve r2 (external loop counter)
+	; preserve r3
+	; preserve r4 (external loop counter)
+	; r5 = T = sincos_lookup0
+	; r6 = step
+	; preserve r14
+
+	STR	r2,[r13,#-4]!		; stack r2
+	ADD	r1,r1,r0,LSL #1		; r1 = x2+4 = x + (POINTS>>1)
+	ADD	r7,r1,r0,LSL #1		; r7 = x1+4 = x + POINTS
+	ADD	r12,r5,#1024		; r12= sincos_lookup0+1024
+
+mdct_bufferfly_generic_loop1
+	LDMDB	r7!,{r2,r3,r8,r11}	; r2 = x1[0]
+					; r3 = x1[1]
+					; r8 = x1[2]
+					; r11= x1[3]    x1 -= 4
+	LDMDB	r1!,{r4,r9,r10,r14}	; r4 = x2[0]
+					; r9 = x2[1]
+					; r10= x2[2]
+					; r14= x2[3]    x2 -= 4
+
+	SUB	r2, r2, r3		; r2 = s0 = x1[0] - x1[1]
+	ADD	r3, r2, r3, LSL #1	; r3 =      x1[0] + x1[1] (-> x1[0])
+	SUB	r11,r11,r8		; r11= s1 = x1[3] - x1[2]
+	ADD	r8, r11,r8, LSL #1	; r8 =      x1[3] + x1[2] (-> x1[2])
+	SUB	r9, r9, r4		; r9 = s2 = x2[1] - x2[0]
+	ADD	r4, r9, r4, LSL #1	; r4 =      x2[1] + x2[0] (-> x1[1])
+	SUB	r14,r14,r10		; r14= s3 = x2[3] - x2[2]
+	ADD	r10,r14,r10,LSL #1	; r10=      x2[3] + x2[2] (-> x1[3])
+	STMIA	r7,{r3,r4,r8,r10}
+
+	; r0 = points
+	; r1 = x2
+	; r2 = s0
+	; r3 free
+	; r4 free
+	; r5 = T
+	; r6 = step
+	; r7 = x1
+	; r8 free
+	; r9 = s2
+	; r10 free
+	; r11= s1
+	; r12= limit
+	; r14= s3
+
+	LDRB	r8, [r5,#1]		; r8 = T[1]
+	LDRB	r10,[r5],r6		; r10= T[0]		T += step
+	MOV	r2, r2, ASR #8
+	MOV	r11,r11,ASR #8
+	MOV	r9, r9, ASR #8
+	MOV	r14,r14,ASR #8
+
+	; XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
+	; x2[0] = (s1*T[0] + s0*T[1])>>31     x2[2] = (s0*T[0] - s1*T[1])>>31
+	; stall Xscale
+	MUL	r3, r2, r8		; r3   = s0*T[1]
+	MLA	r3, r11,r10,r3		; r3  += s1*T[0]
+	RSB	r11,r11,#0
+	MUL	r4, r8, r11		; r4   = -s1*T[1]
+	MLA	r4, r2, r10,r4		; r4  += s0*T[0] = Value for x2[2]
+	MOV	r2, r3			; r2 = r3 = Value for x2[0]
+
+	; XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
+	; x2[1] = (s2*T[0] + s3*T[1])>>31     x2[3] = (s3*T[0] - s2*T[1])>>31
+	MUL	r3, r9, r10		; r3   = s2*T[0]
+	MLA	r3, r14,r8, r3		; r3  += s3*T[1] = Value for x2[1]
+	RSB	r9, r9, #0
+	MUL	r11,r14,r10		; r11  = s3*T[0]
+	MLA	r11,r9, r8, r11		; r11 -= s2*T[1] = Value for x2[3]
+	CMP	r5, r12
+
+	STMIA	r1,{r2,r3,r4,r11}
+
+	BLT	mdct_bufferfly_generic_loop1
+
+	SUB	r12,r12,#1024
+mdct_bufferfly_generic_loop2
+	LDMDB	r7!,{r2,r3,r9,r10}	; r2 = x1[0]
+					; r3 = x1[1]
+					; r9 = x1[2]
+					; r10= x1[3]    x1 -= 4
+	LDMDB	r1!,{r4,r8,r11,r14}	; r4 = x2[0]
+					; r8 = x2[1]
+					; r11= x2[2]
+					; r14= x2[3]    x2 -= 4
+
+	SUB	r2, r2, r3		; r2 = s0 = x1[0] - x1[1]
+	ADD	r3, r2, r3, LSL #1	; r3 =      x1[0] + x1[1] (-> x1[0])
+	SUB	r9, r9,r10		; r9 = s1 = x1[2] - x1[3]
+	ADD	r10,r9,r10, LSL #1	; r10=      x1[2] + x1[3] (-> x1[2])
+	SUB	r4, r4, r8		; r4 = s2 = x2[0] - x2[1]
+	ADD	r8, r4, r8, LSL #1	; r8 =      x2[0] + x2[1] (-> x1[1])
+	SUB	r14,r14,r11		; r14= s3 = x2[3] - x2[2]
+	ADD	r11,r14,r11,LSL #1	; r11=      x2[3] + x2[2] (-> x1[3])
+	STMIA	r7,{r3,r8,r10,r11}
+
+	; r0 = points
+	; r1 = x2
+	; r2 = s0
+	; r3 free
+	; r4 = s2
+	; r5 = T
+	; r6 = step
+	; r7 = x1
+	; r8 free
+	; r9 = s1
+	; r10 free
+	; r11 free
+	; r12= limit
+	; r14= s3
+
+	LDRB	r8, [r5,#1]		; r8 = T[1]
+	LDRB	r10,[r5],-r6		; r10= T[0]		T -= step
+	MOV	r2, r2, ASR #8
+	MOV	r9, r9, ASR #8
+	MOV	r4, r4, ASR #8
+	MOV	r14,r14,ASR #8
+
+	; XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
+	; x2[0] = (s0*T[0] - s1*T[1])>>31     x2[2] = (s1*T[0] + s0*T[1])>>31
+	; stall Xscale
+	MUL	r11,r2, r8		; r11  = s0*T[1]
+	MLA	r11,r9, r10,r11		; r11 += s1*T[0]
+	RSB	r9, r9, #0
+	MUL	r2, r10,r2		; r2   = s0*T[0]
+	MLA	r2, r9, r8, r2		; r2  += -s1*T[1] = Value for x2[0]
+	MOV	r9, r11			; r9 = r11 = Value for x2[2]
+
+	; XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
+	; x2[1] = (s3*T[0] - s2*T[1])>>31     x2[3] = (s2*T[0] + s3*T[1])>>31
+	MUL	r11,r4, r10		; r11   = s2*T[0]
+	MLA	r11,r14,r8, r11		; r11  += s3*T[1] = Value for x2[3]
+	RSB	r4, r4, #0
+	MUL	r3, r14,r10		; r3   = s3*T[0]
+	MLA	r3, r4, r8, r3		; r3  -= s2*T[1] = Value for x2[1]
+	CMP	r5, r12
+
+	STMIA	r1,{r2,r3,r9,r11}
+
+	BGT	mdct_bufferfly_generic_loop2
+
+	LDR	r2,[r13],#4		; unstack r2
+	ADD	r1, r1, r0, LSL #2	; r1 = x+POINTS*j
+	; stall Xscale
+	SUBS	r2, r2, #1		; r2--                 (j++)
+	BGT	mdct_butterflies_loop2
+
+	LDMFD	r13!,{r4,r14}
+
+	LDR	r1,[r13,#4]
+
+	SUBS	r4, r4, #1		; stages--
+	MOV	r14,r14,LSL #1		; r14= 4<<i            (i++)
+	MOV	r6, r6, LSL #1		; r6 = step <<= 1      (i++)
+	BGE	mdct_butterflies_loop1
+	LDMFD	r13,{r0-r1}
+
+no_generics
+	; mdct_butterflies part2 (loop around mdct_bufferfly_32)
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+mdct_bufferflies_loop3
+	; mdct_bufferfly_32
+
+	; block1
+	ADD	r4, r1, #16*4		; r4 = &in[16]
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[16]
+					; r6 = x[17]
+					; r9 = x[18]
+					; r10= x[19]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[0]
+					; r8 = x[1]
+					; r11= x[2]
+					; r12= x[3]
+	SUB	r5, r5, r6		; r5 = s0 = x[16] - x[17]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[16] + x[17]  -> x[16]
+	SUB	r9, r9, r10		; r9 = s1 = x[18] - x[19]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[18] + x[19]  -> x[18]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 1] - x[ 0]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 1] + x[ 0]  -> x[17]
+	SUB	r12,r12,r11		; r12= s3 = x[ 3] - x[ 2]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 3] + x[ 2]  -> x[19]
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	MOV	r6,#0xed		; r6 =cPI1_8
+	MOV	r7,#0x62		; r7 =cPI3_8
+
+	MOV	r5, r5, ASR #8
+	MOV	r9, r9, ASR #8
+	MOV	r8, r8, ASR #8
+	MOV	r12,r12,ASR #8
+
+	; XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
+	; x[0] = s0*cPI3_8 - s1*cPI1_8     x[2] = s1*cPI3_8 + s0*cPI1_8
+	; stall Xscale
+	MUL	r11,r5, r6		; r11  = s0*cPI1_8
+	MLA	r11,r9, r7, r11		; r11 += s1*cPI3_8
+	RSB	r9, r9, #0
+	MUL	r5, r7, r5		; r5   = s0*cPI3_8
+	MLA	r5, r9, r6, r5		; r5  -= s1*cPI1_8
+
+	; XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
+	; x[1] = s2*cPI1_8 + s3*cPI3_8     x[3] = s3*cPI1_8 - s2*cPI3_8
+	MUL	r9, r8, r6		; r9   = s2*cPI1_8
+	MLA	r9, r12,r7, r9		; r9  += s3*cPI3_8
+	RSB	r8,r8,#0
+	MUL	r12,r6, r12		; r12  = s3*cPI1_8
+	MLA	r12,r8, r7, r12		; r12 -= s2*cPI3_8
+	STMIA	r1!,{r5,r9,r11,r12}
+
+	; block2
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[20]
+					; r6 = x[21]
+					; r9 = x[22]
+					; r10= x[23]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[4]
+					; r8 = x[5]
+					; r11= x[6]
+					; r12= x[7]
+	SUB	r5, r5, r6		; r5 = s0 = x[20] - x[21]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[20] + x[21]  -> x[20]
+	SUB	r9, r9, r10		; r9 = s1 = x[22] - x[23]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[22] + x[23]  -> x[22]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 5] - x[ 4]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 5] + x[ 4]  -> x[21]
+	SUB	r12,r12,r11		; r12= s3 = x[ 7] - x[ 6]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 7] + x[ 6]  -> x[23]
+	MOV	r14,#0xb5		; cPI2_8
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	SUB	r5, r5, r9		; r5 = s0 - s1
+	ADD	r9, r5, r9, LSL #1	; r9 = s0 + s1
+	MOV	r5, r5, ASR #8
+	MUL	r5, r14,r5		; r5 = (s0-s1)*cPI2_8
+	SUB	r12,r12,r8		; r12= s3 - s2
+	ADD	r8, r12,r8, LSL #1	; r8 = s3 + s2
+
+	MOV	r8, r8, ASR #8
+	MUL	r8, r14,r8		; r8  = (s3+s2)*cPI2_8
+	MOV	r9, r9, ASR #8
+	MUL	r9, r14,r9		; r9  = (s0+s1)*cPI2_8
+	MOV	r12,r12,ASR #8
+	MUL	r12,r14,r12		; r12 = (s3-s2)*cPI2_8
+	STMIA	r1!,{r5,r8,r9,r12}
+
+	; block3
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[24]
+					; r6 = x[25]
+					; r9 = x[26]
+					; r10= x[27]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[8]
+					; r8 = x[9]
+					; r11= x[10]
+					; r12= x[11]
+	SUB	r5, r5, r6		; r5 = s0 = x[24] - x[25]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[24] + x[25]  -> x[24]
+	SUB	r9, r9, r10		; r9 = s1 = x[26] - x[27]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[26] + x[27]  -> x[26]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 9] - x[ 8]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 9] + x[ 8]  -> x[25]
+	SUB	r12,r12,r11		; r12= s3 = x[11] - x[10]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[11] + x[10]  -> x[27]
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	MOV	r6,#0x62		; r6 = cPI3_8
+	MOV	r7,#0xED		; r7 = cPI1_8
+
+	; XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
+	; x[8] = s0*cPI1_8 - s1*cPI3_8     x[10] = s1*cPI1_8 + s0*cPI3_8
+	; stall Xscale
+	MOV	r5, r5, ASR #8
+	MUL	r11,r5, r6		; r11  = s0*cPI3_8
+	MOV	r9, r9, ASR #8
+	MLA	r11,r9, r7, r11		; r11 += s1*cPI1_8
+	RSB	r9, r9, #0
+	MUL	r5, r7, r5		; r5   = s0*cPI1_8
+	MLA	r5, r9, r6, r5		; r5  -= s1*cPI3_8
+
+	; XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
+	; x[9] = s2*cPI3_8 + s3*cPI1_8     x[11] = s3*cPI3_8 - s2*cPI1_8
+	MOV	r8, r8, ASR #8
+	MUL	r9, r8, r6		; r9   = s2*cPI3_8
+	MOV	r12,r12,ASR #8
+	MLA	r9, r12,r7, r9		; r9  += s3*cPI1_8
+	RSB	r8,r8,#0
+	MUL	r12,r6, r12		; r12  = s3*cPI3_8
+	MLA	r12,r8, r7, r12		; r12 -= s2*cPI1_8
+	STMIA	r1!,{r5,r9,r11,r12}
+
+	; block4
+	LDMIA	r4,{r5,r6,r10,r11}	; r5 = x[28]
+					; r6 = x[29]
+					; r10= x[30]
+					; r11= x[31]
+	LDMIA	r1,{r8,r9,r12,r14}	; r8 = x[12]
+					; r9 = x[13]
+					; r12= x[14]
+					; r14= x[15]
+	SUB	r5, r5, r6		; r5 = s0 = x[28] - x[29]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[28] + x[29]  -> x[28]
+	SUB	r7, r14,r12		; r7 = s3 = x[15] - x[14]
+	ADD	r12,r7, r12, LSL #1	; r12=      x[15] + x[14]  -> x[31]
+	SUB	r10,r10,r11		; r10= s1 = x[30] - x[31]
+	ADD	r11,r10,r11,LSL #1	; r11=      x[30] + x[31]  -> x[30]
+	SUB	r14, r8, r9		; r14= s2 = x[12] - x[13]
+	ADD	r9, r14, r9, LSL #1	; r9 =      x[12] + x[13]  -> x[29]
+	STMIA	r4!,{r6,r9,r11,r12}
+	STMIA	r1!,{r5,r7,r10,r14}
+
+	; mdct_butterfly16 (1st version)
+	; block 1
+	SUB	r1,r1,#16*4
+	ADD	r4,r1,#8*4
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[ 8]
+					; r6 = x[ 9]
+					; r9 = x[10]
+					; r10= x[11]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[0]
+					; r8 = x[1]
+					; r11= x[2]
+					; r12= x[3]
+	SUB	r5, r5, r6		; r5 = s0 = x[ 8] - x[ 9]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[ 8] + x[ 9]  -> x[ 8]
+	SUB	r9, r9, r10		; r9 = s1 = x[10] - x[11]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[10] + x[11]  -> x[10]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 1] - x[ 0]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 1] + x[ 0]  -> x[ 9]
+	SUB	r12,r12,r11		; r12= s3 = x[ 3] - x[ 2]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 3] + x[ 2]  -> x[11]
+	MOV	r14,#0xB5		; r14= cPI2_8
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	SUB	r5, r5, r9		; r5 = s0 - s1
+	ADD	r9, r5, r9, LSL #1	; r9 = s0 + s1
+	MOV	r5, r5, ASR #8
+	MUL	r5, r14,r5		; r5  = (s0-s1)*cPI2_8
+	SUB	r12,r12,r8		; r12= s3 - s2
+	ADD	r8, r12,r8, LSL #1	; r8 = s3 + s2
+
+	MOV	r8, r8, ASR #8
+	MUL	r8, r14,r8		; r8  = (s3+s2)*cPI2_8
+	MOV	r9, r9, ASR #8
+	MUL	r9, r14,r9		; r9  = (s0+s1)*cPI2_8
+	MOV	r12,r12,ASR #8
+	MUL	r12,r14,r12		; r12 = (s3-s2)*cPI2_8
+	STMIA	r1!,{r5,r8,r9,r12}
+
+	; block2
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[12]
+					; r6 = x[13]
+					; r9 = x[14]
+					; r10= x[15]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[ 4]
+					; r8 = x[ 5]
+					; r11= x[ 6]
+					; r12= x[ 7]
+	SUB	r14,r7, r8		; r14= s0 = x[ 4] - x[ 5]
+	ADD	r8, r14,r8, LSL #1	; r8 =      x[ 4] + x[ 5]  -> x[13]
+	SUB	r7, r12,r11		; r7 = s1 = x[ 7] - x[ 6]
+	ADD	r11,r7, r11, LSL #1	; r11=      x[ 7] + x[ 6]  -> x[15]
+	SUB	r5, r5, r6		; r5 = s2 = x[12] - x[13]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[12] + x[13]  -> x[12]
+	SUB	r12,r9, r10		; r12= s3 = x[14] - x[15]
+	ADD	r10,r12,r10,LSL #1	; r10=      x[14] + x[15]  -> x[14]
+	STMIA	r4!,{r6,r8,r10,r11}
+	STMIA	r1!,{r5,r7,r12,r14}
+
+	; mdct_butterfly_8
+	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	; mdct_butterfly_8
+	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	; mdct_butterfly16 (2nd version)
+	; block 1
+	ADD	r1,r1,#16*4-8*4
+	ADD	r4,r1,#8*4
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[ 8]
+					; r6 = x[ 9]
+					; r9 = x[10]
+					; r10= x[11]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[0]
+					; r8 = x[1]
+					; r11= x[2]
+					; r12= x[3]
+	SUB	r5, r5, r6		; r5 = s0 = x[ 8] - x[ 9]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[ 8] + x[ 9]  -> x[ 8]
+	SUB	r9, r9, r10		; r9 = s1 = x[10] - x[11]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[10] + x[11]  -> x[10]
+	SUB	r8, r8, r7		; r8 = s2 = x[ 1] - x[ 0]
+	ADD	r7, r8, r7, LSL #1	; r7 =      x[ 1] + x[ 0]  -> x[ 9]
+	SUB	r12,r12,r11		; r12= s3 = x[ 3] - x[ 2]
+	ADD	r11,r12,r11, LSL #1	; r11=      x[ 3] + x[ 2]  -> x[11]
+	MOV	r14,#0xb5		; r14= cPI2_8
+	STMIA	r4!,{r6,r7,r10,r11}
+
+	SUB	r5, r5, r9		; r5 = s0 - s1
+	ADD	r9, r5, r9, LSL #1	; r9 = s0 + s1
+	MOV	r5, r5, ASR #8
+	MUL	r5, r14,r5		; r5  = (s0-s1)*cPI2_8
+	SUB	r12,r12,r8		; r12= s3 - s2
+	ADD	r8, r12,r8, LSL #1	; r8 = s3 + s2
+
+	MOV	r8, r8, ASR #8
+	MUL	r8, r14,r8		; r8  = (s3+s2)*cPI2_8
+	MOV	r9, r9, ASR #8
+	MUL	r9, r14,r9		; r9  = (s0+s1)*cPI2_8
+	MOV	r12,r12,ASR #8
+	MUL	r12,r14,r12		; r12 = (s3-s2)*cPI2_8
+	STMIA	r1!,{r5,r8,r9,r12}
+
+	; block2
+	LDMIA	r4,{r5,r6,r9,r10}	; r5 = x[12]
+					; r6 = x[13]
+					; r9 = x[14]
+					; r10= x[15]
+	LDMIA	r1,{r7,r8,r11,r12}	; r7 = x[ 4]
+					; r8 = x[ 5]
+					; r11= x[ 6]
+					; r12= x[ 7]
+	SUB	r5, r5, r6		; r5 = s2 = x[12] - x[13]
+	ADD	r6, r5, r6, LSL #1	; r6 =      x[12] + x[13]  -> x[12]
+	SUB	r9, r9, r10		; r9 = s3 = x[14] - x[15]
+	ADD	r10,r9, r10,LSL #1	; r10=      x[14] + x[15]  -> x[14]
+	SUB	r14,r7, r8		; r14= s0 = x[ 4] - x[ 5]
+	ADD	r8, r14,r8, LSL #1	; r8 =      x[ 4] + x[ 5]  -> x[13]
+	SUB	r7, r12,r11		; r7 = s1 = x[ 7] - x[ 6]
+	ADD	r11,r7, r11, LSL #1	; r11=      x[ 7] + x[ 6]  -> x[15]
+	STMIA	r4!,{r6,r8,r10,r11}
+	STMIA	r1!,{r5,r7,r9,r14}
+
+	; mdct_butterfly_8
+	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	; mdct_butterfly_8
+	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
+					; r6 = x[0]
+					; r7 = x[1]
+					; r8 = x[2]
+					; r9 = x[3]
+					; r10= x[4]
+					; r11= x[5]
+					; r12= x[6]
+					; r14= x[7]
+	ADD	r6, r6, r7		; r6 = s0 = x[0] + x[1]
+	SUB	r7, r6, r7, LSL #1	; r7 = s1 = x[0] - x[1]
+	ADD	r8, r8, r9		; r8 = s2 = x[2] + x[3]
+	SUB	r9, r8, r9, LSL #1	; r9 = s3 = x[2] - x[3]
+	ADD	r10,r10,r11		; r10= s4 = x[4] + x[5]
+	SUB	r11,r10,r11,LSL #1	; r11= s5 = x[4] - x[5]
+	ADD	r12,r12,r14		; r12= s6 = x[6] + x[7]
+	SUB	r14,r12,r14,LSL #1	; r14= s7 = x[6] - x[7]
+
+	ADD	r2, r11,r9		; r2 = x[0] = s5 + s3
+	SUB	r4, r2, r9, LSL #1	; r4 = x[2] = s5 - s3
+	SUB	r3, r14,r7		; r3 = x[1] = s7 - s1
+	ADD	r5, r3, r7, LSL #1	; r5 = x[3] = s7 + s1
+	SUB	r10,r10,r6		; r10= x[4] = s4 - s0
+	SUB	r11,r12,r8		; r11= x[5] = s6 - s2
+	ADD	r12,r10,r6, LSL #1	; r12= x[6] = s4 + s0
+	ADD	r14,r11,r8, LSL #1	; r14= x[7] = s6 + s2
+	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
+
+	ADD	r1,r1,#8*4
+	SUBS	r0,r0,#64
+	BGT	mdct_bufferflies_loop3
+
+	LDMFD	r13,{r0-r3}
+
+mdct_bitreverse_arm_low
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+	MOV	r4, #0			; r4 = bit = 0
+	ADD	r5, r1, r0, LSL #1	; r5 = w = x + (n>>1)
+	ADR	r6, bitrev
+	SUB	r5, r5, #8
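+	; The loop below walks w down from the top half of the block while
+	; bit counts up; b = bitrev[bit>>6] | (bitrev[bit&0x3f]<<6) is the
+	; 12-bit reversal of bit (two 6-bit lookups in the table at the end
+	; of this file), and the word pair at w is swapped with the pair at
+	; xx = x + (b>>shift) only while w > xx, so each pair moves once.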
+brev_lp
+	LDRB	r7, [r6, r4, LSR #6]
+	AND	r8, r4, #0x3f
+	LDRB	r8, [r6, r8]
+	ADD	r4, r4, #1		; bit++
+	; stall XScale
+	ORR	r7, r7, r8, LSL #6	; r7 = bitrev[bit]
+	MOV	r7, r7, LSR r3
+	ADD	r9, r1, r7, LSL #2	; r9 = xx = x + (b>>shift)
+	CMP	r5, r9			; if (w > xx)
+	LDR	r10,[r5],#-8		;   r10 = w[0]		w -= 2
+	LDRGT	r11,[r5,#12]		;   r11 = w[1]
+	LDRGT	r12,[r9]		;   r12 = xx[0]
+	LDRGT	r14,[r9,#4]		;   r14 = xx[1]
+	STRGT	r10,[r9]		;   xx[0]= w[0]
+	STRGT	r11,[r9,#4]		;   xx[1]= w[1]
+	STRGT	r12,[r5,#8]		;   w[0] = xx[0]
+	STRGT	r14,[r5,#12]		;   w[1] = xx[1]
+	CMP	r5,r1
+	BGT	brev_lp
+
+	; mdct_step7
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+
+	CMP	r2, #4			; r5 = T = (step>=4) ?
+	LDRGE	r5, =sincos_lookup0	;          sincos_lookup0 +
+	LDRLT	r5, =sincos_lookup1	;          sincos_lookup1
+	ADD	r7, r1, r0, LSL #1	; r7 = w1 = x + (n>>1)
+	ADDGE	r5, r5, r2, LSR #1	;		            (step>>1)
+	ADD	r8, r5, #1024		; r8 = Ttop
+step7_loop1
+	LDR	r6, [r1]		; r6 = w0[0]
+	LDR	r9, [r1,#4]		; r9 = w0[1]
+	LDR	r10,[r7,#-8]!		; r10= w1[0]	w1 -= 2
+	LDR	r11,[r7,#4]		; r11= w1[1]
+	LDRB	r14,[r5,#1]		; r14= T[1]
+	LDRB	r12,[r5],r2		; r12= T[0]	T += step
+
+	ADD	r6, r6, r10		; r6 = s0 = w0[0] + w1[0]
+	SUB	r10,r6, r10,LSL #1	; r10= s1b= w0[0] - w1[0]
+	SUB	r11,r11,r9		; r11= s1 = w1[1] - w0[1]
+	ADD	r9, r11,r9, LSL #1	; r9 = s0b= w1[1] + w0[1]
+
+	MOV	r6, r6, ASR #9
+	MUL	r3, r6, r14		; r3   = s0*T[1]
+	MOV	r11,r11,ASR #9
+	MUL	r4, r11,r12		; r4   = s1*T[0]
+	ADD	r3, r3, r4		; r3   = s2 = s0*T[1] + s1*T[0]
+	MUL	r14,r11,r14		; r14  = s1*T[1]
+	MUL	r12,r6, r12		; r12  = s0*T[0]
+	SUB	r14,r14,r12		; r14  = s3 = s1*T[1] - s0*T[0]
+
+	; r9 = s0b<<1
+	; r10= s1b<<1
+	ADD	r9, r3, r9, ASR #1	; r9 = s0b + s2
+	SUB	r3, r9, r3, LSL #1	; r3 = s0b - s2
+
+	SUB	r12,r14,r10,ASR #1	; r12= s3  - s1b
+	ADD	r10,r14,r10,ASR #1	; r10= s3  + s1b
+	STR	r9, [r1],#4
+	STR	r10,[r1],#4		; w0 += 2
+	STR	r3, [r7]
+	STR	r12,[r7,#4]
+
+	CMP	r5,r8
+	BLT	step7_loop1
+
+step7_loop2
+	LDR	r6, [r1]		; r6 = w0[0]
+	LDR	r9, [r1,#4]		; r9 = w0[1]
+	LDR	r10,[r7,#-8]!		; r10= w1[0]	w1 -= 2
+	LDR	r11,[r7,#4]		; r11= w1[1]
+	LDRB	r14,[r5,-r2]!		; r14= T[0]	T -= step
+	LDRB	r12,[r5,#1]		; r12= T[1]
+
+	ADD	r6, r6, r10		; r6 = s0 = w0[0] + w1[0]
+	SUB	r10,r6, r10,LSL #1	; r10= s1b= w0[0] - w1[0]
+	SUB	r11,r11,r9		; r11= s1 = w1[1] - w0[1]
+	ADD	r9, r11,r9, LSL #1	; r9 = s0b= w1[1] + w0[1]
+
+	MOV	r6, r6, ASR #9
+	MUL	r3, r6, r14		; r3   = s0*T[0]
+	MOV	r11,r11,ASR #9
+	MUL	r4, r11,r12		; r4   = s1*T[1]
+	ADD	r3, r3, r4		; r3   = s2 = s0*T[0] + s1*T[1]
+	MUL	r14,r11,r14		; r14  = s1*T[0]
+	MUL	r12,r6, r12		; r12  = s0*T[1]
+	SUB	r14,r14,r12		; r14  = s3 = s1*T[0] - s0*T[1]
+
+	; r9 = s0b<<1
+	; r10= s1b<<1
+	ADD	r9, r3, r9, ASR #1	; r9 = s0b + s2
+	SUB	r3, r9, r3, LSL #1	; r3 = s0b - s2
+
+	SUB	r12,r14,r10,ASR #1	; r12= s3  - s1b
+	ADD	r10,r14,r10,ASR #1	; r10= s3  + s1b
+	STR	r9, [r1],#4
+	STR	r10,[r1],#4		; w0 += 2
+	STR	r3, [r7]
+	STR	r12,[r7,#4]
+
+	CMP	r1,r7
+	BLT	step7_loop2
+
+	LDMFD	r13!,{r0-r3}
+
+	; r0 = points
+	; r1 = in
+	; r2 = step
+	; r3 = shift
+	MOV	r2, r2, ASR #2		; r2 = step >>= 2
+	CMP	r2, #0
+	CMPNE	r2, #1
+	BEQ	mdct_end
+
+	; step > 1 (default case)
+	CMP	r2, #4			; r5 = T = (step>=4) ?
+	LDRGE	r5, =sincos_lookup0	;          sincos_lookup0 +
+	LDRLT	r5, =sincos_lookup1	;          sincos_lookup1
+	ADD	r7, r1, r0, LSL #1	; r7 = iX = x + (n>>1)
+	ADDGE	r5, r5, r2, LSR #1	;		            (step>>1)
+mdct_step8_default
+	LDR	r6, [r1],#4		; r6 =  s0 = x[0]
+	LDR	r8, [r1],#4		; r8 = -s1 = x[1]
+	LDRB	r12,[r5,#1]       	; r12= T[1]
+	LDRB	r14,[r5],r2		; r14= T[0]	T += step
+	RSB	r8, r8, #0		; r8 = s1
+
+	; XPROD31(s0, s1, T[0], T[1], x, x+1)
+	; x[0] = s0 * T[0] + s1 * T[1]      x[1] = s1 * T[0] - s0 * T[1]
+	MOV	r6, r6, ASR #8
+	MOV	r8, r8, ASR #8
+	MUL	r10,r8, r12		; r10  = s1 * T[1]
+	CMP	r1, r7
+	MLA	r10,r6, r14,r10	; r10 += s0 * T[0]
+	RSB	r6, r6, #0		; r6 = -s0
+	MUL	r11,r8, r14		; r11  = s1 * T[0]
+	MLA	r11,r6, r12,r11	; r11 -= s0 * T[1]
+	STR	r10,[r1,#-8]
+	STR	r11,[r1,#-4]
+	BLT	mdct_step8_default
+
+mdct_end
+	MOV	r0, r2
+	LDMFD	r13!,{r4-r11,PC}
+
+bitrev
+	DCB	0
+	DCB	32
+	DCB	16
+	DCB	48
+	DCB	8
+	DCB	40
+	DCB	24
+	DCB	56
+	DCB	4
+	DCB	36
+	DCB	20
+	DCB	52
+	DCB	12
+	DCB	44
+	DCB	28
+	DCB	60
+	DCB	2
+	DCB	34
+	DCB	18
+	DCB	50
+	DCB	10
+	DCB	42
+	DCB	26
+	DCB	58
+	DCB	6
+	DCB	38
+	DCB	22
+	DCB	54
+	DCB	14
+	DCB	46
+	DCB	30
+	DCB	62
+	DCB	1
+	DCB	33
+	DCB	17
+	DCB	49
+	DCB	9
+	DCB	41
+	DCB	25
+	DCB	57
+	DCB	5
+	DCB	37
+	DCB	21
+	DCB	53
+	DCB	13
+	DCB	45
+	DCB	29
+	DCB	61
+	DCB	3
+	DCB	35
+	DCB	19
+	DCB	51
+	DCB	11
+	DCB	43
+	DCB	27
+	DCB	59
+	DCB	7
+	DCB	39
+	DCB	23
+	DCB	55
+	DCB	15
+	DCB	47
+	DCB	31
+	DCB	63
+
+	END
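
Note: the XPROD31/XNPROD31 names used throughout the comments above refer to
the fixed-point complex-product helpers from the C side of Tremolo. The short
C sketch below only illustrates the semantics the comments assume; the
lowercase names and the stdint types are stand-ins (not the project's actual
macros or typedefs), and the low-accuracy routines in this file work on 8-bit
trig bytes with the operands pre-shifted by 8 rather than shifting a 64-bit
product down by 31.

    #include <stdint.h>

    /* Sketch only: *x = (a*t - b*v)>>31, *y = (b*t + a*v)>>31,
     * matching comments such as "x2[0] = (s0*T[0] - s1*T[1])>>31". */
    static inline void xnprod31_sketch(int32_t a, int32_t b,
                                       int32_t t, int32_t v,
                                       int32_t *x, int32_t *y)
    {
      *x = (int32_t)(((int64_t)a*t - (int64_t)b*v) >> 31);
      *y = (int32_t)(((int64_t)b*t + (int64_t)a*v) >> 31);
    }

    /* Sketch only: *x = (a*t + b*v)>>31, *y = (b*t - a*v)>>31,
     * matching "x[0] = s0*T[0] + s1*T[1], x[1] = s1*T[0] - s0*T[1]". */
    static inline void xprod31_sketch(int32_t a, int32_t b,
                                      int32_t t, int32_t v,
                                      int32_t *x, int32_t *y)
    {
      *x = (int32_t)(((int64_t)a*t + (int64_t)b*v) >> 31);
      *y = (int32_t)(((int64_t)b*t - (int64_t)a*v) >> 31);
    }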

Modified: branches/lowmem-branch/Tremolo/mdct_lookup.h
===================================================================
--- branches/lowmem-branch/Tremolo/mdct_lookup.h	2010-05-15 16:17:50 UTC (rev 17217)
+++ branches/lowmem-branch/Tremolo/mdct_lookup.h	2010-05-15 20:40:20 UTC (rev 17218)
@@ -18,7 +18,10 @@
 #include "os_types.h"
 
 /* {sin(2*i*PI/4096), cos(2*i*PI/4096)}, with i = 0 to 512 */
-static LOOKUP_T sincos_lookup0[1026] = {
+#ifndef _ARM_ASSEM_
+static
+#endif
+LOOKUP_T sincos_lookup0[1026] = {
   X(0x00000000), X(0x7fffffff), X(0x003243f5), X(0x7ffff621),
   X(0x006487e3), X(0x7fffd886), X(0x0096cbc1), X(0x7fffa72c),
   X(0x00c90f88), X(0x7fff6216), X(0x00fb5330), X(0x7fff0943),
@@ -279,7 +282,10 @@
   };
   
   /* {sin((2*i+1)*PI/4096), cos((2*i+1)*PI/4096)}, with i = 0 to 511 */
-static LOOKUP_T sincos_lookup1[1024] = {
+#ifndef _ARM_ASSEM_
+static
+#endif
+LOOKUP_T sincos_lookup1[1024] = {
   X(0x001921fb), X(0x7ffffd88), X(0x004b65ee), X(0x7fffe9cb),
   X(0x007da9d4), X(0x7fffc251), X(0x00afeda8), X(0x7fff8719),
   X(0x00e23160), X(0x7fff3824), X(0x011474f6), X(0x7ffed572),


