[FFmpeg-cvslog] r19806 - in trunk/libavcodec: Makefile arm/fft_neon.S arm/mdct_neon.S dsputil.h fft.c

Author: mru
Date: Thu Sep 10 10:50:03 2009
New Revision: 19806

Log:
ARM: NEON optimised FFT and MDCT

Vorbis and AC3 ~3x faster.

Parts by Naotoshi Nojiri, naonoj gmail

Added:
   trunk/libavcodec/arm/fft_neon.S
   trunk/libavcodec/arm/mdct_neon.S
Modified:
   trunk/libavcodec/Makefile
   trunk/libavcodec/dsputil.h
   trunk/libavcodec/fft.c

Modified: trunk/libavcodec/Makefile
==============================================================================
--- trunk/libavcodec/Makefile	Thu Sep 10 10:49:59 2009	(r19805)
+++ trunk/libavcodec/Makefile	Thu Sep 10 10:50:03 2009	(r19806)
@@ -496,6 +496,10 @@ OBJS-$(HAVE_ARMVFP)                    +
 OBJS-$(HAVE_IWMMXT)                    += arm/dsputil_iwmmxt.o          \
                                           arm/mpegvideo_iwmmxt.o        \
 
+NEON-OBJS-$(CONFIG_FFT)                += arm/fft_neon.o                \
+
+NEON-OBJS-$(CONFIG_MDCT)               += arm/mdct_neon.o               \
+
 NEON-OBJS-$(CONFIG_H264_DECODER)       += arm/h264dsp_neon.o            \
                                           arm/h264idct_neon.o           \
 

Added: trunk/libavcodec/arm/fft_neon.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ trunk/libavcodec/arm/fft_neon.S	Thu Sep 10 10:50:03 2009	(r19806)
@@ -0,0 +1,369 @@
+/*
+ * ARM NEON optimised FFT
+ *
+ * Copyright (c) 2009 Mans Rullgard <mans at mansr.com>
+ * Copyright (c) 2009 Naotoshi Nojiri
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+#define M_SQRT1_2 0.70710678118654752440
+
+        .text
+
+function fft4_neon
+        vld1.32         {d0-d3}, [r0,:128]
+
+        vext.32         q8,  q1,  q1,  #1       @ d16=i2,r3 d17=i3,r2
+        vsub.f32        d6,  d0,  d1            @ r0-r1,i0-i1
+        vsub.f32        d7,  d16, d17           @ r3-r2,i2-i3
+        vadd.f32        d4,  d0,  d1            @ r0+r1,i0+i1
+        vadd.f32        d5,  d2,  d3            @ i2+i3,r2+r3
+        vadd.f32        d1,  d6,  d7
+        vsub.f32        d3,  d6,  d7
+        vadd.f32        d0,  d4,  d5
+        vsub.f32        d2,  d4,  d5
+
+        vst1.32         {d0-d3}, [r0,:128]
+
+        bx              lr
+.endfunc
+
+function fft8_neon
+        mov             r1,  r0
+        vld1.32         {d0-d3},   [r1,:128]!
+        vld1.32         {d16-d19}, [r1,:128]
+
+        movw            r2,  #0x04f3            @ sqrt(1/2)
+        movt            r2,  #0x3f35
+        eor             r3,  r2,  #1<<31
+        vdup.32         d31, r2
+
+        vext.32         q11, q1,  q1,  #1       @ i2,r3,i3,r2
+        vadd.f32        d4,  d16, d17           @ r4+r5,i4+i5
+        vmov            d28, r3,  r2
+        vadd.f32        d5,  d18, d19           @ r6+r7,i6+i7
+        vsub.f32        d17, d16, d17           @ r4-r5,i4-i5
+        vsub.f32        d19, d18, d19           @ r6-r7,i6-i7
+        vrev64.32       d29, d28
+        vadd.f32        d20, d0,  d1            @ r0+r1,i0+i1
+        vadd.f32        d21, d2,  d3            @ r2+r3,i2+i3
+        vmul.f32        d26, d17, d28           @ -a2r*w,a2i*w
+        vext.32         q3,  q2,  q2,  #1
+        vmul.f32        d27, d19, d29           @ a3r*w,-a3i*w
+        vsub.f32        d23, d22, d23           @ i2-i3,r3-r2
+        vsub.f32        d22, d0,  d1            @ r0-r1,i0-i1
+        vmul.f32        d24, d17, d31           @ a2r*w,a2i*w
+        vmul.f32        d25, d19, d31           @ a3r*w,a3i*w
+        vadd.f32        d0,  d20, d21
+        vsub.f32        d2,  d20, d21
+        vadd.f32        d1,  d22, d23
+        vrev64.32       q13, q13
+        vsub.f32        d3,  d22, d23
+        vsub.f32        d6,  d6,  d7
+        vadd.f32        d24, d24, d26           @ a2r+a2i,a2i-a2r   t1,t2
+        vadd.f32        d25, d25, d27           @ a3r-a3i,a3i+a3r   t5,t6
+        vadd.f32        d7,  d4,  d5
+        vsub.f32        d18, d2,  d6
+        vext.32         q13, q12, q12, #1
+        vadd.f32        d2,  d2,  d6
+        vsub.f32        d16, d0,  d7
+        vadd.f32        d5,  d25, d24
+        vsub.f32        d4,  d26, d27
+        vadd.f32        d0,  d0,  d7
+        vsub.f32        d17, d1,  d5
+        vsub.f32        d19, d3,  d4
+        vadd.f32        d3,  d3,  d4
+        vadd.f32        d1,  d1,  d5
+
+        vst1.32         {d16-d19}, [r1,:128]
+        vst1.32         {d0-d3},   [r0,:128]
+
+        bx              lr
+.endfunc
+
+function fft16_neon
+        movrel          r1, mppm
+        vld1.32         {d16-d19}, [r0,:128]!   @ q8{r0,i0,r1,i1} q9{r2,i2,r3,i3}
+        pld             [r0, #32]
+        vld1.32         {d2-d3}, [r1,:128]
+        vext.32         q13, q9,  q9,  #1
+        vld1.32         {d22-d25}, [r0,:128]!   @ q11{r4,i4,r5,i5} q12{r6,i6,r7,i7}
+        vadd.f32        d4,  d16, d17
+        vsub.f32        d5,  d16, d17
+        vadd.f32        d18, d18, d19
+        vsub.f32        d19, d26, d27
+
+        vadd.f32        d20, d22, d23
+        vsub.f32        d22, d22, d23
+        vsub.f32        d23, d24, d25
+        vadd.f32        q8,  q2,  q9            @ {r0,i0,r1,i1}
+        vadd.f32        d21, d24, d25
+        vmul.f32        d24, d22, d2
+        vsub.f32        q9,  q2,  q9            @ {r2,i2,r3,i3}
+        vmul.f32        d25, d23, d3
+        vuzp.32         d16, d17                @ {r0,r1,i0,i1}
+        vmul.f32        q1,  q11, d2[1]
+        vuzp.32         d18, d19                @ {r2,r3,i2,i3}
+        vrev64.32       q12, q12
+        vadd.f32        q11, q12, q1            @ {t1a,t2a,t5,t6}
+        vld1.32         {d24-d27}, [r0,:128]!   @ q12{r8,i8,r9,i9} q13{r10,i10,r11,i11}
+        vzip.32         q10, q11
+        vld1.32         {d28-d31}, [r0,:128]    @ q14{r12,i12,r13,i13} q15{r14,i14,r15,i15}
+        vadd.f32        d0,  d22, d20
+        vadd.f32        d1,  d21, d23
+        vsub.f32        d2,  d21, d23
+        vsub.f32        d3,  d22, d20
+        sub             r0,  r0,  #96
+        vext.32         q13, q13, q13, #1
+        vsub.f32        q10, q8,  q0            @ {r4,r5,i4,i5}
+        vadd.f32        q8,  q8,  q0            @ {r0,r1,i0,i1}
+        vext.32         q15, q15, q15, #1
+        vsub.f32        q11, q9,  q1            @ {r6,r7,i6,i7}
+        vswp            d25, d26                @ q12{r8,i8,i10,r11} q13{r9,i9,i11,r10}
+        vadd.f32        q9,  q9,  q1            @ {r2,r3,i2,i3}
+        vswp            d29, d30                @ q14{r12,i12,i14,r15} q15{r13,i13,i15,r14}
+        vadd.f32        q0,  q12, q13           @ {t1,t2,t5,t6}
+        vadd.f32        q1,  q14, q15           @ {t1a,t2a,t5a,t6a}
+        movrel          r2,  ff_cos_16
+        vsub.f32        q13, q12, q13           @ {t3,t4,t7,t8}
+        vrev64.32       d1,  d1
+        vsub.f32        q15, q14, q15           @ {t3a,t4a,t7a,t8a}
+        vrev64.32       d3,  d3
+        movrel          r3,  pmmp
+        vswp            d1,  d26                @ q0{t1,t2,t3,t4} q13{t6,t5,t7,t8}
+        vswp            d3,  d30                @ q1{t1a,t2a,t3a,t4a} q15{t6a,t5a,t7a,t8a}
+        vadd.f32        q12, q0,  q13           @ {r8,i8,r9,i9}
+        vadd.f32        q14, q1,  q15           @ {r12,i12,r13,i13}
+        vld1.32         {d4-d5},  [r2,:64]
+        vsub.f32        q13, q0,  q13           @ {r10,i10,r11,i11}
+        vsub.f32        q15, q1,  q15           @ {r14,i14,r15,i15}
+        vswp            d25, d28                @ q12{r8,i8,r12,i12} q14{r9,i9,r13,i13}
+        vld1.32         {d6-d7},  [r3,:128]
+        vrev64.32       q1,  q14
+        vmul.f32        q14, q14, d4[1]
+        vmul.f32        q1,  q1,  q3
+        vmla.f32        q14, q1,  d5[1]         @ {t1a,t2a,t5a,t6a}
+        vswp            d27, d30                @ q13{r10,i10,r14,i14} q15{r11,i11,r15,i15}
+        vzip.32         q12, q14
+        vadd.f32        d0,  d28, d24
+        vadd.f32        d1,  d25, d29
+        vsub.f32        d2,  d25, d29
+        vsub.f32        d3,  d28, d24
+        vsub.f32        q12, q8,  q0            @ {r8,r9,i8,i9}
+        vadd.f32        q8,  q8,  q0            @ {r0,r1,i0,i1}
+        vsub.f32        q14, q10, q1            @ {r12,r13,i12,i13}
+        mov             r1,  #32
+        vadd.f32        q10, q10, q1            @ {r4,r5,i4,i5}
+        vrev64.32       q0,  q13
+        vmul.f32        q13, q13, d5[0]
+        vrev64.32       q1,  q15
+        vmul.f32        q15, q15, d5[1]
+        vst2.32         {d16-d17},[r0,:128], r1
+        vmul.f32        q0,  q0,  q3
+        vst2.32         {d20-d21},[r0,:128], r1
+        vmul.f32        q1,  q1,  q3
+        vmla.f32        q13, q0,  d5[0]         @ {t1,t2,t5,t6}
+        vmla.f32        q15, q1,  d4[1]         @ {t1a,t2a,t5a,t6a}
+        vst2.32         {d24-d25},[r0,:128], r1
+        vst2.32         {d28-d29},[r0,:128]
+        vzip.32         q13, q15
+        sub             r0, r0, #80
+        vadd.f32        d0,  d30, d26
+        vadd.f32        d1,  d27, d31
+        vsub.f32        d2,  d27, d31
+        vsub.f32        d3,  d30, d26
+        vsub.f32        q13, q9,  q0            @ {r10,r11,i10,i11}
+        vadd.f32        q9,  q9,  q0            @ {r2,r3,i2,i3}
+        vsub.f32        q15, q11, q1            @ {r14,r15,i14,i15}
+        vadd.f32        q11, q11, q1            @ {r6,r7,i6,i7}
+        vst2.32         {d18-d19},[r0,:128], r1
+        vst2.32         {d22-d23},[r0,:128], r1
+        vst2.32         {d26-d27},[r0,:128], r1
+        vst2.32         {d30-d31},[r0,:128]
+        bx              lr
+.endfunc
+
+function fft_pass_neon
+        push            {r4-r6,lr}
+        mov             r6,  r2                 @ n
+        lsl             r5,  r2,  #3            @ 2 * n * sizeof FFTSample
+        lsl             r4,  r2,  #4            @ 2 * n * sizeof FFTComplex
+        lsl             r2,  r2,  #5            @ 4 * n * sizeof FFTComplex
+        add             r3,  r2,  r4
+        add             r4,  r4,  r0            @ &z[o1]
+        add             r2,  r2,  r0            @ &z[o2]
+        add             r3,  r3,  r0            @ &z[o3]
+        vld1.32         {d20-d21},[r2,:128]     @ {z[o2],z[o2+1]}
+        movrel          r12, pmmp
+        vld1.32         {d22-d23},[r3,:128]     @ {z[o3],z[o3+1]}
+        add             r5,  r5,  r1            @ wim
+        vld1.32         {d6-d7},  [r12,:128]    @ pmmp
+        vswp            d21, d22
+        vld1.32         {d4},     [r1,:64]!     @ {wre[0],wre[1]}
+        sub             r5,  r5,  #4            @ wim--
+        vrev64.32       q1,  q11
+        vmul.f32        q11, q11, d4[1]
+        vmul.f32        q1,  q1,  q3
+        vld1.32         {d5[0]},  [r5,:32]      @ d5[0] = wim[-1]
+        vmla.f32        q11, q1,  d5[0]         @ {t1a,t2a,t5a,t6a}
+        vld2.32         {d16-d17},[r0,:128]     @ {z[0],z[1]}
+        sub             r6, r6, #1              @ n--
+        vld2.32         {d18-d19},[r4,:128]     @ {z[o1],z[o1+1]}
+        vzip.32         q10, q11
+        vadd.f32        d0,  d22, d20
+        vadd.f32        d1,  d21, d23
+        vsub.f32        d2,  d21, d23
+        vsub.f32        d3,  d22, d20
+        vsub.f32        q10, q8,  q0
+        vadd.f32        q8,  q8,  q0
+        vsub.f32        q11, q9,  q1
+        vadd.f32        q9,  q9,  q1
+        vst2.32         {d20-d21},[r2,:128]!    @ {z[o2],z[o2+1]}
+        vst2.32         {d16-d17},[r0,:128]!    @ {z[0],z[1]}
+        vst2.32         {d22-d23},[r3,:128]!    @ {z[o3],z[o3+1]}
+        vst2.32         {d18-d19},[r4,:128]!    @ {z[o1],z[o1+1]}
+        sub             r5,  r5,  #8            @ wim -= 2
+1:
+        vld1.32         {d20-d21},[r2,:128]     @ {z[o2],z[o2+1]}
+        vld1.32         {d22-d23},[r3,:128]     @ {z[o3],z[o3+1]}
+        vswp            d21, d22
+        vld1.32         {d4}, [r1]!             @ {wre[0],wre[1]}
+        vrev64.32       q0,  q10
+        vmul.f32        q10, q10, d4[0]
+        vrev64.32       q1,  q11
+        vmul.f32        q11, q11, d4[1]
+        vld1.32         {d5}, [r5]              @ {wim[-1],wim[0]}
+        vmul.f32        q0,  q0,  q3
+        sub             r5,  r5,  #8            @ wim -= 2
+        vmul.f32        q1,  q1,  q3
+        vmla.f32        q10, q0,  d5[1]         @ {t1,t2,t5,t6}
+        vmla.f32        q11, q1,  d5[0]         @ {t1a,t2a,t5a,t6a}
+        vld2.32         {d16-d17},[r0,:128]     @ {z[0],z[1]}
+        subs            r6,  r6,  #1            @ n--
+        vld2.32         {d18-d19},[r4,:128]     @ {z[o1],z[o1+1]}
+        vzip.32         q10, q11
+        vadd.f32        d0,  d22, d20
+        vadd.f32        d1,  d21, d23
+        vsub.f32        d2,  d21, d23
+        vsub.f32        d3,  d22, d20
+        vsub.f32        q10, q8,  q0
+        vadd.f32        q8,  q8,  q0
+        vsub.f32        q11, q9,  q1
+        vadd.f32        q9,  q9,  q1
+        vst2.32         {d20-d21}, [r2,:128]!   @ {z[o2],z[o2+1]}
+        vst2.32         {d16-d17}, [r0,:128]!   @ {z[0],z[1]}
+        vst2.32         {d22-d23}, [r3,:128]!   @ {z[o3],z[o3+1]}
+        vst2.32         {d18-d19}, [r4,:128]!   @ {z[o1],z[o1+1]}
+        bne             1b
+
+        pop             {r4-r6,pc}
+.endfunc
+
+.macro  def_fft n, n2, n4
+        .align 6
+function fft\n\()_neon
+        push            {r4, lr}
+        mov             r4,  r0
+        bl              fft\n2\()_neon
+        add             r0,  r4,  #\n4*2*8
+        bl              fft\n4\()_neon
+        add             r0,  r4,  #\n4*3*8
+        bl              fft\n4\()_neon
+        mov             r0,  r4
+        pop             {r4, lr}
+        movrel          r1,  ff_cos_\n
+        mov             r2,  #\n4/2
+        b               fft_pass_neon
+.endfunc
+.endm
+
+        def_fft    32,    16,     8
+        def_fft    64,    32,    16
+        def_fft   128,    64,    32
+        def_fft   256,   128,    64
+        def_fft   512,   256,   128
+        def_fft  1024,   512,   256
+        def_fft  2048,  1024,   512
+        def_fft  4096,  2048,  1024
+        def_fft  8192,  4096,  2048
+        def_fft 16384,  8192,  4096
+        def_fft 32768, 16384,  8192
+        def_fft 65536, 32768, 16384
+
+function ff_fft_calc_neon, export=1
+        ldr             r2,  [r0]
+        sub             r2,  r2,  #2
+        movrel          r3,  fft_tab_neon
+        ldr             r3,  [r3, r2, lsl #2]
+        mov             r0,  r1
+        bx              r3
+.endfunc
+
+function ff_fft_permute_neon, export=1
+        push            {r4,lr}
+        mov             r12, #1
+        ldr             r2,  [r0]       @ nbits
+        ldr             r3,  [r0, #20]  @ tmp_buf
+        ldr             r0,  [r0, #8]   @ revtab
+        lsl             r12, r12, r2
+        mov             r2,  r12
+1:
+        vld1.32         {d0-d1}, [r1,:128]!
+        ldr             r4,  [r0], #4
+        uxtah           lr,  r3,  r4
+        uxtah           r4,  r3,  r4,  ror #16
+        vst1.32         {d0}, [lr,:64]
+        vst1.32         {d1}, [r4,:64]
+        subs            r12, r12, #2
+        bgt             1b
+
+        sub             r1,  r1,  r2,  lsl #3
+1:
+        vld1.32         {d0-d3}, [r3,:128]!
+        vst1.32         {d0-d3}, [r1,:128]!
+        subs            r2,  r2,  #4
+        bgt             1b
+
+        pop             {r4,pc}
+.endfunc
+
+        .section .rodata
+        .align 4
+fft_tab_neon:
+        .word fft4_neon
+        .word fft8_neon
+        .word fft16_neon
+        .word fft32_neon
+        .word fft64_neon
+        .word fft128_neon
+        .word fft256_neon
+        .word fft512_neon
+        .word fft1024_neon
+        .word fft2048_neon
+        .word fft4096_neon
+        .word fft8192_neon
+        .word fft16384_neon
+        .word fft32768_neon
+        .word fft65536_neon
+        .size fft_tab_neon, . - fft_tab_neon
+
+        .align 4
+pmmp:   .float  +1.0, -1.0, -1.0, +1.0
+mppm:   .float  -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
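
The glue at the end of fft_neon.S mirrors what the C backends do through the
FFTContext function pointers: ff_fft_calc_neon loads nbits from the context,
indexes fft_tab_neon by nbits-2 and tail-calls the matching fftN_neon routine,
while ff_fft_permute_neon consumes two 16-bit revtab entries per 32-bit load
and treats each entry as a ready-made byte offset into tmp_buf (see the
revtab_shift change in fft.c below).  A rough scalar sketch of the same
control flow, with illustrative names and only the behaviour visible in the
assembly:

    #include <stdint.h>
    #include <string.h>

    typedef struct { float re, im; } FFTComplex;    /* 8 bytes per element */
    typedef void (*fft_fn)(FFTComplex *z);

    /* ff_fft_calc_neon: index the word table of fftN_neon entry points by
     * nbits-2 and jump to the selected routine. */
    static void fft_calc_sketch(const fft_fn tab[], int nbits, FFTComplex *z)
    {
        tab[nbits - 2](z);
    }

    /* ff_fft_permute_neon: scatter z through revtab into tmp_buf, then copy
     * the permuted data back.  Each revtab entry is already a byte offset,
     * so it is added straight to the tmp_buf base pointer. */
    static void fft_permute_sketch(int nbits, const uint16_t *revtab,
                                   FFTComplex *tmp_buf, FFTComplex *z)
    {
        int n = 1 << nbits;
        for (int i = 0; i < n; i++)
            *(FFTComplex *)((char *)tmp_buf + revtab[i]) = z[i];
        memcpy(z, tmp_buf, n * sizeof(*z));
    }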

Added: trunk/libavcodec/arm/mdct_neon.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ trunk/libavcodec/arm/mdct_neon.S	Thu Sep 10 10:50:03 2009	(r19806)
@@ -0,0 +1,178 @@
+/*
+ * ARM NEON optimised MDCT
+ * Copyright (c) 2009 Mans Rullgard <mans at mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+        .fpu neon
+        .text
+
+function ff_imdct_half_neon, export=1
+        push            {r4-r8,lr}
+
+        mov             r12, #1
+        ldr             lr,  [r0, #4]           @ nbits
+        ldr             r4,  [r0, #8]           @ tcos
+        ldr             r5,  [r0, #12]          @ tsin
+        ldr             r3,  [r0, #24]          @ revtab
+        lsl             r12, r12, lr            @ n  = 1 << nbits
+        lsr             lr,  r12, #2            @ n4 = n >> 2
+        add             r7,  r2,  r12,  lsl #1
+        mov             r12,  #-16
+        sub             r7,  r7,  #16
+
+        vld1.32         {d16-d17},[r7,:128],r12 @ d16=x,n1 d17=x,n0
+        vld1.32         {d0-d1},  [r2,:128]!    @ d0 =m0,x d1 =m1,x
+        vld1.32         {d2},     [r4,:64]!     @ d2=c0,c1
+        vld1.32         {d3},     [r5,:64]!     @ d3=s0,s1
+        vuzp.32         d17, d16
+        vuzp.32         d0,  d1
+        vmul.f32        d6,  d16, d2
+        vmul.f32        d7,  d0,  d2
+1:
+        subs            lr,  lr,  #2
+        ldr             r6,  [r3], #4
+        vmul.f32        d4,  d0,  d3
+        vmul.f32        d5,  d16, d3
+        vsub.f32        d4,  d6,  d4
+        vadd.f32        d5,  d5,  d7
+        uxtah           r8,  r1,  r6,  ror #16
+        uxtah           r6,  r1,  r6
+        beq             1f
+        vld1.32         {d16-d17},[r7,:128],r12
+        vld1.32         {d0-d1},  [r2,:128]!
+        vuzp.32         d17, d16
+        vld1.32         {d2},     [r4,:64]!
+        vuzp.32         d0,  d1
+        vmul.f32        d6,  d16, d2
+        vld1.32         {d3},     [r5,:64]!
+        vmul.f32        d7,  d0,  d2
+        vst2.32         {d4[0],d5[0]}, [r6,:64]
+        vst2.32         {d4[1],d5[1]}, [r8,:64]
+        b               1b
+1:
+        vst2.32         {d4[0],d5[0]}, [r6,:64]
+        vst2.32         {d4[1],d5[1]}, [r8,:64]
+
+        mov             r4,  r0
+        mov             r6,  r1
+        add             r0,  r0,  #16
+        bl              ff_fft_calc_neon
+
+        mov             r12, #1
+        ldr             lr,  [r4, #4]           @ nbits
+        ldr             r5,  [r4, #12]          @ tsin
+        ldr             r4,  [r4, #8]           @ tcos
+        lsl             r12, r12, lr            @ n  = 1 << nbits
+        lsr             lr,  r12, #3            @ n8 = n >> 3
+
+        add             r4,  r4,  lr,  lsl #2
+        add             r5,  r5,  lr,  lsl #2
+        add             r6,  r6,  lr,  lsl #3
+        sub             r1,  r4,  #8
+        sub             r2,  r5,  #8
+        sub             r3,  r6,  #16
+
+        mov             r7,  #-16
+        mov             r12, #-8
+        mov             r8,  r6
+        mov             r0,  r3
+
+        vld1.32         {d0-d1},  [r3,:128], r7 @ d0 =i1,r1 d1 =i0,r0
+        vld1.32         {d20-d21},[r6,:128]!    @ d20=i2,r2 d21=i3,r3
+        vld1.32         {d18},    [r2,:64], r12 @ d18=s1,s0
+        vuzp.32         d20, d21
+        vuzp.32         d0,  d1
+1:
+        subs            lr,  lr,  #2
+        vmul.f32        d7,  d0,  d18
+        vld1.32         {d19},    [r5,:64]!     @ d19=s2,s3
+        vmul.f32        d4,  d1,  d18
+        vld1.32         {d16},    [r1,:64], r12 @ d16=c1,c0
+        vmul.f32        d5,  d21, d19
+        vld1.32         {d17},    [r4,:64]!     @ d17=c2,c3
+        vmul.f32        d6,  d20, d19
+        vmul.f32        d22, d1,  d16
+        vmul.f32        d23, d21, d17
+        vmul.f32        d24, d0,  d16
+        vmul.f32        d25, d20, d17
+        vadd.f32        d7,  d7,  d22
+        vadd.f32        d6,  d6,  d23
+        vsub.f32        d4,  d4,  d24
+        vsub.f32        d5,  d5,  d25
+        beq             1f
+        vld1.32         {d0-d1},  [r3,:128], r7
+        vld1.32         {d20-d21},[r6,:128]!
+        vld1.32         {d18},    [r2,:64], r12
+        vuzp.32         d20, d21
+        vuzp.32         d0,  d1
+        vrev64.32       q3,  q3
+        vtrn.32         d4,  d6
+        vtrn.32         d5,  d7
+        vswp            d5,  d6
+        vst1.32         {d4-d5},  [r0,:128], r7
+        vst1.32         {d6-d7},  [r8,:128]!
+        b               1b
+1:
+        vrev64.32       q3,  q3
+        vtrn.32         d4,  d6
+        vtrn.32         d5,  d7
+        vswp            d5,  d6
+        vst1.32         {d4-d5},  [r0,:128]
+        vst1.32         {d6-d7},  [r8,:128]
+
+        pop             {r4-r8,pc}
+.endfunc
+
+function ff_imdct_calc_neon, export=1
+        push            {r4-r6,lr}
+
+        ldr             r3,  [r0, #4]
+        mov             r4,  #1
+        mov             r5,  r1
+        lsl             r4,  r4,  r3
+        add             r1,  r1,  r4
+
+        bl              ff_imdct_half_neon
+
+        add             r0,  r5,  r4,  lsl #2
+        add             r1,  r5,  r4,  lsl #1
+        sub             r0,  r0,  #8
+        sub             r2,  r1,  #16
+        mov             r3,  #-16
+        mov             r6,  #-8
+        vmov.i32        d30, #1<<31
+1:
+        vld1.32         {d0-d1},  [r2,:128], r3
+        pld             [r0, #-16]
+        vrev64.32       q0,  q0
+        vld1.32         {d2-d3},  [r1,:128]!
+        veor            d4,  d1,  d30
+        pld             [r2, #-16]
+        vrev64.32       q1,  q1
+        veor            d5,  d0,  d30
+        vst1.32         {d2},     [r0,:64], r6
+        vst1.32         {d3},     [r0,:64], r6
+        vst1.32         {d4-d5},  [r5,:128]!
+        subs            r4,  r4,  #16
+        bgt             1b
+
+        pop             {r4-r6,pc}
+.endfunc
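
ff_imdct_calc_neon is layered on ff_imdct_half_neon: the half transform is
written into the middle of the output buffer (output + n/4), and the loop that
follows mirrors it outwards, flipping signs by XORing the float sign bit
(veor with the 1<<31 constant in d30).  A scalar sketch of that structure,
assuming the MDCTContext/FFTSample declarations from dsputil.h:

    #include "dsputil.h"

    /* Illustrative only; the real work is done by the assembly above. */
    static void imdct_calc_sketch(MDCTContext *s, FFTSample *output,
                                  const FFTSample *input)
    {
        int n  = 1 << s->nbits;
        int n2 = n >> 1;
        int n4 = n >> 2;

        ff_imdct_half_neon(s, output + n4, input);   /* fills output[n4..3*n4) */

        for (int k = 0; k < n4; k++) {
            output[k]         = -output[n2 - 1 - k]; /* first quarter: reversed, negated */
            output[n - 1 - k] =  output[n2 + k];     /* last quarter: reversed copy */
        }
    }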

Modified: trunk/libavcodec/dsputil.h
==============================================================================
--- trunk/libavcodec/dsputil.h	Thu Sep 10 10:49:59 2009	(r19805)
+++ trunk/libavcodec/dsputil.h	Thu Sep 10 10:50:03 2009	(r19806)
@@ -694,11 +694,13 @@ extern FFTSample* const ff_cos_tabs[13];
 int ff_fft_init(FFTContext *s, int nbits, int inverse);
 void ff_fft_permute_c(FFTContext *s, FFTComplex *z);
 void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_permute_neon(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_neon(FFTContext *s, FFTComplex *z);
 
 /**
  * Do the permutation needed BEFORE calling ff_fft_calc().
@@ -768,6 +770,8 @@ void ff_imdct_calc_3dn2(MDCTContext *s, 
 void ff_imdct_half_3dn2(MDCTContext *s, FFTSample *output, const FFTSample *input);
 void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output, const FFTSample *input);
 void ff_imdct_half_sse(MDCTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_calc_neon(MDCTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_neon(MDCTContext *s, FFTSample *output, const FFTSample *input);
 void ff_mdct_calc(MDCTContext *s, FFTSample *out, const FFTSample *input);
 void ff_mdct_end(MDCTContext *s);
 

Modified: trunk/libavcodec/fft.c
==============================================================================
--- trunk/libavcodec/fft.c	Thu Sep 10 10:49:59 2009	(r19805)
+++ trunk/libavcodec/fft.c	Thu Sep 10 10:50:03 2009	(r19806)
@@ -64,6 +64,7 @@ av_cold int ff_fft_init(FFTContext *s, i
     float alpha, c1, s1, s2;
     int split_radix = 1;
     int av_unused has_vectors;
+    int revtab_shift = 0;
 
     if (nbits < 2 || nbits > 16)
         goto fail;
@@ -112,6 +113,12 @@ av_cold int ff_fft_init(FFTContext *s, i
         s->fft_calc = ff_fft_calc_altivec;
         split_radix = 0;
     }
+#elif HAVE_NEON
+    s->fft_permute = ff_fft_permute_neon;
+    s->fft_calc    = ff_fft_calc_neon;
+    s->imdct_calc  = ff_imdct_calc_neon;
+    s->imdct_half  = ff_imdct_half_neon;
+    revtab_shift = 3;
 #endif
 
     if (split_radix) {
@@ -125,7 +132,8 @@ av_cold int ff_fft_init(FFTContext *s, i
                 tab[m/2-i] = tab[i];
         }
         for(i=0; i<n; i++)
-            s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
+            s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] =
+                i << revtab_shift;
         s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
     } else {
         int np, nblocks, np2, l;
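
Besides selecting the NEON entry points, the fft.c change introduces
revtab_shift: on the NEON path the permutation table now stores byte offsets
(i << 3 == i * sizeof(FFTComplex)) instead of element indices, which is what
lets ff_fft_permute_neon add each entry directly to the tmp_buf base pointer;
every other backend keeps revtab_shift at 0 and sees the same values as
before.  A small self-check of that relationship (helper name hypothetical):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { float re, im; } FFTComplex;   /* 8 bytes */

    /* What ff_fft_init now stores in each revtab slot, for a given shift. */
    static size_t revtab_entry(unsigned i, int revtab_shift)
    {
        return (size_t)i << revtab_shift;
    }

    int main(void)
    {
        assert(revtab_entry(5, 3) == 5 * sizeof(FFTComplex)); /* NEON: byte offset */
        assert(revtab_entry(5, 0) == 5);                      /* others: plain index */
        return 0;
    }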


