[FFmpeg-cvslog] x86: synth filter float: implement SSE2 version

Christophe Gisquet git at videolan.org
Fri Feb 28 20:54:07 CET 2014


ffmpeg | branch: master | Christophe Gisquet <christophe.gisquet at gmail.com> | Fri Feb 14 16:00:48 2014 +0000| [2cdbcc004837ce092a14f326f24d97a29512a2c3] | committer: Michael Niedermayer

x86: synth filter float: implement SSE2 version

Timings for Arrandale:
          C    SSE2
win32:  2108   334
win64:  1152   322

Factorizing the inner loop with a call/jmp costs more than 15 cycles, even
with the jmp destination being aligned.

Unrolling for ARCH_X86_64 yields a 20-cycle gain.

Signed-off-by: Michael Niedermayer <michaelni at gmx.at>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=2cdbcc004837ce092a14f326f24d97a29512a2c3
---

 libavcodec/synth_filter.c    |    1 +
 libavcodec/synth_filter.h    |    1 +
 libavcodec/x86/dcadsp.asm    |  154 ++++++++++++++++++++++++++++++++++++++++++
 libavcodec/x86/dcadsp_init.c |   28 ++++++++
 4 files changed, 184 insertions(+)

diff --git a/libavcodec/synth_filter.c b/libavcodec/synth_filter.c
index 5f10530..d49ffe6 100644
--- a/libavcodec/synth_filter.c
+++ b/libavcodec/synth_filter.c
@@ -61,4 +61,5 @@ av_cold void ff_synth_filter_init(SynthFilterContext *c)
     c->synth_filter_float = synth_filter_float;
 
     if (ARCH_ARM) ff_synth_filter_init_arm(c);
+    if (ARCH_X86) ff_synth_filter_init_x86(c);
 }
diff --git a/libavcodec/synth_filter.h b/libavcodec/synth_filter.h
index 33edcc4..b63fd77 100644
--- a/libavcodec/synth_filter.h
+++ b/libavcodec/synth_filter.h
@@ -33,5 +33,6 @@ typedef struct SynthFilterContext {
 
 void ff_synth_filter_init(SynthFilterContext *c);
 void ff_synth_filter_init_arm(SynthFilterContext *c);
+void ff_synth_filter_init_x86(SynthFilterContext *c);
 
 #endif /* AVCODEC_SYNTH_FILTER_H */
diff --git a/libavcodec/x86/dcadsp.asm b/libavcodec/x86/dcadsp.asm
index b9e8b8b..0c73bbb 100644
--- a/libavcodec/x86/dcadsp.asm
+++ b/libavcodec/x86/dcadsp.asm
@@ -175,3 +175,157 @@ cglobal dca_lfe_fir%1, 3,3,6-%1, out, in, cf0
 INIT_XMM sse
 DCA_LFE_FIR 0
 DCA_LFE_FIR 1
+
+INIT_XMM sse2
+%macro INNER_LOOP   1
+    ; reading backwards:  ptr1=synth_buf+j+i   ptr2=synth_big+j-i
+    ;~ a += window[i + j     ]*(-synth_buf[15 - i + j      ])
+    ;~ b += window[i + j + 16]*( synth_buf[     i + j      ])
+    pshufd        m5, [ptr2 + j + (15-3)*4], q0123
+    mova          m6, [ptr1 + j]
+%if ARCH_X86_64
+    pshufd       m11, [ptr2 + j + (15-3)*4 - mmsize], q0123
+    mova         m12, [ptr1 + j + mmsize]
+%endif
+    mulps         m6, [win  + %1 + j + 16*4]
+    mulps         m5, [win  + %1 + j]
+%if ARCH_X86_64
+    mulps        m12, [win  + %1 + j + mmsize + 16*4]
+    mulps        m11, [win  + %1 + j + mmsize]
+%endif
+    addps         m2, m6
+    subps         m1, m5
+%if ARCH_X86_64
+    addps         m8, m12
+    subps         m7, m11
+%endif
+    ;~ c += window[i + j + 32]*( synth_buf[16 + i + j      ])
+    ;~ d += window[i + j + 48]*( synth_buf[31 - i + j      ])
+    pshufd        m6, [ptr2 + j + (31-3)*4], q0123
+    mova          m5, [ptr1 + j + 16*4]
+%if ARCH_X86_64
+    pshufd       m12, [ptr2 + j + (31-3)*4 - mmsize], q0123
+    mova         m11, [ptr1 + j + mmsize + 16*4]
+%endif
+    mulps         m5, [win  + %1 + j + 32*4]
+    mulps         m6, [win  + %1 + j + 48*4]
+%if ARCH_X86_64
+    mulps        m11, [win  + %1 + j + mmsize + 32*4]
+    mulps        m12, [win  + %1 + j + mmsize + 48*4]
+%endif
+    addps         m3, m5
+    addps         m4, m6
+%if ARCH_X86_64
+    addps         m9, m11
+    addps        m10, m12
+%endif
+    sub            j, 64*4
+%endmacro
+
+; void ff_synth_filter_inner_sse2(float *synth_buf, float synth_buf2[32],
+;                                 const float window[512], float out[32],
+;                                 intptr_t offset, float scale)
+cglobal synth_filter_inner, 0,6+4*ARCH_X86_64,7+6*ARCH_X86_64, \
+                              synth_buf, synth_buf2, window, out, off, scale
+%define scale m0
+%if ARCH_X86_32 || WIN64
+    movd       scale, scalem
+; Make sure offset is in a register and not on the stack
+%define OFFQ  r4q
+%else
+%define OFFQ  offq
+%endif
+    pshufd        m0, m0, 0
+    ; prepare inner counter limit 1
+    mov          r5q, 480
+    sub          r5q, offmp
+    and          r5q, -64
+    shl          r5q, 2
+    mov         OFFQ, r5q
+%define i        r5q
+    mov            i, 16*4-(ARCH_X86_64+1)*mmsize  ; main loop counter
+
+%define buf2     synth_buf2q
+%if ARCH_X86_32
+    mov         buf2, synth_buf2mp
+%endif
+.mainloop
+    ; m1=a  m2=b  m3=c  m4=d
+    pxor          m3, m3
+    pxor          m4, m4
+    mova          m1, [buf2 + i]
+    mova          m2, [buf2 + i + 16*4]
+%if ARCH_X86_32
+%define ptr1     r0q
+%define ptr2     r1q
+%define win      r2q
+%define j        r3q
+    mov          win, windowm
+    mov         ptr1, synth_bufm
+    add          win, i
+    add         ptr1, i
+%else
+%define ptr1     r6q
+%define ptr2     r7q ; must be loaded
+%define win      r8q
+%define j        r9q
+%if ARCH_X86_64
+    pxor          m9, m9
+    pxor         m10, m10
+    mova          m7, [buf2 + i + mmsize]
+    mova          m8, [buf2 + i + mmsize + 16*4]
+%endif
+    lea          win, [windowq + i]
+    lea         ptr1, [synth_bufq + i]
+%endif
+    mov         ptr2, synth_bufmp
+    ; prepare the inner loop counter
+    mov            j, OFFQ
+    sub         ptr2, i
+.loop1:
+    INNER_LOOP  0
+    jge       .loop1
+
+    mov            j, 448*4
+    sub            j, OFFQ
+    jz          .end
+    sub         ptr1, j
+    sub         ptr2, j
+    add          win, OFFQ ; now at j-64, so define OFFSET
+    sub            j, 64*4
+.loop2:
+    INNER_LOOP  64*4
+    jge       .loop2
+
+.end:
+%if ARCH_X86_32
+    mov         buf2, synth_buf2m ; needed for next iteration anyway
+    mov         outq, outmp       ; j, which will be set again during it
+%endif
+    ;~ out[i     ] = a*scale;
+    ;~ out[i + 16] = b*scale;
+    mulps         m1, scale
+    mulps         m2, scale
+%if ARCH_X86_64
+    mulps         m7, scale
+    mulps         m8, scale
+%endif
+    ;~ synth_buf2[i     ] = c;
+    ;~ synth_buf2[i + 16] = d;
+    mova   [buf2 + i +  0*4], m3
+    mova   [buf2 + i + 16*4], m4
+%if ARCH_X86_64
+    mova   [buf2 + i +  0*4 + mmsize], m9
+    mova   [buf2 + i + 16*4 + mmsize], m10
+%endif
+    ;~ out[i     ] = a;
+    ;~ out[i + 16] = a;
+    mova   [outq + i +  0*4], m1
+    mova   [outq + i + 16*4], m2
+%if ARCH_X86_64
+    mova   [outq + i +  0*4 + mmsize], m7
+    mova   [outq + i + 16*4 + mmsize], m8
+%endif
+    sub            i, (ARCH_X86_64+1)*mmsize
+    jge    .mainloop
+    RET
diff --git a/libavcodec/x86/dcadsp_init.c b/libavcodec/x86/dcadsp_init.c
index 664019d..06c31a0 100644
--- a/libavcodec/x86/dcadsp_init.c
+++ b/libavcodec/x86/dcadsp_init.c
@@ -49,3 +49,31 @@ av_cold void ff_dcadsp_init_x86(DCADSPContext *s)
         s->int8x8_fmul_int32 = ff_int8x8_fmul_int32_sse4;
     }
 }
+
+void ff_synth_filter_inner_sse2(float *synth_buf_ptr, float synth_buf2[32],
+                                const float window[512],
+                                float out[32], intptr_t offset, float scale);
+
+static void synth_filter_sse2(FFTContext *imdct,
+                              float *synth_buf_ptr, int *synth_buf_offset,
+                              float synth_buf2[32], const float window[512],
+                              float out[32], const float in[32], float scale)
+{
+    float *synth_buf= synth_buf_ptr + *synth_buf_offset;
+
+    imdct->imdct_half(imdct, synth_buf, in);
+
+    ff_synth_filter_inner_sse2(synth_buf, synth_buf2, window,
+                               out, *synth_buf_offset, scale);
+
+    *synth_buf_offset= (*synth_buf_offset - 32)&511;
+}
+
+av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
+{
+    int cpu_flags = av_get_cpu_flags();
+
+    if (EXTERNAL_SSE2(cpu_flags)) {
+        s->synth_filter_float = synth_filter_sse2;
+    }
+}



More information about the ffmpeg-cvslog mailing list