[FFmpeg-cvslog] vp9/x86: idct_add_16x16_ssse3.

Ronald S. Bultje git at videolan.org
Sat Dec 14 18:36:11 CET 2013


ffmpeg | branch: master | Ronald S. Bultje <rsbultje at gmail.com> | Sat Dec 14 08:09:18 2013 -0500| [8d4c616fc05f2f3c76d13594788129df72069f30] | committer: Ronald S. Bultje

vp9/x86: idct_add_16x16_ssse3.

Currently covers only the DC-only and full 16x16 cases; other
sub-forms will follow in the near future. Total decoding time of
ped1080p.webm goes from 9.7 to 9.3 seconds. The DC-only case goes from
957 to 131 cycles, and the full IDCT from ~4050 to ~745 cycles.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=8d4c616fc05f2f3c76d13594788129df72069f30
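
As a sketch of what the new DC-only fast path computes (plain C for
illustration; this is not code from the patch, and the helper names are
made up): when eob == 1 only the DC coefficient is non-zero, so the
whole 2-D IDCT collapses to a single value that is rounded and added to
all 256 destination pixels. 11585 is round(sqrt(2) * 8192), i.e.
cospi_16_64, and a pmulhrsw against pw_11585x2 is exactly the
(x * 11585 + 8192) >> 14 rounding below, just as pmulhrsw against
pw_512 is exactly (x + 32) >> 6.

    #include <stddef.h>
    #include <stdint.h>

    static uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* eob == 1 shortcut: one rounded DC value added to every pixel */
    static void idct16x16_dc_add_sketch(uint8_t *dst, ptrdiff_t stride,
                                        const int16_t *block)
    {
        int dc = (block[0] * 11585 + (1 << 13)) >> 14; /* 1-D DC, pass 1 */
        dc     = (dc * 11585 + (1 << 13)) >> 14;       /* 1-D DC, pass 2 */
        dc     = (dc + 32) >> 6;                       /* final rounding */
        for (int y = 0; y < 16; y++, dst += stride)
            for (int x = 0; x < 16; x++)
                dst[x] = clip_u8(dst[x] + dc);
    }
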
---

 libavcodec/x86/vp9dsp_init.c |    5 +-
 libavcodec/x86/vp9itxfm.asm  |  279 ++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 275 insertions(+), 9 deletions(-)

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 3c02520..5c31db6 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -153,6 +153,7 @@ filters_8tap_1d_fn3(avg)
 
 void ff_vp9_idct_idct_4x4_add_ssse3(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
 void ff_vp9_idct_idct_8x8_add_ssse3(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+void ff_vp9_idct_idct_16x16_add_ssse3(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
 
 #endif /* HAVE_YASM */
 
@@ -208,8 +209,10 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
         init_subpel3(0, put, ssse3);
         init_subpel3(1, avg, ssse3);
         dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_ssse3;
-        if (ARCH_X86_64)
+        if (ARCH_X86_64) {
             dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_ssse3;
+            dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_ssse3;
+        }
     }
 
 #undef init_fpel
diff --git a/libavcodec/x86/vp9itxfm.asm b/libavcodec/x86/vp9itxfm.asm
index ebf3044..b6dd0bf 100644
--- a/libavcodec/x86/vp9itxfm.asm
+++ b/libavcodec/x86/vp9itxfm.asm
@@ -2,6 +2,7 @@
 ;* VP9 IDCT SIMD optimizations
 ;*
 ;* Copyright (C) 2013 Clément Bœsch <u pkh me>
+;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 ;*
 ;* This file is part of FFmpeg.
 ;*
@@ -26,24 +27,32 @@ SECTION_RODATA
 
 pw_11585x2: times 8 dw 23170
 
-%macro VP9_IDCT_COEFFS 2
-pw_m%1_%2: dw -%1, %2, -%1, %2, -%1, %2, -%1, %2
-pw_%2_%1:  dw  %2, %1,  %2, %1,  %2, %1,  %2, %1
+%macro VP9_IDCT_COEFFS 2-3 0
+pw_m%1_%2:  times 4 dw -%1,  %2
+pw_%2_%1:   times 4 dw  %2,  %1
+%if %3 == 1
+pw_m%2_m%1: times 4 dw -%2, -%1
+%endif
 %endmacro
 
-%macro VP9_IDCT_COEFFS_ALL 2
+%macro VP9_IDCT_COEFFS_ALL 2-3 0
 pw_%1x2: times 8 dw %1*2
 pw_%2x2: times 8 dw %2*2
-VP9_IDCT_COEFFS %1, %2
+VP9_IDCT_COEFFS %1, %2, %3
 %endmacro
 
-VP9_IDCT_COEFFS_ALL 15137,  6270
+VP9_IDCT_COEFFS_ALL 15137,  6270, 1
 VP9_IDCT_COEFFS_ALL 16069,  3196
 VP9_IDCT_COEFFS_ALL  9102, 13623
+VP9_IDCT_COEFFS_ALL 16305,  1606
+VP9_IDCT_COEFFS_ALL 10394, 12665
+VP9_IDCT_COEFFS_ALL 14449,  7723
+VP9_IDCT_COEFFS_ALL  4756, 15679
 
 pd_8192: times 4 dd 8192
 pw_2048: times 8 dw 2048
 pw_1024: times 8 dw 1024
+pw_512:  times 8 dw 512
 
 SECTION .text
 
@@ -174,11 +183,12 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
     VP9_IDCT4_WRITEOUT
     RET
 
+%if ARCH_X86_64 ; TODO: 32-bit? (32-bit limited to 8 xmm reg, we use more)
+
 ;-------------------------------------------------------------------------------------------
 ; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
 ;-------------------------------------------------------------------------------------------
 
-%if ARCH_X86_64 ; TODO: 32-bit? (32-bit limited to 8 xmm reg, we use 13 here)
 %macro VP9_IDCT8_1D_FINALIZE 0
     SUMSUB_BA            w,  3, 10, 4                       ;  m3=t0+t7, m10=t0-t7
     SUMSUB_BA            w,  1,  2, 4                       ;  m1=t1+t6,  m2=t1-t6
@@ -352,4 +362,257 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
     mova      [blockq+112], m4
     VP9_IDCT8_WRITEOUT
     RET
-%endif
+
+;---------------------------------------------------------------------------------------------
+; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+;---------------------------------------------------------------------------------------------
+
+%macro VP9_IDCT16_1D 2 ; src, pass
+    mova                m5, [%1+ 32]       ; IN(1)
+    mova               m14, [%1+ 64]       ; IN(2)
+    mova                m6, [%1+ 96]       ; IN(3)
+    mova                m9, [%1+128]       ; IN(4)
+    mova                m7, [%1+160]       ; IN(5)
+    mova               m15, [%1+192]       ; IN(6)
+    mova                m4, [%1+224]       ; IN(7)
+    mova                m3, [%1+288]       ; IN(9)
+    mova               m12, [%1+320]       ; IN(10)
+    mova                m0, [%1+352]       ; IN(11)
+    mova                m8, [%1+384]       ; IN(12)
+    mova                m1, [%1+416]       ; IN(13)
+    mova               m13, [%1+448]       ; IN(14)
+    mova                m2, [%1+480]       ; IN(15)
+
+    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
+    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
+
+    VP9_UNPACK_MULSUB_2W_4X   9,   8, 15137,  6270, [pd_8192], 10, 11 ; t2,  t3
+    VP9_UNPACK_MULSUB_2W_4X  14,  13, 16069,  3196, [pd_8192], 10, 11 ; t4,  t7
+    VP9_UNPACK_MULSUB_2W_4X  12,  15,  9102, 13623, [pd_8192], 10, 11 ; t5,  t6
+    VP9_UNPACK_MULSUB_2W_4X   5,   2, 16305,  1606, [pd_8192], 10, 11 ; t8,  t15
+    VP9_UNPACK_MULSUB_2W_4X   3,   4, 10394, 12665, [pd_8192], 10, 11 ; t9,  t14
+    VP9_UNPACK_MULSUB_2W_4X   7,   0, 14449,  7723, [pd_8192], 10, 11 ; t10, t13
+    VP9_UNPACK_MULSUB_2W_4X   1,   6,  4756, 15679, [pd_8192], 10, 11 ; t11, t12
+
+    ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
+    ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
+
+    SUMSUB_BA            w, 12, 14, 10      ; t4,  t5
+    SUMSUB_BA            w, 15, 13, 10      ; t7,  t6
+    SUMSUB_BA            w,  3,  5, 10      ; t8,  t9
+    SUMSUB_BA            w,  7,  1, 10      ; t11, t10
+    SUMSUB_BA            w,  0,  6, 10      ; t12, t13
+    SUMSUB_BA            w,  4,  2, 10      ; t15, t14
+
+    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
+    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
+
+    SUMSUB_BA            w, 14, 13, 10
+    pmulhrsw           m13, [pw_11585x2]                              ; t5
+    pmulhrsw           m14, [pw_11585x2]                              ; t6
+    VP9_UNPACK_MULSUB_2W_4X   2,   5, 15137,  6270, [pd_8192], 10, 11 ; t9,  t14
+    VP9_UNPACK_MULSUB_2W_4X   6,   1,  6270, m15137, [pd_8192], 10, 11 ; t10, t13
+
+    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
+    ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
+
+    SUMSUB_BA            w,  7,  3, 10      ; t8,  t11
+    SUMSUB_BA            w,  6,  2, 10      ; t9,  t10
+    SUMSUB_BA            w,  0,  4, 10      ; t15, t12
+    SUMSUB_BA            w,  1,  5, 10      ; t14, t13
+
+    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
+    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
+
+    SUMSUB_BA            w,  2,  5, 10
+    SUMSUB_BA            w,  3,  4, 10
+    pmulhrsw            m5, [pw_11585x2]    ; t10
+    pmulhrsw            m4, [pw_11585x2]    ; t11
+    pmulhrsw            m3, [pw_11585x2]    ; t12
+    pmulhrsw            m2, [pw_11585x2]    ; t13
+
+    ; backup first register
+    mova          [rsp+32], m7
+
+    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
+    ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
+
+    ; from load/start
+    mova               m10, [%1+  0]        ; IN(0)
+    mova               m11, [%1+256]        ; IN(8)
+
+    ; from 3 stages back
+    SUMSUB_BA            w, 11, 10, 7
+    pmulhrsw           m11, [pw_11585x2]    ; t0
+    pmulhrsw           m10, [pw_11585x2]    ; t1
+
+    ; from 2 stages back
+    SUMSUB_BA            w,  8, 11, 7       ; t0,  t3
+    SUMSUB_BA            w,  9, 10, 7       ; t1,  t2
+
+    ; from 1 stage back
+    SUMSUB_BA            w, 15,  8, 7       ; t0,  t7
+    SUMSUB_BA            w, 14,  9, 7       ; t1,  t6
+    SUMSUB_BA            w, 13, 10, 7       ; t2,  t5
+    SUMSUB_BA            w, 12, 11, 7       ; t3,  t4
+
+    SUMSUB_BA            w,  0, 15, 7       ; t0, t15
+    SUMSUB_BA            w,  1, 14, 7       ; t1, t14
+    SUMSUB_BA            w,  2, 13, 7       ; t2, t13
+    SUMSUB_BA            w,  3, 12, 7       ; t3, t12
+    SUMSUB_BA            w,  4, 11, 7       ; t4, t11
+    SUMSUB_BA            w,  5, 10, 7       ; t5, t10
+
+%if %2 == 1
+    ; backup a different register
+    mova          [rsp+16], m15
+    mova                m7, [rsp+32]
+
+    SUMSUB_BA            w,  6,  9, 15      ; t6, t9
+    SUMSUB_BA            w,  7,  8, 15      ; t7, t8
+
+    TRANSPOSE8x8W        0, 1, 2, 3, 4, 5, 6, 7, 15
+    mova         [rsp+  0], m0
+    mova         [rsp+ 32], m1
+    mova         [rsp+ 64], m2
+    mova         [rsp+ 96], m3
+    mova         [rsp+128], m4
+    mova         [rsp+160], m5
+    mova         [rsp+192], m6
+    mova         [rsp+224], m7
+
+    mova               m15, [rsp+16]
+    TRANSPOSE8x8W        8, 9, 10, 11, 12, 13, 14, 15, 0
+    mova         [rsp+ 16], m8
+    mova         [rsp+ 48], m9
+    mova         [rsp+ 80], m10
+    mova         [rsp+112], m11
+    mova         [rsp+144], m12
+    mova         [rsp+176], m13
+    mova         [rsp+208], m14
+    mova         [rsp+240], m15
+%else ; %2 == 2
+    ; backup more registers
+    mova          [rsp+64], m8
+    mova          [rsp+96], m9
+
+    pxor                m7, m7
+    pmulhrsw            m0, [pw_512]
+    pmulhrsw            m1, [pw_512]
+    VP9_STORE_2X         0,  1,  8,  9,  7
+    lea               dstq, [dstq+strideq*2]
+    pmulhrsw            m2, [pw_512]
+    pmulhrsw            m3, [pw_512]
+    VP9_STORE_2X         2,  3,  8,  9,  7
+    lea               dstq, [dstq+strideq*2]
+    pmulhrsw            m4, [pw_512]
+    pmulhrsw            m5, [pw_512]
+    VP9_STORE_2X         4,  5,  8,  9,  7
+    lea               dstq, [dstq+strideq*2]
+
+    ; restore from cache
+    SWAP                 0, 7               ; move zero from m7 to m0
+    mova                m7, [rsp+32]
+    mova                m8, [rsp+64]
+    mova                m9, [rsp+96]
+
+    SUMSUB_BA            w,  6,  9, 1       ; t6, t9
+    SUMSUB_BA            w,  7,  8, 1       ; t7, t8
+
+    pmulhrsw            m6, [pw_512]
+    pmulhrsw            m7, [pw_512]
+    VP9_STORE_2X         6,  7,  1,  2,  0
+    lea               dstq, [dstq+strideq*2]
+    pmulhrsw            m8, [pw_512]
+    pmulhrsw            m9, [pw_512]
+    VP9_STORE_2X         8,  9,  1,  2,  0
+    lea               dstq, [dstq+strideq*2]
+    pmulhrsw           m10, [pw_512]
+    pmulhrsw           m11, [pw_512]
+    VP9_STORE_2X        10, 11,  1,  2,  0
+    lea               dstq, [dstq+strideq*2]
+    pmulhrsw           m12, [pw_512]
+    pmulhrsw           m13, [pw_512]
+    VP9_STORE_2X        12, 13,  1,  2,  0
+    lea               dstq, [dstq+strideq*2]
+    pmulhrsw           m14, [pw_512]
+    pmulhrsw           m15, [pw_512]
+    VP9_STORE_2X        14, 15,  1,  2,  0
+%endif ; %2 == 1/2
+%endmacro
+
+%macro ZERO_BLOCK 3 ; mem, n_bytes, zero_reg
+%assign %%off 0
+%rep %2/mmsize
+    mova        [%1+%%off], %3
+%assign %%off (%%off+mmsize)
+%endrep
+%endmacro
+
+%macro VP9_STORE_2XFULL 6 ; dc, tmp1, tmp2, tmp3, tmp4, zero
+    mova               m%3, [dstq]
+    mova               m%5, [dstq+strideq]
+    punpcklbw          m%2, m%3, m%6
+    punpckhbw          m%3, m%6
+    punpcklbw          m%4, m%5, m%6
+    punpckhbw          m%5, m%6
+    paddw              m%2, m%1
+    paddw              m%3, m%1
+    paddw              m%4, m%1
+    paddw              m%5, m%1
+    packuswb           m%2, m%3
+    packuswb           m%4, m%5
+    mova            [dstq], m%2
+    mova    [dstq+strideq], m%4
+%endmacro
+
+INIT_XMM ssse3
+cglobal vp9_idct_idct_16x16_add, 4, 5, 16, 512, dst, stride, block, eob
+    cmp eobd, 1 ; faster path for when only DC is set
+    jne .idctfull
+
+    ; dc-only
+    movd                m0, [blockq]
+    mova                m1, [pw_11585x2]
+    pmulhrsw            m0, m1
+    pmulhrsw            m0, m1
+    SPLATW              m0, m0, q0000
+    pmulhrsw            m0, [pw_512]
+    pxor                m5, m5
+    movd          [blockq], m5
+%rep 7
+    VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5
+    lea               dstq, [dstq+2*strideq]
+%endrep
+    VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5
+    RET
+
+.idctfull:
+    DEFINE_ARGS dst, stride, block, cnt, dst_bak
+    mov               cntd, 2
+.loop1_full:
+    VP9_IDCT16_1D   blockq, 1
+    add             blockq, 16
+    add                rsp, 256
+    dec               cntd
+    jg .loop1_full
+    sub             blockq, 32
+    sub                rsp, 512
+
+    mov               cntd, 2
+    mov           dst_bakq, dstq
+.loop2_full:
+    VP9_IDCT16_1D      rsp, 2
+    lea               dstq, [dst_bakq+8]
+    add                rsp, 16
+    dec               cntd
+    jg .loop2_full
+    sub                rsp, 32
+
+    ; at the end of the loop, m0 should still be zero
+    ; use that to zero out block coefficients
+    ZERO_BLOCK      blockq, 512, m0
+
+    RET
+
+%endif ; x86-64
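
For readers following the .idctfull path: the 2-D transform is
separable, so each pass runs VP9_IDCT16_1D twice on an 8-wide slice
(one XMM register holds 8 coefficients). Pass 1 transposes its result
into the 512-byte stack buffer so that pass 2 can again read contiguous
rows; pass 2 applies the final (x + 32) >> 6 rounding via pw_512 and
accumulates into dst, and ZERO_BLOCK then clears the coefficient
buffer, which the decoder expects. A structural sketch in C
(illustrative only, with made-up names; the 1-D stage here is an
identity placeholder standing in for the real 16-point butterfly
network):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* placeholder for VP9_IDCT16_1D's butterfly network */
    static void idct16_1d_stub(const int16_t *in, int16_t *out)
    {
        for (int i = 0; i < 16; i++)
            out[i] = in[i]; /* identity, for structure only */
    }

    static void idct16x16_add_sketch(uint8_t *dst, ptrdiff_t stride,
                                     int16_t *block)
    {
        int16_t tmp[16 * 16], col[16], out[16];

        /* pass 1: 1-D transform down each column (the asm stores this
         * transposed; plain C does not need to) */
        for (int x = 0; x < 16; x++) {
            for (int y = 0; y < 16; y++)
                col[y] = block[y * 16 + x];
            idct16_1d_stub(col, out);
            for (int y = 0; y < 16; y++)
                tmp[y * 16 + x] = out[y];
        }

        /* pass 2: 1-D transform across each row, then round and add
         * to dst with unsigned saturation */
        for (int y = 0; y < 16; y++) {
            idct16_1d_stub(tmp + y * 16, out);
            for (int x = 0; x < 16; x++) {
                int v = dst[y * stride + x] + ((out[x] + 32) >> 6);
                dst[y * stride + x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
            }
        }

        /* ZERO_BLOCK: coefficients must read back as zero afterwards */
        memset(block, 0, 16 * 16 * sizeof(*block));
    }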