[FFmpeg-devel] [PATCH] x86/fdct: port fdct functions to yasm

James Almer jamrial at gmail.com
Mon May 26 01:58:50 CEST 2014


Signed-off-by: James Almer <jamrial at gmail.com>
---
 libavcodec/x86/Makefile         |   3 +-
 libavcodec/x86/dsputilenc_mmx.c |  42 ++-
 libavcodec/x86/fdct.asm         | 298 ++++++++++++++++++++
 libavcodec/x86/fdct.c           | 594 ----------------------------------------
 4 files changed, 315 insertions(+), 622 deletions(-)
 create mode 100644 libavcodec/x86/fdct.asm
 delete mode 100644 libavcodec/x86/fdct.c

diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 9c39265..27b09e0 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -6,7 +6,6 @@ OBJS-$(CONFIG_DCT)                     += x86/dct_init.o
 OBJS-$(CONFIG_DSPUTIL)                 += x86/dsputil_init.o            \
                                           x86/dsputil_x86.o
 OBJS-$(CONFIG_ENCODERS)                += x86/dsputilenc_mmx.o          \
-                                          x86/fdct.o                    \
                                           x86/motion_est.o
 OBJS-$(CONFIG_FFT)                     += x86/fft_init.o
 OBJS-$(CONFIG_FLAC_DECODER)            += x86/flacdsp_init.o
@@ -71,7 +70,7 @@ YASM-OBJS-$(CONFIG_DSPUTIL)            += x86/dsputil.o                 \
                                           x86/fpel.o                    \
                                           x86/mpeg4qpel.o               \
                                           x86/qpel.o
-YASM-OBJS-$(CONFIG_ENCODERS)           += x86/dsputilenc.o
+YASM-OBJS-$(CONFIG_ENCODERS)           += x86/dsputilenc.o x86/fdct.o
 YASM-OBJS-$(CONFIG_FFT)                += x86/fft.o
 YASM-OBJS-$(CONFIG_FLAC_DECODER)       += x86/flacdsp.o
 YASM-OBJS-$(CONFIG_H263DSP)            += x86/h263_loopfilter.o
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index e63d510..3394b14 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -885,24 +885,8 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx,
     int cpu_flags = av_get_cpu_flags();
     const int dct_algo = avctx->dct_algo;
 
-    if (EXTERNAL_MMX(cpu_flags)) {
-        if (!high_bit_depth)
-            c->get_pixels = ff_get_pixels_mmx;
-        c->diff_pixels = ff_diff_pixels_mmx;
-        c->pix_sum     = ff_pix_sum16_mmx;
-        c->pix_norm1   = ff_pix_norm1_mmx;
-    }
-
-    if (EXTERNAL_SSE2(cpu_flags))
-        if (!high_bit_depth)
-            c->get_pixels = ff_get_pixels_sse2;
-
 #if HAVE_INLINE_ASM
     if (INLINE_MMX(cpu_flags)) {
-        if (!high_bit_depth &&
-            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
-            c->fdct = ff_fdct_mmx;
-
         c->diff_bytes      = diff_bytes_mmx;
         c->sse[0]  = sse16_mmx;
         c->sse[1]  = sse8_mmx;
@@ -927,10 +911,6 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx,
     }
 
     if (INLINE_MMXEXT(cpu_flags)) {
-        if (!high_bit_depth &&
-            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
-            c->fdct = ff_fdct_mmxext;
-
         c->vsad[4]         = vsad_intra16_mmxext;
 
         if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
@@ -940,12 +920,6 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx,
         c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_mmxext;
     }
 
-    if (INLINE_SSE2(cpu_flags)) {
-        if (!high_bit_depth &&
-            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
-            c->fdct = ff_fdct_sse2;
-    }
-
 #if HAVE_SSSE3_INLINE
     if (INLINE_SSSE3(cpu_flags)) {
         if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
@@ -957,18 +931,34 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx,
 #endif /* HAVE_INLINE_ASM */
 
     if (EXTERNAL_MMX(cpu_flags)) {
+        if (!high_bit_depth) {
+            c->get_pixels = ff_get_pixels_mmx;
+            if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)
+                c->fdct = ff_fdct_mmx;
+        }
+        c->diff_pixels = ff_diff_pixels_mmx;
+        c->pix_sum     = ff_pix_sum16_mmx;
+        c->pix_norm1   = ff_pix_norm1_mmx;
         c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
         c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
         c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmx;
     }
 
     if (EXTERNAL_MMXEXT(cpu_flags)) {
+        if (!high_bit_depth &&
+            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
+            c->fdct = ff_fdct_mmxext;
         c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
         c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
         c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmxext;
     }
 
     if (EXTERNAL_SSE2(cpu_flags)) {
+        if (!high_bit_depth) {
+            c->get_pixels = ff_get_pixels_sse2;
+            if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)
+                c->fdct = ff_fdct_sse2;
+        }
         c->sse[0] = ff_sse16_sse2;
         c->sum_abs_dctelem   = ff_sum_abs_dctelem_sse2;
 
diff --git a/libavcodec/x86/fdct.asm b/libavcodec/x86/fdct.asm
new file mode 100644
index 0000000..66b9a02
--- /dev/null
+++ b/libavcodec/x86/fdct.asm
@@ -0,0 +1,298 @@
+;******************************************************************************
+;* SIMD-optimized forward DCT
+;******************************************************************************
+;* The gcc porting is Copyright (c) 2001 Fabrice Bellard.
+;* cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni at gmx.at>
+;* SSE2 optimization is Copyright (c) 2004 Denes Balatoni.
+;* NASM syntax porting is Copyright (c) 2014 James Almer
+;*
+;* from  fdctam32.c - AP922 MMX(3D-Now) forward-DCT
+;*
+;*  Intel Application Note AP-922 - fast, precise implementation of DCT
+;*        http://developer.intel.com/vtune/cbts/appnotes.htm
+;*
+;* Also of inspiration:
+;* a page about fdct at http://www.geocities.com/ssavekar/dct.htm
+;* Skal's fdct at http://skal.planet-d.net/coding/dct.html
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;*****************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+; constants for the forward DCT
+%define BITS_FRW_ACC   3 ; 2 or 3 for accuracy
+%define SHIFT_FRW_COL  BITS_FRW_ACC
+%define SHIFT_FRW_ROW  (BITS_FRW_ACC + 17 - 3)
+%define RND_FRW_ROW    (1 << (SHIFT_FRW_ROW-1))
+
+; concatenated table, for forward DCT transformation
+pw_fdct_tg:       dw 13036,  13036,  13036,  13036,  13036,  13036,  13036,  13036,  27146,  27146,  27146,   27146,  27146,   27146,  27146,   27146,\
+                    -21746, -21746, -21746, -21746, -21746, -21746, -21746, -21746
+; forward_dct coeff table
+pw_frw_coef_mmx:  dw 16384,  16384,  22725,  19266,  16384,  16384,  12873,   4520,  21407,   8867,  19266,   -4520,  -8867,  -21407, -22725,  -12873,\
+                     16384, -16384,  12873, -22725, -16384,  16384,   4520,  19266,   8867, -21407,   4520,  -12873,  21407,   -8867,  19266,  -22725,\
+                     22725,  22725,  31521,  26722,  22725,  22725,  17855,   6270,  29692,  12299,  26722,   -6270, -12299,  -29692, -31521,  -17855,\
+                     22725, -22725,  17855, -31521, -22725,  22725,   6270,  26722,  12299, -29692,   6270,  -17855,  29692,  -12299,  26722,  -31521,\
+                     21407,  21407,  29692,  25172,  21407,  21407,  16819,   5906,  27969,  11585,  25172,   -5906, -11585,  -27969, -29692,  -16819,\
+                     21407, -21407,  16819, -29692, -21407,  21407,   5906,  25172,  11585, -27969,   5906,  -16819,  27969,  -11585,  25172,  -29692,\
+                     19266,  19266,  26722,  22654,  19266,  19266,  15137,   5315,  25172,  10426,  22654,   -5315, -10426,  -25172, -26722,  -15137,\
+                     19266, -19266,  15137, -26722, -19266,  19266,   5315,  22654,  10426, -25172,   5315,  -15137,  25172,  -10426,  22654,  -26722,\
+                     16384,  16384,  22725,  19266,  16384,  16384,  12873,   4520,  21407,   8867,  19266,   -4520,  -8867,  -21407, -22725,  -12873,\
+                     16384, -16384,  12873, -22725, -16384,  16384,   4520,  19266,   8867, -21407,   4520,  -12873,  21407,   -8867,  19266,  -22725,\
+                     19266,  19266,  26722,  22654,  19266,  19266,  15137,   5315,  25172,  10426,  22654,   -5315, -10426,  -25172, -26722,  -15137,\
+                     19266, -19266,  15137, -26722, -19266,  19266,   5315,  22654,  10426, -25172,   5315,  -15137,  25172,  -10426,  22654,  -26722,\
+                     21407,  21407,  29692,  25172,  21407,  21407,  16819,   5906,  27969,  11585,  25172,   -5906, -11585,  -27969, -29692,  -16819,\
+                     21407, -21407,  16819, -29692, -21407,  21407,   5906,  25172,  11585, -27969,   5906,  -16819,  27969,  -11585,  25172,  -29692,\
+                     22725,  22725,  31521,  26722,  22725,  22725,  17855,   6270,  29692,  12299,  26722,   -6270, -12299,  -29692, -31521,  -17855,\
+                     22725, -22725,  17855, -31521, -22725,  22725,   6270,  26722,  12299, -29692,   6270,  -17855,  29692,  -12299,  26722,  -31521
+pw_frw_coef_sse2: dw 16384,  16384,  22725,  19266,  -8867, -21407, -22725, -12873,  16384,  16384,  12873,    4520,  21407,    8867,  19266,   -4520,\
+                    -16384,  16384,   4520,  19266,   8867, -21407,   4520, -12873,  16384, -16384,  12873,  -22725,  21407,   -8867,  19266,  -22725,\
+                     22725,  22725,  31521,  26722, -12299, -29692, -31521, -17855,  22725,  22725,  17855,    6270,  29692,   12299,  26722,   -6270,\
+                    -22725,  22725,   6270,  26722,  12299, -29692,   6270, -17855,  22725, -22725,  17855,  -31521,  29692,  -12299,  26722,  -31521,\
+                     21407,  21407,  29692,  25172, -11585, -27969, -29692, -16819,  21407,  21407,  16819,    5906,  27969,   11585,  25172,   -5906,\
+                    -21407,  21407,   5906,  25172,  11585, -27969,   5906, -16819,  21407, -21407,  16819,  -29692,  27969,  -11585,  25172,  -29692,\
+                     19266,  19266,  26722,  22654, -10426, -25172, -26722, -15137,  19266,  19266,  15137,    5315,  25172,   10426,  22654,   -5315,\
+                    -19266,  19266,   5315,  22654,  10426, -25172,   5315, -15137,  19266, -19266,  15137,  -26722,  25172,  -10426,  22654,  -26722,\
+                     16384,  16384,  22725,  19266,  -8867, -21407, -22725, -12873,  16384,  16384,  12873,    4520,  21407,    8867,  19266,   -4520,\
+                    -16384,  16384,   4520,  19266,   8867, -21407,   4520, -12873,  16384, -16384,  12873,  -22725,  21407,   -8867,  19266,  -22725,\
+                     19266,  19266,  26722,  22654, -10426, -25172, -26722, -15137,  19266,  19266,  15137,    5315,  25172,   10426,  22654,   -5315,\
+                    -19266,  19266,   5315,  22654,  10426, -25172,   5315, -15137,  19266, -19266,  15137,  -26722,  25172,  -10426,  22654,  -26722,\
+                     21407,  21407,  29692,  25172, -11585, -27969, -29692, -16819,  21407,  21407,  16819,    5906,  27969,   11585,  25172,   -5906,\
+                    -21407,  21407,   5906,  25172,  11585, -27969,   5906, -16819,  21407, -21407,  16819,  -29692,  27969,  -11585,  25172,  -29692,\
+                     22725,  22725,  31521,  26722, -12299, -29692, -31521, -17855,  22725,  22725,  17855,    6270,  29692,   12299,  26722,   -6270,\
+                    -22725,  22725,   6270,  26722,  12299, -29692,   6270, -17855,  22725, -22725,  17855,  -31521,  29692,  -12299,  26722,  -31521
+pw_ocos:  times 8 dw 23170
+pd_row:   times 4 dd RND_FRW_ROW
+
+cextern pw_1
+
+SECTION_TEXT
+
+; %1 = block offset
+%macro FDCT_COL 1
+    mova      m0, [blockq+%1+16]
+    mova      m1, [blockq+%1+96]
+    mova      m2, m0
+    mova      m3, [blockq+%1+32]
+    paddsw    m0, m1
+    mova      m4, [blockq+%1+80]
+    psllw     m0, SHIFT_FRW_COL
+    mova      m5, [blockq+%1]
+    paddsw    m4, m3
+    paddsw    m5, [blockq+%1+112]
+    psllw     m4, SHIFT_FRW_COL
+    mova      m6, m0
+    psubsw    m2, m1
+    mova      m1, [pw_fdct_tg+16]
+    psubsw    m0, m4
+    mova      m7, [blockq+%1+48]
+    pmulhw    m1, m0
+    paddsw    m7, [blockq+%1+64]
+    psllw     m5, SHIFT_FRW_COL
+    paddsw    m6, m4
+    psllw     m7, SHIFT_FRW_COL
+    mova      m4, m5
+    psubsw    m5, m7
+    paddsw    m1, m5
+    paddsw    m4, m7
+    por       m1, [pw_1]
+    psllw     m2, SHIFT_FRW_COL+1
+    pmulhw    m5, [pw_fdct_tg+16]
+    mova      m7, m4
+    psubsw    m3, [blockq+%1+80]
+    psubsw    m4, m6
+    mova      [blockq+%1+32], m1
+    paddsw    m7, m6
+    mova      m1, [blockq+%1+48]
+    psllw     m3, SHIFT_FRW_COL+1
+    psubsw    m1, [blockq+%1+64]
+    mova      m6, m2
+    mova      [blockq+%1+64], m4
+    paddsw    m2, m3
+    pmulhw    m2, [pw_ocos]
+    psubsw    m6, m3
+    pmulhw    m6, [pw_ocos]
+    psubsw    m5, m0
+    por       m5, [pw_1]
+    psllw     m1, SHIFT_FRW_COL
+    por       m2, [pw_1]
+    mova      m4, m1
+    mova      m3, [blockq+%1]
+    paddsw    m1, m6
+    psubsw    m3, [blockq+%1+112]
+    psubsw    m4, m6
+    mova      m0, [pw_fdct_tg]
+    psllw     m3, SHIFT_FRW_COL
+    mova      m6, [pw_fdct_tg+32]
+    pmulhw    m0, m1
+    mova      [blockq+%1], m7
+    pmulhw    m6, m4
+    mova      [blockq+%1+96], m5
+    mova      m7, m3
+    mova      m5, [pw_fdct_tg+32]
+    psubsw    m7, m2
+    paddsw    m3, m2
+    pmulhw    m5, m7
+    paddsw    m0, m3
+    paddsw    m6, m4
+    pmulhw    m3, [pw_fdct_tg]
+    por       m0, [pw_1]
+    paddsw    m5, m7
+    psubsw    m7, m6
+    mova      [blockq+%1+16], m0
+    paddsw    m5, m4
+    mova      [blockq+%1+48], m7
+    psubsw    m3, m1
+    mova      [blockq+%1+80], m5
+    mova      [blockq+%1+112], m3
+%endmacro
+
+%macro FDCT_ROW_MMX 0
+%if cpuflag(mmxext)
+    pshufw    m5, [blockq+mmsize*1], q0123
+    mova      m0, [blockq+mmsize*0]
+    mova      m1, m0
+    paddsw    m0, m5
+    psubsw    m1, m5
+    mova      m2, m0
+    punpckldq m0, m1
+    punpckhdq m2, m1
+%else
+    movd      m1, [blockq+mmsize+4]
+    punpcklwd m1, [blockq+mmsize+0]
+    mova      m2, m1
+    psrlq     m1, 32
+    mova      m0, [blockq]
+    punpcklwd m1, m2
+    mova      m5, m0
+    paddsw    m0, m1
+    psubsw    m5, m1
+    mova      m2, m0
+    punpckldq m0, m5
+    punpckhdq m2, m5
+%endif
+    mova      m1, [pw_frw_coef_mmx+offq+mmsize*0]
+    mova      m3, [pw_frw_coef_mmx+offq+mmsize*1]
+    mova      m4, [pw_frw_coef_mmx+offq+mmsize*2]
+    mova      m5, [pw_frw_coef_mmx+offq+mmsize*3]
+    mova      m6, [pw_frw_coef_mmx+offq+mmsize*4]
+    mova      m7, [pw_frw_coef_mmx+offq+mmsize*5]
+    pmaddwd   m1, m0
+    pmaddwd   m3, m2
+    pmaddwd   m4, m0
+    pmaddwd   m5, m2
+    pmaddwd   m6, m0
+    pmaddwd   m7, m2
+    pmaddwd   m0, [pw_frw_coef_mmx+offq+mmsize*6]
+    pmaddwd   m2, [pw_frw_coef_mmx+offq+mmsize*7]
+    paddd     m3, m1
+    paddd     m5, m4
+    paddd     m7, m6
+    paddd     m2, m0
+    mova      m0, [pd_row]
+    paddd     m3, m0
+    paddd     m5, m0
+    paddd     m7, m0
+    paddd     m2, m0
+    psrad     m3, SHIFT_FRW_ROW
+    psrad     m5, SHIFT_FRW_ROW
+    psrad     m7, SHIFT_FRW_ROW
+    psrad     m2, SHIFT_FRW_ROW
+    packssdw  m3, m5
+    packssdw  m7, m2
+    mova      [blockq+mmsize*0], m3
+    mova      [blockq+mmsize*1], m7
+%endmacro
+
+;-----------------------------------
+;void ff_fdct_<opt>(int16_t *block)
+;-----------------------------------
+%macro FDCT_MMX 0
+cglobal fdct, 1, 3, 0, block, cnt, off
+    FDCT_COL  0
+    FDCT_COL  mmsize
+    mov offq, 0
+    mov cntd, 8
+.loop:
+    FDCT_ROW_MMX
+    add blockq, mmsize*2
+    add offq,   mmsize*8
+    dec cntd
+    jg .loop
+    RET
+%endmacro
+
+INIT_MMX mmx
+FDCT_MMX
+INIT_MMX mmxext
+FDCT_MMX
+
+; %1 = block offset
+; %2 = coeff offset
+%macro FDCT_ROW_SSE2 2
+    movh      m2, [blockq+%1]
+    movh      m0, [blockq+%1+8]
+    mova      m3, [pw_frw_coef_sse2+%2+32]
+    mova      m7, [pw_frw_coef_sse2+%2+48]
+    movh      m1, m2
+    pshuflw   m0, m0, q0123
+    paddsw    m1, m0
+    psubsw    m2, m0
+    punpckldq m1, m2
+    pshufd    m2, m1, q1032
+    pmaddwd   m3, m2
+    pmaddwd   m7, m1
+    pmaddwd   m2, m5
+    pmaddwd   m1, m4
+    paddd     m3, m7
+    paddd     m1, m2
+    paddd     m3, m6
+    paddd     m1, m6
+    psrad     m3, 17
+    psrad     m1, 17
+    packssdw  m1, m3
+    mova      [blockq+%1],m1
+%endmacro
+
+;-----------------------------------
+;void ff_fdct_sse2(int16_t *block)
+;-----------------------------------
+INIT_XMM sse2
+cglobal fdct, 1, 1, 8, block
+    FDCT_COL 0
+
+    mova      m6, [pd_row]
+    mova      m4, [pw_frw_coef_sse2+0 ]
+    mova      m5, [pw_frw_coef_sse2+16]
+    FDCT_ROW_SSE2   0,   0
+    FDCT_ROW_SSE2  64,   0
+    mova      m4, [pw_frw_coef_sse2+64]
+    mova      m5, [pw_frw_coef_sse2+80]
+    FDCT_ROW_SSE2  16,  64
+    FDCT_ROW_SSE2 112,  64
+    mova      m4, [pw_frw_coef_sse2+128]
+    mova      m5, [pw_frw_coef_sse2+144]
+    FDCT_ROW_SSE2  32, 128
+    FDCT_ROW_SSE2  96, 128
+    mova      m4, [pw_frw_coef_sse2+192]
+    mova      m5, [pw_frw_coef_sse2+208]
+    FDCT_ROW_SSE2  48, 192
+    FDCT_ROW_SSE2  80, 192
+    RET
diff --git a/libavcodec/x86/fdct.c b/libavcodec/x86/fdct.c
deleted file mode 100644
index f0cd471..0000000
--- a/libavcodec/x86/fdct.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * SIMD-optimized forward DCT
- * The gcc porting is Copyright (c) 2001 Fabrice Bellard.
- * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni at gmx.at>
- * SSE2 optimization is Copyright (c) 2004 Denes Balatoni.
- *
- * from  fdctam32.c - AP922 MMX(3D-Now) forward-DCT
- *
- *  Intel Application Note AP-922 - fast, precise implementation of DCT
- *        http://developer.intel.com/vtune/cbts/appnotes.htm
- *
- * Also of inspiration:
- * a page about fdct at http://www.geocities.com/ssavekar/dct.htm
- * Skal's fdct at http://skal.planet-d.net/coding/dct.html
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/common.h"
-#include "libavutil/x86/asm.h"
-#include "libavcodec/dct.h"
-
-#if HAVE_MMX_INLINE
-
-//////////////////////////////////////////////////////////////////////
-//
-// constants for the forward DCT
-// -----------------------------
-//
-// Be sure to check that your compiler is aligning all constants to QWORD
-// (8-byte) memory boundaries!  Otherwise the unaligned memory access will
-// severely stall MMX execution.
-//
-//////////////////////////////////////////////////////////////////////
-
-#define BITS_FRW_ACC   3 //; 2 or 3 for accuracy
-#define SHIFT_FRW_COL  BITS_FRW_ACC
-#define SHIFT_FRW_ROW  (BITS_FRW_ACC + 17 - 3)
-#define RND_FRW_ROW    (1 << (SHIFT_FRW_ROW-1))
-//#define RND_FRW_COL    (1 << (SHIFT_FRW_COL-1))
-
-#define X8(x) x,x,x,x,x,x,x,x
-
-//concatenated table, for forward DCT transformation
-DECLARE_ALIGNED(16, static const int16_t, fdct_tg_all_16)[24] = {
-    X8(13036),  // tg * (2<<16) + 0.5
-    X8(27146),  // tg * (2<<16) + 0.5
-    X8(-21746)  // tg * (2<<16) + 0.5
-};
-
-DECLARE_ALIGNED(16, static const int16_t, ocos_4_16)[8] = {
-    X8(23170)   //cos * (2<<15) + 0.5
-};
-
-DECLARE_ALIGNED(16, static const int16_t, fdct_one_corr)[8] = { X8(1) };
-
-DECLARE_ALIGNED(8, static const int32_t, fdct_r_row)[2] = {RND_FRW_ROW, RND_FRW_ROW };
-
-static const struct
-{
- DECLARE_ALIGNED(16, const int32_t, fdct_r_row_sse2)[4];
-} fdct_r_row_sse2 =
-{{
- RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW
-}};
-//DECLARE_ALIGNED(16, static const long, fdct_r_row_sse2)[4] = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW};
-
-DECLARE_ALIGNED(8, static const int16_t, tab_frw_01234567)[] = {  // forward_dct coeff table
-  16384,   16384,   22725,   19266,
-  16384,   16384,   12873,    4520,
-  21407,    8867,   19266,   -4520,
-  -8867,  -21407,  -22725,  -12873,
-  16384,  -16384,   12873,  -22725,
- -16384,   16384,    4520,   19266,
-   8867,  -21407,    4520,  -12873,
-  21407,   -8867,   19266,  -22725,
-
-  22725,   22725,   31521,   26722,
-  22725,   22725,   17855,    6270,
-  29692,   12299,   26722,   -6270,
- -12299,  -29692,  -31521,  -17855,
-  22725,  -22725,   17855,  -31521,
- -22725,   22725,    6270,   26722,
-  12299,  -29692,    6270,  -17855,
-  29692,  -12299,   26722,  -31521,
-
-  21407,   21407,   29692,   25172,
-  21407,   21407,   16819,    5906,
-  27969,   11585,   25172,   -5906,
- -11585,  -27969,  -29692,  -16819,
-  21407,  -21407,   16819,  -29692,
- -21407,   21407,    5906,   25172,
-  11585,  -27969,    5906,  -16819,
-  27969,  -11585,   25172,  -29692,
-
-  19266,   19266,   26722,   22654,
-  19266,   19266,   15137,    5315,
-  25172,   10426,   22654,   -5315,
- -10426,  -25172,  -26722,  -15137,
-  19266,  -19266,   15137,  -26722,
- -19266,   19266,    5315,   22654,
-  10426,  -25172,    5315,  -15137,
-  25172,  -10426,   22654,  -26722,
-
-  16384,   16384,   22725,   19266,
-  16384,   16384,   12873,    4520,
-  21407,    8867,   19266,   -4520,
-  -8867,  -21407,  -22725,  -12873,
-  16384,  -16384,   12873,  -22725,
- -16384,   16384,    4520,   19266,
-   8867,  -21407,    4520,  -12873,
-  21407,   -8867,   19266,  -22725,
-
-  19266,   19266,   26722,   22654,
-  19266,   19266,   15137,    5315,
-  25172,   10426,   22654,   -5315,
- -10426,  -25172,  -26722,  -15137,
-  19266,  -19266,   15137,  -26722,
- -19266,   19266,    5315,   22654,
-  10426,  -25172,    5315,  -15137,
-  25172,  -10426,   22654,  -26722,
-
-  21407,   21407,   29692,   25172,
-  21407,   21407,   16819,    5906,
-  27969,   11585,   25172,   -5906,
- -11585,  -27969,  -29692,  -16819,
-  21407,  -21407,   16819,  -29692,
- -21407,   21407,    5906,   25172,
-  11585,  -27969,    5906,  -16819,
-  27969,  -11585,   25172,  -29692,
-
-  22725,   22725,   31521,   26722,
-  22725,   22725,   17855,    6270,
-  29692,   12299,   26722,   -6270,
- -12299,  -29692,  -31521,  -17855,
-  22725,  -22725,   17855,  -31521,
- -22725,   22725,    6270,   26722,
-  12299,  -29692,    6270,  -17855,
-  29692,  -12299,   26722,  -31521,
-};
-
-static const struct
-{
- DECLARE_ALIGNED(16, const int16_t, tab_frw_01234567_sse2)[256];
-} tab_frw_01234567_sse2 =
-{{
-//DECLARE_ALIGNED(16, static const int16_t, tab_frw_01234567_sse2)[] = {  // forward_dct coeff table
-#define TABLE_SSE2 C4,  C4,  C1,  C3, -C6, -C2, -C1, -C5, \
-                   C4,  C4,  C5,  C7,  C2,  C6,  C3, -C7, \
-                  -C4,  C4,  C7,  C3,  C6, -C2,  C7, -C5, \
-                   C4, -C4,  C5, -C1,  C2, -C6,  C3, -C1,
-// c1..c7 * cos(pi/4) * 2^15
-#define C1 22725
-#define C2 21407
-#define C3 19266
-#define C4 16384
-#define C5 12873
-#define C6 8867
-#define C7 4520
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 31521
-#define C2 29692
-#define C3 26722
-#define C4 22725
-#define C5 17855
-#define C6 12299
-#define C7 6270
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 29692
-#define C2 27969
-#define C3 25172
-#define C4 21407
-#define C5 16819
-#define C6 11585
-#define C7 5906
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 26722
-#define C2 25172
-#define C3 22654
-#define C4 19266
-#define C5 15137
-#define C6 10426
-#define C7 5315
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 22725
-#define C2 21407
-#define C3 19266
-#define C4 16384
-#define C5 12873
-#define C6 8867
-#define C7 4520
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 26722
-#define C2 25172
-#define C3 22654
-#define C4 19266
-#define C5 15137
-#define C6 10426
-#define C7 5315
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 29692
-#define C2 27969
-#define C3 25172
-#define C4 21407
-#define C5 16819
-#define C6 11585
-#define C7 5906
-TABLE_SSE2
-
-#undef C1
-#undef C2
-#undef C3
-#undef C4
-#undef C5
-#undef C6
-#undef C7
-#define C1 31521
-#define C2 29692
-#define C3 26722
-#define C4 22725
-#define C5 17855
-#define C6 12299
-#define C7 6270
-TABLE_SSE2
-}};
-
-#define S(s) AV_TOSTRING(s) //AV_STRINGIFY is too long
-
-#define FDCT_COL(cpu, mm, mov)\
-static av_always_inline void fdct_col_##cpu(const int16_t *in, int16_t *out, int offset)\
-{\
-    __asm__ volatile (\
-        #mov"      16(%0),  %%"#mm"0 \n\t" \
-        #mov"      96(%0),  %%"#mm"1 \n\t" \
-        #mov"    %%"#mm"0,  %%"#mm"2 \n\t" \
-        #mov"      32(%0),  %%"#mm"3 \n\t" \
-        "paddsw  %%"#mm"1,  %%"#mm"0 \n\t" \
-        #mov"      80(%0),  %%"#mm"4 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t" \
-        #mov"        (%0),  %%"#mm"5 \n\t" \
-        "paddsw  %%"#mm"3,  %%"#mm"4 \n\t" \
-        "paddsw   112(%0),  %%"#mm"5 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)", %%"#mm"4 \n\t" \
-        #mov"    %%"#mm"0,  %%"#mm"6 \n\t" \
-        "psubsw  %%"#mm"1,  %%"#mm"2 \n\t" \
-        #mov"      16(%1),  %%"#mm"1 \n\t" \
-        "psubsw  %%"#mm"4,  %%"#mm"0 \n\t" \
-        #mov"      48(%0),  %%"#mm"7 \n\t" \
-        "pmulhw  %%"#mm"0,  %%"#mm"1 \n\t" \
-        "paddsw    64(%0),  %%"#mm"7 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)", %%"#mm"5 \n\t" \
-        "paddsw  %%"#mm"4,  %%"#mm"6 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)", %%"#mm"7 \n\t" \
-        #mov"    %%"#mm"5,  %%"#mm"4 \n\t" \
-        "psubsw  %%"#mm"7,  %%"#mm"5 \n\t" \
-        "paddsw  %%"#mm"5,  %%"#mm"1 \n\t" \
-        "paddsw  %%"#mm"7,  %%"#mm"4 \n\t" \
-        "por         (%2),  %%"#mm"1 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)"+1, %%"#mm"2 \n\t" \
-        "pmulhw    16(%1),  %%"#mm"5 \n\t" \
-        #mov"    %%"#mm"4,  %%"#mm"7 \n\t" \
-        "psubsw    80(%0),  %%"#mm"3 \n\t" \
-        "psubsw  %%"#mm"6,  %%"#mm"4 \n\t" \
-        #mov"    %%"#mm"1,    32(%3) \n\t" \
-        "paddsw  %%"#mm"6,  %%"#mm"7 \n\t" \
-        #mov"      48(%0),  %%"#mm"1 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)"+1, %%"#mm"3 \n\t" \
-        "psubsw    64(%0),  %%"#mm"1 \n\t" \
-        #mov"    %%"#mm"2,  %%"#mm"6 \n\t" \
-        #mov"    %%"#mm"4,    64(%3) \n\t" \
-        "paddsw  %%"#mm"3,  %%"#mm"2 \n\t" \
-        "pmulhw      (%4),  %%"#mm"2 \n\t" \
-        "psubsw  %%"#mm"3,  %%"#mm"6 \n\t" \
-        "pmulhw      (%4),  %%"#mm"6 \n\t" \
-        "psubsw  %%"#mm"0,  %%"#mm"5 \n\t" \
-        "por         (%2),  %%"#mm"5 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)", %%"#mm"1 \n\t" \
-        "por         (%2),  %%"#mm"2 \n\t" \
-        #mov"    %%"#mm"1,  %%"#mm"4 \n\t" \
-        #mov"        (%0),  %%"#mm"3 \n\t" \
-        "paddsw  %%"#mm"6,  %%"#mm"1 \n\t" \
-        "psubsw   112(%0),  %%"#mm"3 \n\t" \
-        "psubsw  %%"#mm"6,  %%"#mm"4 \n\t" \
-        #mov"        (%1),  %%"#mm"0 \n\t" \
-        "psllw  $"S(SHIFT_FRW_COL)", %%"#mm"3 \n\t" \
-        #mov"      32(%1),  %%"#mm"6 \n\t" \
-        "pmulhw  %%"#mm"1,  %%"#mm"0 \n\t" \
-        #mov"    %%"#mm"7,      (%3) \n\t" \
-        "pmulhw  %%"#mm"4,  %%"#mm"6 \n\t" \
-        #mov"    %%"#mm"5,    96(%3) \n\t" \
-        #mov"    %%"#mm"3,  %%"#mm"7 \n\t" \
-        #mov"      32(%1),  %%"#mm"5 \n\t" \
-        "psubsw  %%"#mm"2,  %%"#mm"7 \n\t" \
-        "paddsw  %%"#mm"2,  %%"#mm"3 \n\t" \
-        "pmulhw  %%"#mm"7,  %%"#mm"5 \n\t" \
-        "paddsw  %%"#mm"3,  %%"#mm"0 \n\t" \
-        "paddsw  %%"#mm"4,  %%"#mm"6 \n\t" \
-        "pmulhw      (%1),  %%"#mm"3 \n\t" \
-        "por         (%2),  %%"#mm"0 \n\t" \
-        "paddsw  %%"#mm"7,  %%"#mm"5 \n\t" \
-        "psubsw  %%"#mm"6,  %%"#mm"7 \n\t" \
-        #mov"    %%"#mm"0,    16(%3) \n\t" \
-        "paddsw  %%"#mm"4,  %%"#mm"5 \n\t" \
-        #mov"    %%"#mm"7,    48(%3) \n\t" \
-        "psubsw  %%"#mm"1,  %%"#mm"3 \n\t" \
-        #mov"    %%"#mm"5,    80(%3) \n\t" \
-        #mov"    %%"#mm"3,   112(%3) \n\t" \
-        : \
-        : "r" (in  + offset), "r" (fdct_tg_all_16), "r" (fdct_one_corr), \
-          "r" (out + offset), "r" (ocos_4_16)); \
-}
-
-FDCT_COL(mmx, mm, movq)
-FDCT_COL(sse2, xmm, movdqa)
-
-static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out)
-{
-    __asm__ volatile(
-#define FDCT_ROW_SSE2_H1(i,t)                    \
-        "movq      " #i "(%0), %%xmm2      \n\t" \
-        "movq      " #i "+8(%0), %%xmm0    \n\t" \
-        "movdqa    " #t "+32(%1), %%xmm3   \n\t" \
-        "movdqa    " #t "+48(%1), %%xmm7   \n\t" \
-        "movdqa    " #t "(%1), %%xmm4      \n\t" \
-        "movdqa    " #t "+16(%1), %%xmm5   \n\t"
-
-#define FDCT_ROW_SSE2_H2(i,t)                    \
-        "movq      " #i "(%0), %%xmm2      \n\t" \
-        "movq      " #i "+8(%0), %%xmm0    \n\t" \
-        "movdqa    " #t "+32(%1), %%xmm3   \n\t" \
-        "movdqa    " #t "+48(%1), %%xmm7   \n\t"
-
-#define FDCT_ROW_SSE2(i)                      \
-        "movq      %%xmm2, %%xmm1       \n\t" \
-        "pshuflw   $27, %%xmm0, %%xmm0  \n\t" \
-        "paddsw    %%xmm0, %%xmm1       \n\t" \
-        "psubsw    %%xmm0, %%xmm2       \n\t" \
-        "punpckldq %%xmm2, %%xmm1       \n\t" \
-        "pshufd    $78, %%xmm1, %%xmm2  \n\t" \
-        "pmaddwd   %%xmm2, %%xmm3       \n\t" \
-        "pmaddwd   %%xmm1, %%xmm7       \n\t" \
-        "pmaddwd   %%xmm5, %%xmm2       \n\t" \
-        "pmaddwd   %%xmm4, %%xmm1       \n\t" \
-        "paddd     %%xmm7, %%xmm3       \n\t" \
-        "paddd     %%xmm2, %%xmm1       \n\t" \
-        "paddd     %%xmm6, %%xmm3       \n\t" \
-        "paddd     %%xmm6, %%xmm1       \n\t" \
-        "psrad     %3, %%xmm3           \n\t" \
-        "psrad     %3, %%xmm1           \n\t" \
-        "packssdw  %%xmm3, %%xmm1       \n\t" \
-        "movdqa    %%xmm1, " #i "(%4)   \n\t"
-
-        "movdqa    (%2), %%xmm6         \n\t"
-        FDCT_ROW_SSE2_H1(0,0)
-        FDCT_ROW_SSE2(0)
-        FDCT_ROW_SSE2_H2(64,0)
-        FDCT_ROW_SSE2(64)
-
-        FDCT_ROW_SSE2_H1(16,64)
-        FDCT_ROW_SSE2(16)
-        FDCT_ROW_SSE2_H2(112,64)
-        FDCT_ROW_SSE2(112)
-
-        FDCT_ROW_SSE2_H1(32,128)
-        FDCT_ROW_SSE2(32)
-        FDCT_ROW_SSE2_H2(96,128)
-        FDCT_ROW_SSE2(96)
-
-        FDCT_ROW_SSE2_H1(48,192)
-        FDCT_ROW_SSE2(48)
-        FDCT_ROW_SSE2_H2(80,192)
-        FDCT_ROW_SSE2(80)
-        :
-        : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2),
-          "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out)
-          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
-                            "%xmm4", "%xmm5", "%xmm6", "%xmm7")
-    );
-}
-
-static av_always_inline void fdct_row_mmxext(const int16_t *in, int16_t *out,
-                                             const int16_t *table)
-{
-    __asm__ volatile (
-        "pshufw    $0x1B, 8(%0), %%mm5 \n\t"
-        "movq       (%0), %%mm0 \n\t"
-        "movq      %%mm0, %%mm1 \n\t"
-        "paddsw    %%mm5, %%mm0 \n\t"
-        "psubsw    %%mm5, %%mm1 \n\t"
-        "movq      %%mm0, %%mm2 \n\t"
-        "punpckldq %%mm1, %%mm0 \n\t"
-        "punpckhdq %%mm1, %%mm2 \n\t"
-        "movq       (%1), %%mm1 \n\t"
-        "movq      8(%1), %%mm3 \n\t"
-        "movq     16(%1), %%mm4 \n\t"
-        "movq     24(%1), %%mm5 \n\t"
-        "movq     32(%1), %%mm6 \n\t"
-        "movq     40(%1), %%mm7 \n\t"
-        "pmaddwd   %%mm0, %%mm1 \n\t"
-        "pmaddwd   %%mm2, %%mm3 \n\t"
-        "pmaddwd   %%mm0, %%mm4 \n\t"
-        "pmaddwd   %%mm2, %%mm5 \n\t"
-        "pmaddwd   %%mm0, %%mm6 \n\t"
-        "pmaddwd   %%mm2, %%mm7 \n\t"
-        "pmaddwd  48(%1), %%mm0 \n\t"
-        "pmaddwd  56(%1), %%mm2 \n\t"
-        "paddd     %%mm1, %%mm3 \n\t"
-        "paddd     %%mm4, %%mm5 \n\t"
-        "paddd     %%mm6, %%mm7 \n\t"
-        "paddd     %%mm0, %%mm2 \n\t"
-        "movq       (%2), %%mm0 \n\t"
-        "paddd     %%mm0, %%mm3 \n\t"
-        "paddd     %%mm0, %%mm5 \n\t"
-        "paddd     %%mm0, %%mm7 \n\t"
-        "paddd     %%mm0, %%mm2 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
-        "packssdw  %%mm5, %%mm3 \n\t"
-        "packssdw  %%mm2, %%mm7 \n\t"
-        "movq      %%mm3,  (%3) \n\t"
-        "movq      %%mm7, 8(%3) \n\t"
-        :
-        : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
-}
-
-static av_always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table)
-{
-    //FIXME reorder (I do not have an old MMX-only CPU here to benchmark ...)
-    __asm__ volatile(
-        "movd     12(%0), %%mm1 \n\t"
-        "punpcklwd 8(%0), %%mm1 \n\t"
-        "movq      %%mm1, %%mm2 \n\t"
-        "psrlq     $0x20, %%mm1 \n\t"
-        "movq      0(%0), %%mm0 \n\t"
-        "punpcklwd %%mm2, %%mm1 \n\t"
-        "movq      %%mm0, %%mm5 \n\t"
-        "paddsw    %%mm1, %%mm0 \n\t"
-        "psubsw    %%mm1, %%mm5 \n\t"
-        "movq      %%mm0, %%mm2 \n\t"
-        "punpckldq %%mm5, %%mm0 \n\t"
-        "punpckhdq %%mm5, %%mm2 \n\t"
-        "movq      0(%1), %%mm1 \n\t"
-        "movq      8(%1), %%mm3 \n\t"
-        "movq     16(%1), %%mm4 \n\t"
-        "movq     24(%1), %%mm5 \n\t"
-        "movq     32(%1), %%mm6 \n\t"
-        "movq     40(%1), %%mm7 \n\t"
-        "pmaddwd   %%mm0, %%mm1 \n\t"
-        "pmaddwd   %%mm2, %%mm3 \n\t"
-        "pmaddwd   %%mm0, %%mm4 \n\t"
-        "pmaddwd   %%mm2, %%mm5 \n\t"
-        "pmaddwd   %%mm0, %%mm6 \n\t"
-        "pmaddwd   %%mm2, %%mm7 \n\t"
-        "pmaddwd  48(%1), %%mm0 \n\t"
-        "pmaddwd  56(%1), %%mm2 \n\t"
-        "paddd     %%mm1, %%mm3 \n\t"
-        "paddd     %%mm4, %%mm5 \n\t"
-        "paddd     %%mm6, %%mm7 \n\t"
-        "paddd     %%mm0, %%mm2 \n\t"
-        "movq       (%2), %%mm0 \n\t"
-        "paddd     %%mm0, %%mm3 \n\t"
-        "paddd     %%mm0, %%mm5 \n\t"
-        "paddd     %%mm0, %%mm7 \n\t"
-        "paddd     %%mm0, %%mm2 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
-        "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
-        "packssdw  %%mm5, %%mm3 \n\t"
-        "packssdw  %%mm2, %%mm7 \n\t"
-        "movq      %%mm3, 0(%3) \n\t"
-        "movq      %%mm7, 8(%3) \n\t"
-        :
-        : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
-}
-
-void ff_fdct_mmx(int16_t *block)
-{
-    DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
-    int16_t * block1= (int16_t*)align_tmp;
-    const int16_t *table= tab_frw_01234567;
-    int i;
-
-    fdct_col_mmx(block, block1, 0);
-    fdct_col_mmx(block, block1, 4);
-
-    for(i=8;i>0;i--) {
-        fdct_row_mmx(block1, block, table);
-        block1 += 8;
-        table += 32;
-        block += 8;
-    }
-}
-
-#endif /* HAVE_MMX_INLINE */
-
-#if HAVE_MMXEXT_INLINE
-
-void ff_fdct_mmxext(int16_t *block)
-{
-    DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
-    int16_t *block1= (int16_t*)align_tmp;
-    const int16_t *table= tab_frw_01234567;
-    int i;
-
-    fdct_col_mmx(block, block1, 0);
-    fdct_col_mmx(block, block1, 4);
-
-    for(i=8;i>0;i--) {
-        fdct_row_mmxext(block1, block, table);
-        block1 += 8;
-        table += 32;
-        block += 8;
-    }
-}
-
-#endif /* HAVE_MMXEXT_INLINE */
-
-#if HAVE_SSE2_INLINE
-
-void ff_fdct_sse2(int16_t *block)
-{
-    DECLARE_ALIGNED(16, int64_t, align_tmp)[16];
-    int16_t * const block1= (int16_t*)align_tmp;
-
-    fdct_col_sse2(block, block1, 0);
-    fdct_row_sse2(block1, block);
-}
-
-#endif /* HAVE_SSE2_INLINE */
-- 
1.8.5.5



More information about the ffmpeg-devel mailing list