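/*
 * MMX/SSE-optimized DSP utilities for libavcodec: pixel put/avg, block
 * clearing, H.263 loop filtering, MPEG-4 quarter-pel motion compensation
 * and motion-compensation edge emulation.
 */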
00025 #include "libavutil/cpu.h"
00026 #include "libavutil/x86_cpu.h"
00027 #include "libavcodec/dsputil.h"
00028 #include "libavcodec/h264dsp.h"
00029 #include "libavcodec/mpegvideo.h"
00030 #include "libavcodec/simple_idct.h"
00031 #include "libavcodec/ac3dec.h"
00032 #include "dsputil_mmx.h"
00033 #include "idct_xvid.h"
00034
00035
00036
00037
00038
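/* Packed constants used by the inline assembly below: ff_pw_* hold
   replicated 16-bit words, ff_pb_* replicated bytes, ff_pd_* doubles.
   8-byte entries feed MMX code, 16-byte (xmm_reg) entries feed SSE/SSE2. */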
00039 DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
00040 DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
00041
00042 DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
00043 {0x8000000080000000ULL, 0x8000000080000000ULL};
00044
00045 DECLARE_ALIGNED(8, const uint64_t, ff_pw_1 ) = 0x0001000100010001ULL;
00046 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2 ) = {0x0002000200020002ULL, 0x0002000200020002ULL};
00047 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3 ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
00048 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4 ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
00049 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
00050 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
00051 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9 ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
00052 DECLARE_ALIGNED(8, const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
00053 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
00054 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
00055 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
00056 DECLARE_ALIGNED(8, const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
00057 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
00058 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
00059 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
00060 DECLARE_ALIGNED(8, const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
00061 DECLARE_ALIGNED(8, const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
00062 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
00063 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
00064 DECLARE_ALIGNED(8, const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
00065 DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
00066 DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
00067
00068 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0 ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
00069 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1 ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
00070 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3 ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
00071 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4 ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
00072 DECLARE_ALIGNED(8, const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
00073 DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
00074 DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
00075 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
00076 DECLARE_ALIGNED(8, const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
00077 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
00078 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
00079 DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
00080 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};
00081
00082 DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
00083 DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
00084
00085 #define JUMPALIGN() __asm__ volatile (".p2align 3"::)
00086 #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
00087
00088 #define MOVQ_BFE(regd) \
00089 __asm__ volatile ( \
00090 "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
00091 "paddb %%" #regd ", %%" #regd " \n\t" ::)
00092
00093 #ifndef PIC
00094 #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
00095 #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
00096 #else
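/* In PIC builds, avoid loading these constants from memory; synthesize the
   0x01... and 0x0002... patterns directly in the target register instead. */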
00097
00098
00099 #define MOVQ_BONE(regd) \
00100 __asm__ volatile ( \
00101 "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
00102 "psrlw $15, %%" #regd " \n\t" \
00103 "packuswb %%" #regd ", %%" #regd " \n\t" ::)
00104
00105 #define MOVQ_WTWO(regd) \
00106 __asm__ volatile ( \
00107 "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
00108 "psrlw $15, %%" #regd " \n\t" \
00109 "psllw $1, %%" #regd " \n\t"::)
00110
00111 #endif
00112
00113
00114
00115
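/* Byte-wise averaging without a native pavgb:
 *   no-rnd: (a & b) + (((a ^ b) & 0xFE) >> 1)   rounds down
 *   rnd:    (a | b) - (((a ^ b) & 0xFE) >> 1)   rounds up
 * regfe must hold 0xFEFE...FE, regb is clobbered and the result lands in
 * regr.  The PAVGBP variants process two pairs at once and expect the 0xFE
 * mask in %%mm6. */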
00116 #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
00117 "movq " #rega ", " #regr " \n\t"\
00118 "pand " #regb ", " #regr " \n\t"\
00119 "pxor " #rega ", " #regb " \n\t"\
00120 "pand " #regfe ", " #regb " \n\t"\
00121 "psrlq $1, " #regb " \n\t"\
00122 "paddb " #regb ", " #regr " \n\t"
00123
00124 #define PAVGB_MMX(rega, regb, regr, regfe) \
00125 "movq " #rega ", " #regr " \n\t"\
00126 "por " #regb ", " #regr " \n\t"\
00127 "pxor " #rega ", " #regb " \n\t"\
00128 "pand " #regfe ", " #regb " \n\t"\
00129 "psrlq $1, " #regb " \n\t"\
00130 "psubb " #regb ", " #regr " \n\t"
00131
00132
00133 #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
00134 "movq " #rega ", " #regr " \n\t"\
00135 "movq " #regc ", " #regp " \n\t"\
00136 "pand " #regb ", " #regr " \n\t"\
00137 "pand " #regd ", " #regp " \n\t"\
00138 "pxor " #rega ", " #regb " \n\t"\
00139 "pxor " #regc ", " #regd " \n\t"\
00140 "pand %%mm6, " #regb " \n\t"\
00141 "pand %%mm6, " #regd " \n\t"\
00142 "psrlq $1, " #regb " \n\t"\
00143 "psrlq $1, " #regd " \n\t"\
00144 "paddb " #regb ", " #regr " \n\t"\
00145 "paddb " #regd ", " #regp " \n\t"
00146
00147 #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
00148 "movq " #rega ", " #regr " \n\t"\
00149 "movq " #regc ", " #regp " \n\t"\
00150 "por " #regb ", " #regr " \n\t"\
00151 "por " #regd ", " #regp " \n\t"\
00152 "pxor " #rega ", " #regb " \n\t"\
00153 "pxor " #regc ", " #regd " \n\t"\
00154 "pand %%mm6, " #regb " \n\t"\
00155 "pand %%mm6, " #regd " \n\t"\
00156 "psrlq $1, " #regd " \n\t"\
00157 "psrlq $1, " #regb " \n\t"\
00158 "psubb " #regb ", " #regr " \n\t"\
00159 "psubb " #regd ", " #regp " \n\t"
00160
00161
00162
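/* No-rounding variants of the half-pel put/avg functions (DEF adds the
   _no_rnd_ infix); they use the round-down byte averages. */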
00163 #define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
00164 #define SET_RND MOVQ_WONE
00165 #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
00166 #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
00167 #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)
00168
00169 #include "dsputil_mmx_rnd_template.c"
00170
00171 #undef DEF
00172 #undef SET_RND
00173 #undef PAVGBP
00174 #undef PAVGB
00175
00176
00177
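/* Rounding variants: same template, round-up averages and MOVQ_WTWO as the
   rounder. */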
00178 #define DEF(x, y) x ## _ ## y ##_mmx
00179 #define SET_RND MOVQ_WTWO
00180 #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
00181 #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
00182
00183 #include "dsputil_mmx_rnd_template.c"
00184
00185 #undef DEF
00186 #undef SET_RND
00187 #undef PAVGBP
00188 #undef PAVGB
00189 #undef OP_AVG
00190
00191
00192
00193
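/* 3DNow! versions of the averaging functions, built on pavgusb. */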
00194 #define DEF(x) x ## _3dnow
00195 #define PAVGB "pavgusb"
00196 #define OP_AVG PAVGB
00197
00198 #include "dsputil_mmx_avg_template.c"
00199
00200 #undef DEF
00201 #undef PAVGB
00202 #undef OP_AVG
00203
00204
00205
00206
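/* MMX2 versions, built on pavgb. */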
00207 #define DEF(x) x ## _mmx2
00208
00209
00210 #define PAVGB "pavgb"
00211 #define OP_AVG PAVGB
00212
00213 #include "dsputil_mmx_avg_template.c"
00214
00215 #undef DEF
00216 #undef PAVGB
00217 #undef OP_AVG
00218
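/* A plain copy needs neither rounding nor pavgb, so the put variants can all
   share the MMX implementation. */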
00219 #define put_no_rnd_pixels16_mmx put_pixels16_mmx
00220 #define put_no_rnd_pixels8_mmx put_pixels8_mmx
00221 #define put_pixels16_mmx2 put_pixels16_mmx
00222 #define put_pixels8_mmx2 put_pixels8_mmx
00223 #define put_pixels4_mmx2 put_pixels4_mmx
00224 #define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
00225 #define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
00226 #define put_pixels16_3dnow put_pixels16_mmx
00227 #define put_pixels8_3dnow put_pixels8_mmx
00228 #define put_pixels4_3dnow put_pixels4_mmx
00229 #define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
00230 #define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
00231
00232
00233
00234
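/* Clamp 16-bit IDCT coefficients to unsigned 8-bit pixels: pack 64 words
   with unsigned saturation and store four lines per asm block. */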
00235 void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
00236 {
00237 const DCTELEM *p;
00238 uint8_t *pix;
00239
00240
00241 p = block;
00242 pix = pixels;
00243
00244 __asm__ volatile(
00245 "movq %3, %%mm0 \n\t"
00246 "movq 8%3, %%mm1 \n\t"
00247 "movq 16%3, %%mm2 \n\t"
00248 "movq 24%3, %%mm3 \n\t"
00249 "movq 32%3, %%mm4 \n\t"
00250 "movq 40%3, %%mm5 \n\t"
00251 "movq 48%3, %%mm6 \n\t"
00252 "movq 56%3, %%mm7 \n\t"
00253 "packuswb %%mm1, %%mm0 \n\t"
00254 "packuswb %%mm3, %%mm2 \n\t"
00255 "packuswb %%mm5, %%mm4 \n\t"
00256 "packuswb %%mm7, %%mm6 \n\t"
00257 "movq %%mm0, (%0) \n\t"
00258 "movq %%mm2, (%0, %1) \n\t"
00259 "movq %%mm4, (%0, %1, 2) \n\t"
00260 "movq %%mm6, (%0, %2) \n\t"
00261 ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
00262 :"memory");
00263 pix += line_size*4;
00264 p += 32;
00265
00266
00267
00268
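/* Same for the remaining four lines; here the coefficients are addressed
   through a register instead of the %3 memory operand. */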
00269 __asm__ volatile(
00270 "movq (%3), %%mm0 \n\t"
00271 "movq 8(%3), %%mm1 \n\t"
00272 "movq 16(%3), %%mm2 \n\t"
00273 "movq 24(%3), %%mm3 \n\t"
00274 "movq 32(%3), %%mm4 \n\t"
00275 "movq 40(%3), %%mm5 \n\t"
00276 "movq 48(%3), %%mm6 \n\t"
00277 "movq 56(%3), %%mm7 \n\t"
00278 "packuswb %%mm1, %%mm0 \n\t"
00279 "packuswb %%mm3, %%mm2 \n\t"
00280 "packuswb %%mm5, %%mm4 \n\t"
00281 "packuswb %%mm7, %%mm6 \n\t"
00282 "movq %%mm0, (%0) \n\t"
00283 "movq %%mm2, (%0, %1) \n\t"
00284 "movq %%mm4, (%0, %1, 2) \n\t"
00285 "movq %%mm6, (%0, %2) \n\t"
00286 ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
00287 :"memory");
00288 }
00289
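/* Pack signed coefficients to bytes and add 128 (ff_pb_80) so the result is
   an unsigned pixel value. */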
00290 #define put_signed_pixels_clamped_mmx_half(off) \
00291 "movq "#off"(%2), %%mm1 \n\t"\
00292 "movq 16+"#off"(%2), %%mm2 \n\t"\
00293 "movq 32+"#off"(%2), %%mm3 \n\t"\
00294 "movq 48+"#off"(%2), %%mm4 \n\t"\
00295 "packsswb 8+"#off"(%2), %%mm1 \n\t"\
00296 "packsswb 24+"#off"(%2), %%mm2 \n\t"\
00297 "packsswb 40+"#off"(%2), %%mm3 \n\t"\
00298 "packsswb 56+"#off"(%2), %%mm4 \n\t"\
00299 "paddb %%mm0, %%mm1 \n\t"\
00300 "paddb %%mm0, %%mm2 \n\t"\
00301 "paddb %%mm0, %%mm3 \n\t"\
00302 "paddb %%mm0, %%mm4 \n\t"\
00303 "movq %%mm1, (%0) \n\t"\
00304 "movq %%mm2, (%0, %3) \n\t"\
00305 "movq %%mm3, (%0, %3, 2) \n\t"\
00306 "movq %%mm4, (%0, %1) \n\t"
00307
00308 void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
00309 {
00310 x86_reg line_skip = line_size;
00311 x86_reg line_skip3;
00312
00313 __asm__ volatile (
00314 "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
00315 "lea (%3, %3, 2), %1 \n\t"
00316 put_signed_pixels_clamped_mmx_half(0)
00317 "lea (%0, %3, 4), %0 \n\t"
00318 put_signed_pixels_clamped_mmx_half(64)
00319 :"+&r" (pixels), "=&r" (line_skip3)
00320 :"r" (block), "r"(line_skip)
00321 :"memory");
00322 }
00323
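/* Add coefficients to the existing pixels with signed-saturating adds, two
   lines per iteration. */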
00324 void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
00325 {
00326 const DCTELEM *p;
00327 uint8_t *pix;
00328 int i;
00329
00330
00331 p = block;
00332 pix = pixels;
00333 MOVQ_ZERO(mm7);
00334 i = 4;
00335 do {
00336 __asm__ volatile(
00337 "movq (%2), %%mm0 \n\t"
00338 "movq 8(%2), %%mm1 \n\t"
00339 "movq 16(%2), %%mm2 \n\t"
00340 "movq 24(%2), %%mm3 \n\t"
00341 "movq %0, %%mm4 \n\t"
00342 "movq %1, %%mm6 \n\t"
00343 "movq %%mm4, %%mm5 \n\t"
00344 "punpcklbw %%mm7, %%mm4 \n\t"
00345 "punpckhbw %%mm7, %%mm5 \n\t"
00346 "paddsw %%mm4, %%mm0 \n\t"
00347 "paddsw %%mm5, %%mm1 \n\t"
00348 "movq %%mm6, %%mm5 \n\t"
00349 "punpcklbw %%mm7, %%mm6 \n\t"
00350 "punpckhbw %%mm7, %%mm5 \n\t"
00351 "paddsw %%mm6, %%mm2 \n\t"
00352 "paddsw %%mm5, %%mm3 \n\t"
00353 "packuswb %%mm1, %%mm0 \n\t"
00354 "packuswb %%mm3, %%mm2 \n\t"
00355 "movq %%mm0, %0 \n\t"
00356 "movq %%mm2, %1 \n\t"
00357 :"+m"(*pix), "+m"(*(pix+line_size))
00358 :"r"(p)
00359 :"memory");
00360 pix += line_size*2;
00361 p += 16;
00362 } while (--i);
00363 }
00364
00365 static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00366 {
00367 __asm__ volatile(
00368 "lea (%3, %3), %%"REG_a" \n\t"
00369 ".p2align 3 \n\t"
00370 "1: \n\t"
00371 "movd (%1), %%mm0 \n\t"
00372 "movd (%1, %3), %%mm1 \n\t"
00373 "movd %%mm0, (%2) \n\t"
00374 "movd %%mm1, (%2, %3) \n\t"
00375 "add %%"REG_a", %1 \n\t"
00376 "add %%"REG_a", %2 \n\t"
00377 "movd (%1), %%mm0 \n\t"
00378 "movd (%1, %3), %%mm1 \n\t"
00379 "movd %%mm0, (%2) \n\t"
00380 "movd %%mm1, (%2, %3) \n\t"
00381 "add %%"REG_a", %1 \n\t"
00382 "add %%"REG_a", %2 \n\t"
00383 "subl $4, %0 \n\t"
00384 "jnz 1b \n\t"
00385 : "+g"(h), "+r" (pixels), "+r" (block)
00386 : "r"((x86_reg)line_size)
00387 : "%"REG_a, "memory"
00388 );
00389 }
00390
00391 static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00392 {
00393 __asm__ volatile(
00394 "lea (%3, %3), %%"REG_a" \n\t"
00395 ".p2align 3 \n\t"
00396 "1: \n\t"
00397 "movq (%1), %%mm0 \n\t"
00398 "movq (%1, %3), %%mm1 \n\t"
00399 "movq %%mm0, (%2) \n\t"
00400 "movq %%mm1, (%2, %3) \n\t"
00401 "add %%"REG_a", %1 \n\t"
00402 "add %%"REG_a", %2 \n\t"
00403 "movq (%1), %%mm0 \n\t"
00404 "movq (%1, %3), %%mm1 \n\t"
00405 "movq %%mm0, (%2) \n\t"
00406 "movq %%mm1, (%2, %3) \n\t"
00407 "add %%"REG_a", %1 \n\t"
00408 "add %%"REG_a", %2 \n\t"
00409 "subl $4, %0 \n\t"
00410 "jnz 1b \n\t"
00411 : "+g"(h), "+r" (pixels), "+r" (block)
00412 : "r"((x86_reg)line_size)
00413 : "%"REG_a, "memory"
00414 );
00415 }
00416
00417 static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00418 {
00419 __asm__ volatile(
00420 "lea (%3, %3), %%"REG_a" \n\t"
00421 ".p2align 3 \n\t"
00422 "1: \n\t"
00423 "movq (%1), %%mm0 \n\t"
00424 "movq 8(%1), %%mm4 \n\t"
00425 "movq (%1, %3), %%mm1 \n\t"
00426 "movq 8(%1, %3), %%mm5 \n\t"
00427 "movq %%mm0, (%2) \n\t"
00428 "movq %%mm4, 8(%2) \n\t"
00429 "movq %%mm1, (%2, %3) \n\t"
00430 "movq %%mm5, 8(%2, %3) \n\t"
00431 "add %%"REG_a", %1 \n\t"
00432 "add %%"REG_a", %2 \n\t"
00433 "movq (%1), %%mm0 \n\t"
00434 "movq 8(%1), %%mm4 \n\t"
00435 "movq (%1, %3), %%mm1 \n\t"
00436 "movq 8(%1, %3), %%mm5 \n\t"
00437 "movq %%mm0, (%2) \n\t"
00438 "movq %%mm4, 8(%2) \n\t"
00439 "movq %%mm1, (%2, %3) \n\t"
00440 "movq %%mm5, 8(%2, %3) \n\t"
00441 "add %%"REG_a", %1 \n\t"
00442 "add %%"REG_a", %2 \n\t"
00443 "subl $4, %0 \n\t"
00444 "jnz 1b \n\t"
00445 : "+g"(h), "+r" (pixels), "+r" (block)
00446 : "r"((x86_reg)line_size)
00447 : "%"REG_a, "memory"
00448 );
00449 }
00450
00451 static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00452 {
00453 __asm__ volatile(
00454 "1: \n\t"
00455 "movdqu (%1), %%xmm0 \n\t"
00456 "movdqu (%1,%3), %%xmm1 \n\t"
00457 "movdqu (%1,%3,2), %%xmm2 \n\t"
00458 "movdqu (%1,%4), %%xmm3 \n\t"
00459 "movdqa %%xmm0, (%2) \n\t"
00460 "movdqa %%xmm1, (%2,%3) \n\t"
00461 "movdqa %%xmm2, (%2,%3,2) \n\t"
00462 "movdqa %%xmm3, (%2,%4) \n\t"
00463 "subl $4, %0 \n\t"
00464 "lea (%1,%3,4), %1 \n\t"
00465 "lea (%2,%3,4), %2 \n\t"
00466 "jnz 1b \n\t"
00467 : "+g"(h), "+r" (pixels), "+r" (block)
00468 : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
00469 : "memory"
00470 );
00471 }
00472
00473 static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00474 {
00475 __asm__ volatile(
00476 "1: \n\t"
00477 "movdqu (%1), %%xmm0 \n\t"
00478 "movdqu (%1,%3), %%xmm1 \n\t"
00479 "movdqu (%1,%3,2), %%xmm2 \n\t"
00480 "movdqu (%1,%4), %%xmm3 \n\t"
00481 "pavgb (%2), %%xmm0 \n\t"
00482 "pavgb (%2,%3), %%xmm1 \n\t"
00483 "pavgb (%2,%3,2), %%xmm2 \n\t"
00484 "pavgb (%2,%4), %%xmm3 \n\t"
00485 "movdqa %%xmm0, (%2) \n\t"
00486 "movdqa %%xmm1, (%2,%3) \n\t"
00487 "movdqa %%xmm2, (%2,%3,2) \n\t"
00488 "movdqa %%xmm3, (%2,%4) \n\t"
00489 "subl $4, %0 \n\t"
00490 "lea (%1,%3,4), %1 \n\t"
00491 "lea (%2,%3,4), %2 \n\t"
00492 "jnz 1b \n\t"
00493 : "+g"(h), "+r" (pixels), "+r" (block)
00494 : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
00495 : "memory"
00496 );
00497 }
00498
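/* Zero n consecutive 64-coefficient DCT blocks (128 bytes each), clearing
   32 bytes of the buffer per iteration. */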
00499 #define CLEAR_BLOCKS(name,n) \
00500 static void name(DCTELEM *blocks)\
00501 {\
00502 __asm__ volatile(\
00503 "pxor %%mm7, %%mm7 \n\t"\
00504 "mov %1, %%"REG_a" \n\t"\
00505 "1: \n\t"\
00506 "movq %%mm7, (%0, %%"REG_a") \n\t"\
00507 "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
00508 "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
00509 "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
00510 "add $32, %%"REG_a" \n\t"\
00511 " js 1b \n\t"\
00512 : : "r" (((uint8_t *)blocks)+128*n),\
00513 "i" (-128*n)\
00514 : "%"REG_a\
00515 );\
00516 }
00517 CLEAR_BLOCKS(clear_blocks_mmx, 6)
00518 CLEAR_BLOCKS(clear_block_mmx, 1)
00519
00520 static void clear_block_sse(DCTELEM *block)
00521 {
00522 __asm__ volatile(
00523 "xorps %%xmm0, %%xmm0 \n"
00524 "movaps %%xmm0, (%0) \n"
00525 "movaps %%xmm0, 16(%0) \n"
00526 "movaps %%xmm0, 32(%0) \n"
00527 "movaps %%xmm0, 48(%0) \n"
00528 "movaps %%xmm0, 64(%0) \n"
00529 "movaps %%xmm0, 80(%0) \n"
00530 "movaps %%xmm0, 96(%0) \n"
00531 "movaps %%xmm0, 112(%0) \n"
00532 :: "r"(block)
00533 : "memory"
00534 );
00535 }
00536
00537 static void clear_blocks_sse(DCTELEM *blocks)
00538 {
00539 __asm__ volatile(
00540 "xorps %%xmm0, %%xmm0 \n"
00541 "mov %1, %%"REG_a" \n"
00542 "1: \n"
00543 "movaps %%xmm0, (%0, %%"REG_a") \n"
00544 "movaps %%xmm0, 16(%0, %%"REG_a") \n"
00545 "movaps %%xmm0, 32(%0, %%"REG_a") \n"
00546 "movaps %%xmm0, 48(%0, %%"REG_a") \n"
00547 "movaps %%xmm0, 64(%0, %%"REG_a") \n"
00548 "movaps %%xmm0, 80(%0, %%"REG_a") \n"
00549 "movaps %%xmm0, 96(%0, %%"REG_a") \n"
00550 "movaps %%xmm0, 112(%0, %%"REG_a") \n"
00551 "add $128, %%"REG_a" \n"
00552 " js 1b \n"
00553 : : "r" (((uint8_t *)blocks)+128*6),
00554 "i" (-128*6)
00555 : "%"REG_a
00556 );
00557 }
00558
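/* dst[i] += src[i]: 16 bytes per MMX iteration, scalar loop for the tail. */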
00559 static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
00560 x86_reg i=0;
00561 __asm__ volatile(
00562 "jmp 2f \n\t"
00563 "1: \n\t"
00564 "movq (%1, %0), %%mm0 \n\t"
00565 "movq (%2, %0), %%mm1 \n\t"
00566 "paddb %%mm0, %%mm1 \n\t"
00567 "movq %%mm1, (%2, %0) \n\t"
00568 "movq 8(%1, %0), %%mm0 \n\t"
00569 "movq 8(%2, %0), %%mm1 \n\t"
00570 "paddb %%mm0, %%mm1 \n\t"
00571 "movq %%mm1, 8(%2, %0) \n\t"
00572 "add $16, %0 \n\t"
00573 "2: \n\t"
00574 "cmp %3, %0 \n\t"
00575 " js 1b \n\t"
00576 : "+r" (i)
00577 : "r"(src), "r"(dst), "r"((x86_reg)w-15)
00578 );
00579 for(; i<w; i++)
00580 dst[i+0] += src[i+0];
00581 }
00582
00583 #if HAVE_7REGS && HAVE_TEN_OPERANDS
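/* HuffYUV-style median prediction: for each byte,
   dst[i] = diff[i] + median(left, top[i], left + top[i] - topleft),
   computed branchlessly with cmov. */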
00584 static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
00585 x86_reg w2 = -w;
00586 x86_reg x;
00587 int l = *left & 0xff;
00588 int tl = *left_top & 0xff;
00589 int t;
00590 __asm__ volatile(
00591 "mov %7, %3 \n"
00592 "1: \n"
00593 "movzbl (%3,%4), %2 \n"
00594 "mov %2, %k3 \n"
00595 "sub %b1, %b3 \n"
00596 "add %b0, %b3 \n"
00597 "mov %2, %1 \n"
00598 "cmp %0, %2 \n"
00599 "cmovg %0, %2 \n"
00600 "cmovg %1, %0 \n"
00601 "cmp %k3, %0 \n"
00602 "cmovg %k3, %0 \n"
00603 "mov %7, %3 \n"
00604 "cmp %2, %0 \n"
00605 "cmovl %2, %0 \n"
00606 "add (%6,%4), %b0 \n"
00607 "mov %b0, (%5,%4) \n"
00608 "inc %4 \n"
00609 "jl 1b \n"
00610 :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
00611 :"r"(dst+w), "r"(diff+w), "rm"(top+w)
00612 );
00613 *left = l;
00614 *left_top = tl;
00615 }
00616 #endif
00617
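/* H.263 deblocking filter kernel: %0..%3 hold the four lines around the
   block edge, %4 is 2*strength and %5 is ff_pb_FC.  The filtered inner
   lines end up in %%mm3/%%mm4 and the outer lines in %%mm5/%%mm6. */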
00618 #define H263_LOOP_FILTER \
00619 "pxor %%mm7, %%mm7 \n\t"\
00620 "movq %0, %%mm0 \n\t"\
00621 "movq %0, %%mm1 \n\t"\
00622 "movq %3, %%mm2 \n\t"\
00623 "movq %3, %%mm3 \n\t"\
00624 "punpcklbw %%mm7, %%mm0 \n\t"\
00625 "punpckhbw %%mm7, %%mm1 \n\t"\
00626 "punpcklbw %%mm7, %%mm2 \n\t"\
00627 "punpckhbw %%mm7, %%mm3 \n\t"\
00628 "psubw %%mm2, %%mm0 \n\t"\
00629 "psubw %%mm3, %%mm1 \n\t"\
00630 "movq %1, %%mm2 \n\t"\
00631 "movq %1, %%mm3 \n\t"\
00632 "movq %2, %%mm4 \n\t"\
00633 "movq %2, %%mm5 \n\t"\
00634 "punpcklbw %%mm7, %%mm2 \n\t"\
00635 "punpckhbw %%mm7, %%mm3 \n\t"\
00636 "punpcklbw %%mm7, %%mm4 \n\t"\
00637 "punpckhbw %%mm7, %%mm5 \n\t"\
00638 "psubw %%mm2, %%mm4 \n\t"\
00639 "psubw %%mm3, %%mm5 \n\t"\
00640 "psllw $2, %%mm4 \n\t"\
00641 "psllw $2, %%mm5 \n\t"\
00642 "paddw %%mm0, %%mm4 \n\t"\
00643 "paddw %%mm1, %%mm5 \n\t"\
00644 "pxor %%mm6, %%mm6 \n\t"\
00645 "pcmpgtw %%mm4, %%mm6 \n\t"\
00646 "pcmpgtw %%mm5, %%mm7 \n\t"\
00647 "pxor %%mm6, %%mm4 \n\t"\
00648 "pxor %%mm7, %%mm5 \n\t"\
00649 "psubw %%mm6, %%mm4 \n\t"\
00650 "psubw %%mm7, %%mm5 \n\t"\
00651 "psrlw $3, %%mm4 \n\t"\
00652 "psrlw $3, %%mm5 \n\t"\
00653 "packuswb %%mm5, %%mm4 \n\t"\
00654 "packsswb %%mm7, %%mm6 \n\t"\
00655 "pxor %%mm7, %%mm7 \n\t"\
00656 "movd %4, %%mm2 \n\t"\
00657 "punpcklbw %%mm2, %%mm2 \n\t"\
00658 "punpcklbw %%mm2, %%mm2 \n\t"\
00659 "punpcklbw %%mm2, %%mm2 \n\t"\
00660 "psubusb %%mm4, %%mm2 \n\t"\
00661 "movq %%mm2, %%mm3 \n\t"\
00662 "psubusb %%mm4, %%mm3 \n\t"\
00663 "psubb %%mm3, %%mm2 \n\t"\
00664 "movq %1, %%mm3 \n\t"\
00665 "movq %2, %%mm4 \n\t"\
00666 "pxor %%mm6, %%mm3 \n\t"\
00667 "pxor %%mm6, %%mm4 \n\t"\
00668 "paddusb %%mm2, %%mm3 \n\t"\
00669 "psubusb %%mm2, %%mm4 \n\t"\
00670 "pxor %%mm6, %%mm3 \n\t"\
00671 "pxor %%mm6, %%mm4 \n\t"\
00672 "paddusb %%mm2, %%mm2 \n\t"\
00673 "packsswb %%mm1, %%mm0 \n\t"\
00674 "pcmpgtb %%mm0, %%mm7 \n\t"\
00675 "pxor %%mm7, %%mm0 \n\t"\
00676 "psubb %%mm7, %%mm0 \n\t"\
00677 "movq %%mm0, %%mm1 \n\t"\
00678 "psubusb %%mm2, %%mm0 \n\t"\
00679 "psubb %%mm0, %%mm1 \n\t"\
00680 "pand %5, %%mm1 \n\t"\
00681 "psrlw $2, %%mm1 \n\t"\
00682 "pxor %%mm7, %%mm1 \n\t"\
00683 "psubb %%mm7, %%mm1 \n\t"\
00684 "movq %0, %%mm5 \n\t"\
00685 "movq %3, %%mm6 \n\t"\
00686 "psubb %%mm1, %%mm5 \n\t"\
00687 "paddb %%mm1, %%mm6 \n\t"
00688
00689 static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
00690 if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
00691 const int strength= ff_h263_loop_filter_strength[qscale];
00692
00693 __asm__ volatile(
00694
00695 H263_LOOP_FILTER
00696
00697 "movq %%mm3, %1 \n\t"
00698 "movq %%mm4, %2 \n\t"
00699 "movq %%mm5, %0 \n\t"
00700 "movq %%mm6, %3 \n\t"
00701 : "+m" (*(uint64_t*)(src - 2*stride)),
00702 "+m" (*(uint64_t*)(src - 1*stride)),
00703 "+m" (*(uint64_t*)(src + 0*stride)),
00704 "+m" (*(uint64_t*)(src + 1*stride))
00705 : "g" (2*strength), "m"(ff_pb_FC)
00706 );
00707 }
00708 }
00709
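/* The horizontal filter reuses the same kernel by transposing the 8x4 block
   around the vertical edge into a temporary buffer, filtering it, then
   transposing the result back into place. */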
00710 static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
00711 if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
00712 const int strength= ff_h263_loop_filter_strength[qscale];
00713 DECLARE_ALIGNED(8, uint64_t, temp)[4];
00714 uint8_t *btemp= (uint8_t*)temp;
00715
00716 src -= 2;
00717
00718 transpose4x4(btemp , src , 8, stride);
00719 transpose4x4(btemp+4, src + 4*stride, 8, stride);
00720 __asm__ volatile(
00721 H263_LOOP_FILTER
00722
00723 : "+m" (temp[0]),
00724 "+m" (temp[1]),
00725 "+m" (temp[2]),
00726 "+m" (temp[3])
00727 : "g" (2*strength), "m"(ff_pb_FC)
00728 );
00729
00730 __asm__ volatile(
00731 "movq %%mm5, %%mm1 \n\t"
00732 "movq %%mm4, %%mm0 \n\t"
00733 "punpcklbw %%mm3, %%mm5 \n\t"
00734 "punpcklbw %%mm6, %%mm4 \n\t"
00735 "punpckhbw %%mm3, %%mm1 \n\t"
00736 "punpckhbw %%mm6, %%mm0 \n\t"
00737 "movq %%mm5, %%mm3 \n\t"
00738 "movq %%mm1, %%mm6 \n\t"
00739 "punpcklwd %%mm4, %%mm5 \n\t"
00740 "punpcklwd %%mm0, %%mm1 \n\t"
00741 "punpckhwd %%mm4, %%mm3 \n\t"
00742 "punpckhwd %%mm0, %%mm6 \n\t"
00743 "movd %%mm5, (%0) \n\t"
00744 "punpckhdq %%mm5, %%mm5 \n\t"
00745 "movd %%mm5, (%0,%2) \n\t"
00746 "movd %%mm3, (%0,%2,2) \n\t"
00747 "punpckhdq %%mm3, %%mm3 \n\t"
00748 "movd %%mm3, (%0,%3) \n\t"
00749 "movd %%mm1, (%1) \n\t"
00750 "punpckhdq %%mm1, %%mm1 \n\t"
00751 "movd %%mm1, (%1,%2) \n\t"
00752 "movd %%mm6, (%1,%2,2) \n\t"
00753 "punpckhdq %%mm6, %%mm6 \n\t"
00754 "movd %%mm6, (%1,%3) \n\t"
00755 :: "r" (src),
00756 "r" (src + 4*stride),
00757 "r" ((x86_reg) stride ),
00758 "r" ((x86_reg)(3*stride))
00759 );
00760 }
00761 }
00762
00763
00764
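/* Replicate the left/right picture borders by w pixels and, depending on
   'sides', the top/bottom borders by h lines; this MMX version handles
   w == 8 and w == 16. */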
00765 static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
00766 {
00767 uint8_t *ptr, *last_line;
00768 int i;
00769
00770 last_line = buf + (height - 1) * wrap;
00771
00772 ptr = buf;
00773 if(w==8)
00774 {
00775 __asm__ volatile(
00776 "1: \n\t"
00777 "movd (%0), %%mm0 \n\t"
00778 "punpcklbw %%mm0, %%mm0 \n\t"
00779 "punpcklwd %%mm0, %%mm0 \n\t"
00780 "punpckldq %%mm0, %%mm0 \n\t"
00781 "movq %%mm0, -8(%0) \n\t"
00782 "movq -8(%0, %2), %%mm1 \n\t"
00783 "punpckhbw %%mm1, %%mm1 \n\t"
00784 "punpckhwd %%mm1, %%mm1 \n\t"
00785 "punpckhdq %%mm1, %%mm1 \n\t"
00786 "movq %%mm1, (%0, %2) \n\t"
00787 "add %1, %0 \n\t"
00788 "cmp %3, %0 \n\t"
00789 " jb 1b \n\t"
00790 : "+r" (ptr)
00791 : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
00792 );
00793 }
00794 else
00795 {
00796 __asm__ volatile(
00797 "1: \n\t"
00798 "movd (%0), %%mm0 \n\t"
00799 "punpcklbw %%mm0, %%mm0 \n\t"
00800 "punpcklwd %%mm0, %%mm0 \n\t"
00801 "punpckldq %%mm0, %%mm0 \n\t"
00802 "movq %%mm0, -8(%0) \n\t"
00803 "movq %%mm0, -16(%0) \n\t"
00804 "movq -8(%0, %2), %%mm1 \n\t"
00805 "punpckhbw %%mm1, %%mm1 \n\t"
00806 "punpckhwd %%mm1, %%mm1 \n\t"
00807 "punpckhdq %%mm1, %%mm1 \n\t"
00808 "movq %%mm1, (%0, %2) \n\t"
00809 "movq %%mm1, 8(%0, %2) \n\t"
00810 "add %1, %0 \n\t"
00811 "cmp %3, %0 \n\t"
00812 " jb 1b \n\t"
00813 : "+r" (ptr)
00814 : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
00815 );
00816 }
00817
00818
00819 if (sides&EDGE_TOP) {
00820 for(i = 0; i < h; i += 4) {
00821 ptr= buf - (i + 1) * wrap - w;
00822 __asm__ volatile(
00823 "1: \n\t"
00824 "movq (%1, %0), %%mm0 \n\t"
00825 "movq %%mm0, (%0) \n\t"
00826 "movq %%mm0, (%0, %2) \n\t"
00827 "movq %%mm0, (%0, %2, 2) \n\t"
00828 "movq %%mm0, (%0, %3) \n\t"
00829 "add $8, %0 \n\t"
00830 "cmp %4, %0 \n\t"
00831 " jb 1b \n\t"
00832 : "+r" (ptr)
00833 : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
00834 );
00835 }
00836 }
00837
00838 if (sides&EDGE_BOTTOM) {
00839 for(i = 0; i < h; i += 4) {
00840 ptr= last_line + (i + 1) * wrap - w;
00841 __asm__ volatile(
00842 "1: \n\t"
00843 "movq (%1, %0), %%mm0 \n\t"
00844 "movq %%mm0, (%0) \n\t"
00845 "movq %%mm0, (%0, %2) \n\t"
00846 "movq %%mm0, (%0, %2, 2) \n\t"
00847 "movq %%mm0, (%0, %3) \n\t"
00848 "add $8, %0 \n\t"
00849 "cmp %4, %0 \n\t"
00850 " jb 1b \n\t"
00851 : "+r" (ptr)
00852 : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
00853 );
00854 }
00855 }
00856 }
00857
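/* One step of the MPEG-4 quarter-pel 6-tap filter:
   ((a+b)*20 - (c+d)*6 + (e+f)*3 - (g+h) + rounder) >> 5, packed and written
   as four pixels through OP. */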
00858 #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
00859 "paddw " #m4 ", " #m3 " \n\t" \
00860 "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" \
00861 "pmullw " #m3 ", %%mm4 \n\t" \
00862 "movq "#in7", " #m3 " \n\t" \
00863 "movq "#in0", %%mm5 \n\t" \
00864 "paddw " #m3 ", %%mm5 \n\t" \
00865 "psubw %%mm5, %%mm4 \n\t" \
00866 "movq "#in1", %%mm5 \n\t" \
00867 "movq "#in2", %%mm6 \n\t" \
00868 "paddw " #m6 ", %%mm5 \n\t" \
00869 "paddw " #m5 ", %%mm6 \n\t" \
00870 "paddw %%mm6, %%mm6 \n\t" \
00871 "psubw %%mm6, %%mm5 \n\t" \
00872 "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" \
00873 "paddw " #rnd ", %%mm4 \n\t" \
00874 "paddw %%mm4, %%mm5 \n\t" \
00875 "psraw $5, %%mm5 \n\t"\
00876 "packuswb %%mm5, %%mm5 \n\t"\
00877 OP(%%mm5, out, %%mm7, d)
00878
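/* Horizontal MPEG-4 qpel low-pass.  The MMX2 versions build the 6-tap filter
   with pshufw and shifts; the 3DNow! versions compute the taps in C and only
   do the rounding, shifting and packing in MMX. */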
00879 #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
00880 static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
00881 uint64_t temp;\
00882 \
00883 __asm__ volatile(\
00884 "pxor %%mm7, %%mm7 \n\t"\
00885 "1: \n\t"\
00886 "movq (%0), %%mm0 \n\t" \
00887 "movq %%mm0, %%mm1 \n\t" \
00888 "movq %%mm0, %%mm2 \n\t" \
00889 "punpcklbw %%mm7, %%mm0 \n\t" \
00890 "punpckhbw %%mm7, %%mm1 \n\t" \
00891 "pshufw $0x90, %%mm0, %%mm5 \n\t" \
00892 "pshufw $0x41, %%mm0, %%mm6 \n\t" \
00893 "movq %%mm2, %%mm3 \n\t" \
00894 "movq %%mm2, %%mm4 \n\t" \
00895 "psllq $8, %%mm2 \n\t" \
00896 "psllq $16, %%mm3 \n\t" \
00897 "psllq $24, %%mm4 \n\t" \
00898 "punpckhbw %%mm7, %%mm2 \n\t" \
00899 "punpckhbw %%mm7, %%mm3 \n\t" \
00900 "punpckhbw %%mm7, %%mm4 \n\t" \
00901 "paddw %%mm3, %%mm5 \n\t" \
00902 "paddw %%mm2, %%mm6 \n\t" \
00903 "paddw %%mm5, %%mm5 \n\t" \
00904 "psubw %%mm5, %%mm6 \n\t" \
00905 "pshufw $0x06, %%mm0, %%mm5 \n\t" \
00906 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" \
00907 "paddw %%mm4, %%mm0 \n\t" \
00908 "paddw %%mm1, %%mm5 \n\t" \
00909 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" \
00910 "psubw %%mm5, %%mm0 \n\t" \
00911 "paddw %6, %%mm6 \n\t"\
00912 "paddw %%mm6, %%mm0 \n\t" \
00913 "psraw $5, %%mm0 \n\t"\
00914 "movq %%mm0, %5 \n\t"\
00915 \
00916 \
00917 "movq 5(%0), %%mm0 \n\t" \
00918 "movq %%mm0, %%mm5 \n\t" \
00919 "movq %%mm0, %%mm6 \n\t" \
00920 "psrlq $8, %%mm0 \n\t" \
00921 "psrlq $16, %%mm5 \n\t" \
00922 "punpcklbw %%mm7, %%mm0 \n\t" \
00923 "punpcklbw %%mm7, %%mm5 \n\t" \
00924 "paddw %%mm0, %%mm2 \n\t" \
00925 "paddw %%mm5, %%mm3 \n\t" \
00926 "paddw %%mm2, %%mm2 \n\t" \
00927 "psubw %%mm2, %%mm3 \n\t" \
00928 "movq %%mm6, %%mm2 \n\t" \
00929 "psrlq $24, %%mm6 \n\t" \
00930 "punpcklbw %%mm7, %%mm2 \n\t" \
00931 "punpcklbw %%mm7, %%mm6 \n\t" \
00932 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" \
00933 "paddw %%mm2, %%mm1 \n\t" \
00934 "paddw %%mm6, %%mm4 \n\t" \
00935 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" \
00936 "psubw %%mm4, %%mm3 \n\t" \
00937 "paddw %6, %%mm1 \n\t"\
00938 "paddw %%mm1, %%mm3 \n\t" \
00939 "psraw $5, %%mm3 \n\t"\
00940 "movq %5, %%mm1 \n\t"\
00941 "packuswb %%mm3, %%mm1 \n\t"\
00942 OP_MMX2(%%mm1, (%1),%%mm4, q)\
00943 \
00944 \
00945 "movq 9(%0), %%mm1 \n\t" \
00946 "movq %%mm1, %%mm4 \n\t" \
00947 "movq %%mm1, %%mm3 \n\t" \
00948 "psrlq $8, %%mm1 \n\t" \
00949 "psrlq $16, %%mm4 \n\t" \
00950 "punpcklbw %%mm7, %%mm1 \n\t" \
00951 "punpcklbw %%mm7, %%mm4 \n\t" \
00952 "paddw %%mm1, %%mm5 \n\t" \
00953 "paddw %%mm4, %%mm0 \n\t" \
00954 "paddw %%mm5, %%mm5 \n\t" \
00955 "psubw %%mm5, %%mm0 \n\t" \
00956 "movq %%mm3, %%mm5 \n\t" \
00957 "psrlq $24, %%mm3 \n\t" \
00958 "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" \
00959 "punpcklbw %%mm7, %%mm3 \n\t" \
00960 "paddw %%mm3, %%mm2 \n\t" \
00961 "psubw %%mm2, %%mm0 \n\t" \
00962 "movq %%mm5, %%mm2 \n\t" \
00963 "punpcklbw %%mm7, %%mm2 \n\t" \
00964 "punpckhbw %%mm7, %%mm5 \n\t" \
00965 "paddw %%mm2, %%mm6 \n\t" \
00966 "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" \
00967 "paddw %6, %%mm0 \n\t"\
00968 "paddw %%mm6, %%mm0 \n\t" \
00969 "psraw $5, %%mm0 \n\t"\
00970 \
00971 \
00972 "paddw %%mm5, %%mm3 \n\t" \
00973 "pshufw $0xF9, %%mm5, %%mm6 \n\t" \
00974 "paddw %%mm4, %%mm6 \n\t" \
00975 "pshufw $0xBE, %%mm5, %%mm4 \n\t" \
00976 "pshufw $0x6F, %%mm5, %%mm5 \n\t" \
00977 "paddw %%mm1, %%mm4 \n\t" \
00978 "paddw %%mm2, %%mm5 \n\t" \
00979 "paddw %%mm6, %%mm6 \n\t" \
00980 "psubw %%mm6, %%mm4 \n\t" \
00981 "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" \
00982 "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" \
00983 "psubw %%mm5, %%mm3 \n\t" \
00984 "paddw %6, %%mm4 \n\t"\
00985 "paddw %%mm3, %%mm4 \n\t" \
00986 "psraw $5, %%mm4 \n\t"\
00987 "packuswb %%mm4, %%mm0 \n\t"\
00988 OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
00989 \
00990 "add %3, %0 \n\t"\
00991 "add %4, %1 \n\t"\
00992 "decl %2 \n\t"\
00993 " jnz 1b \n\t"\
00994 : "+a"(src), "+c"(dst), "+D"(h)\
00995 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(temp), "m"(ROUNDER)\
00996 : "memory"\
00997 );\
00998 }\
00999 \
01000 static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01001 int i;\
01002 int16_t temp[16];\
01003 \
01004 for(i=0; i<h; i++)\
01005 {\
01006 temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
01007 temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
01008 temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
01009 temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
01010 temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
01011 temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
01012 temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
01013 temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
01014 temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
01015 temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
01016 temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
01017 temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
01018 temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
01019 temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
01020 temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
01021 temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
01022 __asm__ volatile(\
01023 "movq (%0), %%mm0 \n\t"\
01024 "movq 8(%0), %%mm1 \n\t"\
01025 "paddw %2, %%mm0 \n\t"\
01026 "paddw %2, %%mm1 \n\t"\
01027 "psraw $5, %%mm0 \n\t"\
01028 "psraw $5, %%mm1 \n\t"\
01029 "packuswb %%mm1, %%mm0 \n\t"\
01030 OP_3DNOW(%%mm0, (%1), %%mm1, q)\
01031 "movq 16(%0), %%mm0 \n\t"\
01032 "movq 24(%0), %%mm1 \n\t"\
01033 "paddw %2, %%mm0 \n\t"\
01034 "paddw %2, %%mm1 \n\t"\
01035 "psraw $5, %%mm0 \n\t"\
01036 "psraw $5, %%mm1 \n\t"\
01037 "packuswb %%mm1, %%mm0 \n\t"\
01038 OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
01039 :: "r"(temp), "r"(dst), "m"(ROUNDER)\
01040 : "memory"\
01041 );\
01042 dst+=dstStride;\
01043 src+=srcStride;\
01044 }\
01045 }\
01046 \
01047 static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01048 __asm__ volatile(\
01049 "pxor %%mm7, %%mm7 \n\t"\
01050 "1: \n\t"\
01051 "movq (%0), %%mm0 \n\t" \
01052 "movq %%mm0, %%mm1 \n\t" \
01053 "movq %%mm0, %%mm2 \n\t" \
01054 "punpcklbw %%mm7, %%mm0 \n\t" \
01055 "punpckhbw %%mm7, %%mm1 \n\t" \
01056 "pshufw $0x90, %%mm0, %%mm5 \n\t" \
01057 "pshufw $0x41, %%mm0, %%mm6 \n\t" \
01058 "movq %%mm2, %%mm3 \n\t" \
01059 "movq %%mm2, %%mm4 \n\t" \
01060 "psllq $8, %%mm2 \n\t" \
01061 "psllq $16, %%mm3 \n\t" \
01062 "psllq $24, %%mm4 \n\t" \
01063 "punpckhbw %%mm7, %%mm2 \n\t" \
01064 "punpckhbw %%mm7, %%mm3 \n\t" \
01065 "punpckhbw %%mm7, %%mm4 \n\t" \
01066 "paddw %%mm3, %%mm5 \n\t" \
01067 "paddw %%mm2, %%mm6 \n\t" \
01068 "paddw %%mm5, %%mm5 \n\t" \
01069 "psubw %%mm5, %%mm6 \n\t" \
01070 "pshufw $0x06, %%mm0, %%mm5 \n\t" \
01071 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" \
01072 "paddw %%mm4, %%mm0 \n\t" \
01073 "paddw %%mm1, %%mm5 \n\t" \
01074 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" \
01075 "psubw %%mm5, %%mm0 \n\t" \
01076 "paddw %5, %%mm6 \n\t"\
01077 "paddw %%mm6, %%mm0 \n\t" \
01078 "psraw $5, %%mm0 \n\t"\
01079 \
01080 \
01081 "movd 5(%0), %%mm5 \n\t" \
01082 "punpcklbw %%mm7, %%mm5 \n\t" \
01083 "pshufw $0xF9, %%mm5, %%mm6 \n\t" \
01084 "paddw %%mm5, %%mm1 \n\t" \
01085 "paddw %%mm6, %%mm2 \n\t" \
01086 "pshufw $0xBE, %%mm5, %%mm6 \n\t" \
01087 "pshufw $0x6F, %%mm5, %%mm5 \n\t" \
01088 "paddw %%mm6, %%mm3 \n\t" \
01089 "paddw %%mm5, %%mm4 \n\t" \
01090 "paddw %%mm2, %%mm2 \n\t" \
01091 "psubw %%mm2, %%mm3 \n\t" \
01092 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" \
01093 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" \
01094 "psubw %%mm4, %%mm3 \n\t" \
01095 "paddw %5, %%mm1 \n\t"\
01096 "paddw %%mm1, %%mm3 \n\t" \
01097 "psraw $5, %%mm3 \n\t"\
01098 "packuswb %%mm3, %%mm0 \n\t"\
01099 OP_MMX2(%%mm0, (%1), %%mm4, q)\
01100 \
01101 "add %3, %0 \n\t"\
01102 "add %4, %1 \n\t"\
01103 "decl %2 \n\t"\
01104 " jnz 1b \n\t"\
01105 : "+a"(src), "+c"(dst), "+d"(h)\
01106 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ROUNDER)\
01107 : "memory"\
01108 );\
01109 }\
01110 \
01111 static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01112 int i;\
01113 int16_t temp[8];\
01114 \
01115 for(i=0; i<h; i++)\
01116 {\
01117 temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
01118 temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
01119 temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
01120 temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
01121 temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
01122 temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
01123 temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
01124 temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
01125 __asm__ volatile(\
01126 "movq (%0), %%mm0 \n\t"\
01127 "movq 8(%0), %%mm1 \n\t"\
01128 "paddw %2, %%mm0 \n\t"\
01129 "paddw %2, %%mm1 \n\t"\
01130 "psraw $5, %%mm0 \n\t"\
01131 "psraw $5, %%mm1 \n\t"\
01132 "packuswb %%mm1, %%mm0 \n\t"\
01133 OP_3DNOW(%%mm0, (%1), %%mm1, q)\
01134 :: "r"(temp), "r"(dst), "m"(ROUNDER)\
01135 :"memory"\
01136 );\
01137 dst+=dstStride;\
01138 src+=srcStride;\
01139 }\
01140 }
01141
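/* Vertical MPEG-4 qpel low-pass plus the 16 mcXY motion-compensation entry
   points.  The first asm pass unpacks the source rows into a 16-bit
   temporary buffer, the second runs QPEL_V_LOW down each 4-pixel column. */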
01142 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
01143 \
01144 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01145 uint64_t temp[17*4];\
01146 uint64_t *temp_ptr= temp;\
01147 int count= 17;\
01148 \
01149 \
01150 __asm__ volatile(\
01151 "pxor %%mm7, %%mm7 \n\t"\
01152 "1: \n\t"\
01153 "movq (%0), %%mm0 \n\t"\
01154 "movq (%0), %%mm1 \n\t"\
01155 "movq 8(%0), %%mm2 \n\t"\
01156 "movq 8(%0), %%mm3 \n\t"\
01157 "punpcklbw %%mm7, %%mm0 \n\t"\
01158 "punpckhbw %%mm7, %%mm1 \n\t"\
01159 "punpcklbw %%mm7, %%mm2 \n\t"\
01160 "punpckhbw %%mm7, %%mm3 \n\t"\
01161 "movq %%mm0, (%1) \n\t"\
01162 "movq %%mm1, 17*8(%1) \n\t"\
01163 "movq %%mm2, 2*17*8(%1) \n\t"\
01164 "movq %%mm3, 3*17*8(%1) \n\t"\
01165 "add $8, %1 \n\t"\
01166 "add %3, %0 \n\t"\
01167 "decl %2 \n\t"\
01168 " jnz 1b \n\t"\
01169 : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01170 : "r" ((x86_reg)srcStride)\
01171 : "memory"\
01172 );\
01173 \
01174 temp_ptr= temp;\
01175 count=4;\
01176 \
01177 \
01178 __asm__ volatile(\
01179 \
01180 "1: \n\t"\
01181 "movq (%0), %%mm0 \n\t"\
01182 "movq 8(%0), %%mm1 \n\t"\
01183 "movq 16(%0), %%mm2 \n\t"\
01184 "movq 24(%0), %%mm3 \n\t"\
01185 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
01186 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
01187 "add %4, %1 \n\t"\
01188 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
01189 \
01190 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01191 "add %4, %1 \n\t"\
01192 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01193 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
01194 "add %4, %1 \n\t"\
01195 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
01196 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
01197 "add %4, %1 \n\t"\
01198 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
01199 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
01200 "add %4, %1 \n\t"\
01201 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
01202 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
01203 "add %4, %1 \n\t"\
01204 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
01205 \
01206 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
01207 "add %4, %1 \n\t" \
01208 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
01209 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
01210 \
01211 "add $136, %0 \n\t"\
01212 "add %6, %1 \n\t"\
01213 "decl %2 \n\t"\
01214 " jnz 1b \n\t"\
01215 \
01216 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01217 : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
01218 :"memory"\
01219 );\
01220 }\
01221 \
01222 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01223 uint64_t temp[9*2];\
01224 uint64_t *temp_ptr= temp;\
01225 int count= 9;\
01226 \
01227 \
01228 __asm__ volatile(\
01229 "pxor %%mm7, %%mm7 \n\t"\
01230 "1: \n\t"\
01231 "movq (%0), %%mm0 \n\t"\
01232 "movq (%0), %%mm1 \n\t"\
01233 "punpcklbw %%mm7, %%mm0 \n\t"\
01234 "punpckhbw %%mm7, %%mm1 \n\t"\
01235 "movq %%mm0, (%1) \n\t"\
01236 "movq %%mm1, 9*8(%1) \n\t"\
01237 "add $8, %1 \n\t"\
01238 "add %3, %0 \n\t"\
01239 "decl %2 \n\t"\
01240 " jnz 1b \n\t"\
01241 : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01242 : "r" ((x86_reg)srcStride)\
01243 : "memory"\
01244 );\
01245 \
01246 temp_ptr= temp;\
01247 count=2;\
01248 \
01249 \
01250 __asm__ volatile(\
01251 \
01252 "1: \n\t"\
01253 "movq (%0), %%mm0 \n\t"\
01254 "movq 8(%0), %%mm1 \n\t"\
01255 "movq 16(%0), %%mm2 \n\t"\
01256 "movq 24(%0), %%mm3 \n\t"\
01257 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
01258 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
01259 "add %4, %1 \n\t"\
01260 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
01261 \
01262 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01263 "add %4, %1 \n\t"\
01264 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01265 \
01266 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
01267 "add %4, %1 \n\t"\
01268 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
01269 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
01270 \
01271 "add $72, %0 \n\t"\
01272 "add %6, %1 \n\t"\
01273 "decl %2 \n\t"\
01274 " jnz 1b \n\t"\
01275 \
01276 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01277 : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
01278 : "memory"\
01279 );\
01280 }\
01281 \
01282 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01283 OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
01284 }\
01285 \
01286 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01287 uint64_t temp[8];\
01288 uint8_t * const half= (uint8_t*)temp;\
01289 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01290 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01291 }\
01292 \
01293 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01294 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
01295 }\
01296 \
01297 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01298 uint64_t temp[8];\
01299 uint8_t * const half= (uint8_t*)temp;\
01300 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01301 OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
01302 }\
01303 \
01304 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01305 uint64_t temp[8];\
01306 uint8_t * const half= (uint8_t*)temp;\
01307 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01308 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01309 }\
01310 \
01311 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01312 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
01313 }\
01314 \
01315 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01316 uint64_t temp[8];\
01317 uint8_t * const half= (uint8_t*)temp;\
01318 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01319 OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
01320 }\
01321 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01322 uint64_t half[8 + 9];\
01323 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01324 uint8_t * const halfHV= ((uint8_t*)half);\
01325 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01326 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01327 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01328 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01329 }\
01330 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01331 uint64_t half[8 + 9];\
01332 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01333 uint8_t * const halfHV= ((uint8_t*)half);\
01334 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01335 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01336 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01337 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01338 }\
01339 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01340 uint64_t half[8 + 9];\
01341 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01342 uint8_t * const halfHV= ((uint8_t*)half);\
01343 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01344 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01345 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01346 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01347 }\
01348 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01349 uint64_t half[8 + 9];\
01350 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01351 uint8_t * const halfHV= ((uint8_t*)half);\
01352 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01353 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01354 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01355 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01356 }\
01357 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01358 uint64_t half[8 + 9];\
01359 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01360 uint8_t * const halfHV= ((uint8_t*)half);\
01361 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01362 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01363 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01364 }\
01365 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01366 uint64_t half[8 + 9];\
01367 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01368 uint8_t * const halfHV= ((uint8_t*)half);\
01369 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01370 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01371 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01372 }\
01373 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01374 uint64_t half[8 + 9];\
01375 uint8_t * const halfH= ((uint8_t*)half);\
01376 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01377 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01378 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01379 }\
01380 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01381 uint64_t half[8 + 9];\
01382 uint8_t * const halfH= ((uint8_t*)half);\
01383 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01384 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01385 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01386 }\
01387 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01388 uint64_t half[9];\
01389 uint8_t * const halfH= ((uint8_t*)half);\
01390 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01391 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01392 }\
01393 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01394 OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
01395 }\
01396 \
01397 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01398 uint64_t temp[32];\
01399 uint8_t * const half= (uint8_t*)temp;\
01400 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01401 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01402 }\
01403 \
01404 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01405 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
01406 }\
01407 \
01408 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01409 uint64_t temp[32];\
01410 uint8_t * const half= (uint8_t*)temp;\
01411 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01412 OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
01413 }\
01414 \
01415 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01416 uint64_t temp[32];\
01417 uint8_t * const half= (uint8_t*)temp;\
01418 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01419 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01420 }\
01421 \
01422 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01423 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
01424 }\
01425 \
01426 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01427 uint64_t temp[32];\
01428 uint8_t * const half= (uint8_t*)temp;\
01429 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01430 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
01431 }\
01432 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01433 uint64_t half[16*2 + 17*2];\
01434 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01435 uint8_t * const halfHV= ((uint8_t*)half);\
01436 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01437 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01438 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01439 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01440 }\
01441 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01442 uint64_t half[16*2 + 17*2];\
01443 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01444 uint8_t * const halfHV= ((uint8_t*)half);\
01445 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01446 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01447 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01448 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01449 }\
01450 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01451 uint64_t half[16*2 + 17*2];\
01452 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01453 uint8_t * const halfHV= ((uint8_t*)half);\
01454 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01455 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01456 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01457 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01458 }\
01459 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01460 uint64_t half[16*2 + 17*2];\
01461 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01462 uint8_t * const halfHV= ((uint8_t*)half);\
01463 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01464 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01465 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01466 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01467 }\
01468 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01469 uint64_t half[16*2 + 17*2];\
01470 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01471 uint8_t * const halfHV= ((uint8_t*)half);\
01472 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01473 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01474 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01475 }\
01476 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01477 uint64_t half[16*2 + 17*2];\
01478 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01479 uint8_t * const halfHV= ((uint8_t*)half);\
01480 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01481 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01482 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01483 }\
01484 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01485 uint64_t half[17*2];\
01486 uint8_t * const halfH= ((uint8_t*)half);\
01487 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01488 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01489 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01490 }\
01491 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01492 uint64_t half[17*2];\
01493 uint8_t * const halfH= ((uint8_t*)half);\
01494 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01495 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01496 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01497 }\
01498 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01499 uint64_t half[17*2];\
01500 uint8_t * const halfH= ((uint8_t*)half);\
01501 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01502 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01503 }
01504
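/* Store/average primitives plugged into the qpel templates above. */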
01505 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
01506 #define AVG_3DNOW_OP(a,b,temp, size) \
01507 "mov" #size " " #b ", " #temp " \n\t"\
01508 "pavgusb " #temp ", " #a " \n\t"\
01509 "mov" #size " " #a ", " #b " \n\t"
01510 #define AVG_MMX2_OP(a,b,temp, size) \
01511 "mov" #size " " #b ", " #temp " \n\t"\
01512 "pavgb " #temp ", " #a " \n\t"\
01513 "mov" #size " " #a ", " #b " \n\t"
01514
01515 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
01516 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
01517 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
01518 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
01519 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
01520 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
01521 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
01522 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
01523 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
01524
01525
01526
01527
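/* 2-tap (bilinear) approximations of the qpel functions: full- and half-pel
   positions map onto the plain and _x2/_y2/_xy2 copies, the remaining
   positions go through the three-source _l3_ helpers. */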
01528 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
01529 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01530 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
01531 }
01532 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
01533 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01534 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
01535 }
01536
01537 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
01538 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
01539 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
01540 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
01541 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
01542 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
01543 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
01544 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
01545 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
01546 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
01547 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01548 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
01549 }\
01550 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01551 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
01552 }\
01553 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
01554 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
01555 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
01556 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
01557 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
01558 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
01559 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
01560 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
01561
01562 QPEL_2TAP(put_, 16, mmx2)
01563 QPEL_2TAP(avg_, 16, mmx2)
01564 QPEL_2TAP(put_, 8, mmx2)
01565 QPEL_2TAP(avg_, 8, mmx2)
01566 QPEL_2TAP(put_, 16, 3dnow)
01567 QPEL_2TAP(avg_, 16, 3dnow)
01568 QPEL_2TAP(put_, 8, 3dnow)
01569 QPEL_2TAP(avg_, 8, 3dnow)
01570
01571
01572 #if 0
01573 static void just_return(void) { return; }
01574 #endif
01575
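/* Edge emulation: the C wrapper below clamps the source position into the
 * picture and works out which rows/columns can be copied directly; the
 * actual pixel replication is done by the yasm cores declared here. */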
01576 #if HAVE_YASM
01577 typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src,
01578 x86_reg linesize, x86_reg start_y,
01579 x86_reg end_y, x86_reg block_h,
01580 x86_reg start_x, x86_reg end_x,
01581 x86_reg block_w);
01582 extern emu_edge_core_func ff_emu_edge_core_mmx;
01583 extern emu_edge_core_func ff_emu_edge_core_sse;
01584
01585 static av_always_inline
01586 void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize,
01587 int block_w, int block_h,
01588 int src_x, int src_y, int w, int h,
01589 emu_edge_core_func *core_fn)
01590 {
01591 int start_y, start_x, end_y, end_x, src_y_add=0;
01592
01593 if(src_y>= h){
01594 src_y_add = h-1-src_y;
01595 src_y=h-1;
01596 }else if(src_y<=-block_h){
01597 src_y_add = 1-block_h-src_y;
01598 src_y=1-block_h;
01599 }
01600 if(src_x>= w){
01601 src+= (w-1-src_x);
01602 src_x=w-1;
01603 }else if(src_x<=-block_w){
01604 src+= (1-block_w-src_x);
01605 src_x=1-block_w;
01606 }
01607
01608 start_y= FFMAX(0, -src_y);
01609 start_x= FFMAX(0, -src_x);
01610 end_y= FFMIN(block_h, h-src_y);
01611 end_x= FFMIN(block_w, w-src_x);
01612 assert(start_x < end_x && block_w > 0);
01613 assert(start_y < end_y && block_h > 0);
01614
01615
01616 src += (src_y_add+start_y)*linesize + start_x;
01617 buf += start_x;
01618 core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w);
01619 }
01620
01621 #if ARCH_X86_32
01622 static av_noinline
01623 void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, int linesize,
01624 int block_w, int block_h,
01625 int src_x, int src_y, int w, int h)
01626 {
01627 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
01628 w, h, &ff_emu_edge_core_mmx);
01629 }
01630 #endif
01631 static av_noinline
01632 void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, int linesize,
01633 int block_w, int block_h,
01634 int src_x, int src_y, int w, int h)
01635 {
01636 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
01637 w, h, &ff_emu_edge_core_sse);
01638 }
01639 #endif
01640
01641 typedef void emulated_edge_mc_func (uint8_t *dst, const uint8_t *src,
01642 int linesize, int block_w, int block_h,
01643 int src_x, int src_y, int w, int h);
01644
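/* Global motion compensation of one 8-pixel-wide block using MMX bilinear
 * interpolation.  If the full-pel offset is not constant over the block,
 * more than 16 bits of sub-pel precision would be needed, or the increments
 * are not multiples of 16, it falls back to the C version ff_gmc_c(). */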
01645 static av_always_inline
01646 void gmc(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01647 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height,
01648 emulated_edge_mc_func *emu_edge_fn)
01649 {
01650 const int w = 8;
01651 const int ix = ox>>(16+shift);
01652 const int iy = oy>>(16+shift);
01653 const int oxs = ox>>4;
01654 const int oys = oy>>4;
01655 const int dxxs = dxx>>4;
01656 const int dxys = dxy>>4;
01657 const int dyxs = dyx>>4;
01658 const int dyys = dyy>>4;
01659 const uint16_t r4[4] = {r,r,r,r};
01660 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
01661 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
01662 const uint64_t shift2 = 2*shift;
01663 uint8_t edge_buf[(h+1)*stride];
01664 int x, y;
01665
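/* Spans of the affine offsets across the block, used below to check whether
 * the integer-pel part stays constant inside the block. */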
01666 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
01667 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
01668 const int dxh = dxy*(h-1);
01669 const int dyw = dyx*(w-1);
01670 if(
01671 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
01672 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
01673
01674 || (dxx|dxy|dyx|dyy)&15 )
01675 {
01676
01677 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
01678 return;
01679 }
01680
01681 src += ix + iy*stride;
01682 if( (unsigned)ix >= width-w ||
01683 (unsigned)iy >= height-h )
01684 {
01685 emu_edge_fn(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
01686 src = edge_buf;
01687 }
01688
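/* Broadcast 1<<shift into mm6 and zero mm7 for the unpacking below. */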
01689 __asm__ volatile(
01690 "movd %0, %%mm6 \n\t"
01691 "pxor %%mm7, %%mm7 \n\t"
01692 "punpcklwd %%mm6, %%mm6 \n\t"
01693 "punpcklwd %%mm6, %%mm6 \n\t"
01694 :: "r"(1<<shift)
01695 );
01696
01697 for(x=0; x<w; x+=4){
01698 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
01699 oxs - dxys + dxxs*(x+1),
01700 oxs - dxys + dxxs*(x+2),
01701 oxs - dxys + dxxs*(x+3) };
01702 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
01703 oys - dyys + dyxs*(x+1),
01704 oys - dyys + dyxs*(x+2),
01705 oys - dyys + dyxs*(x+3) };
01706
01707 for(y=0; y<h; y++){
01708 __asm__ volatile(
01709 "movq %0, %%mm4 \n\t"
01710 "movq %1, %%mm5 \n\t"
01711 "paddw %2, %%mm4 \n\t"
01712 "paddw %3, %%mm5 \n\t"
01713 "movq %%mm4, %0 \n\t"
01714 "movq %%mm5, %1 \n\t"
01715 "psrlw $12, %%mm4 \n\t"
01716 "psrlw $12, %%mm5 \n\t"
01717 : "+m"(*dx4), "+m"(*dy4)
01718 : "m"(*dxy4), "m"(*dyy4)
01719 );
01720
01721 __asm__ volatile(
01722 "movq %%mm6, %%mm2 \n\t"
01723 "movq %%mm6, %%mm1 \n\t"
01724 "psubw %%mm4, %%mm2 \n\t"
01725 "psubw %%mm5, %%mm1 \n\t"
01726 "movq %%mm2, %%mm0 \n\t"
01727 "movq %%mm4, %%mm3 \n\t"
01728 "pmullw %%mm1, %%mm0 \n\t"
01729 "pmullw %%mm5, %%mm3 \n\t"
01730 "pmullw %%mm5, %%mm2 \n\t"
01731 "pmullw %%mm4, %%mm1 \n\t"
01732
01733 "movd %4, %%mm5 \n\t"
01734 "movd %3, %%mm4 \n\t"
01735 "punpcklbw %%mm7, %%mm5 \n\t"
01736 "punpcklbw %%mm7, %%mm4 \n\t"
01737 "pmullw %%mm5, %%mm3 \n\t"
01738 "pmullw %%mm4, %%mm2 \n\t"
01739
01740 "movd %2, %%mm5 \n\t"
01741 "movd %1, %%mm4 \n\t"
01742 "punpcklbw %%mm7, %%mm5 \n\t"
01743 "punpcklbw %%mm7, %%mm4 \n\t"
01744 "pmullw %%mm5, %%mm1 \n\t"
01745 "pmullw %%mm4, %%mm0 \n\t"
01746 "paddw %5, %%mm1 \n\t"
01747 "paddw %%mm3, %%mm2 \n\t"
01748 "paddw %%mm1, %%mm0 \n\t"
01749 "paddw %%mm2, %%mm0 \n\t"
01750
01751 "psrlw %6, %%mm0 \n\t"
01752 "packuswb %%mm0, %%mm0 \n\t"
01753 "movd %%mm0, %0 \n\t"
01754
01755 : "=m"(dst[x+y*stride])
01756 : "m"(src[0]), "m"(src[1]),
01757 "m"(src[stride]), "m"(src[stride+1]),
01758 "m"(*r4), "m"(shift2)
01759 );
01760 src += stride;
01761 }
01762 src += 4-h*stride;
01763 }
01764 }
01765
01766 #if HAVE_YASM
01767 #if ARCH_X86_32
01768 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01769 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01770 {
01771 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01772 width, height, &emulated_edge_mc_mmx);
01773 }
01774 #endif
01775 static void gmc_sse(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01776 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01777 {
01778 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01779 width, height, &emulated_edge_mc_sse);
01780 }
01781 #else
01782 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01783 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01784 {
01785 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01786 width, height, &ff_emulated_edge_mc);
01787 }
01788 #endif
01789
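/* Prefetch one cache line per row of the given block: prefetcht0 on MMX2,
 * the 3DNow! prefetch instruction otherwise. */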
01790 #define PREFETCH(name, op) \
01791 static void name(void *mem, int stride, int h){\
01792 const uint8_t *p= mem;\
01793 do{\
01794 __asm__ volatile(#op" %0" :: "m"(*p));\
01795 p+= stride;\
01796 }while(--h);\
01797 }
01798 PREFETCH(prefetch_mmx2, prefetcht0)
01799 PREFETCH(prefetch_3dnow, prefetch)
01800 #undef PREFETCH
01801
01802 #include "h264_qpel_mmx.c"
01803
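/* Chroma motion compensation: prototypes of the H.264 and RV40 routines
 * implemented in external (yasm) assembly. */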
01804 void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
01805 int stride, int h, int x, int y);
01806 void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src,
01807 int stride, int h, int x, int y);
01808 void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
01809 int stride, int h, int x, int y);
01810 void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src,
01811 int stride, int h, int x, int y);
01812 void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
01813 int stride, int h, int x, int y);
01814 void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
01815 int stride, int h, int x, int y);
01816
01817 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
01818 int stride, int h, int x, int y);
01819 void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
01820 int stride, int h, int x, int y);
01821 void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
01822 int stride, int h, int x, int y);
01823 void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
01824 int stride, int h, int x, int y);
01825 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
01826 int stride, int h, int x, int y);
01827 void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
01828 int stride, int h, int x, int y);
01829
01830 void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
01831 int stride, int h, int x, int y);
01832 void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
01833 int stride, int h, int x, int y);
01834
01835 void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
01836 int stride, int h, int x, int y);
01837 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
01838 int stride, int h, int x, int y);
01839
01840 void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
01841 int stride, int h, int x, int y);
01842 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
01843 int stride, int h, int x, int y);
01844
01845 #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
01846 void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
01847 (uint8_t *dst, uint8_t *src,\
01848 int stride, int h, int x, int y);
01849
01850 CHROMA_MC(put, 2, 10, mmxext)
01851 CHROMA_MC(avg, 2, 10, mmxext)
01852 CHROMA_MC(put, 4, 10, mmxext)
01853 CHROMA_MC(avg, 4, 10, mmxext)
01854 CHROMA_MC(put, 8, 10, sse2)
01855 CHROMA_MC(avg, 8, 10, sse2)
01856 CHROMA_MC(put, 8, 10, avx)
01857 CHROMA_MC(avg, 8, 10, avx)
01858
01859
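/* CAVS full-pel copy/average: reuse the plain MMX pixel routines. */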
01860 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01861 put_pixels8_mmx(dst, src, stride, 8);
01862 }
01863 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01864 avg_pixels8_mmx(dst, src, stride, 8);
01865 }
01866 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01867 put_pixels16_mmx(dst, src, stride, 16);
01868 }
01869 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01870 avg_pixels16_mmx(dst, src, stride, 16);
01871 }
01872
01873
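/* VC-1 full-pel copy/average: likewise built on the MMX pixel routines. */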
01874 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01875 put_pixels8_mmx(dst, src, stride, 8);
01876 }
01877 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01878 avg_pixels8_mmx2(dst, src, stride, 8);
01879 }
01880
01881
01882
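/* IDCT wrappers matching the DSPContext idct_put/idct_add signatures:
 * run the chosen IDCT and then store (clamped) or add the result block. */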
01883 #if CONFIG_GPL
01884 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01885 {
01886 ff_mmx_idct (block);
01887 ff_put_pixels_clamped_mmx(block, dest, line_size);
01888 }
01889 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01890 {
01891 ff_mmx_idct (block);
01892 ff_add_pixels_clamped_mmx(block, dest, line_size);
01893 }
01894 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01895 {
01896 ff_mmxext_idct (block);
01897 ff_put_pixels_clamped_mmx(block, dest, line_size);
01898 }
01899 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01900 {
01901 ff_mmxext_idct (block);
01902 ff_add_pixels_clamped_mmx(block, dest, line_size);
01903 }
01904 #endif
01905 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
01906 {
01907 ff_idct_xvid_mmx (block);
01908 ff_put_pixels_clamped_mmx(block, dest, line_size);
01909 }
01910 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
01911 {
01912 ff_idct_xvid_mmx (block);
01913 ff_add_pixels_clamped_mmx(block, dest, line_size);
01914 }
01915 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
01916 {
01917 ff_idct_xvid_mmx2 (block);
01918 ff_put_pixels_clamped_mmx(block, dest, line_size);
01919 }
01920 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
01921 {
01922 ff_idct_xvid_mmx2 (block);
01923 ff_add_pixels_clamped_mmx(block, dest, line_size);
01924 }
01925
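/* Vorbis inverse channel coupling: per element pair, a sign-dependent
 * add/subtract turns the (magnitude, angle) values back into the two
 * channel spectra, two floats (3DNow!) or four floats (SSE) at a time. */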
01926 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
01927 {
01928 int i;
01929 __asm__ volatile("pxor %%mm7, %%mm7":);
01930 for(i=0; i<blocksize; i+=2) {
01931 __asm__ volatile(
01932 "movq %0, %%mm0 \n\t"
01933 "movq %1, %%mm1 \n\t"
01934 "movq %%mm0, %%mm2 \n\t"
01935 "movq %%mm1, %%mm3 \n\t"
01936 "pfcmpge %%mm7, %%mm2 \n\t"
01937 "pfcmpge %%mm7, %%mm3 \n\t"
01938 "pslld $31, %%mm2 \n\t"
01939 "pxor %%mm2, %%mm1 \n\t"
01940 "movq %%mm3, %%mm4 \n\t"
01941 "pand %%mm1, %%mm3 \n\t"
01942 "pandn %%mm1, %%mm4 \n\t"
01943 "pfadd %%mm0, %%mm3 \n\t"
01944 "pfsub %%mm4, %%mm0 \n\t"
01945 "movq %%mm3, %1 \n\t"
01946 "movq %%mm0, %0 \n\t"
01947 :"+m"(mag[i]), "+m"(ang[i])
01948 ::"memory"
01949 );
01950 }
01951 __asm__ volatile("femms");
01952 }
01953 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
01954 {
01955 int i;
01956
01957 __asm__ volatile(
01958 "movaps %0, %%xmm5 \n\t"
01959 ::"m"(ff_pdw_80000000[0])
01960 );
01961 for(i=0; i<blocksize; i+=4) {
01962 __asm__ volatile(
01963 "movaps %0, %%xmm0 \n\t"
01964 "movaps %1, %%xmm1 \n\t"
01965 "xorps %%xmm2, %%xmm2 \n\t"
01966 "xorps %%xmm3, %%xmm3 \n\t"
01967 "cmpleps %%xmm0, %%xmm2 \n\t"
01968 "cmpleps %%xmm1, %%xmm3 \n\t"
01969 "andps %%xmm5, %%xmm2 \n\t"
01970 "xorps %%xmm2, %%xmm1 \n\t"
01971 "movaps %%xmm3, %%xmm4 \n\t"
01972 "andps %%xmm1, %%xmm3 \n\t"
01973 "andnps %%xmm1, %%xmm4 \n\t"
01974 "addps %%xmm0, %%xmm3 \n\t"
01975 "subps %%xmm4, %%xmm0 \n\t"
01976 "movaps %%xmm3, %1 \n\t"
01977 "movaps %%xmm0, %0 \n\t"
01978 :"+m"(mag[i]), "+m"(ang[i])
01979 ::"memory"
01980 );
01981 }
01982 }
01983
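/* AC-3 downmix helpers.  IF1()/IF0() include or drop the stereo-only
 * instructions when the macros below are expanded.  MIX5 handles the common
 * 5-channel layouts where pairs of channels share a coefficient; MIX_MISC
 * is the generic matrix multiply over in_ch input channels. */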
01984 #define IF1(x) x
01985 #define IF0(x)
01986
01987 #define MIX5(mono,stereo)\
01988 __asm__ volatile(\
01989 "movss 0(%2), %%xmm5 \n"\
01990 "movss 8(%2), %%xmm6 \n"\
01991 "movss 24(%2), %%xmm7 \n"\
01992 "shufps $0, %%xmm5, %%xmm5 \n"\
01993 "shufps $0, %%xmm6, %%xmm6 \n"\
01994 "shufps $0, %%xmm7, %%xmm7 \n"\
01995 "1: \n"\
01996 "movaps (%0,%1), %%xmm0 \n"\
01997 "movaps 0x400(%0,%1), %%xmm1 \n"\
01998 "movaps 0x800(%0,%1), %%xmm2 \n"\
01999 "movaps 0xc00(%0,%1), %%xmm3 \n"\
02000 "movaps 0x1000(%0,%1), %%xmm4 \n"\
02001 "mulps %%xmm5, %%xmm0 \n"\
02002 "mulps %%xmm6, %%xmm1 \n"\
02003 "mulps %%xmm5, %%xmm2 \n"\
02004 "mulps %%xmm7, %%xmm3 \n"\
02005 "mulps %%xmm7, %%xmm4 \n"\
02006 stereo("addps %%xmm1, %%xmm0 \n")\
02007 "addps %%xmm1, %%xmm2 \n"\
02008 "addps %%xmm3, %%xmm0 \n"\
02009 "addps %%xmm4, %%xmm2 \n"\
02010 mono("addps %%xmm2, %%xmm0 \n")\
02011 "movaps %%xmm0, (%0,%1) \n"\
02012 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
02013 "add $16, %0 \n"\
02014 "jl 1b \n"\
02015 :"+&r"(i)\
02016 :"r"(samples[0]+len), "r"(matrix)\
02017 :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
02018 "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
02019 "memory"\
02020 );
02021
02022 #define MIX_MISC(stereo)\
02023 __asm__ volatile(\
02024 "1: \n"\
02025 "movaps (%3,%0), %%xmm0 \n"\
02026 stereo("movaps %%xmm0, %%xmm1 \n")\
02027 "mulps %%xmm4, %%xmm0 \n"\
02028 stereo("mulps %%xmm5, %%xmm1 \n")\
02029 "lea 1024(%3,%0), %1 \n"\
02030 "mov %5, %2 \n"\
02031 "2: \n"\
02032 "movaps (%1), %%xmm2 \n"\
02033 stereo("movaps %%xmm2, %%xmm3 \n")\
02034 "mulps (%4,%2), %%xmm2 \n"\
02035 stereo("mulps 16(%4,%2), %%xmm3 \n")\
02036 "addps %%xmm2, %%xmm0 \n"\
02037 stereo("addps %%xmm3, %%xmm1 \n")\
02038 "add $1024, %1 \n"\
02039 "add $32, %2 \n"\
02040 "jl 2b \n"\
02041 "movaps %%xmm0, (%3,%0) \n"\
02042 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
02043 "add $16, %0 \n"\
02044 "jl 1b \n"\
02045 :"+&r"(i), "=&r"(j), "=&r"(k)\
02046 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
02047 :"memory"\
02048 );
02049
02050 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
02051 {
02052 int (*matrix_cmp)[2] = (int(*)[2])matrix;
02053 intptr_t i,j,k;
02054
02055 i = -len*sizeof(float);
02056 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
02057 MIX5(IF0,IF1);
02058 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
02059 MIX5(IF1,IF0);
02060 } else {
02061 DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
02062 j = 2*in_ch*sizeof(float);
02063 __asm__ volatile(
02064 "1: \n"
02065 "sub $8, %0 \n"
02066 "movss (%2,%0), %%xmm4 \n"
02067 "movss 4(%2,%0), %%xmm5 \n"
02068 "shufps $0, %%xmm4, %%xmm4 \n"
02069 "shufps $0, %%xmm5, %%xmm5 \n"
02070 "movaps %%xmm4, (%1,%0,4) \n"
02071 "movaps %%xmm5, 16(%1,%0,4) \n"
02072 "jg 1b \n"
02073 :"+&r"(j)
02074 :"r"(matrix_simd), "r"(matrix)
02075 :"memory"
02076 );
02077 if(out_ch == 2) {
02078 MIX_MISC(IF1);
02079 } else {
02080 MIX_MISC(IF0);
02081 }
02082 }
02083 }
02084
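/* Element-wise float vector routines: vector_fmul computes
 * dst[i] = src0[i] * src1[i], vector_fmul_reverse multiplies src0 by src1
 * read backwards, and vector_fmul_add adds a third source on top. */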
02085 static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
02086 x86_reg i = (len-4)*4;
02087 __asm__ volatile(
02088 "1: \n\t"
02089 "movq (%2,%0), %%mm0 \n\t"
02090 "movq 8(%2,%0), %%mm1 \n\t"
02091 "pfmul (%3,%0), %%mm0 \n\t"
02092 "pfmul 8(%3,%0), %%mm1 \n\t"
02093 "movq %%mm0, (%1,%0) \n\t"
02094 "movq %%mm1, 8(%1,%0) \n\t"
02095 "sub $16, %0 \n\t"
02096 "jge 1b \n\t"
02097 "femms \n\t"
02098 :"+r"(i)
02099 :"r"(dst), "r"(src0), "r"(src1)
02100 :"memory"
02101 );
02102 }
02103 static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
02104 x86_reg i = (len-8)*4;
02105 __asm__ volatile(
02106 "1: \n\t"
02107 "movaps (%2,%0), %%xmm0 \n\t"
02108 "movaps 16(%2,%0), %%xmm1 \n\t"
02109 "mulps (%3,%0), %%xmm0 \n\t"
02110 "mulps 16(%3,%0), %%xmm1 \n\t"
02111 "movaps %%xmm0, (%1,%0) \n\t"
02112 "movaps %%xmm1, 16(%1,%0) \n\t"
02113 "sub $32, %0 \n\t"
02114 "jge 1b \n\t"
02115 :"+r"(i)
02116 :"r"(dst), "r"(src0), "r"(src1)
02117 :"memory"
02118 );
02119 }
02120
02121 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
02122 x86_reg i = len*4-16;
02123 __asm__ volatile(
02124 "1: \n\t"
02125 "pswapd 8(%1), %%mm0 \n\t"
02126 "pswapd (%1), %%mm1 \n\t"
02127 "pfmul (%3,%0), %%mm0 \n\t"
02128 "pfmul 8(%3,%0), %%mm1 \n\t"
02129 "movq %%mm0, (%2,%0) \n\t"
02130 "movq %%mm1, 8(%2,%0) \n\t"
02131 "add $16, %1 \n\t"
02132 "sub $16, %0 \n\t"
02133 "jge 1b \n\t"
02134 :"+r"(i), "+r"(src1)
02135 :"r"(dst), "r"(src0)
02136 );
02137 __asm__ volatile("femms");
02138 }
02139 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
02140 x86_reg i = len*4-32;
02141 __asm__ volatile(
02142 "1: \n\t"
02143 "movaps 16(%1), %%xmm0 \n\t"
02144 "movaps (%1), %%xmm1 \n\t"
02145 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
02146 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
02147 "mulps (%3,%0), %%xmm0 \n\t"
02148 "mulps 16(%3,%0), %%xmm1 \n\t"
02149 "movaps %%xmm0, (%2,%0) \n\t"
02150 "movaps %%xmm1, 16(%2,%0) \n\t"
02151 "add $32, %1 \n\t"
02152 "sub $32, %0 \n\t"
02153 "jge 1b \n\t"
02154 :"+r"(i), "+r"(src1)
02155 :"r"(dst), "r"(src0)
02156 );
02157 }
02158
02159 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
02160 const float *src2, int len){
02161 x86_reg i = (len-4)*4;
02162 __asm__ volatile(
02163 "1: \n\t"
02164 "movq (%2,%0), %%mm0 \n\t"
02165 "movq 8(%2,%0), %%mm1 \n\t"
02166 "pfmul (%3,%0), %%mm0 \n\t"
02167 "pfmul 8(%3,%0), %%mm1 \n\t"
02168 "pfadd (%4,%0), %%mm0 \n\t"
02169 "pfadd 8(%4,%0), %%mm1 \n\t"
02170 "movq %%mm0, (%1,%0) \n\t"
02171 "movq %%mm1, 8(%1,%0) \n\t"
02172 "sub $16, %0 \n\t"
02173 "jge 1b \n\t"
02174 :"+r"(i)
02175 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02176 :"memory"
02177 );
02178 __asm__ volatile("femms");
02179 }
02180 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
02181 const float *src2, int len){
02182 x86_reg i = (len-8)*4;
02183 __asm__ volatile(
02184 "1: \n\t"
02185 "movaps (%2,%0), %%xmm0 \n\t"
02186 "movaps 16(%2,%0), %%xmm1 \n\t"
02187 "mulps (%3,%0), %%xmm0 \n\t"
02188 "mulps 16(%3,%0), %%xmm1 \n\t"
02189 "addps (%4,%0), %%xmm0 \n\t"
02190 "addps 16(%4,%0), %%xmm1 \n\t"
02191 "movaps %%xmm0, (%1,%0) \n\t"
02192 "movaps %%xmm1, 16(%1,%0) \n\t"
02193 "sub $32, %0 \n\t"
02194 "jge 1b \n\t"
02195 :"+r"(i)
02196 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02197 :"memory"
02198 );
02199 }
02200
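/* Windowing/overlap step used after the inverse MDCT: multiply the two
 * halves by a symmetric window and write the results from both ends of dst
 * towards the middle (needs six free registers for the addressing). */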
02201 #if HAVE_6REGS
02202 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
02203 const float *win, int len){
02204 x86_reg i = -len*4;
02205 x86_reg j = len*4-8;
02206 __asm__ volatile(
02207 "1: \n"
02208 "pswapd (%5,%1), %%mm1 \n"
02209 "movq (%5,%0), %%mm0 \n"
02210 "pswapd (%4,%1), %%mm5 \n"
02211 "movq (%3,%0), %%mm4 \n"
02212 "movq %%mm0, %%mm2 \n"
02213 "movq %%mm1, %%mm3 \n"
02214 "pfmul %%mm4, %%mm2 \n"
02215 "pfmul %%mm5, %%mm3 \n"
02216 "pfmul %%mm4, %%mm1 \n"
02217 "pfmul %%mm5, %%mm0 \n"
02218 "pfadd %%mm3, %%mm2 \n"
02219 "pfsub %%mm0, %%mm1 \n"
02220 "pswapd %%mm2, %%mm2 \n"
02221 "movq %%mm1, (%2,%0) \n"
02222 "movq %%mm2, (%2,%1) \n"
02223 "sub $8, %1 \n"
02224 "add $8, %0 \n"
02225 "jl 1b \n"
02226 "femms \n"
02227 :"+r"(i), "+r"(j)
02228 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02229 );
02230 }
02231
02232 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
02233 const float *win, int len){
02234 x86_reg i = -len*4;
02235 x86_reg j = len*4-16;
02236 __asm__ volatile(
02237 "1: \n"
02238 "movaps (%5,%1), %%xmm1 \n"
02239 "movaps (%5,%0), %%xmm0 \n"
02240 "movaps (%4,%1), %%xmm5 \n"
02241 "movaps (%3,%0), %%xmm4 \n"
02242 "shufps $0x1b, %%xmm1, %%xmm1 \n"
02243 "shufps $0x1b, %%xmm5, %%xmm5 \n"
02244 "movaps %%xmm0, %%xmm2 \n"
02245 "movaps %%xmm1, %%xmm3 \n"
02246 "mulps %%xmm4, %%xmm2 \n"
02247 "mulps %%xmm5, %%xmm3 \n"
02248 "mulps %%xmm4, %%xmm1 \n"
02249 "mulps %%xmm5, %%xmm0 \n"
02250 "addps %%xmm3, %%xmm2 \n"
02251 "subps %%xmm0, %%xmm1 \n"
02252 "shufps $0x1b, %%xmm2, %%xmm2 \n"
02253 "movaps %%xmm1, (%2,%0) \n"
02254 "movaps %%xmm2, (%2,%1) \n"
02255 "sub $16, %1 \n"
02256 "add $16, %0 \n"
02257 "jl 1b \n"
02258 :"+r"(i), "+r"(j)
02259 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02260 );
02261 }
02262 #endif
02263
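/* Clamp every float in src to [min, max], 16 values per iteration. */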
02264 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
02265 int len)
02266 {
02267 x86_reg i = (len-16)*4;
02268 __asm__ volatile(
02269 "movss %3, %%xmm4 \n"
02270 "movss %4, %%xmm5 \n"
02271 "shufps $0, %%xmm4, %%xmm4 \n"
02272 "shufps $0, %%xmm5, %%xmm5 \n"
02273 "1: \n\t"
02274 "movaps (%2,%0), %%xmm0 \n\t"
02275 "movaps 16(%2,%0), %%xmm1 \n\t"
02276 "movaps 32(%2,%0), %%xmm2 \n\t"
02277 "movaps 48(%2,%0), %%xmm3 \n\t"
02278 "maxps %%xmm4, %%xmm0 \n\t"
02279 "maxps %%xmm4, %%xmm1 \n\t"
02280 "maxps %%xmm4, %%xmm2 \n\t"
02281 "maxps %%xmm4, %%xmm3 \n\t"
02282 "minps %%xmm5, %%xmm0 \n\t"
02283 "minps %%xmm5, %%xmm1 \n\t"
02284 "minps %%xmm5, %%xmm2 \n\t"
02285 "minps %%xmm5, %%xmm3 \n\t"
02286 "movaps %%xmm0, (%1,%0) \n\t"
02287 "movaps %%xmm1, 16(%1,%0) \n\t"
02288 "movaps %%xmm2, 32(%1,%0) \n\t"
02289 "movaps %%xmm3, 48(%1,%0) \n\t"
02290 "sub $64, %0 \n\t"
02291 "jge 1b \n\t"
02292 :"+&r"(i)
02293 :"r"(dst), "r"(src), "m"(min), "m"(max)
02294 :"memory"
02295 );
02296 }
02297
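/* Prototypes of further yasm-implemented routines: VP3 IDCT and loop
 * filters, int16 scalar products, int16 windowing, and the HuffYUV
 * median/left predictions. */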
02298 void ff_vp3_idct_mmx(int16_t *input_data);
02299 void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
02300 void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
02301
02302 void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);
02303
02304 void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
02305 void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
02306
02307 void ff_vp3_idct_sse2(int16_t *input_data);
02308 void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
02309 void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
02310
02311 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
02312 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
02313 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02314 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02315 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02316
02317 void ff_apply_window_int16_mmxext (int16_t *output, const int16_t *input,
02318 const int16_t *window, unsigned int len);
02319 void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input,
02320 const int16_t *window, unsigned int len);
02321 void ff_apply_window_int16_sse2 (int16_t *output, const int16_t *input,
02322 const int16_t *window, unsigned int len);
02323 void ff_apply_window_int16_sse2_ba (int16_t *output, const int16_t *input,
02324 const int16_t *window, unsigned int len);
02325 void ff_apply_window_int16_ssse3 (int16_t *output, const int16_t *input,
02326 const int16_t *window, unsigned int len);
02327 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
02328 const int16_t *window, unsigned int len);
02329
02330 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
02331 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
02332 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
02333
02334 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
02335
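/* Runtime dispatch: inspect the CPU flags (optionally overridden through
 * avctx->dsp_mask) and install the fastest available implementation of each
 * DSPContext function pointer, from plain MMX up to SSSE3/SSE4/AVX. */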
02336 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
02337 {
02338 int mm_flags = av_get_cpu_flags();
02339 const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;
02340 const int bit_depth = avctx->bits_per_raw_sample;
02341
02342 if (avctx->dsp_mask) {
02343 if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
02344 mm_flags |= (avctx->dsp_mask & 0xffff);
02345 else
02346 mm_flags &= ~(avctx->dsp_mask & 0xffff);
02347 }
02348
02349 #if 0
02350 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
02351 if (mm_flags & AV_CPU_FLAG_MMX)
02352 av_log(avctx, AV_LOG_INFO, " mmx");
02353 if (mm_flags & AV_CPU_FLAG_MMX2)
02354 av_log(avctx, AV_LOG_INFO, " mmx2");
02355 if (mm_flags & AV_CPU_FLAG_3DNOW)
02356 av_log(avctx, AV_LOG_INFO, " 3dnow");
02357 if (mm_flags & AV_CPU_FLAG_SSE)
02358 av_log(avctx, AV_LOG_INFO, " sse");
02359 if (mm_flags & AV_CPU_FLAG_SSE2)
02360 av_log(avctx, AV_LOG_INFO, " sse2");
02361 av_log(avctx, AV_LOG_INFO, "\n");
02362 #endif
02363
02364 if (mm_flags & AV_CPU_FLAG_MMX) {
02365 const int idct_algo= avctx->idct_algo;
02366
02367 if(avctx->lowres==0){
02368 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
02369 c->idct_put= ff_simple_idct_put_mmx;
02370 c->idct_add= ff_simple_idct_add_mmx;
02371 c->idct = ff_simple_idct_mmx;
02372 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
02373 #if CONFIG_GPL
02374 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
02375 if(mm_flags & AV_CPU_FLAG_MMX2){
02376 c->idct_put= ff_libmpeg2mmx2_idct_put;
02377 c->idct_add= ff_libmpeg2mmx2_idct_add;
02378 c->idct = ff_mmxext_idct;
02379 }else{
02380 c->idct_put= ff_libmpeg2mmx_idct_put;
02381 c->idct_add= ff_libmpeg2mmx_idct_add;
02382 c->idct = ff_mmx_idct;
02383 }
02384 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
02385 #endif
02386 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
02387 idct_algo==FF_IDCT_VP3 && HAVE_YASM){
02388 if(mm_flags & AV_CPU_FLAG_SSE2){
02389 c->idct_put= ff_vp3_idct_put_sse2;
02390 c->idct_add= ff_vp3_idct_add_sse2;
02391 c->idct = ff_vp3_idct_sse2;
02392 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02393 }else{
02394 c->idct_put= ff_vp3_idct_put_mmx;
02395 c->idct_add= ff_vp3_idct_add_mmx;
02396 c->idct = ff_vp3_idct_mmx;
02397 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
02398 }
02399 }else if(idct_algo==FF_IDCT_CAVS){
02400 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02401 }else if(idct_algo==FF_IDCT_XVIDMMX){
02402 if(mm_flags & AV_CPU_FLAG_SSE2){
02403 c->idct_put= ff_idct_xvid_sse2_put;
02404 c->idct_add= ff_idct_xvid_sse2_add;
02405 c->idct = ff_idct_xvid_sse2;
02406 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
02407 }else if(mm_flags & AV_CPU_FLAG_MMX2){
02408 c->idct_put= ff_idct_xvid_mmx2_put;
02409 c->idct_add= ff_idct_xvid_mmx2_add;
02410 c->idct = ff_idct_xvid_mmx2;
02411 }else{
02412 c->idct_put= ff_idct_xvid_mmx_put;
02413 c->idct_add= ff_idct_xvid_mmx_add;
02414 c->idct = ff_idct_xvid_mmx;
02415 }
02416 }
02417 }
02418
02419 c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
02420 c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
02421 c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
02422 if (!high_bit_depth) {
02423 c->clear_block = clear_block_mmx;
02424 c->clear_blocks = clear_blocks_mmx;
02425 if ((mm_flags & AV_CPU_FLAG_SSE) &&
02426 !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
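/* presumably because XvMC-allocated blocks need not be 16-byte aligned */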
02427
02428 c->clear_block = clear_block_sse;
02429 c->clear_blocks = clear_blocks_sse;
02430 }
02431 }
02432
02433 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02434 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
02435 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
02436 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
02437 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
02438
02439 if (!high_bit_depth) {
02440 SET_HPEL_FUNCS(put, 0, 16, mmx);
02441 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
02442 SET_HPEL_FUNCS(avg, 0, 16, mmx);
02443 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
02444 SET_HPEL_FUNCS(put, 1, 8, mmx);
02445 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
02446 SET_HPEL_FUNCS(avg, 1, 8, mmx);
02447 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
02448 }
02449
02450 #if ARCH_X86_32 || !HAVE_YASM
02451 c->gmc= gmc_mmx;
02452 #endif
02453 #if ARCH_X86_32 && HAVE_YASM
02454 if (!high_bit_depth)
02455 c->emulated_edge_mc = emulated_edge_mc_mmx;
02456 #endif
02457
02458 c->add_bytes= add_bytes_mmx;
02459
02460 if (!high_bit_depth)
02461 c->draw_edges = draw_edges_mmx;
02462
02463 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
02464 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
02465 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
02466 }
02467
02468 #if HAVE_YASM
02469 if (!high_bit_depth) {
02470 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
02471 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
02472 }
02473
02474 c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
02475 c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
02476 #endif
02477
02478 if (mm_flags & AV_CPU_FLAG_MMX2) {
02479 c->prefetch = prefetch_mmx2;
02480
02481 if (!high_bit_depth) {
02482 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
02483 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
02484
02485 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
02486 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
02487 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
02488
02489 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
02490 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
02491
02492 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
02493 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
02494 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
02495 }
02496
02497 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02498 if (!high_bit_depth) {
02499 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
02500 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
02501 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
02502 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
02503 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
02504 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
02505 }
02506
02507 if (CONFIG_VP3_DECODER && HAVE_YASM) {
02508 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
02509 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
02510 }
02511 }
02512 if (CONFIG_VP3_DECODER && HAVE_YASM) {
02513 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
02514 }
02515
02516 if (CONFIG_VP3_DECODER
02517 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
02518 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
02519 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
02520 }
02521
02522 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02523 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
02524 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
02525 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
02526 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
02527 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
02528 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
02529 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
02530 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
02531 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
02532 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
02533 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
02534 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
02535 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
02536 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
02537 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
02538 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
02539
02540 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
02541 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
02542 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
02543 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
02544 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
02545 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
02546
02547 if (!high_bit_depth) {
02548 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
02549 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
02550 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
02551 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
02552 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
02553 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
02554 }
02555
02556 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
02557 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
02558 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
02559 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
02560
02561 #if HAVE_YASM
02562 c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
02563 c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;
02564
02565 if (!high_bit_depth) {
02566 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
02567 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
02568 c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
02569 c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
02570 }
02571 if (bit_depth == 10) {
02572 c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_10_mmxext;
02573 c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_10_mmxext;
02574 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_10_mmxext;
02575 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_10_mmxext;
02576 }
02577
02578 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
02579 #endif
02580 #if HAVE_7REGS && HAVE_TEN_OPERANDS
02581 if( mm_flags&AV_CPU_FLAG_3DNOW )
02582 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
02583 #endif
02584
02585 } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
02586 c->prefetch = prefetch_3dnow;
02587
02588 if (!high_bit_depth) {
02589 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
02590 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
02591
02592 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
02593 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
02594 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
02595
02596 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
02597 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
02598
02599 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
02600 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
02601 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
02602
02603 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02604 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
02605 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
02606 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
02607 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
02608 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
02609 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
02610 }
02611 }
02612
02613 if (CONFIG_VP3_DECODER
02614 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
02615 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
02616 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
02617 }
02618
02619 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
02620 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
02621 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
02622 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
02623 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
02624 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
02625
02626 if (!high_bit_depth) {
02627 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
02628 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
02629 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
02630 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
02631 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
02632 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
02633 }
02634
02635 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
02636 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
02637 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
02638 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
02639
02640 #if HAVE_YASM
02641 if (!high_bit_depth) {
02642 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
02643 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
02644 }
02645
02646 c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
02647 c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
02648 #endif
02649 }
02650
02651
02652 #define H264_QPEL_FUNCS(x, y, CPU)\
02653 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
02654 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
02655 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
02656 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
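/* The SSE2 copy/average and mc00 qpel functions are skipped on CPUs that
 * also report 3DNow!, where the MMX versions are typically faster. */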
02657 if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
02658
02659 if (!high_bit_depth) {
02660 c->put_pixels_tab[0][0] = put_pixels16_sse2;
02661 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
02662 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
02663 H264_QPEL_FUNCS(0, 0, sse2);
02664 }
02665 }
02666 if(mm_flags & AV_CPU_FLAG_SSE2){
02667 if (!high_bit_depth) {
02668 H264_QPEL_FUNCS(0, 1, sse2);
02669 H264_QPEL_FUNCS(0, 2, sse2);
02670 H264_QPEL_FUNCS(0, 3, sse2);
02671 H264_QPEL_FUNCS(1, 1, sse2);
02672 H264_QPEL_FUNCS(1, 2, sse2);
02673 H264_QPEL_FUNCS(1, 3, sse2);
02674 H264_QPEL_FUNCS(2, 1, sse2);
02675 H264_QPEL_FUNCS(2, 2, sse2);
02676 H264_QPEL_FUNCS(2, 3, sse2);
02677 H264_QPEL_FUNCS(3, 1, sse2);
02678 H264_QPEL_FUNCS(3, 2, sse2);
02679 H264_QPEL_FUNCS(3, 3, sse2);
02680 }
02681 #if HAVE_YASM
02682 if (bit_depth == 10) {
02683 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_sse2;
02684 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_sse2;
02685 }
02686 #endif
02687 }
02688 #if HAVE_SSSE3
02689 if(mm_flags & AV_CPU_FLAG_SSSE3){
02690 if (!high_bit_depth) {
02691 H264_QPEL_FUNCS(1, 0, ssse3);
02692 H264_QPEL_FUNCS(1, 1, ssse3);
02693 H264_QPEL_FUNCS(1, 2, ssse3);
02694 H264_QPEL_FUNCS(1, 3, ssse3);
02695 H264_QPEL_FUNCS(2, 0, ssse3);
02696 H264_QPEL_FUNCS(2, 1, ssse3);
02697 H264_QPEL_FUNCS(2, 2, ssse3);
02698 H264_QPEL_FUNCS(2, 3, ssse3);
02699 H264_QPEL_FUNCS(3, 0, ssse3);
02700 H264_QPEL_FUNCS(3, 1, ssse3);
02701 H264_QPEL_FUNCS(3, 2, ssse3);
02702 H264_QPEL_FUNCS(3, 3, ssse3);
02703 }
02704 #if HAVE_YASM
02705 if (!high_bit_depth) {
02706 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
02707 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
02708 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
02709 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
02710 }
02711 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
02712 if (mm_flags & AV_CPU_FLAG_SSE4)
02713 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
02714 #endif
02715 }
02716 #endif
02717
02718 if(mm_flags & AV_CPU_FLAG_3DNOW){
02719 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
02720 c->vector_fmul = vector_fmul_3dnow;
02721 }
02722 if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
02723 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
02724 #if HAVE_6REGS
02725 c->vector_fmul_window = vector_fmul_window_3dnow2;
02726 #endif
02727 }
02728 if(mm_flags & AV_CPU_FLAG_MMX2){
02729 #if HAVE_YASM
02730 c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
02731 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
02732 if (avctx->flags & CODEC_FLAG_BITEXACT) {
02733 c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
02734 } else {
02735 c->apply_window_int16 = ff_apply_window_int16_mmxext;
02736 }
02737 #endif
02738 }
02739 if(mm_flags & AV_CPU_FLAG_SSE){
02740 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
02741 c->ac3_downmix = ac3_downmix_sse;
02742 c->vector_fmul = vector_fmul_sse;
02743 c->vector_fmul_reverse = vector_fmul_reverse_sse;
02744 c->vector_fmul_add = vector_fmul_add_sse;
02745 #if HAVE_6REGS
02746 c->vector_fmul_window = vector_fmul_window_sse;
02747 #endif
02748 c->vector_clipf = vector_clipf_sse;
02749 #if HAVE_YASM
02750 c->scalarproduct_float = ff_scalarproduct_float_sse;
02751 #endif
02752 }
02753 if(mm_flags & AV_CPU_FLAG_3DNOW)
02754 c->vector_fmul_add = vector_fmul_add_3dnow;
02755 if(mm_flags & AV_CPU_FLAG_SSE2){
02756 #if HAVE_YASM
02757 c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
02758 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
02759 if (avctx->flags & CODEC_FLAG_BITEXACT) {
02760 c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
02761 } else {
02762 if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
02763 c->apply_window_int16 = ff_apply_window_int16_sse2;
02764 }
02765 }
02766
02767 if (!high_bit_depth)
02768 c->emulated_edge_mc = emulated_edge_mc_sse;
02769 c->gmc= gmc_sse;
02770 #endif
02771 }
02772 if (mm_flags & AV_CPU_FLAG_SSSE3) {
02773 #if HAVE_YASM
02774 if (mm_flags & AV_CPU_FLAG_ATOM) {
02775 c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
02776 } else {
02777 c->apply_window_int16 = ff_apply_window_int16_ssse3;
02778 }
02779 if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) {
02780 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
02781 }
02782 #endif
02783 }
02784 #if HAVE_AVX && HAVE_YASM
02785 if (mm_flags & AV_CPU_FLAG_AVX) {
02786 if (bit_depth == 10) {
02787 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_avx;
02788 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_avx;
02789 }
02790 }
02791 #endif
02792 }
02793
02794 if (CONFIG_ENCODERS)
02795 dsputilenc_init_mmx(c, avctx);
02796
02797 #if 0
02798
02799 get_pixels = just_return;
02800 put_pixels_clamped = just_return;
02801 add_pixels_clamped = just_return;
02802
02803 pix_abs16x16 = just_return;
02804 pix_abs16x16_x2 = just_return;
02805 pix_abs16x16_y2 = just_return;
02806 pix_abs16x16_xy2 = just_return;
02807
02808 put_pixels_tab[0] = just_return;
02809 put_pixels_tab[1] = just_return;
02810 put_pixels_tab[2] = just_return;
02811 put_pixels_tab[3] = just_return;
02812
02813 put_no_rnd_pixels_tab[0] = just_return;
02814 put_no_rnd_pixels_tab[1] = just_return;
02815 put_no_rnd_pixels_tab[2] = just_return;
02816 put_no_rnd_pixels_tab[3] = just_return;
02817
02818 avg_pixels_tab[0] = just_return;
02819 avg_pixels_tab[1] = just_return;
02820 avg_pixels_tab[2] = just_return;
02821 avg_pixels_tab[3] = just_return;
02822
02823 avg_no_rnd_pixels_tab[0] = just_return;
02824 avg_no_rnd_pixels_tab[1] = just_return;
02825 avg_no_rnd_pixels_tab[2] = just_return;
02826 avg_no_rnd_pixels_tab[3] = just_return;
02827
02828
02829
02830 #endif
02831 }