#ifndef AVUTIL_MIPS_GENERIC_MACROS_MSA_H
#define AVUTIL_MIPS_GENERIC_MACROS_MSA_H

#include <stdint.h>
#include <msa.h>
#define ALLOC_ALIGNED(align) __attribute__ ((aligned((align) << 1)))
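/* Description : Load and store one whole vector of the given element type
   Arguments   : Inputs - psrc / in, pdst
   Details     : LD_V loads one vector from 'psrc', ST_V stores one vector
                 to 'pdst'; the typed wrappers below fix RTYPE for the
                 common MSA vector types.
*/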
#define LD_V(RTYPE, psrc) *((RTYPE *)(psrc))
#define LD_UB(...) LD_V(v16u8, __VA_ARGS__)
#define LD_SB(...) LD_V(v16i8, __VA_ARGS__)
#define LD_UH(...) LD_V(v8u16, __VA_ARGS__)
#define LD_SH(...) LD_V(v8i16, __VA_ARGS__)
#define LD_UW(...) LD_V(v4u32, __VA_ARGS__)
#define LD_SW(...) LD_V(v4i32, __VA_ARGS__)

#define ST_V(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
#define ST_UB(...) ST_V(v16u8, __VA_ARGS__)
#define ST_SB(...) ST_V(v16i8, __VA_ARGS__)
#define ST_UH(...) ST_V(v8u16, __VA_ARGS__)
#define ST_SH(...) ST_V(v8i16, __VA_ARGS__)
#define ST_UW(...) ST_V(v4u32, __VA_ARGS__)
#define ST_SW(...) ST_V(v4i32, __VA_ARGS__)
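/* Description : Unaligned scalar loads (LH, LW, LD) and stores (SH, SW, SD)
   Details     : MIPSR6 supports unaligned access directly, so plain C
                 dereferences are used there. Pre-R6 falls back to
                 ulh/lwr+lwl/ldr+ldl style inline assembly, and 32-bit
                 builds synthesize 64-bit accesses from two 32-bit ones.
*/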
#if (__mips_isa_rev >= 6)
    #define LH(psrc)                              \
    ( {                                           \
        uint16_t val_lh_m = *(uint16_t *)(psrc);  \
        val_lh_m;                                 \
    } )

    #define LW(psrc)                              \
    ( {                                           \
        uint32_t val_lw_m = *(uint32_t *)(psrc);  \
        val_lw_m;                                 \
    } )

    #if (__mips == 64)
        #define LD(psrc)                              \
        ( {                                           \
            uint64_t val_ld_m = *(uint64_t *)(psrc);  \
            val_ld_m;                                 \
        } )
    #else  // !(__mips == 64)
        #define LD(psrc)                                                    \
        ( {                                                                 \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
            uint32_t val0_ld_m, val1_ld_m;                                  \
            uint64_t val_ld_m = 0;                                          \
                                                                            \
            val0_ld_m = LW(psrc_ld_m);                                      \
            val1_ld_m = LW(psrc_ld_m + 4);                                  \
                                                                            \
            val_ld_m = (uint64_t) (val1_ld_m);                              \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
            val_ld_m;                                                       \
        } )
    #endif  // (__mips == 64)

    #define SH(val, pdst)  *(uint16_t *)(pdst) = (val);
    #define SW(val, pdst)  *(uint32_t *)(pdst) = (val);
    #define SD(val, pdst)  *(uint64_t *)(pdst) = (val);
#else  // !(__mips_isa_rev >= 6)
    #define LH(psrc)                                 \
    ( {                                              \
        uint8_t *psrc_lh_m = (uint8_t *) (psrc);     \
        uint16_t val_lh_m;                           \
                                                     \
        __asm__ volatile (                           \
            "ulh  %[val_lh_m],  %[psrc_lh_m]  \n\t"  \
                                                     \
            : [val_lh_m] "=r" (val_lh_m)             \
            : [psrc_lh_m] "m" (*psrc_lh_m)           \
        );                                           \
                                                     \
        val_lh_m;                                    \
    } )

    #define LW(psrc)                                 \
    ( {                                              \
        uint8_t *psrc_lw_m = (uint8_t *) (psrc);     \
        uint32_t val_lw_m;                           \
                                                     \
        __asm__ volatile (                           \
            "lwr %[val_lw_m], 0(%[psrc_lw_m]) \n\t"  \
            "lwl %[val_lw_m], 3(%[psrc_lw_m]) \n\t"  \
                                                     \
            : [val_lw_m] "=&r"(val_lw_m)             \
            : [psrc_lw_m] "r"(psrc_lw_m)             \
        );                                           \
                                                     \
        val_lw_m;                                    \
    } )

    #if (__mips == 64)
        #define LD(psrc)                                 \
        ( {                                              \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);     \
            uint64_t val_ld_m = 0;                       \
                                                         \
            __asm__ volatile (                           \
                "ldr %[val_ld_m], 0(%[psrc_ld_m]) \n\t"  \
                "ldl %[val_ld_m], 7(%[psrc_ld_m]) \n\t"  \
                                                         \
                : [val_ld_m] "=&r" (val_ld_m)            \
                : [psrc_ld_m] "r" (psrc_ld_m)            \
            );                                           \
                                                         \
            val_ld_m;                                    \
        } )
    #else  // !(__mips == 64)
        #define LD(psrc)                                                    \
        ( {                                                                 \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
            uint32_t val0_ld_m, val1_ld_m;                                  \
            uint64_t val_ld_m = 0;                                          \
                                                                            \
            val0_ld_m = LW(psrc_ld_m);                                      \
            val1_ld_m = LW(psrc_ld_m + 4);                                  \
                                                                            \
            val_ld_m = (uint64_t) (val1_ld_m);                              \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
            val_ld_m;                                                       \
        } )
    #endif  // (__mips == 64)
    #define SH(val, pdst)                            \
    {                                                \
        uint8_t *pdst_sh_m = (uint8_t *) (pdst);     \
        uint16_t val_sh_m = (val);                   \
                                                     \
        __asm__ volatile (                           \
            "ush  %[val_sh_m],  %[pdst_sh_m]  \n\t"  \
                                                     \
            : [pdst_sh_m] "=m" (*pdst_sh_m)          \
            : [val_sh_m] "r" (val_sh_m)              \
        );                                           \
    }

    #define SW(val, pdst)                            \
    {                                                \
        uint8_t *pdst_sw_m = (uint8_t *) (pdst);     \
        uint32_t val_sw_m = (val);                   \
                                                     \
        __asm__ volatile (                           \
            "usw  %[val_sw_m],  %[pdst_sw_m]  \n\t"  \
                                                     \
            : [pdst_sw_m] "=m" (*pdst_sw_m)          \
            : [val_sw_m] "r" (val_sw_m)              \
        );                                           \
    }

    #define SD(val, pdst)                                             \
    {                                                                 \
        uint8_t *pdst_sd_m = (uint8_t *) (pdst);                      \
        uint32_t val0_sd_m, val1_sd_m;                                \
                                                                      \
        val0_sd_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);          \
        val1_sd_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF);  \
                                                                      \
        SW(val0_sd_m, pdst_sd_m);                                     \
        SW(val1_sd_m, pdst_sd_m + 4);                                 \
    }
#endif  // (__mips_isa_rev >= 6)
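/* Description : Load/store groups of scalar words and doublewords
   Arguments   : Inputs  - psrc / in0.., stride
                 Outputs - out0..
   Details     : LW2/LW4 and LD2/LD4 load consecutive words/doublewords
                 placed 'stride' bytes apart; SW4/SD4 store them the same
                 way.
*/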
#define LW4(psrc, stride, out0, out1, out2, out3)  \
{                                                  \
    out0 = LW((psrc));                             \
    out1 = LW((psrc) + stride);                    \
    out2 = LW((psrc) + 2 * stride);                \
    out3 = LW((psrc) + 3 * stride);                \
}

#define LW2(psrc, stride, out0, out1)  \
{                                      \
    out0 = LW((psrc));                 \
    out1 = LW((psrc) + stride);        \
}

#define LD2(psrc, stride, out0, out1)  \
{                                      \
    out0 = LD((psrc));                 \
    out1 = LD((psrc) + stride);        \
}
#define LD4(psrc, stride, out0, out1, out2, out3)  \
{                                                  \
    LD2((psrc), stride, out0, out1);               \
    LD2((psrc) + 2 * stride, stride, out2, out3);  \
}
#define SW4(in0, in1, in2, in3, pdst, stride)  \
{                                              \
    SW(in0, (pdst));                           \
    SW(in1, (pdst) + stride);                  \
    SW(in2, (pdst) + 2 * stride);              \
    SW(in3, (pdst) + 3 * stride);              \
}

#define SD4(in0, in1, in2, in3, pdst, stride)  \
{                                              \
    SD(in0, (pdst));                           \
    SD(in1, (pdst) + stride);                  \
    SD(in2, (pdst) + 2 * stride);              \
    SD(in3, (pdst) + 3 * stride);              \
}
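/* Description : Load multiple vectors with stride between them
   Arguments   : Inputs  - psrc, stride
                 Outputs - out0, out1, ..
   Details     : Each LD_Vn loads 'n' vectors, the k-th one from
                 (psrc + k * stride).
*/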
#define LD_V2(RTYPE, psrc, stride, out0, out1)  \
{                                               \
    out0 = LD_V(RTYPE, (psrc));                 \
    out1 = LD_V(RTYPE, (psrc) + stride);        \
}
#define LD_UB2(...) LD_V2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_V2(v16i8, __VA_ARGS__)
#define LD_UH2(...) LD_V2(v8u16, __VA_ARGS__)
#define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
#define LD_SW2(...) LD_V2(v4i32, __VA_ARGS__)

#define LD_V3(RTYPE, psrc, stride, out0, out1, out2)  \
{                                                     \
    LD_V2(RTYPE, (psrc), stride, out0, out1);         \
    out2 = LD_V(RTYPE, (psrc) + 2 * stride);          \
}
#define LD_UB3(...) LD_V3(v16u8, __VA_ARGS__)
#define LD_SB3(...) LD_V3(v16i8, __VA_ARGS__)

#define LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3)  \
{                                                           \
    LD_V2(RTYPE, (psrc), stride, out0, out1);               \
    LD_V2(RTYPE, (psrc) + 2 * stride, stride, out2, out3);  \
}
#define LD_UB4(...) LD_V4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_V4(v16i8, __VA_ARGS__)
#define LD_UH4(...) LD_V4(v8u16, __VA_ARGS__)
#define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)
#define LD_SW4(...) LD_V4(v4i32, __VA_ARGS__)

#define LD_V5(RTYPE, psrc, stride, out0, out1, out2, out3, out4)  \
{                                                                 \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);         \
    out4 = LD_V(RTYPE, (psrc) + 4 * stride);                      \
}
#define LD_UB5(...) LD_V5(v16u8, __VA_ARGS__)
#define LD_SB5(...) LD_V5(v16i8, __VA_ARGS__)

#define LD_V6(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5)  \
{                                                                       \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);               \
    LD_V2(RTYPE, (psrc) + 4 * stride, stride, out4, out5);              \
}
#define LD_UB6(...) LD_V6(v16u8, __VA_ARGS__)
#define LD_SB6(...) LD_V6(v16i8, __VA_ARGS__)
#define LD_UH6(...) LD_V6(v8u16, __VA_ARGS__)
#define LD_SH6(...) LD_V6(v8i16, __VA_ARGS__)

#define LD_V7(RTYPE, psrc, stride,                               \
              out0, out1, out2, out3, out4, out5, out6)          \
{                                                                \
    LD_V5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4);  \
    LD_V2(RTYPE, (psrc) + 5 * stride, stride, out5, out6);       \
}
#define LD_UB7(...) LD_V7(v16u8, __VA_ARGS__)
#define LD_SB7(...) LD_V7(v16i8, __VA_ARGS__)

#define LD_V8(RTYPE, psrc, stride,                                      \
              out0, out1, out2, out3, out4, out5, out6, out7)           \
{                                                                       \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);               \
    LD_V4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7);  \
}
#define LD_UB8(...) LD_V8(v16u8, __VA_ARGS__)
#define LD_SB8(...) LD_V8(v16i8, __VA_ARGS__)
#define LD_UH8(...) LD_V8(v8u16, __VA_ARGS__)
#define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)
#define LD_SW8(...) LD_V8(v4i32, __VA_ARGS__)

#define LD_V16(RTYPE, psrc, stride,                                   \
               out0, out1, out2, out3, out4, out5, out6, out7,        \
               out8, out9, out10, out11, out12, out13, out14, out15)  \
{                                                                     \
    LD_V8(RTYPE, (psrc), stride,                                      \
          out0, out1, out2, out3, out4, out5, out6, out7);            \
    LD_V8(RTYPE, (psrc) + 8 * stride, stride,                         \
          out8, out9, out10, out11, out12, out13, out14, out15);      \
}
#define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)
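/* Description : Store multiple vectors with stride between them
   Arguments   : Inputs - in0.., pdst, stride
   Details     : Each ST_Vn stores 'n' vectors, the k-th one to
                 (pdst + k * stride).
*/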
#define ST_V2(RTYPE, in0, in1, pdst, stride)  \
{                                             \
    ST_V(RTYPE, in0, (pdst));                 \
    ST_V(RTYPE, in1, (pdst) + stride);        \
}
#define ST_UB2(...) ST_V2(v16u8, __VA_ARGS__)
#define ST_SB2(...) ST_V2(v16i8, __VA_ARGS__)
#define ST_UH2(...) ST_V2(v8u16, __VA_ARGS__)
#define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
#define ST_SW2(...) ST_V2(v4i32, __VA_ARGS__)

#define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride)    \
{                                                         \
    ST_V2(RTYPE, in0, in1, (pdst), stride);               \
    ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride);  \
}
#define ST_UB4(...) ST_V4(v16u8, __VA_ARGS__)
#define ST_SB4(...) ST_V4(v16i8, __VA_ARGS__)
#define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
#define ST_SW4(...) ST_V4(v4i32, __VA_ARGS__)

#define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride)  \
{                                                                 \
    ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride);             \
    ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride);          \
}
#define ST_SH6(...) ST_V6(v8i16, __VA_ARGS__)

#define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                           \
    ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride);                       \
    ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);          \
}
#define ST_UB8(...) ST_V8(v16u8, __VA_ARGS__)
#define ST_SH8(...) ST_V8(v8i16, __VA_ARGS__)
#define ST_SW8(...) ST_V8(v4i32, __VA_ARGS__)
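/* Description : Store selected vector elements as scalars with stride
   Arguments   : Inputs - in, idx.., pdst, stride
   Details     : ST_Hn/ST_Wn/ST_Dn copy the halfword/word/doubleword at
                 each given index out of the vector and store it 'stride'
                 bytes apart starting at 'pdst'.
*/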
#define ST_H1(in, idx, pdst)                   \
{                                              \
    uint16_t out0_m;                           \
                                               \
    out0_m = __msa_copy_u_h((v8i16) in, idx);  \
    SH(out0_m, (pdst));                        \
}
#define ST_H2(in, idx0, idx1, pdst, stride)     \
{                                               \
    uint16_t out0_m, out1_m;                    \
                                                \
    out0_m = __msa_copy_u_h((v8i16) in, idx0);  \
    out1_m = __msa_copy_u_h((v8i16) in, idx1);  \
                                                \
    SH(out0_m, (pdst));                         \
    SH(out1_m, (pdst) + stride);                \
}
#define ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride)  \
{                                                        \
    uint16_t out0_m, out1_m, out2_m, out3_m;             \
                                                         \
    out0_m = __msa_copy_u_h((v8i16) in, idx0);           \
    out1_m = __msa_copy_u_h((v8i16) in, idx1);           \
    out2_m = __msa_copy_u_h((v8i16) in, idx2);           \
    out3_m = __msa_copy_u_h((v8i16) in, idx3);           \
                                                         \
    SH(out0_m, (pdst));                                  \
    SH(out1_m, (pdst) + stride);                         \
    SH(out2_m, (pdst) + 2 * stride);                     \
    SH(out3_m, (pdst) + 3 * stride);                     \
}
#define ST_H8(in, idx0, idx1, idx2, idx3, idx4, idx5,             \
              idx6, idx7, pdst, stride)                           \
{                                                                 \
    ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride)               \
    ST_H4(in, idx4, idx5, idx6, idx7, (pdst) + 4*stride, stride)  \
}
#define ST_W1(in, idx, pdst)                   \
{                                              \
    uint32_t out0_m;                           \
                                               \
    out0_m = __msa_copy_u_w((v4i32) in, idx);  \
    SW(out0_m, (pdst));                        \
}
#define ST_W2(in, idx0, idx1, pdst, stride)     \
{                                               \
    uint32_t out0_m, out1_m;                    \
                                                \
    out0_m = __msa_copy_u_w((v4i32) in, idx0);  \
    out1_m = __msa_copy_u_w((v4i32) in, idx1);  \
                                                \
    SW(out0_m, (pdst));                         \
    SW(out1_m, (pdst) + stride);                \
}
#define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride)  \
{                                                        \
    uint32_t out0_m, out1_m, out2_m, out3_m;             \
                                                         \
    out0_m = __msa_copy_u_w((v4i32) in, idx0);           \
    out1_m = __msa_copy_u_w((v4i32) in, idx1);           \
    out2_m = __msa_copy_u_w((v4i32) in, idx2);           \
    out3_m = __msa_copy_u_w((v4i32) in, idx3);           \
                                                         \
    SW(out0_m, (pdst));                                  \
    SW(out1_m, (pdst) + stride);                         \
    SW(out2_m, (pdst) + 2*stride);                       \
    SW(out3_m, (pdst) + 3*stride);                       \
}
#define ST_W8(in0, in1, idx0, idx1, idx2, idx3,                  \
              idx4, idx5, idx6, idx7, pdst, stride)              \
{                                                                \
    ST_W4(in0, idx0, idx1, idx2, idx3, pdst, stride)             \
    ST_W4(in1, idx4, idx5, idx6, idx7, pdst + 4*stride, stride)  \
}
#define ST_D1(in, idx, pdst)                   \
{                                              \
    uint64_t out0_m;                           \
                                               \
    out0_m = __msa_copy_u_d((v2i64) in, idx);  \
    SD(out0_m, (pdst));                        \
}
#define ST_D2(in, idx0, idx1, pdst, stride)     \
{                                               \
    uint64_t out0_m, out1_m;                    \
                                                \
    out0_m = __msa_copy_u_d((v2i64) in, idx0);  \
    out1_m = __msa_copy_u_d((v2i64) in, idx1);  \
                                                \
    SD(out0_m, (pdst));                         \
    SD(out1_m, (pdst) + stride);                \
}
#define ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)  \
{                                                              \
    uint64_t out0_m, out1_m, out2_m, out3_m;                   \
                                                               \
    out0_m = __msa_copy_u_d((v2i64) in0, idx0);                \
    out1_m = __msa_copy_u_d((v2i64) in0, idx1);                \
    out2_m = __msa_copy_u_d((v2i64) in1, idx2);                \
    out3_m = __msa_copy_u_d((v2i64) in1, idx3);                \
                                                               \
    SD(out0_m, (pdst));                                        \
    SD(out1_m, (pdst) + stride);                               \
    SD(out2_m, (pdst) + 2 * stride);                           \
    SD(out3_m, (pdst) + 3 * stride);                           \
}
#define ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3,               \
              idx4, idx5, idx6, idx7, pdst, stride)                     \
{                                                                       \
    ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)               \
    ST_D4(in2, in3, idx4, idx5, idx6, idx7, pdst + 4 * stride, stride)  \
}
#define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                        \
    uint64_t out0_m, out1_m, out2_m, out3_m;                             \
    uint64_t out4_m, out5_m, out6_m, out7_m;                             \
    uint32_t out8_m, out9_m, out10_m, out11_m;                           \
    uint32_t out12_m, out13_m, out14_m, out15_m;                         \
    uint8_t *pblk_12x8_m = (uint8_t *) (pdst);                           \
                                                                         \
    out0_m = __msa_copy_u_d((v2i64) in0, 0);                             \
    out1_m = __msa_copy_u_d((v2i64) in1, 0);                             \
    out2_m = __msa_copy_u_d((v2i64) in2, 0);                             \
    out3_m = __msa_copy_u_d((v2i64) in3, 0);                             \
    out4_m = __msa_copy_u_d((v2i64) in4, 0);                             \
    out5_m = __msa_copy_u_d((v2i64) in5, 0);                             \
    out6_m = __msa_copy_u_d((v2i64) in6, 0);                             \
    out7_m = __msa_copy_u_d((v2i64) in7, 0);                             \
                                                                         \
    out8_m =  __msa_copy_u_w((v4i32) in0, 2);                            \
    out9_m =  __msa_copy_u_w((v4i32) in1, 2);                            \
    out10_m = __msa_copy_u_w((v4i32) in2, 2);                            \
    out11_m = __msa_copy_u_w((v4i32) in3, 2);                            \
    out12_m = __msa_copy_u_w((v4i32) in4, 2);                            \
    out13_m = __msa_copy_u_w((v4i32) in5, 2);                            \
    out14_m = __msa_copy_u_w((v4i32) in6, 2);                            \
    out15_m = __msa_copy_u_w((v4i32) in7, 2);                            \
                                                                         \
    SD(out0_m, pblk_12x8_m);                                             \
    SW(out8_m, pblk_12x8_m + 8);                                         \
    pblk_12x8_m += stride;                                               \
    SD(out1_m, pblk_12x8_m);                                             \
    SW(out9_m, pblk_12x8_m + 8);                                         \
    pblk_12x8_m += stride;                                               \
    SD(out2_m, pblk_12x8_m);                                             \
    SW(out10_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out3_m, pblk_12x8_m);                                             \
    SW(out11_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out4_m, pblk_12x8_m);                                             \
    SW(out12_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out5_m, pblk_12x8_m);                                             \
    SW(out13_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out6_m, pblk_12x8_m);                                             \
    SW(out14_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out7_m, pblk_12x8_m);                                             \
    SW(out15_m, pblk_12x8_m + 8);                                        \
}
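/* Description : Rounded average of pairs of unsigned byte vectors
   Arguments   : Inputs  - in0, in1, ..
                 Outputs - out0, out1, ..
   Details     : Each output element is (a + b + 1) >> 1 of the
                 corresponding input elements.
*/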
#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)       \
{                                                             \
    out0 = (RTYPE) __msa_aver_u_b((v16u8) in0, (v16u8) in1);  \
    out1 = (RTYPE) __msa_aver_u_b((v16u8) in2, (v16u8) in3);  \
}
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)

#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3)                         \
{                                                                \
    AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)              \
    AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3)              \
}
#define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)
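/* Description : Immediate byte slide with a destination vector
   Arguments   : Inputs  - d (destination vector), s (source vector),
                           slide_val
                 Outputs - out0, ..
   Details     : 'slide_val' bytes of 's' are shifted into 'd' via
                 __msa_sldi_b.
*/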
#define SLDI_B(RTYPE, d, s, slide_val, out)                       \
{                                                                 \
    out = (RTYPE) __msa_sldi_b((v16i8) d, (v16i8) s, slide_val);  \
}

#define SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1)  \
{                                                              \
    SLDI_B(RTYPE, d0, s0, slide_val, out0)                     \
    SLDI_B(RTYPE, d1, s1, slide_val, out1)                     \
}
#define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)
#define SLDI_B2_SB(...) SLDI_B2(v16i8, __VA_ARGS__)
#define SLDI_B2_SH(...) SLDI_B2(v8i16, __VA_ARGS__)
#define SLDI_B2_SW(...) SLDI_B2(v4i32, __VA_ARGS__)

#define SLDI_B3(RTYPE, d0, s0, d1, s1, d2, s2, slide_val,  \
                out0, out1, out2)                          \
{                                                          \
    SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1)  \
    SLDI_B(RTYPE, d2, s2, slide_val, out2)                 \
}
#define SLDI_B3_UB(...) SLDI_B3(v16u8, __VA_ARGS__)
#define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__)
#define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)

#define SLDI_B4(RTYPE, d0, s0, d1, s1, d2, s2, d3, s3,     \
                slide_val, out0, out1, out2, out3)         \
{                                                          \
    SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1)  \
    SLDI_B2(RTYPE, d2, s2, d3, s3, slide_val, out2, out3)  \
}
#define SLDI_B4_UB(...) SLDI_B4(v16u8, __VA_ARGS__)
#define SLDI_B4_SB(...) SLDI_B4(v16i8, __VA_ARGS__)
#define SLDI_B4_SH(...) SLDI_B4(v8i16, __VA_ARGS__)
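/* Description : Shuffle elements of two source vectors as per a mask
   Arguments   : Inputs  - in0, in1, .., mask0, ..
                 Outputs - out0, ..
   Details     : Each mask byte/halfword/word selects one element from the
                 concatenation of the two source vectors.
*/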
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)       \
{                                                                          \
    out0 = (RTYPE) __msa_vshf_b((v16i8) mask0, (v16i8) in1, (v16i8) in0);  \
    out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2);  \
}
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
#define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)

#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2,  \
                out0, out1, out2)                                          \
{                                                                          \
    VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);          \
    out2 = (RTYPE) __msa_vshf_b((v16i8) mask2, (v16i8) in5, (v16i8) in4);  \
}
#define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)

#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3,       \
                out0, out1, out2, out3)                            \
{                                                                  \
    VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1);  \
    VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3);  \
}
#define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__)
#define VSHF_B4_SH(...) VSHF_B4(v8i16, __VA_ARGS__)

#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)       \
{                                                                          \
    out0 = (RTYPE) __msa_vshf_h((v8i16) mask0, (v8i16) in1, (v8i16) in0);  \
    out1 = (RTYPE) __msa_vshf_h((v8i16) mask1, (v8i16) in3, (v8i16) in2);  \
}
#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)

#define VSHF_H3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2,  \
                out0, out1, out2)                                          \
{                                                                          \
    VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);          \
    out2 = (RTYPE) __msa_vshf_h((v8i16) mask2, (v8i16) in5, (v8i16) in4);  \
}
#define VSHF_H3_SH(...) VSHF_H3(v8i16, __VA_ARGS__)

#define VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)       \
{                                                                          \
    out0 = (RTYPE) __msa_vshf_w((v4i32) mask0, (v4i32) in1, (v4i32) in0);  \
    out1 = (RTYPE) __msa_vshf_w((v4i32) mask1, (v4i32) in3, (v4i32) in2);  \
}
#define VSHF_W2_SB(...) VSHF_W2(v16i8, __VA_ARGS__)
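/* Description : Dot product of byte/halfword vector elements
   Arguments   : Inputs  - mult0.., cnst0..
                 Outputs - out0..
   Details     : Pairs of adjacent elements are multiplied and the two
                 products summed, widening byte->halfword (DOTP_UB/SB) or
                 halfword->word (DOTP_SH). DPADD_* additionally accumulate
                 into the output vector.
*/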
#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                 \
    out0 = (RTYPE) __msa_dotp_u_h((v16u8) mult0, (v16u8) cnst0);  \
    out1 = (RTYPE) __msa_dotp_u_h((v16u8) mult1, (v16u8) cnst1);  \
}
#define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)

#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3,           \
                 cnst0, cnst1, cnst2, cnst3,                  \
                 out0, out1, out2, out3)                      \
{                                                             \
    DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);  \
    DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);  \
}
#define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)

#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                 \
    out0 = (RTYPE) __msa_dotp_s_h((v16i8) mult0, (v16i8) cnst0);  \
    out1 = (RTYPE) __msa_dotp_s_h((v16i8) mult1, (v16i8) cnst1);  \
}
#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)

#define DOTP_SB3(RTYPE, mult0, mult1, mult2, cnst0, cnst1, cnst2,  \
                 out0, out1, out2)                                 \
{                                                                  \
    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);       \
    out2 = (RTYPE) __msa_dotp_s_h((v16i8) mult2, (v16i8) cnst2);   \
}
#define DOTP_SB3_SH(...) DOTP_SB3(v8i16, __VA_ARGS__)

#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3,                   \
                 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
{                                                                     \
    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
    DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
}
#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)

#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                 \
    out0 = (RTYPE) __msa_dotp_s_w((v8i16) mult0, (v8i16) cnst0);  \
    out1 = (RTYPE) __msa_dotp_s_w((v8i16) mult1, (v8i16) cnst1);  \
}
#define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)

#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3,           \
                 cnst0, cnst1, cnst2, cnst3,                  \
                 out0, out1, out2, out3)                      \
{                                                             \
    DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);  \
    DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);  \
}
#define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
{                                                                 \
    out0 = (RTYPE) __msa_dpadd_s_h((v8i16) out0,                  \
                                   (v16i8) mult0, (v16i8) cnst0); \
    out1 = (RTYPE) __msa_dpadd_s_h((v8i16) out1,                  \
                                   (v16i8) mult1, (v16i8) cnst1); \
}
#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)

#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3,                   \
                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
{                                                                      \
    DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
    DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
}
#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)

#define DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
{                                                                 \
    out0 = (RTYPE) __msa_dpadd_u_h((v8u16) out0,                  \
                                   (v16u8) mult0, (v16u8) cnst0); \
    out1 = (RTYPE) __msa_dpadd_u_h((v8u16) out1,                  \
                                   (v16u8) mult1, (v16u8) cnst1); \
}
#define DPADD_UB2_UH(...) DPADD_UB2(v8u16, __VA_ARGS__)

#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
{                                                                 \
    out0 = (RTYPE) __msa_dpadd_s_w((v4i32) out0,                  \
                                   (v8i16) mult0, (v8i16) cnst0); \
    out1 = (RTYPE) __msa_dpadd_s_w((v4i32) out1,                  \
                                   (v8i16) mult1, (v8i16) cnst1); \
}
#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)

#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3,                   \
                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
{                                                                      \
    DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
    DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
}
#define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__)
#define MIN_UH2(RTYPE, in0, in1, min_vec)               \
{                                                       \
    in0 = (RTYPE) __msa_min_u_h((v8u16) in0, min_vec);  \
    in1 = (RTYPE) __msa_min_u_h((v8u16) in1, min_vec);  \
}
#define MIN_UH2_UH(...) MIN_UH2(v8u16, __VA_ARGS__)

#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec)  \
{                                                    \
    MIN_UH2(RTYPE, in0, in1, min_vec);               \
    MIN_UH2(RTYPE, in2, in3, min_vec);               \
}
#define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__)
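/* Description : Clip vector elements to a range
   Details     : CLIP_SH clips signed halfwords between 'min' and 'max'.
                 The _0_255 variants clamp to the unsigned 8-bit range by
                 flooring at zero and saturating to 255 (sat_u with
                 immediate 7 keeps (2^(7+1))-1 = 255), done in place.
*/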
#define CLIP_SH(in, min, max)                     \
{                                                 \
    in = __msa_max_s_h((v8i16) min, (v8i16) in);  \
    in = __msa_min_s_h((v8i16) max, (v8i16) in);  \
}

#define CLIP_SH_0_255(in)                       \
{                                               \
    in = __msa_maxi_s_h((v8i16) in, 0);         \
    in = (v8i16) __msa_sat_u_h((v8u16) in, 7);  \
}

#define CLIP_SH2_0_255(in0, in1)  \
{                                 \
    CLIP_SH_0_255(in0);           \
    CLIP_SH_0_255(in1);           \
}

#define CLIP_SH4_0_255(in0, in1, in2, in3)  \
{                                           \
    CLIP_SH2_0_255(in0, in1);               \
    CLIP_SH2_0_255(in2, in3);               \
}

#define CLIP_SH8_0_255(in0, in1, in2, in3,  \
                       in4, in5, in6, in7)  \
{                                           \
    CLIP_SH4_0_255(in0, in1, in2, in3);     \
    CLIP_SH4_0_255(in4, in5, in6, in7);     \
}

#define CLIP_SW_0_255(in)                       \
{                                               \
    in = __msa_maxi_s_w((v4i32) in, 0);         \
    in = (v4i32) __msa_sat_u_w((v4u32) in, 7);  \
}

#define CLIP_SW2_0_255(in0, in1)  \
{                                 \
    CLIP_SW_0_255(in0);           \
    CLIP_SW_0_255(in1);           \
}

#define CLIP_SW4_0_255(in0, in1, in2, in3)  \
{                                           \
    CLIP_SW2_0_255(in0, in1);               \
    CLIP_SW2_0_255(in2, in3);               \
}

#define CLIP_SW8_0_255(in0, in1, in2, in3,  \
                       in4, in5, in6, in7)  \
{                                           \
    CLIP_SW4_0_255(in0, in1, in2, in3);     \
    CLIP_SW4_0_255(in4, in5, in6, in7);     \
}
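/* Description : Horizontal addition of all elements to one scalar
   Details     : HADD_SW_S32 sums the four signed words of 'in' and
                 returns the result as int32_t; HADD_UH_U32 sums the eight
                 unsigned halfwords and returns uint32_t.
*/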
#define HADD_SW_S32(in)                               \
( {                                                   \
    v2i64 res0_m, res1_m;                             \
    int32_t sum_m;                                    \
                                                      \
    res0_m = __msa_hadd_s_d((v4i32) in, (v4i32) in);  \
    res1_m = __msa_splati_d(res0_m, 1);               \
    res0_m = res0_m + res1_m;                         \
    sum_m = __msa_copy_s_w((v4i32) res0_m, 0);        \
    sum_m;                                            \
} )

#define HADD_UH_U32(in)                                  \
( {                                                      \
    v4u32 res_m;                                         \
    v2u64 res0_m, res1_m;                                \
    uint32_t sum_m;                                      \
                                                         \
    res_m = __msa_hadd_u_w((v8u16) in, (v8u16) in);      \
    res0_m = __msa_hadd_u_d(res_m, res_m);               \
    res1_m = (v2u64) __msa_splati_d((v2i64) res0_m, 1);  \
    res0_m = res0_m + res1_m;                            \
    sum_m = __msa_copy_u_w((v4i32) res0_m, 0);           \
    sum_m;                                               \
} )
#define HADD_SB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hadd_s_h((v16i8) in0, (v16i8) in0);  \
    out1 = (RTYPE) __msa_hadd_s_h((v16i8) in1, (v16i8) in1);  \
}
#define HADD_SB2_SH(...) HADD_SB2(v8i16, __VA_ARGS__)

#define HADD_SB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                    \
    HADD_SB2(RTYPE, in0, in1, out0, out1);                           \
    HADD_SB2(RTYPE, in2, in3, out2, out3);                           \
}
#define HADD_SB4_UH(...) HADD_SB4(v8u16, __VA_ARGS__)
#define HADD_SB4_SH(...) HADD_SB4(v8i16, __VA_ARGS__)

#define HADD_UB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hadd_u_h((v16u8) in0, (v16u8) in0);  \
    out1 = (RTYPE) __msa_hadd_u_h((v16u8) in1, (v16u8) in1);  \
}
#define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)

#define HADD_UB3(RTYPE, in0, in1, in2, out0, out1, out2)      \
{                                                             \
    HADD_UB2(RTYPE, in0, in1, out0, out1);                    \
    out2 = (RTYPE) __msa_hadd_u_h((v16u8) in2, (v16u8) in2);  \
}
#define HADD_UB3_UH(...) HADD_UB3(v8u16, __VA_ARGS__)

#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                    \
    HADD_UB2(RTYPE, in0, in1, out0, out1);                           \
    HADD_UB2(RTYPE, in2, in3, out2, out3);                           \
}
#define HADD_UB4_UB(...) HADD_UB4(v16u8, __VA_ARGS__)
#define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__)
#define HADD_UB4_SH(...) HADD_UB4(v8i16, __VA_ARGS__)

#define HSUB_UB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hsub_u_h((v16u8) in0, (v16u8) in0);  \
    out1 = (RTYPE) __msa_hsub_u_h((v16u8) in1, (v16u8) in1);  \
}
#define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)

#define HSUB_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                    \
    HSUB_UB2(RTYPE, in0, in1, out0, out1);                           \
    HSUB_UB2(RTYPE, in2, in3, out2, out3);                           \
}
#define HSUB_UB4_UH(...) HSUB_UB4(v8u16, __VA_ARGS__)
#define HSUB_UB4_SH(...) HSUB_UB4(v8i16, __VA_ARGS__)
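/* Description : Sum of absolute differences of two unsigned byte vector
                 pairs, accumulated as unsigned halfwords
   Arguments   : Inputs - in0, in1, ref0, ref1
   Return Type - v8u16
*/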
#if HAVE_MSA2
#define SAD_UB2_UH(in0, in1, ref0, ref1)                                  \
( {                                                                       \
    v8u16 sad_m = { 0 };                                                  \
                                                                          \
    sad_m += __builtin_msa2_sad_adj2_u_w2x_b((v16u8) in0, (v16u8) ref0);  \
    sad_m += __builtin_msa2_sad_adj2_u_w2x_b((v16u8) in1, (v16u8) ref1);  \
    sad_m;                                                                \
} )
#else
#define SAD_UB2_UH(in0, in1, ref0, ref1)                        \
( {                                                             \
    v16u8 diff0_m, diff1_m;                                     \
    v8u16 sad_m = { 0 };                                        \
                                                                \
    diff0_m = __msa_asub_u_b((v16u8) in0, (v16u8) ref0);        \
    diff1_m = __msa_asub_u_b((v16u8) in1, (v16u8) ref1);        \
                                                                \
    sad_m += __msa_hadd_u_h((v16u8) diff0_m, (v16u8) diff0_m);  \
    sad_m += __msa_hadd_u_h((v16u8) diff1_m, (v16u8) diff1_m);  \
    sad_m;                                                      \
} )
#endif  // #if HAVE_MSA2
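/* Description : Insert GP-register scalars into vector element slots
   Details     : INSERT_W2/W4 place words into elements 0..3 of 'out';
                 INSERT_D2 places doublewords into elements 0 and 1.
*/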
#define INSERT_W2(RTYPE, in0, in1, out)                 \
{                                                       \
    out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1);  \
}
#define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)

#define INSERT_W4(RTYPE, in0, in1, in2, in3, out)       \
{                                                       \
    out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 2, in2);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 3, in3);  \
}
#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
#define INSERT_W4_SH(...) INSERT_W4(v8i16, __VA_ARGS__)
#define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)

#define INSERT_D2(RTYPE, in0, in1, out)                 \
{                                                       \
    out = (RTYPE) __msa_insert_d((v2i64) out, 0, in0);  \
    out = (RTYPE) __msa_insert_d((v2i64) out, 1, in1);  \
}
#define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
#define INSERT_D2_SH(...) INSERT_D2(v8i16, __VA_ARGS__)
#define INSERT_D2_SD(...) INSERT_D2(v2i64, __VA_ARGS__)
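/* Description : Interleave even-indexed elements from two vectors
   Details     : Even byte/halfword/word/doubleword elements of 'in0' and
                 'in1' are interleaved into 'out0'; likewise in2/in3 into
                 'out1'.
*/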
#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_ilvev_b((v16i8) in1, (v16i8) in0);  \
    out1 = (RTYPE) __msa_ilvev_b((v16i8) in3, (v16i8) in2);  \
}
#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
#define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
#define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)

#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_ilvev_h((v8i16) in1, (v8i16) in0);  \
    out1 = (RTYPE) __msa_ilvev_h((v8i16) in3, (v8i16) in2);  \
}
#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
#define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)

#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_ilvev_w((v4i32) in1, (v4i32) in0);  \
    out1 = (RTYPE) __msa_ilvev_w((v4i32) in3, (v4i32) in2);  \
}
#define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
#define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
#define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
#define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)

#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_ilvev_d((v2i64) in1, (v2i64) in0);  \
    out1 = (RTYPE) __msa_ilvev_d((v2i64) in3, (v2i64) in2);  \
}
#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
#define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
#define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)
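/* Description : Interleave the left (upper) halves of two vectors
   Details     : Elements from the upper half of 'in0' are interleaved
                 with those from the upper half of 'in1'.
*/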
#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1);  \
    out1 = (RTYPE) __msa_ilvl_b((v16i8) in2, (v16i8) in3);  \
}
#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
#define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)

#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3)                         \
{                                                               \
    ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ILVL_B4_UB(...) ILVL_B4(v16u8, __VA_ARGS__)
#define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
#define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__)
#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)

#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1);  \
    out1 = (RTYPE) __msa_ilvl_h((v8i16) in2, (v8i16) in3);  \
}
#define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
#define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)

#define ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3)                         \
{                                                               \
    ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ILVL_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ILVL_H4_SH(...) ILVL_H4(v8i16, __VA_ARGS__)
#define ILVL_H4_SW(...) ILVL_H4(v4i32, __VA_ARGS__)

#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1);  \
    out1 = (RTYPE) __msa_ilvl_w((v4i32) in2, (v4i32) in3);  \
}
#define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__)
#define ILVL_W2_SB(...) ILVL_W2(v16i8, __VA_ARGS__)
#define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
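/* Description : Interleave the right (lower) halves of two vectors
   Details     : Elements from the lower half of 'in0' are interleaved
                 with those from the lower half of 'in1'. The ILVRL_*2
                 macros further below produce both the right and left
                 interleaves of a single pair.
*/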
#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1);  \
    out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3);  \
}
#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
#define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
#define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)

#define ILVR_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2)  \
{                                                                       \
    ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);                     \
    out2 = (RTYPE) __msa_ilvr_b((v16i8) in4, (v16i8) in5);              \
}
#define ILVR_B3_UB(...) ILVR_B3(v16u8, __VA_ARGS__)
#define ILVR_B3_SB(...) ILVR_B3(v16i8, __VA_ARGS__)
#define ILVR_B3_UH(...) ILVR_B3(v8u16, __VA_ARGS__)
#define ILVR_B3_SH(...) ILVR_B3(v8i16, __VA_ARGS__)

#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3)                         \
{                                                               \
    ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)

#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,    \
                in8, in9, in10, in11, in12, in13, in14, in15,     \
                out0, out1, out2, out3, out4, out5, out6, out7)   \
{                                                                 \
    ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,        \
            out0, out1, out2, out3);                              \
    ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15,  \
            out4, out5, out6, out7);                              \
}
#define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__)
#define ILVR_B8_SW(...) ILVR_B8(v4i32, __VA_ARGS__)

#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1);  \
    out1 = (RTYPE) __msa_ilvr_h((v8i16) in2, (v8i16) in3);  \
}
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)

#define ILVR_H3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2)  \
{                                                                       \
    ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1);                     \
    out2 = (RTYPE) __msa_ilvr_h((v8i16) in4, (v8i16) in5);              \
}
#define ILVR_H3_SH(...) ILVR_H3(v8i16, __VA_ARGS__)

#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3)                         \
{                                                               \
    ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)

#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1);  \
    out1 = (RTYPE) __msa_ilvr_w((v4i32) in2, (v4i32) in3);  \
}
#define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__)
#define ILVR_W2_SB(...) ILVR_W2(v16i8, __VA_ARGS__)
#define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)

#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3)                         \
{                                                               \
    ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ILVR_W4_SB(...) ILVR_W4(v16i8, __VA_ARGS__)
#define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__)
#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1);  \
    out1 = (RTYPE) __msa_ilvr_d((v2i64) in2, (v2i64) in3);  \
}
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)

#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2)  \
{                                                                       \
    ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);                     \
    out2 = (RTYPE) __msa_ilvr_d((v2i64) in4, (v2i64) in5);              \
}
#define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__)

#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3)                         \
{                                                               \
    ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)

#define ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvl_d((v2i64) in0, (v2i64) in1);  \
    out1 = (RTYPE) __msa_ilvl_d((v2i64) in2, (v2i64) in3);  \
}
#define ILVL_D2_UB(...) ILVL_D2(v16u8, __VA_ARGS__)
#define ILVL_D2_SB(...) ILVL_D2(v16i8, __VA_ARGS__)
#define ILVL_D2_SH(...) ILVL_D2(v8i16, __VA_ARGS__)
#define ILVRL_B2(RTYPE, in0, in1, out0, out1)               \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1);  \
    out1 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1);  \
}
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)

#define ILVRL_H2(RTYPE, in0, in1, out0, out1)               \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1);  \
    out1 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1);  \
}
#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)

#define ILVRL_W2(RTYPE, in0, in1, out0, out1)               \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1);  \
    out1 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1);  \
}
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
#define MAXI_SH2(RTYPE, in0, in1, max_val)               \
{                                                        \
    in0 = (RTYPE) __msa_maxi_s_h((v8i16) in0, max_val);  \
    in1 = (RTYPE) __msa_maxi_s_h((v8i16) in1, max_val);  \
}
#define MAXI_SH2_UH(...) MAXI_SH2(v8u16, __VA_ARGS__)
#define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__)

#define MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val)  \
{                                                     \
    MAXI_SH2(RTYPE, in0, in1, max_val);               \
    MAXI_SH2(RTYPE, in2, in3, max_val);               \
}
#define MAXI_SH4_UH(...) MAXI_SH4(v8u16, __VA_ARGS__)
#define MAXI_SH4_SH(...) MAXI_SH4(v8i16, __VA_ARGS__)

#define MAXI_SH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, max_val)  \
{                                                                         \
    MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val);                         \
    MAXI_SH4(RTYPE, in4, in5, in6, in7, max_val);                         \
}
#define MAXI_SH8_UH(...) MAXI_SH8(v8u16, __VA_ARGS__)
#define MAXI_SH8_SH(...) MAXI_SH8(v8i16, __VA_ARGS__)
#define SAT_UH2(RTYPE, in0, in1, sat_val)               \
{                                                       \
    in0 = (RTYPE) __msa_sat_u_h((v8u16) in0, sat_val);  \
    in1 = (RTYPE) __msa_sat_u_h((v8u16) in1, sat_val);  \
}
#define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__)
#define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__)

#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val)  \
{                                                    \
    SAT_UH2(RTYPE, in0, in1, sat_val);               \
    SAT_UH2(RTYPE, in2, in3, sat_val);               \
}
#define SAT_UH4_UH(...) SAT_UH4(v8u16, __VA_ARGS__)
#define SAT_UH4_SH(...) SAT_UH4(v8i16, __VA_ARGS__)

#define SAT_UH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, sat_val)  \
{                                                                        \
    SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val);                         \
    SAT_UH4(RTYPE, in4, in5, in6, in7, sat_val);                         \
}
#define SAT_UH8_UH(...) SAT_UH8(v8u16, __VA_ARGS__)
#define SAT_UH8_SH(...) SAT_UH8(v8i16, __VA_ARGS__)

#define SAT_SH2(RTYPE, in0, in1, sat_val)               \
{                                                       \
    in0 = (RTYPE) __msa_sat_s_h((v8i16) in0, sat_val);  \
    in1 = (RTYPE) __msa_sat_s_h((v8i16) in1, sat_val);  \
}
#define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)

#define SAT_SH3(RTYPE, in0, in1, in2, sat_val)          \
{                                                       \
    SAT_SH2(RTYPE, in0, in1, sat_val);                  \
    in2 = (RTYPE) __msa_sat_s_h((v8i16) in2, sat_val);  \
}
#define SAT_SH3_SH(...) SAT_SH3(v8i16, __VA_ARGS__)

#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val)  \
{                                                    \
    SAT_SH2(RTYPE, in0, in1, sat_val);               \
    SAT_SH2(RTYPE, in2, in3, sat_val);               \
}
#define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)

#define SAT_SW2(RTYPE, in0, in1, sat_val)               \
{                                                       \
    in0 = (RTYPE) __msa_sat_s_w((v4i32) in0, sat_val);  \
    in1 = (RTYPE) __msa_sat_s_w((v4i32) in1, sat_val);  \
}
#define SAT_SW2_SW(...) SAT_SW2(v4i32, __VA_ARGS__)

#define SAT_SW4(RTYPE, in0, in1, in2, in3, sat_val)  \
{                                                    \
    SAT_SW2(RTYPE, in0, in1, sat_val);               \
    SAT_SW2(RTYPE, in2, in3, sat_val);               \
}
#define SAT_SW4_SW(...) SAT_SW4(v4i32, __VA_ARGS__)
#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1)  \
{                                                     \
    out0 = (RTYPE) __msa_splati_h((v8i16) in, idx0);  \
    out1 = (RTYPE) __msa_splati_h((v8i16) in, idx1);  \
}
#define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
#define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)

#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2,        \
                  out0, out1, out2)                   \
{                                                     \
    SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1);     \
    out2 = (RTYPE) __msa_splati_h((v8i16) in, idx2);  \
}
#define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
#define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)

#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3,  \
                  out0, out1, out2, out3)             \
{                                                     \
    SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1);     \
    SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3);     \
}
#define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__)
#define SPLATI_H4_SH(...) SPLATI_H4(v8i16, __VA_ARGS__)

#define SPLATI_W2(RTYPE, in, stidx, out0, out1)            \
{                                                          \
    out0 = (RTYPE) __msa_splati_w((v4i32) in, stidx);      \
    out1 = (RTYPE) __msa_splati_w((v4i32) in, (stidx+1));  \
}
#define SPLATI_W2_SH(...) SPLATI_W2(v8i16, __VA_ARGS__)
#define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__)

#define SPLATI_W4(RTYPE, in, out0, out1, out2, out3)  \
{                                                     \
    SPLATI_W2(RTYPE, in, 0, out0, out1);              \
    SPLATI_W2(RTYPE, in, 2, out2, out3);              \
}
#define SPLATI_W4_SH(...) SPLATI_W4(v8i16, __VA_ARGS__)
#define SPLATI_W4_SW(...) SPLATI_W4(v4i32, __VA_ARGS__)
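/* Description : Pack even-indexed elements of two vectors
   Details     : Even elements of 'in1' fill the lower half of 'out0' and
                 even elements of 'in0' the upper half; PCKOD packs the
                 odd-indexed elements instead.
*/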
#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_pckev_b((v16i8) in0, (v16i8) in1);  \
    out1 = (RTYPE) __msa_pckev_b((v16i8) in2, (v16i8) in3);  \
}
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
#define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)

#define PCKEV_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2)  \
{                                                                        \
    PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);                     \
    out2 = (RTYPE) __msa_pckev_b((v16i8) in4, (v16i8) in5);              \
}
#define PCKEV_B3_UB(...) PCKEV_B3(v16u8, __VA_ARGS__)
#define PCKEV_B3_SB(...) PCKEV_B3(v16i8, __VA_ARGS__)

#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3)                         \
{                                                                \
    PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
#define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)

#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_pckev_h((v8i16) in0, (v8i16) in1);  \
    out1 = (RTYPE) __msa_pckev_h((v8i16) in2, (v8i16) in3);  \
}
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)

#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3)                         \
{                                                                \
    PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
#define PCKEV_H4_SW(...) PCKEV_H4(v4i32, __VA_ARGS__)

#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_pckev_d((v2i64) in0, (v2i64) in1);  \
    out1 = (RTYPE) __msa_pckev_d((v2i64) in2, (v2i64) in3);  \
}
#define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
#define PCKEV_D2_SB(...) PCKEV_D2(v16i8, __VA_ARGS__)
#define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)

#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3)                         \
{                                                                \
    PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__)

#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                            \
    out0 = (RTYPE) __msa_pckod_d((v2i64) in0, (v2i64) in1);  \
    out1 = (RTYPE) __msa_pckod_d((v2i64) in2, (v2i64) in3);  \
}
#define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
#define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
#define PCKOD_D2_SD(...) PCKOD_D2(v2i64, __VA_ARGS__)
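/* Description : XOR each byte with 128 (flip the most significant bit)
   Details     : Converts between unsigned [0,255] and signed [-128,127]
                 representations of pixel data, done in place.
*/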
#define XORI_B2_128(RTYPE, in0, in1)               \
{                                                  \
    in0 = (RTYPE) __msa_xori_b((v16u8) in0, 128);  \
    in1 = (RTYPE) __msa_xori_b((v16u8) in1, 128);  \
}
#define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
#define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
#define XORI_B2_128_SH(...) XORI_B2_128(v8i16, __VA_ARGS__)

#define XORI_B3_128(RTYPE, in0, in1, in2)          \
{                                                  \
    XORI_B2_128(RTYPE, in0, in1);                  \
    in2 = (RTYPE) __msa_xori_b((v16u8) in2, 128);  \
}
#define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)

#define XORI_B4_128(RTYPE, in0, in1, in2, in3)  \
{                                               \
    XORI_B2_128(RTYPE, in0, in1);               \
    XORI_B2_128(RTYPE, in2, in3);               \
}
#define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
#define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
#define XORI_B4_128_SH(...) XORI_B4_128(v8i16, __VA_ARGS__)

#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4)  \
{                                                    \
    XORI_B3_128(RTYPE, in0, in1, in2);               \
    XORI_B2_128(RTYPE, in3, in4);                    \
}
#define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)

#define XORI_B6_128(RTYPE, in0, in1, in2, in3, in4, in5)  \
{                                                         \
    XORI_B4_128(RTYPE, in0, in1, in2, in3);               \
    XORI_B2_128(RTYPE, in4, in5);                         \
}
#define XORI_B6_128_SB(...) XORI_B6_128(v16i8, __VA_ARGS__)

#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6)  \
{                                                              \
    XORI_B4_128(RTYPE, in0, in1, in2, in3);                    \
    XORI_B3_128(RTYPE, in4, in5, in6);                         \
}
#define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)

#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7)  \
{                                                                   \
    XORI_B4_128(RTYPE, in0, in1, in2, in3);                         \
    XORI_B4_128(RTYPE, in4, in5, in6, in7);                         \
}
#define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
#define XORI_B8_128_UB(...) XORI_B8_128(v16u8, __VA_ARGS__)
#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1)       \
{                                                             \
    out0 = (RTYPE) __msa_adds_s_h((v8i16) in0, (v8i16) in1);  \
    out1 = (RTYPE) __msa_adds_s_h((v8i16) in2, (v8i16) in3);  \
}
#define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__)

#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3)                         \
{                                                                \
    ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define ADDS_SH4_UH(...) ADDS_SH4(v8u16, __VA_ARGS__)
#define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)
#define SLLI_2V(in0, in1, shift)  \
{                                 \
    in0 = in0 << shift;           \
    in1 = in1 << shift;           \
}
#define SLLI_4V(in0, in1, in2, in3, shift)  \
{                                           \
    in0 = in0 << shift;                     \
    in1 = in1 << shift;                     \
    in2 = in2 << shift;                     \
    in3 = in3 << shift;                     \
}

#define SRA_4V(in0, in1, in2, in3, shift)  \
{                                          \
    in0 = in0 >> shift;                    \
    in1 = in1 >> shift;                    \
    in2 = in2 >> shift;                    \
    in3 = in3 >> shift;                    \
}
#define SRL_H4(RTYPE, in0, in1, in2, in3, shift)            \
{                                                           \
    in0 = (RTYPE) __msa_srl_h((v8i16) in0, (v8i16) shift);  \
    in1 = (RTYPE) __msa_srl_h((v8i16) in1, (v8i16) shift);  \
    in2 = (RTYPE) __msa_srl_h((v8i16) in2, (v8i16) shift);  \
    in3 = (RTYPE) __msa_srl_h((v8i16) in3, (v8i16) shift);  \
}
#define SRL_H4_UH(...) SRL_H4(v8u16, __VA_ARGS__)

#define SRLR_H4(RTYPE, in0, in1, in2, in3, shift)            \
{                                                            \
    in0 = (RTYPE) __msa_srlr_h((v8i16) in0, (v8i16) shift);  \
    in1 = (RTYPE) __msa_srlr_h((v8i16) in1, (v8i16) shift);  \
    in2 = (RTYPE) __msa_srlr_h((v8i16) in2, (v8i16) shift);  \
    in3 = (RTYPE) __msa_srlr_h((v8i16) in3, (v8i16) shift);  \
}
#define SRLR_H4_UH(...) SRLR_H4(v8u16, __VA_ARGS__)
#define SRLR_H4_SH(...) SRLR_H4(v8i16, __VA_ARGS__)

#define SRLR_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, shift)  \
{                                                                      \
    SRLR_H4(RTYPE, in0, in1, in2, in3, shift);                         \
    SRLR_H4(RTYPE, in4, in5, in6, in7, shift);                         \
}
#define SRLR_H8_UH(...) SRLR_H8(v8u16, __VA_ARGS__)
#define SRLR_H8_SH(...) SRLR_H8(v8i16, __VA_ARGS__)
#define SRAR_H2(RTYPE, in0, in1, shift)                      \
{                                                            \
    in0 = (RTYPE) __msa_srar_h((v8i16) in0, (v8i16) shift);  \
    in1 = (RTYPE) __msa_srar_h((v8i16) in1, (v8i16) shift);  \
}
#define SRAR_H2_UH(...) SRAR_H2(v8u16, __VA_ARGS__)
#define SRAR_H2_SH(...) SRAR_H2(v8i16, __VA_ARGS__)

#define SRAR_H3(RTYPE, in0, in1, in2, shift)                 \
{                                                            \
    SRAR_H2(RTYPE, in0, in1, shift)                          \
    in2 = (RTYPE) __msa_srar_h((v8i16) in2, (v8i16) shift);  \
}
#define SRAR_H3_SH(...) SRAR_H3(v8i16, __VA_ARGS__)

#define SRAR_H4(RTYPE, in0, in1, in2, in3, shift)  \
{                                                  \
    SRAR_H2(RTYPE, in0, in1, shift)                \
    SRAR_H2(RTYPE, in2, in3, shift)                \
}
#define SRAR_H4_UH(...) SRAR_H4(v8u16, __VA_ARGS__)
#define SRAR_H4_SH(...) SRAR_H4(v8i16, __VA_ARGS__)

#define SRAR_W2(RTYPE, in0, in1, shift)                      \
{                                                            \
    in0 = (RTYPE) __msa_srar_w((v4i32) in0, (v4i32) shift);  \
    in1 = (RTYPE) __msa_srar_w((v4i32) in1, (v4i32) shift);  \
}
#define SRAR_W2_SW(...) SRAR_W2(v4i32, __VA_ARGS__)

#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift)  \
{                                                  \
    SRAR_W2(RTYPE, in0, in1, shift)                \
    SRAR_W2(RTYPE, in2, in3, shift)                \
}
#define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
#define SRARI_H2(RTYPE, in0, in1, shift)              \
{                                                     \
    in0 = (RTYPE) __msa_srari_h((v8i16) in0, shift);  \
    in1 = (RTYPE) __msa_srari_h((v8i16) in1, shift);  \
}
#define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
#define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)

#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift)  \
{                                                   \
    SRARI_H2(RTYPE, in0, in1, shift);               \
    SRARI_H2(RTYPE, in2, in3, shift);               \
}
#define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
#define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)

#define SRARI_W2(RTYPE, in0, in1, shift)              \
{                                                     \
    in0 = (RTYPE) __msa_srari_w((v4i32) in0, shift);  \
    in1 = (RTYPE) __msa_srari_w((v4i32) in1, shift);  \
}
#define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)

#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift)  \
{                                                   \
    SRARI_W2(RTYPE, in0, in1, shift);               \
    SRARI_W2(RTYPE, in2, in3, shift);               \
}
#define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
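/* Description : Element-wise multiply / add / subtract of vector pairs
   Arguments   : Inputs  - in0, in1, ..
                 Outputs - out0, ..
   Details     : MULn/ADDn/SUBn apply '*', '+' or '-' to 'n' input pairs
                 using GCC vector operators.
*/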
#define MUL2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 * in1;                         \
    out1 = in2 * in3;                         \
}
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)  \
{                                                                             \
    MUL2(in0, in1, in2, in3, out0, out1);                                     \
    MUL2(in4, in5, in6, in7, out2, out3);                                     \
}

#define ADD2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 + in1;                         \
    out1 = in2 + in3;                         \
}
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)  \
{                                                                             \
    ADD2(in0, in1, in2, in3, out0, out1);                                     \
    ADD2(in4, in5, in6, in7, out2, out3);                                     \
}

#define SUB2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 - in1;                         \
    out1 = in2 - in3;                         \
}
#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)  \
{                                                                             \
    out0 = in0 - in1;                                                         \
    out1 = in2 - in3;                                                         \
    out2 = in4 - in5;                                                         \
    out3 = in6 - in7;                                                         \
}
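/* Description : Sign- or zero-extend vector elements to the next wider type
   Details     : UNPCK_R_* extend only the right (lower) half of the input;
                 the plain UNPCK_* variants produce both halves, with the
                 lower half in 'out0' and the upper half in 'out1'.
                 UNPCK_UB_SH zero-extends, the others sign-extend via a
                 clti-generated sign mask.
*/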
#define UNPCK_R_SB_SH(in, out)                       \
{                                                    \
    v16i8 sign_m;                                    \
                                                     \
    sign_m = __msa_clti_s_b((v16i8) in, 0);          \
    out = (v8i16) __msa_ilvr_b(sign_m, (v16i8) in);  \
}

#if HAVE_MSA2
#define UNPCK_R_SH_SW(in, out)                            \
{                                                         \
    out = (v4i32) __builtin_msa2_w2x_lo_s_h((v8i16) in);  \
}
#else
#define UNPCK_R_SH_SW(in, out)                       \
{                                                    \
    v8i16 sign_m;                                    \
                                                     \
    sign_m = __msa_clti_s_h((v8i16) in, 0);          \
    out = (v4i32) __msa_ilvr_h(sign_m, (v8i16) in);  \
}
#endif  // #if HAVE_MSA2

#if HAVE_MSA2
#define UNPCK_SB_SH(in, out0, out1)                        \
{                                                          \
    out0 = (v8i16) __builtin_msa2_w2x_lo_s_b((v16i8) in);  \
    out1 = (v8i16) __builtin_msa2_w2x_hi_s_b((v16i8) in);  \
}
#else
#define UNPCK_SB_SH(in, out0, out1)         \
{                                           \
    v16i8 tmp_m;                            \
                                            \
    tmp_m = __msa_clti_s_b((v16i8) in, 0);  \
    ILVRL_B2_SH(tmp_m, in, out0, out1);     \
}
#endif  // #if HAVE_MSA2

#define UNPCK_UB_SH(in, out0, out1)       \
{                                         \
    v16i8 zero_m = { 0 };                 \
                                          \
    ILVRL_B2_SH(zero_m, in, out0, out1);  \
}

#if HAVE_MSA2
#define UNPCK_SH_SW(in, out0, out1)                        \
{                                                          \
    out0 = (v4i32) __builtin_msa2_w2x_lo_s_h((v8i16) in);  \
    out1 = (v4i32) __builtin_msa2_w2x_hi_s_h((v8i16) in);  \
}
#else
#define UNPCK_SH_SW(in, out0, out1)         \
{                                           \
    v8i16 tmp_m;                            \
                                            \
    tmp_m = __msa_clti_s_h((v8i16) in, 0);  \
    ILVRL_H2_SW(tmp_m, in, out0, out1);     \
}
#endif  // #if HAVE_MSA2
#define SWAP(in0, in1)  \
{                       \
    in0 = in0 ^ in1;    \
    in1 = in0 ^ in1;    \
    in0 = in0 ^ in1;    \
}

#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                \
    out0 = in0 + in3;                                            \
    out1 = in1 + in2;                                            \
                                                                 \
    out2 = in1 - in2;                                            \
    out3 = in0 - in3;                                            \
}

#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,          \
                    out0, out1, out2, out3, out4, out5, out6, out7)  \
{                                                                    \
    out0 = in0 + in7;                                                \
    out1 = in1 + in6;                                                \
    out2 = in2 + in5;                                                \
    out3 = in3 + in4;                                                \
                                                                     \
    out4 = in3 - in4;                                                \
    out5 = in2 - in5;                                                \
    out6 = in1 - in6;                                                \
    out7 = in0 - in7;                                                \
}

#define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,                \
                     in8, in9, in10, in11, in12, in13, in14, in15,          \
                     out0, out1, out2, out3, out4, out5, out6, out7,        \
                     out8, out9, out10, out11, out12, out13, out14, out15)  \
{                                                                           \
    out0 = in0 + in15;                                                      \
    out1 = in1 + in14;                                                      \
    out2 = in2 + in13;                                                      \
    out3 = in3 + in12;                                                      \
    out4 = in4 + in11;                                                      \
    out5 = in5 + in10;                                                      \
    out6 = in6 + in9;                                                       \
    out7 = in7 + in8;                                                       \
                                                                            \
    out8 = in7 - in8;                                                       \
    out9 = in6 - in9;                                                       \
    out10 = in5 - in10;                                                     \
    out11 = in4 - in11;                                                     \
    out12 = in3 - in12;                                                     \
    out13 = in2 - in13;                                                     \
    out14 = in1 - in14;                                                     \
    out15 = in0 - in15;                                                     \
}
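/* Description : Transpose blocks of vector elements
   Details     : TRANSPOSE{4x4,8x4,8x8,16x4,16x8} reorder byte/halfword/word
                 matrices held across several vectors so that rows become
                 columns, built from interleave, pack and slide primitives.
*/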
#define TRANSPOSE4x4_UB_UB(in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                       \
    v16i8 zero_m = { 0 };                                               \
    v16i8 s0_m, s1_m, s2_m, s3_m;                                       \
                                                                        \
    ILVR_D2_SB(in1, in0, in3, in2, s0_m, s1_m);                         \
    ILVRL_B2_SB(s1_m, s0_m, s2_m, s3_m);                                \
                                                                        \
    out0 = (v16u8) __msa_ilvr_b(s3_m, s2_m);                            \
    out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 4);               \
    out2 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out1, 4);               \
    out3 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out2, 4);               \
}
#define TRANSPOSE8x4_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                        out0, out1, out2, out3)                         \
{                                                                       \
    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                               \
                                                                        \
    ILVEV_W2_SB(in0, in4, in1, in5, tmp0_m, tmp1_m);                    \
    tmp2_m = __msa_ilvr_b(tmp1_m, tmp0_m);                              \
    ILVEV_W2_SB(in2, in6, in3, in7, tmp0_m, tmp1_m);                    \
                                                                        \
    tmp3_m = __msa_ilvr_b(tmp1_m, tmp0_m);                              \
    ILVRL_H2_SB(tmp3_m, tmp2_m, tmp0_m, tmp1_m);                        \
                                                                        \
    ILVRL_W2(RTYPE, tmp1_m, tmp0_m, out0, out2);                        \
    out1 = (RTYPE) __msa_ilvl_d((v2i64) out2, (v2i64) out0);            \
    out3 = (RTYPE) __msa_ilvl_d((v2i64) out0, (v2i64) out2);            \
}
#define TRANSPOSE8x4_UB_UB(...) TRANSPOSE8x4_UB(v16u8, __VA_ARGS__)
#define TRANSPOSE8x4_UB_UH(...) TRANSPOSE8x4_UB(v8u16, __VA_ARGS__)
#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,   \
                        out0, out1, out2, out3, out4, out5, out6, out7)  \
{                                                                        \
    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                \
    v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                \
    v16i8 zeros = { 0 };                                                 \
                                                                         \
    ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5,                   \
               tmp0_m, tmp1_m, tmp2_m, tmp3_m);                          \
    ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m);                         \
    ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m);                         \
    ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2);                         \
    ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6);                         \
    SLDI_B4(RTYPE, zeros, out0, zeros, out2, zeros, out4, zeros, out6,   \
            8, out1, out3, out5, out7);                                  \
}
#define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
#define TRANSPOSE8x8_UB_UH(...) TRANSPOSE8x8_UB(v8u16, __VA_ARGS__)
#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,        \
                            in8, in9, in10, in11, in12, in13, in14, in15,  \
                            out0, out1, out2, out3)                        \
{                                                                          \
    v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                  \
                                                                           \
    ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m);                      \
    out1 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m);                          \
                                                                           \
    ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m);                      \
    out3 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m);                          \
                                                                           \
    ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m);                     \
                                                                           \
    tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m);                                \
    ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m);                     \
                                                                           \
    tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m);                                \
    ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m);               \
    out0 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m);          \
    out2 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m);          \
                                                                           \
    tmp0_m = (v2i64) __msa_ilvod_b((v16i8) out3, (v16i8) out1);            \
    tmp1_m = (v2i64) __msa_ilvod_b((v16i8) tmp3_m, (v16i8) tmp2_m);        \
    out1 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m);          \
    out3 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m);          \
}
#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,          \
                            in8, in9, in10, in11, in12, in13, in14, in15,    \
                            out0, out1, out2, out3, out4, out5, out6, out7)  \
{                                                                            \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                    \
    v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                    \
                                                                             \
    ILVEV_D2_UB(in0, in8, in1, in9, out7, out6);                             \
    ILVEV_D2_UB(in2, in10, in3, in11, out5, out4);                           \
    ILVEV_D2_UB(in4, in12, in5, in13, out3, out2);                           \
    ILVEV_D2_UB(in6, in14, in7, in15, out1, out0);                           \
                                                                             \
    tmp0_m = (v16u8) __msa_ilvev_b((v16i8) out6, (v16i8) out7);              \
    tmp4_m = (v16u8) __msa_ilvod_b((v16i8) out6, (v16i8) out7);              \
    tmp1_m = (v16u8) __msa_ilvev_b((v16i8) out4, (v16i8) out5);              \
    tmp5_m = (v16u8) __msa_ilvod_b((v16i8) out4, (v16i8) out5);              \
    out5 = (v16u8) __msa_ilvev_b((v16i8) out2, (v16i8) out3);                \
    tmp6_m = (v16u8) __msa_ilvod_b((v16i8) out2, (v16i8) out3);              \
    out7 = (v16u8) __msa_ilvev_b((v16i8) out0, (v16i8) out1);                \
    tmp7_m = (v16u8) __msa_ilvod_b((v16i8) out0, (v16i8) out1);              \
                                                                             \
    ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);                 \
    out0 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
    out4 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
                                                                             \
    tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m);          \
    tmp3_m = (v16u8) __msa_ilvod_h((v8i16) out7, (v8i16) out5);              \
    out2 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
    out6 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
                                                                             \
    ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);             \
    out1 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
    out5 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
                                                                             \
    tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp5_m, (v8i16) tmp4_m);          \
    tmp3_m = (v16u8) __msa_ilvod_h((v8i16) tmp7_m, (v8i16) tmp6_m);          \
    out3 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
    out7 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m);            \
}
#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                       \
    v8i16 s0_m, s1_m;                                                   \
                                                                        \
    ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m);                         \
    ILVRL_W2_SH(s1_m, s0_m, out0, out2);                                \
    out1 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out0);            \
    out3 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out2);            \
}
#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,   \
                       out0, out1, out2, out3, out4, out5, out6, out7)  \
{                                                                       \
    v8i16 s0_m, s1_m;                                                   \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                               \
    v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                               \
                                                                        \
    ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m);                         \
    ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m);                            \
    ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m);                         \
    ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m);                            \
    ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                         \
    ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m);                            \
    ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m);                         \
    ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m);                            \
    PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m,     \
             tmp3_m, tmp7_m, out0, out2, out4, out6);                   \
    out1 = (RTYPE) __msa_pckod_d((v2i64) tmp0_m, (v2i64) tmp4_m);       \
    out3 = (RTYPE) __msa_pckod_d((v2i64) tmp1_m, (v2i64) tmp5_m);       \
    out5 = (RTYPE) __msa_pckod_d((v2i64) tmp2_m, (v2i64) tmp6_m);       \
    out7 = (RTYPE) __msa_pckod_d((v2i64) tmp3_m, (v2i64) tmp7_m);       \
}
#define TRANSPOSE8x8_UH_UH(...) TRANSPOSE8x8_H(v8u16, __VA_ARGS__)
#define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)
#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                       \
    v4i32 s0_m, s1_m, s2_m, s3_m;                                       \
                                                                        \
    ILVRL_W2_SW(in1, in0, s0_m, s1_m);                                  \
    ILVRL_W2_SW(in3, in2, s2_m, s3_m);                                  \
                                                                        \
    out0 = (v4i32) __msa_ilvr_d((v2i64) s2_m, (v2i64) s0_m);            \
    out1 = (v4i32) __msa_ilvl_d((v2i64) s2_m, (v2i64) s0_m);            \
    out2 = (v4i32) __msa_ilvr_d((v2i64) s3_m, (v2i64) s1_m);            \
    out3 = (v4i32) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m);            \
}
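/* Description : Average rows of 8x4 or 16x4 byte blocks and store them
   Details     : AVE_* use truncating averages and AVER_* rounding
                 averages; the _DST_ variants additionally average with
                 the data already at 'pdst' before storing.
*/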
#define AVE_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                           \
    uint64_t out0_m, out1_m, out2_m, out3_m;                                \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
                                                                            \
    tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1);                       \
    tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3);                       \
    tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5);                       \
    tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7);                       \
                                                                            \
    out0_m = __msa_copy_u_d((v2i64) tmp0_m, 0);                             \
    out1_m = __msa_copy_u_d((v2i64) tmp1_m, 0);                             \
    out2_m = __msa_copy_u_d((v2i64) tmp2_m, 0);                             \
    out3_m = __msa_copy_u_d((v2i64) tmp3_m, 0);                             \
    SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride);                      \
}
#define AVE_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                            \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                    \
                                                                             \
    tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1);                        \
    tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3);                        \
    tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5);                        \
    tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7);                        \
                                                                             \
    ST_UB4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, pdst, stride);                    \
}
#define AVER_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                            \
    uint64_t out0_m, out1_m, out2_m, out3_m;                                 \
    v16u8 tp0_m, tp1_m, tp2_m, tp3_m;                                        \
                                                                             \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7,                      \
                tp0_m, tp1_m, tp2_m, tp3_m);                                 \
                                                                             \
    out0_m = __msa_copy_u_d((v2i64) tp0_m, 0);                               \
    out1_m = __msa_copy_u_d((v2i64) tp1_m, 0);                               \
    out2_m = __msa_copy_u_d((v2i64) tp2_m, 0);                               \
    out3_m = __msa_copy_u_d((v2i64) tp3_m, 0);                               \
    SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride);                       \
}
#define AVER_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                             \
    v16u8 t0_m, t1_m, t2_m, t3_m;                                             \
                                                                              \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7,                       \
                t0_m, t1_m, t2_m, t3_m);                                      \
    ST_UB4(t0_m, t1_m, t2_m, t3_m, pdst, stride);                             \
}
#define AVER_DST_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7,  \
                          pdst, stride)                            \
{                                                                  \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                          \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                          \
                                                                   \
    LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m);          \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7,            \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m);                   \
    AVER_ST8x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m,                  \
                  dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride);   \
}
#define AVER_DST_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7,  \
                           pdst, stride)                            \
{                                                                   \
    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                           \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                           \
                                                                    \
    LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m);           \
    AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7,             \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m);                    \
    AVER_ST16x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m,                  \
                   dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride);   \
}
#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride)         \
{                                                                 \
    uint32_t src0_m, src1_m, src2_m, src3_m;                      \
    uint32_t out0_m, out1_m, out2_m, out3_m;                      \
    v8i16 inp0_m, inp1_m, res0_m, res1_m;                         \
    v16i8 dst0_m = { 0 };                                         \
    v16i8 dst1_m = { 0 };                                         \
    v16i8 zero_m = { 0 };                                         \
                                                                  \
    ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m)                \
    LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m);            \
    INSERT_W2_SB(src0_m, src1_m, dst0_m);                         \
    INSERT_W2_SB(src2_m, src3_m, dst1_m);                         \
    ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);   \
    ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);         \
    CLIP_SH2_0_255(res0_m, res1_m);                               \
    PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);  \
                                                                  \
    out0_m = __msa_copy_u_w((v4i32) dst0_m, 0);                   \
    out1_m = __msa_copy_u_w((v4i32) dst0_m, 1);                   \
    out2_m = __msa_copy_u_w((v4i32) dst1_m, 0);                   \
    out3_m = __msa_copy_u_w((v4i32) dst1_m, 1);                   \
    SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride);            \
}
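/* Description : Miscellaneous filtering helpers
   Details     : DPADD_SH3_SH accumulates three byte-vector dot products
                 and returns the v8i16 result; PCKEV_XORI128_UB packs even
                 bytes and flips their sign bit; HORIZ_2TAP_FILT_UH applies
                 a horizontal 2-tap filter with rounding, shift and
                 saturation.
*/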
#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2)         \
( {                                                                 \
    v8i16 out0_m;                                                   \
                                                                    \
    out0_m = __msa_dotp_s_h((v16i8) in0, (v16i8) coeff0);           \
    out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in1, (v16i8) coeff1);  \
    out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in2, (v16i8) coeff2);  \
    out0_m;                                                         \
} )
#define PCKEV_XORI128_UB(in0, in1)                            \
( {                                                           \
    v16u8 out_m;                                              \
                                                              \
    out_m = (v16u8) __msa_pckev_b((v16i8) in1, (v16i8) in0);  \
    out_m = (v16u8) __msa_xori_b((v16u8) out_m, 128);         \
    out_m;                                                    \
} )
#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3,           \
                                dst0, dst1, pdst, stride)     \
{                                                             \
    v16u8 tmp0_m, tmp1_m;                                     \
    uint8_t *pdst_m = (uint8_t *) (pdst);                     \
                                                              \
    tmp0_m = PCKEV_XORI128_UB(in0, in1);                      \
    tmp1_m = PCKEV_XORI128_UB(in2, in3);                      \
    AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m);  \
    ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, pdst_m, stride);        \
}
#define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride)  \
{                                                         \
    uint32_t out0_m, out1_m, out2_m, out3_m;              \
    v16i8 tmp0_m, tmp1_m;                                 \
                                                          \
    PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m);      \
                                                          \
    out0_m = __msa_copy_u_w((v4i32) tmp0_m, 0);           \
    out1_m = __msa_copy_u_w((v4i32) tmp0_m, 2);           \
    out2_m = __msa_copy_u_w((v4i32) tmp1_m, 0);           \
    out3_m = __msa_copy_u_w((v4i32) tmp1_m, 2);           \
                                                          \
    SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride);    \
}
#define PCKEV_ST_SB(in0, in1, pdst)                   \
{                                                     \
    v16i8 tmp_m;                                      \
                                                      \
    tmp_m = __msa_pckev_b((v16i8) in1, (v16i8) in0);  \
    ST_SB(tmp_m, (pdst));                             \
}
#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift)            \
( {                                                                 \
    v16i8 tmp0_m;                                                   \
    v8u16 tmp1_m;                                                   \
                                                                    \
    tmp0_m = __msa_vshf_b((v16i8) mask, (v16i8) in1, (v16i8) in0);  \
    tmp1_m = __msa_dotp_u_h((v16u8) tmp0_m, (v16u8) coeff);         \
    tmp1_m = (v8u16) __msa_srari_h((v8i16) tmp1_m, shift);          \
    tmp1_m = __msa_sat_u_h(tmp1_m, shift);                          \
    tmp1_m;                                                         \
} )

#endif  /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */