#define VP9_DCT_CONST_BITS 14
#define ALLOC_ALIGNED(align) __attribute__ ((aligned(align)))
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
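/* ROUND_POWER_OF_TWO rounds to nearest rather than truncating: e.g.
 * ROUND_POWER_OF_TWO(48, 5) = (48 + 16) >> 5 = 2, where a plain 48 >> 5
 * would give 1. VP9_DCT_CONST_BITS (14) is the fixed-point precision of
 * the cospi_*_64 transform constants. */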
#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1)  \
{                                                                  \
    __m128i k0_m = __lsx_vreplgr2vr_h(cnst0);                      \
    __m128i s0_m, s1_m, s2_m, s3_m;                                \
                                                                   \
    s0_m = __lsx_vreplgr2vr_h(cnst1);                              \
    k0_m = __lsx_vpackev_h(s0_m, k0_m);                            \
                                                                   \
    s1_m = __lsx_vilvl_h(__lsx_vneg_h(reg1), reg0);                \
    s0_m = __lsx_vilvh_h(__lsx_vneg_h(reg1), reg0);                \
    s3_m = __lsx_vilvl_h(reg0, reg1);                              \
    s2_m = __lsx_vilvh_h(reg0, reg1);                              \
    DUP2_ARG2(__lsx_vdp2_w_h, s1_m, k0_m, s0_m, k0_m, s1_m, s0_m); \
    DUP2_ARG2(__lsx_vsrari_w, s1_m, VP9_DCT_CONST_BITS,            \
              s0_m, VP9_DCT_CONST_BITS, s1_m, s0_m);               \
    out0 = __lsx_vpickev_h(s0_m, s1_m);                            \
    DUP2_ARG2(__lsx_vdp2_w_h, s3_m, k0_m, s2_m, k0_m, s1_m, s0_m); \
    DUP2_ARG2(__lsx_vsrari_w, s1_m, VP9_DCT_CONST_BITS,            \
              s0_m, VP9_DCT_CONST_BITS, s1_m, s0_m);               \
    out1 = __lsx_vpickev_h(s0_m, s1_m);                            \
}
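/* The interleave + dot-product sequence above evaluates the usual DCT
 * rotation on each 16-bit lane:
 *   out0 = ROUND_POWER_OF_TWO(reg0 * cnst0 - reg1 * cnst1, 14)
 *   out1 = ROUND_POWER_OF_TWO(reg1 * cnst0 + reg0 * cnst1, 14)
 * k0_m carries (cnst0, cnst1) interleaved, so each __lsx_vdp2_w_h forms
 * the two-term product sum in 32-bit precision before the rounding shift. */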
#define VP9_SET_COSPI_PAIR(c0_h, c1_h)     \
( {                                        \
    __m128i out0_m, r0_m, r1_m;            \
                                           \
    r0_m = __lsx_vreplgr2vr_h(c0_h);       \
    r1_m = __lsx_vreplgr2vr_h(c1_h);       \
    out0_m = __lsx_vpackev_h(r1_m, r0_m);  \
                                           \
    out0_m;                                \
} )
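/* Yields a vector with c0_h in the even and c1_h in the odd 16-bit lanes;
 * the result is meant to be consumed as the coefficient operand of a
 * __lsx_vdp2_w_h dot product, as in VP9_MADD below. */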
#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3)      \
{                                                                     \
    uint8_t *dst_m = (uint8_t *) (dst);                               \
    __m128i dst0_m, dst1_m, dst2_m, dst3_m;                           \
    __m128i tmp0_m, tmp1_m;                                           \
    __m128i res0_m, res1_m, res2_m, res3_m;                           \
    __m128i zero_m = __lsx_vldi(0);                                   \
    DUP4_ARG2(__lsx_vld, dst_m, 0, dst_m + dst_stride, 0,             \
              dst_m + 2 * dst_stride, 0, dst_m + 3 * dst_stride, 0,   \
              dst0_m, dst1_m, dst2_m, dst3_m);                        \
    DUP4_ARG2(__lsx_vilvl_b, zero_m, dst0_m, zero_m, dst1_m, zero_m,  \
              dst2_m, zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m);\
    DUP4_ARG2(__lsx_vadd_h, res0_m, in0, res1_m, in1, res2_m, in2,    \
              res3_m, in3, res0_m, res1_m, res2_m, res3_m);           \
    DUP4_ARG1(__lsx_vclip255_h, res0_m, res1_m, res2_m, res3_m,       \
              res0_m, res1_m, res2_m, res3_m);                        \
    DUP2_ARG2(__lsx_vpickev_b, res1_m, res0_m, res3_m, res2_m,        \
              tmp0_m, tmp1_m);                                        \
    __lsx_vstelm_d(tmp0_m, dst_m, 0, 0);                              \
    __lsx_vstelm_d(tmp0_m, dst_m + dst_stride, 0, 1);                 \
    __lsx_vstelm_d(tmp1_m, dst_m + 2 * dst_stride, 0, 0);             \
    __lsx_vstelm_d(tmp1_m, dst_m + 3 * dst_stride, 0, 1);             \
}
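/* Reconstruction helper: loads four rows of 8 destination pixels, widens
 * them to 16 bits, adds the residues in0..in3, clips to [0, 255], packs
 * back to bytes and stores each 8-byte row with __lsx_vstelm_d. */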
#define VP9_UNPCK_UB_SH(in, out_h, out_l)  \
{                                          \
    __m128i zero = __lsx_vldi(0);          \
    out_l = __lsx_vilvl_b(zero, in);       \
    out_h = __lsx_vilvh_b(zero, in);       \
}
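/* Zero-extends 16 unsigned bytes into two vectors of eight 16-bit lanes
 * (low half into out_l, high half into out_h). */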
#define VP9_ILVLTRANS4x8_H(in0, in1, in2, in3, in4, in5, in6, in7,         \
                           out0, out1, out2, out3, out4, out5, out6, out7) \
{                                                                          \
    __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                \
    __m128i tmp0_n, tmp1_n, tmp2_n, tmp3_n;                                \
    __m128i zero_m = __lsx_vldi(0);                                        \
                                                                           \
    DUP4_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, in5, in4, in7, in6,       \
              tmp0_n, tmp1_n, tmp2_n, tmp3_n);                             \
    tmp0_m = __lsx_vilvl_w(tmp1_n, tmp0_n);                                \
    tmp2_m = __lsx_vilvh_w(tmp1_n, tmp0_n);                                \
    tmp1_m = __lsx_vilvl_w(tmp3_n, tmp2_n);                                \
    tmp3_m = __lsx_vilvh_w(tmp3_n, tmp2_n);                                \
                                                                           \
    out0 = __lsx_vilvl_d(tmp1_m, tmp0_m);                                  \
    out1 = __lsx_vilvh_d(tmp1_m, tmp0_m);                                  \
    out2 = __lsx_vilvl_d(tmp3_m, tmp2_m);                                  \
    out3 = __lsx_vilvh_d(tmp3_m, tmp2_m);                                  \
                                                                           \
    out4 = zero_m;                                                         \
    out5 = zero_m;                                                         \
    out6 = zero_m;                                                         \
    out7 = zero_m;                                                         \
}
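/* Transposes the low 64 bits (four 16-bit columns) of eight input rows by
 * successive 16-, 32- and 64-bit interleaves; out4..out7 are cleared since
 * only four columns of data exist. */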
#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,            \
                 out0, out1, out2, out3)                                    \
{                                                                           \
    __m128i madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
    __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
                                                                            \
    madd_s1_m = __lsx_vilvl_h(inp1, inp0);                                  \
    madd_s0_m = __lsx_vilvh_h(inp1, inp0);                                  \
    madd_s3_m = __lsx_vilvl_h(inp3, inp2);                                  \
    madd_s2_m = __lsx_vilvh_h(inp3, inp2);                                  \
    DUP4_ARG2(__lsx_vdp2_w_h, madd_s1_m, cst0, madd_s0_m, cst0,             \
              madd_s1_m, cst1, madd_s0_m, cst1, tmp0_m, tmp1_m,             \
              tmp2_m, tmp3_m);                                              \
    DUP4_ARG2(__lsx_vsrari_w, tmp0_m, VP9_DCT_CONST_BITS, tmp1_m,           \
              VP9_DCT_CONST_BITS, tmp2_m, VP9_DCT_CONST_BITS, tmp3_m,       \
              VP9_DCT_CONST_BITS, tmp0_m, tmp1_m, tmp2_m, tmp3_m);          \
    DUP2_ARG2(__lsx_vpickev_h, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1); \
    DUP4_ARG2(__lsx_vdp2_w_h, madd_s3_m, cst2, madd_s2_m, cst2, madd_s3_m,  \
              cst3, madd_s2_m, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);       \
    DUP4_ARG2(__lsx_vsrari_w, tmp0_m, VP9_DCT_CONST_BITS,                   \
              tmp1_m, VP9_DCT_CONST_BITS, tmp2_m, VP9_DCT_CONST_BITS,       \
              tmp3_m, VP9_DCT_CONST_BITS, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    DUP2_ARG2(__lsx_vpickev_h, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3); \
}
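/* Two butterfly rotations in one macro: (inp0, inp1) against the cst0/cst1
 * coefficient pairs and (inp2, inp3) against cst2/cst3, each product sum
 * rounded back to 16 bits by VP9_DCT_CONST_BITS. */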
#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h)                            \
( {                                                                           \
    __m128i c0_m, c1_m;                                                       \
                                                                              \
    DUP2_ARG2(__lsx_vreplvei_h, mask_h, idx1_h, mask_h, idx2_h, c0_m, c1_m);  \
    c0_m = __lsx_vpackev_h(c1_m, c0_m);                                       \
                                                                              \
    c0_m;                                                                     \
} )
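/* Same result shape as VP9_SET_COSPI_PAIR, but the two constants are
 * picked out of an existing vector (mask_h) by lane index instead of
 * being broadcast from scalars. */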
#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,                \
                       out0, out1, out2, out3, out4, out5, out6, out7)        \
{                                                                             \
    __m128i tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;           \
    __m128i k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;           \
    __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
    v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64,       \
                     cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };  \
                                                                              \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                  \
    k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                  \
    k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                  \
    k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                  \
    VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
    DUP2_ARG2(__lsx_vsub_h, in1, in3, in7, in5, res0_m, res1_m);              \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                  \
    k1_m = __lsx_vreplvei_h(mask_m, 4);                                       \
                                                                              \
    res2_m = __lsx_vilvl_h(res0_m, res1_m);                                   \
    res3_m = __lsx_vilvh_h(res0_m, res1_m);                                   \
    DUP4_ARG2(__lsx_vdp2_w_h, res2_m, k0_m, res3_m, k0_m, res2_m, k1_m,       \
              res3_m, k1_m, tmp0_m, tmp1_m, tmp2_m, tmp3_m);                  \
    DUP4_ARG2(__lsx_vsrari_w, tmp0_m, VP9_DCT_CONST_BITS,                     \
              tmp1_m, VP9_DCT_CONST_BITS, tmp2_m, VP9_DCT_CONST_BITS,         \
              tmp3_m, VP9_DCT_CONST_BITS, tmp0_m, tmp1_m, tmp2_m, tmp3_m);    \
    tp4_m = __lsx_vadd_h(in1, in3);                                           \
    DUP2_ARG2(__lsx_vpickev_h, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \
    tp7_m = __lsx_vadd_h(in7, in5);                                           \
    k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                      \
    k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                       \
    VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m,                      \
             in0, in4, in2, in6);                                             \
    LSX_BUTTERFLY_4_H(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);        \
    LSX_BUTTERFLY_8_H(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, \
                      out0, out1, out2, out3, out4, out5, out6, out7);        \
}
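/* One-dimensional 8-point IDCT across eight lanes: the odd inputs
 * (in1, in3, in5, in7) are rotated into tp4_m..tp7_m, the even inputs
 * into tp0_m..tp3_m, and LSX_BUTTERFLY_8_H merges the two halves into
 * out0..out7. */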
    vec = __lsx_vreplgr2vr_h(val);

    dst += (4 * dst_stride);
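/* Fragment of the DC-only 8x8 path: the scaled DC value is broadcast into
 * vec and added to the destination four rows at a time, hence the
 * 4 * dst_stride step. */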
    __m128i in0, in1, in2, in3, in4, in5, in6, in7;
    __m128i s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
    __m128i tmp0, tmp1, tmp2, tmp3;
    __m128i zero = __lsx_vldi(0);

    DUP4_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, in5, in4, in7,
              in6, in0, in1, in2, in3);

    DUP2_ARG2(__lsx_vilvh_h, in3, in0, in2, in1, s0, s1);

    DUP4_ARG2(__lsx_vdp2_w_h, s0, k0, s0, k1, s1, k2, s1, k3,
              tmp0, tmp1, tmp2, tmp3);

    LSX_BUTTERFLY_4_H(s0, s1, s3, s2, s4, s7, s6, s5);

    DUP2_ARG2(__lsx_vilvl_h, in3, in1, in2, in0, s1, s0);

    DUP4_ARG2(__lsx_vdp2_w_h, s0, k0, s0, k1, s1, k2, s1, k3,
              tmp0, tmp1, tmp2, tmp3);

    LSX_BUTTERFLY_4_H(s0, s1, s2, s3, m0, m1, m2, m3);

    s0 = __lsx_vilvl_h(s6, s5);

    DUP2_ARG2(__lsx_vdp2_w_h, s0, k1, s0, k0, tmp0, tmp1);

    LSX_BUTTERFLY_8_H(m0, m1, m2, m3, s4, s2, s3, s7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
                      in0, in1, in2, in3, in4, in5, in6, in7);
                      in0, in1, in2, in3, in4, in5, in6, in7);

    DUP4_ARG2(__lsx_vsrari_h, in0, 5, in1, 5, in2, 5, in3, 5,
              in0, in1, in2, in3);
    DUP4_ARG2(__lsx_vsrari_h, in4, 5, in5, 5, in6, 5, in7, 5,
              in4, in5, in6, in7);

    dst += (4 * dst_stride);
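/* iadst-style 8x8 pass (shown in part): inputs are interleaved in pairs,
 * rotated through __lsx_vdp2_w_h with the k0..k3 coefficient pairs,
 * recombined by butterflies, and finally rounded by 5 bits, the 8x8
 * inverse transform's output shift. */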
    __m128i in0, in1, in2, in3, in4, in5, in6, in7;
    __m128i zero = __lsx_vldi(0);

              in0, in1, in2, in3, in4, in5, in6, in7);

    LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);

              in0, in1, in2, in3, in4, in5, in6, in7);

    DUP4_ARG2(__lsx_vsrari_h, in0, 5, in1, 5, in2, 5, in3, 5,
              in0, in1, in2, in3);
    DUP4_ARG2(__lsx_vsrari_h, in4, 5, in5, 5, in6, 5, in7, 5,
              in4, in5, in6, in7);

    dst += (4 * dst_stride);
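/* 8x8 IDCT skeleton: a column pass, an LSX_TRANSPOSE8x8_H, a row pass,
 * then >> 5 rounding before the result is added into dst. */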
    __m128i loc0, loc1, loc2, loc3;
    __m128i reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    __m128i reg1, reg3, reg5, reg7, reg9, reg11, reg13, reg15;
    __m128i tmp5, tmp6, tmp7;
    __m128i zero = __lsx_vldi(0);

              reg0, reg1, reg2, reg3);
              reg4, reg5, reg6, reg7);
              reg8, reg9, reg10, reg11);
              32 * 15, reg12, reg13, reg14, reg15);

    LSX_BUTTERFLY_4_H(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);

    LSX_BUTTERFLY_4_H(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = __lsx_vsub_h(reg2, loc1);
    reg2 = __lsx_vadd_h(reg2, loc1);
    reg12 = __lsx_vsub_h(reg14, loc0);
    reg14 = __lsx_vadd_h(reg14, loc0);
    reg4 = __lsx_vsub_h(reg6, loc3);
    reg6 = __lsx_vadd_h(reg6, loc3);
    reg8 = __lsx_vsub_h(reg10, loc2);
    reg10 = __lsx_vadd_h(reg10, loc2);

    reg9 = __lsx_vsub_h(reg1, loc2);
    reg1 = __lsx_vadd_h(reg1, loc2);
    reg7 = __lsx_vsub_h(reg15, loc3);
    reg15 = __lsx_vadd_h(reg15, loc3);

    LSX_BUTTERFLY_4_H(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = __lsx_vadd_h(reg15, reg3);
    reg3 = __lsx_vsub_h(reg15, reg3);
    loc2 = __lsx_vadd_h(reg2, loc1);
    reg15 = __lsx_vsub_h(reg2, loc1);

    loc1 = __lsx_vadd_h(reg1, reg13);
    reg13 = __lsx_vsub_h(reg1, reg13);
    loc0 = __lsx_vadd_h(reg0, loc1);
    loc1 = __lsx_vsub_h(reg0, loc1);

    loc0 = __lsx_vadd_h(reg9, reg5);
    reg5 = __lsx_vsub_h(reg9, reg5);
    reg2 = __lsx_vadd_h(reg6, loc0);
    reg1 = __lsx_vsub_h(reg6, loc0);

    loc0 = __lsx_vadd_h(reg7, reg11);
    reg11 = __lsx_vsub_h(reg7, reg11);
    loc1 = __lsx_vadd_h(reg4, loc0);
    loc2 = __lsx_vsub_h(reg4, loc0);

    LSX_BUTTERFLY_4_H(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    LSX_BUTTERFLY_4_H(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);

    DUP4_ARG2(__lsx_vsrari_h, reg0, 6, reg2, 6, reg4, 6, reg6, 6,
              reg0, reg2, reg4, reg6);

    DUP4_ARG2(__lsx_vsrari_h, reg8, 6, reg10, 6, reg12, 6, reg14, 6,
              reg8, reg10, reg12, reg14);

    DUP4_ARG2(__lsx_vsrari_h, reg3, 6, reg5, 6, reg11, 6, reg13, 6,
              reg3, reg5, reg11, reg13);

    DUP4_ARG2(__lsx_vsrari_h, reg1, 6, reg7, 6, reg9, 6, reg15, 6,
              reg1, reg7, reg9, reg15);
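/* 16-point column pass with >> 6 rounding: the 16x16 inverse transform
 * shifts by 6 in its final pass, so these rounded regs feed the
 * add-to-destination stage directly. */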
    __m128i loc0, loc1, loc2, loc3;
    __m128i reg1, reg3, reg5, reg7, reg9, reg11, reg13, reg15;
    __m128i reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    __m128i tmp5, tmp6, tmp7;
    __m128i zero = __lsx_vldi(0);

              reg0, reg1, reg2, reg3);
              reg4, reg5, reg6, reg7);
              reg8, reg9, reg10, reg11);
              32 * 15, reg12, reg13, reg14, reg15);

    LSX_BUTTERFLY_4_H(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);

    LSX_BUTTERFLY_4_H(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = __lsx_vsub_h(reg2, loc1);
    reg2 = __lsx_vadd_h(reg2, loc1);
    reg12 = __lsx_vsub_h(reg14, loc0);
    reg14 = __lsx_vadd_h(reg14, loc0);
    reg4 = __lsx_vsub_h(reg6, loc3);
    reg6 = __lsx_vadd_h(reg6, loc3);
    reg8 = __lsx_vsub_h(reg10, loc2);
    reg10 = __lsx_vadd_h(reg10, loc2);

    reg9 = __lsx_vsub_h(reg1, loc2);
    reg1 = __lsx_vadd_h(reg1, loc2);
    reg7 = __lsx_vsub_h(reg15, loc3);
    reg15 = __lsx_vadd_h(reg15, loc3);

    LSX_BUTTERFLY_4_H(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = __lsx_vadd_h(reg15, reg3);
    reg3 = __lsx_vsub_h(reg15, reg3);
    loc2 = __lsx_vadd_h(reg2, loc1);
    reg15 = __lsx_vsub_h(reg2, loc1);

    loc1 = __lsx_vadd_h(reg1, reg13);
    reg13 = __lsx_vsub_h(reg1, reg13);
    loc0 = __lsx_vadd_h(reg0, loc1);
    loc1 = __lsx_vsub_h(reg0, loc1);

    loc0 = __lsx_vadd_h(reg9, reg5);
    reg5 = __lsx_vsub_h(reg9, reg5);
    reg2 = __lsx_vadd_h(reg6, loc0);
    reg1 = __lsx_vsub_h(reg6, loc0);

    loc0 = __lsx_vadd_h(reg7, reg11);
    reg11 = __lsx_vsub_h(reg7, reg11);
    loc1 = __lsx_vadd_h(reg4, loc0);
    loc2 = __lsx_vsub_h(reg4, loc0);

    LSX_BUTTERFLY_4_H(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    LSX_BUTTERFLY_4_H(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
    LSX_TRANSPOSE8x8_H(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
                       reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);

    __lsx_vst(reg0, output, 32 * 0);
    __lsx_vst(reg2, output, 32 * 1);
    __lsx_vst(reg4, output, 32 * 2);
    __lsx_vst(reg6, output, 32 * 3);
    __lsx_vst(reg8, output, 32 * 4);
    __lsx_vst(reg10, output, 32 * 5);
    __lsx_vst(reg12, output, 32 * 6);
    __lsx_vst(reg14, output, 32 * 7);

    LSX_TRANSPOSE8x8_H(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
                       reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);

    __lsx_vst(reg3, offset, 32 * 0);
    __lsx_vst(reg13, offset, 32 * 1);
    __lsx_vst(reg11, offset, 32 * 2);
    __lsx_vst(reg5, offset, 32 * 3);

    __lsx_vst(reg7, offset, 32 * 0);
    __lsx_vst(reg9, offset, 32 * 1);
    __lsx_vst(reg1, offset, 32 * 2);
    __lsx_vst(reg15, offset, 32 * 3);
    __m128i vec, res0, res1, res2, res3, res4, res5, res6, res7;
    __m128i dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
    int32_t stride2 = dst_stride << 1;
    int32_t stride3 = stride2 + dst_stride;
    int32_t stride4 = stride2 << 1;

    vec = __lsx_vreplgr2vr_h(out);

    dst0 = __lsx_vld(dst, 0);
    dst3 = __lsx_vldx(dst, stride3);

    DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec,
              res0, res1, res2, res3);
    DUP4_ARG2(__lsx_vadd_h, res4, vec, res5, vec, res6, vec, res7, vec,
              res4, res5, res6, res7);
    DUP4_ARG1(__lsx_vclip255_h, res0, res1, res2, res3,
              res0, res1, res2, res3);
    DUP4_ARG1(__lsx_vclip255_h, res4, res5, res6, res7,
              res4, res5, res6, res7);
    DUP4_ARG2(__lsx_vpickev_b, res4, res0, res5, res1, res6,
              res2, res7, res3, tmp0, tmp1, tmp2, tmp3);
    __lsx_vst(tmp0, dst, 0);
    __lsx_vstx(tmp1, dst, dst_stride);
    __lsx_vstx(tmp2, dst, stride2);
    __lsx_vstx(tmp3, dst, stride3);
    int16_t *out = out_arr;
    __m128i zero = __lsx_vldi(0);

    for (i = 0; i < 2; i++) {

    int16_t *out = out_arr;

    for (i = 0; i < 2; i++) {

    for (i = 0; i < 2; i++) {
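/* 16x16 drivers: each i < 2 iteration covers eight columns (one __m128i
 * of 16-bit lanes) per 1-D pass, first transforming into out_arr and then
 * from out_arr onto the destination block. */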
                                                int16_t *tmp_eve_buf,
                                                int16_t *tmp_odd_buf,

    __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    __m128i m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

    vec0 = __lsx_vld(tmp_odd_buf, 0);
    vec1 = __lsx_vld(tmp_odd_buf, 9 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 14 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 6 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 0);
    loc1 = __lsx_vld(tmp_eve_buf, 8 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 4 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 12 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,

#define SUB(a, b) __lsx_vsub_h(a, b)
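/* SUB is a local shorthand so each difference can be formed inline in the
 * store argument: the sums stay in the m/n registers for rows 0..15 while
 * the differences go back into tmp_buf for rows 16..31. */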
    __lsx_vst(SUB(loc0, vec3), tmp_buf, 31 * 16);
    __lsx_vst(SUB(loc1, vec2), tmp_buf, 23 * 16);
    __lsx_vst(SUB(loc2, vec1), tmp_buf, 27 * 16);
    __lsx_vst(SUB(loc3, vec0), tmp_buf, 19 * 16);

    vec0 = __lsx_vld(tmp_odd_buf, 4 * 16);
    vec1 = __lsx_vld(tmp_odd_buf, 13 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 10 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 3 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 2 * 16);
    loc1 = __lsx_vld(tmp_eve_buf, 10 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 6 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 14 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,

    __lsx_vst(SUB(loc0, vec3), tmp_buf, 29 * 16);
    __lsx_vst(SUB(loc1, vec2), tmp_buf, 21 * 16);
    __lsx_vst(SUB(loc2, vec1), tmp_buf, 25 * 16);
    __lsx_vst(SUB(loc3, vec0), tmp_buf, 17 * 16);

    vec0 = __lsx_vld(tmp_odd_buf, 2 * 16);
    vec1 = __lsx_vld(tmp_odd_buf, 11 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 12 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 7 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 1 * 16);
    loc1 = __lsx_vld(tmp_eve_buf, 9 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 5 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 13 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,

    __lsx_vst(SUB(loc0, vec3), tmp_buf, 30 * 16);
    __lsx_vst(SUB(loc1, vec2), tmp_buf, 22 * 16);
    __lsx_vst(SUB(loc2, vec1), tmp_buf, 26 * 16);
    __lsx_vst(SUB(loc3, vec0), tmp_buf, 18 * 16);

    vec0 = __lsx_vld(tmp_odd_buf, 5 * 16);
    vec1 = __lsx_vld(tmp_odd_buf, 15 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 8 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 1 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 3 * 16);
    loc1 = __lsx_vld(tmp_eve_buf, 11 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 7 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 15 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,

    __lsx_vst(SUB(loc0, vec3), tmp_buf, 28 * 16);
    __lsx_vst(SUB(loc1, vec2), tmp_buf, 20 * 16);
    __lsx_vst(SUB(loc2, vec1), tmp_buf, 24 * 16);
    __lsx_vst(SUB(loc3, vec0), tmp_buf, 16 * 16);
    LSX_TRANSPOSE8x8_H(m0, n0, m1, n1, m2, n2, m3, n3,
                       m0, n0, m1, n1, m2, n2, m3, n3);
    __lsx_vst(m0, dst, 0);
    __lsx_vst(n0, dst, 32 * 2);
    __lsx_vst(m1, dst, 32 * 4);
    __lsx_vst(n1, dst, 32 * 6);
    __lsx_vst(m2, dst, 32 * 8);
    __lsx_vst(n2, dst, 32 * 10);
    __lsx_vst(m3, dst, 32 * 12);
    __lsx_vst(n3, dst, 32 * 14);

    LSX_TRANSPOSE8x8_H(m4, n4, m5, n5, m6, n6, m7, n7,
                       m4, n4, m5, n5, m6, n6, m7, n7);

    __lsx_vst(m4, dst, 16);
    __lsx_vst(n4, dst, 16 + 32 * 2);
    __lsx_vst(m5, dst, 16 + 32 * 4);
    __lsx_vst(n5, dst, 16 + 32 * 6);
    __lsx_vst(m6, dst, 16 + 32 * 8);
    __lsx_vst(n6, dst, 16 + 32 * 10);
    __lsx_vst(m7, dst, 16 + 32 * 12);
    __lsx_vst(n7, dst, 16 + 32 * 14);
    DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 16, tmp_buf, 16 * 17,
              tmp_buf, 16 * 18, tmp_buf, 16 * 19, m0, n0, m1, n1);
    DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 20, tmp_buf, 16 * 21,
              tmp_buf, 16 * 22, tmp_buf, 16 * 23, m2, n2, m3, n3);

    DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 24, tmp_buf, 16 * 25,
              tmp_buf, 16 * 26, tmp_buf, 16 * 27, m4, n4, m5, n5);
    DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 28, tmp_buf, 16 * 29,
              tmp_buf, 16 * 30, tmp_buf, 16 * 31, m6, n6, m7, n7);

    LSX_TRANSPOSE8x8_H(m0, n0, m1, n1, m2, n2, m3, n3,
                       m0, n0, m1, n1, m2, n2, m3, n3);

    __lsx_vst(m0, dst, 32);
    __lsx_vst(n0, dst, 32 + 32 * 2);
    __lsx_vst(m1, dst, 32 + 32 * 4);
    __lsx_vst(n1, dst, 32 + 32 * 6);
    __lsx_vst(m2, dst, 32 + 32 * 8);
    __lsx_vst(n2, dst, 32 + 32 * 10);
    __lsx_vst(m3, dst, 32 + 32 * 12);
    __lsx_vst(n3, dst, 32 + 32 * 14);

    LSX_TRANSPOSE8x8_H(m4, n4, m5, n5, m6, n6, m7, n7,
                       m4, n4, m5, n5, m6, n6, m7, n7);

    __lsx_vst(m4, dst, 48);
    __lsx_vst(n4, dst, 48 + 32 * 2);
    __lsx_vst(m5, dst, 48 + 32 * 4);
    __lsx_vst(n5, dst, 48 + 32 * 6);
    __lsx_vst(m6, dst, 48 + 32 * 8);
    __lsx_vst(n6, dst, 48 + 32 * 10);
    __lsx_vst(m7, dst, 48 + 32 * 12);
    __lsx_vst(n7, dst, 48 + 32 * 14);
                                                int16_t *tmp_eve_buf)
{
    __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
    __m128i stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
    __m128i zero = __lsx_vldi(0);

    DUP4_ARG2(__lsx_vld, tmp_buf, 0, tmp_buf, 32 * 8,
              tmp_buf, 32 * 16, tmp_buf, 32 * 24, reg0, reg1, reg2, reg3);
    DUP4_ARG2(__lsx_vld, tmp_buf, 32 * 32, tmp_buf, 32 * 40,
              tmp_buf, 32 * 48, tmp_buf, 32 * 56, reg4, reg5, reg6, reg7);

    __lsx_vst(zero, tmp_buf, 0);
    __lsx_vst(zero, tmp_buf, 32 * 8);
    __lsx_vst(zero, tmp_buf, 32 * 16);
    __lsx_vst(zero, tmp_buf, 32 * 24);
    __lsx_vst(zero, tmp_buf, 32 * 32);
    __lsx_vst(zero, tmp_buf, 32 * 40);
    __lsx_vst(zero, tmp_buf, 32 * 48);
    __lsx_vst(zero, tmp_buf, 32 * 56);

    LSX_BUTTERFLY_4_H(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);

    LSX_BUTTERFLY_4_H(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
    LSX_BUTTERFLY_4_H(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
    LSX_BUTTERFLY_4_H(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

    DUP4_ARG2(__lsx_vld, tmp_buf, 0, tmp_buf, 32 * 8,
              tmp_buf, 32 * 16, tmp_buf, 32 * 24, reg0, reg1, reg2, reg3);
    DUP4_ARG2(__lsx_vld, tmp_buf, 32 * 32, tmp_buf, 32 * 40,
              tmp_buf, 32 * 48, tmp_buf, 32 * 56, reg4, reg5, reg6, reg7);

    __lsx_vst(zero, tmp_buf, 0);
    __lsx_vst(zero, tmp_buf, 32 * 8);
    __lsx_vst(zero, tmp_buf, 32 * 16);
    __lsx_vst(zero, tmp_buf, 32 * 24);
    __lsx_vst(zero, tmp_buf, 32 * 32);
    __lsx_vst(zero, tmp_buf, 32 * 40);
    __lsx_vst(zero, tmp_buf, 32 * 48);
    __lsx_vst(zero, tmp_buf, 32 * 56);
    vec0 = __lsx_vadd_h(reg0, reg4);
    reg0 = __lsx_vsub_h(reg0, reg4);
    reg4 = __lsx_vadd_h(reg6, reg2);
    reg6 = __lsx_vsub_h(reg6, reg2);
    reg2 = __lsx_vadd_h(reg1, reg5);
    reg1 = __lsx_vsub_h(reg1, reg5);
    reg5 = __lsx_vadd_h(reg7, reg3);
    reg7 = __lsx_vsub_h(reg7, reg3);

    reg2 = __lsx_vadd_h(reg3, reg4);
    reg3 = __lsx_vsub_h(reg3, reg4);
    reg4 = __lsx_vsub_h(reg5, vec1);
    reg5 = __lsx_vadd_h(reg5, vec1);

    vec0 = __lsx_vsub_h(reg0, reg6);
    reg0 = __lsx_vadd_h(reg0, reg6);
    vec1 = __lsx_vsub_h(reg7, reg1);
    reg7 = __lsx_vadd_h(reg7, reg1);

    LSX_BUTTERFLY_4_H(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
    __lsx_vst(loc1, tmp_eve_buf, 0);
    __lsx_vst(loc3, tmp_eve_buf, 16);
    __lsx_vst(loc2, tmp_eve_buf, 14 * 16);
    __lsx_vst(loc0, tmp_eve_buf, 14 * 16 + 16);
    LSX_BUTTERFLY_4_H(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
    __lsx_vst(loc1, tmp_eve_buf, 2 * 16);
    __lsx_vst(loc3, tmp_eve_buf, 2 * 16 + 16);
    __lsx_vst(loc2, tmp_eve_buf, 12 * 16);
    __lsx_vst(loc0, tmp_eve_buf, 12 * 16 + 16);

    LSX_BUTTERFLY_4_H(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
    __lsx_vst(loc1, tmp_eve_buf, 4 * 16);
    __lsx_vst(loc3, tmp_eve_buf, 4 * 16 + 16);
    __lsx_vst(loc2, tmp_eve_buf, 10 * 16);
    __lsx_vst(loc0, tmp_eve_buf, 10 * 16 + 16);

    LSX_BUTTERFLY_4_H(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
    __lsx_vst(loc1, tmp_eve_buf, 6 * 16);
    __lsx_vst(loc3, tmp_eve_buf, 6 * 16 + 16);
    __lsx_vst(loc2, tmp_eve_buf, 8 * 16);
    __lsx_vst(loc0, tmp_eve_buf, 8 * 16 + 16);
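/* The even-part outputs are stored to tmp_eve_buf in mirrored pairs from
 * both ends (slots 0/1 with 14/15, 2/3 with 12/13, ...), matching the
 * final stage that adds and subtracts the odd part against them. */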
                                               int16_t *tmp_odd_buf)
{
    __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
    __m128i zero = __lsx_vldi(0);

    reg0 = __lsx_vld(tmp_buf, 64);
    reg1 = __lsx_vld(tmp_buf, 7 * 64);
    reg2 = __lsx_vld(tmp_buf, 9 * 64);
    reg3 = __lsx_vld(tmp_buf, 15 * 64);
    reg4 = __lsx_vld(tmp_buf, 17 * 64);
    reg5 = __lsx_vld(tmp_buf, 23 * 64);
    reg6 = __lsx_vld(tmp_buf, 25 * 64);
    reg7 = __lsx_vld(tmp_buf, 31 * 64);

    __lsx_vst(zero, tmp_buf, 64);
    __lsx_vst(zero, tmp_buf, 7 * 64);
    __lsx_vst(zero, tmp_buf, 9 * 64);
    __lsx_vst(zero, tmp_buf, 15 * 64);
    __lsx_vst(zero, tmp_buf, 17 * 64);
    __lsx_vst(zero, tmp_buf, 23 * 64);
    __lsx_vst(zero, tmp_buf, 25 * 64);
    __lsx_vst(zero, tmp_buf, 31 * 64);
    vec0 = __lsx_vadd_h(reg0, reg3);
    reg0 = __lsx_vsub_h(reg0, reg3);
    reg3 = __lsx_vadd_h(reg7, reg4);
    reg7 = __lsx_vsub_h(reg7, reg4);
    reg4 = __lsx_vadd_h(reg1, reg2);
    reg1 = __lsx_vsub_h(reg1, reg2);
    reg2 = __lsx_vadd_h(reg6, reg5);
    reg6 = __lsx_vsub_h(reg6, reg5);

    DUP2_ARG2(__lsx_vadd_h, reg5, reg4, reg3, reg2, vec0, vec1);
    __lsx_vst(vec0, tmp_odd_buf, 4 * 16);
    __lsx_vst(vec1, tmp_odd_buf, 4 * 16 + 16);
    DUP2_ARG2(__lsx_vsub_h, reg5, reg4, reg3, reg2, vec0, vec1);

    __lsx_vst(vec0, tmp_odd_buf, 0);
    __lsx_vst(vec1, tmp_odd_buf, 16);

    LSX_BUTTERFLY_4_H(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
    __lsx_vst(vec0, tmp_odd_buf, 6 * 16);
    __lsx_vst(vec1, tmp_odd_buf, 6 * 16 + 16);

    __lsx_vst(vec2, tmp_odd_buf, 2 * 16);
    __lsx_vst(vec3, tmp_odd_buf, 2 * 16 + 16);
    reg0 = __lsx_vld(tmp_buf, 3 * 64);
    reg1 = __lsx_vld(tmp_buf, 5 * 64);
    reg2 = __lsx_vld(tmp_buf, 11 * 64);
    reg3 = __lsx_vld(tmp_buf, 13 * 64);
    reg4 = __lsx_vld(tmp_buf, 19 * 64);
    reg5 = __lsx_vld(tmp_buf, 21 * 64);
    reg6 = __lsx_vld(tmp_buf, 27 * 64);
    reg7 = __lsx_vld(tmp_buf, 29 * 64);

    __lsx_vst(zero, tmp_buf, 3 * 64);
    __lsx_vst(zero, tmp_buf, 5 * 64);
    __lsx_vst(zero, tmp_buf, 11 * 64);
    __lsx_vst(zero, tmp_buf, 13 * 64);
    __lsx_vst(zero, tmp_buf, 19 * 64);
    __lsx_vst(zero, tmp_buf, 21 * 64);
    __lsx_vst(zero, tmp_buf, 27 * 64);
    __lsx_vst(zero, tmp_buf, 29 * 64);
    DUP4_ARG2(__lsx_vsub_h, reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
              vec0, vec1, vec2, vec3);

    LSX_BUTTERFLY_4_H(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
    __lsx_vst(vec0, tmp_odd_buf, 12 * 16);
    __lsx_vst(vec1, tmp_odd_buf, 12 * 16 + 3 * 16);

    __lsx_vst(vec0, tmp_odd_buf, 10 * 16);
    __lsx_vst(vec1, tmp_odd_buf, 10 * 16 + 16);

    DUP4_ARG2(__lsx_vadd_h, reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7,
              vec0, vec1, vec2, vec3);
    LSX_BUTTERFLY_4_H(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
    __lsx_vst(reg0, tmp_odd_buf, 13 * 16);
    __lsx_vst(reg1, tmp_odd_buf, 13 * 16 + 16);

    __lsx_vst(reg0, tmp_odd_buf, 8 * 16);
    __lsx_vst(reg1, tmp_odd_buf, 8 * 16 + 16);
    DUP4_ARG2(__lsx_vld, tmp_odd_buf, 0, tmp_odd_buf, 16,
              tmp_odd_buf, 32, tmp_odd_buf, 48, reg0, reg1, reg2, reg3);
    DUP4_ARG2(__lsx_vld, tmp_odd_buf, 8 * 16, tmp_odd_buf, 8 * 16 + 16,
              tmp_odd_buf, 8 * 16 + 32, tmp_odd_buf, 8 * 16 + 48,
              reg4, reg5, reg6, reg7);

    DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
              loc0, loc1, loc2, loc3);
    __lsx_vst(loc0, tmp_odd_buf, 0);
    __lsx_vst(loc1, tmp_odd_buf, 16);
    __lsx_vst(loc2, tmp_odd_buf, 32);
    __lsx_vst(loc3, tmp_odd_buf, 48);
    DUP2_ARG2(__lsx_vsub_h, reg0, reg4, reg1, reg5, vec0, vec1);

    DUP2_ARG2(__lsx_vsub_h, reg2, reg6, reg3, reg7, vec0, vec1);

    __lsx_vst(loc0, tmp_odd_buf, 8 * 16);
    __lsx_vst(loc1, tmp_odd_buf, 8 * 16 + 16);
    __lsx_vst(loc2, tmp_odd_buf, 8 * 16 + 32);
    __lsx_vst(loc3, tmp_odd_buf, 8 * 16 + 48);

    DUP4_ARG2(__lsx_vld, tmp_odd_buf, 4 * 16, tmp_odd_buf, 4 * 16 + 16,
              tmp_odd_buf, 4 * 16 + 32, tmp_odd_buf, 4 * 16 + 48,
              reg1, reg2, reg0, reg3);
    DUP4_ARG2(__lsx_vld, tmp_odd_buf, 12 * 16, tmp_odd_buf, 12 * 16 + 16,
              tmp_odd_buf, 12 * 16 + 32, tmp_odd_buf, 12 * 16 + 48,
              reg4, reg5, reg6, reg7);

    DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
              loc0, loc1, loc2, loc3);
    __lsx_vst(loc0, tmp_odd_buf, 4 * 16);
    __lsx_vst(loc1, tmp_odd_buf, 4 * 16 + 16);
    __lsx_vst(loc2, tmp_odd_buf, 4 * 16 + 32);
    __lsx_vst(loc3, tmp_odd_buf, 4 * 16 + 48);

    DUP2_ARG2(__lsx_vsub_h, reg0, reg4, reg3, reg7, vec0, vec1);

    DUP2_ARG2(__lsx_vsub_h, reg1, reg5, reg2, reg6, vec0, vec1);

    __lsx_vst(loc0, tmp_odd_buf, 12 * 16);
    __lsx_vst(loc1, tmp_odd_buf, 12 * 16 + 16);
    __lsx_vst(loc2, tmp_odd_buf, 12 * 16 + 32);
    __lsx_vst(loc3, tmp_odd_buf, 12 * 16 + 48);
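/* Final odd-part stage: the lower and upper halves of tmp_odd_buf are
 * summed in place, and their differences are rotated once more before
 * being written back to the upper slots. */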
                                              int16_t *tmp_odd_buf,

    __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    __m128i m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

    vec0 = __lsx_vld(tmp_odd_buf, 0);
    vec1 = __lsx_vld(tmp_odd_buf, 9 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 14 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 6 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 0);
    loc1 = __lsx_vld(tmp_eve_buf, 8 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 4 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 12 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, m0, 6, m2, 6, m4, 6, m6, 6, m0, m2, m4, m6);

    DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, m0, 6, m2, 6, m4, 6, m6, 6, m0, m2, m4, m6);

    vec0 = __lsx_vld(tmp_odd_buf, 4 * 16);
    vec1 = __lsx_vld(tmp_odd_buf, 13 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 10 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 3 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 2 * 16);
    loc1 = __lsx_vld(tmp_eve_buf, 10 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 6 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 14 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, m1, 6, m3, 6, m5, 6, m7, 6, m1, m3, m5, m7);

    DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, m1, 6, m3, 6, m5, 6, m7, 6, m1, m3, m5, m7);

    vec0 = __lsx_vld(tmp_odd_buf, 2 * 16);
    vec1 = __lsx_vld(tmp_odd_buf, 11 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 12 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 7 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 1 * 16);
    loc1 = __lsx_vld(tmp_eve_buf, 9 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 5 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 13 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, n0, 6, n2, 6, n4, 6, n6, 6, n0, n2, n4, n6);

    DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, n0, 6, n2, 6, n4, 6, n6, 6, n0, n2, n4, n6);

    vec0 = __lsx_vld(tmp_odd_buf, 5 * 16);
    vec1 = __lsx_vld(tmp_odd_buf, 15 * 16);
    vec2 = __lsx_vld(tmp_odd_buf, 8 * 16);
    vec3 = __lsx_vld(tmp_odd_buf, 1 * 16);
    loc0 = __lsx_vld(tmp_eve_buf, 3 * 16);
    loc1 = __lsx_vld(tmp_eve_buf, 11 * 16);
    loc2 = __lsx_vld(tmp_eve_buf, 7 * 16);
    loc3 = __lsx_vld(tmp_eve_buf, 15 * 16);

    DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, n1, 6, n3, 6, n5, 6, n7, 6, n1, n3, n5, n7);

    DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
    DUP4_ARG2(__lsx_vsrari_h, n1, 6, n3, 6, n5, 6, n7, 6, n1, n3, n5, n7);
                                       &tmp_odd_buf[0], output);
    uint8_t *dst_tmp = dst + dst_stride;
    __m128i zero = __lsx_vldi(0);
    __m128i dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
    __m128i res0, res1, res2, res3, res4, res5, res6, res7, vec;

    vec = __lsx_vreplgr2vr_h(out);

    for (i = 16; i--;) {
        DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst2, dst3);

                  res0, res1, res2, res3);
                  res4, res5, res6, res7);
        DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec,
                  res0, res1, res2, res3);
        DUP4_ARG2(__lsx_vadd_h, res4, vec, res5, vec, res6, vec, res7, vec,
                  res4, res5, res6, res7);
        DUP4_ARG1(__lsx_vclip255_h, res0, res1, res2, res3, res0, res1, res2, res3);
        DUP4_ARG1(__lsx_vclip255_h, res4, res5, res6, res7, res4, res5, res6, res7);
        DUP4_ARG2(__lsx_vpickev_b, res4, res0, res5, res1, res6, res2, res7, res3,
                  tmp0, tmp1, tmp2, tmp3);

        __lsx_vst(tmp0, dst, 0);
        __lsx_vst(tmp1, dst, 16);
        __lsx_vst(tmp2, dst_tmp, 0);
        __lsx_vst(tmp3, dst_tmp, 16);
        dst = dst_tmp + dst_stride;
        dst_tmp = dst + dst_stride;
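/* DC-only path over a 32-wide block: each iteration reconstructs two
 * 32-pixel rows (two 16-byte loads/stores per row), so 16 iterations
 * cover all 32 rows. */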
    int16_t *out_ptr = out_arr;

    __m128i zero = __lsx_vldi(0);

    for (i = 16; i--;) {
        __lsx_vst(zero, out_ptr, 0);
        __lsx_vst(zero, out_ptr, 16);
        __lsx_vst(zero, out_ptr, 32);
        __lsx_vst(zero, out_ptr, 48);
        __lsx_vst(zero, out_ptr, 64);
        __lsx_vst(zero, out_ptr, 80);
        __lsx_vst(zero, out_ptr, 96);
        __lsx_vst(zero, out_ptr, 112);
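/* 16 iterations of eight 16-byte stores zero all 32 x 32 x 2 bytes of the
 * int16 scratch array before the column passes write into it. */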
    for (i = 0; i < 4; i++) {

                                             (dst + (i << 3)), dst_stride);
    int16_t *out_ptr = out_arr;

    for (i = 0; i < 4; i++) {

    for (i = 0; i < 4; i++) {

                                             (dst + (i << 3)), dst_stride);
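/* The 32x32 drivers work in four 8-column strips: one i < 4 loop runs the
 * column transforms into out_arr, the next applies the row pass and adds
 * the result into dst at (i << 3) pixel offsets. */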
                            int16_t *block, int eob)

    else if (eob <= 12) {

                            int16_t *block, int eob)

    else if (eob <= 10) {

                            int16_t *block, int eob)

    else if (eob <= 34) {
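/* eob (end-of-block) dispatch: an eob of 1 means DC only; an eob up to the
 * threshold means only the first few zig-zag coefficients are nonzero, so
 * a shortened transform suffices; anything larger takes the full path.
 * The cutoffs (12, 10, 34) reflect how many coefficients each shortcut
 * can honor for the 8x8, 16x16 and 32x32 transforms respectively. */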