/* Store eight 16-byte rows: rows 0-3 via stride offsets from _dst, then
 * _dst advances by _stride4 and rows 4-7 follow.  Note the macro leaves
 * _dst pointing at row 4. */
#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4,          \
                 _dst5, _dst6, _dst7, _dst, _stride,         \
                 _stride2, _stride3, _stride4)               \
{                                                            \
    __lsx_vst(_dst0, _dst, 0);                               \
    __lsx_vstx(_dst1, _dst, _stride);                        \
    __lsx_vstx(_dst2, _dst, _stride2);                       \
    __lsx_vstx(_dst3, _dst, _stride3);                       \
    _dst += _stride4;                                        \
    __lsx_vst(_dst4, _dst, 0);                               \
    __lsx_vstx(_dst5, _dst, _stride);                        \
    __lsx_vstx(_dst6, _dst, _stride2);                       \
    __lsx_vstx(_dst7, _dst, _stride3);                       \
}
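/* Store eight vectors as eight 32-byte rows: each vector is written at
 * offsets 0 and 16, so a single replicated vector fills one full row of
 * a 32-wide block; _dst advances by _stride after every row. */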
#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4,       \
                    _dst5, _dst6, _dst7, _dst, _stride)      \
{                                                            \
    __lsx_vst(_dst0, _dst, 0);                               \
    __lsx_vst(_dst0, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst1, _dst, 0);                               \
    __lsx_vst(_dst1, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst2, _dst, 0);                               \
    __lsx_vst(_dst2, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst3, _dst, 0);                               \
    __lsx_vst(_dst3, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst4, _dst, 0);                               \
    __lsx_vst(_dst4, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst5, _dst, 0);                               \
    __lsx_vst(_dst5, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst6, _dst, 0);                               \
    __lsx_vst(_dst6, _dst, 16);                              \
    _dst += _stride;                                         \
    __lsx_vst(_dst7, _dst, 0);                               \
    __lsx_vst(_dst7, _dst, 16);                              \
    _dst += _stride;                                         \
}
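/* Vertical prediction: the top-neighbour row is copied unchanged into
 * every output row.  The vert/hor functions below follow the same
 * ff_<mode>_<size>_lsx naming as the dc/tm functions in this file. */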
void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                       const uint8_t *left, const uint8_t *src)
{
    __m128i src0;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    src0 = __lsx_vld(src, 0);
    LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
             dst_stride, stride2, stride3, stride4);
}
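/* 32x32 vertical prediction: the two 16-byte halves of the top row are
 * re-stored for each of the 32 output rows. */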
void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                       const uint8_t *left, const uint8_t *src)
{
    uint32_t row;
    __m128i src0, src1;

    DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
    for (row = 32; row--;) {
        __lsx_vst(src0, dst, 0);
        __lsx_vst(src1, dst, 16);
        dst += dst_stride;
    }
}
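/* Horizontal prediction: __lsx_vldrepl_b broadcasts one left-neighbour
 * byte across a whole vector, giving one constant row per neighbour.
 * The left array is indexed in reverse because the VP9 dsp code passes
 * the left edge bottom-up. */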
void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                      const uint8_t *src, const uint8_t *top)
{
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11, src12, src13, src14, src15;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    src15 = __lsx_vldrepl_b(src, 0);
    src14 = __lsx_vldrepl_b(src, 1);
    src13 = __lsx_vldrepl_b(src, 2);
    src12 = __lsx_vldrepl_b(src, 3);
    src11 = __lsx_vldrepl_b(src, 4);
    src10 = __lsx_vldrepl_b(src, 5);
    src9 = __lsx_vldrepl_b(src, 6);
    src8 = __lsx_vldrepl_b(src, 7);
    src7 = __lsx_vldrepl_b(src, 8);
    src6 = __lsx_vldrepl_b(src, 9);
    src5 = __lsx_vldrepl_b(src, 10);
    src4 = __lsx_vldrepl_b(src, 11);
    src3 = __lsx_vldrepl_b(src, 12);
    src2 = __lsx_vldrepl_b(src, 13);
    src1 = __lsx_vldrepl_b(src, 14);
    src0 = __lsx_vldrepl_b(src, 15);
    LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
             dst_stride, stride2, stride3, stride4);
}
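/* 32x32 horizontal prediction: 32 broadcast loads, stored as four
 * groups of eight 32-wide rows via LSX_ST_8X16. */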
void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                      const uint8_t *src, const uint8_t *top)
{
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11, src12, src13, src14, src15;
    __m128i src16, src17, src18, src19, src20, src21, src22, src23;
    __m128i src24, src25, src26, src27, src28, src29, src30, src31;

    src31 = __lsx_vldrepl_b(src, 0);
    src30 = __lsx_vldrepl_b(src, 1);
    src29 = __lsx_vldrepl_b(src, 2);
    src28 = __lsx_vldrepl_b(src, 3);
    src27 = __lsx_vldrepl_b(src, 4);
    src26 = __lsx_vldrepl_b(src, 5);
    src25 = __lsx_vldrepl_b(src, 6);
    src24 = __lsx_vldrepl_b(src, 7);
    src23 = __lsx_vldrepl_b(src, 8);
    src22 = __lsx_vldrepl_b(src, 9);
    src21 = __lsx_vldrepl_b(src, 10);
    src20 = __lsx_vldrepl_b(src, 11);
    src19 = __lsx_vldrepl_b(src, 12);
    src18 = __lsx_vldrepl_b(src, 13);
    src17 = __lsx_vldrepl_b(src, 14);
    src16 = __lsx_vldrepl_b(src, 15);
    src15 = __lsx_vldrepl_b(src, 16);
    src14 = __lsx_vldrepl_b(src, 17);
    src13 = __lsx_vldrepl_b(src, 18);
    src12 = __lsx_vldrepl_b(src, 19);
    src11 = __lsx_vldrepl_b(src, 20);
    src10 = __lsx_vldrepl_b(src, 21);
    src9 = __lsx_vldrepl_b(src, 22);
    src8 = __lsx_vldrepl_b(src, 23);
    src7 = __lsx_vldrepl_b(src, 24);
    src6 = __lsx_vldrepl_b(src, 25);
    src5 = __lsx_vldrepl_b(src, 26);
    src4 = __lsx_vldrepl_b(src, 27);
    src3 = __lsx_vldrepl_b(src, 28);
    src2 = __lsx_vldrepl_b(src, 29);
    src1 = __lsx_vldrepl_b(src, 30);
    src0 = __lsx_vldrepl_b(src, 31);
    LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
                dst, dst_stride);
    LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
                dst, dst_stride);
    LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
                dst, dst_stride);
    LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
                dst, dst_stride);
}
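/* DC prediction, 4x4: sum the four top and four left neighbours with
 * widening horizontal adds, round with (sum + 4) >> 3 (__lsx_vsrari_w)
 * and broadcast the resulting byte over the block. */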
void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;

    tmp0 = __lsx_vldrepl_w(src_top, 0);
    tmp1 = __lsx_vldrepl_w(src_left, 0);
    dst0 = __lsx_vilvl_w(tmp1, tmp0);
    dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 3);
    dst0 = __lsx_vshuf4i_b(dst0, 0);
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
}
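/* Left-only / top-only DC: `dir` expands to the `left` or `top`
 * parameter, so one body generates both ff_dc_left_4x4_lsx and
 * ff_dc_top_4x4_lsx.  Only four neighbours contribute, hence the
 * rounding shift of 2. */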
#define INTRA_DC_TL_4X4(dir)                                   \
void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
                           const uint8_t *left,                \
                           const uint8_t *top)                 \
{                                                              \
    __m128i tmp0, dst0;                                        \
                                                               \
    tmp0 = __lsx_vldrepl_w(dir, 0);                            \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                     \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                     \
    dst0 = __lsx_vsrari_w(dst0, 2);                            \
    dst0 = __lsx_vshuf4i_b(dst0, 0);                           \
    __lsx_vstelm_w(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_w(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_w(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_w(dst0, dst, 0, 0);                           \
}

INTRA_DC_TL_4X4(left);
INTRA_DC_TL_4X4(top);
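/* DC 8x8: eight top plus eight left neighbours, rounded with
 * (sum + 8) >> 4 and broadcast with __lsx_vreplvei_b. */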
void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;

    tmp0 = __lsx_vldrepl_d(src_top, 0);
    tmp1 = __lsx_vldrepl_d(src_left, 0);
    dst0 = __lsx_vilvl_d(tmp1, tmp0);
    dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 4);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
}
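/* Left-only / top-only 8x8 DC: eight neighbours, rounding shift of 3. */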
#define INTRA_DC_TL_8X8(dir)                                   \
void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
                           const uint8_t *left,                \
                           const uint8_t *top)                 \
{                                                              \
    __m128i tmp0, dst0;                                        \
                                                               \
    tmp0 = __lsx_vldrepl_d(dir, 0);                            \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                     \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                     \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                     \
    dst0 = __lsx_vsrari_w(dst0, 3);                            \
    dst0 = __lsx_vreplvei_b(dst0, 0);                          \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
    dst += dst_stride;                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                           \
}

INTRA_DC_TL_8X8(left);
INTRA_DC_TL_8X8(top);
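/* DC 16x16: 32 neighbours, rounded with (sum + 16) >> 5; the 16 rows
 * are written as two LSX_ST_8 groups. */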
void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    tmp0 = __lsx_vld(src_top, 0);
    tmp1 = __lsx_vld(src_left, 0);
    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
    dst0 = __lsx_vadd_h(tmp0, tmp1);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 5);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
             dst_stride, stride2, stride3, stride4);
}
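/* Left-only / top-only 16x16 DC: 16 neighbours, rounding shift of 4. */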
#define INTRA_DC_TL_16X16(dir)                                     \
void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,   \
                             const uint8_t *left,                  \
                             const uint8_t *top)                   \
{                                                                  \
    __m128i tmp0, dst0;                                            \
    ptrdiff_t stride2 = dst_stride << 1;                           \
    ptrdiff_t stride3 = stride2 + dst_stride;                      \
    ptrdiff_t stride4 = stride2 << 1;                              \
                                                                   \
    tmp0 = __lsx_vld(dir, 0);                                      \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                         \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                         \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                         \
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                         \
    dst0 = __lsx_vsrari_w(dst0, 4);                                \
    dst0 = __lsx_vreplvei_b(dst0, 0);                              \
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,  \
             dst_stride, stride2, stride3, stride4);               \
    dst += stride4;                                                \
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,  \
             dst_stride, stride2, stride3, stride4);               \
}

INTRA_DC_TL_16X16(left);
INTRA_DC_TL_16X16(top);
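/* DC 32x32: 64 neighbours, rounded with (sum + 32) >> 6; each
 * LSX_ST_8X16 call covers eight of the 32 rows. */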
void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, tmp2, tmp3, dst0;

    DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
    DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
              tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
    dst0 = __lsx_vadd_h(tmp0, tmp1);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 6);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
}
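/* Left-only / top-only 32x32 DC: 32 neighbours, rounding shift of 5. */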
#define INTRA_DC_TL_32X32(dir)                                        \
void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,      \
                             const uint8_t *left,                     \
                             const uint8_t *top)                      \
{                                                                     \
    __m128i tmp0, tmp1, dst0;                                         \
                                                                      \
    DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1);                \
    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);\
    dst0 = __lsx_vadd_h(tmp0, tmp1);                                  \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                            \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                            \
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                            \
    dst0 = __lsx_vsrari_w(dst0, 5);                                   \
    dst0 = __lsx_vreplvei_b(dst0, 0);                                 \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,       \
                dst, dst_stride);                                     \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,       \
                dst, dst_stride);                                     \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,       \
                dst, dst_stride);                                     \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,       \
                dst, dst_stride);                                     \
}

INTRA_DC_TL_32X32(left);
INTRA_DC_TL_32X32(top);
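/* Fixed-value DC fallbacks: when neighbours are unavailable, VP9 fills
 * the block with a constant (127, 128 or 129); __lsx_vldi(val) splats
 * the byte without any memory access. */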
#define INTRA_PREDICT_VALDC_16X16_LSX(val)                            \
void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,      \
                             const uint8_t *left, const uint8_t *top) \
{                                                                     \
    __m128i out = __lsx_vldi(val);                                    \
    ptrdiff_t stride2 = dst_stride << 1;                              \
    ptrdiff_t stride3 = stride2 + dst_stride;                         \
    ptrdiff_t stride4 = stride2 << 1;                                 \
                                                                      \
    LSX_ST_8(out, out, out, out, out, out, out, out, dst,             \
             dst_stride, stride2, stride3, stride4);                  \
    dst += stride4;                                                   \
    LSX_ST_8(out, out, out, out, out, out, out, out, dst,             \
             dst_stride, stride2, stride3, stride4);                  \
}

INTRA_PREDICT_VALDC_16X16_LSX(127);
INTRA_PREDICT_VALDC_16X16_LSX(128);
INTRA_PREDICT_VALDC_16X16_LSX(129);
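/* Same fixed-value fill for 32x32 blocks; LSX_ST_8X16 advances dst
 * itself, so the four calls chain directly. */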
#define INTRA_PREDICT_VALDC_32X32_LSX(val)                            \
void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,      \
                             const uint8_t *left, const uint8_t *top) \
{                                                                     \
    __m128i out = __lsx_vldi(val);                                    \
                                                                      \
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
}

INTRA_PREDICT_VALDC_32X32_LSX(127);
INTRA_PREDICT_VALDC_32X32_LSX(128);
INTRA_PREDICT_VALDC_32X32_LSX(129);
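/* TM (true-motion) prediction: each pixel is
 * clip(left + top - top_left, 0, 255).  left + top is computed with
 * widening adds, __lsx_vssub_hu (saturating unsigned subtract of
 * top_left) clamps the low side, and __lsx_vsat_hu(..., 7) clamps the
 * result to 255 before packing back to bytes. */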
void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
    __m128i src0, src1, src2, src3;
    __m128i dst0, dst1, dst2, dst3;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
              src3, dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3, reg0,
              dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
              dst0, dst1, dst2, dst3);
    DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 2);
    dst += dst_stride;
    __lsx_vstelm_w(dst1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst1, dst, 0, 2);
}
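/* TM 8x8: the interleave / widen-add / clamp sequence runs on eight
 * rows at once; __lsx_vpickev_b packs two 8-pixel rows per vector,
 * stored with __lsx_vstelm_d. */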
void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i reg0, reg1;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp7, tmp6, tmp5, tmp4);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
              7, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, reg1,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
              src3, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6, src7,
              src7, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4, src7, src6,
              src0, src1, src2, src3);
    __lsx_vstelm_d(src0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src1, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src2, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src3, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src3, dst, 0, 1);
}
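/* TM 16x16: each left byte is combined with all 16 top pixels using
 * even/odd widening adds (vaddwev/vaddwod); the two halves are clamped
 * separately and re-interleaved with __lsx_vpackev_b, one vector per
 * row. */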
void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i reg0, reg1;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp15, tmp14, tmp13, tmp12);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
              7, tmp11, tmp10, tmp9, tmp8);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
              src_left, 11, tmp7, tmp6, tmp5, tmp4);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left, 14,
              src_left, 15, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp4, tmp5, tmp6, tmp7);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
              tmp15, reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
              tmp15, reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp12, tmp13, tmp14, tmp15);
    LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
             dst_stride, stride2, stride3, stride4);
}
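/* TM 32x32: reg1/reg2 hold the two 16-pixel halves of the top row;
 * each of the eight loop iterations consumes four left neighbours
 * (src_left += 4) and emits four 32-pixel rows. */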
void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    uint32_t loop_cnt;
    __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);

    for (loop_cnt = 8; loop_cnt--;) {
        DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
                  src_left, 3, tmp3, tmp2, tmp1, tmp0);
        src_left += 4;
        DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
                  tmp3, reg1, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
                  tmp3, reg1, src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3,
                  reg0, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7,
                  reg0, src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
                  tmp3, reg2, dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
                  tmp3, reg2, dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3,
                  reg0, dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0, dst7,
                  reg0, dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
                  src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
                  src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
                  dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
                  dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7,
                  src3, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2, dst7,
                  dst3, dst0, dst1, dst2, dst3);
        __lsx_vst(src0, dst, 0);
        __lsx_vst(dst0, dst, 16);
        dst += dst_stride;
        __lsx_vst(src1, dst, 0);
        __lsx_vst(dst1, dst, 16);
        dst += dst_stride;
        __lsx_vst(src2, dst, 0);
        __lsx_vst(dst2, dst, 16);
        dst += dst_stride;
        __lsx_vst(src3, dst, 0);
        __lsx_vst(dst3, dst, 16);
        dst += dst_stride;
    }
}