    uint32_t tp0, tp1, offset_val;
    v8i16 src0_r, tmp0, wgt, denom, offset;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(src_weight);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    src0_r = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) src0);
    tmp0 = __msa_adds_s_h(tmp0, offset);
    tmp0 = __msa_maxi_s_h(tmp0, 0);
    tmp0 = __msa_srlr_h(tmp0, denom);
    tmp0 = (v8i16) __msa_sat_u_h((v8u16) tmp0, 7);
    src0 = (v16u8) __msa_pckev_b((v16i8) tmp0, (v16i8) tmp0);
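    /* Note: these fragments appear to come from the 4xN/8xN H.264
     * weighted-prediction kernels.  Each variant follows the same pattern:
     * replicate the weight, (offset << log2_denom) and log2_denom into
     * vectors, widen the pixels to 16 bit, multiply by the weight, add the
     * offset with saturation, clamp negatives to zero, round-shift right by
     * log2_denom, saturate to 8 bit and pack the even bytes back together. */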
    uint32_t tp0, tp1, tp2, tp3, offset_val;
    v8i16 src0_r, src1_r, tmp0, tmp1, wgt, denom, offset;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(src_weight);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    MUL2(wgt, src0_r, wgt, src1_r, tmp0, tmp1);
    tmp0 = __msa_srlr_h(tmp0, denom);
    tmp1 = __msa_srlr_h(tmp1, denom);
    src0 = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
    uint32_t tp0, tp1, tp2, tp3, offset_val;
    v8i16 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(src_weight);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1, tmp2,
    ST_W8(src0, src1, 0, 1, 2, 3, 0, 1, 2, 3, data, stride);
    uint64_t tp0, tp1, tp2, tp3;
    v8i16 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(src_weight);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1, tmp2,
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
    v8i16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(src_weight);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1, tmp2,
    MUL4(wgt, src4_r, wgt, src5_r, wgt, src6_r, wgt, src7_r, tmp4, tmp5, tmp6,
    MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
    SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
    SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
    PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, src0, src1,
    ST_D8(src0, src1, src2, src3, 0, 1, 0, 1, 0, 1, 0, 1, data, stride);
    uint32_t offset_val, cnt;
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
    v8i16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(src_weight);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    for (cnt = 2; cnt--;) {
        MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1,
        MUL4(wgt, src4_r, wgt, src5_r, wgt, src6_r, wgt, src7_r, tmp4, tmp5,
                    tmp0, tmp1, tmp2, tmp3);
                    tmp4, tmp5, tmp6, tmp7);
        MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
        SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
        SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
        PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, src0, src1,
        ST_D8(src0, src1, src2, src3, 0, 1, 0, 1, 0, 1, 0, 1, data, stride);
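    /* Note: the following fragments appear to be the bi-weighted
     * (B-prediction) variants.  Here the rounding offset is folded in as
     * ((offset_in + 1) | 1) << log2_denom plus 128 * (src_weight + dst_weight),
     * the two weights are interleaved byte-wise with __msa_ilvev_b, and each
     * interleaved src/dst byte pair is multiply-accumulated onto the offset
     * vector with __msa_dpadd_s_h before shifting by (log2_denom + 1) and
     * clipping to [0, 255]. */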
    v16i8 src_wgt, dst_wgt, wgt, vec0;
    v16u8 src0 = { 0 }, dst0 = { 0 };
    v8i16 tmp0, denom, offset, max255 = __msa_ldi_h(255);

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (src_weight + dst_weight));

    src_wgt = __msa_fill_b(src_weight);
    dst_wgt = __msa_fill_b(dst_weight);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);

    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    vec0 = (v16i8) __msa_ilvr_b((v16i8) dst0, (v16i8) src0);
    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp0 = __msa_maxi_s_h(tmp0, 0);
    tmp0 = __msa_min_s_h(max255, tmp0);
    dst0 = (v16u8) __msa_pckev_b((v16i8) tmp0, (v16i8) tmp0);
    uint32_t tp0, tp1, tp2, tp3;
    v16i8 src_wgt, dst_wgt, wgt, vec0, vec1;
    v8i16 tmp0, tmp1, denom, offset;

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (src_weight + dst_weight));

    src_wgt = __msa_fill_b(src_weight);
    dst_wgt = __msa_fill_b(dst_weight);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);

    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
    dst0 = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
    uint32_t tp0, tp1, tp2, tp3;
    v16i8 src_wgt, dst_wgt, wgt, vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3, denom, offset;

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (src_weight + dst_weight));

    src_wgt = __msa_fill_b(src_weight);
    dst_wgt = __msa_fill_b(dst_weight);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);
    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
    tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
    tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
    SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
    ST_W8(dst0, dst1, 0, 1, 2, 3, 0, 1, 2, 3, dst, stride);
    uint64_t tp0, tp1, tp2, tp3;
    v16i8 src_wgt, dst_wgt, wgt, vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3, denom, offset;

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (src_weight + dst_weight));

    src_wgt = __msa_fill_b(src_weight);
    dst_wgt = __msa_fill_b(dst_weight);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);

    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
    tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
    tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
    SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
    uint64_t tp0, tp1, tp2, tp3;
    v16i8 src_wgt, dst_wgt, wgt, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom, offset;

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (src_weight + dst_weight));

    src_wgt = __msa_fill_b(src_weight);
    dst_wgt = __msa_fill_b(dst_weight);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);
    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
    tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
    tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
    tmp4 = __msa_dpadd_s_h(offset, wgt, vec4);
    tmp5 = __msa_dpadd_s_h(offset, wgt, vec5);
    tmp6 = __msa_dpadd_s_h(offset, wgt, vec6);
    tmp7 = __msa_dpadd_s_h(offset, wgt, vec7);
    SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
    SRA_4V(tmp4, tmp5, tmp6, tmp7, denom);
    ST_D8(dst0, dst1, dst2, dst3, 0, 1, 0, 1, 0, 1, 0, 1, dst, stride);
    uint64_t tp0, tp1, tp2, tp3;
    v16i8 src_wgt, dst_wgt, wgt;
    v16u8 dst0, dst1, dst2, dst3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (src_weight + dst_weight));

    src_wgt = __msa_fill_b(src_weight);
    dst_wgt = __msa_fill_b(dst_weight);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);
    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    for (cnt = 2; cnt--;) {
                   vec0, vec2, vec4, vec6);
                   vec1, vec3, vec5, vec7);
        temp0 = __msa_dpadd_s_h(offset, wgt, vec0);
        temp1 = __msa_dpadd_s_h(offset, wgt, vec1);
        temp2 = __msa_dpadd_s_h(offset, wgt, vec2);
        temp3 = __msa_dpadd_s_h(offset, wgt, vec3);
        temp4 = __msa_dpadd_s_h(offset, wgt, vec4);
        temp5 = __msa_dpadd_s_h(offset, wgt, vec5);
        temp6 = __msa_dpadd_s_h(offset, wgt, vec6);
        temp7 = __msa_dpadd_s_h(offset, wgt, vec7);

        SRA_4V(temp0, temp1, temp2, temp3, denom);
        SRA_4V(temp4, temp5, temp6, temp7, denom);
        CLIP_SH8_0_255(temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7);
        PCKEV_B4_UB(temp1, temp0, temp3, temp2, temp5, temp4, temp7, temp6,
                    dst0, dst1, dst2, dst3);
        ST_D8(dst0, dst1, dst2, dst3, 0, 1, 0, 1, 0, 1, 0, 1, dst, stride);
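/* Note: the macros below appear to be H.264 deblocking-filter helpers.
 * AVC_LPF_P0P1P2_OR_Q0Q1Q2 computes the strong-filter replacements for
 * p0/p1/p2 (or, mirrored, q0/q1/q2) from a running p0 + q0 + p1 sum with
 * rounding shifts; AVC_LPF_P0_OR_Q0 is the simple (2*p1 + p0 + q1 + 2) >> 2
 * case; AVC_LPF_P1_OR_Q1 produces the tc-clipped p1/q1 update; and
 * AVC_LPF_P0Q0 applies the clipped delta to p0 and q0 and clamps the results
 * to [0, 255]. */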
#define AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_or_q3_org_in, p0_or_q0_org_in,  \
                                 q3_or_p3_org_in, p1_or_q1_org_in,  \
                                 p2_or_q2_org_in, q1_or_p1_org_in,  \
                                 p0_or_q0_out, p1_or_q1_out, p2_or_q2_out)  \
    v8i16 const3 = __msa_ldi_h(3);  \
    threshold = (p0_or_q0_org_in) + (q3_or_p3_org_in);  \
    threshold += (p1_or_q1_org_in);  \
    (p0_or_q0_out) = threshold << 1;  \
    (p0_or_q0_out) += (p2_or_q2_org_in);  \
    (p0_or_q0_out) += (q1_or_p1_org_in);  \
    (p0_or_q0_out) = __msa_srari_h((p0_or_q0_out), 3);  \
    (p1_or_q1_out) = (p2_or_q2_org_in) + threshold;  \
    (p1_or_q1_out) = __msa_srari_h((p1_or_q1_out), 2);  \
    (p2_or_q2_out) = (p2_or_q2_org_in) * const3;  \
    (p2_or_q2_out) += (p3_or_q3_org_in);  \
    (p2_or_q2_out) += (p3_or_q3_org_in);  \
    (p2_or_q2_out) += threshold;  \
    (p2_or_q2_out) = __msa_srari_h((p2_or_q2_out), 3);  \
#define AVC_LPF_P0_OR_Q0(p0_or_q0_org_in, q1_or_p1_org_in,  \
                         p1_or_q1_org_in, p0_or_q0_out)  \
    (p0_or_q0_out) = (p0_or_q0_org_in) + (q1_or_p1_org_in);  \
    (p0_or_q0_out) += (p1_or_q1_org_in);  \
    (p0_or_q0_out) += (p1_or_q1_org_in);  \
    (p0_or_q0_out) = __msa_srari_h((p0_or_q0_out), 2);  \
#define AVC_LPF_P1_OR_Q1(p0_or_q0_org_in, q0_or_p0_org_in,  \
                         p1_or_q1_org_in, p2_or_q2_org_in,  \
                         negate_tc_in, tc_in, p1_or_q1_out)  \
    clip3 = (v8i16) __msa_aver_u_h((v8u16) p0_or_q0_org_in,  \
                                   (v8u16) q0_or_p0_org_in);  \
    temp = p1_or_q1_org_in << 1;  \
    clip3 = clip3 - temp;  \
    clip3 = __msa_ave_s_h(p2_or_q2_org_in, clip3);  \
    CLIP_SH(clip3, negate_tc_in, tc_in);  \
    p1_or_q1_out = p1_or_q1_org_in + clip3;  \
#define AVC_LPF_P0Q0(q0_or_p0_org_in, p0_or_q0_org_in,  \
                     p1_or_q1_org_in, q1_or_p1_org_in,  \
                     negate_threshold_in, threshold_in,  \
                     p0_or_q0_out, q0_or_p0_out)  \
    v8i16 q0_sub_p0, p1_sub_q1, delta;  \
    q0_sub_p0 = q0_or_p0_org_in - p0_or_q0_org_in;  \
    p1_sub_q1 = p1_or_q1_org_in - q1_or_p1_org_in;  \
    delta = q0_sub_p0 + p1_sub_q1;  \
    CLIP_SH(delta, negate_threshold_in, threshold_in);  \
    p0_or_q0_out = p0_or_q0_org_in + delta;  \
    q0_or_p0_out = q0_or_p0_org_in - delta;  \
    CLIP_SH2_0_255(p0_or_q0_out, q0_or_p0_out);  \
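/* Note: the next two macros appear to handle the horizontal chroma 4:2:2
 * deblocking case, where only two pixels either side of the vertical edge
 * are touched: short rows are loaded, transposed into p1/p0/q0/q1 columns,
 * the alpha/beta edge tests are evaluated per lane, and a tc-clipped delta
 * is applied to p0/q0 before the result is interleaved back into row order.
 * TRANSPOSE2x4_B_UB is the 2-row transpose helper used by the second
 * (mbaff-style) variant. */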
#define AVC_LPF_H_CHROMA_422(src, stride, tc_val, alpha, beta, res)  \
    uint32_t load0, load1, load2, load3;  \
    v16u8 src0 = { 0 };  \
    v16u8 src1 = { 0 };  \
    v16u8 src2 = { 0 };  \
    v16u8 src3 = { 0 };  \
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;  \
    v16u8 is_less_than, is_less_than_alpha, is_less_than_beta;  \
    v8i16 tc, q0_sub_p0, p1_sub_q1, delta;  \
    v8i16 res0_r, res1_r;  \
    v16i8 zeros = { 0 };  \
    LW4((src - 2), stride, load0, load1, load2, load3);  \
    src0 = (v16u8) __msa_insert_w((v4i32) src0, 0, load0);  \
    src1 = (v16u8) __msa_insert_w((v4i32) src1, 0, load1);  \
    src2 = (v16u8) __msa_insert_w((v4i32) src2, 0, load2);  \
    src3 = (v16u8) __msa_insert_w((v4i32) src3, 0, load3);  \
    TRANSPOSE4x4_UB_UB(src0, src1, src2, src3, src0, src1, src2, src3);  \
    p0_asub_q0 = __msa_asub_u_b(src2, src1);  \
    p1_asub_p0 = __msa_asub_u_b(src1, src0);  \
    q1_asub_q0 = __msa_asub_u_b(src2, src3);  \
    tc = __msa_fill_h(tc_val);  \
    is_less_than_alpha = (p0_asub_q0 < alpha);  \
    is_less_than_beta = (p1_asub_p0 < beta);  \
    is_less_than = is_less_than_alpha & is_less_than_beta;  \
    is_less_than_beta = (q1_asub_q0 < beta);  \
    is_less_than = is_less_than_beta & is_less_than;  \
    ILVR_B2_SH(src2, src1, src0, src3, q0_sub_p0, p1_sub_q1);  \
    HSUB_UB2_SH(q0_sub_p0, p1_sub_q1, q0_sub_p0, p1_sub_q1);  \
    delta = q0_sub_p0 + p1_sub_q1;  \
    delta = __msa_srari_h(delta, 3);  \
    CLIP_SH(delta, -tc, tc);  \
    ILVR_B2_SH(zeros, src1, zeros, src2, res0_r, res1_r);  \
    CLIP_SH2_0_255(res0_r, res1_r);  \
    PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1);  \
    res0 = __msa_bmnz_v(src1, res0, is_less_than);  \
    res1 = __msa_bmnz_v(src2, res1, is_less_than);  \
    res = (v16u8) __msa_ilvr_b((v16i8) res1, (v16i8) res0);  \
#define TRANSPOSE2x4_B_UB(in0, in1, out0, out1, out2, out3)  \
    v16i8 zero_m = { 0 };  \
    out0 = (v16u8) __msa_ilvr_b((v16i8) in1, (v16i8) in0);  \
    out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 2);  \
    SLDI_B2_UB(zero_m, out1, zero_m, out2, 2, out2, out3);  \
#define AVC_LPF_H_2BYTE_CHROMA_422(src, stride, tc_val, alpha, beta, res)  \
    uint32_t load0, load1;  \
    v16u8 src0 = { 0 };  \
    v16u8 src1 = { 0 };  \
    v16u8 src2 = { 0 };  \
    v16u8 src3 = { 0 };  \
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;  \
    v16u8 is_less_than, is_less_than_alpha, is_less_than_beta;  \
    v8i16 tc, q0_sub_p0, p1_sub_q1, delta, res0_r, res1_r;  \
    v16i8 zeros = { 0 };  \
    load0 = LW(src - 2);  \
    load1 = LW(src - 2 + stride);  \
    src0 = (v16u8) __msa_insert_w((v4i32) src0, 0, load0);  \
    src1 = (v16u8) __msa_insert_w((v4i32) src1, 0, load1);  \
    TRANSPOSE2x4_B_UB(src0, src1, src0, src1, src2, src3);  \
    p0_asub_q0 = __msa_asub_u_b(src2, src1);  \
    p1_asub_p0 = __msa_asub_u_b(src1, src0);  \
    q1_asub_q0 = __msa_asub_u_b(src2, src3);  \
    tc = __msa_fill_h(tc_val);  \
    is_less_than_alpha = (p0_asub_q0 < alpha);  \
    is_less_than_beta = (p1_asub_p0 < beta);  \
    is_less_than = is_less_than_alpha & is_less_than_beta;  \
    is_less_than_beta = (q1_asub_q0 < beta);  \
    is_less_than = is_less_than_beta & is_less_than;  \
    ILVR_B2_SH(src2, src1, src0, src3, q0_sub_p0, p1_sub_q1);  \
    HSUB_UB2_SH(q0_sub_p0, p1_sub_q1, q0_sub_p0, p1_sub_q1);  \
    delta = q0_sub_p0 + p1_sub_q1;  \
    delta = __msa_srari_h(delta, 3);  \
    CLIP_SH(delta, -tc, tc);  \
    ILVR_B2_SH(zeros, src1, zeros, src2, res0_r, res1_r);  \
    CLIP_SH2_0_255(res0_r, res1_r);  \
    PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1);  \
    res0 = __msa_bmnz_v(src1, res0, is_less_than);  \
    res1 = __msa_bmnz_v(src2, res1, is_less_than);  \
    res = (v16u8) __msa_ilvr_b((v16i8) res1, (v16i8) res0);  \
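/* Note: the fragments below appear to implement the strong (intra, bS = 4)
 * luma edge filters.  p1/p0/q0/q1 are loaded around the edge, the
 * |p0 - q0| < alpha and |p1 - p0|, |q1 - q0| < beta tests build a per-pixel
 * mask, and where |p0 - q0| < (alpha >> 2) + 2 and the p2/q2 beta tests also
 * pass, the stronger AVC_LPF_* filters are applied; __msa_bmnz_v then merges
 * filtered and original pixels under those masks. */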
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
    v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
    v16u8 p1_org, p0_org, q0_org, q1_org;

    LD_UB4(data - (img_width << 1), img_width, p1_org, p0_org, q0_org, q1_org);

    p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
    p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
    q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

    is_less_than_alpha = (p0_asub_q0 < alpha_in);
    is_less_than_beta = (p1_asub_p0 < beta_in);
    is_less_than = is_less_than_beta & is_less_than_alpha;
    is_less_than_beta = (q1_asub_q0 < beta_in);
    is_less_than = is_less_than_beta & is_less_than;

    if (!__msa_test_bz_v(is_less_than)) {
        v16u8 p2_asub_p0, q2_asub_q0, p0, q0, negate_is_less_than_beta;
        v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
        v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
        v16u8 q2_org = LD_UB(data + (2 * img_width));
        v16u8 p2_org = LD_UB(data - (3 * img_width));
        v16u8 tmp_flag = (v16u8) __msa_fill_b((alpha_in >> 2) + 2);

        tmp_flag = (p0_asub_q0 < tmp_flag);

        p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
        is_less_than_beta = (p2_asub_p0 < beta_in);
        is_less_than_beta = is_less_than_beta & tmp_flag;
        negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
        is_less_than_beta = is_less_than_beta & is_less_than;
        negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;

        q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
        q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);

        if (!__msa_test_bz_v(is_less_than_beta)) {
            v8i16 p3_org_l, p3_org_r;
            v16u8 p3_org = LD_UB(data - (img_width << 2));
                      p2_r, q1_org_r, p0_r, p1_r, p2_r);
                      p2_l, q1_org_l, p0_l, p1_l, p2_l);
            PCKEV_B3_UB(p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p0, p1, p2);

            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than_beta);
            p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
            p2_org = __msa_bmnz_v(p2_org, p2, is_less_than_beta);

        p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
        p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);

        q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
        is_less_than_beta = (q2_asub_q0 < beta_in);
        is_less_than_beta = is_less_than_beta & tmp_flag;
        negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
        is_less_than_beta = is_less_than_beta & is_less_than;
        negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;

        if (!__msa_test_bz_v(is_less_than_beta)) {
            v8i16 q3_org_r, q3_org_l;
            v16u8 q3_org = LD_UB(data + (3 * img_width));
                      q2_r, p1_org_r, q0_r, q1_r, q2_r);
                      q2_l, p1_org_l, q0_l, q1_l, q2_l);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than_beta);
            q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
            q2_org = __msa_bmnz_v(q2_org, q2, is_less_than_beta);

        q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
        q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
    v16u8 alpha, beta, p0_asub_q0;
    v16u8 is_less_than_alpha, is_less_than, is_less_than_beta;
    v16u8 p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org;
    v16u8 p1_asub_p0, q1_asub_q0;

    v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
    v16u8 row8, row9, row10, row11, row12, row13, row14, row15;

    LD_UB8(src, img_width, row0, row1, row2, row3, row4, row5, row6, row7);
           row8, row9, row10, row11, row12, row13, row14, row15);
                        row4, row5, row6, row7,
                        row8, row9, row10, row11,
                        row12, row13, row14, row15,
                        p3_org, p2_org, p1_org, p0_org,
                        q0_org, q1_org, q2_org, q3_org);

    p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
    p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
    q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    is_less_than_alpha = (p0_asub_q0 < alpha);
    is_less_than_beta = (p1_asub_p0 < beta);
    is_less_than = is_less_than_beta & is_less_than_alpha;
    is_less_than_beta = (q1_asub_q0 < beta);
    is_less_than = is_less_than_beta & is_less_than;

    if (!__msa_test_bz_v(is_less_than)) {
        v16u8 tmp_flag, p0, q0, p2_asub_p0, q2_asub_q0;
        v16u8 negate_is_less_than_beta;
        v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
        v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;

        tmp_flag = alpha >> 2;
        tmp_flag = tmp_flag + 2;
        tmp_flag = (p0_asub_q0 < tmp_flag);

        p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
        is_less_than_beta = (p2_asub_p0 < beta);
        is_less_than_beta = tmp_flag & is_less_than_beta;
        negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
        is_less_than_beta = is_less_than_beta & is_less_than;
        negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;

        if (!__msa_test_bz_v(is_less_than_beta)) {
            v8i16 p3_org_r, p3_org_l;
                      p2_r, q1_org_r, p0_r, p1_r, p2_r);
                      p2_l, q1_org_l, p0_l, p1_l, p2_l);
            PCKEV_B3_UB(p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p0, p1, p2);
            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than_beta);
            p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
            p2_org = __msa_bmnz_v(p2_org, p2, is_less_than_beta);

        p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
        p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);

        q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
        is_less_than_beta = (q2_asub_q0 < beta);
        is_less_than_beta = is_less_than_beta & tmp_flag;
        negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
        is_less_than_beta = is_less_than_beta & is_less_than;
        negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;

        if (!__msa_test_bz_v(is_less_than_beta)) {
            v8i16 q3_org_r, q3_org_l;
                      q2_r, p1_org_r, q0_r, q1_r, q2_r);
                      q2_l, p1_org_l, q0_l, q1_l, q2_l);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than_beta);
            q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
            q2_org = __msa_bmnz_v(q2_org, q2, is_less_than_beta);

        q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
        q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);

        v8i16 tp0, tp1, tp2, tp3, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

        ST_W4(tmp3, 0, 1, 2, 3, src, img_width);
        ST_H4(tmp2, 0, 1, 2, 3, src + 4, img_width);
        src += 4 * img_width;
        ST_W4(tmp4, 0, 1, 2, 3, src, img_width);
        ST_H4(tmp2, 4, 5, 6, 7, src + 4, img_width);
        src += 4 * img_width;

        ST_W4(tmp6, 0, 1, 2, 3, src, img_width);
        ST_H4(tmp5, 0, 1, 2, 3, src + 4, img_width);
        src += 4 * img_width;
        ST_W4(tmp7, 0, 1, 2, 3, src, img_width);
        ST_H4(tmp5, 4, 5, 6, 7, src + 4, img_width);
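/* Note: the next block appears to be the MBAFF variant of the strong luma
 * filter on a vertical edge: eight 8-byte rows are gathered with
 * __msa_insert_d, transposed into p2..q2 "columns", widened to 16 bit, and
 * the strong-filter averages (srari by 2 or 3) are computed explicitly
 * before __msa_bmnz_v selects between filtered and original samples and the
 * results are scattered back with 4-byte SW plus 2-byte SH stores per row. */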
    uint64_t load0, load1;
    v8u16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;
    v8u16 dst0_r, dst1_r, dst4_r, dst5_r;
    v8u16 dst2_x_r, dst2_y_r, dst3_x_r, dst3_y_r;
    v16u8 dst0, dst1, dst4, dst5, dst2_x, dst2_y, dst3_x, dst3_y;
    v8i16 tmp0, tmp1, tmp2, tmp3;
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0, p2_asub_p0, q2_asub_q0;
    v16u8 is_less_than, is_less_than_alpha, is_less_than_beta;
    v16u8 is_less_than_beta1, is_less_than_beta2;

    src0 = (v16i8) __msa_insert_d((v2i64) src0, 0, load0);
    src1 = (v16i8) __msa_insert_d((v2i64) src1, 0, load1);

    src2 = (v16i8) __msa_insert_d((v2i64) src2, 0, load0);
    src3 = (v16i8) __msa_insert_d((v2i64) src3, 0, load1);

    src4 = (v16i8) __msa_insert_d((v2i64) src4, 0, load0);
    src5 = (v16i8) __msa_insert_d((v2i64) src5, 0, load1);

    src6 = (v16i8) __msa_insert_d((v2i64) src6, 0, load0);
    src7 = (v16i8) __msa_insert_d((v2i64) src7, 0, load1);

    ILVR_W2_SB(tmp2, tmp0, tmp3, tmp1, src6, src3);
               8, src0, src2, src4, src7);

    p0_asub_q0 = __msa_asub_u_b((v16u8) src2, (v16u8) src3);
    p1_asub_p0 = __msa_asub_u_b((v16u8) src1, (v16u8) src2);
    q1_asub_q0 = __msa_asub_u_b((v16u8) src4, (v16u8) src3);

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    is_less_than_alpha = (p0_asub_q0 < alpha);
    is_less_than_beta = (p1_asub_p0 < beta);
    is_less_than = is_less_than_alpha & is_less_than_beta;
    is_less_than_beta = (q1_asub_q0 < beta);
    is_less_than = is_less_than & is_less_than_beta;

    is_less_than_alpha = (p0_asub_q0 < alpha);

    p2_asub_p0 = __msa_asub_u_b((v16u8) src0, (v16u8) src2);
    is_less_than_beta1 = (p2_asub_p0 < beta);
    q2_asub_q0 = __msa_asub_u_b((v16u8) src5, (v16u8) src3);
    is_less_than_beta2 = (q2_asub_q0 < beta);

               src0_r, src1_r, src2_r, src3_r);
    ILVR_B4_UH(zeros, src4, zeros, src5, zeros, src6, zeros, src7,
               src4_r, src5_r, src6_r, src7_r);

    dst2_x_r = src1_r + src2_r + src3_r;
    dst2_x_r = src0_r + (2 * (dst2_x_r)) + src4_r;
    dst2_x_r = (v8u16) __msa_srari_h((v8i16) dst2_x_r, 3);
    dst1_r = src0_r + src1_r + src2_r + src3_r;
    dst1_r = (v8u16) __msa_srari_h((v8i16) dst1_r, 2);

    dst0_r = (2 * src6_r) + (3 * src0_r);
    dst0_r += src1_r + src2_r + src3_r;
    dst0_r = (v8u16) __msa_srari_h((v8i16) dst0_r, 3);
    dst2_y_r = (2 * src1_r) + src2_r + src4_r;
    dst2_y_r = (v8u16) __msa_srari_h((v8i16) dst2_y_r, 2);

    PCKEV_B2_UB(dst2_x_r, dst2_x_r, dst2_y_r, dst2_y_r, dst2_x, dst2_y);
    dst2_x = __msa_bmnz_v(dst2_y, dst2_x, is_less_than_beta1);

    dst3_x_r = src2_r + src3_r + src4_r;
    dst3_x_r = src1_r + (2 * dst3_x_r) + src5_r;
    dst3_x_r = (v8u16) __msa_srari_h((v8i16) dst3_x_r, 3);
    dst4_r = src2_r + src3_r + src4_r + src5_r;
    dst4_r = (v8u16) __msa_srari_h((v8i16) dst4_r, 2);

    dst5_r = (2 * src7_r) + (3 * src5_r);
    dst5_r += src4_r + src3_r + src2_r;
    dst5_r = (v8u16) __msa_srari_h((v8i16) dst5_r, 3);
    dst3_y_r = (2 * src4_r) + src3_r + src1_r;
    dst3_y_r = (v8u16) __msa_srari_h((v8i16) dst3_y_r, 2);

    PCKEV_B2_UB(dst3_x_r, dst3_x_r, dst3_y_r, dst3_y_r, dst3_x, dst3_y);
    dst3_x = __msa_bmnz_v(dst3_y, dst3_x, is_less_than_beta2);

    dst2_y_r = (2 * src1_r) + src2_r + src4_r;
    dst2_y_r = (v8u16) __msa_srari_h((v8i16) dst2_y_r, 2);
    dst3_y_r = (2 * src4_r) + src3_r + src1_r;
    dst3_y_r = (v8u16) __msa_srari_h((v8i16) dst3_y_r, 2);

    PCKEV_B2_UB(dst2_y_r, dst2_y_r, dst3_y_r, dst3_y_r, dst2_y, dst3_y);

    dst2_x = __msa_bmnz_v(dst2_y, dst2_x, is_less_than_alpha);
    dst3_x = __msa_bmnz_v(dst3_y, dst3_x, is_less_than_alpha);
    dst2_x = __msa_bmnz_v((v16u8) src2, dst2_x, is_less_than);
    dst3_x = __msa_bmnz_v((v16u8) src3, dst3_x, is_less_than);

    is_less_than = is_less_than_alpha & is_less_than;
    dst1 = (v16u8) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst1_r);
    is_less_than_beta1 = is_less_than_beta1 & is_less_than;
    dst1 = __msa_bmnz_v((v16u8) src1, dst1, is_less_than_beta1);

    dst0 = (v16u8) __msa_pckev_b((v16i8) dst0_r, (v16i8) dst0_r);
    dst0 = __msa_bmnz_v((v16u8) src0, dst0, is_less_than_beta1);
    dst4 = (v16u8) __msa_pckev_b((v16i8) dst4_r, (v16i8) dst4_r);
    is_less_than_beta2 = is_less_than_beta2 & is_less_than;
    dst4 = __msa_bmnz_v((v16u8) src4, dst4, is_less_than_beta2);
    dst5 = (v16u8) __msa_pckev_b((v16i8) dst5_r, (v16i8) dst5_r);
    dst5 = __msa_bmnz_v((v16u8) src5, dst5, is_less_than_beta2);

    ILVR_B2_UB(dst1, dst0, dst3_x, dst2_x, dst0, dst1);
    dst2_x = (v16u8) __msa_ilvr_b((v16i8) dst5, (v16i8) dst4);

    ILVR_W2_UB(tmp2, tmp0, tmp3, tmp1, dst0, dst4);
    SLDI_B2_UB(zeros, dst0, zeros, dst4, 8, dst1, dst5);
    dst2_x = (v16u8) __msa_ilvl_w((v4i32) tmp2, (v4i32) tmp0);
    dst2_y = (v16u8) __msa_ilvl_w((v4i32) tmp3, (v4i32) tmp1);
    SLDI_B2_UB(zeros, dst2_x, zeros, dst2_y, 8, dst3_x, dst3_y);

    out0 = __msa_copy_u_w((v4i32) dst0, 0);
    out1 = __msa_copy_u_h((v8i16) dst0, 2);
    out2 = __msa_copy_u_w((v4i32) dst1, 0);
    out3 = __msa_copy_u_h((v8i16) dst1, 2);

    SW(out0, (src - 3));
    SH(out1, (src + 1));

    SW(out2, (src - 3));
    SH(out3, (src + 1));

    out0 = __msa_copy_u_w((v4i32) dst2_x, 0);
    out1 = __msa_copy_u_h((v8i16) dst2_x, 2);
    out2 = __msa_copy_u_w((v4i32) dst3_x, 0);
    out3 = __msa_copy_u_h((v8i16) dst3_x, 2);

    SW(out0, (src - 3));
    SH(out1, (src + 1));

    SW(out2, (src - 3));
    SH(out3, (src + 1));

    out0 = __msa_copy_u_w((v4i32) dst4, 0);
    out1 = __msa_copy_u_h((v8i16) dst4, 2);
    out2 = __msa_copy_u_w((v4i32) dst5, 0);
    out3 = __msa_copy_u_h((v8i16) dst5, 2);

    SW(out0, (src - 3));
    SH(out1, (src + 1));

    SW(out2, (src - 3));
    SH(out3, (src + 1));

    out0 = __msa_copy_u_w((v4i32) dst2_y, 0);
    out1 = __msa_copy_u_h((v8i16) dst2_y, 2);
    out2 = __msa_copy_u_w((v4i32) dst3_y, 0);
    out3 = __msa_copy_u_h((v8i16) dst3_y, 2);

    SW(out0, (src - 3));
    SH(out1, (src + 1));

    SW(out2, (src - 3));
    SH(out3, (src + 1));
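/* Note: the two fragments that follow appear to be the chroma strong-filter
 * (intra) edge cases.  Only p1/p0/q0/q1 are involved: the alpha/beta mask is
 * built exactly as in the luma case, an AVC_LPF_P0_OR_Q0 style average
 * produces the new p0/q0, and __msa_bmnz_v keeps the original pixels where
 * the mask is zero.  The first variant works on a horizontal edge (ST_UB of
 * whole rows), the second on a vertical edge (transpose + ST_H4 of pixel
 * pairs). */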
    v8i16 p0_or_q0, q0_or_p0;
    v16u8 p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org;
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
    v16u8 is_less_than_alpha, is_less_than_beta;
    v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    LD_UB4(data_cb_or_cr - (img_width << 1), img_width,
           p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org);

    p0_asub_q0 = __msa_asub_u_b(p0_or_q0_org, q0_or_p0_org);
    p1_asub_p0 = __msa_asub_u_b(p1_or_q1_org, p0_or_q0_org);
    q1_asub_q0 = __msa_asub_u_b(q1_or_p1_org, q0_or_p0_org);

    is_less_than_alpha = (p0_asub_q0 < alpha);
    is_less_than_beta = (p1_asub_p0 < beta);
    is_less_than = is_less_than_beta & is_less_than_alpha;
    is_less_than_beta = (q1_asub_q0 < beta);
    is_less_than = is_less_than_beta & is_less_than;

    is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);

    if (!__msa_test_bz_v(is_less_than)) {
                   zero, q1_or_p1_org, p1_org_r, p0_org_r, q0_org_r, q1_org_r);
            __msa_bmnz_v(p0_or_q0_org, (v16u8) p0_or_q0, is_less_than);
            __msa_bmnz_v(q0_or_p0_org, (v16u8) q0_or_p0, is_less_than);

        ST_UB(q0_or_p0_org, data_cb_or_cr);
        ST_UB(p0_or_q0_org, data_cb_or_cr - img_width);
    v16u8 alpha, beta, is_less_than;
    v8i16 p0_or_q0, q0_or_p0;
    v16u8 p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org;
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
    v16u8 is_less_than_alpha, is_less_than_beta;
    v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;

    v16u8 row0, row1, row2, row3, row4, row5, row6, row7;

    LD_UB8((data_cb_or_cr - 2), img_width,
           row0, row1, row2, row3, row4, row5, row6, row7);
                       p1_or_q1_org, p0_or_q0_org,
                       q0_or_p0_org, q1_or_p1_org);

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    p0_asub_q0 = __msa_asub_u_b(p0_or_q0_org, q0_or_p0_org);
    p1_asub_p0 = __msa_asub_u_b(p1_or_q1_org, p0_or_q0_org);
    q1_asub_q0 = __msa_asub_u_b(q1_or_p1_org, q0_or_p0_org);

    is_less_than_alpha = (p0_asub_q0 < alpha);
    is_less_than_beta = (p1_asub_p0 < beta);
    is_less_than = is_less_than_beta & is_less_than_alpha;
    is_less_than_beta = (q1_asub_q0 < beta);
    is_less_than = is_less_than_beta & is_less_than;
    is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);

    if (!__msa_test_bz_v(is_less_than)) {
                   zero, q1_or_p1_org, p1_org_r, p0_org_r, q0_org_r, q1_org_r);
            __msa_bmnz_v(p0_or_q0_org, (v16u8) p0_or_q0, is_less_than);
            __msa_bmnz_v(q0_or_p0_org, (v16u8) q0_or_p0, is_less_than);
        tmp1 = (v8i16) __msa_ilvr_b((v16i8) q0_or_p0_org, (v16i8) p0_or_q0_org);

        ST_H4(tmp1, 0, 1, 2, 3, data_cb_or_cr, img_width);
        data_cb_or_cr += 4 * img_width;
        ST_H4(tmp1, 4, 5, 6, 7, data_cb_or_cr, img_width);
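/* Note: the large fragment below appears to be the normal (bS < 4) luma
 * filter for a vertical edge.  Per-edge bS and tc0 values are splatted into
 * lanes of the bs/tc vectors with __msa_fill_b + __msa_insve_w, bs > 0 is
 * folded into the alpha/beta mask, p1/q1 get the tc-clipped update where the
 * p2/q2 beta test passes (with tc bumped for those lanes via __msa_andi_b),
 * and AVC_LPF_P0Q0 applies the clipped delta to p0/q0 before the results are
 * transposed back and written out with word + halfword stores per row. */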
    v16u8 tmp_vec, bs = { 0 };

    tmp_vec = (v16u8) __msa_fill_b(bs0);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 0, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs1);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 1, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs2);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 2, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs3);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 3, (v4i32) tmp_vec);

    if (!__msa_test_bz_v(bs)) {
        v16u8 p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org;
        v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0, alpha, beta;
        v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
        v16u8 is_bs_greater_than0;

        tmp_vec = (v16u8) __msa_fill_b(tc0);
        tc = (v16u8) __msa_insve_w((v4i32) tc, 0, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc1);
        tc = (v16u8) __msa_insve_w((v4i32) tc, 1, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc2);
        tc = (v16u8) __msa_insve_w((v4i32) tc, 2, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc3);
        tc = (v16u8) __msa_insve_w((v4i32) tc, 3, (v4i32) tmp_vec);

        is_bs_greater_than0 = (zero < bs);

        v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
        v16u8 row8, row9, row10, row11, row12, row13, row14, row15;

               row0, row1, row2, row3, row4, row5, row6, row7);
        src += (8 * img_width);
               row8, row9, row10, row11, row12, row13, row14, row15);
                            row8, row9, row10, row11,
                            row12, row13, row14, row15,
                            p3_org, p2_org, p1_org, p0_org,
                            q0_org, q1_org, q2_org, q3_org);

        p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
        p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
        q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

        alpha = (v16u8) __msa_fill_b(alpha_in);
        beta = (v16u8) __msa_fill_b(beta_in);

        is_less_than_alpha = (p0_asub_q0 < alpha);
        is_less_than_beta = (p1_asub_p0 < beta);
        is_less_than = is_less_than_beta & is_less_than_alpha;
        is_less_than_beta = (q1_asub_q0 < beta);
        is_less_than = is_less_than_beta & is_less_than;
        is_less_than = is_less_than & is_bs_greater_than0;

        if (!__msa_test_bz_v(is_less_than)) {
            v16i8 negate_tc, sign_negate_tc;
            v16u8 p0, q0, p2_asub_p0, q2_asub_q0;
            v8i16 tc_r, tc_l, negate_tc_r, i16_negatetc_l;
            v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
            v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
            v8i16 p0_r, q0_r, p0_l, q0_l;

            negate_tc = zero - (v16i8) tc;
            sign_negate_tc = __msa_clti_s_b(negate_tc, 0);

            ILVRL_B2_SH(sign_negate_tc, negate_tc, negate_tc_r, i16_negatetc_l);

            p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
            is_less_than_beta = (p2_asub_p0 < beta);
            is_less_than_beta = is_less_than_beta & is_less_than;

            if (!__msa_test_bz_v(is_less_than_beta)) {
                v8i16 p2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p2_org);
                v8i16 p2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p2_org);
                                 negate_tc_r, tc_r, p1_r);
                                 i16_negatetc_l, tc_l, p1_l);

                p1 = (v16u8) __msa_pckev_b((v16i8) p1_l, (v16i8) p1_r);
                p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);

                is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
                tc = tc + is_less_than_beta;

            q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
            is_less_than_beta = (q2_asub_q0 < beta);
            is_less_than_beta = is_less_than_beta & is_less_than;

            q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
            q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);

            if (!__msa_test_bz_v(is_less_than_beta)) {
                v8i16 q2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q2_org);
                v8i16 q2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q2_org);
                                 negate_tc_r, tc_r, q1_r);
                                 i16_negatetc_l, tc_l, q1_l);

                q1 = (v16u8) __msa_pckev_b((v16i8) q1_l, (v16i8) q1_r);
                q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);

                is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
                tc = tc + is_less_than_beta;

            v8i16 threshold_r, negate_thresh_r;
            v8i16 threshold_l, negate_thresh_l;
            v16i8 negate_thresh, sign_negate_thresh;

            negate_thresh = zero - (v16i8) tc;
            sign_negate_thresh = __msa_clti_s_b(negate_thresh, 0);

                       threshold_r, negate_thresh_r);
                         negate_thresh_r, threshold_r, p0_r, q0_r);

            threshold_l = (v8i16) __msa_ilvl_b(zero, (v16i8) tc);
            negate_thresh_l = (v8i16) __msa_ilvl_b(sign_negate_thresh,
                         negate_thresh_l, threshold_l, p0_l, q0_l);

            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);

            v16i8 tp0, tp1, tp2, tp3;
            v4i32 tmp3, tmp4, tmp6, tmp7;
            uint32_t out0, out2;
            uint16_t out1, out3;

            out0 = __msa_copy_u_w(tmp3, 0);
            out1 = __msa_copy_u_h(tmp2, 0);
            out2 = __msa_copy_u_w(tmp3, 1);
            out3 = __msa_copy_u_h(tmp2, 1);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp3, 2);
            out1 = __msa_copy_u_h(tmp2, 2);
            out2 = __msa_copy_u_w(tmp3, 3);
            out3 = __msa_copy_u_h(tmp2, 3);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp4, 0);
            out1 = __msa_copy_u_h(tmp2, 4);
            out2 = __msa_copy_u_w(tmp4, 1);
            out3 = __msa_copy_u_h(tmp2, 5);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp4, 2);
            out1 = __msa_copy_u_h(tmp2, 6);
            out2 = __msa_copy_u_w(tmp4, 3);
            out3 = __msa_copy_u_h(tmp2, 7);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp6, 0);
            out1 = __msa_copy_u_h(tmp5, 0);
            out2 = __msa_copy_u_w(tmp6, 1);
            out3 = __msa_copy_u_h(tmp5, 1);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp6, 2);
            out1 = __msa_copy_u_h(tmp5, 2);
            out2 = __msa_copy_u_w(tmp6, 3);
            out3 = __msa_copy_u_h(tmp5, 3);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp7, 0);
            out1 = __msa_copy_u_h(tmp5, 4);
            out2 = __msa_copy_u_w(tmp7, 1);
            out3 = __msa_copy_u_h(tmp5, 5);

            SH(out1, (src + 4));
            SH(out3, (src + 4));

            out0 = __msa_copy_u_w(tmp7, 2);
            out1 = __msa_copy_u_h(tmp5, 6);
            out2 = __msa_copy_u_w(tmp7, 3);
            out3 = __msa_copy_u_h(tmp5, 7);

            SH(out1, (src + 4));
            SH(out3, (src + 4));
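/* Note: the next fragment looks like the horizontal-edge counterpart of the
 * filter above: the same bs/tc lane setup, but rows are loaded directly
 * around the edge with LD_UB5 (no transpose), p1 is stored back with ST_UB
 * when its beta test passes, and the final p0/q0 rows are merged with
 * __msa_bmnz_v. */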
                                          uint32_t image_width)

    tmp_vec = (v16u8) __msa_fill_b(bs0);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 0, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs1);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 1, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs2);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 2, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs3);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 3, (v4i32) tmp_vec);

    if (!__msa_test_bz_v(bs)) {
        v16u8 alpha, beta, is_less_than, is_less_than_beta;
        v16u8 p0, q0, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org;
        v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
        v16u8 is_less_than_alpha, is_bs_greater_than0;
        v8i16 p0_r, q0_r, p0_l, q0_l;
        v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
        v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;

        tmp_vec = (v16u8) __msa_fill_b(tc0);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 0, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc1);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 1, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc2);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 2, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc3);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 3, (v4i32) tmp_vec);

        alpha = (v16u8) __msa_fill_b(alpha_in);
        beta = (v16u8) __msa_fill_b(beta_in);

        LD_UB5(data - (3 * image_width), image_width,
               p2_org, p1_org, p0_org, q0_org, q1_org);

        is_bs_greater_than0 = ((v16u8) zero < bs);
        p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
        p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
        q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

        is_less_than_alpha = (p0_asub_q0 < alpha);
        is_less_than_beta = (p1_asub_p0 < beta);
        is_less_than = is_less_than_beta & is_less_than_alpha;
        is_less_than_beta = (q1_asub_q0 < beta);
        is_less_than = is_less_than_beta & is_less_than;
        is_less_than = is_less_than & is_bs_greater_than0;

        if (!__msa_test_bz_v(is_less_than)) {
            v16i8 sign_negate_tc, negate_tc;
            v8i16 negate_tc_r, i16_negatetc_l, tc_l, tc_r;
            v16u8 p2_asub_p0, q2_asub_q0;

            q2_org = LD_UB(data + (2 * image_width));

            sign_negate_tc = __msa_clti_s_b(negate_tc, 0);

            ILVRL_B2_SH(sign_negate_tc, negate_tc, negate_tc_r, i16_negatetc_l);

            p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
            is_less_than_beta = (p2_asub_p0 < beta);
            is_less_than_beta = is_less_than_beta & is_less_than;

            if (!__msa_test_bz_v(is_less_than_beta)) {
                v8i16 p2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p2_org);
                v8i16 p2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p2_org);
                                 negate_tc_r, tc_r, p1_r);
                                 i16_negatetc_l, tc_l, p1_l);

                p1 = (v16u8) __msa_pckev_b((v16i8) p1_l, (v16i8) p1_r);
                p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
                ST_UB(p1_org, data - (2 * image_width));

                is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
                tc = tc + (v16i8) is_less_than_beta;

            q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
            is_less_than_beta = (q2_asub_q0 < beta);
            is_less_than_beta = is_less_than_beta & is_less_than;

            q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
            q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);

            if (!__msa_test_bz_v(is_less_than_beta)) {
                v8i16 q2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q2_org);
                v8i16 q2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q2_org);
                                 negate_tc_r, tc_r, q1_r);
                                 i16_negatetc_l, tc_l, q1_l);

                q1 = (v16u8) __msa_pckev_b((v16i8) q1_l, (v16i8) q1_r);
                q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);

                is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
                tc = tc + (v16i8) is_less_than_beta;

            v16i8 negate_thresh, sign_negate_thresh;
            v8i16 threshold_r, threshold_l;
            v8i16 negate_thresh_l, negate_thresh_r;

            negate_thresh = zero - tc;
            sign_negate_thresh = __msa_clti_s_b(negate_thresh, 0);

                       threshold_r, negate_thresh_r);
                         negate_thresh_r, threshold_r, p0_r, q0_r);

            threshold_l = (v8i16) __msa_ilvl_b(zero, tc);
            negate_thresh_l = (v8i16) __msa_ilvl_b(sign_negate_thresh,
                         negate_thresh_l, threshold_l, p0_l, q0_l);

            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
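/* Note: the following fragment appears to be the MBAFF (4-row) normal luma
 * filter on a vertical edge: eight doublewords are inserted and interleaved
 * into p2..q2 columns, tc is built from tc_val, p2/q2 receive a clipped
 * second-neighbour correction where their beta tests and tc > 0 hold (with
 * tc bumped per passing lane via tc_plus1), and the clipped, rounded
 * (q0 - p0) delta is added to p0/q0 before the results are copied out word
 * by word. */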
    uint32_t out0, out1, out2, out3;
    v8i16 src4, src5, src6, src7;
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0, p2_asub_p0, q2_asub_q0;
    v16u8 is_less_than, is_less_than_alpha, is_less_than_beta;
    v16u8 is_less_than_beta1, is_less_than_beta2;
    v8i16 tc, tc_orig_r, tc_plus1;
    v16u8 is_tc_orig1, is_tc_orig2, tc_orig = { 0 };
    v8i16 p0_ilvr_q0, p0_add_q0, q0_sub_p0, p1_sub_q1;
    v8i16 src2_r, src3_r;
    v8i16 p2_r, p1_r, q2_r, q1_r;
    v16u8 p2, q2, p0, q0;
    v16i8 zeros = { 0 };

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    inp0 = (v16i8) __msa_insert_d((v2i64) inp0, 0, load);
    inp1 = (v16i8) __msa_insert_d((v2i64) inp1, 0, load);
    inp2 = (v16i8) __msa_insert_d((v2i64) inp2, 0, load);
    inp3 = (v16i8) __msa_insert_d((v2i64) inp3, 0, load);
    inp4 = (v16i8) __msa_insert_d((v2i64) inp4, 0, load);
    inp5 = (v16i8) __msa_insert_d((v2i64) inp5, 0, load);
    inp6 = (v16i8) __msa_insert_d((v2i64) inp6, 0, load);
    inp7 = (v16i8) __msa_insert_d((v2i64) inp7, 0, load);

    ILVR_B4_SB(inp1, inp0, inp3, inp2, inp5, inp4, inp7, inp6,

    src0 = (v16i8) __msa_ilvr_w((v4i32) src6, (v4i32) src4);
    src1 = __msa_sldi_b(zeros, (v16i8) src0, 8);
    src2 = (v16i8) __msa_ilvl_w((v4i32) src6, (v4i32) src4);
    src3 = __msa_sldi_b(zeros, (v16i8) src2, 8);
    src4 = (v8i16) __msa_ilvr_w((v4i32) src7, (v4i32) src5);
    src5 = (v8i16) __msa_sldi_b(zeros, (v16i8) src4, 8);

    p0_asub_q0 = __msa_asub_u_b((v16u8) src2, (v16u8) src3);
    p1_asub_p0 = __msa_asub_u_b((v16u8) src1, (v16u8) src2);
    q1_asub_q0 = __msa_asub_u_b((v16u8) src4, (v16u8) src3);
    p2_asub_p0 = __msa_asub_u_b((v16u8) src0, (v16u8) src2);
    q2_asub_q0 = __msa_asub_u_b((v16u8) src5, (v16u8) src3);

    is_less_than_alpha = (p0_asub_q0 < alpha);
    is_less_than_beta = (p1_asub_p0 < beta);
    is_less_than = is_less_than_alpha & is_less_than_beta;
    is_less_than_beta = (q1_asub_q0 < beta);
    is_less_than = is_less_than_beta & is_less_than;

    is_less_than_beta1 = (p2_asub_p0 < beta);
    is_less_than_beta2 = (q2_asub_q0 < beta);

    p0_ilvr_q0 = (v8i16) __msa_ilvr_b((v16i8) src3, (v16i8) src2);
    p0_add_q0 = (v8i16) __msa_hadd_u_h((v16u8) p0_ilvr_q0, (v16u8) p0_ilvr_q0);
    p0_add_q0 = __msa_srari_h(p0_add_q0, 1);

    ILVR_B2_SH(zeros, src5, zeros, src4, q2_r, q1_r);

    tc_orig = (v16u8) __msa_insert_w((v4i32) tc_orig, 0, tc_val);
    tc_orig = (v16u8) __msa_ilvr_b((v16i8) tc_orig, (v16i8) tc_orig);
    is_tc_orig1 = tc_orig;
    is_tc_orig2 = tc_orig;
    tc_orig_r = (v8i16) __msa_ilvr_b(zeros, (v16i8) tc_orig);

    CLIP_SH(p2_r, -tc_orig_r, tc_orig_r);
    CLIP_SH(q2_r, -tc_orig_r, tc_orig_r);

    is_tc_orig1 = (zeros < is_tc_orig1);
    is_tc_orig2 = is_tc_orig1;
    is_tc_orig1 = is_less_than_beta1 & is_tc_orig1;
    is_tc_orig2 = is_less_than_beta2 & is_tc_orig2;
    is_tc_orig1 = is_less_than & is_tc_orig1;
    is_tc_orig2 = is_less_than & is_tc_orig2;

    p2 = __msa_bmnz_v((v16u8) src1, p2, is_tc_orig1);
    q2 = __msa_bmnz_v((v16u8) src4, q2, is_tc_orig2);

    q0_sub_p0 = __msa_hsub_u_h((v16u8) p0_ilvr_q0, (v16u8) p0_ilvr_q0);
    p1_sub_q1 = p1_r - q1_r;
    q0_sub_p0 += p1_sub_q1;
    q0_sub_p0 = __msa_srari_h(q0_sub_p0, 3);

    is_less_than_beta1 = (v16u8) __msa_ilvr_b((v16i8) is_less_than_beta1,
                                              (v16i8) is_less_than_beta1);
    tc = (v8i16) __msa_bmnz_v((v16u8) tc, (v16u8) tc_plus1, is_less_than_beta1);

    is_less_than_beta2 = (v16u8) __msa_ilvr_b((v16i8) is_less_than_beta2,
                                              (v16i8) is_less_than_beta2);
    tc = (v8i16) __msa_bmnz_v((v16u8) tc, (v16u8) tc_plus1, is_less_than_beta2);

    ILVR_B2_SH(zeros, src2, zeros, src3, src2_r, src3_r);
    src2_r += q0_sub_p0;
    src3_r -= q0_sub_p0;

    p0 = __msa_bmnz_v((v16u8) src2, p0, is_less_than);
    q0 = __msa_bmnz_v((v16u8) src3, q0, is_less_than);

    out0 = __msa_copy_u_w(dst0, 0);
    out1 = __msa_copy_u_w(dst0, 1);
    out2 = __msa_copy_u_w(dst0, 2);
    out3 = __msa_copy_u_w(dst0, 3);

    out0 = __msa_copy_u_w(dst1, 0);
    out1 = __msa_copy_u_w(dst1, 1);
    out2 = __msa_copy_u_w(dst1, 2);
    out3 = __msa_copy_u_w(dst1, 3);
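/* Note: the two fragments below appear to be the chroma bS < 4 filters.
 * bs/tc are packed per 2-pixel group with __msa_insve_h, the usual
 * alpha/beta/bs mask is built on p1/p0/q0/q1 only, and AVC_LPF_P0Q0 applies
 * the tc-clipped delta; the first variant handles a horizontal edge, the
 * second transposes 8 rows to filter a vertical edge and writes the p0/q0
 * pairs back with ST_H4. */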
    v16u8 p0, q0, p0_asub_q0, p1_asub_p0, q1_asub_q0;
    v16u8 is_less_than_beta, is_less_than_alpha, is_bs_greater_than0;
    v16u8 p1_org, p0_org, q0_org, q1_org;
    v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
    v16i8 negate_tc, sign_negate_tc;
    v8i16 tc_r, negate_tc_r;

    tmp_vec = (v8i16) __msa_fill_b(bs0);
    bs = __msa_insve_h(bs, 0, tmp_vec);
    tmp_vec = (v8i16) __msa_fill_b(bs1);
    bs = __msa_insve_h(bs, 1, tmp_vec);
    tmp_vec = (v8i16) __msa_fill_b(bs2);
    bs = __msa_insve_h(bs, 2, tmp_vec);
    tmp_vec = (v8i16) __msa_fill_b(bs3);
    bs = __msa_insve_h(bs, 3, tmp_vec);

    if (!__msa_test_bz_v((v16u8) bs)) {
        tmp_vec = (v8i16) __msa_fill_b(tc0);
        tc = __msa_insve_h(tc, 0, tmp_vec);
        tmp_vec = (v8i16) __msa_fill_b(tc1);
        tc = __msa_insve_h(tc, 1, tmp_vec);
        tmp_vec = (v8i16) __msa_fill_b(tc2);
        tc = __msa_insve_h(tc, 2, tmp_vec);
        tmp_vec = (v8i16) __msa_fill_b(tc3);
        tc = __msa_insve_h(tc, 3, tmp_vec);

        is_bs_greater_than0 = (v16u8) (zero < (v16i8) bs);

        alpha = (v16u8) __msa_fill_b(alpha_in);
        beta = (v16u8) __msa_fill_b(beta_in);

               p1_org, p0_org, q0_org, q1_org);

        p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
        p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
        q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

        is_less_than_alpha = (p0_asub_q0 < alpha);
        is_less_than_beta = (p1_asub_p0 < beta);
        is_less_than = is_less_than_beta & is_less_than_alpha;
        is_less_than_beta = (q1_asub_q0 < beta);
        is_less_than = is_less_than_beta & is_less_than;
        is_less_than = is_less_than & is_bs_greater_than0;

        is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);

        if (!__msa_test_bz_v(is_less_than)) {
            negate_tc = zero - (v16i8) tc;
            sign_negate_tc = __msa_clti_s_b(negate_tc, 0);

                       p1_org_r, p0_org_r, q0_org_r, q1_org_r);

            AVC_LPF_P0Q0(q0_org_r, p0_org_r, p1_org_r, q1_org_r, negate_tc_r,

            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
    v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
    v16u8 p1_org, p0_org, q0_org, q1_org;
    v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
    v16u8 is_bs_greater_than0;
    v8i16 tc_r, negate_tc_r;
    v16i8 negate_tc, sign_negate_tc;

    v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
    v8i16 tmp1, tmp_vec, bs = { 0 };

    tmp_vec = (v8i16) __msa_fill_b(bs0);
    bs = __msa_insve_h(bs, 0, tmp_vec);
    tmp_vec = (v8i16) __msa_fill_b(bs1);
    bs = __msa_insve_h(bs, 1, tmp_vec);
    tmp_vec = (v8i16) __msa_fill_b(bs2);
    bs = __msa_insve_h(bs, 2, tmp_vec);
    tmp_vec = (v8i16) __msa_fill_b(bs3);
    bs = __msa_insve_h(bs, 3, tmp_vec);

    if (!__msa_test_bz_v((v16u8) bs)) {
        tmp_vec = (v8i16) __msa_fill_b(tc0);
        tc = __msa_insve_h(tc, 0, tmp_vec);
        tmp_vec = (v8i16) __msa_fill_b(tc1);
        tc = __msa_insve_h(tc, 1, tmp_vec);
        tmp_vec = (v8i16) __msa_fill_b(tc2);
        tc = __msa_insve_h(tc, 2, tmp_vec);
        tmp_vec = (v8i16) __msa_fill_b(tc3);
        tc = __msa_insve_h(tc, 3, tmp_vec);

        is_bs_greater_than0 = (v16u8) (zero < (v16i8) bs);

               row0, row1, row2, row3, row4, row5, row6, row7);
                           row4, row5, row6, row7,
                           p1_org, p0_org, q0_org, q1_org);

        p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
        p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
        q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

        alpha = (v16u8) __msa_fill_b(alpha_in);
        beta = (v16u8) __msa_fill_b(beta_in);

        is_less_than_alpha = (p0_asub_q0 < alpha);
        is_less_than_beta = (p1_asub_p0 < beta);
        is_less_than = is_less_than_beta & is_less_than_alpha;
        is_less_than_beta = (q1_asub_q0 < beta);
        is_less_than = is_less_than_beta & is_less_than;
        is_less_than = is_bs_greater_than0 & is_less_than;

        is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);

        if (!__msa_test_bz_v(is_less_than)) {
                       p1_org_r, p0_org_r, q0_org_r, q1_org_r);

            negate_tc = zero - (v16i8) tc;
            sign_negate_tc = __msa_clti_s_b(negate_tc, 0);

            AVC_LPF_P0Q0(q0_org_r, p0_org_r, p1_org_r, q1_org_r, negate_tc_r,

            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
            tmp1 = (v8i16) __msa_ilvr_b((v16i8) q0_org, (v16i8) p0_org);

            ST_H4(tmp1, 0, 1, 2, 3, src, img_width);
            src += 4 * img_width;
            ST_H4(tmp1, 4, 5, 6, 7, src, img_width);
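/* Note: the two small loops below appear to drive the AVC_LPF_H_CHROMA_422
 * and AVC_LPF_H_2BYTE_CHROMA_422 macros column by column: tc_val is taken
 * from tc0[col] (the original "(tc0[col] - 1) + 1" expression is kept as
 * written), and the filtered pixel pair is written back with SH stores at
 * src - 1. */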
    v16u8 alpha, beta, res;

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    for (col = 0; col < 4; col++) {
        tc_val = (tc0[col] - 1) + 1;

    v16u8 alpha, beta, res;

    alpha = (v16u8) __msa_fill_b(alpha_in);
    beta = (v16u8) __msa_fill_b(beta_in);

    for (col = 0; col < 4; col++) {
        tc_val = (tc0[col] - 1) + 1;

        out0 = __msa_copy_s_h((v8i16) res, 0);
        out1 = __msa_copy_s_h((v8i16) res, 1);

        SH(out0, (src - 1));
        SH(out1, (src - 1));
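/* Note: the parameter-list and call fragments below appear to be what is
 * left of the thin public wrapper functions, which simply forward alpha,
 * beta, tc0 and the stride to the MSA kernels above. */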
                                          int alpha, int beta, int8_t *tc)
                                       alpha, beta, img_width);

                                          int alpha, int beta, int8_t *tc)
                                       alpha, beta, img_width);

                                          int alpha, int beta, int8_t *tc)
                                       alpha, beta, img_width);

                                          int alpha, int beta, int8_t *tc)
                                       alpha, beta, img_width);

                                          int alpha, int beta)
                                       (unsigned int) img_width);

                                          int alpha, int beta)
                                       (unsigned int) img_width);

                                          int alpha, int beta)
                                       (unsigned int) img_width);

                                          int alpha, int beta)
                                       (unsigned int) img_width);
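/* Note: the fragment below appears to be the 16-pixel-wide weighted
 * prediction entry point.  Each 16-byte row is widened into left/right
 * halves, multiplied by the splatted weight, offset-added, clamped at zero,
 * round-shifted by log2_denom, saturated to 8 bit and packed back; the
 * second half of the body repeats the same sequence, presumably for the
 * remaining rows of a 16-row block. */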
                                      int height, int log2_denom,
                                      int weight_src, int offset_in)

    uint32_t offset_val;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 src0_l, src1_l, src2_l, src3_l, src0_r, src1_r, src2_r, src3_r;
    v8i16 src4_l, src5_l, src6_l, src7_l, src4_r, src5_r, src6_r, src7_r;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    v8i16 tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
    v8i16 wgt, denom, offset;

    offset_val = (unsigned) offset_in << log2_denom;

    wgt = __msa_fill_h(weight_src);
    offset = __msa_fill_h(offset_val);
    denom = __msa_fill_h(log2_denom);

    ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, src0_r, src1_r,
    ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, src0_l, src1_l,
    ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, src4_r, src5_r,
    ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, src4_l, src5_l,
    MUL4(wgt, src0_r, wgt, src0_l, wgt, src1_r, wgt, src1_l, tmp0, tmp1, tmp2,
    MUL4(wgt, src2_r, wgt, src2_l, wgt, src3_r, wgt, src3_l, tmp4, tmp5, tmp6,
    MUL4(wgt, src4_r, wgt, src4_l, wgt, src5_r, wgt, src5_l, tmp8, tmp9, tmp10,
    MUL4(wgt, src6_r, wgt, src6_l, wgt, src7_r, wgt, src7_l, tmp12, tmp13,
                tmp9, tmp10, tmp11);
                tmp12, tmp13, tmp14, tmp15);
    MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
    MAXI_SH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 0);
    SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
    SRLR_H8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, denom);
    SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
    SAT_UH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 7);
    PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
    PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,

               src1_r, src2_r, src3_r);
               src1_l, src2_l, src3_l);
               src5_r, src6_r, src7_r);
               src5_l, src6_l, src7_l);
    MUL4(wgt, src0_r, wgt, src0_l, wgt, src1_r, wgt, src1_l, tmp0, tmp1,
    MUL4(wgt, src2_r, wgt, src2_l, wgt, src3_r, wgt, src3_l, tmp4, tmp5,
    MUL4(wgt, src4_r, wgt, src4_l, wgt, src5_r, wgt, src5_l, tmp8, tmp9,
    MUL4(wgt, src6_r, wgt, src6_l, wgt, src7_r, wgt, src7_l, tmp12, tmp13,
                tmp0, tmp1, tmp2, tmp3);
                tmp4, tmp5, tmp6, tmp7);
                tmp8, tmp9, tmp10, tmp11);
                tmp12, tmp13, tmp14, tmp15);
    MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
    MAXI_SH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 0);
    SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
    SRLR_H8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, denom);
    SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
    SAT_UH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 7);
    PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
    PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
                                      int height, int log2_denom,
                                      int weight_src, int offset)

    } else if (8 == height) {

                                      int height, int log2_denom,
                                      int weight_src, int offset)

    } else if (4 == height) {
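/* Note: the last large fragment appears to be the 16-wide bi-weighted
 * counterpart: source and destination rows are interleaved byte-wise, each
 * pair is multiply-accumulated onto the packed offset with __msa_dpadd_s_h
 * using the interleaved src/dst weights, then shifted by (log2_denom + 1),
 * clipped to [0, 255], packed and stored with ST_UB8; as above, the sequence
 * is repeated for a second batch of 8 rows. */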
                                        int log2_denom, int weight_dst,
                                        int weight_src, int offset_in)

    v16i8 src_wgt, dst_wgt, wgt;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    v8i16 tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;

    offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
    offset_in += (128 * (weight_src + weight_dst));

    src_wgt = __msa_fill_b(weight_src);
    dst_wgt = __msa_fill_b(weight_dst);
    offset = __msa_fill_h(offset_in);
    denom = __msa_fill_h(log2_denom + 1);

    wgt = __msa_ilvev_b(dst_wgt, src_wgt);

    LD_UB8(dst, stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);

    ILVR_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec0, vec2, vec4,
    ILVL_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec1, vec3, vec5,
    ILVR_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec8, vec10,
    ILVL_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec9, vec11,

    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
    tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
    tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
    tmp4 = __msa_dpadd_s_h(offset, wgt, vec4);
    tmp5 = __msa_dpadd_s_h(offset, wgt, vec5);
    tmp6 = __msa_dpadd_s_h(offset, wgt, vec6);
    tmp7 = __msa_dpadd_s_h(offset, wgt, vec7);
    tmp8 = __msa_dpadd_s_h(offset, wgt, vec8);
    tmp9 = __msa_dpadd_s_h(offset, wgt, vec9);
    tmp10 = __msa_dpadd_s_h(offset, wgt, vec10);
    tmp11 = __msa_dpadd_s_h(offset, wgt, vec11);
    tmp12 = __msa_dpadd_s_h(offset, wgt, vec12);
    tmp13 = __msa_dpadd_s_h(offset, wgt, vec13);
    tmp14 = __msa_dpadd_s_h(offset, wgt, vec14);
    tmp15 = __msa_dpadd_s_h(offset, wgt, vec15);
    SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
    SRA_4V(tmp4, tmp5, tmp6, tmp7, denom);
    SRA_4V(tmp8, tmp9, tmp10, tmp11, denom);
    SRA_4V(tmp12, tmp13, tmp14, tmp15, denom);
    CLIP_SH8_0_255(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15);
    PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
    PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
    ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, stride);

    LD_UB8(dst, stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);

    ILVR_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec0, vec2,
    ILVL_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec1, vec3,
    ILVR_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec8, vec10,
    ILVL_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec9, vec11,

    tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
    tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
    tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
    tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
    tmp4 = __msa_dpadd_s_h(offset, wgt, vec4);
    tmp5 = __msa_dpadd_s_h(offset, wgt, vec5);
    tmp6 = __msa_dpadd_s_h(offset, wgt, vec6);
    tmp7 = __msa_dpadd_s_h(offset, wgt, vec7);
    tmp8 = __msa_dpadd_s_h(offset, wgt, vec8);
    tmp9 = __msa_dpadd_s_h(offset, wgt, vec9);
    tmp10 = __msa_dpadd_s_h(offset, wgt, vec10);
    tmp11 = __msa_dpadd_s_h(offset, wgt, vec11);
    tmp12 = __msa_dpadd_s_h(offset, wgt, vec12);
    tmp13 = __msa_dpadd_s_h(offset, wgt, vec13);
    tmp14 = __msa_dpadd_s_h(offset, wgt, vec14);
    tmp15 = __msa_dpadd_s_h(offset, wgt, vec15);
    SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
    SRA_4V(tmp4, tmp5, tmp6, tmp7, denom);
    SRA_4V(tmp8, tmp9, tmp10, tmp11, denom);
    SRA_4V(tmp12, tmp13, tmp14, tmp15, denom);
    CLIP_SH8_0_255(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15);
    PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
    PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
    ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, stride);
                                        int log2_denom, int weight_dst,
                                        int weight_src, int offset)

    } else if (8 == height) {

                                        int log2_denom, int weight_dst,
                                        int weight_src, int offset)

    } else if (4 == height) {