#include "libavcodec/vp8dsp.h"
#include "vp8dsp_loongarch.h"
#include "libavutil/loongarch/loongson_intrinsics.h"

#define VP8_LPF_FILTER4_4W(p1_in_out, p0_in_out, q0_in_out, q1_in_out, \
                           mask_in, hev_in) \
{ \
    __m128i p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
    __m128i filt, filt1, filt2, cnst4b, cnst3b; \
    __m128i q0_sub_p0_l, q0_sub_p0_h, filt_h, filt_l, cnst3h; \
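 \
    /* move pixels into the signed domain: XOR with 0x80 maps unsigned \
     * [0,255] to signed [-128,127], so saturating signed byte ops \
     * implement the VP8 filter arithmetic directly */ \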
    p1_m = __lsx_vxori_b(p1_in_out, 0x80); \
    p0_m = __lsx_vxori_b(p0_in_out, 0x80); \
    q0_m = __lsx_vxori_b(q0_in_out, 0x80); \
    q1_m = __lsx_vxori_b(q1_in_out, 0x80); \
    filt = __lsx_vssub_b(p1_m, q1_m); \
    filt = filt & hev_in; \
 \
    q0_sub_p0 = __lsx_vsub_b(q0_m, p0_m); \
    filt_sign = __lsx_vslti_b(filt, 0); \
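 \
    /* filt += 3 * (q0 - p0), widened to halfwords (low half, then high) so \
     * the sum cannot overflow, then saturated back to [-128,127]; each diff \
     * byte is duplicated and vdp2 with the halfword constant 3 (byte pair \
     * {3, 0}) yields 3 * diff as a sign-extended halfword */ \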
    cnst3h = __lsx_vreplgr2vr_h(3); \
    q0_sub_p0_l = __lsx_vilvl_b(q0_sub_p0, q0_sub_p0); \
    q0_sub_p0_l = __lsx_vdp2_h_b(q0_sub_p0_l, cnst3h); \
    filt_l = __lsx_vilvl_b(filt_sign, filt); \
    filt_l = __lsx_vadd_h(filt_l, q0_sub_p0_l); \
    filt_l = __lsx_vsat_h(filt_l, 7); \
 \
    q0_sub_p0_h = __lsx_vilvh_b(q0_sub_p0, q0_sub_p0); \
    q0_sub_p0_h = __lsx_vdp2_h_b(q0_sub_p0_h, cnst3h); \
    filt_h = __lsx_vilvh_b(filt_sign, filt); \
    filt_h = __lsx_vadd_h(filt_h, q0_sub_p0_h); \
    filt_h = __lsx_vsat_h(filt_h, 7); \
 \
    filt = __lsx_vpickev_b(filt_h, filt_l); \
    filt = filt & mask_in; \
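 \
    /* standard VP8 adjustment: q0 -= (filt + 4) >> 3, p0 += (filt + 3) >> 3 */ \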
    cnst4b = __lsx_vreplgr2vr_b(4); \
    filt1 = __lsx_vsadd_b(filt, cnst4b); \
    filt1 = __lsx_vsrai_b(filt1, 3); \
 \
    cnst3b = __lsx_vreplgr2vr_b(3); \
    filt2 = __lsx_vsadd_b(filt, cnst3b); \
    filt2 = __lsx_vsrai_b(filt2, 3); \
 \
    q0_m = __lsx_vssub_b(q0_m, filt1); \
    q0_in_out = __lsx_vxori_b(q0_m, 0x80); \
    p0_m = __lsx_vsadd_b(p0_m, filt2); \
    p0_in_out = __lsx_vxori_b(p0_m, 0x80); \
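 \
    /* outer taps: p1/q1 move by (filt1 + 1) >> 1, only where hev is not set */ \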
    filt = __lsx_vsrari_b(filt1, 1); \
    hev_in = __lsx_vxori_b(hev_in, 0xff); \
    filt = filt & hev_in; \
 \
    q1_m = __lsx_vssub_b(q1_m, filt); \
    q1_in_out = __lsx_vxori_b(q1_m, 0x80); \
    p1_m = __lsx_vsadd_b(p1_m, filt); \
    p1_in_out = __lsx_vxori_b(p1_m, 0x80); \
}

#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
{ \
    __m128i p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
    __m128i filt, q0_sub_p0, cnst4b, cnst3b; \
    __m128i u, filt1, filt2, filt_sign, q0_sub_p0_sign; \
    __m128i q0_sub_p0_l, q0_sub_p0_h, filt_l, u_l, u_h, filt_h; \
    __m128i cnst3h, cnst27h, cnst18h, cnst63h; \
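 \
    /* macroblock filter: hev pixels get the 4-tap adjustment above, the \
     * remaining masked pixels get the wide taps u = (c * filt + 63) >> 7 \
     * with c = 27, 18, 9 moving outwards from the edge */ \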
    cnst3h = __lsx_vreplgr2vr_h(3); \
 \
    p2_m = __lsx_vxori_b(p2, 0x80); \
    p1_m = __lsx_vxori_b(p1, 0x80); \
    p0_m = __lsx_vxori_b(p0, 0x80); \
    q0_m = __lsx_vxori_b(q0, 0x80); \
    q1_m = __lsx_vxori_b(q1, 0x80); \
    q2_m = __lsx_vxori_b(q2, 0x80); \
 \
    filt = __lsx_vssub_b(p1_m, q1_m); \
    q0_sub_p0 = __lsx_vsub_b(q0_m, p0_m); \
    q0_sub_p0_sign = __lsx_vslti_b(q0_sub_p0, 0); \
    filt_sign = __lsx_vslti_b(filt, 0); \
 \
    q0_sub_p0_l = __lsx_vilvl_b(q0_sub_p0_sign, q0_sub_p0); \
    q0_sub_p0_l = __lsx_vmul_h(q0_sub_p0_l, cnst3h); \
    filt_l = __lsx_vilvl_b(filt_sign, filt); \
    filt_l = __lsx_vadd_h(filt_l, q0_sub_p0_l); \
    filt_l = __lsx_vsat_h(filt_l, 7); \
 \
    q0_sub_p0_h = __lsx_vilvh_b(q0_sub_p0_sign, q0_sub_p0); \
    q0_sub_p0_h = __lsx_vmul_h(q0_sub_p0_h, cnst3h); \
    filt_h = __lsx_vilvh_b(filt_sign, filt); \
    filt_h = __lsx_vadd_h(filt_h, q0_sub_p0_h); \
    filt_h = __lsx_vsat_h(filt_h, 7); \
 \
    filt = __lsx_vpickev_b(filt_h, filt_l); \
    filt = filt & mask; \
    filt2 = filt & hev; \
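 \
    /* clear the hev positions from filt: only non-hev pixels receive the \
     * wide 27/18/9 taps below */ \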
    hev = __lsx_vxori_b(hev, 0xff); \
    filt = filt & hev; \
 \
    cnst4b = __lsx_vreplgr2vr_b(4); \
    filt1 = __lsx_vsadd_b(filt2, cnst4b); \
    filt1 = __lsx_vsrai_b(filt1, 3); \
    cnst3b = __lsx_vreplgr2vr_b(3); \
    filt2 = __lsx_vsadd_b(filt2, cnst3b); \
    filt2 = __lsx_vsrai_b(filt2, 3); \
    q0_m = __lsx_vssub_b(q0_m, filt1); \
    p0_m = __lsx_vsadd_b(p0_m, filt2); \
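 \
    /* sign-extend filt to halfwords for the exact 27/18/9-tap products */ \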
    filt_sign = __lsx_vslti_b(filt, 0); \
    filt_l = __lsx_vilvl_b(filt_sign, filt); \
    filt_h = __lsx_vilvh_b(filt_sign, filt); \
 \
    cnst27h = __lsx_vreplgr2vr_h(27); \
    cnst63h = __lsx_vreplgr2vr_h(63); \
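 \
    /* u = (27 * filt + 63) >> 7: strongest correction, applied at the edge (p0/q0) */ \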
    u_l = __lsx_vmul_h(filt_l, cnst27h); \
    u_l = __lsx_vadd_h(u_l, cnst63h); \
    u_l = __lsx_vsrai_h(u_l, 7); \
    u_l = __lsx_vsat_h(u_l, 7); \
 \
    u_h = __lsx_vmul_h(filt_h, cnst27h); \
    u_h = __lsx_vadd_h(u_h, cnst63h); \
    u_h = __lsx_vsrai_h(u_h, 7); \
    u_h = __lsx_vsat_h(u_h, 7); \
 \
    u = __lsx_vpickev_b(u_h, u_l); \
    q0_m = __lsx_vssub_b(q0_m, u); \
    q0 = __lsx_vxori_b(q0_m, 0x80); \
    p0_m = __lsx_vsadd_b(p0_m, u); \
    p0 = __lsx_vxori_b(p0_m, 0x80); \
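 \
    /* u = (18 * filt + 63) >> 7, applied one pixel away from the edge (p1/q1) */ \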
    cnst18h = __lsx_vreplgr2vr_h(18); \
    u_l = __lsx_vmul_h(filt_l, cnst18h); \
    u_l = __lsx_vadd_h(u_l, cnst63h); \
    u_l = __lsx_vsrai_h(u_l, 7); \
    u_l = __lsx_vsat_h(u_l, 7); \
 \
    u_h = __lsx_vmul_h(filt_h, cnst18h); \
    u_h = __lsx_vadd_h(u_h, cnst63h); \
    u_h = __lsx_vsrai_h(u_h, 7); \
    u_h = __lsx_vsat_h(u_h, 7); \
 \
    u = __lsx_vpickev_b(u_h, u_l); \
    q1_m = __lsx_vssub_b(q1_m, u); \
    q1 = __lsx_vxori_b(q1_m, 0x80); \
    p1_m = __lsx_vsadd_b(p1_m, u); \
    p1 = __lsx_vxori_b(p1_m, 0x80); \
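 \
    /* u = (9 * filt + 63) >> 7, with 9 * filt computed as (filt << 3) + filt, \
     * applied two pixels away from the edge (p2/q2) */ \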
    u_l = __lsx_vslli_h(filt_l, 3); \
    u_l = __lsx_vadd_h(u_l, filt_l); \
    u_l = __lsx_vadd_h(u_l, cnst63h); \
    u_l = __lsx_vsrai_h(u_l, 7); \
    u_l = __lsx_vsat_h(u_l, 7); \
 \
    u_h = __lsx_vslli_h(filt_h, 3); \
    u_h = __lsx_vadd_h(u_h, filt_h); \
    u_h = __lsx_vadd_h(u_h, cnst63h); \
    u_h = __lsx_vsrai_h(u_h, 7); \
    u_h = __lsx_vsat_h(u_h, 7); \
 \
    u = __lsx_vpickev_b(u_h, u_l); \
    q2_m = __lsx_vssub_b(q2_m, u); \
    q2 = __lsx_vxori_b(q2_m, 0x80); \
    p2_m = __lsx_vsadd_b(p2_m, u); \
    p2 = __lsx_vxori_b(p2_m, 0x80); \
}

#define LPF_MASK_HEV(p3_src, p2_src, p1_src, p0_src, \
                     q0_src, q1_src, q2_src, q3_src, \
                     limit_src, b_limit_src, thresh_src, \
                     hev_dst, mask_dst, flat_dst) \
{ \
    __m128i p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
    __m128i p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
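 \
    /* outputs: hev_dst = high-edge-variance flag, mask_dst = 0xff where the \
     * edge should be filtered, flat_dst = max(|p1-p0|, |q1-q0|) */ \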
    p3_asub_p2_m = __lsx_vabsd_bu(p3_src, p2_src); \
    p2_asub_p1_m = __lsx_vabsd_bu(p2_src, p1_src); \
    p1_asub_p0_m = __lsx_vabsd_bu(p1_src, p0_src); \
    q1_asub_q0_m = __lsx_vabsd_bu(q1_src, q0_src); \
    q2_asub_q1_m = __lsx_vabsd_bu(q2_src, q1_src); \
    q3_asub_q2_m = __lsx_vabsd_bu(q3_src, q2_src); \
    p0_asub_q0_m = __lsx_vabsd_bu(p0_src, q0_src); \
    p1_asub_q1_m = __lsx_vabsd_bu(p1_src, q1_src); \
 \
    flat_dst = __lsx_vmax_bu(p1_asub_p0_m, q1_asub_q0_m); \
    hev_dst = __lsx_vslt_bu(thresh_src, flat_dst); \
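 \
    /* filter the edge when |p0-q0| * 2 + |p1-q1| / 2 <= b_limit and every \
     * neighbouring difference is <= limit */ \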
    p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p0_asub_q0_m); \
    p1_asub_q1_m = __lsx_vsrli_b(p1_asub_q1_m, 1); \
    p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p1_asub_q1_m); \
    mask_dst = __lsx_vslt_bu(b_limit_src, p0_asub_q0_m); \
    mask_dst = __lsx_vmax_bu(flat_dst, mask_dst); \
    p3_asub_p2_m = __lsx_vmax_bu(p3_asub_p2_m, p2_asub_p1_m); \
    mask_dst = __lsx_vmax_bu(p3_asub_p2_m, mask_dst); \
    q2_asub_q1_m = __lsx_vmax_bu(q2_asub_q1_m, q3_asub_q2_m); \
    mask_dst = __lsx_vmax_bu(q2_asub_q1_m, mask_dst); \
    mask_dst = __lsx_vslt_bu(limit_src, mask_dst); \
    mask_dst = __lsx_vxori_b(mask_dst, 0xff); \
}
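
/* store 6 bytes of one row: a 32-bit element of in0 at pdst, then a 16-bit
 * element of in1 at pdst + stride; the callers below pass stride = 4 so the
 * two stores write 6 consecutive bytes */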
#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \
{ \
    __lsx_vstelm_w(in0, pdst, 0, in0_idx); \
    __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx); \
}

#define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \
{ \
    __lsx_vstelm_w(in, pdst, 0, idx0); \
    pdst += stride; \
    __lsx_vstelm_w(in, pdst, 0, idx1); \
    pdst += stride; \
    __lsx_vstelm_w(in, pdst, 0, idx2); \
    pdst += stride; \
    __lsx_vstelm_w(in, pdst, 0, idx3); \
    pdst += stride; \
}

void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
                                int limit_in, int thresh_in)
{
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, b_limit, limit;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit = __lsx_vreplgr2vr_b(limit_in);
    thresh = __lsx_vreplgr2vr_b(thresh_in);
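
    /* load the four rows above and the four rows below the edge in advance */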
    DUP4_ARG2(__lsx_vld, dst - stride4, 0, dst - stride3, 0, dst - stride2, 0,
              dst - stride, 0, p3, p2, p1, p0);
    DUP4_ARG2(__lsx_vld, dst, 0, dst + stride, 0, dst + stride2, 0,
              dst + stride3, 0, q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3,
                 limit, b_limit, thresh, hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
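
    /* write back the six filtered rows */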
    __lsx_vst(p2, dst - stride3, 0);
    __lsx_vst(p1, dst - stride2, 0);
    __lsx_vst(p0, dst - stride, 0);
    __lsx_vst(q0, dst, 0);
    __lsx_vst(q1, dst + stride, 0);
    __lsx_vst(q2, dst + stride2, 0);
}

void ff_vp8_v_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
                                 ptrdiff_t stride, int b_limit_in,
                                 int limit_in, int thresh_in)
{
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, b_limit, limit;
    __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
    __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit = __lsx_vreplgr2vr_b(limit_in);
    thresh = __lsx_vreplgr2vr_b(thresh_in);
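
    /* U and V are 8 pixels wide: pack U into the low doubleword and V into
     * the high doubleword of each vector so one 16-byte pass filters both */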
    DUP4_ARG2(__lsx_vld, dst_u - stride4, 0, dst_u - stride3, 0, dst_u - stride2, 0,
              dst_u - stride, 0, p3_u, p2_u, p1_u, p0_u);
    DUP4_ARG2(__lsx_vld, dst_u, 0, dst_u + stride, 0, dst_u + stride2, 0,
              dst_u + stride3, 0, q0_u, q1_u, q2_u, q3_u);
    DUP4_ARG2(__lsx_vld, dst_v - stride4, 0, dst_v - stride3, 0, dst_v - stride2, 0,
              dst_v - stride, 0, p3_v, p2_v, p1_v, p0_v);
    DUP4_ARG2(__lsx_vld, dst_v, 0, dst_v + stride, 0, dst_v + stride2, 0,
              dst_v + stride3, 0, q0_v, q1_v, q2_v, q3_v);

    DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u,
              p3, p2, p1, p0);
    DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u,
              q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3,
                 limit, b_limit, thresh, hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
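
    /* store the U rows from element 0 (low doubleword) and the V rows from
     * element 1 (high doubleword) */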
    __lsx_vstelm_d(p2, dst_u - stride3, 0, 0);
    __lsx_vstelm_d(p1, dst_u - stride2, 0, 0);
    __lsx_vstelm_d(p0, dst_u - stride, 0, 0);
    __lsx_vstelm_d(q0, dst_u, 0, 0);
    __lsx_vstelm_d(q1, dst_u + stride, 0, 0);
    __lsx_vstelm_d(q2, dst_u + stride2, 0, 0);

    __lsx_vstelm_d(p2, dst_v - stride3, 0, 1);
    __lsx_vstelm_d(p1, dst_v - stride2, 0, 1);
    __lsx_vstelm_d(p0, dst_v - stride, 0, 1);
    __lsx_vstelm_d(q0, dst_v, 0, 1);
    __lsx_vstelm_d(q1, dst_v + stride, 0, 1);
    __lsx_vstelm_d(q2, dst_v + stride2, 0, 1);
}

void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
                                int limit_in, int thresh_in)
{
    uint8_t *temp_src;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, b_limit, limit;
    __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
    __m128i row9, row10, row11, row12, row13, row14, row15;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit = __lsx_vreplgr2vr_b(limit_in);
    thresh = __lsx_vreplgr2vr_b(thresh_in);
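
    /* load 16 rows of 16 bytes starting 4 columns left of the vertical edge */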
    temp_src = dst - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row0, row1, row2, row3);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row4, row5, row6, row7);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row8, row9, row10, row11);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row12, row13, row14, row15);
    LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
                        row8, row9, row10, row11, row12, row13, row14, row15,
                        p3, p2, p1, p0, q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3,
                 limit, b_limit, thresh, hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
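
    /* interleave the six filtered columns back into row order: tmp3/tmp4 and
     * tmp6/tmp7 hold (p2, p1, p0, q0) words, tmp2/tmp5 hold (q1, q2) halfwords */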
    tmp0 = __lsx_vilvl_b(p1, p2);
    tmp1 = __lsx_vilvl_b(q0, p0);
    tmp3 = __lsx_vilvl_h(tmp1, tmp0);
    tmp4 = __lsx_vilvh_h(tmp1, tmp0);

    tmp0 = __lsx_vilvh_b(p1, p2);
    tmp1 = __lsx_vilvh_b(q0, p0);
    tmp6 = __lsx_vilvl_h(tmp1, tmp0);
    tmp7 = __lsx_vilvh_h(tmp1, tmp0);

    tmp2 = __lsx_vilvl_b(q2, q1);
    tmp5 = __lsx_vilvh_b(q2, q1);

    temp_src = dst - 3;
    VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
}

void ff_vp8_h_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
                                 ptrdiff_t stride, int b_limit_in,
                                 int limit_in, int thresh_in)
{
    uint8_t *temp_src;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, b_limit, limit;
    __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
    __m128i row9, row10, row11, row12, row13, row14, row15;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit = __lsx_vreplgr2vr_b(limit_in);
    thresh = __lsx_vreplgr2vr_b(thresh_in);
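
    /* load 8 U rows and 8 V rows, each starting 4 columns left of the edge */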
    temp_src = dst_u - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row0, row1, row2, row3);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row4, row5, row6, row7);

    temp_src = dst_v - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row8, row9, row10, row11);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row12, row13, row14, row15);

    LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
                        row8, row9, row10, row11, row12, row13, row14, row15,
                        p3, p2, p1, p0, q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3,
                 limit, b_limit, thresh, hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
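
    /* interleave back to rows: tmp3/tmp4 with tmp2 cover the U rows (0-7),
     * tmp6/tmp7 with tmp5 cover the V rows (8-15) */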
    tmp0 = __lsx_vilvl_b(p1, p2);
    tmp1 = __lsx_vilvl_b(q0, p0);
    tmp3 = __lsx_vilvl_h(tmp1, tmp0);
    tmp4 = __lsx_vilvh_h(tmp1, tmp0);

    tmp0 = __lsx_vilvh_b(p1, p2);
    tmp1 = __lsx_vilvh_b(q0, p0);
    tmp6 = __lsx_vilvl_h(tmp1, tmp0);
    tmp7 = __lsx_vilvh_h(tmp1, tmp0);

    tmp2 = __lsx_vilvl_b(q2, q1);
    tmp5 = __lsx_vilvh_b(q2, q1);

    temp_src = dst_u - 3;
    VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);

    temp_src = dst_v - 3;
    VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
}

void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
                                      int e, int i, int h)
{
    uint8_t *temp_src;
    __m128i mask, hev, flat;
    __m128i thresh, b_limit, limit;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;
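
    /* load the rows around the edge in advance */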
    temp_src = src - stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, p3, p2, p1, p0);
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, q0, q1, q2, q3);

    thresh = __lsx_vreplgr2vr_b(h);
    b_limit = __lsx_vreplgr2vr_b(e);
    limit = __lsx_vreplgr2vr_b(i);
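
    /* inner-edge filter: only p1, p0, q0 and q1 are adjusted */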
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3,
                 limit, b_limit, thresh, hev, mask, flat);
    VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);

    __lsx_vst(p1, src - stride2, 0);
    __lsx_vst(p0, src - stride, 0);
    __lsx_vst(q0, src, 0);
    __lsx_vst(q1, src + stride, 0);
}

void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
                                      int e, int i, int h)
{
    uint8_t *temp_src;
    __m128i mask, hev, flat;
    __m128i thresh, b_limit, limit;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;
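
    /* load 16 rows of 16 bytes starting 4 columns left of the vertical edge */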
    temp_src = src - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, tmp0, tmp1, tmp2, tmp3);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, tmp4, tmp5, tmp6, tmp7);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, tmp8, tmp9, tmp10, tmp11);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, tmp12, tmp13, tmp14, tmp15);

    LSX_TRANSPOSE16x8_B(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
                        tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15,
                        p3, p2, p1, p0, q0, q1, q2, q3);

    thresh = __lsx_vreplgr2vr_b(h);
    b_limit = __lsx_vreplgr2vr_b(e);
    limit = __lsx_vreplgr2vr_b(i);
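
    /* filter, then interleave the four updated columns (p1, p0, q0, q1)
     * back into 4-byte rows and store them at src - 2 */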
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3,
                 limit, b_limit, thresh, hev, mask, flat);
    VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);

    tmp0 = __lsx_vilvl_b(p0, p1);
    tmp1 = __lsx_vilvl_b(q1, q0);
    tmp2 = __lsx_vilvl_h(tmp1, tmp0);
    tmp3 = __lsx_vilvh_h(tmp1, tmp0);

    src -= 2;
    ST_W4(tmp2, 0, 1, 2, 3, src, stride);
    ST_W4(tmp3, 0, 1, 2, 3, src, stride);

    tmp0 = __lsx_vilvh_b(p0, p1);
    tmp1 = __lsx_vilvh_b(q1, q0);
    tmp2 = __lsx_vilvl_h(tmp1, tmp0);
    tmp3 = __lsx_vilvh_h(tmp1, tmp0);

    ST_W4(tmp2, 0, 1, 2, 3, src, stride);
    ST_W4(tmp3, 0, 1, 2, 3, src, stride);
}