#define vzero vec_splat_s32(0)

#define GET_LS(a,b,c,s) {\
        a = vec_vsx_ld(((b) << 1) + 16, s);\

#define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
        vector signed short ls;\
        vector signed int   vf1, vf2, i1, i2;\
        GET_LS(l1, x, perm, src);\
        i1  = vec_mule(filter, ls);\
        i2  = vec_mulo(filter, ls);\
        vf1 = vec_mergeh(i1, i2);\
        vf2 = vec_mergel(i1, i2);\
        d1 = vec_add(d1, vf1);\
        d2 = vec_add(d2, vf2);\

#define LOAD_FILTER(vf,f) {\
        vf = vec_vsx_ld(joffset, f);\

#define LOAD_L1(ll1,s,p) {\
        ll1 = vec_vsx_ld(xoffset, s);\

#define GET_VF4(a, vf, f) {\
        vf = (vector signed short) vec_vsx_ld(a << 3, f);\
        vf = vec_mergeh(vf, (vector signed short) vzero);\

#define FIRST_LOAD(sv, pos, s, per) {}
#define UPDATE_PTR(s0, d0, s1, d1) {}

#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
        vf = vec_vsx_ld(pos + a, s);\

#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)

#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
        vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\

#define FUNC(name) name ## _vsx
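
/*
 * 8-bit planar output. The plain C helper below covers the scalar per-pixel
 * case (used for the unaligned head and the tail of a row); the _vsx variant
 * handles the aligned middle 16 pixels at a time, adding the two dither
 * vectors before shifting and packing to bytes.
 */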
static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
    for (i = start; i < dstW; i++) {
static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
    const int dst_u = -(uintptr_t)dest & 15;
    vec_s16 vi, vileft, ditherleft, ditherright;

    for (j = 0; j < 16; j++) {

    ditherleft = vec_ld(0, val);
    ditherright = vec_ld(0, &val[8]);

    for (i = dst_u; i < dstW - 15; i += 16) {
        vi = vec_vsx_ld(0, &src[i]);
        vi = vec_adds(ditherleft, vi);
        vileft = vec_sra(vi, shifts);

        vi = vec_vsx_ld(0, &src[i + 8]);
        vi = vec_adds(ditherright, vi);

        vd = vec_packsu(vileft, vi);
        vec_st(vd, 0, &dest[i]);
#define output_pixel(pos, val) \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
    int shift = 15 - output_bits;

    for (i = start; i < dstW; i++) {
                                uint16_t *dest, int dstW,
                                const int big_endian,
                                const int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 15 - output_bits;
    const int clip = (1 << output_bits) - 1;
    const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);

    yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
        v = vec_add(v, vadd);
        v = vec_sr(v, vshift);
        v = vec_min(v, vlargest);
        v = vec_rl(v, vswap);
        vec_st(v, 0, &dest[i]);

    yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
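
/*
 * yuv2planeX: vertical scaling with a full filter. Each of the filterSize
 * source rows is multiplied by its splatted 16-bit coefficient with
 * vec_mule/vec_mulo and accumulated in 32 bits before shifting, clipping
 * and an optional byte swap for big-endian output.
 */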
static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
                              const int16_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
    int shift = 11 + 16 - output_bits;

    for (i = start; i < dstW; i++) {
        for (j = 0; j < filterSize; j++)
static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
                                const int16_t **src, uint16_t *dest, int dstW,
                                int big_endian, int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 11 + 16 - output_bits;
    const int clip = (1 << output_bits) - 1;
    const uint16_t swap = big_endian ? 8 : 0;
    const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
    const vec_s16 vzero = vec_splat_s16(0);
    const vec_u8 vperm = (vec_u8) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};

    for (i = 0; i < filterSize; i++) {

    yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        vleft = vright = vadd;
        for (j = 0; j < filterSize; j++) {
            vin = vec_vsx_ld(0, &src[j][i]);
            vtmp = (vec_u32) vec_mule(vin, vfilter[j]);
            vleft = vec_add(vleft, vtmp);
            vtmp = (vec_u32) vec_mulo(vin, vfilter[j]);
            vright = vec_add(vright, vtmp);

        vleft = vec_sra(vleft, vshift);
        vright = vec_sra(vright, vshift);
        v = vec_packsu(vleft, vright);
        v = vec_min(v, vlargest);
        v = vec_rl(v, vswap);
        v = vec_perm(v, v, vperm);
        vec_st(v, 0, &dest[i]);

    yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
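
/* 16-bit output variants: these work on 32-bit intermediates (int32_t sources)
 * and add a 0x8000 bias after the shift to map the signed intermediate into
 * the unsigned 16-bit range. */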
#define output_pixel(pos, val, bias, signedness) \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits, int start)
    for (i = start; i < dstW; i++) {
                              uint16_t *dest, int dstW,
                              const int big_endian,
    const int dst_u = -(uintptr_t)dest & 7;
    const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);

    yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
        v = vec_add(v, vadd);
        v = vec_sr(v, vshift);

        v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
        v2 = vec_add(v2, vadd);
        v2 = vec_sr(v2, vshift);

        vd = vec_packsu(v, v2);
        vd = vec_rl(vd, vswap);
        vec_st(vd, 0, &dest[i]);

    yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
                            const int32_t **src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits, int start)
    for (i = start; i < dstW; i++) {
        for (j = 0; j < filterSize; j++)
static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
                              const int32_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits)
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 15;
    const int bias = 0x8000;
    const int add = (1 << (shift - 1)) - 0x40000000;
    const uint16_t swap = big_endian ? 8 : 0;
    const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
    const vec_u16 vbias = (vec_u16) {bias, bias, bias, bias, bias, bias, bias, bias};

    for (i = 0; i < filterSize; i++) {

    yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        vleft = vright = vadd;
        for (j = 0; j < filterSize; j++) {
            vin32l = vec_vsx_ld(0, &src[j][i]);
            vin32r = vec_vsx_ld(0, &src[j][i + 4]);
            vtmp = (vec_u32) vec_mul(vin32l, vfilter[j]);
            vleft = vec_add(vleft, vtmp);
            vtmp = (vec_u32) vec_mul(vin32r, vfilter[j]);
            vright = vec_add(vright, vtmp);

        vleft = vec_sra(vleft, vshift);
        vright = vec_sra(vright, vshift);
        v = vec_add(v, vbias);
        v = vec_rl(v, vswap);
        vec_st(v, 0, &dest[i]);

    yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)

#define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
                                                 uint8_t *dest, int dstW, \
                                                 const uint8_t *dither, int offset) \
    yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
                                         (uint16_t *) dest, dstW, is_be, bits); \

#define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
                                                 const int16_t **src, uint8_t *dest, int dstW, \
                                                 const uint8_t *dither, int offset) \
    yuv2planeX_ ## template_size ## _vsx(filter, \
                                         filterSize, (const typeX_t **) src, \
                                         (uint16_t *) dest, dstW, is_be, bits); \

yuv2NBPS1(16, BE, 1, 16, int32_t)
yuv2NBPS1(16, LE, 0, 16, int32_t)

yuv2NBPSX(16, BE, 1, 16, int32_t)
yuv2NBPSX(16, LE, 0, 16, int32_t)
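
/*
 * The block below is the tail of the shared RGB write-out macro: R/G/B are
 * clamped to [0, 1 << 30], shifted down by 22 bits, packed to bytes and then
 * permuted/merged into the destination byte order selected by the target
 * pixel format.
 */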
    R_l = vec_max(R_l, zero32); \
    R_r = vec_max(R_r, zero32); \
    G_l = vec_max(G_l, zero32); \
    G_r = vec_max(G_r, zero32); \
    B_l = vec_max(B_l, zero32); \
    B_r = vec_max(B_r, zero32); \
    R_l = vec_min(R_l, rgbclip); \
    R_r = vec_min(R_r, rgbclip); \
    G_l = vec_min(G_l, rgbclip); \
    G_r = vec_min(G_r, rgbclip); \
    B_l = vec_min(B_l, rgbclip); \
    B_r = vec_min(B_r, rgbclip); \
    R_l = vec_sr(R_l, shift22); \
    R_r = vec_sr(R_r, shift22); \
    G_l = vec_sr(G_l, shift22); \
    G_r = vec_sr(G_r, shift22); \
    B_l = vec_sr(B_l, shift22); \
    B_r = vec_sr(B_r, shift22); \
    rd16 = vec_packsu(R_l, R_r); \
    gd16 = vec_packsu(G_l, G_r); \
    bd16 = vec_packsu(B_l, B_r); \
    rd = vec_packsu(rd16, zero16); \
    gd = vec_packsu(gd16, zero16); \
    bd = vec_packsu(bd16, zero16); \
    case AV_PIX_FMT_RGB24: \
        out0 = vec_perm(rd, gd, perm3rg0); \
        out0 = vec_perm(out0, bd, perm3tb0); \
        out1 = vec_perm(rd, gd, perm3rg1); \
        out1 = vec_perm(out1, bd, perm3tb1); \
        vec_vsx_st(out0, 0, dest); \
        vec_vsx_st(out1, 16, dest); \
    case AV_PIX_FMT_BGR24: \
        out0 = vec_perm(bd, gd, perm3rg0); \
        out0 = vec_perm(out0, rd, perm3tb0); \
        out1 = vec_perm(bd, gd, perm3rg1); \
        out1 = vec_perm(out1, rd, perm3tb1); \
        vec_vsx_st(out0, 0, dest); \
        vec_vsx_st(out1, 16, dest); \
    case AV_PIX_FMT_BGRA: \
        out0 = vec_mergeh(bd, gd); \
        out1 = vec_mergeh(rd, ad); \
        tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 0, dest); \
        tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 16, dest); \
    case AV_PIX_FMT_RGBA: \
        out0 = vec_mergeh(rd, gd); \
        out1 = vec_mergeh(bd, ad); \
        tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 0, dest); \
        tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 16, dest); \
    case AV_PIX_FMT_ARGB: \
        out0 = vec_mergeh(ad, rd); \
        out1 = vec_mergeh(gd, bd); \
        tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 0, dest); \
        tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 16, dest); \
    case AV_PIX_FMT_ABGR: \
        out0 = vec_mergeh(ad, bd); \
        out1 = vec_mergeh(gd, rd); \
        tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 0, dest); \
        tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
        vec_vsx_st(tmp8, 16, dest); \
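
/*
 * Vertical filtering straight to RGB for the "full" (one chroma sample per
 * output pixel) path: the luma and chroma taps are accumulated in 32 bits,
 * shifted by 10, then converted with the per-context yuv2rgb coefficients
 * before the write-out above.
 */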
yuv2rgb_full_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
                            const int16_t **lumSrc, int lumFilterSize,
                            const int16_t *chrFilter, const int16_t **chrUSrc,
                            const int16_t **chrVSrc, int chrFilterSize,
                            const int16_t **alpSrc, uint8_t *dest,
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_s32 ystart = vec_splats(1 << 9);
    const vec_s32 uvstart = vec_splats((1 << 9) - (128 << 19));
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u32 shift10 = vec_splat_u32(10);

    ad = vec_splats((uint8_t) 255);

    for (i = 0; i < lumFilterSize; i++)
        vlumFilter[i] = vec_splats(lumFilter[i]);
    for (i = 0; i < chrFilterSize; i++)
        vchrFilter[i] = vec_splats(chrFilter[i]);

    for (i = 0; i < dstW; i += 8) {
        for (j = 0; j < lumFilterSize; j++) {
            vv = vec_ld(0, &lumSrc[j][i]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32_l = vec_adds(vy32_l, tmp3);
            vy32_r = vec_adds(vy32_r, tmp4);

        for (j = 0; j < chrFilterSize; j++) {
            vv = vec_ld(0, &chrUSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vu32_l = vec_adds(vu32_l, tmp3);
            vu32_r = vec_adds(vu32_r, tmp4);

            vv = vec_ld(0, &chrVSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vv32_l = vec_adds(vv32_l, tmp3);
            vv32_r = vec_adds(vv32_r, tmp4);

        vy32_l = vec_sra(vy32_l, shift10);
        vy32_r = vec_sra(vy32_r, shift10);
        vu32_l = vec_sra(vu32_l, shift10);
        vu32_r = vec_sra(vu32_r, shift10);
        vv32_l = vec_sra(vv32_l, shift10);
        vv32_r = vec_sra(vv32_r, shift10);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
#define SETUP(x, buf0, alpha1, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha1); \
    tmp2 = vec_mulo(x, alpha1); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \
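
/* SETUP blends one vector from each of two input lines: buf0 is weighted by
 * alpha1 and buf1 by alpha, and the even/odd 32-bit products are summed into
 * tmp3/tmp4 for the caller to shift and use. */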
yuv2rgb_full_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf[2], uint8_t *dest, int dstW,
                            int yalpha, int uvalpha, int y,
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vec_s16 vy, vu, vv, A = vec_splat_s16(0);
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
    const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift19 = vec_splats(19U);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u32 shift10 = vec_splat_u32(10);
    const vec_s32 dec128 = vec_splats(128 << 19);
    const vec_s32 add18 = vec_splats(1 << 18);

    for (i = 0; i < dstW; i += 8) {
        SETUP(vy, &buf0[i], vyalpha1, &buf1[i], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vu32_l = vec_sra(tmp3, shift10);
        vu32_r = vec_sra(tmp4, shift10);

        SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vv32_l = vec_sra(tmp3, shift10);
        vv32_r = vec_sra(tmp4, shift10);

            SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
            tmp3 = vec_add(tmp3, add18);
            tmp4 = vec_add(tmp4, add18);
            tmp3 = vec_sra(tmp3, shift19);
            tmp4 = vec_sra(tmp4, shift19);
            A = vec_packs(tmp3, tmp4);
            ad = vec_packsu(A, (vec_s16) zero16);
            ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
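
/*
 * The subsampled (non-"full") 2-line variant: each chroma sample is duplicated
 * for a pair of luma pixels via the doubleleft/doubleright permutes, and every
 * iteration converts two 8-pixel groups (buf0[i * 2] and buf0[i * 2 + 8]).
 */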
yuv2rgb_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vec_s16 vy, vu, vv, A = vec_splat_s16(0);
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r, vud32_l, vud32_r, vvd32_l, vvd32_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
    const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift19 = vec_splats(19U);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u32 shift10 = vec_splat_u32(10);
    const vec_s32 dec128 = vec_splats(128 << 19);
    const vec_s32 add18 = vec_splats(1 << 18);

    for (i = 0; i < (dstW + 1) >> 1; i += 8) {
        SETUP(vy, &buf0[i * 2], vyalpha1, &buf1[i * 2], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vu32_l = vec_sra(tmp3, shift10);
        vu32_r = vec_sra(tmp4, shift10);

        SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vv32_l = vec_sra(tmp3, shift10);
        vv32_r = vec_sra(tmp4, shift10);

            SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
            tmp3 = vec_add(tmp3, add18);
            tmp4 = vec_add(tmp4, add18);
            tmp3 = vec_sra(tmp3, shift19);
            tmp4 = vec_sra(tmp4, shift19);
            A = vec_packs(tmp3, tmp4);
            ad = vec_packsu(A, (vec_s16) zero16);
            ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
        vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
        vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
        vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);

        SETUP(vy, &buf0[i * 2 + 8], vyalpha1, &buf1[i * 2 + 8], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
        vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
        vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
        vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
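
/*
 * Single-line full-chroma variant: luma is just widened and shifted, while
 * chroma either comes from the current line alone (uvalpha < 2048, scaled by
 * mul4) or is summed with the second line before scaling.
 */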
yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf0, uint8_t *dest, int dstW,
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
    vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
    const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
    const vec_s16 mul4 = vec_splat_s16(4);
    const vec_s16 add64 = vec_splat_s16(64);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_s16 max255 = vec_splat_s16(255);

    for (i = 0; i < dstW; i += 8) {
        vy = vec_ld(0, &buf0[i]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vu = vec_ld(0, &ubuf0[i]);
        vv = vec_ld(0, &vbuf0[i]);
        if (uvalpha < 2048) {
            tmp32 = vec_mule(vu, mul4);
            tmp32_2 = vec_mulo(vu, mul4);
            vu32_l = vec_mergeh(tmp32, tmp32_2);
            vu32_r = vec_mergel(tmp32, tmp32_2);
            tmp32 = vec_mule(vv, mul4);
            tmp32_2 = vec_mulo(vv, mul4);
            vv32_l = vec_mergeh(tmp32, tmp32_2);
            vv32_r = vec_mergel(tmp32, tmp32_2);

            tmp16 = vec_ld(0, &ubuf1[i]);
            vu = vec_add(vu, tmp16);
            tmp16 = vec_ld(0, &vbuf1[i]);
            vv = vec_add(vv, tmp16);

            vu32_l = vec_mule(vu, mul8);
            vu32_r = vec_mulo(vu, mul8);
            vv32_l = vec_mule(vv, mul8);
            vv32_r = vec_mulo(vv, mul8);

            A = vec_ld(0, &abuf0[i]);
            A = vec_add(A, add64);
            A = vec_sr(A, shift7);
            A = vec_max(A, max255);
            ad = vec_packsu(A, (vec_s16) zero16);
            ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
    vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
    vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
    vec_s32 vud32_l, vud32_r, vvd32_l, vvd32_r;
    vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
    vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
    const vec_u16 zero16 = vec_splat_u16(0);
    const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vec_s32 y_add = vec_splats(1 << 21);
    const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vec_s32 rgbclip = vec_splats(1 << 30);
    const vec_s32 zero32 = vec_splat_s32(0);
    const vec_u32 shift22 = vec_splats(22U);
    const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
    const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
    const vec_s16 mul4 = vec_splat_s16(4);
    const vec_s16 add64 = vec_splat_s16(64);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_s16 max255 = vec_splat_s16(255);

    for (i = 0; i < (dstW + 1) >> 1; i += 8) {
        vy = vec_ld(0, &buf0[i * 2]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vu = vec_ld(0, &ubuf0[i]);
        vv = vec_ld(0, &vbuf0[i]);
        if (uvalpha < 2048) {
            tmp32 = vec_mule(vu, mul4);
            tmp32_2 = vec_mulo(vu, mul4);
            vu32_l = vec_mergeh(tmp32, tmp32_2);
            vu32_r = vec_mergel(tmp32, tmp32_2);
            tmp32 = vec_mule(vv, mul4);
            tmp32_2 = vec_mulo(vv, mul4);
            vv32_l = vec_mergeh(tmp32, tmp32_2);
            vv32_r = vec_mergel(tmp32, tmp32_2);

            tmp16 = vec_ld(0, &ubuf1[i]);
            vu = vec_add(vu, tmp16);
            tmp16 = vec_ld(0, &vbuf1[i]);
            vv = vec_add(vv, tmp16);

            vu32_l = vec_mule(vu, mul8);
            vu32_r = vec_mulo(vu, mul8);
            vv32_l = vec_mule(vv, mul8);
            vv32_r = vec_mulo(vv, mul8);

            A = vec_ld(0, &abuf0[i]);
            A = vec_add(A, add64);
            A = vec_sr(A, shift7);
            A = vec_max(A, max255);
            ad = vec_packsu(A, (vec_s16) zero16);
            ad = vec_splats((uint8_t) 255);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
        vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
        vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
        vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);

        vy = vec_ld(16, &buf0[i * 2]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
        vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
        vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
        vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);

        R_l = vec_mul(vvd32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vvd32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vvd32_l, v2g_coeff);
        tmp32 = vec_mul(vud32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vvd32_r, v2g_coeff);
        tmp32 = vec_mul(vud32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);
        B_l = vec_mul(vud32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vud32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);
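
/* The wrapper macros below generate the public _X/_2/_1 entry points by
 * instantiating the templates with a fixed pixel format and alpha setting. */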
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
                                  const int16_t **lumSrc, int lumFilterSize, \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize, \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, \
    name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                    alpSrc, dest, dstW, y, fmt, hasAlpha); \

#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                    dstW, uvalpha, y, fmt, hasAlpha); \
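
/*
 * write422: the constant initializer tables below are vec_perm selectors that
 * interleave the packed luma vector vd1 with the packed chroma vector vd2 into
 * YUYV, YVYU or UYVY byte order, 16 output bytes per store.
 */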
                                  0x0, 0x10, 0x1, 0x18,
                                  0x2, 0x11, 0x3, 0x19,
                                  0x4, 0x12, 0x5, 0x1a,
                                  0x6, 0x13, 0x7, 0x1b };
                                  0x8, 0x14, 0x9, 0x1c,
                                  0xa, 0x15, 0xb, 0x1d,
                                  0xc, 0x16, 0xd, 0x1e,
                                  0xe, 0x17, 0xf, 0x1f };
                                  0x0, 0x18, 0x1, 0x10,
                                  0x2, 0x19, 0x3, 0x11,
                                  0x4, 0x1a, 0x5, 0x12,
                                  0x6, 0x1b, 0x7, 0x13 };
                                  0x8, 0x1c, 0x9, 0x14,
                                  0xa, 0x1d, 0xb, 0x15,
                                  0xc, 0x1e, 0xd, 0x16,
                                  0xe, 0x1f, 0xf, 0x17 };
                                  0x10, 0x0, 0x18, 0x1,
                                  0x11, 0x2, 0x19, 0x3,
                                  0x12, 0x4, 0x1a, 0x5,
                                  0x13, 0x6, 0x1b, 0x7 };
                                  0x14, 0x8, 0x1c, 0x9,
                                  0x15, 0xa, 0x1d, 0xb,
                                  0x16, 0xc, 0x1e, 0xd,
                                  0x17, 0xe, 0x1f, 0xf };

    vd1 = vec_packsu(vy1, vy2);
    vd2 = vec_packsu(vu, vv);

        tmp = vec_perm(vd1, vd2, yuyv1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yuyv2);
        vec_st(tmp, 16, dest);

        tmp = vec_perm(vd1, vd2, yvyu1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yvyu2);
        vec_st(tmp, 16, dest);

        tmp = vec_perm(vd1, vd2, uyvy1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, uyvy2);
        vec_st(tmp, 16, dest);
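
/* yuv2422_X: accumulate the vertical filter taps in 32 bits, shift by 19,
 * pack back to 16-bit and hand the result to write422 above. */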
yuv2422_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest, int dstW,
    vec_s32 vy32[4], vu32[2], vv32[2], tmp, tmp2, tmp3, tmp4;
    const vec_s32 start = vec_splats(1 << 18);
    const vec_u32 shift19 = vec_splats(19U);

    for (i = 0; i < lumFilterSize; i++)
        vlumFilter[i] = vec_splats(lumFilter[i]);
    for (i = 0; i < chrFilterSize; i++)
        vchrFilter[i] = vec_splats(chrFilter[i]);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        for (j = 0; j < lumFilterSize; j++) {
            vv = vec_ld(0, &lumSrc[j][i * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[0] = vec_adds(vy32[0], tmp3);
            vy32[1] = vec_adds(vy32[1], tmp4);

            vv = vec_ld(0, &lumSrc[j][(i + 4) * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[2] = vec_adds(vy32[2], tmp3);
            vy32[3] = vec_adds(vy32[3], tmp4);

        for (j = 0; j < chrFilterSize; j++) {
            vv = vec_ld(0, &chrUSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vu32[0] = vec_adds(vu32[0], tmp3);
            vu32[1] = vec_adds(vu32[1], tmp4);

            vv = vec_ld(0, &chrVSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vv32[0] = vec_adds(vv32[0], tmp3);
            vv32[1] = vec_adds(vv32[1], tmp4);

        for (j = 0; j < 4; j++) {
            vy32[j] = vec_sra(vy32[j], shift19);
        for (j = 0; j < 2; j++) {
            vu32[j] = vec_sra(vu32[j], shift19);
            vv32[j] = vec_sra(vv32[j], shift19);

        vy1 = vec_packs(vy32[0], vy32[1]);
        vy2 = vec_packs(vy32[2], vy32[3]);
        vu = vec_packs(vu32[0], vu32[1]);
        vv = vec_packs(vv32[0], vv32[1]);

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
#define SETUP(x, buf0, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \
    tmp3 = vec_sra(tmp3, shift19); \
    tmp4 = vec_sra(tmp4, shift19); \
    x = vec_packs(tmp3, tmp4); \
yuv2422_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_u32 shift19 = vec_splats(19U);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        SETUP(vy1, &buf0[i * 2], &buf1[i * 2], vyalpha1)
        SETUP(vy2, &buf0[(i + 4) * 2], &buf1[(i + 4) * 2], vyalpha1)
        SETUP(vu, &ubuf0[i], &ubuf1[i], vuvalpha1)
        SETUP(vv, &vbuf0[i], &vbuf1[i], vuvalpha1)

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
yuv2422_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const vec_s16 add64 = vec_splats((int16_t) 64);
    const vec_s16 add128 = vec_splats((int16_t) 128);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_u16 shift8 = vec_splat_u16(8);

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            vv = vec_ld(0, &vbuf0[i]);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_add(vu, add64);
            vv = vec_add(vv, add64);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift7);
            vv = vec_sra(vv, shift7);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            tmp = vec_ld(0, &ubuf1[i]);
            vu = vec_adds(vu, tmp);
            vv = vec_ld(0, &vbuf0[i]);
            tmp = vec_ld(0, &vbuf1[i]);
            vv = vec_adds(vv, tmp);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_adds(vu, add128);
            vv = vec_adds(vv, add128);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift8);
            vv = vec_sra(vv, shift8);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
#define YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
                                  const int16_t **lumSrc, int lumFilterSize, \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize, \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, \
    name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                    alpSrc, dest, dstW, y, fmt); \

#define YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt); \

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, \
                                    abuf0, dest, dstW, uvalpha, \
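
/*
 * Horizontal fast bilinear luma scaler: for each group of 16 output pixels the
 * integer part of xpos + i * xInc becomes a vec_perm gather index into the
 * source, and the top bits of the fractional part become a 7-bit alpha used to
 * blend each sample with its right neighbour (loaded at byte offset 1).
 */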
static void hyscale_fast_vsx(SwsContext *c, int16_t *dst, int dstWidth,
                             const uint8_t *src, int srcW, int xInc)
    unsigned int xpos = 0, xx;
    vec_s16 vtmp, vtmp2, vtmp3, vtmp4;
    vec_u16 vd_l, vd_r, vcoord16[2];
    const vec_u32 vshift16 = vec_splats((uint32_t) 16);
    const vec_u16 vshift9 = vec_splat_u16(9);
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u16 vshift = vec_splat_u16(7);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = (vec_s8) vec_pack(vcoord16[0], vcoord16[1]);

        vin = vec_vsx_ld(0, &src[xx]);

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        vin = vec_perm(vin, vin, vperm);

        vin2 = vec_vsx_ld(1, &src[xx]);
        vin2 = vec_perm(vin2, vin2, vperm);

        vmul = (vec_s8) vec_sub(vin2, vin);
        vtmp = vec_mule(vmul, valpha);
        vtmp2 = vec_mulo(vmul, valpha);
        vtmp3 = vec_mergeh(vtmp, vtmp2);
        vtmp4 = vec_mergel(vtmp, vtmp2);

        vd_l = (vec_u16) vec_mergeh(vin, vzero);
        vd_r = (vec_u16) vec_mergel(vin, vzero);
        vd_l = vec_sl(vd_l, vshift);
        vd_r = vec_sl(vd_r, vshift);

        vd_l = vec_add(vd_l, (vec_u16) vtmp3);
        vd_r = vec_add(vd_r, (vec_u16) vtmp4);

        vec_st((vec_s16) vd_l, 0, &dst[i]);
        vec_st((vec_s16) vd_r, 0, &dst[i + 8]);

    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
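
/* HCSCALE factors out the same gather-and-blend for the two chroma planes;
 * hcscale_fast_vsx complements the 7-bit alpha by XORing with 127 and applies
 * the pair of weights to the left and right source samples. */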
#define HCSCALE(in, out) \
    vin = vec_vsx_ld(0, &in[xx]); \
    vin = vec_perm(vin, vin, vperm); \
    vin2 = vec_vsx_ld(1, &in[xx]); \
    vin2 = vec_perm(vin2, vin2, vperm); \
    vtmp = vec_mule(vin, valphaxor); \
    vtmp2 = vec_mulo(vin, valphaxor); \
    vtmp3 = vec_mergeh(vtmp, vtmp2); \
    vtmp4 = vec_mergel(vtmp, vtmp2); \
    vtmp = vec_mule(vin2, valpha); \
    vtmp2 = vec_mulo(vin2, valpha); \
    vd_l = vec_mergeh(vtmp, vtmp2); \
    vd_r = vec_mergel(vtmp, vtmp2); \
    vd_l = vec_add(vd_l, vtmp3); \
    vd_r = vec_add(vd_r, vtmp4); \
    vec_st((vec_s16) vd_l, 0, &out[i]); \
    vec_st((vec_s16) vd_r, 0, &out[i + 8])

static void hcscale_fast_vsx(SwsContext *c, int16_t *dst1, int16_t *dst2,
                             int dstWidth, const uint8_t *src1,
                             const uint8_t *src2, int srcW, int xInc)
    unsigned int xpos = 0, xx;
    vec_u8 valpha, valphaxor;
    vec_u16 vtmp, vtmp2, vtmp3, vtmp4;
    vec_u16 vd_l, vd_r, vcoord16[2];
    const vec_u8 vxor = vec_splats((uint8_t) 127);
    const vec_u32 vshift16 = vec_splats((uint32_t) 16);
    const vec_u16 vshift9 = vec_splat_u16(9);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = vec_pack(vcoord16[0], vcoord16[1]);
        valphaxor = vec_xor(valpha, vxor);

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        HCSCALE(src1, dst1);
        HCSCALE(src2, dst2);

    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
        dst1[i] = src1[srcW - 1] * 128;
        dst2[i] = src2[srcW - 1] * 128;
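
/*
 * Horizontal 8-bit -> 19-bit scaler. vunusedtab provides vec_perm masks that
 * zero out the taps past filterSize when filterSize is not a multiple of 8
 * (index 0x10 selects from the zero vector passed as the second operand).
 */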
static void hScale8To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                            const uint8_t *src, const int16_t *filter,
                            const int32_t *filterPos, int filterSize)
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            for (j = 0; j < filterSize; j++) {
            dst[i] = FFMIN(val >> 3, (1 << 19) - 1);
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin8 = vec_vsx_ld(0, &src[srcPos + j]);
                vin = (vec_s16) vec_mergeh(vin8, vzero);
                if (j + 8 > filterSize)
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vout = vec_msums(vin, vfilter, vout);
            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> 3, (1 << 19) - 1);
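
/* The 16-bit input scalers below follow the same pattern but widen the filter
 * coefficients with vec_unpackh/vec_unpackl and derive the output shift from
 * the source component depth. */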
static void hScale16To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
    const uint16_t *src = (const uint16_t *) _src;
    int bits = desc->comp[0].depth - 1;
    vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            for (j = 0; j < filterSize; j++) {
            dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize)
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);
            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 19) - 1);
static void hScale16To15_vsx(SwsContext *c, int16_t *dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;
    vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            for (j = 0; j < filterSize; j++) {
            dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);
            for (j = 0; j < filterSize; j += 8) {
                vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize)
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);
            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 15) - 1);
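
/*
 * Runtime initialization (presumably ff_sws_init_swscale_vsx): the VSX
 * implementations above are installed into the SwsContext function pointers
 * according to source/destination bit depth and the destination pixel format.
 */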
    if (c->srcBpc == 8) {
        if (c->dstBpc <= 14) {
            c->hyScale = c->hcScale = hScale_real_vsx;
                c->hyscale_fast = hyscale_fast_vsx;
                c->hcscale_fast = hcscale_fast_vsx;
            c->hyScale = c->hcScale = hScale8To19_vsx;
        c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_vsx

    c->yuv2planeX = yuv2planeX_vsx;

    switch (c->dstBpc) {
        c->yuv2plane1 = yuv2plane1_8_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx  : yuv2plane1_9LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx  : yuv2planeX_9LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
        c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
        c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;

    switch (dstFormat) {
        c->yuv2packed1 = yuv2rgb24_full_1_vsx;
        c->yuv2packed2 = yuv2rgb24_full_2_vsx;
        c->yuv2packedX = yuv2rgb24_full_X_vsx;

        c->yuv2packed1 = yuv2bgr24_full_1_vsx;
        c->yuv2packed2 = yuv2bgr24_full_2_vsx;
        c->yuv2packedX = yuv2bgr24_full_X_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
            c->yuv2packed2 = yuv2bgrx32_full_2_vsx;
            c->yuv2packedX = yuv2bgrx32_full_X_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
            c->yuv2packed2 = yuv2rgbx32_full_2_vsx;
            c->yuv2packedX = yuv2rgbx32_full_X_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
            c->yuv2packed2 = yuv2xrgb32_full_2_vsx;
            c->yuv2packedX = yuv2xrgb32_full_X_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
            c->yuv2packed2 = yuv2xbgr32_full_2_vsx;
            c->yuv2packedX = yuv2xbgr32_full_X_vsx;

    switch (dstFormat) {
        c->yuv2packed1 = yuv2yuyv422_1_vsx;
        c->yuv2packed2 = yuv2yuyv422_2_vsx;
        c->yuv2packedX = yuv2yuyv422_X_vsx;

        c->yuv2packed1 = yuv2yvyu422_1_vsx;
        c->yuv2packed2 = yuv2yvyu422_2_vsx;
        c->yuv2packedX = yuv2yvyu422_X_vsx;

        c->yuv2packed1 = yuv2uyvy422_1_vsx;
        c->yuv2packed2 = yuv2uyvy422_2_vsx;
        c->yuv2packedX = yuv2uyvy422_X_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2bgrx32_1_vsx;
            c->yuv2packed2 = yuv2bgrx32_2_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2rgbx32_1_vsx;
            c->yuv2packed2 = yuv2rgbx32_2_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2xrgb32_1_vsx;
            c->yuv2packed2 = yuv2xrgb32_2_vsx;

        if (!c->needAlpha) {
            c->yuv2packed1 = yuv2xbgr32_1_vsx;
            c->yuv2packed2 = yuv2xbgr32_2_vsx;

        c->yuv2packed1 = yuv2rgb24_1_vsx;
        c->yuv2packed2 = yuv2rgb24_2_vsx;

        c->yuv2packed1 = yuv2bgr24_1_vsx;
        c->yuv2packed2 = yuv2bgr24_2_vsx;