DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36, 68,  60, 92,  34, 66,  58, 90, },
    { 100,  4, 124, 28,  98,  2, 122, 26, },
    {  52, 84,  44, 76,  50, 82,  42, 74, },
    { 116, 20, 108, 12, 114, 18, 106, 10, },
    {  32, 64,  56, 88,  38, 70,  62, 94, },
    {  96,  0, 120, 24, 102,  6, 126, 30, },
    {  48, 80,  40, 72,  54, 86,  46, 78, },
    { 112, 16, 104,  8, 118, 22, 110, 14, },
    {  36, 68,  60, 92,  34, 66,  58, 90, },
};

DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
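ff_dither_8x8_128 is an 8x8 ordered-dither matrix (values 0..126) with the first row repeated as a ninth row so a row pointer can be picked per output line without wrapping; sws_pb_64 is the flat "64" row used when dithering is disabled. The sketch below shows, under my own naming, how such a table is typically applied when truncating a 15-bit intermediate to 8 bits; it illustrates the technique and is not code from swscale.c.

/* Illustration only: ordered dithering with an 8x8 table like the one above.
 * The helper name and the exact rounding are assumptions of this sketch. */
#include <stdint.h>

static uint8_t dither_15bit_to_8bit(int val15, int x, int y,
                                    const uint8_t dither[9][8])
{
    /* the table entries are 7-bit offsets; adding one before the >>7 turns
     * plain truncation into ordered (spatially varying) rounding */
    int v = (val15 + dither[y & 7][x & 7]) >> 7;
    return v < 0 ? 0 : (v > 255 ? 255 : v);
}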
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
                                       int height, int y, uint8_t val)
{
    uint8_t *ptr = plane + stride * y;
    /* ... memset() 'width' bytes to 'val' on each of the 'height' lines,
     *     advancing 'ptr' by 'stride' ... */
}
static void hScale16To19_c(SwsContext *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const uint16_t *src = (const uint16_t *) _src;
    /* ... output is written through an int32_t view of _dst; the right-shift
     *     'sh' is derived from the source component depth ... */

    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            /* ... accumulate src[srcPos + j] * filter[filterSize * i + j] ... */
        }
        /* ... store the sum shifted down by 'sh' and clipped to 19 bits ... */
    }
}
static void hScale16To15_c(SwsContext *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;
    /* ... 'sh' is adjusted for RGB/paletted and float sources ... */

    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            /* ... accumulate src[srcPos + j] * filter[filterSize * i + j] ... */
        }
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
static void hScale8To15_c(SwsContext *c, int16_t *dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            /* ... accumulate src[srcPos + j] * filter[filterSize * i + j] ... */
        }
        /* ... store the sum clipped to 15 bits ... */
    }
}
static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            /* ... accumulate src[srcPos + j] * filter[filterSize * i + j] ... */
        }
        /* ... store the sum clipped to 19 bits through an int32_t view of _dst ... */
    }
}
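All four hScale*_c variants evaluate the same horizontal polyphase filter: for output pixel i, filterPos[i] gives the first source sample and filter[filterSize*i + j] holds the filterSize fixed-point coefficients. Below is a self-contained sketch of that convolution with invented demo data, reusing the ">> 7, clip to 15 bits" step of the 8-bit path; it is an illustration, not an excerpt from the file.

/* Minimal sketch of the horizontal scaling convolution used by the hScale*_c
 * family. The tables are demo data; only the indexing scheme and the final
 * shift/clip mirror hScale8To15_c above. */
#include <stdint.h>
#include <stdio.h>

#define FFMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    enum { DSTW = 4, FILTER_SIZE = 2 };
    const uint8_t src[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
    /* 2-tap box filter, coefficients in Q14 (two taps of 8192 sum to 1.0) */
    const int16_t filter[DSTW * FILTER_SIZE] = {
        8192, 8192,  8192, 8192,  8192, 8192,  8192, 8192
    };
    const int32_t filterPos[DSTW] = { 0, 2, 4, 6 };
    int16_t dst[DSTW];

    for (int i = 0; i < DSTW; i++) {
        int val = 0;
        for (int j = 0; j < FILTER_SIZE; j++)
            val += src[filterPos[i] + j] * filter[FILTER_SIZE * i + j];
        dst[i] = FFMIN(val >> 7, (1 << 15) - 1);  /* 8+14 bit product -> 15 bit */
    }

    for (int i = 0; i < DSTW; i++)
        printf("dst[%d] = %d\n", i, dst[i]);      /* 15, 35, 55, 75, each << 7 */
    return 0;
}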
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775) * 4663 - 9289992) >> 12;
        dstV[i] = (FFMIN(dstV[i], 30775) * 4663 - 9289992) >> 12;
    }
}

static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + 4081085) >> 11;
        dstV[i] = (dstV[i] * 1799 + 4081085) >> 11;
    }
}

static void lumRangeToJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i], 30189) * 19077 - 39057361) >> 14;
}

static void lumRangeFromJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * 14071 + 33561947) >> 14;
}

static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
{
    int i;
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
        dstV[i] = (FFMIN(dstV[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
    }
}

static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
{
    int i;
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + (4081085 << 4)) >> 11;
        dstV[i] = (dstV[i] * 1799 + (4081085 << 4)) >> 11;
    }
}

static void lumRangeToJpeg16_c(int16_t *_dst, int width)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < width; i++)
        dst[i] = ((int)(FFMIN(dst[i], 30189 << 4) * 4769U - (39057361 << 2))) >> 12;
}

static void lumRangeFromJpeg16_c(int16_t *_dst, int width)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * (14071 / 4) + (33561947 << 4) / 4) >> 12;
}
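These constants are the usual full-range (JPEG, 0..255) vs. limited-range (MPEG, 16..235 luma / 16..240 chroma) mappings in fixed point, applied to 15-bit intermediate samples. For example, lumRangeFromJpeg_c computes y*219/255 + 16 at 15-bit scale: 14071/2^14 is about 219/255 and 33561947/2^14 is about 2048.5, i.e. 16.5*128. A quick stand-alone check against the floating-point formula follows; the reference expression is my own restatement of what the constants encode, not code from the file.

/* Numerical check of the lumRangeFromJpeg_c constants against the textbook
 * full -> limited range formula. */
#include <stdio.h>

int main(void)
{
    for (int y8 = 0; y8 <= 255; y8 += 51) {
        int y15    = y8 << 7;                              /* 15-bit sample */
        int fixed  = (y15 * 14071 + 33561947) >> 14;       /* lumRangeFromJpeg_c */
        double ref = (y8 * 219.0 / 255.0 + 16.0) * 128.0;  /* full -> limited, << 7 */
        printf("y=%3d  fixed=%5d  ref=%7.1f\n", y8, fixed, ref);
    }
    return 0;
}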
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...)                      \
    if (DEBUG_SWSCALE_BUFFERS)                  \
        av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
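DEBUG_BUFFERS() expands to a dead "if (0)" branch unless DEBUG_SWSCALE_BUFFERS is flipped to 1, so the av_log() arguments stay parsed and type-checked but produce no output or overhead in normal builds. A stand-alone illustration of the same pattern; my_log is a stand-in for av_log and is not part of swscale.c.

/* Compile-time-gated trace macro, same shape as DEBUG_BUFFERS above. */
#include <stdio.h>
#include <stdarg.h>

#define DEBUG_ENABLED 0
#define DEBUG_TRACE(...)     \
    if (DEBUG_ENABLED)       \
        my_log(__VA_ARGS__)

static void my_log(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    DEBUG_TRACE("compiled and type-checked, but prints nothing: %d\n", 42);
    return 0;
}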
static int swscale(SwsContext *c, const uint8_t *src[], int srcStride[],
                   int srcSliceY, int srcSliceH,
                   uint8_t *dst[], int dstStride[],
                   int dstSliceY, int dstSliceH)
{
    const int scale_dst = dstSliceY > 0 || dstSliceH < c->dstH;

    /* load a few things into local vars to make the code more readable */
    const int dstW  = c->dstW;
    int dstH        = c->dstH;
    const int flags = c->flags;
    const enum AVPixelFormat dstFormat = c->dstFormat;
    int32_t *vLumFilterPos = c->vLumFilterPos;
    int32_t *vChrFilterPos = c->vChrFilterPos;
    const int vLumFilterSize = c->vLumFilterSize;
    const int vChrFilterSize = c->vChrFilterSize;
    /* ... local copies of the yuv2plane1 / yuv2planeX / yuv2nv12cX /
     *     yuv2packed1 / yuv2packed2 / yuv2packedX / yuv2anyX output
     *     function pointers ... */
    const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
    const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
    int should_dither = isNBPS(c->srcFormat) || is16BPS(c->srcFormat);
    int lastDstY;
    int hasLumHoles = 1;
    int hasChrHoles = 1;

    /* vars which will change and which we need to store back in the context */
    int dstY         = c->dstY;
    int lastInLumBuf = c->lastInLumBuf;
    int lastInChrBuf = c->lastInChrBuf;

    int lumStart = 0;
    int lumEnd   = c->descIndex[0];
    int chrStart = lumEnd;
    int chrEnd   = c->descIndex[1];
    int vStart   = chrEnd;
    int vEnd     = c->numDesc;
    SwsSlice *src_slice  = &c->slice[lumStart];
    SwsSlice *hout_slice = &c->slice[c->numSlice - 2];
    SwsSlice *vout_slice = &c->slice[c->numSlice - 1];
    SwsFilterDescriptor *desc = c->desc;
    int needAlpha = c->needAlpha;

    if (isPacked(c->srcFormat)) {
        src[1] = src[2] = src[3] = src[0];
        srcStride[1] = srcStride[2] = srcStride[3] = srcStride[0];
    }
    srcStride[1] *= 1 << c->vChrDrop;
    srcStride[2] *= 1 << c->vChrDrop;

    DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
                  src[0], srcStride[0], src[1], srcStride[1],
                  src[2], srcStride[2], src[3], srcStride[3],
                  dst[0], dstStride[0], dst[1], dstStride[1],
                  dst[2], dstStride[2], dst[3], dstStride[3]);
    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
                  srcSliceY, srcSliceH, dstY, dstH);
    DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
                  vLumFilterSize, vChrFilterSize);
    if (dstStride[0] & 15 || dstStride[1] & 15 ||
        dstStride[2] & 15 || dstStride[3] & 15) {
        /* ... warn only once per context ... */
        av_log(c, AV_LOG_WARNING,
               "Warning: dstStride is not aligned!\n"
               "         ->cannot do aligned memory accesses anymore\n");
    }

#if ARCH_X86
    if (   (uintptr_t)dst[0]&15 || (uintptr_t)dst[1]&15 || (uintptr_t)dst[2]&15
        || (uintptr_t)src[0]&15 || (uintptr_t)src[1]&15 || (uintptr_t)src[2]&15
        || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
        || srcStride[0]&15 || srcStride[1]&15 || srcStride[2]&15 || srcStride[3]&15) {
        /* ... print a one-time "data is not aligned" speed-loss warning ... */
    }
#endif

    if (scale_dst) {
        dstY         = dstSliceY;
        dstH         = dstY + dstSliceH;
        lastInLumBuf = -1;
        lastInChrBuf = -1;
    } else if (srcSliceY == 0) {
        /* Note the user might start scaling the picture in the middle so this
         * will not get executed. This is not really intended but works
         * currently, so people might do it. */
        dstY         = 0;
        lastInLumBuf = -1;
        lastInChrBuf = -1;
    }

    if (!should_dither) {
        c->chrDither8 = c->lumDither8 = sws_pb_64;
    }
    lastDstY = dstY;

    ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
                       yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX,
                       c->use_mmx_vfilter);

    ff_init_slice_from_src(src_slice, (uint8_t **)src, srcStride, c->srcW,
                           srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);

    ff_init_slice_from_src(vout_slice, (uint8_t **)dst, dstStride, c->dstW,
                           dstY, dstSliceH, dstY >> c->chrDstVSubSample,
                           AV_CEIL_RSHIFT(dstSliceH, c->chrDstVSubSample), scale_dst);

    if (srcSliceY == 0) {
        /* ... reset the sliceY/sliceH bookkeeping of the horizontal ring buffer ... */
        hout_slice->width = dstW;
    }
    for (; dstY < dstH; dstY++) {
        const int chrDstY = dstY >> c->chrDstVSubSample;
        int use_mmx_vfilter = c->use_mmx_vfilter;

        /* first and last source lines needed as input for this output line */
        const int firstLumSrcY  = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize,
                                        vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1),
                                                            c->dstH - 1)]);
        const int firstChrSrcY  = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);

        int lastLumSrcY  = FFMIN(c->srcH,    firstLumSrcY  + vLumFilterSize) - 1;
        int lastLumSrcY2 = FFMIN(c->srcH,    firstLumSrcY2 + vLumFilterSize) - 1;
        int lastChrSrcY  = FFMIN(c->chrSrcH, firstChrSrcY  + vChrFilterSize) - 1;
        int enough_lines;

        int i;
        int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;

        /* handle holes (FAST_BILINEAR & weird filters) */
        if (firstLumSrcY > lastInLumBuf) {
            hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
            if (hasLumHoles) {
                /* ... restart the luma planes of the horizontal ring buffer at firstLumSrcY ... */
            }
            lastInLumBuf = firstLumSrcY - 1;
        }
        if (firstChrSrcY > lastInChrBuf) {
            hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
            if (hasChrHoles) {
                /* ... restart the chroma planes of the horizontal ring buffer at firstChrSrcY ... */
            }
            lastInChrBuf = firstChrSrcY - 1;
        }

        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
                      firstLumSrcY, lastLumSrcY, lastInLumBuf);
        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
                      firstChrSrcY, lastChrSrcY, lastInChrBuf);

        /* Do we have enough lines in this slice to output the dstY line? */
        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
                       lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);

        if (!enough_lines) {
            lastLumSrcY = srcSliceY + srcSliceH - 1;
            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
                          lastLumSrcY, lastChrSrcY);
        }

        /* horizontal scaling of the newly available input lines */
        posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
        if (posY <= lastLumSrcY && !hasLumHoles) {
            firstPosY = FFMAX(firstLumSrcY, posY);
            /* ... lastPosY is limited by the lines the ring buffer can hold ... */
        } else {
            firstPosY = posY;
            lastPosY  = lastLumSrcY;
        }

        cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
        if (cPosY <= lastChrSrcY && !hasChrHoles) {
            firstCPosY = FFMAX(firstChrSrcY, cPosY);
            /* ... lastCPosY is limited in the same way ... */
        } else {
            firstCPosY = cPosY;
            lastCPosY  = lastChrSrcY;
        }

        ff_rotate_slice(hout_slice, lastPosY, lastCPosY);

        if (posY < lastLumSrcY + 1) {
            for (i = lumStart; i < lumEnd; ++i)
                desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
        }
        lastInLumBuf = lastLumSrcY;

        if (cPosY < lastChrSrcY + 1) {
            for (i = chrStart; i < chrEnd; ++i)
                desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
        }
        lastInChrBuf = lastChrSrcY;

        if (!enough_lines)
            break;  /* we can't output a dstY line so let's try with the next slice */

        /* ... pick the ff_dither_8x8_128 rows for this output line when dithering,
         *     and update the MMX dither tables on x86 ... */

        if (dstY >= c->dstH - 2) {
            /* the last few lines must not use the MMX vertical filter, so
             * re-fetch the plain C output functions */
            ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
                                     &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
            use_mmx_vfilter = 0;
            ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
                               yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX,
                               use_mmx_vfilter);
        }

        /* vertical scaling + output conversion for this line */
        for (i = vStart; i < vEnd; ++i)
            desc[i].process(c, &desc[i], dstY, 1);
    }
    if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
        /* fill the unused alpha plane with opaque values */
        int offset = lastDstY - dstSliceY;
        int length = dstW;
        int height = dstY - lastDstY;

        if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
            fillPlane16(dst[3], dstStride[3], length, height, offset,
                        1, desc->comp[3].depth, isBE(dstFormat));
        } else if (is32BPS(dstFormat)) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
            fillPlane32(dst[3], dstStride[3], length, height, offset,
                        1, desc->comp[3].depth, isBE(dstFormat),
                        desc->flags & AV_PIX_FMT_FLAG_FLOAT);
        } else
            fillPlane(dst[3], dstStride[3], length, height, offset, 255);
    }

#if HAVE_MMXEXT_INLINE
    if (av_get_cpu_flags() & AV_CPU_FLAG_MMXEXT)
        __asm__ volatile ("sfence" ::: "memory");
#endif

    /* store changed local vars back in the context */
    c->dstY         = dstY;
    c->lastInLumBuf = lastInLumBuf;
    c->lastInChrBuf = lastInChrBuf;

    return dstY - lastDstY;
}
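For each output line, swscale() derives the window of source lines it needs from the vertical filter: firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]) and lastLumSrcY = FFMIN(srcH, firstLumSrcY + vLumFilterSize) - 1; the line is only produced once the current input slice covers that window, otherwise the horizontally scaled lines are buffered until the next slice arrives. A tiny stand-alone sketch of that window computation with made-up filter positions:

/* Sketch of the "which source lines does output line dstY need?" computation
 * from the loop above. The filter positions are invented demo values; only
 * the FFMAX/FFMIN window logic mirrors swscale(). */
#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))
#define FFMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    const int srcH = 10, vLumFilterSize = 4;
    /* e.g. a 2x vertical downscale: output line y starts reading near 2*y - 1 */
    const int vLumFilterPos[5] = { -1, 1, 3, 5, 7 };

    for (int dstY = 0; dstY < 5; dstY++) {
        int firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        int lastLumSrcY  = FFMIN(srcH, firstLumSrcY + vLumFilterSize) - 1;
        printf("dstY=%d needs source lines %d..%d\n",
               dstY, firstLumSrcY, lastLumSrcY);
    }
    return 0;
}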
av_cold void ff_sws_init_range_convert(SwsContext *c)
{
    c->lumConvertRange = NULL;
    c->chrConvertRange = NULL;
    if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) {
        if (c->dstBpc <= 14) {
            if (c->srcRange) {
                c->lumConvertRange = lumRangeFromJpeg_c;
                c->chrConvertRange = chrRangeFromJpeg_c;
            } else {
                c->lumConvertRange = lumRangeToJpeg_c;
                c->chrConvertRange = chrRangeToJpeg_c;
            }
        } else {
            if (c->srcRange) {
                c->lumConvertRange = lumRangeFromJpeg16_c;
                c->chrConvertRange = chrRangeFromJpeg16_c;
            } else {
                c->lumConvertRange = lumRangeToJpeg16_c;
                c->chrConvertRange = chrRangeToJpeg16_c;
            }
        }
    }
}

static av_cold void sws_init_swscale(SwsContext *c)
{
    enum AVPixelFormat srcFormat = c->srcFormat;

    ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
                             &c->yuv2nv12cX, &c->yuv2packed1,
                             &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);

    ff_sws_init_input_funcs(c);

    if (c->srcBpc == 8) {
        if (c->dstBpc <= 14) {
            c->hyScale = c->hcScale = hScale8To15_c;
            if (c->flags & SWS_FAST_BILINEAR) {
                c->hyscale_fast = ff_hyscale_fast_c;
                c->hcscale_fast = ff_hcscale_fast_c;
            }
        } else {
            c->hyScale = c->hcScale = hScale8To19_c;
        }
    } else {
        c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
                                                 : hScale16To15_c;
    }

    ff_sws_init_range_convert(c);

    if (!(isGray(srcFormat) || isGray(c->dstFormat) ||
          srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
        c->needs_hcscale = 1;
}

void ff_sws_init_scale(SwsContext *c)
{
    sws_init_swscale(c);

#if ARCH_PPC
    ff_sws_init_swscale_ppc(c);
#elif ARCH_X86
    ff_sws_init_swscale_x86(c);
#elif ARCH_AARCH64
    ff_sws_init_swscale_aarch64(c);
#elif ARCH_ARM
    ff_sws_init_swscale_arm(c);
#elif ARCH_LOONGARCH64
    ff_sws_init_swscale_loongarch(c);
#elif ARCH_RISCV
    ff_sws_init_swscale_riscv(c);
#endif
}
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt,
                                const int linesizes[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int i;

    for (i = 0; i < 4; i++) {
        int plane = desc->comp[i].plane;
        if (!data[plane] || !linesizes[plane])
            return 0;
    }

    return 1;
}
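check_image_pointers() rejects a call when any plane the pixel format actually uses has a NULL pointer or a zero linesize. A quick stand-alone probe of which plane each component lives in, using the public pixdesc API; the format choice is just an example.

/* Print which data plane each component of a pixel format lives in. */
#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
    for (int i = 0; i < desc->nb_components; i++)
        printf("component %d -> plane %d\n", i, desc->comp[i].plane);
    return 0;
}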
static void xyz12Torgb48(struct SwsContext *c, uint16_t *dst,
                         const uint16_t *src, int stride, int h)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
    int xp, yp;

    for (yp = 0; yp < h; yp++) {
        for (xp = 0; xp + 2 < stride; xp += 3) {
            int x, y, z, r, g, b;

            /* ... read the 12-in-16-bit X, Y, Z samples with AV_RB16()/AV_RL16()
             *     depending on AV_PIX_FMT_FLAG_BE, then linearize them ... */
            x = c->xyzgamma[x >> 4];
            y = c->xyzgamma[y >> 4];
            z = c->xyzgamma[z >> 4];

            /* convert from XYZ to linear RGB (Q12 fixed-point matrix) */
            r = c->xyz2rgb_matrix[0][0] * x +
                c->xyz2rgb_matrix[0][1] * y +
                c->xyz2rgb_matrix[0][2] * z >> 12;
            g = c->xyz2rgb_matrix[1][0] * x +
                c->xyz2rgb_matrix[1][1] * y +
                c->xyz2rgb_matrix[1][2] * z >> 12;
            b = c->xyz2rgb_matrix[2][0] * x +
                c->xyz2rgb_matrix[2][1] * y +
                c->xyz2rgb_matrix[2][2] * z >> 12;

            /* ... clip r, g, b to 12-bit range, re-apply gamma via the rgbgamma
             *     LUT and scale back to 16 bits ... */
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst + xp + 0, c->rgbgamma[r] << 4);
                AV_WB16(dst + xp + 1, c->rgbgamma[g] << 4);
                AV_WB16(dst + xp + 2, c->rgbgamma[b] << 4);
            } else {
                AV_WL16(dst + xp + 0, c->rgbgamma[r] << 4);
                AV_WL16(dst + xp + 1, c->rgbgamma[g] << 4);
                AV_WL16(dst + xp + 2, c->rgbgamma[b] << 4);
            }
        }
        src += stride;
        dst += stride;
    }
}
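Per pixel the conversion is: drop to 12 bits, linearize through the xyzgamma LUT, apply a Q12 fixed-point 3x3 XYZ-to-RGB matrix, clip, re-encode through the rgbgamma LUT and widen back to 16 bits. Below is a floating-point restatement of that idea for one pixel; the matrix is the textbook XYZ(D65) to linear sRGB one and the 2.6/2.2 gamma pair is the conventional DCI/display pairing, both assumptions of this sketch rather than values read out of swscale.c.

/* Floating-point sketch of one XYZ -> RGB pixel, mirroring the fixed-point
 * path above. Matrix and gamma values are assumptions of this sketch. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double m[3][3] = {
        {  3.2404542, -1.5371385, -0.4985314 },
        { -0.9692660,  1.8760108,  0.0415560 },
        {  0.0556434, -0.2040259,  1.0572252 },
    };
    /* gamma-encoded XYZ sample, normalized to 0..1 (like the 12-bit input) */
    double xyz[3] = { 0.50, 0.45, 0.40 }, rgb[3];

    for (int i = 0; i < 3; i++)
        xyz[i] = pow(xyz[i], 2.6);               /* linearize ("xyzgamma") */

    for (int i = 0; i < 3; i++) {
        double v = m[i][0] * xyz[0] + m[i][1] * xyz[1] + m[i][2] * xyz[2];
        v = v < 0.0 ? 0.0 : (v > 1.0 ? 1.0 : v); /* clip, like the 12-bit clamp */
        rgb[i] = pow(v, 1.0 / 2.2);              /* re-encode ("rgbgamma") */
    }
    printf("R=%.3f G=%.3f B=%.3f\n", rgb[0], rgb[1], rgb[2]);
    return 0;
}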
static void rgb48Toxyz12(struct SwsContext *c, uint16_t *dst,
                         const uint16_t *src, int stride, int h)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int xp, yp;

    for (yp = 0; yp < h; yp++) {
        for (xp = 0; xp + 2 < stride; xp += 3) {
            int x, y, z, r, g, b;

            /* ... read the 16-bit R, G, B samples with AV_RB16()/AV_RL16()
             *     depending on AV_PIX_FMT_FLAG_BE ... */
            r = c->rgbgammainv[r >> 4];
            g = c->rgbgammainv[g >> 4];
            b = c->rgbgammainv[b >> 4];

            /* convert from linear RGB to XYZ (Q12 fixed-point matrix) */
            x = c->rgb2xyz_matrix[0][0] * r +
                c->rgb2xyz_matrix[0][1] * g +
                c->rgb2xyz_matrix[0][2] * b >> 12;
            y = c->rgb2xyz_matrix[1][0] * r +
                c->rgb2xyz_matrix[1][1] * g +
                c->rgb2xyz_matrix[1][2] * b >> 12;
            z = c->rgb2xyz_matrix[2][0] * r +
                c->rgb2xyz_matrix[2][1] * g +
                c->rgb2xyz_matrix[2][2] * b >> 12;

            /* ... clip x, y, z to 12-bit range, re-encode via the xyzgammainv
             *     LUT and scale back to 16 bits ... */
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst + xp + 0, c->xyzgammainv[x] << 4);
                AV_WB16(dst + xp + 1, c->xyzgammainv[y] << 4);
                AV_WB16(dst + xp + 2, c->xyzgammainv[z] << 4);
            } else {
                AV_WL16(dst + xp + 0, c->xyzgammainv[x] << 4);
                AV_WL16(dst + xp + 1, c->xyzgammainv[y] << 4);
                AV_WL16(dst + xp + 2, c->xyzgammainv[z] << 4);
            }
        }
        src += stride;
        dst += stride;
    }
}
static void update_palette(SwsContext *c, const uint32_t *pal)
{
    for (int i = 0; i < 256; i++) {
        int r, g, b, y, u, v, a = 0xff;
        if (c->srcFormat == AV_PIX_FMT_PAL8) {
            uint32_t p = pal[i];
            a = (p >> 24) & 0xFF;
            r = (p >> 16) & 0xFF;
            g = (p >>  8) & 0xFF;
            b =  p        & 0xFF;
        } else if (c->srcFormat == AV_PIX_FMT_RGB8) {
            r = ( i >> 5     ) * 36;
            g = ((i >> 2) & 7) * 36;
            b = ( i       & 3) * 85;
        } else if (c->srcFormat == AV_PIX_FMT_BGR8) {
            b = ( i >> 6     ) * 85;
            g = ((i >> 3) & 7) * 36;
            r = ( i       & 7) * 36;
        } else if (c->srcFormat == AV_PIX_FMT_RGB4_BYTE) {
            r = ( i >> 3     ) * 255;
            g = ((i >> 1) & 3) * 85;
            b = ( i       & 1) * 255;
        } else if (c->srcFormat == AV_PIX_FMT_GRAY8 ||
                   c->srcFormat == AV_PIX_FMT_GRAY8A) {
            r = g = b = i;
        } else {
            av_assert1(c->srcFormat == AV_PIX_FMT_BGR4_BYTE);
            b = ( i >> 3     ) * 255;
            g = ((i >> 1) & 3) * 85;
            r = ( i       & 1) * 255;
        }
#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
        y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        c->pal_yuv[i] = y + (u << 8) + (v << 16) + ((unsigned)a << 24);

        switch (c->dstFormat) {
        case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i] = r + (g << 8) + (b << 16) + ((unsigned)a << 24);
            break;
        case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
            c->pal_rgb[i] = a + (r << 8) + (g << 16) + ((unsigned)b << 24);
            break;
        case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i] = a + (b << 8) + (g << 16) + ((unsigned)r << 24);
            break;
        case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
        default:
            c->pal_rgb[i] = b + (g << 8) + (r << 16) + ((unsigned)a << 24);
        }
    }
}
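The RY/GY/BY etc. macros are the BT.601 weights scaled to limited range (the 219/255 and 224/255 factors) and to Q15 fixed point, with the +16 and +128 offsets folded in as rounding constants. A small stand-alone check that white maps to 235 and black to 16 through the same fixed-point expression as the palette setup above:

/* Numerical check of the BT.601 limited-range luma constants. */
#include <stdio.h>

#define RGB2YUV_SHIFT 15
#define BY ((int)(0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ((int)(0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ((int)(0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))

static int rgb_to_y(int r, int g, int b)
{
    /* 33 << (SHIFT-1) is the +16.5 offset expressed at Q15 scale */
    return (RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
}

int main(void)
{
    printf("white -> y = %d\n", rgb_to_y(255, 255, 255)); /* 235 */
    printf("black -> y = %d\n", rgb_to_y(0, 0, 0));       /* 16  */
    printf("grey  -> y = %d\n", rgb_to_y(128, 128, 128)); /* ~126 */
    return 0;
}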
static int scale_internal(SwsContext *c,
                          const uint8_t * const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t * const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH);

static int scale_gamma(SwsContext *c,
                       const uint8_t * const srcSlice[], const int srcStride[],
                       int srcSliceY, int srcSliceH,
                       uint8_t * const dstSlice[], const int dstStride[],
                       int dstSliceY, int dstSliceH)
{
    int ret = scale_internal(c->cascaded_context[0],
                             srcSlice, srcStride, srcSliceY, srcSliceH,
                             c->cascaded_tmp, c->cascaded_tmpStride, 0, c->srcH);

    if (ret < 0)
        return ret;

    if (c->cascaded_context[2])
        ret = scale_internal(c->cascaded_context[1],
                             (const uint8_t * const *)c->cascaded_tmp,
                             c->cascaded_tmpStride, srcSliceY, srcSliceH,
                             c->cascaded1_tmp, c->cascaded1_tmpStride, 0, c->dstH);
    else
        ret = scale_internal(c->cascaded_context[1],
                             (const uint8_t * const *)c->cascaded_tmp,
                             c->cascaded_tmpStride, srcSliceY, srcSliceH,
                             dstSlice, dstStride, dstSliceY, dstSliceH);

    if (ret < 0)
        return ret;

    if (c->cascaded_context[2]) {
        ret = scale_internal(c->cascaded_context[2],
                             (const uint8_t * const *)c->cascaded1_tmp,
                             c->cascaded1_tmpStride,
                             c->cascaded_context[1]->dstY - ret,
                             c->cascaded_context[1]->dstY,
                             dstSlice, dstStride, dstSliceY, dstSliceH);
    }
    return ret;
}
static int scale_cascaded(SwsContext *c,
                          const uint8_t * const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t * const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH)
{
    int ret = scale_internal(c->cascaded_context[0],
                             srcSlice, srcStride, srcSliceY, srcSliceH,
                             c->cascaded_tmp, c->cascaded_tmpStride,
                             0, c->cascaded_context[0]->dstH);
    if (ret < 0)
        return ret;

    ret = scale_internal(c->cascaded_context[1],
                         (const uint8_t * const *)c->cascaded_tmp, c->cascaded_tmpStride,
                         0, c->cascaded_context[0]->dstH,
                         dstSlice, dstStride, dstSliceY, dstSliceH);
    return ret;
}
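scale_cascaded() runs two internal SwsContexts back to back through a temporary buffer. Roughly the same effect can be produced by hand with the public API by chaining two scalers; the sketch below does that with arbitrary sizes and formats and minimal error handling, and it is only an approximation of what libswscale wires up internally.

/* Two-stage scaling via the public API: stage 1 converts the pixel format,
 * stage 2 resizes. Dimensions, formats and flags are example values. */
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

int two_stage_scale(const uint8_t *const src[4], const int srcStride[4],
                    uint8_t *const dst[4], const int dstStride[4])
{
    uint8_t *tmp[4];
    int tmpStride[4];
    int ret = av_image_alloc(tmp, tmpStride, 1920, 1080, AV_PIX_FMT_YUV420P, 32);
    if (ret < 0)
        return ret;

    struct SwsContext *s1 = sws_getContext(1920, 1080, AV_PIX_FMT_RGB24,
                                           1920, 1080, AV_PIX_FMT_YUV420P,
                                           SWS_POINT, NULL, NULL, NULL);
    struct SwsContext *s2 = sws_getContext(1920, 1080, AV_PIX_FMT_YUV420P,
                                           1280,  720, AV_PIX_FMT_YUV420P,
                                           SWS_BICUBIC, NULL, NULL, NULL);
    if (!s1 || !s2) {
        sws_freeContext(s1);
        sws_freeContext(s2);
        av_freep(&tmp[0]);
        return -1;
    }

    sws_scale(s1, src, srcStride, 0, 1080, tmp, tmpStride);
    sws_scale(s2, (const uint8_t *const *)tmp, tmpStride, 0, 1080,
              dst, dstStride);

    sws_freeContext(s1);
    sws_freeContext(s2);
    av_freep(&tmp[0]);
    return 0;
}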
static int scale_internal(SwsContext *c,
                          const uint8_t * const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t * const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH)
{
    const int scale_dst = dstSliceY > 0 || dstSliceH < c->dstH;
    const int frame_start = scale_dst || !c->sliceDir;
    int i, ret;
    const uint8_t *src2[4];
    uint8_t *dst2[4];
    int macro_height_src = isBayer(c->srcFormat) ? 2 : (1 << c->chrSrcVSubSample);
    int macro_height_dst = isBayer(c->dstFormat) ? 2 : (1 << c->chrDstVSubSample);
    /* copies of the strides, so they can safely be modified */
    int srcStride2[4];
    int dstStride2[4];
    int srcSliceY_internal = srcSliceY;

    if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
        av_log(c, AV_LOG_ERROR,
               "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
        return AVERROR(EINVAL);
    }

    if ((srcSliceY  & (macro_height_src - 1)) ||
        ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != c->srcH) ||
        srcSliceY + srcSliceH > c->srcH ||
        (isBayer(c->srcFormat) && srcSliceH <= 1)) {
        /* ... invalid source slice parameters ... */
        return AVERROR(EINVAL);
    }

    if ((dstSliceY  & (macro_height_dst - 1)) ||
        ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != c->dstH) ||
        dstSliceY + dstSliceH > c->dstH) {
        /* ... invalid destination slice parameters ... */
        return AVERROR(EINVAL);
    }

    /* ... validate source and destination plane pointers with check_image_pointers() ... */

    if (c->gamma_flag && c->cascaded_context[0])
        return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                           dstSlice, dstStride, dstSliceY, dstSliceH);

    if (c->cascaded_context[0] && srcSliceY == 0 &&
        srcSliceH == c->cascaded_context[0]->srcH)
        return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                              dstSlice, dstStride, dstSliceY, dstSliceH);

    /* ... at the start of a new frame, reset the error-diffusion dither state: ... */
        for (i = 0; i < 4; i++)
            memset(c->dither_error[i], 0,
                   sizeof(c->dither_error[0][0]) * (c->dstW + 2));
    memcpy(src2,       srcSlice,  sizeof(src2));
    memcpy(dst2,       dstSlice,  sizeof(dst2));
    memcpy(srcStride2, srcStride, sizeof(srcStride2));
    memcpy(dstStride2, dstStride, sizeof(dstStride2));

    if (frame_start && !scale_dst) {
        if (srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
            av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
            return AVERROR(EINVAL);
        }
        c->sliceDir = (srcSliceY == 0) ? 1 : -1;
    } else if (scale_dst)
        c->sliceDir = 1;

    /* ... for paletted input, rebuild c->pal_yuv / c->pal_rgb via update_palette() ... */

    if (c->src0Alpha && !c->dst0Alpha && isALPHA(c->dstFormat)) {
        uint8_t *base;
        int x, y;

        av_fast_malloc(&c->rgb0_scratch, &c->rgb0_scratch_allocated,
                       FFABS(srcStride[0]) * srcSliceH + 32);
        if (!c->rgb0_scratch)
            return AVERROR(ENOMEM);

        base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH - 1)
                                : c->rgb0_scratch;
        /* copy the source and force the alpha channel to opaque */
        for (y = 0; y < srcSliceH; y++) {
            memcpy(base + srcStride[0] * y, src2[0] + srcStride[0] * y, 4 * c->srcW);
            for (x = c->src0Alpha - 1; x < 4 * c->srcW; x += 4) {
                base[srcStride[0] * y + x] = 0xFF;
            }
        }
        src2[0] = base;
    }
    if (c->srcXYZ && !(c->dstXYZ && c->srcW == c->dstW && c->srcH == c->dstH)) {
        uint8_t *base;

        av_fast_malloc(&c->xyz_scratch, &c->xyz_scratch_allocated,
                       FFABS(srcStride[0]) * srcSliceH + 32);
        if (!c->xyz_scratch)
            return AVERROR(ENOMEM);

        base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH - 1)
                                : c->xyz_scratch;

        xyz12Torgb48(c, (uint16_t *)base, (const uint16_t *)src2[0],
                     srcStride[0] / 2, srcSliceH);
        src2[0] = base;
    }

    if (c->sliceDir != 1) {
        /* slices go from bottom to top => we flip the image internally */
        for (i = 0; i < 4; i++) {
            srcStride2[i] *= -1;
            dstStride2[i] *= -1;
        }

        src2[0] += (srcSliceH - 1) * srcStride[0];
        src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
        src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += ( c->dstH                         - 1) * dstStride[0];
        dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1];
        dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2];
        dst2[3] += ( c->dstH                         - 1) * dstStride[3];

        srcSliceY_internal = c->srcH - srcSliceY - srcSliceH;
    }
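The flip above is the same trick callers use to hand swscale a bottom-up buffer (for example a BMP-style image): point the plane at its last line and use a negative stride. The helper below is illustrative and not part of swscale.c.

/* Wrap a bottom-up packed buffer so it can be fed to sws_scale() top-down. */
#include <stdint.h>
#include <stddef.h>

static void wrap_bottom_up(uint8_t *bottom_up_pixels, int positive_stride,
                           int height, const uint8_t *data[4], int stride[4])
{
    data[0]   = bottom_up_pixels + (size_t)(height - 1) * positive_stride;
    stride[0] = -positive_stride;
    data[1]   = data[2] = data[3] = NULL;
    stride[1] = stride[2] = stride[3] = 0;
}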
    /* ... reset_ptr() drops plane pointers that the pixel format does not use ... */

    if (c->convert_unscaled) {
        int offset  = srcSliceY_internal;
        int slice_h = srcSliceH;

        /* for dst slice scaling, offset the src/dst pointers to match the
         * unscaled converters, which always write from the top of the slice */
        if (scale_dst) {
            for (i = 0; i < 4 && src2[i]; i++) {
                if (i > 0 && usePal(c->srcFormat))
                    break;
                src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) *
                           srcStride2[i];
            }

            for (i = 0; i < 4 && dst2[i]; i++) {
                if (!dst2[i] || (i > 0 && usePal(c->dstFormat)))
                    break;
                dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) *
                           dstStride2[i];
            }
            offset  = 0;
            slice_h = dstSliceH;
        }

        ret = c->convert_unscaled(c, src2, srcStride2, offset, slice_h,
                                  dst2, dstStride2);
        if (scale_dst)
            dst2[0] += dstSliceY * dstStride2[0];
    } else {
        ret = swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
                      dst2, dstStride2, dstSliceY, dstSliceH);
    }

    if (c->dstXYZ && !(c->srcXYZ && c->srcW == c->dstW && c->srcH == c->dstH)) {
        uint16_t *dst16;

        if (scale_dst) {
            dst16 = (uint16_t *)dst2[0];
        } else {
            int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;

            av_assert0(dstY >= ret);
            dst16 = (uint16_t *)(dst2[0] + (dstY - ret) * dstStride2[0]);
        }

        /* convert the freshly written RGB48 output back to XYZ12, in place */
        rgb48Toxyz12(c, dst16, (const uint16_t *)dst16, dstStride2[0] / 2, ret);
    }

    /* reset slice direction at end of frame */
    if ((srcSliceY_internal + srcSliceH == c->srcH) || scale_dst)
        c->sliceDir = 0;

    return ret;
}
void sws_frame_end(struct SwsContext *c)
{
    av_frame_unref(c->frame_src);
    av_frame_unref(c->frame_dst);

    c->src_ranges.nb_ranges = 0;
}
int sws_frame_start(struct SwsContext *c, AVFrame *dst, const AVFrame *src)
{
    int ret, allocated = 0;

    /* ... reference src and dst into c->frame_src / c->frame_dst, allocating
     *     dst buffers with av_frame_get_buffer() when the caller passed an
     *     unallocated frame, and unreferencing everything again on failure ... */
}
int sws_send_slice(struct SwsContext *c, unsigned int slice_start,
                   unsigned int slice_height)
{
    /* ... record [slice_start, slice_start + slice_height) as available input
     *     via ff_range_add(&c->src_ranges, slice_start, slice_height) ... */
}

unsigned int sws_receive_slice_alignment(const struct SwsContext *c)
{
    if (c->slice_ctx)
        return c->slice_ctx[0]->dst_slice_align;

    return c->dst_slice_align;
}
int sws_receive_slice(struct SwsContext *c, unsigned int slice_start,
                      unsigned int slice_height)
{
    unsigned int align = sws_receive_slice_alignment(c);
    uint8_t *dst[4];

    /* wait until complete input has been received */
    if (!(c->src_ranges.nb_ranges == 1       &&
          c->src_ranges.ranges[0].start == 0 &&
          c->src_ranges.ranges[0].len == c->srcH))
        return AVERROR(EAGAIN);

    if ((slice_start > 0 || slice_height < c->dstH) &&
        (slice_start % align || slice_height % align)) {
        av_log(c, AV_LOG_ERROR,
               "Incorrectly aligned output: %u/%u not multiples of %u\n",
               slice_start, slice_height, align);
        return AVERROR(EINVAL);
    }

    if (c->slicethread) {
        int nb_jobs = c->slice_ctx[0]->dither == SWS_DITHER_ED ? 1 : c->nb_slice_ctx;
        int ret = 0;

        c->dst_slice_start  = slice_start;
        c->dst_slice_height = slice_height;

        avpriv_slicethread_execute(c->slicethread, nb_jobs, 0);

        for (int i = 0; i < c->nb_slice_ctx; i++) {
            if (c->slice_err[i] < 0) {
                ret = c->slice_err[i];
                break;
            }
        }

        memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));

        return ret;
    }

    for (int i = 0; i < FF_ARRAY_ELEMS(dst) && c->frame_dst->data[i]; i++) {
        ptrdiff_t offset = c->frame_dst->linesize[i] *
                           (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
        dst[i] = FF_PTR_ADD(c->frame_dst->data[i], offset);
    }

    return scale_internal(c, (const uint8_t * const *)c->frame_src->data,
                          c->frame_src->linesize, 0, c->srcH,
                          dst, c->frame_dst->linesize, slice_start, slice_height);
}
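sws_receive_slice() belongs to the newer frame/slice API: the caller binds frames with sws_frame_start(), declares available input rows with sws_send_slice(), then requests output rows (partial requests must respect sws_receive_slice_alignment()) and finishes with sws_frame_end(). A hedged sketch of that call sequence, assuming the frames already have format, width and height set:

/* Sketch of the frame/slice API exercised above; error handling is brief. */
#include <libavutil/frame.h>
#include <libswscale/swscale.h>

int scale_whole_frame_via_slices(struct SwsContext *c, AVFrame *dst,
                                 const AVFrame *src)
{
    int ret = sws_frame_start(c, dst, src);
    if (ret < 0)
        return ret;

    /* declare all input rows available at once ... */
    ret = sws_send_slice(c, 0, src->height);
    if (ret >= 0) {
        /* ... and request the whole output in one go; partial requests would
         * have to be multiples of sws_receive_slice_alignment(c) */
        ret = sws_receive_slice(c, 0, dst->height);
    }

    sws_frame_end(c);
    return ret < 0 ? ret : 0;
}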
int attribute_align_arg sws_scale(struct SwsContext *c,
                                  const uint8_t * const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t * const dst[],
                                  const int dstStride[])
{
    if (c->nb_slice_ctx)
        c = c->slice_ctx[0];

    return scale_internal(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                          dst, dstStride, 0, c->dstH);
}
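A minimal example of driving this entry point from user code; the sizes, formats and flags are arbitrary choices for the sketch.

/* Convert one RGB24 image to YUV420P with the classic sws_scale() API. */
#include <libswscale/swscale.h>

int convert_rgb_to_yuv420(const uint8_t *rgb, int rgb_linesize,
                          uint8_t *const dst_data[4], const int dst_linesize[4],
                          int w, int h)
{
    struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_RGB24,
                                            w, h, AV_PIX_FMT_YUV420P,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return -1;

    const uint8_t *src_data[4] = { rgb, NULL, NULL, NULL };
    const int src_linesize[4]  = { rgb_linesize, 0, 0, 0 };

    /* whole image in one call: the slice starts at line 0 and is h lines tall */
    int out_h = sws_scale(sws, (const uint8_t *const *)src_data, src_linesize,
                          0, h, dst_data, dst_linesize);

    sws_freeContext(sws);
    return out_h == h ? 0 : -1;
}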
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
                         int nb_jobs, int nb_threads)
{
    SwsContext *parent = priv;
    SwsContext      *c = parent->slice_ctx[threadnr];
    int err = 0;

    /* ... split parent->dst_slice_height into nb_jobs bands, each a multiple of
     *     c->dst_slice_align, and derive this job's slice_start / slice_end ... */

    if (slice_end > slice_start) {
        uint8_t *dst[4] = { NULL };

        for (int i = 0; i < FF_ARRAY_ELEMS(dst) && parent->frame_dst->data[i]; i++) {
            const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
            const ptrdiff_t offset = parent->frame_dst->linesize[i] *
                (ptrdiff_t)((slice_start + parent->dst_slice_start) >> vshift);

            dst[i] = parent->frame_dst->data[i] + offset;
        }

        err = scale_internal(c, (const uint8_t * const *)parent->frame_src->data,
                             parent->frame_src->linesize, 0, c->srcH,
                             dst, parent->frame_dst->linesize,
                             parent->dst_slice_start + slice_start,
                             slice_end - slice_start);
    }

    parent->slice_err[threadnr] = err;
}
static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
#define AV_LOG_WARNING
Something somehow does not look correct.
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
AVPixelFormat
Pixel format.
int sliceH
number of lines
static void hScale16To19_c(SwsContext *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
#define u(width, name, range_min, range_max)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
void ff_hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc)
int ff_rotate_slice(SwsSlice *s, int lum, int chr)
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
SwsPlane plane[MAX_SLICE_PLANES]
color planes
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
Execute slice threading.
This structure describes decoded (raw) audio or video data.
static void hScale8To15_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
void ff_sws_init_input_funcs(SwsContext *c)
Struct which holds all necessary data for processing a slice.
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
#define AV_PIX_FMT_RGB32_1
void(* filter)(uint8_t *src, int stride, int qscale)
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
#define DEBUG_BUFFERS(...)
static int scale_internal(SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
static atomic_int cpu_flags
int sws_send_slice(struct SwsContext *c, unsigned int slice_start, unsigned int slice_height)
Indicate that a horizontal slice of input data is available in the source frame previously provided t...
static int scale_cascaded(SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
static void xyz12Torgb48(struct SwsContext *c, uint16_t *dst, const uint16_t *src, int stride, int h)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int sws_receive_slice(struct SwsContext *c, unsigned int slice_start, unsigned int slice_height)
Request a horizontal slice of the output data to be written into the frame previously provided to sws...
#define SWS_FAST_BILINEAR
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
av_cold void ff_sws_init_swscale_aarch64(SwsContext *c)
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
static void frame_start(MpegEncContext *s)
static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
static double val(void *priv, double ch)
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
av_cold void ff_sws_init_swscale_loongarch(SwsContext *c)
static av_cold void sws_init_swscale(SwsContext *c)
int sws_frame_start(struct SwsContext *c, AVFrame *dst, const AVFrame *src)
Initialize the scaling process for a given pair of source/destination frames.
#define AV_CEIL_RSHIFT(a, b)
av_cold void ff_sws_init_swscale_arm(SwsContext *c)
static enum AVPixelFormat pix_fmt
int width
Slice line width.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_RL16(p)
static int scale_gamma(SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
static void lumRangeFromJpeg_c(int16_t *dst, int width)
void ff_init_vscale_pfn(SwsContext *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx)
setup vertical scaler functions
#define AV_PIX_FMT_BGR32_1
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
av_cold void ff_sws_init_range_convert(SwsContext *c)
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width, int height, int y, uint8_t val)
int available_lines
max number of lines that can be hold by this plane
av_cold void ff_sws_init_swscale_x86(SwsContext *c)
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
#define FF_PTR_ADD(ptr, off)
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
int ff_range_add(RangeList *r, unsigned int start, unsigned int len)
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
#define attribute_align_arg
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
static void update_palette(SwsContext *c, const uint32_t *pal)
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
void ff_sws_init_scale(SwsContext *c)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define DECLARE_ALIGNED(n, t, v)
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian)
static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
static void hScale16To15_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
struct SwsContext ** slice_ctx
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
void ff_hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc)
static void fillPlane32(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian, int is_float)
int sws_scale_frame(struct SwsContext *c, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
av_cold void ff_sws_init_swscale_riscv(SwsContext *c)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
unsigned int sws_receive_slice_alignment(const struct SwsContext *c)
Get the alignment required for slices.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt, const int linesizes[4])
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Struct which defines a slice of an image to be scaled or an output for a scaled slice.
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
int ff_init_slice_from_src(SwsSlice *s, uint8_t *src[4], int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative)
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
static const uint8_t sws_pb_64[8]
void sws_frame_end(struct SwsContext *c)
Finish the scaling process for a pair of source/destination frames previously submitted with sws_fram...
av_cold void ff_sws_init_swscale_ppc(SwsContext *c)
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
#define atomic_exchange_explicit(object, desired, order)
static int swscale(SwsContext *c, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[], int dstSliceY, int dstSliceH)
const uint8_t ff_dither_8x8_128[9][8]
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
static void lumRangeToJpeg_c(int16_t *dst, int width)
static void lumRangeFromJpeg16_c(int16_t *_dst, int width)
static void lumRangeToJpeg16_c(int16_t *_dst, int width)
int sliceY
index of first line
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
void ff_updateMMXDitherTables(SwsContext *c, int dstY)
static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
static void rgb48Toxyz12(struct SwsContext *c, uint16_t *dst, const uint16_t *src, int stride, int h)
#define AV_RB16(p)