24 #define _DEFAULT_SOURCE
25 #define _SVID_SOURCE // needed for MAP_ANONYMOUS
26 #define _DARWIN_C_SOURCE // needed for MAP_ANON
33 #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
34 #define MAP_ANONYMOUS MAP_ANON
297 int flags, const double *param)
320 int filterSize, int16_t *filter,
329 if ((c->srcBpc == 8) && (c->dstBpc <= 14)) {
330 int16_t *filterCopy = NULL;
331 if (filterSize > 4) {
334 memcpy(filterCopy, filter, dstW * filterSize * sizeof(int16_t));
338 for (i = 0; i + 16 <= dstW; i += 16) {
339 FFSWAP(int, filterPos[i + 2], filterPos[i + 4]);
340 FFSWAP(int, filterPos[i + 3], filterPos[i + 5]);
341 FFSWAP(int, filterPos[i + 10], filterPos[i + 12]);
342 FFSWAP(int, filterPos[i + 11], filterPos[i + 13]);
344 if (filterSize > 4) {
346 for (i = 0; i + 16 <= dstW; i += 16) {
348 for (k = 0; k + 4 <= filterSize; k += 4) {
349 for (j = 0; j < 16; ++j) {
350 int from = (i + j) * filterSize + k;
351 int to = i * filterSize + j * 4 + k * 16;
352 memcpy(&filter[to], &filterCopy[from], 4 * sizeof(int16_t));
357 for (; i < dstW; i += 4) {
359 int rem = dstW - i >= 4 ? 4 : dstW - i;
360 for (k = 0; k + 4 <= filterSize; k += 4) {
361 for (j = 0; j < rem; ++j) {
362 int from = (i + j) * filterSize + k;
363 int to = i * filterSize + j * 4 + k * 4;
364 memcpy(&filter[to], &filterCopy[from], 4 * sizeof(int16_t));
398 return ((d * dist + c) * dist + b) * dist + a;
401 b + 2.0 * c + 3.0 * d,
403 -b - 3.0 * c - 6.0 * d,
409 if (pos == -1 || pos <= -513) {
410 pos = (128 << chr_subsample) - 128;
413 return pos >> chr_subsample;
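The return statement at line 398 evaluates the spline's cubic segment in Horner form, so only three multiply-adds are needed per sample. A minimal standalone sketch of that evaluation order (the helper name and example values are mine, not part of libswscale):

```c
#include <stdio.h>

/* Evaluate a + b*x + c*x^2 + d*x^3 using Horner's rule,
 * the same nesting as the return statement at line 398. */
static double eval_cubic(double a, double b, double c, double d, double x)
{
    return ((d * x + c) * x + b) * x + a;
}

int main(void)
{
    /* the identity segment a = 0, b = 1, c = d = 0 gives f(x) = x */
    printf("%f\n", eval_cubic(0.0, 1.0, 0.0, 0.0, 0.75));
    return 0;
}
```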
430 { SWS_POINT, "nearest neighbor / point", -1 },
433 { SWS_X, "experimental", 8 },
437 int *outFilterSize, int xInc, int srcW,
438 int dstW, int filterAlign, int one,
441 double param[2], int srcPos, int dstPos)
458 if (FFABS(xInc - 0x10000) < 10 && srcPos == dstPos) {
464 for (i = 0; i < dstW; i++) {
475 xDstInSrc = ((dstPos*(int64_t)xInc)>>8) - ((srcPos*0x8000LL)>>7);
476 for (i = 0; i < dstW; i++) {
477 int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16;
479 (*filterPos)[i] = xx;
491 xDstInSrc = ((dstPos*(int64_t)xInc)>>8) - ((srcPos*0x8000LL)>>7);
492 for (i = 0; i < dstW; i++) {
493 int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16;
496 (*filterPos)[i] = xx;
498 for (j = 0; j < filterSize; j++) {
522 filterSize = 1 + sizeFactor;
524 filterSize = 1 + (sizeFactor * srcW + dstW - 1) / dstW;
526 filterSize = FFMIN(filterSize, srcW - 2);
527 filterSize = FFMAX(filterSize, 1);
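Lines 522-527 choose the tap count: a fixed kernel support when not downscaling, and a support widened by the (rounded-up) downscale ratio otherwise, clamped to the source width. A hedged sketch of the same arithmetic, with the up/down decision simplified to a plain width comparison:

```c
#include <stdio.h>

#define FFMIN(a, b) ((a) < (b) ? (a) : (b))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* sizeFactor is the base tap count of the chosen kernel (e.g. 4 for bicubic). */
static int filter_size(int sizeFactor, int srcW, int dstW)
{
    int filterSize;
    if (dstW >= srcW)                        /* upscaling: fixed support */
        filterSize = 1 + sizeFactor;
    else                                     /* downscaling: widen by ceil(srcW/dstW) */
        filterSize = 1 + (sizeFactor * srcW + dstW - 1) / dstW;
    filterSize = FFMIN(filterSize, srcW - 2);
    filterSize = FFMAX(filterSize, 1);
    return filterSize;
}

int main(void)
{
    printf("%d\n", filter_size(4, 1920, 640)); /* 3:1 downscale -> 13 taps */
    return 0;
}
```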
531 xDstInSrc = ((dstPos*(int64_t)xInc)>>7) - ((srcPos*0x10000LL)>>7);
532 for (i = 0; i < dstW; i++) {
533 int xx = (xDstInSrc - (filterSize - 2) * (1LL<<16)) / (1 << 17);
535 (*filterPos)[i] = xx;
536 for (j = 0; j < filterSize; j++) {
543 floatd = d * (1.0 / (1 << 30));
549 if (d >= 1LL << 31) {
556 coeff = (12 * (1 << 24) - 9 * B - 6 * C) * ddd +
557 (-18 * (1 << 24) + 12 * B + 6 * C) * dd +
558 (6 * (1 << 24) - 2 * B) * (1 << 30);
561 (6 * B + 30 * C) * dd +
562 (-12 * B - 48 * C) * d +
563 (8 * B + 24 * C) * (1 << 30);
565 coeff /= (1LL<<54)/fone;
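Lines 556-563 are the two branches of the Mitchell-Netravali bicubic kernel evaluated in fixed point (the `1 << 24` / `1 << 30` factors are scaling, and the division by `(1LL<<54)/fone` rescales to the filter's `fone` unit). For reference, the same kernel in plain floating point; this is a textbook restatement, not the library code:

```c
#include <math.h>
#include <stdio.h>

/* Mitchell-Netravali family; B = C = 1/3 is the classic Mitchell filter. */
static double bicubic_weight(double B, double C, double x)
{
    double ax = fabs(x);
    if (ax < 1.0)
        return ((12 - 9*B - 6*C) * ax*ax*ax +
                (-18 + 12*B + 6*C) * ax*ax +
                (6 - 2*B)) / 6.0;
    if (ax < 2.0)
        return ((-B - 6*C) * ax*ax*ax +
                (6*B + 30*C) * ax*ax +
                (-12*B - 48*C) * ax +
                (8*B + 24*C)) / 6.0;
    return 0.0;
}

int main(void)
{
    printf("%f\n", bicubic_weight(1.0/3, 1.0/3, 0.0)); /* center tap */
    return 0;
}
```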
571 c = cos(floatd * M_PI);
578 coeff = (c * 0.5 + 0.5) * fone;
581 if (d2 * xInc < -(1LL << (29 + 16)))
582 coeff = 1.0 * (1LL << (30 + 16));
583 else if (d2 * xInc < (1LL << (29 + 16)))
584 coeff = -d2 * xInc + (1LL << (29 + 16));
587 coeff *= fone >> (30 + 16);
590 coeff = exp2(-p * floatd * floatd) * fone;
592 coeff = (d ? sin(floatd * M_PI) / (floatd * M_PI) : 1.0) * fone;
596 (floatd * floatd * M_PI * M_PI / p) : 1.0) * fone;
600 coeff = (1 << 30) - d;
605 double p = -2.196152422706632;
614 xDstInSrc += 2LL * xInc;
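The sinc and Lanczos branches around lines 592-596 compute a windowed sinc, again in fixed point. In real arithmetic the Lanczos weight is sinc(x)·sinc(x/p) inside |x| < p and zero outside, where p is the lobe count (commonly 3). A float sketch with illustrative names of my own choosing:

```c
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Lanczos weight: sinc(x) * sinc(x/p), zero outside |x| < p. */
static double lanczos_weight(double x, double p)
{
    if (x == 0.0)
        return 1.0;
    if (fabs(x) >= p)
        return 0.0;
    return (sin(x * M_PI) / (x * M_PI)) *
           (sin(x * M_PI / p) / (x * M_PI / p));
}

int main(void)
{
    printf("%f\n", lanczos_weight(0.5, 3.0));
    return 0;
}
```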
622 filter2Size = filterSize;
624 filter2Size += srcFilter->length - 1;
626 filter2Size += dstFilter->length - 1;
630 for (i = 0; i < dstW; i++) {
634 for (k = 0; k < srcFilter->length; k++) {
635 for (j = 0; j < filterSize; j++)
636 filter2[i * filter2Size + k + j] +=
640 for (j = 0; j < filterSize; j++)
641 filter2[i * filter2Size + j] = filter[i * filterSize + j];
645 (*filterPos)[i] += (filterSize - 1) / 2 - (filter2Size - 1) / 2;
652 for (i = dstW - 1; i >= 0; i--) {
653 int min = filter2Size;
658 for (j = 0; j < filter2Size; j++) {
660 cutOff += FFABS(filter2[i * filter2Size]);
667 if (i < dstW - 1 && (*filterPos)[i] >= (*filterPos)[i + 1])
671 for (k = 1; k < filter2Size; k++)
672 filter2[i * filter2Size + k - 1] = filter2[i * filter2Size + k];
673 filter2[i * filter2Size + k - 1] = 0;
679 for (j = filter2Size - 1; j > 0; j--) {
680 cutOff += FFABS(filter2[i * filter2Size + j]);
687 if (min > minFilterSize)
693 if (minFilterSize < 5)
699 if (minFilterSize < 3)
705 if (minFilterSize == 1 && filterAlign == 2)
710 int reNum = minFilterSize & (0x07);
712 if (minFilterSize < 5)
719 filterSize = (minFilterSize + (filterAlign - 1)) & (~(filterAlign - 1));
729 *outFilterSize = filterSize;
733 "SwScaler: reducing / aligning filtersize %d -> %d\n",
734 filter2Size, filterSize);
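Line 719 rounds the minimal filter size up to the required alignment with the usual power-of-two mask trick. Spelled out as a tiny standalone helper (hypothetical name):

```c
#include <stdio.h>

/* Round n up to the next multiple of align, where align is a power of two.
 * Same mask trick as line 719. */
static int align_up(int n, int align)
{
    return (n + (align - 1)) & ~(align - 1);
}

int main(void)
{
    printf("%d %d\n", align_up(5, 4), align_up(8, 4)); /* 8 8 */
    return 0;
}
```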
736 for (i = 0; i < dstW; i++) {
739 for (j = 0; j < filterSize; j++) {
740 if (j >= filter2Size)
741 filter[i * filterSize + j] = 0;
743 filter[i * filterSize + j] = filter2[i * filter2Size + j];
745 filter[i * filterSize + j] = 0;
752 for (i = 0; i < dstW; i++) {
754 if ((*filterPos)[i] < 0) {
756 for (j = 1; j < filterSize; j++) {
759 filter[i * filterSize + j] = 0;
764 if ((*filterPos)[i] + filterSize > srcW) {
765 int shift = (*filterPos)[i] + FFMIN(filterSize - srcW, 0);
768 for (j = filterSize - 1; j >= 0; j--) {
769 if ((*filterPos)[i] + j >= srcW) {
770 acc += filter[i * filterSize + j];
771 filter[i * filterSize + j] = 0;
774 for (j = filterSize - 1; j >= 0; j--) {
776 filter[i * filterSize + j] = 0;
783 filter[i * filterSize + srcW - 1 - (*filterPos)[i]] += acc;
787 if ((*filterPos)[i] + filterSize > srcW) {
788 for (j = 0; j < filterSize; j++) {
800 for (i = 0; i < dstW; i++) {
805 for (j = 0; j < filterSize; j++) {
806 sum += filter[i * filterSize + j];
808 sum = (sum + one / 2) / one;
813 for (j = 0; j < *outFilterSize; j++) {
816 (*outFilter)[i * (*outFilterSize) + j] = intV;
817 error = v - intV * sum;
821 (*filterPos)[dstW + 0] =
822 (*filterPos)[dstW + 1] =
823 (*filterPos)[dstW + 2] = (*filterPos)[dstW - 1];
825 for (i = 0; i < *outFilterSize; i++) {
826 int k = (dstW - 1) * (*outFilterSize) + i;
827 (*outFilter)[k + 1 * (*outFilterSize)] =
828 (*outFilter)[k + 2 * (*outFilterSize)] =
829 (*outFilter)[k + 3 * (*outFilterSize)] = (*outFilter)[k];
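The loop around lines 800-817 renormalizes each output row so its coefficients sum exactly to `one` (e.g. `1 << 14`), carrying the rounding error from tap to tap. The sketch below shows the same error-diffusion idea in a simplified, self-contained form; it is not the exact library code, and the example assumes non-negative coefficients:

```c
#include <stdio.h>

/* Rescale one row of integer weights so it sums exactly to 'one',
 * diffusing the rounding error into the next tap. Illustrative helper,
 * not the libswscale API. */
static void normalize_row(int *coeff, int n, int one)
{
    long long sum = 0, error = 0;
    for (int j = 0; j < n; j++)
        sum += coeff[j];
    for (int j = 0; j < n; j++) {
        long long v = (long long)coeff[j] * one + error;
        int intV = (int)((v + sum / 2) / sum);  /* rounded division */
        error = v - (long long)intV * sum;      /* carry the remainder */
        coeff[j] = intV;
    }
}

int main(void)
{
    int row[4] = { 3, 5, 5, 3 };
    normalize_row(row, 4, 1 << 14);
    int total = 0;
    for (int j = 0; j < 4; j++)
        total += row[j];
    printf("sum = %d\n", total); /* exactly 16384 */
    return 0;
}
```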
854 uint8_t *p = (uint8_t*)c->input_rgb2yuv_table;
856 static const int8_t map[] = {
881 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
882 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
883 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
884 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
885 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
886 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
887 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
888 -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 ,
946 static const int16_t xyz2rgb_matrix[3][4] = {
947 {13270, -6295, -2041},
949 { 228, -835, 4329} };
950 static const int16_t rgb2xyz_matrix[3][4] = {
955 static uint16_t xyzgamma_tab[4096], rgbgammainv_tab[4096];
956 static uint16_t rgbgamma_tab[65536], xyzgammainv_tab[65536];
961 memcpy(c->xyz2rgb_matrix, xyz2rgb_matrix, sizeof(c->xyz2rgb_matrix));
962 memcpy(c->rgb2xyz_matrix, rgb2xyz_matrix, sizeof(c->rgb2xyz_matrix));
965 c->xyzgamma = av_malloc(sizeof(uint16_t) * 2 * (4096 + 65536));
968 c->rgbgammainv = c->xyzgamma + 4096;
969 c->rgbgamma = c->rgbgammainv + 4096;
970 c->xyzgammainv = c->rgbgamma + 65536;
972 c->xyzgamma = xyzgamma_tab;
973 c->rgbgamma = rgbgamma_tab;
974 c->xyzgammainv = xyzgammainv_tab;
975 c->rgbgammainv = rgbgammainv_tab;
976 if (xyzgamma_tab[4095])
981 for (i = 0; i < 4096; i++) {
982 c->xyzgamma[i] = lrint(pow(i / 4095.0, xyzgamma) * 65535.0);
983 c->rgbgammainv[i] = lrint(pow(i / 4095.0, rgbgammainv) * 65535.0);
987 for (i = 0; i < 65536; i++) {
988 c->rgbgamma[i] = lrint(pow(i / 65535.0, rgbgamma) * 4095.0);
989 c->xyzgammainv[i] = lrint(pow(i / 65535.0, xyzgammainv) * 4095.0);
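Lines 981-989 fill the XYZ/RGB gamma lookup tables by evaluating `pow()` once per table entry instead of per pixel. A sketch of the 12-bit-to-16-bit table construction; the exponent 2.6 below is only an example value, not asserted to be the library's constant:

```c
#include <math.h>
#include <stdio.h>
#include <stdint.h>

/* Build a 12-bit -> 16-bit gamma LUT the way lines 981-983 do:
 * tbl[i] = round((i / 4095)^gamma * 65535). */
static void build_gamma_lut(uint16_t tbl[4096], double gamma)
{
    for (int i = 0; i < 4096; i++)
        tbl[i] = (uint16_t)lrint(pow(i / 4095.0, gamma) * 65535.0);
}

int main(void)
{
    static uint16_t lut[4096];
    build_gamma_lut(lut, 2.6);              /* example exponent */
    printf("%u %u\n", lut[0], lut[4095]);   /* 0 65535 */
    return 0;
}
```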
1059 if (c->srcXYZ || c->dstXYZ)
1071 int srcRange, const int table[4], int dstRange,
1072 int brightness, int contrast, int saturation)
1077 int ret, need_reinit = 0;
1079 if (c->nb_slice_ctx) {
1081 for (int i = 0; i < c->nb_slice_ctx; i++) {
1083 srcRange, table, dstRange,
1105 c->brightness != brightness ||
1106 c->contrast != contrast ||
1108 memcmp(c->srcColorspaceTable, inv_table, sizeof(int) * 4) ||
1109 memcmp(c->dstColorspaceTable, table, sizeof(int) * 4)
1113 memmove(c->srcColorspaceTable, inv_table, sizeof(int) * 4);
1114 memmove(c->dstColorspaceTable, table, sizeof(int) * 4);
1118 c->brightness = brightness;
1119 c->contrast = contrast;
1130 if (c->cascaded_context[c->cascaded_mainindex])
1137 if (!c->cascaded_context[0] &&
1138 memcmp(c->dstColorspaceTable, c->srcColorspaceTable, sizeof(int) * 4) &&
1141 int tmp_width, tmp_height;
1147 av_log(c, AV_LOG_VERBOSE, "YUV color matrix differs for YUV->YUV, using intermediate RGB to convert\n");
1163 if (srcW*srcH > dstW*dstH) {
1172 tmp_width, tmp_height, tmp_format, 64);
1177 tmp_width, tmp_height, tmp_format,
1179 if (!c->cascaded_context[0])
1188 srcRange, table, dstRange,
1191 c->cascaded_context[1] = alloc_set_opts(tmp_width, tmp_height, tmp_format,
1194 if (!c->cascaded_context[1])
1196 c->cascaded_context[1]->src_range = srcRange;
1197 c->cascaded_context[1]->dst_range = dstRange;
1202 srcRange, table, dstRange,
1203 0, 1 << 16, 1 << 16);
1207 if (c->cascaded_context[0] && memcmp(c->dstColorspaceTable, c->srcColorspaceTable, sizeof(int) * 4))
1229 int *srcRange, int **table, int *dstRange,
1230 int *brightness, int *contrast, int *saturation)
1236 if (c->nb_slice_ctx) {
1238 table, dstRange, brightness, contrast,
1242 *inv_table = c->srcColorspaceTable;
1243 *table = c->dstColorspaceTable;
1246 *brightness = c->brightness;
1247 *contrast = c->contrast;
1271 tbl = (uint16_t*)av_malloc(sizeof(uint16_t) * 1 << 16);
1275 for (i = 0; i < 65536; ++i) {
1276 tbl[i] = pow(i / 65535.0, e) * 65535.0;
1346 int usesVFilter, usesHFilter;
1354 int dst_stride = FFALIGN(dstW * sizeof(int16_t) + 66, 16);
1361 static const float float_mult = 1.0f / 255.0f;
1367 unscaled = (srcW == dstW && srcH == dstH);
1369 if (!c->contrast && !c->saturation && !c->dstFormatBpp)
1415 if (dstW < srcW && dstH < srcH)
1417 else if (dstW > srcW && dstH > srcH)
1422 } else if (i & (i - 1)) {
1424 "Exactly one scaler algorithm must be chosen, got %X\n", i);
1428 if (srcW < 1 || srcH < 1 || dstW < 1 || dstH < 1) {
1432 srcW, srcH, dstW, dstH);
1436 if (srcW < 8 || dstW < 8) {
1443 dstFilter = &dummyFilter;
1445 srcFilter = &dummyFilter;
1447 c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;
1448 c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH;
1451 c->vRounder = 4 * 0x0001000100010001ULL;
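Lines 1447-1448 store the scale as a 16.16 fixed-point source step per destination pixel, with `dstW >> 1` added for round-to-nearest. A sketch of how such an increment is computed and walked (helper name is mine):

```c
#include <stdint.h>
#include <stdio.h>

/* 16.16 fixed-point source step per destination pixel, rounded to nearest,
 * as in lines 1447-1448. Walking xpos by xInc and taking xpos >> 16 gives
 * the integer source column for each output column. */
static int compute_xinc(int srcW, int dstW)
{
    return (int)((((int64_t)srcW << 16) + (dstW >> 1)) / dstW);
}

int main(void)
{
    int xInc = compute_xinc(1920, 1280);   /* 1.5x in 16.16: 98304 */
    int64_t xpos = 0;
    for (int i = 0; i < 4; i++, xpos += xInc)
        printf("dst %d <- src %d\n", i, (int)(xpos >> 16));
    return 0;
}
```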
1453 usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) ||
1457 usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) ||
1465 c->dst_slice_align = 1 << c->chrDstVSubSample;
1474 if (c->chrSrcHSubSample == 0
1475 && c->chrSrcVSubSample == 0
1479 av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to input having non subsampled chroma\n");
1499 "Desired dithering only supported in full chroma interpolation for destination format '%s'\n",
1508 "Ordered dither is not supported in full chroma interpolation for destination format '%s'\n",
1517 "%s output is not supported with half chroma resolution, switching to full\n",
1551 "full chroma interpolation for destination format '%s' not yet implemented\n",
1557 c->chrDstHSubSample = 1;
1562 c->chrSrcVSubSample += c->vChrDrop;
1581 ((dstW >> c->chrDstHSubSample) <= (srcW >> 1) ||
1583 c->chrSrcHSubSample = 1;
1602 if (c->dstBpc == 16)
1606 c->canMMXEXTBeUsed = dstW >= srcW && (dstW & 31) == 0 &&
1607 c->chrDstW >= c->chrSrcW &&
1609 if (!c->canMMXEXTBeUsed && dstW >= srcW && c->chrDstW >= c->chrSrcW && (srcW & 15) == 0
1614 "output width is not a multiple of 32 -> no MMXEXT scaler\n");
1617 c->canMMXEXTBeUsed = 0;
1619 c->canMMXEXTBeUsed = 0;
1621 c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW;
1622 c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH;
1632 if (c->canMMXEXTBeUsed) {
1638 c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20;
1639 c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20;
1644 c->gamma_value = 2.2;
1647 if (!unscaled && sws->gamma_flag && (srcFormat != tmpFmt || dstFormat != tmpFmt)) {
1649 c->cascaded_context[0] = NULL;
1652 srcW, srcH, tmpFmt, 64);
1660 if (!c->cascaded_context[0]) {
1666 flags, srcFilter, dstFilter,
1669 if (!c->cascaded_context[1])
1673 c2->is_internal_gamma = 1;
1676 if (!c2->gamma || !c2->inv_gamma)
1685 c->cascaded_context[1] = NULL;
1689 c->cascaded_context[2] = NULL;
1690 if (dstFormat != tmpFmt) {
1692 dstW, dstH, tmpFmt, 64);
1697 dstW, dstH, dstFormat,
1700 if (!c->cascaded_context[2])
1713 srcW, srcH, tmpFormat, 64);
1718 srcW, srcH, tmpFormat,
1721 if (!c->cascaded_context[0])
1725 dstW, dstH, dstFormat,
1728 if (!c->cascaded_context[1])
1735 for (i = 0; i < 256; ++i) {
1736 c->uint2float_lut[i] = (float)i * float_mult;
1742 (!unscaled || unscaled && dstFormat != srcFormat && (srcFormat != AV_PIX_FMT_GRAYF32 ||
1747 if (CONFIG_SWSCALE_ALPHA && isALPHA(srcFormat) && !isALPHA(dstFormat)) {
1752 dstFormat != tmpFormat ||
1753 usesHFilter || usesVFilter ||
1756 c->cascaded_mainindex = 1;
1758 srcW, srcH, tmpFormat, 64);
1763 srcW, srcH, tmpFormat,
1765 if (!c->cascaded_context[0])
1773 dstW, dstH, dstFormat,
1775 if (!c->cascaded_context[1])
1790 if (unscaled && !usesHFilter && !usesVFilter &&
1800 "using alpha blendaway %s -> %s special converter\n",
1806 if (unscaled && !usesHFilter && !usesVFilter &&
1812 if (c->convert_unscaled) {
1815 "using unscaled %s -> %s special converter\n",
1821 #if HAVE_MMAP && HAVE_MPROTECT && defined(MAP_ANONYMOUS)
1829 #if HAVE_MMXEXT_INLINE
1838 c->lumMmxextFilterCode = mmap(NULL, c->lumMmxextFilterCodeSize,
1839 PROT_READ | PROT_WRITE,
1840 MAP_PRIVATE | MAP_ANONYMOUS,
1842 c->chrMmxextFilterCode = mmap(NULL, c->chrMmxextFilterCodeSize,
1843 PROT_READ | PROT_WRITE,
1844 MAP_PRIVATE | MAP_ANONYMOUS,
1846 #elif HAVE_VIRTUALALLOC
1847 c->lumMmxextFilterCode = VirtualAlloc(NULL,
1848 c->lumMmxextFilterCodeSize,
1850 PAGE_EXECUTE_READWRITE);
1851 c->chrMmxextFilterCode = VirtualAlloc(NULL,
1852 c->chrMmxextFilterCodeSize,
1854 PAGE_EXECUTE_READWRITE);
1856 c->lumMmxextFilterCode = av_malloc(c->lumMmxextFilterCodeSize);
1857 c->chrMmxextFilterCode = av_malloc(c->chrMmxextFilterCodeSize);
1860 #ifdef MAP_ANONYMOUS
1861 if (c->lumMmxextFilterCode == MAP_FAILED || c->chrMmxextFilterCode == MAP_FAILED)
1863 if (!c->lumMmxextFilterCode || !c->chrMmxextFilterCode)
1877 c->hLumFilter, (uint32_t*)c->hLumFilterPos, 8);
1879 c->hChrFilter, (uint32_t*)c->hChrFilterPos, 4);
1882 if ( mprotect(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1
1883 || mprotect(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1) {
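Lines 1838-1844 allocate anonymous read/write pages for the generated MMXEXT horizontal-scaler code, and lines 1882-1883 flip them to read/execute once the code has been written: the usual W^X pattern for runtime-generated code. A POSIX-only standalone sketch of that allocate, write, protect sequence (not the library code; the single emitted byte is just an x86 RET):

```c
#define _DEFAULT_SOURCE   /* MAP_ANONYMOUS, as at the top of this file */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t size = 4096;

    /* 1. Anonymous, private, writable (not yet executable) mapping. */
    unsigned char *code = mmap(NULL, size, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (code == MAP_FAILED)
        return 1;

    /* 2. Emit machine code into the buffer. */
    code[0] = 0xC3;

    /* 3. Drop write permission and allow execution, as lines 1882-1883 do. */
    if (mprotect(code, size, PROT_READ | PROT_EXEC) == -1)
        return 1;

    puts("buffer is now read/execute");
    munmap(code, size);
    return 0;
}
```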
1899 &c->hLumFilterSize, c->lumXInc,
1900 srcW, dstW, filterAlign, 1 << 14,
1910 &c->hChrFilterSize, c->chrXInc,
1911 c->chrSrcW, c->chrDstW, filterAlign, 1 << 14,
1929 if ((ret = initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize,
1930 c->lumYInc, srcH, dstH, filterAlign, (1 << 12),
1937 if ((ret = initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize,
1938 c->chrYInc, c->chrSrcH, c->chrDstH,
1939 filterAlign, (1 << 12),
1955 short *p = (short *)&c->vYCoeffsBank[i];
1956 for (j = 0; j < 8; j++)
1957 p[j] = c->vLumFilter[i];
1960 for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) {
1962 short *p = (short *)&c->vCCoeffsBank[i];
1963 for (j = 0; j < 8; j++)
1964 p[j] = c->vChrFilter[i];
1969 for (i = 0; i < 4; i++)
1976 c->uv_off = (dst_stride>>1) + 64 / (c->dstBpc &~ 7);
1977 c->uv_offx2 = dst_stride + 16;
1982 const char *scaler = NULL, *cpucaps;
1991 scaler = "ehh flags invalid?!";
2006 cpucaps = "AltiVec";
2014 "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
2017 "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
2018 c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH,
2019 c->chrXInc, c->chrYInc);
2029 int tmpW = sqrt(srcW * (int64_t)dstW);
2030 int tmpH = sqrt(srcH * (int64_t)dstH);
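When a one-pass scale would be too extreme, lines 2029-2030 pick an intermediate resolution at the geometric mean of the source and destination sizes, so a cascaded two-pass scale splits the ratio roughly evenly. A small sketch of that choice (hypothetical helper name):

```c
#include <math.h>
#include <stdio.h>
#include <stdint.h>

/* Intermediate size for a two-pass scale: the geometric mean splits the
 * total ratio into two roughly equal ratios (lines 2029-2030). */
static int intermediate_dim(int src, int dst)
{
    return (int)sqrt(src * (int64_t)dst);
}

int main(void)
{
    printf("%d x %d\n", intermediate_dim(7680, 640),
                        intermediate_dim(4320, 360));
    return 0;
}
```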
2036 if (srcW*(int64_t)srcH <= 4LL*dstW*dstH)
2040 tmpW, tmpH, tmpFormat, 64);
2045 tmpW, tmpH, tmpFormat,
2048 if (!c->cascaded_context[0])
2052 dstW, dstH, dstFormat,
2055 if (!c->cascaded_context[1])
2080 if (!c->slice_ctx || !c->slice_err)
2102 "Error-diffusion dither is in use, scaling will be single-threaded.");
2120 if (!c->frame_src || !c->frame_dst)
2136 if (ret < 0 || sws->threads > 1)
2147 SwsFilter *dstFilter, const double *param)
2152 dstW, dstH, dstFormat,
2168 for (i=0; i<a->length; i++)
2177 for (i=0; i<a->length; i++)
2185 if(length <= 0 || length > INT_MAX/sizeof(double))
2200 const int length = (int)(variance * quality + 0.5) | 1;
2202 double middle = (length - 1) * 0.5;
2205 if(variance < 0 || quality < 0)
2213 for (i = 0; i < length; i++) {
2214 double dist = i - middle;
2215 vec->coeff[i] = exp(-dist * dist / (2 * variance * variance)) /
2216 sqrt(2 * variance * M_PI);
2237 for (i = 0; i < length; i++)
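sws_getGaussianVec() (lines 2200-2216, plus the normalization loop at line 2237) builds an odd-length Gaussian kernel and normalizes it. The sketch below mirrors the shape computation with plain doubles and normalizes to a sum of 1.0; the constant 1/sqrt(2·variance·π) factor is dropped here because normalization cancels it. Names are mine, not the SwsVector API:

```c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Build an odd-length Gaussian vector and normalize it to sum to 1.0. */
static double *gaussian_vec(double variance, double quality, int *out_len)
{
    int length = (int)(variance * quality + 0.5) | 1;  /* force odd length */
    double middle = (length - 1) * 0.5;
    double *coeff = malloc(sizeof(*coeff) * length);
    double sum = 0.0;

    if (!coeff)
        return NULL;
    for (int i = 0; i < length; i++) {
        double dist = i - middle;
        coeff[i] = exp(-dist * dist / (2 * variance * variance));
        sum += coeff[i];
    }
    for (int i = 0; i < length; i++)
        coeff[i] /= sum;
    *out_len = length;
    return coeff;
}

int main(void)
{
    int n;
    double *v = gaussian_vec(2.0, 3.0, &n);
    if (!v)
        return 1;
    for (int i = 0; i < n; i++)
        printf("%.4f ", v[i]);
    printf("\n");
    free(v);
    return 0;
}
```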
2258 for (i = 0; i < a->length; i++)
2268 for (i = 0; i < a->length; i++)
2269 a->coeff[i] *= scalar;
2279 int length = FFMAX(a->length, b->length);
2286 for (i = 0; i < a->length; i++)
2287 vec->coeff[i + (length - 1) / 2 - (a->length - 1) / 2] += a->coeff[i];
2288 for (i = 0; i < b->length; i++)
2289 vec->coeff[i + (length - 1) / 2 - (b->length - 1) / 2] += b->coeff[i];
2304 for (i = 0; i < a->length; i++) {
2305 vec->coeff[i + (length - 1) / 2 -
2306 (a->length - 1) / 2 - shift] = a->coeff[i];
2321 a->coeff = shifted->coeff;
2352 for (i = 0; i < a->length; i++)
2353 if (a->coeff[i] > max)
2356 for (i = 0; i < a->length; i++)
2357 if (a->coeff[i] < min)
2362 for (i = 0; i < a->length; i++) {
2363 int x = (int)((a->coeff[i] - min) * 60.0 / range + 0.5);
2364 av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]);
2366 av_log(log_ctx, log_level, " ");
2367 av_log(log_ctx, log_level, "|\n");
2393 float lumaSharpen, float chromaSharpen,
2394 float chromaHShift, float chromaVShift,
2401 if (lumaGBlur != 0.0) {
2409 if (chromaGBlur != 0.0) {
2420 if (chromaSharpen != 0.0) {
2431 if (lumaSharpen != 0.0) {
2442 if (chromaHShift != 0.0)
2445 if (chromaVShift != 0.0)
2485 for (i = 0; i < c->nb_slice_ctx; i++)
2492 for (i = 0; i < 4; i++)
2516 if (c->lumMmxextFilterCode)
2517 munmap(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize);
2518 if (c->chrMmxextFilterCode)
2519 munmap(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize);
2520 #elif HAVE_VIRTUALALLOC
2521 if (c->lumMmxextFilterCode)
2522 VirtualFree(c->lumMmxextFilterCode, 0, MEM_RELEASE);
2523 if (c->chrMmxextFilterCode)
2524 VirtualFree(c->chrMmxextFilterCode, 0, MEM_RELEASE);
2529 c->lumMmxextFilterCode = NULL;
2530 c->chrMmxextFilterCode = NULL;
2539 memset(c->cascaded_context, 0, sizeof(c->cascaded_context));
2573 const double *param)
2580 param = default_param;
2582 if (prev && (prev->src_w == srcW &&
2583 prev->src_h == srcH &&
2585 prev->dst_w == dstW &&
2586 prev->dst_h == dstH &&
2626 for (idx = 0; idx < rl->nb_ranges; idx++)
2633 if (prev->start + prev->len > start)
2636 if (idx < rl->nb_ranges) {
2666 if (idx < rl->nb_ranges - 1) {
2692 .height = frame->height,
2693 .format = frame->format,
2694 .range = frame->color_range,
2695 .csp = frame->colorspace,
2696 .loc = frame->chroma_location,
2699 .prim = frame->color_primaries,
2700 .trc = frame->color_trc,
2718 } else if (desc->nb_components < 3) {
2728 switch (frame->format) {
2738 if (!desc->log2_chroma_w && !desc->log2_chroma_h)
2790 if (av_cmp_q(pars->maxscl[1], maxrgb) > 0)
2791 maxrgb = pars->maxscl[1];
2793 maxrgb = pars->maxscl[2];
2795 if (maxrgb.num > 0) {
2814 for (int i = 0; i < pars->num_distribution_maxrgb_percentiles; i++) {
2815 const AVRational pct = pars->distribution_maxrgb[i].percentile;
2842 switch (ref->prim) {
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
static void error(const char *err)
static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
#define INLINE_MMX(flags)
@ AV_PIX_FMT_XYZ12LE
packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as lit...
enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt)
Utility function to swap the endianness of a pixel format.
@ AV_PIX_FMT_YUV420P9LE
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
AVHDRPlusColorTransformParams params[3]
The color transform parameters for every processing window.
@ AV_PIX_FMT_XV30LE
packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), little-endian, variant of Y410 where alpha channe...
int sws_setColorspaceDetails(SwsContext *sws, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
#define AV_LOG_WARNING
Something somehow does not look correct.
@ AV_PIX_FMT_GRAY10BE
Y , 10bpp, big-endian.
AVPixelFormat
Pixel format.
@ AV_PIX_FMT_BAYER_GBRG16LE
bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
@ AV_PIX_FMT_BGR48LE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
static av_always_inline int isPlanarRGB(enum AVPixelFormat pix_fmt)
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
@ AV_PIX_FMT_P416BE
interleaved chroma YUV 4:4:4, 48bpp, big-endian
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
@ AV_PIX_FMT_BGRA64BE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
static SwsVector * sws_getIdentityVec(void)
Allocate and return a vector with just one coefficient, with value 1.0.
AVColorTransferCharacteristic
Color Transfer Characteristic.
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
@ AV_PIX_FMT_RGB444LE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Struct that contains both white point location and primaries location, providing the complete descrip...
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
int src_w
Deprecated frame property overrides, for the legacy API only.
@ AV_PIX_FMT_YUV422P14LE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
static IPT saturation(const CmsCtx *ctx, IPT ipt)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
@ AV_PIX_FMT_RGBF16LE
IEEE-754 half precision packed RGB 16:16:16, 48bpp, RGBRGB..., little-endian.
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
void sws_freeContext(SwsContext *sws)
Free the swscaler context swsContext.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define EXTERNAL_AVX2_FAST(flags)
This structure describes decoded (raw) audio or video data.
@ AV_PIX_FMT_YUVA444P10BE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
static enum AVPixelFormat alphaless_fmt(enum AVPixelFormat fmt)
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
static int handle_0alpha(enum AVPixelFormat *format)
@ AV_PIX_FMT_YUV440P12BE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
@ AVCOL_RANGE_JPEG
Full range content.
static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
const struct AVLumaCoefficients * av_csp_luma_coeffs_from_avcsp(enum AVColorSpace csp)
Retrieves the Luma coefficients necessary to construct a conversion matrix from an enum constant desc...
static int infer_trc_ref(SwsColor *csp, const SwsColor *ref)
int depth
Number of bits in the component.
@ AV_PIX_FMT_P412BE
interleaved chroma YUV 4:4:4, 36bpp, data in the high bits, big-endian
@ SWS_BILINEAR
bilinear filtering
int sws_test_primaries(enum AVColorPrimaries prim, int output)
Test if a given set of color primaries is supported.
static const uint16_t table[]
@ AV_PIX_FMT_P010BE
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits,...
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
@ AV_PIX_FMT_YUV420P14BE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
#define AV_PIX_FMT_YUV420P10
@ AV_PIX_FMT_YUV420P16LE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
unsigned flags
Bitmask of SWS_*.
#define AV_LOG_VERBOSE
Detailed information.
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
void(* filter)(uint8_t *src, int stride, int qscale)
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
Struct containing luma coefficients to be used for RGB to YUV/YCoCg, or similar calculations.
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
AVColorPrimaries
Chromaticity coordinates of the source primaries.
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
@ AV_PIX_FMT_YUV422P9BE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUVA444P9BE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
static SwsVector * sws_getShiftedVec(SwsVector *a, int shift)
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
@ AV_PIX_FMT_BAYER_GRBG16BE
bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
@ SWS_BICUBLIN
bicubic luma, bilinear chroma
static atomic_int cpu_flags
@ AV_PIX_FMT_GRAY10LE
Y , 10bpp, little-endian.
@ AV_PIX_FMT_GRAYF32LE
IEEE-754 single precision Y, 32bpp, little-endian.
@ AV_PIX_FMT_GBRAP14BE
planar GBR 4:4:4:4 56bpp, big-endian
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
@ AV_PIX_FMT_RGBAF16LE
IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., little-endian.
void sws_freeVec(SwsVector *a)
@ AV_PIX_FMT_AYUV64LE
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
@ AV_PIX_FMT_YUV444P16LE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_BAYER_GBRG16BE
bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
@ AV_PIX_FMT_AYUV64BE
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
static int isnan_vec(SwsVector *a)
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
@ SWS_FAST_BILINEAR
Scaler selection options.
static int handle_jpeg(enum AVPixelFormat *format)
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
AVPrimaryCoefficients gamut
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
#define AV_PIX_FMT_GBRP14
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
@ SWS_FULL_CHR_H_INP
Perform full chroma interpolation when downscaling RGB sources.
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
static int infer_prim_ref(SwsColor *csp, const SwsColor *ref)
int avpriv_slicethread_create(AVSliceThread **pctx, void *priv, void(*worker_func)(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads), void(*main_func)(void *priv), int nb_threads)
Create slice threading context.
int src_v_chr_pos
Source vertical chroma position in luma grid / 256.
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUV422P9
SwsContext * sws_getCachedContext(SwsContext *prev, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
@ AV_PIX_FMT_GRAY9LE
Y , 9bpp, little-endian.
@ AVCOL_RANGE_NB
Not part of ABI.
av_cold int sws_init_context(SwsContext *sws, SwsFilter *srcFilter, SwsFilter *dstFilter)
Initialize the swscaler context sws_context.
int ff_sws_alphablendaway(SwsInternal *c, const uint8_t *const src[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
#define AV_PIX_FMT_GRAY16
enum AVColorTransferCharacteristic trc
@ AV_PIX_FMT_YUVA444P16BE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P10BE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
#define AV_CPU_FLAG_SLOW_GATHER
CPU has slow gathers.
@ AV_PIX_FMT_VUYA
packed VUYA 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), VUYAVUYA...
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_PIX_FMT_YUV444P10
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
uint8_t is_supported_endianness
@ AV_PIX_FMT_YUV422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV444P14LE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
static __device__ float ceil(float a)
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
@ AV_PIX_FMT_BAYER_RGGB16BE
bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos, int *outFilterSize, int xInc, int srcW, int dstW, int filterAlign, int one, int flags, int cpu_flags, SwsVector *srcFilter, SwsVector *dstFilter, double param[2], int srcPos, int dstPos)
#define FF_ARRAY_ELEMS(a)
#define AV_PIX_FMT_YUV422P16
SwsDither dither
Dither mode.
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define SWS_MAX_REDUCE_CUTOFF
int ff_range_add(RangeList *rl, unsigned int start, unsigned int len)
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
static void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level)
Print with av_log() a textual representation of the vector a if log_level <= av_log_level.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
@ AVCOL_PRI_NB
Not part of ABI.
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
int threads
How many threads to use for processing, or 0 for automatic selection.
const AVColorPrimariesDesc * av_csp_primaries_desc_from_id(enum AVColorPrimaries prm)
Retrieves a complete gamut description from an enum constant describing the color primaries.
@ AV_PIX_FMT_P416LE
interleaved chroma YUV 4:4:4, 48bpp, little-endian
@ AV_PIX_FMT_BAYER_RGGB16LE
bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
@ AV_PIX_FMT_P210LE
interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, little-endian
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
static enum AVPixelFormat pix_fmt
int length
number of coefficients in the vector
SwsVector * sws_allocVec(int length)
Allocate and return an uninitialized vector with length coefficients.
@ AV_PIX_FMT_P016BE
like NV12, with 16bpp per component, big-endian
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int ff_yuv2rgb_c_init_tables(SwsInternal *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation)
static int ff_fmt_equal(const SwsFormat *fmt1, const SwsFormat *fmt2)
#define AV_PIX_FMT_YUV420P9
@ AV_PIX_FMT_YUVA420P16BE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_YUV420P16
int sws_test_colorspace(enum AVColorSpace csp, int output)
Test if a given color space is supported.
void ff_get_unscaled_swscale(SwsInternal *c)
Set c->convert_unscaled to an unscaled converter if one exists for the specific source and destinatio...
av_cold void ff_yuv2rgb_init_tables_ppc(SwsInternal *c, const int inv_table[4], int brightness, int contrast, int saturation)
static const ScaleAlgorithm scale_algorithms[]
int flag
flag associated to the algorithm
@ AV_PIX_FMT_RGB4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
static const FormatEntry format_entries[]
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
SwsVector * sws_getGaussianVec(double variance, double quality)
Return a normalized Gaussian curve used to filter stuff quality = 3 is high quality,...
@ AVCOL_PRI_SMPTE240M
identical to above, also called "SMPTE C" even though it uses D65
#define AV_PIX_FMT_GRAYF32
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
int ff_init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode, int16_t *filter, int32_t *filterPos, int numSplits)
@ AVCOL_PRI_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
@ AV_PIX_FMT_YUVA422P10LE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
@ AV_PIX_FMT_BAYER_GRBG16LE
bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
@ AV_PIX_FMT_YUV444P9BE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10BE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
static int fill_xyztables(SwsInternal *c)
static uint16_t * alloc_gamma_tbl(double e)
#define AV_PIX_FMT_GBRP16
@ AV_PIX_FMT_YUV422P16LE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
#define SWS_SRC_V_CHR_DROP_SHIFT
uint8_t application_version
Application version in the application defining document in ST-2094 suite.
@ AV_PIX_FMT_Y216LE
packed YUV 4:2:2 like YUYV422, 32bpp, little-endian
Describe the class of an AVClass context structure.
int ff_free_filters(SwsInternal *c)
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
@ AV_PIX_FMT_P012LE
like NV12, with 12bpp per component, data in the high bits, zeros in the low bits,...
#define RETCODE_USE_CASCADE
@ SWS_BICUBIC
2-tap cubic B-spline
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
int gamma_flag
Use gamma correct scaling.
Rational number (pair of numerator and denominator).
int sws_is_noop(const AVFrame *dst, const AVFrame *src)
Check if a given conversion is a noop.
@ AV_PIX_FMT_P210BE
interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, big-endian
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
SwsFilter * sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, float lumaSharpen, float chromaSharpen, float chromaHShift, float chromaVShift, int verbose)
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
@ AV_PIX_FMT_YUVA422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
#define ROUNDED_DIV(a, b)
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
@ AV_PIX_FMT_BGR565LE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
@ AV_PIX_FMT_YUVA444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
static void makenan_vec(SwsVector *a)
int sws_test_format(enum AVPixelFormat format, int output)
Test if a given pixel format is supported.
@ AV_PIX_FMT_YUVA444P9LE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
int src_range
Source is full range.
@ AV_PIX_FMT_Y210LE
packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, little-endian
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
@ AV_PIX_FMT_YUVA420P16LE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
av_cold void ff_sws_rgb2rgb_init(void)
@ AV_PIX_FMT_BGR4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
#define AV_PIX_FMT_YUV422P10
av_cold void ff_sws_init_range_convert(SwsInternal *c)
@ AV_PIX_FMT_YUV440P10LE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
static void sws_addVec(SwsVector *a, SwsVector *b)
double * coeff
pointer to the list of coefficients
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
static int range_override_needed(enum AVPixelFormat format)
@ AV_PIX_FMT_BGR555BE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
@ AV_PIX_FMT_YUVA420P9LE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
const AVClass ff_sws_context_class
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
@ AVCOL_RANGE_UNSPECIFIED
int dst_h_chr_pos
Destination horizontal chroma position.
void sws_scaleVec(SwsVector *a, double scalar)
Scale all the coefficients of a by the scalar value.
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static SwsVector * sws_getConstVec(double c, int length)
Allocate and return a vector with length coefficients, all with the same value c.
@ AV_PIX_FMT_YUV420P14LE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_PIX_FMT_YUV444P14BE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
int av_opt_copy(void *dst, const void *src)
Copy options from src object into dest object.
@ AV_PIX_FMT_X2RGB10LE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
#define SWS_PARAM_DEFAULT
@ AV_PIX_FMT_P212LE
interleaved chroma YUV 4:2:2, 24bpp, data in the high bits, little-endian
@ AV_PIX_FMT_YUV420P9BE
The following 12 formats have the disadvantage of needing 1 format for each bit depth.
int av_image_alloc(uint8_t *pointers[4], int linesizes[4], int w, int h, enum AVPixelFormat pix_fmt, int align)
Allocate an image with size w and h and pixel format pix_fmt, and fill pointers and linesizes accordi...
@ AVCOL_TRC_SMPTE2084
SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems.
void ff_sws_graph_free(SwsGraph **pgraph)
Uninitialize any state associate with this filter graph and free it.
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
@ AV_PIX_FMT_YUV440P12LE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
#define PPC_ALTIVEC(flags)
SwsContext * sws_alloc_context(void)
Allocate an empty SwsContext and set its fields to default values.
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
static int shift(int a, int b)
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
@ AV_PIX_FMT_BAYER_BGGR16LE
bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
@ AV_PIX_FMT_YUV420P12BE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
av_cold int ff_sws_init_single_context(SwsContext *sws, SwsFilter *srcFilter, SwsFilter *dstFilter)
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
uint8_t num_windows
The number of processing windows.
int ff_test_fmt(const SwsFormat *fmt, int output)
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_RGB444BE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
@ AV_PIX_FMT_XV36BE
packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, big-endian,...
@ AV_PIX_FMT_YUV422P14BE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
@ SWS_POINT
nearest neighbor
static AVRational av_make_q(int num, int den)
Create an AVRational.
SwsAlphaBlend alpha_blend
Alpha blending mode.
@ AV_PIX_FMT_GRAY12LE
Y , 12bpp, little-endian.
#define AV_PIX_FMT_BGR555
@ SWS_SPLINE
cubic Keys spline
static av_always_inline int isYUV(enum AVPixelFormat pix_fmt)
int src_h
Width and height of the source frame.
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
@ AVCHROMA_LOC_UNSPECIFIED
@ AV_PIX_FMT_YUV420P10BE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_RGBAF16BE
IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., big-endian.
static int test_range(enum AVColorRange range)
@ AV_PIX_FMT_NV16
interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int ff_shuffle_filter_coefficients(SwsInternal *c, int *filterPos, int filterSize, int16_t *filter, int dstW)
int sws_getColorspaceDetails(SwsContext *sws, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
@ AV_PIX_FMT_BGR444BE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
@ AV_PIX_FMT_GBRAP10LE
planar GBR 4:4:4:4 40bpp, little-endian
int sws_test_transfer(enum AVColorTransferCharacteristic trc, int output)
Test if a given color transfer function is supported.
@ AV_PIX_FMT_BGR565BE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
int sws_isSupportedInput(enum AVPixelFormat pix_fmt)
Return a positive value if pix_fmt is a supported input format, 0 otherwise.
@ AV_PIX_FMT_P012BE
like NV12, with 12bpp per component, data in the high bits, zeros in the low bits,...
int dst_format
Destination pixel format.
@ AV_PIX_FMT_P410LE
interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, little-endian
@ AV_PIX_FMT_YUVA420P10LE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
#define AV_PIX_FMT_FLAG_BAYER
The pixel format is following a Bayer pattern.
#define AV_LOG_INFO
Standard information.
@ AVCOL_TRC_BT709
also ITU-R BT1361
AVChromaLocation
Location of chroma samples.
@ AV_PIX_FMT_AYUV
packed AYUV 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), AYUVAYUV...
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
@ AV_PIX_FMT_BGRA64LE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
@ AV_PIX_FMT_YUVA422P10BE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
@ AV_PIX_FMT_UYVA
packed UYVA 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), UYVAUYVA...
static int handle_xyz(enum AVPixelFormat *format)
@ AV_PIX_FMT_YUVA444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
@ AV_PIX_FMT_YUVA422P9BE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
static SwsContext * sws[3]
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define AV_PIX_FMT_BGRA64
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
const int32_t ff_yuv2rgb_coeffs[11][4]
static void sws_shiftVec(SwsVector *a, int shift)
#define i(width, name, range_min, range_max)
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
void ff_sws_init_scale(SwsInternal *c)
#define AV_PIX_FMT_GBRP12
#define av_malloc_array(a, b)
AVColorSpace
YUV colorspace type.
@ AV_PIX_FMT_GRAY9BE
Y , 9bpp, big-endian.
@ AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
static int test_loc(enum AVChromaLocation loc)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
static double getSplineCoeff(double a, double b, double c, double d, double dist)
int sws_isSupportedOutput(enum AVPixelFormat pix_fmt)
Return a positive value if pix_fmt is a supported output format, 0 otherwise.
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
@ AV_PIX_FMT_XYZ12BE
packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big...
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
int sws_test_frame(const AVFrame *frame, int output)
Helper function to run all sws_test_* against a frame, as well as testing the basic frame properties ...
@ AV_PIX_FMT_NV21
as above, but U and V bytes are swapped
#define AV_PIX_FMT_BGR565
int dst_h
Width and height of the destination frame.
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
@ AV_PIX_FMT_YUV444P16BE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
@ AV_PIX_FMT_NV42
as above, but U and V bytes are swapped
void * av_calloc(size_t nmemb, size_t size)
#define AV_PIX_FMT_YUV444P9
void sws_freeFilter(SwsFilter *filter)
static av_always_inline int isFloat(enum AVPixelFormat pix_fmt)
void(* av_csp_eotf_function)(double Lw, double Lb, double c[3])
Function pointer representing an ITU EOTF transfer for a given reference display configuration.
This struct represents dynamic metadata for color volume transform - application 4 of SMPTE 2094-40:2...
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
@ AV_PIX_FMT_GRAY12BE
Y , 12bpp, big-endian.
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
SwsFormat ff_fmt_from_frame(const AVFrame *frame, int field)
This function also sanitizes and strips the input data, removing irrelevant fields for certain format...
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
#define FFSWAP(type, a, b)
@ AV_PIX_FMT_Y212LE
packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits,...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
@ AV_PIX_FMT_BAYER_BGGR16BE
bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
@ AV_PIX_FMT_P410BE
interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, big-endian
@ AV_PIX_FMT_P016LE
like NV12, with 16bpp per component, little-endian
@ AV_PIX_FMT_GRAYF32BE
IEEE-754 single precision Y, 32bpp, big-endian.
@ SWS_FULL_CHR_H_INT
Perform full chroma upsampling when upscaling to RGB.
SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
[Cross-referenced Snow codec documentation, flattened during extraction: half-pel interpolation filter coefficients (htaps/hcoeff), header fields stored as deltas (spatial_decomposition_type, qlog, mv_scale, qbias, block_max_depth, quant_table), the high-level bitstream-structure and decoding-process diagrams, the binary range coder with its state_transition_table, and neighboring-block/motion-vector prediction. See the Snow decoder documentation for the readable version.]
@ AV_PIX_FMT_RGBF16BE
IEEE-754 half precision packed RGB 16:16:16, 48bpp, RGBRGB..., big-endian.
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
@ AV_PIX_FMT_YUV444P12BE
planar YUV 4:4:4, 36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
#define AV_CPU_FLAG_MMX
standard MMX
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
@ AVCOL_TRC_ARIB_STD_B67
ARIB STD-B67, known as "Hybrid log-gamma".
double scaler_params[2]
Extra parameters for fine-tuning certain scalers.
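Illustrative sketch (editorial addition): the two tuning values are passed through the param argument of sws_getContext(); their meaning depends on the selected scaler (for example, the bicubic scaler reads them as its B/C-style parameters). The values below are arbitrary example numbers, not recommendations.

#include <libswscale/swscale.h>

/* Hypothetical helper: build a bicubic scaler with explicit tuning values. */
static struct SwsContext *make_tuned_scaler(int srcW, int srcH, int dstW, int dstH)
{
    const double params[2] = { 0.0, 0.6 };   /* example tuning, scaler-dependent */
    return sws_getContext(srcW, srcH, AV_PIX_FMT_YUV420P,
                          dstW, dstH, AV_PIX_FMT_YUV420P,
                          SWS_BICUBIC, NULL, NULL, params);
}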
#define AV_PIX_FMT_FLAG_XYZ
The pixel format contains XYZ-like data (as opposed to YUV/RGB/grayscale).
static void fill_rgb2yuv_table(SwsInternal *c, const int table[4], int dstRange)
@ SWS_PRINT_INFO
Emit verbose log of scaling parameters.
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
@ AV_PIX_FMT_XV48LE
packed XVYU 4:4:4, 64bpp, little-endian, variant of Y416 where alpha channel is left undefined
@ AV_PIX_FMT_YUV444P9LE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ SWS_ERROR_DIFFUSION
Set SwsContext.dither instead.
@ SWS_GAUSS
gaussian approximation
static int ref[MAX_W * MAX_W]
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
@ AV_PIX_FMT_P216LE
interleaved chroma YUV 4:2:2, 32bpp, little-endian
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
const char * description
human-readable description
#define INLINE_MMXEXT(flags)
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
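A small example (editorial addition) with illustrative input values: deriving a display aspect ratio from a storage aspect ratio and a sample aspect ratio.

#include <libavutil/rational.h>

static AVRational example_dar(void)
{
    AVRational storage = { 720, 576 };   /* coded width : height  */
    AVRational sar     = { 16, 15 };     /* sample (pixel) aspect */
    return av_mul_q(storage, sar);       /* reduces to 4:3        */
}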
@ AV_PIX_FMT_YUVA420P10BE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_RGB565BE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
int ff_infer_colors(SwsColor *src, SwsColor *dst)
@ AV_PIX_FMT_YUV420P16BE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUV422P16BE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_P212BE
interleaved chroma YUV 4:2:2, 24bpp, data in the high bits, big-endian
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
@ AV_PIX_FMT_X2BGR10LE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
@ AV_PIX_FMT_V30XLE
packed VYUX 4:4:4 like XV30, 32bpp, (msb)10V 10Y 10U 2X(lsb), little-endian
static av_always_inline int isBayer16BPS(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_P010LE
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits,...
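A sketch of what "data in the high bits" means in practice (editorial addition; the plane pointer, stride and coordinates are assumed to come from a decoded frame): each 10-bit sample sits in the upper bits of a 16-bit little-endian word, so reading it is a 16-bit load followed by a shift.

#include <stdint.h>
#include <libavutil/intreadwrite.h>

/* Read one 10-bit luma sample from a P010LE luma plane. */
static unsigned p010le_luma_sample(const uint8_t *y_plane, int stride, int x, int y)
{
    uint16_t v = AV_RL16(y_plane + y * stride + 2 * x); /* 16-bit LE container */
    return v >> 6;                                      /* data in the high 10 bits */
}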
int ff_init_filters(SwsInternal *c)
@ AV_PIX_FMT_XV48BE
packed XVYU 4:4:4, 64bpp, big-endian, variant of Y416 where alpha channel is left undefined
@ AV_PIX_FMT_YUVA444P10LE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Structure to hold side data for an AVFrame.
int src_format
Source pixel format.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
@ AV_PIX_FMT_BGR555LE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
const VDPAUPixFmtMap * map
int size_factor
size factor used when initializing the filters
@ AV_PIX_FMT_P216BE
interleaved chroma YUV 4:2:2, 32bpp, big-endian
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
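A brief example (editorial addition) with illustrative values, showing that the sum is returned in reduced form.

#include <libavutil/rational.h>

static AVRational example_sum(void)
{
    AVRational a = { 1, 25 };    /* one frame at 25 fps  */
    AVRational b = { 1, 50 };    /* one field at 50 Hz   */
    return av_add_q(a, b);       /* 3/50 after reduction */
}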
int dst_range
Destination is full range.
@ AV_PIX_FMT_P412LE
interleaved chroma YUV 4:4:4, 36bpp, data in the high bits, little-endian
@ AV_PIX_FMT_GRAY14LE
Y , 14bpp, little-endian.
AVPrimaryCoefficients prim
@ AV_PIX_FMT_XV36LE
packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, little-endian,...
static SwsVector * sws_sumVec(SwsVector *a, SwsVector *b)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
@ AV_PIX_FMT_GRAY14BE
Y , 14bpp, big-endian.
@ AV_PIX_FMT_YUVA422P16BE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV440P10BE
planar YUV 4:4:0, 20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_PIX_FMT_YUV422P9LE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P16LE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
void sws_free_context(SwsContext **pctx)
Free the context and everything associated with it, and write NULL to the provided pointer.
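A minimal sketch of the alloc/free pairing (editorial addition, assuming a context obtained from sws_alloc_context()):

#include <libswscale/swscale.h>

static void alloc_and_release(void)
{
    struct SwsContext *ctx = sws_alloc_context();
    if (!ctx)
        return;
    /* ... configure and use the context ... */
    sws_free_context(&ctx);   /* ctx is NULL after this call */
}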
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
av_csp_eotf_function av_csp_itu_eotf(enum AVColorTransferCharacteristic trc)
Returns the ITU EOTF corresponding to a given TRC.
#define flags(name, subs,...)
@ AVCHROMA_LOC_NB
Not part of ABI.
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
void avpriv_slicethread_free(AVSliceThread **pctx)
Destroy slice threading context.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
static SwsContext * alloc_set_opts(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, const double *param)
Allocate and return an SwsContext without performing initialization.
static const double coeff[2][5]
int src_h_chr_pos
Source horizontal chroma position.
static SwsInternal * sws_internal(const SwsContext *sws)
@ AV_PIX_FMT_GBRAP10BE
planar GBR 4:4:4:4 40bpp, big-endian
@ AVCOL_TRC_SMPTE428
SMPTE ST 428-1.
@ SWS_ACCURATE_RND
Force bit-exact output.
@ SWS_LANCZOS
3-tap sinc/sinc
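For illustration only (editorial addition): a floating-point version of the sinc/sinc (Lanczos) weighting. The library itself evaluates this in fixed point, and the lobe count is configurable via the scaler parameters; a = 3 corresponds to the "3-tap" default.

#include <math.h>

static double lanczos_weight(double x, double a)
{
    if (x == 0.0)
        return 1.0;
    if (fabs(x) >= a)
        return 0.0;
    return (sin(M_PI * x) / (M_PI * x)) *
           (sin(M_PI * x / a) / (M_PI * x / a));
}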
#define atomic_init(obj, value)
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
int dst_v_chr_pos
Destination vertical chroma position.
@ AV_PIX_FMT_VYU444
packed VYU 4:4:4, 24bpp (1 Cr & Cb sample per 1x1 Y), VYUVYU...
@ AV_PIX_FMT_YUVA422P12BE
planar YUV 4:2:2, 24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
AVColorRange
Visual content value range.
@ SWS_SINC
unwindowed sinc
Main external API structure.
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
@ AV_PIX_FMT_BGR444LE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
static int handle_formats(SwsContext *sws)
#define SWS_SRC_V_CHR_DROP_MASK
static double sws_dcVec(SwsVector *a)
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2, 24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
void sws_normalizeVec(SwsVector *a, double height)
Scale all the coefficients of a so that their sum equals height.
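Usage example (editorial addition): build a 3-tap vector and rescale it so its coefficients sum to 1.0, the usual normalization before using it as a blur/sharpen filter.

#include <libswscale/swscale.h>

static SwsVector *make_normalized_triangle(void)
{
    SwsVector *v = sws_allocVec(3);
    if (!v)
        return NULL;
    v->coeff[0] = 1.0;
    v->coeff[1] = 2.0;
    v->coeff[2] = 1.0;
    sws_normalizeVec(v, 1.0);   /* coefficients become 0.25, 0.5, 0.25 */
    return v;
}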
@ AV_PIX_FMT_YUVA420P9BE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
static av_cold int get_local_pos(SwsInternal *s, int chr_subsample, int pos, int dir)
@ AV_PIX_FMT_GBRAP14LE
planar GBR 4:4:4:4 56bpp, little-endian
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
enum AVColorPrimaries prim
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
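A trivial usage example (editorial addition), guarding against the NULL return for unknown formats:

#include <libavutil/pixdesc.h>

static const char *fmt_name_or_unknown(enum AVPixelFormat fmt)
{
    const char *name = av_get_pix_fmt_name(fmt);   /* NULL for invalid formats */
    return name ? name : "unknown";
}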
av_csp_eotf_function av_csp_itu_eotf_inv(enum AVColorTransferCharacteristic trc)
Returns the mathematical inverse of the corresponding EOTF.
@ AV_PIX_FMT_UYYVYY411
packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_BGR48BE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
@ AV_PIX_FMT_YUVA422P9LE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
static int context_init_threaded(SwsContext *sws, SwsFilter *src_filter, SwsFilter *dst_filter)