swscale.c
1 /*
2  * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <stdio.h>
23 #include <string.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/bswap.h"
27 #include "libavutil/common.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/emms.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "swscale_internal.h"
36 #include "swscale.h"
37 
38 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
39  { 36, 68, 60, 92, 34, 66, 58, 90, },
40  { 100, 4, 124, 28, 98, 2, 122, 26, },
41  { 52, 84, 44, 76, 50, 82, 42, 74, },
42  { 116, 20, 108, 12, 114, 18, 106, 10, },
43  { 32, 64, 56, 88, 38, 70, 62, 94, },
44  { 96, 0, 120, 24, 102, 6, 126, 30, },
45  { 48, 80, 40, 72, 54, 86, 46, 78, },
46  { 112, 16, 104, 8, 118, 22, 110, 14, },
47  { 36, 68, 60, 92, 34, 66, 58, 90, },
48 };
49 
50 DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
51  64, 64, 64, 64, 64, 64, 64, 64
52 };
53 
54 static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
55  int height, int y, uint8_t val)
56 {
57  int i;
58  uint8_t *ptr = plane + stride * y;
59  for (i = 0; i < height; i++) {
60  memset(ptr, val, width);
61  ptr += stride;
62  }
63 }
64 
65 static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW,
66  const uint8_t *_src, const int16_t *filter,
67  const int32_t *filterPos, int filterSize)
68 {
69  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
70  int i;
71  int32_t *dst = (int32_t *) _dst;
72  const uint16_t *src = (const uint16_t *) _src;
73  int bits = desc->comp[0].depth - 1;
74  int sh = bits - 4;
75 
76  if ((isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8) && desc->comp[0].depth<16) {
77  sh = 9;
78  } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
79  sh = 16 - 1 - 4;
80  }
81 
82  for (i = 0; i < dstW; i++) {
83  int j;
84  int srcPos = filterPos[i];
85  int val = 0;
86 
87  for (j = 0; j < filterSize; j++) {
88  val += src[srcPos + j] * filter[filterSize * i + j];
89  }
90  // filter=14 bit, input=16 bit, output=30 bit, >> 11 makes 19 bit
91  dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
92  }
93 }
94 
95 static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW,
96  const uint8_t *_src, const int16_t *filter,
97  const int32_t *filterPos, int filterSize)
98 {
99  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
100  int i;
101  const uint16_t *src = (const uint16_t *) _src;
102  int sh = desc->comp[0].depth - 1;
103 
104  if (sh<15) {
105  sh = isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
106  } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
107  sh = 16 - 1;
108  }
109 
110  for (i = 0; i < dstW; i++) {
111  int j;
112  int srcPos = filterPos[i];
113  int val = 0;
114 
115  for (j = 0; j < filterSize; j++) {
116  val += src[srcPos + j] * filter[filterSize * i + j];
117  }
118  // filter=14 bit, input=16 bit, output=30 bit, >> 15 makes 15 bit
119  dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
120  }
121 }
122 
123 // bilinear / bicubic scaling
124 static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW,
125  const uint8_t *src, const int16_t *filter,
126  const int32_t *filterPos, int filterSize)
127 {
128  int i;
129  for (i = 0; i < dstW; i++) {
130  int j;
131  int srcPos = filterPos[i];
132  int val = 0;
133  for (j = 0; j < filterSize; j++) {
134  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
135  }
136  dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // the cubic equation does overflow ...
137  }
138 }
139 
140 static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW,
141  const uint8_t *src, const int16_t *filter,
142  const int32_t *filterPos, int filterSize)
143 {
144  int i;
145  int32_t *dst = (int32_t *) _dst;
146  for (i = 0; i < dstW; i++) {
147  int j;
148  int srcPos = filterPos[i];
149  int val = 0;
150  for (j = 0; j < filterSize; j++) {
151  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
152  }
153  dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
154  }
155 }
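/*
 * [Editor's note, not part of the original source] The hScale*_c functions above
 * share one data layout: for output pixel i, filterPos[i] is the index of the
 * first input sample and filter[i * filterSize + j] is the 14-bit fixed-point
 * weight of input sample filterPos[i] + j, so each output value is a small FIR
 * convolution:
 *
 *     dst[i] = (sum_j src[filterPos[i] + j] * filter[i * filterSize + j]) >> shift
 *
 * The shift differs per variant so that the result fits the 15-bit or 19-bit
 * intermediate format consumed by the vertical scaler.
 */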
156 
157 // FIXME all pal and rgb srcFormats could do this conversion as well
158 // FIXME all scalers more complex than bilinear could do half of this transform
159 static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width,
160  uint32_t _coeff, int64_t _offset)
161 {
162  uint16_t coeff = _coeff;
163  int32_t offset = _offset;
164  int i;
165  for (i = 0; i < width; i++) {
166  int U = (dstU[i] * coeff + offset) >> 14;
167  int V = (dstV[i] * coeff + offset) >> 14;
168  dstU[i] = FFMIN(U, (1 << 15) - 1);
169  dstV[i] = FFMIN(V, (1 << 15) - 1);
170  }
171 }
172 
173 static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width,
174  uint32_t _coeff, int64_t _offset)
175 {
176  uint16_t coeff = _coeff;
177  int32_t offset = _offset;
178  int i;
179  for (i = 0; i < width; i++) {
180  dstU[i] = (dstU[i] * coeff + offset) >> 14;
181  dstV[i] = (dstV[i] * coeff + offset) >> 14;
182  }
183 }
184 
185 static void lumRangeToJpeg_c(int16_t *dst, int width,
186  uint32_t _coeff, int64_t _offset)
187 {
188  uint16_t coeff = _coeff;
189  int32_t offset = _offset;
190  int i;
191  for (i = 0; i < width; i++) {
192  int Y = (dst[i] * coeff + offset) >> 14;
193  dst[i] = FFMIN(Y, (1 << 15) - 1);
194  }
195 }
196 
197 static void lumRangeFromJpeg_c(int16_t *dst, int width,
198  uint32_t _coeff, int64_t _offset)
199 {
200  uint16_t coeff = _coeff;
201  int32_t offset = _offset;
202  int i;
203  for (i = 0; i < width; i++)
204  dst[i] = (dst[i] * coeff + offset) >> 14;
205 }
206 
207 static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
208  uint32_t coeff, int64_t offset)
209 {
210  int i;
211  int32_t *dstU = (int32_t *) _dstU;
212  int32_t *dstV = (int32_t *) _dstV;
213  for (i = 0; i < width; i++) {
214  int U = ((int64_t) dstU[i] * coeff + offset) >> 18;
215  int V = ((int64_t) dstV[i] * coeff + offset) >> 18;
216  dstU[i] = FFMIN(U, (1 << 19) - 1);
217  dstV[i] = FFMIN(V, (1 << 19) - 1);
218  }
219 }
220 
221 static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
222  uint32_t coeff, int64_t offset)
223 {
224  int i;
225  int32_t *dstU = (int32_t *) _dstU;
226  int32_t *dstV = (int32_t *) _dstV;
227  for (i = 0; i < width; i++) {
228  dstU[i] = ((int64_t) dstU[i] * coeff + offset) >> 18;
229  dstV[i] = ((int64_t) dstV[i] * coeff + offset) >> 18;
230  }
231 }
232 
233 static void lumRangeToJpeg16_c(int16_t *_dst, int width,
234  uint32_t coeff, int64_t offset)
235 {
236  int i;
237  int32_t *dst = (int32_t *) _dst;
238  for (i = 0; i < width; i++) {
239  int Y = ((int64_t) dst[i] * coeff + offset) >> 18;
240  dst[i] = FFMIN(Y, (1 << 19) - 1);
241  }
242 }
243 
244 static void lumRangeFromJpeg16_c(int16_t *_dst, int width,
245  uint32_t coeff, int64_t offset)
246 {
247  int i;
248  int32_t *dst = (int32_t *) _dst;
249  for (i = 0; i < width; i++)
250  dst[i] = ((int64_t) dst[i] * coeff + offset) >> 18;
251 }
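/*
 * [Editor's note, not part of the original source] The lum/chrRange{To,From}Jpeg
 * helpers above remap limited (MPEG) range to full (JPEG) range and back on the
 * horizontally scaled intermediates: the plain variants operate on 15-bit samples
 * with a shift of 14, the *16 variants on the 19-bit samples used for >14 bpc
 * output with a shift of 18. coeff and offset are precomputed by
 * init_range_convert_constants() and solve_range_convert() further below.
 */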
252 
253 
254 #define DEBUG_SWSCALE_BUFFERS 0
255 #define DEBUG_BUFFERS(...) \
256  if (DEBUG_SWSCALE_BUFFERS) \
257  av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
258 
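/*
 * [Editor's note, not part of the original source] ff_swscale() below is the
 * generic scaling loop: for every output line it computes the range of input
 * lines required by the vertical filter (vLumFilterPos/vChrFilterPos), runs the
 * horizontal-scaling descriptors to fill the luma and chroma line ring buffers,
 * and then runs the vertical descriptors to produce the output line. If the
 * current input slice does not contain enough lines, the loop breaks and resumes
 * when the caller provides the next slice.
 */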
259 int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
260  int srcSliceY, int srcSliceH, uint8_t *const dst[],
261  const int dstStride[], int dstSliceY, int dstSliceH)
262 {
263  const int scale_dst = dstSliceY > 0 || dstSliceH < c->opts.dst_h;
264 
265  /* load a few things into local vars to make the code more readable?
266  * and faster */
267  const int dstW = c->opts.dst_w;
268  int dstH = c->opts.dst_h;
269 
270  const enum AVPixelFormat dstFormat = c->opts.dst_format;
271  const int flags = c->opts.flags;
272  int32_t *vLumFilterPos = c->vLumFilterPos;
273  int32_t *vChrFilterPos = c->vChrFilterPos;
274 
275  const int vLumFilterSize = c->vLumFilterSize;
276  const int vChrFilterSize = c->vChrFilterSize;
277 
278  yuv2planar1_fn yuv2plane1 = c->yuv2plane1;
279  yuv2planarX_fn yuv2planeX = c->yuv2planeX;
280  yuv2interleavedX_fn yuv2nv12cX = c->yuv2nv12cX;
281  yuv2packed1_fn yuv2packed1 = c->yuv2packed1;
282  yuv2packed2_fn yuv2packed2 = c->yuv2packed2;
283  yuv2packedX_fn yuv2packedX = c->yuv2packedX;
284  yuv2anyX_fn yuv2anyX = c->yuv2anyX;
285  const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
286  const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
287  int should_dither = isNBPS(c->opts.src_format) ||
288  is16BPS(c->opts.src_format);
289  int lastDstY;
290 
291  /* vars which will change and which we need to store back in the context */
292  int dstY = c->dstY;
293  int lastInLumBuf = c->lastInLumBuf;
294  int lastInChrBuf = c->lastInChrBuf;
295 
296  int lumStart = 0;
297  int lumEnd = c->descIndex[0];
298  int chrStart = lumEnd;
299  int chrEnd = c->descIndex[1];
300  int vStart = chrEnd;
301  int vEnd = c->numDesc;
302  SwsSlice *src_slice = &c->slice[lumStart];
303  SwsSlice *hout_slice = &c->slice[c->numSlice-2];
304  SwsSlice *vout_slice = &c->slice[c->numSlice-1];
305  SwsFilterDescriptor *desc = c->desc;
306 
307  int needAlpha = c->needAlpha;
308 
309  int hasLumHoles = 1;
310  int hasChrHoles = 1;
311 
312  const uint8_t *src2[4];
313  int srcStride2[4];
314 
315  if (isPacked(c->opts.src_format)) {
316  src2[0] =
317  src2[1] =
318  src2[2] =
319  src2[3] = src[0];
320  srcStride2[0] =
321  srcStride2[1] =
322  srcStride2[2] =
323  srcStride2[3] = srcStride[0];
324  } else {
325  memcpy(src2, src, sizeof(src2));
326  memcpy(srcStride2, srcStride, sizeof(srcStride2));
327  }
328 
329  srcStride2[1] *= 1 << c->vChrDrop;
330  srcStride2[2] *= 1 << c->vChrDrop;
331 
332  DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
333  src2[0], srcStride2[0], src2[1], srcStride2[1],
334  src2[2], srcStride2[2], src2[3], srcStride2[3],
335  dst[0], dstStride[0], dst[1], dstStride[1],
336  dst[2], dstStride[2], dst[3], dstStride[3]);
337  DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
338  srcSliceY, srcSliceH, dstY, dstH);
339  DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
340  vLumFilterSize, vChrFilterSize);
341 
342  if (dstStride[0]&15 || dstStride[1]&15 ||
343  dstStride[2]&15 || dstStride[3]&15) {
344  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
345  if (flags & SWS_PRINT_INFO &&
346  !atomic_exchange_explicit(&ctx->stride_unaligned_warned, 1, memory_order_relaxed)) {
347  av_log(c, AV_LOG_WARNING,
348  "Warning: dstStride is not aligned!\n"
349  " ->cannot do aligned memory accesses anymore\n");
350  }
351  }
352 
353 #if ARCH_X86
354  if ( (uintptr_t) dst[0]&15 || (uintptr_t) dst[1]&15 || (uintptr_t) dst[2]&15
355  || (uintptr_t)src2[0]&15 || (uintptr_t)src2[1]&15 || (uintptr_t)src2[2]&15
356  || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
357  || srcStride2[0]&15 || srcStride2[1]&15 || srcStride2[2]&15 || srcStride2[3]&15
358  ) {
359  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
360  int cpu_flags = av_get_cpu_flags();
361  if (flags & SWS_PRINT_INFO && HAVE_MMXEXT && (cpu_flags & AV_CPU_FLAG_SSE2) &&
362  !atomic_exchange_explicit(&ctx->stride_unaligned_warned,1, memory_order_relaxed)) {
363  av_log(c, AV_LOG_WARNING, "Warning: data is not aligned! This can lead to a speed loss\n");
364  }
365  }
366 #endif
367 
368  if (scale_dst) {
369  dstY = dstSliceY;
370  dstH = dstY + dstSliceH;
371  lastInLumBuf = -1;
372  lastInChrBuf = -1;
373  } else if (srcSliceY == 0) {
374  /* Note the user might start scaling the picture in the middle so this
375  * will not get executed. This is not really intended but works
376  * currently, so people might do it. */
377  dstY = 0;
378  lastInLumBuf = -1;
379  lastInChrBuf = -1;
380  }
381 
382  if (!should_dither) {
383  c->chrDither8 = c->lumDither8 = sws_pb_64;
384  }
385  lastDstY = dstY;
386 
387  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
388  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, c->use_mmx_vfilter);
389 
390  ff_init_slice_from_src(src_slice, (uint8_t**)src2, srcStride2, c->opts.src_w,
391  srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
392 
393  ff_init_slice_from_src(vout_slice, (uint8_t**)dst, dstStride, c->opts.dst_w,
394  dstY, dstSliceH, dstY >> c->chrDstVSubSample,
395  AV_CEIL_RSHIFT(dstSliceH, c->chrDstVSubSample), scale_dst);
396  if (srcSliceY == 0) {
397  hout_slice->plane[0].sliceY = lastInLumBuf + 1;
398  hout_slice->plane[1].sliceY = lastInChrBuf + 1;
399  hout_slice->plane[2].sliceY = lastInChrBuf + 1;
400  hout_slice->plane[3].sliceY = lastInLumBuf + 1;
401 
402  hout_slice->plane[0].sliceH =
403  hout_slice->plane[1].sliceH =
404  hout_slice->plane[2].sliceH =
405  hout_slice->plane[3].sliceH = 0;
406  hout_slice->width = dstW;
407  }
408 
409  for (; dstY < dstH; dstY++) {
410  const int chrDstY = dstY >> c->chrDstVSubSample;
411  int use_mmx_vfilter= c->use_mmx_vfilter;
412 
413  // First line needed as input
414  const int firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
415  const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1), c->opts.dst_h - 1)]);
416  // First line needed as input
417  const int firstChrSrcY = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
418 
419  // Last line needed as input
420  int lastLumSrcY = FFMIN(c->opts.src_h, firstLumSrcY + vLumFilterSize) - 1;
421  int lastLumSrcY2 = FFMIN(c->opts.src_h, firstLumSrcY2 + vLumFilterSize) - 1;
422  int lastChrSrcY = FFMIN(c->chrSrcH, firstChrSrcY + vChrFilterSize) - 1;
423  int enough_lines;
424 
425  int i;
426  int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
427 
428  // handle holes (FAST_BILINEAR & weird filters)
429  if (firstLumSrcY > lastInLumBuf) {
430 
431  hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
432  if (hasLumHoles) {
433  hout_slice->plane[0].sliceY = firstLumSrcY;
434  hout_slice->plane[3].sliceY = firstLumSrcY;
435  hout_slice->plane[0].sliceH =
436  hout_slice->plane[3].sliceH = 0;
437  }
438 
439  lastInLumBuf = firstLumSrcY - 1;
440  }
441  if (firstChrSrcY > lastInChrBuf) {
442 
443  hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
444  if (hasChrHoles) {
445  hout_slice->plane[1].sliceY = firstChrSrcY;
446  hout_slice->plane[2].sliceY = firstChrSrcY;
447  hout_slice->plane[1].sliceH =
448  hout_slice->plane[2].sliceH = 0;
449  }
450 
451  lastInChrBuf = firstChrSrcY - 1;
452  }
453 
454  DEBUG_BUFFERS("dstY: %d\n", dstY);
455  DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
456  firstLumSrcY, lastLumSrcY, lastInLumBuf);
457  DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
458  firstChrSrcY, lastChrSrcY, lastInChrBuf);
459 
460  // Do we have enough lines in this slice to output the dstY line
461  enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
462  lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);
463 
464  if (!enough_lines) {
465  lastLumSrcY = srcSliceY + srcSliceH - 1;
466  lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
467  DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
468  lastLumSrcY, lastChrSrcY);
469  }
470 
471  av_assert0((lastLumSrcY - firstLumSrcY + 1) <= hout_slice->plane[0].available_lines);
472  av_assert0((lastChrSrcY - firstChrSrcY + 1) <= hout_slice->plane[1].available_lines);
473 
474 
475  posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
476  if (posY <= lastLumSrcY && !hasLumHoles) {
477  firstPosY = FFMAX(firstLumSrcY, posY);
478  lastPosY = FFMIN(firstLumSrcY + hout_slice->plane[0].available_lines - 1, srcSliceY + srcSliceH - 1);
479  } else {
480  firstPosY = posY;
481  lastPosY = lastLumSrcY;
482  }
483 
484  cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
485  if (cPosY <= lastChrSrcY && !hasChrHoles) {
486  firstCPosY = FFMAX(firstChrSrcY, cPosY);
487  lastCPosY = FFMIN(firstChrSrcY + hout_slice->plane[1].available_lines - 1, AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample) - 1);
488  } else {
489  firstCPosY = cPosY;
490  lastCPosY = lastChrSrcY;
491  }
492 
493  ff_rotate_slice(hout_slice, lastPosY, lastCPosY);
494 
495  if (posY < lastLumSrcY + 1) {
496  for (i = lumStart; i < lumEnd; ++i)
497  desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
498  }
499 
500  lastInLumBuf = lastLumSrcY;
501 
502  if (cPosY < lastChrSrcY + 1) {
503  for (i = chrStart; i < chrEnd; ++i)
504  desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
505  }
506 
507  lastInChrBuf = lastChrSrcY;
508 
509  if (!enough_lines)
510  break; // we can't output a dstY line so let's try with the next slice
511 
512 #if HAVE_MMX_INLINE
513  ff_updateMMXDitherTables(c, dstY);
514  c->dstW_mmx = c->opts.dst_w;
515 #endif
516  if (should_dither) {
517  c->chrDither8 = ff_dither_8x8_128[chrDstY & 7];
518  c->lumDither8 = ff_dither_8x8_128[dstY & 7];
519  }
520  if (dstY >= c->opts.dst_h - 2) {
521  /* hmm looks like we can't use MMX here without overwriting
522  * this array's tail */
523  ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
524  &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
525  use_mmx_vfilter= 0;
526  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
527  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
528  }
529 
530  for (i = vStart; i < vEnd; ++i)
531  desc[i].process(c, &desc[i], dstY, 1);
532  }
533  if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
534  int offset = lastDstY - dstSliceY;
535  int length = dstW;
536  int height = dstY - lastDstY;
537 
538  if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
539  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
540  fillPlane16(dst[3], dstStride[3], length, height, offset,
541  1, desc->comp[3].depth,
542  isBE(dstFormat));
543  } else if (is32BPS(dstFormat)) {
544  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
545  fillPlane32(dst[3], dstStride[3], length, height, offset,
546  1, desc->comp[3].depth,
547  isBE(dstFormat), desc->flags & AV_PIX_FMT_FLAG_FLOAT);
548  } else
549  fillPlane(dst[3], dstStride[3], length, height, offset, 255);
550  }
551 
552 #if HAVE_MMXEXT_INLINE
553  if (av_get_cpu_flags() & AV_CPU_FLAG_MMXEXT)
554  __asm__ volatile ("sfence" ::: "memory");
555 #endif
556  emms_c();
557 
558  /* store changed local vars back in the context */
559  c->dstY = dstY;
560  c->lastInLumBuf = lastInLumBuf;
561  c->lastInChrBuf = lastInChrBuf;
562 
563  return dstY - lastDstY;
564 }
565 
566 /*
567  * Solve for coeff and offset:
568  * dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift)
569  *
570  * If SwsInternal->dstBpc is > 14, coeff is uint32_t and offset is int64_t,
571  * otherwise (SwsInternal->dstBpc is <= 14) coeff is uint16_t and offset is
572  * int32_t.
573  */
574 static void solve_range_convert(uint16_t src_min, uint16_t src_max,
575  uint16_t dst_min, uint16_t dst_max,
576  int src_bits, int src_shift, int mult_shift,
577  uint32_t *coeff, int64_t *offset)
578 {
579  uint16_t src_range = src_max - src_min;
580  uint16_t dst_range = dst_max - dst_min;
581  int total_shift = mult_shift + src_shift;
582  *coeff = AV_CEIL_RSHIFT(((uint64_t) dst_range << total_shift) / src_range, src_shift);
583  *offset = ((int64_t) dst_max << total_shift) -
584  ((int64_t) src_max << src_shift) * *coeff;
585 }
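/*
 * [Editor's note, not part of the original source] A quick check of the solver:
 * substituting src = src_max into the equation above gives
 *     ((src_max << src_shift) * coeff + offset) >> total_shift
 *   = ((src_max << src_shift) * coeff
 *      + (dst_max << total_shift) - (src_max << src_shift) * coeff) >> total_shift
 *   = dst_max,
 * so the top of the source range always maps exactly to the top of the
 * destination range, independent of how coeff was rounded.
 */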
586 
587 static void init_range_convert_constants(SwsInternal *c)
588 {
589  const int bit_depth = c->dstBpc ? c->dstBpc : 8;
590  const int src_bits = bit_depth <= 14 ? 15 : 19;
591  const int src_shift = src_bits - bit_depth;
592  const int mult_shift = bit_depth <= 14 ? 14 : 18;
593  const uint16_t mpeg_min = 16U << (bit_depth - 8);
594  const uint16_t mpeg_max_lum = 235U << (bit_depth - 8);
595  const uint16_t mpeg_max_chr = 240U << (bit_depth - 8);
596  const uint16_t jpeg_max = (1U << bit_depth) - 1;
597  uint16_t src_min, src_max_lum, src_max_chr;
598  uint16_t dst_min, dst_max_lum, dst_max_chr;
599  if (c->opts.src_range) {
600  src_min = 0;
601  src_max_lum = jpeg_max;
602  src_max_chr = jpeg_max;
603  dst_min = mpeg_min;
604  dst_max_lum = mpeg_max_lum;
605  dst_max_chr = mpeg_max_chr;
606  } else {
607  src_min = mpeg_min;
608  src_max_lum = mpeg_max_lum;
609  src_max_chr = mpeg_max_chr;
610  dst_min = 0;
611  dst_max_lum = jpeg_max;
612  dst_max_chr = jpeg_max;
613  }
614  solve_range_convert(src_min, src_max_lum, dst_min, dst_max_lum,
615  src_bits, src_shift, mult_shift,
616  &c->lumConvertRange_coeff, &c->lumConvertRange_offset);
617  solve_range_convert(src_min, src_max_chr, dst_min, dst_max_chr,
618  src_bits, src_shift, mult_shift,
619  &c->chrConvertRange_coeff, &c->chrConvertRange_offset);
620 }
621 
622 av_cold void ff_sws_init_range_convert(SwsInternal *c)
623 {
624  c->lumConvertRange = NULL;
625  c->chrConvertRange = NULL;
626  if (c->opts.src_range != c->opts.dst_range && !isAnyRGB(c->opts.dst_format)) {
627  init_range_convert_constants(c);
628  if (c->dstBpc <= 14) {
629  if (c->opts.src_range) {
630  c->lumConvertRange = lumRangeFromJpeg_c;
631  c->chrConvertRange = chrRangeFromJpeg_c;
632  } else {
633  c->lumConvertRange = lumRangeToJpeg_c;
634  c->chrConvertRange = chrRangeToJpeg_c;
635  }
636  } else {
637  if (c->opts.src_range) {
638  c->lumConvertRange = lumRangeFromJpeg16_c;
639  c->chrConvertRange = chrRangeFromJpeg16_c;
640  } else {
641  c->lumConvertRange = lumRangeToJpeg16_c;
642  c->chrConvertRange = chrRangeToJpeg16_c;
643  }
644  }
645 
646 #if ARCH_AARCH64
647  ff_sws_init_range_convert_aarch64(c);
648 #elif ARCH_LOONGARCH64
649  ff_sws_init_range_convert_loongarch(c);
650 #elif ARCH_RISCV
651  ff_sws_init_range_convert_riscv(c);
652 #elif ARCH_X86
653  ff_sws_init_range_convert_x86(c);
654 #endif
655  }
656 }
657 
658 static av_cold void sws_init_swscale(SwsInternal *c)
659 {
660  enum AVPixelFormat srcFormat = c->opts.src_format;
661 
662  ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
663  &c->yuv2nv12cX, &c->yuv2packed1,
664  &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);
665 
666  ff_sws_init_input_funcs(c, &c->lumToYV12, &c->alpToYV12, &c->chrToYV12,
667  &c->readLumPlanar, &c->readAlpPlanar, &c->readChrPlanar);
668 
669  if (c->srcBpc == 8) {
670  if (c->dstBpc <= 14) {
671  c->hyScale = c->hcScale = hScale8To15_c;
672  if (c->opts.flags & SWS_FAST_BILINEAR) {
673  c->hyscale_fast = ff_hyscale_fast_c;
674  c->hcscale_fast = ff_hcscale_fast_c;
675  }
676  } else {
677  c->hyScale = c->hcScale = hScale8To19_c;
678  }
679  } else {
680  c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
681  : hScale16To15_c;
682  }
683 
684  ff_sws_init_range_convert(c);
685 
686  if (!(isGray(srcFormat) || isGray(c->opts.dst_format) ||
687  srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
688  c->needs_hcscale = 1;
689 }
690 
691 av_cold void ff_sws_init_scale(SwsInternal *c)
692 {
693  sws_init_swscale(c);
694 
695 #if ARCH_PPC
696  ff_sws_init_swscale_ppc(c);
697 #elif ARCH_X86
698  ff_sws_init_swscale_x86(c);
699 #elif ARCH_AARCH64
700  ff_sws_init_swscale_aarch64(c);
701 #elif ARCH_ARM
702  ff_sws_init_swscale_arm(c);
703 #elif ARCH_LOONGARCH64
704  ff_sws_init_swscale_loongarch(c);
705 #elif ARCH_RISCV
706  ff_sws_init_swscale_riscv(c);
707 #endif
708 }
709 
710 static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
711 {
712  if (!isALPHA(format))
713  src[3] = NULL;
714  if (!isPlanar(format)) {
715  src[3] = src[2] = NULL;
716 
717  if (!usePal(format))
718  src[1] = NULL;
719  }
720 }
721 
722 static int check_image_pointers(const uint8_t * const data[4], enum AVPixelFormat pix_fmt,
723  const int linesizes[4])
724 {
725  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
726  int i;
727 
728  av_assert2(desc);
729 
730  for (i = 0; i < 4; i++) {
731  int plane = desc->comp[i].plane;
732  if (!data[plane] || !linesizes[plane])
733  return 0;
734  }
735 
736  return 1;
737 }
738 
739 void ff_xyz12Torgb48(const SwsInternal *c, uint8_t *dst, int dst_stride,
740  const uint8_t *src, int src_stride, int w, int h)
741 {
742  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
743 
744  for (int yp = 0; yp < h; yp++) {
745  const uint16_t *src16 = (const uint16_t *) src;
746  uint16_t *dst16 = (uint16_t *) dst;
747 
748  for (int xp = 0; xp < 3 * w; xp += 3) {
749  int x, y, z, r, g, b;
750 
751  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
752  x = AV_RB16(src16 + xp + 0);
753  y = AV_RB16(src16 + xp + 1);
754  z = AV_RB16(src16 + xp + 2);
755  } else {
756  x = AV_RL16(src16 + xp + 0);
757  y = AV_RL16(src16 + xp + 1);
758  z = AV_RL16(src16 + xp + 2);
759  }
760 
761  x = c->xyzgamma[x >> 4];
762  y = c->xyzgamma[y >> 4];
763  z = c->xyzgamma[z >> 4];
764 
765  // convert from XYZlinear to sRGBlinear
766  r = c->xyz2rgb_matrix[0][0] * x +
767  c->xyz2rgb_matrix[0][1] * y +
768  c->xyz2rgb_matrix[0][2] * z >> 12;
769  g = c->xyz2rgb_matrix[1][0] * x +
770  c->xyz2rgb_matrix[1][1] * y +
771  c->xyz2rgb_matrix[1][2] * z >> 12;
772  b = c->xyz2rgb_matrix[2][0] * x +
773  c->xyz2rgb_matrix[2][1] * y +
774  c->xyz2rgb_matrix[2][2] * z >> 12;
775 
776  // limit values to 12-bit depth
777  r = av_clip_uintp2(r, 12);
778  g = av_clip_uintp2(g, 12);
779  b = av_clip_uintp2(b, 12);
780 
781  // convert from sRGBlinear to RGB and scale from 12bit to 16bit
782  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
783  AV_WB16(dst16 + xp + 0, c->rgbgamma[r] << 4);
784  AV_WB16(dst16 + xp + 1, c->rgbgamma[g] << 4);
785  AV_WB16(dst16 + xp + 2, c->rgbgamma[b] << 4);
786  } else {
787  AV_WL16(dst16 + xp + 0, c->rgbgamma[r] << 4);
788  AV_WL16(dst16 + xp + 1, c->rgbgamma[g] << 4);
789  AV_WL16(dst16 + xp + 2, c->rgbgamma[b] << 4);
790  }
791  }
792 
793  src += src_stride;
794  dst += dst_stride;
795  }
796 }
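/*
 * [Editor's note, not part of the original source] ff_xyz12Torgb48() above stores
 * 12-bit X'Y'Z' in 16-bit containers: samples are reduced with >> 4 before the
 * 12-bit gamma LUTs, the 3x3 matrix is applied in Q12 fixed point (hence the
 * >> 12), results are clipped to 12 bits, and the inverse-gamma output is
 * expanded back to 16 bits with << 4. ff_rgb48Toxyz12() below is the mirror
 * operation for XYZ output.
 */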
797 
798 void ff_rgb48Toxyz12(const SwsInternal *c, uint8_t *dst, int dst_stride,
799  const uint8_t *src, int src_stride, int w, int h)
800 {
801  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.dst_format);
802 
803  for (int yp = 0; yp < h; yp++) {
804  uint16_t *src16 = (uint16_t *) src;
805  uint16_t *dst16 = (uint16_t *) dst;
806 
807  for (int xp = 0; xp < 3 * w; xp += 3) {
808  int x, y, z, r, g, b;
809 
810  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
811  r = AV_RB16(src16 + xp + 0);
812  g = AV_RB16(src16 + xp + 1);
813  b = AV_RB16(src16 + xp + 2);
814  } else {
815  r = AV_RL16(src16 + xp + 0);
816  g = AV_RL16(src16 + xp + 1);
817  b = AV_RL16(src16 + xp + 2);
818  }
819 
820  r = c->rgbgammainv[r>>4];
821  g = c->rgbgammainv[g>>4];
822  b = c->rgbgammainv[b>>4];
823 
824  // convert from sRGBlinear to XYZlinear
825  x = c->rgb2xyz_matrix[0][0] * r +
826  c->rgb2xyz_matrix[0][1] * g +
827  c->rgb2xyz_matrix[0][2] * b >> 12;
828  y = c->rgb2xyz_matrix[1][0] * r +
829  c->rgb2xyz_matrix[1][1] * g +
830  c->rgb2xyz_matrix[1][2] * b >> 12;
831  z = c->rgb2xyz_matrix[2][0] * r +
832  c->rgb2xyz_matrix[2][1] * g +
833  c->rgb2xyz_matrix[2][2] * b >> 12;
834 
835  // limit values to 12-bit depth
836  x = av_clip_uintp2(x, 12);
837  y = av_clip_uintp2(y, 12);
838  z = av_clip_uintp2(z, 12);
839 
840  // convert from XYZlinear to X'Y'Z' and scale from 12bit to 16bit
841  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
842  AV_WB16(dst16 + xp + 0, c->xyzgammainv[x] << 4);
843  AV_WB16(dst16 + xp + 1, c->xyzgammainv[y] << 4);
844  AV_WB16(dst16 + xp + 2, c->xyzgammainv[z] << 4);
845  } else {
846  AV_WL16(dst16 + xp + 0, c->xyzgammainv[x] << 4);
847  AV_WL16(dst16 + xp + 1, c->xyzgammainv[y] << 4);
848  AV_WL16(dst16 + xp + 2, c->xyzgammainv[z] << 4);
849  }
850  }
851 
852  src += src_stride;
853  dst += dst_stride;
854  }
855 }
856 
857 void ff_update_palette(SwsInternal *c, const uint32_t *pal)
858 {
859  for (int i = 0; i < 256; i++) {
860  int r, g, b, y, u, v, a = 0xff;
861  if (c->opts.src_format == AV_PIX_FMT_PAL8) {
862  uint32_t p = pal[i];
863  a = (p >> 24) & 0xFF;
864  r = (p >> 16) & 0xFF;
865  g = (p >> 8) & 0xFF;
866  b = p & 0xFF;
867  } else if (c->opts.src_format == AV_PIX_FMT_RGB8) {
868  r = ( i >> 5 ) * 36;
869  g = ((i >> 2) & 7) * 36;
870  b = ( i & 3) * 85;
871  } else if (c->opts.src_format == AV_PIX_FMT_BGR8) {
872  b = ( i >> 6 ) * 85;
873  g = ((i >> 3) & 7) * 36;
874  r = ( i & 7) * 36;
875  } else if (c->opts.src_format == AV_PIX_FMT_RGB4_BYTE) {
876  r = ( i >> 3 ) * 255;
877  g = ((i >> 1) & 3) * 85;
878  b = ( i & 1) * 255;
879  } else if (c->opts.src_format == AV_PIX_FMT_GRAY8 || c->opts.src_format == AV_PIX_FMT_GRAY8A) {
880  r = g = b = i;
881  } else {
882  av_assert1(c->opts.src_format == AV_PIX_FMT_BGR4_BYTE);
883  b = ( i >> 3 ) * 255;
884  g = ((i >> 1) & 3) * 85;
885  r = ( i & 1) * 255;
886  }
887 #define RGB2YUV_SHIFT 15
888 #define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
889 #define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
890 #define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
891 #define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
892 #define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
893 #define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
894 #define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
895 #define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
896 #define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
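/*
 * [Editor's note, not part of the original source] These constants are the
 * BT.601 RGB->YCbCr weights scaled to limited range (219/255 for luma, 224/255
 * for chroma) in Q15 fixed point. The rounding terms used below,
 * (33 << (RGB2YUV_SHIFT - 1)) and (257 << (RGB2YUV_SHIFT - 1)), equal
 * (16 + 0.5) << RGB2YUV_SHIFT and (128 + 0.5) << RGB2YUV_SHIFT, i.e. the
 * limited-range black level / chroma midpoint plus 0.5 for rounding.
 */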
897 
898  y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
899  u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
900  v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
901  c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24);
902 
903  switch (c->opts.dst_format) {
904  case AV_PIX_FMT_BGR32:
905 #if !HAVE_BIGENDIAN
906  case AV_PIX_FMT_RGB24:
907 #endif
908  c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24);
909  break;
910  case AV_PIX_FMT_BGR32_1:
911 #if HAVE_BIGENDIAN
912  case AV_PIX_FMT_BGR24:
913 #endif
914  c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24);
915  break;
916  case AV_PIX_FMT_RGB32_1:
917 #if HAVE_BIGENDIAN
918  case AV_PIX_FMT_RGB24:
919 #endif
920  c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24);
921  break;
922  case AV_PIX_FMT_RGB32:
923 #if !HAVE_BIGENDIAN
924  case AV_PIX_FMT_BGR24:
925 #endif
926  default:
927  c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24);
928  }
929  }
930 }
931 
932 static int scale_internal(SwsContext *sws,
933  const uint8_t * const srcSlice[], const int srcStride[],
934  int srcSliceY, int srcSliceH,
935  uint8_t *const dstSlice[], const int dstStride[],
936  int dstSliceY, int dstSliceH);
937 
938 static int scale_gamma(SwsInternal *c,
939  const uint8_t * const srcSlice[], const int srcStride[],
940  int srcSliceY, int srcSliceH,
941  uint8_t * const dstSlice[], const int dstStride[],
942  int dstSliceY, int dstSliceH)
943 {
944  int ret = scale_internal(c->cascaded_context[0],
945  srcSlice, srcStride, srcSliceY, srcSliceH,
946  c->cascaded_tmp[0], c->cascaded_tmpStride[0], 0, c->opts.src_h);
947 
948  if (ret < 0)
949  return ret;
950 
951  if (c->cascaded_context[2])
952  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
953  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
954  c->cascaded_tmp[1], c->cascaded_tmpStride[1], 0, c->opts.dst_h);
955  else
956  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
957  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
958  dstSlice, dstStride, dstSliceY, dstSliceH);
959 
960  if (ret < 0)
961  return ret;
962 
963  if (c->cascaded_context[2]) {
964  const int dstY1 = sws_internal(c->cascaded_context[1])->dstY;
965  ret = scale_internal(c->cascaded_context[2], (const uint8_t * const *)c->cascaded_tmp[1],
966  c->cascaded_tmpStride[1], dstY1 - ret, dstY1,
967  dstSlice, dstStride, dstSliceY, dstSliceH);
968  }
969  return ret;
970 }
971 
972 static int scale_cascaded(SwsInternal *c,
973  const uint8_t * const srcSlice[], const int srcStride[],
974  int srcSliceY, int srcSliceH,
975  uint8_t * const dstSlice[], const int dstStride[],
976  int dstSliceY, int dstSliceH)
977 {
978  const int dstH0 = c->cascaded_context[0]->dst_h;
979  int ret = scale_internal(c->cascaded_context[0],
980  srcSlice, srcStride, srcSliceY, srcSliceH,
981  c->cascaded_tmp[0], c->cascaded_tmpStride[0],
982  0, dstH0);
983  if (ret < 0)
984  return ret;
985  ret = scale_internal(c->cascaded_context[1],
986  (const uint8_t * const * )c->cascaded_tmp[0], c->cascaded_tmpStride[0],
987  0, dstH0, dstSlice, dstStride, dstSliceY, dstSliceH);
988  return ret;
989 }
990 
991 static int scale_internal(SwsContext *sws,
992  const uint8_t * const srcSlice[], const int srcStride[],
993  int srcSliceY, int srcSliceH,
994  uint8_t *const dstSlice[], const int dstStride[],
995  int dstSliceY, int dstSliceH)
996 {
997  SwsInternal *c = sws_internal(sws);
998  const int scale_dst = dstSliceY > 0 || dstSliceH < sws->dst_h;
999  const int frame_start = scale_dst || !c->sliceDir;
1000  int i, ret;
1001  const uint8_t *src2[4];
1002  uint8_t *dst2[4];
1003  int macro_height_src = isBayer(sws->src_format) ? 2 : (1 << c->chrSrcVSubSample);
1004  int macro_height_dst = isBayer(sws->dst_format) ? 2 : (1 << c->chrDstVSubSample);
1005  // copy strides, so they can safely be modified
1006  int srcStride2[4];
1007  int dstStride2[4];
1008  int srcSliceY_internal = srcSliceY;
1009 
1010  if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
1011  av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
1012  return AVERROR(EINVAL);
1013  }
1014 
1015  if ((srcSliceY & (macro_height_src - 1)) ||
1016  ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != sws->src_h) ||
1017  srcSliceY + srcSliceH > sws->src_h ||
1018  (isBayer(sws->src_format) && srcSliceH <= 1)) {
1019  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
1020  return AVERROR(EINVAL);
1021  }
1022 
1023  if ((dstSliceY & (macro_height_dst - 1)) ||
1024  ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != sws->dst_h) ||
1025  dstSliceY + dstSliceH > sws->dst_h) {
1026  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", dstSliceY, dstSliceH);
1027  return AVERROR(EINVAL);
1028  }
1029 
1030  if (!check_image_pointers(srcSlice, sws->src_format, srcStride)) {
1031  av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
1032  return AVERROR(EINVAL);
1033  }
1034  if (!check_image_pointers((const uint8_t* const*)dstSlice, sws->dst_format, dstStride)) {
1035  av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
1036  return AVERROR(EINVAL);
1037  }
1038 
1039  // do not mess up sliceDir if we have a "trailing" 0-size slice
1040  if (srcSliceH == 0)
1041  return 0;
1042 
1043  if (sws->gamma_flag && c->cascaded_context[0])
1044  return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1045  dstSlice, dstStride, dstSliceY, dstSliceH);
1046 
1047  if (c->cascaded_context[0] && srcSliceY == 0 && srcSliceH == c->cascaded_context[0]->src_h)
1048  return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1049  dstSlice, dstStride, dstSliceY, dstSliceH);
1050 
1051  if (!srcSliceY && (sws->flags & SWS_BITEXACT) && sws->dither == SWS_DITHER_ED && c->dither_error[0])
1052  for (i = 0; i < 4; i++)
1053  memset(c->dither_error[i], 0, sizeof(c->dither_error[0][0]) * (sws->dst_w+2));
1054 
1055  if (usePal(sws->src_format))
1056  ff_update_palette(c, (const uint32_t *)srcSlice[1]);
1057 
1058  memcpy(src2, srcSlice, sizeof(src2));
1059  memcpy(dst2, dstSlice, sizeof(dst2));
1060  memcpy(srcStride2, srcStride, sizeof(srcStride2));
1061  memcpy(dstStride2, dstStride, sizeof(dstStride2));
1062 
1063  if (frame_start && !scale_dst) {
1064  if (srcSliceY != 0 && srcSliceY + srcSliceH != sws->src_h) {
1065  av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
1066  return AVERROR(EINVAL);
1067  }
1068 
1069  c->sliceDir = (srcSliceY == 0) ? 1 : -1;
1070  } else if (scale_dst)
1071  c->sliceDir = 1;
1072 
1073  if (c->src0Alpha && !c->dst0Alpha && isALPHA(sws->dst_format)) {
1074  uint8_t *base;
1075  int x,y;
1076 
1077  av_fast_malloc(&c->rgb0_scratch, &c->rgb0_scratch_allocated,
1078  FFABS(srcStride[0]) * srcSliceH + 32);
1079  if (!c->rgb0_scratch)
1080  return AVERROR(ENOMEM);
1081 
1082  base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH-1) :
1083  c->rgb0_scratch;
1084  for (y=0; y<srcSliceH; y++){
1085  memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*sws->src_w);
1086  for (x=c->src0Alpha-1; x<4*sws->src_w; x+=4) {
1087  base[ srcStride[0]*y + x] = 0xFF;
1088  }
1089  }
1090  src2[0] = base;
1091  }
1092 
1093  if (c->srcXYZ && !(c->dstXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1094  uint8_t *base;
1095 
1096  av_fast_malloc(&c->xyz_scratch, &c->xyz_scratch_allocated,
1097  FFABS(srcStride[0]) * srcSliceH + 32);
1098  if (!c->xyz_scratch)
1099  return AVERROR(ENOMEM);
1100 
1101  base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH-1) :
1102  c->xyz_scratch;
1103 
1104  ff_xyz12Torgb48(c, base, srcStride[0], src2[0], srcStride[0], sws->src_w, srcSliceH);
1105  src2[0] = base;
1106  }
1107 
1108  if (c->sliceDir != 1) {
1109  // slices go from bottom to top => we flip the image internally
1110  for (i=0; i<4; i++) {
1111  srcStride2[i] *= -1;
1112  dstStride2[i] *= -1;
1113  }
1114 
1115  src2[0] += (srcSliceH - 1) * srcStride[0];
1116  if (!usePal(sws->src_format))
1117  src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
1118  src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
1119  src2[3] += (srcSliceH - 1) * srcStride[3];
1120  dst2[0] += ( sws->dst_h - 1) * dstStride[0];
1121  dst2[1] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[1];
1122  dst2[2] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[2];
1123  dst2[3] += ( sws->dst_h - 1) * dstStride[3];
1124 
1125  srcSliceY_internal = sws->src_h-srcSliceY-srcSliceH;
1126  }
1127  reset_ptr(src2, sws->src_format);
1128  reset_ptr((void*)dst2, sws->dst_format);
1129 
1130  if (c->convert_unscaled) {
1131  int offset = srcSliceY_internal;
1132  int slice_h = srcSliceH;
1133 
1134  // for dst slice scaling, offset the pointers to match the unscaled API
1135  if (scale_dst) {
1136  av_assert0(offset == 0);
1137  for (i = 0; i < 4 && src2[i]; i++) {
1138  if (!src2[i] || (i > 0 && usePal(sws->src_format)))
1139  break;
1140  src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) * srcStride2[i];
1141  }
1142 
1143  for (i = 0; i < 4 && dst2[i]; i++) {
1144  if (!dst2[i] || (i > 0 && usePal(sws->dst_format)))
1145  break;
1146  dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) * dstStride2[i];
1147  }
1148  offset = dstSliceY;
1149  slice_h = dstSliceH;
1150  }
1151 
1152  ret = c->convert_unscaled(c, src2, srcStride2, offset, slice_h,
1153  dst2, dstStride2);
1154  if (scale_dst)
1155  dst2[0] += dstSliceY * dstStride2[0];
1156  } else {
1157  ret = ff_swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
1158  dst2, dstStride2, dstSliceY, dstSliceH);
1159  }
1160 
1161  if (c->dstXYZ && !(c->srcXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1162  uint8_t *dst;
1163 
1164  if (scale_dst) {
1165  dst = dst2[0];
1166  } else {
1167  int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
1168 
1169  av_assert0(dstY >= ret);
1170  av_assert0(ret >= 0);
1171  av_assert0(sws->dst_h >= dstY);
1172  dst = dst2[0] + (dstY - ret) * dstStride2[0];
1173  }
1174 
1175  /* replace on the same data */
1176  ff_rgb48Toxyz12(c, dst, dstStride2[0], dst, dstStride2[0], sws->dst_w, ret);
1177  }
1178 
1179  /* reset slice direction at end of frame */
1180  if ((srcSliceY_internal + srcSliceH == sws->src_h) || scale_dst)
1181  c->sliceDir = 0;
1182 
1183  return ret;
1184 }
1185 
1186 void sws_frame_end(SwsContext *sws)
1187 {
1188  SwsInternal *c = sws_internal(sws);
1189  av_frame_unref(c->frame_src);
1190  av_frame_unref(c->frame_dst);
1191  c->src_ranges.nb_ranges = 0;
1192 }
1193 
1194 int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
1195 {
1196  SwsInternal *c = sws_internal(sws);
1197  int ret, allocated = 0;
1198 
1199  ret = av_frame_ref(c->frame_src, src);
1200  if (ret < 0)
1201  return ret;
1202 
1203  if (!dst->buf[0]) {
1204  dst->width = sws->dst_w;
1205  dst->height = sws->dst_h;
1206  dst->format = sws->dst_format;
1207 
1208  ret = av_frame_get_buffer(dst, 0);
1209  if (ret < 0)
1210  return ret;
1211  allocated = 1;
1212  }
1213 
1214  ret = av_frame_ref(c->frame_dst, dst);
1215  if (ret < 0) {
1216  if (allocated)
1217  av_frame_unref(dst);
1218 
1219  return ret;
1220  }
1221 
1222  return 0;
1223 }
1224 
1225 int sws_send_slice(SwsContext *sws, unsigned int slice_start,
1226  unsigned int slice_height)
1227 {
1228  SwsInternal *c = sws_internal(sws);
1229  int ret;
1230 
1231  ret = ff_range_add(&c->src_ranges, slice_start, slice_height);
1232  if (ret < 0)
1233  return ret;
1234 
1235  return 0;
1236 }
1237 
1238 unsigned int sws_receive_slice_alignment(const SwsContext *sws)
1239 {
1240  const SwsInternal *c = sws_internal(sws);
1241  if (c->slice_ctx)
1242  return sws_internal(c->slice_ctx[0])->dst_slice_align;
1243 
1244  return c->dst_slice_align;
1245 }
1246 
1247 int sws_receive_slice(SwsContext *sws, unsigned int slice_start,
1248  unsigned int slice_height)
1249 {
1250  SwsInternal *c = sws_internal(sws);
1251  unsigned int align = sws_receive_slice_alignment(sws);
1252  uint8_t *dst[4];
1253 
1254  /* wait until complete input has been received */
1255  if (!(c->src_ranges.nb_ranges == 1 &&
1256  c->src_ranges.ranges[0].start == 0 &&
1257  c->src_ranges.ranges[0].len == sws->src_h))
1258  return AVERROR(EAGAIN);
1259 
1260  if ((slice_start > 0 || slice_height < sws->dst_h) &&
1261  (slice_start % align || slice_height % align)) {
1262  av_log(c, AV_LOG_ERROR,
1263  "Incorrectly aligned output: %u/%u not multiples of %u\n",
1264  slice_start, slice_height, align);
1265  return AVERROR(EINVAL);
1266  }
1267 
1268  if (c->slicethread) {
1269  int nb_jobs = c->nb_slice_ctx;
1270  int ret = 0;
1271 
1272  if (c->slice_ctx[0]->dither == SWS_DITHER_ED)
1273  nb_jobs = 1;
1274 
1275  c->dst_slice_start = slice_start;
1276  c->dst_slice_height = slice_height;
1277 
1278  avpriv_slicethread_execute(c->slicethread, nb_jobs, 0);
1279 
1280  for (int i = 0; i < c->nb_slice_ctx; i++) {
1281  if (c->slice_err[i] < 0) {
1282  ret = c->slice_err[i];
1283  break;
1284  }
1285  }
1286 
1287  memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));
1288 
1289  return ret;
1290  }
1291 
1292  for (int i = 0; i < FF_ARRAY_ELEMS(dst); i++) {
1293  ptrdiff_t offset = c->frame_dst->linesize[i] * (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
1294  dst[i] = FF_PTR_ADD(c->frame_dst->data[i], offset);
1295  }
1296 
1297  return scale_internal(sws, (const uint8_t * const *)c->frame_src->data,
1298  c->frame_src->linesize, 0, sws->src_h,
1299  dst, c->frame_dst->linesize, slice_start, slice_height);
1300 }
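/*
 * [Editor's note, not part of the original source] Minimal sketch of how a
 * caller might drive the frame/slice API implemented above; the helper name is
 * hypothetical, error handling is reduced to early returns, and the whole input
 * is sent as a single slice (mirroring the legacy path in sws_scale_frame()).
 */
static int example_scale_frame_sliced(SwsContext *sws, AVFrame *dst, const AVFrame *src)
{
    int ret = sws_frame_start(sws, dst, src);
    if (ret < 0)
        return ret;

    ret = sws_send_slice(sws, 0, src->height);
    if (ret >= 0)
        ret = sws_receive_slice(sws, 0, dst->height);

    sws_frame_end(sws);
    return ret;
}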
1301 
1302 static void get_frame_pointers(const AVFrame *frame, uint8_t *data[4],
1303  int linesize[4], int field)
1304 {
1305  for (int i = 0; i < 4; i++) {
1306  data[i] = frame->data[i];
1307  linesize[i] = frame->linesize[i];
1308  }
1309 
1310  if (!(frame->flags & AV_FRAME_FLAG_INTERLACED)) {
1311  av_assert1(!field);
1312  return;
1313  }
1314 
1315  if (field == FIELD_BOTTOM) {
1316  /* Odd rows, offset by one line */
1317  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1318  for (int i = 0; i < 4; i++) {
1319  data[i] += linesize[i];
1320  if (desc->flags & AV_PIX_FMT_FLAG_PAL)
1321  break;
1322  }
1323  }
1324 
1325  /* Take only every second line */
1326  for (int i = 0; i < 4; i++)
1327  linesize[i] <<= 1;
1328 }
1329 
1330 /* Subset of av_frame_ref() that only references (video) data buffers */
1331 static int frame_ref(AVFrame *dst, const AVFrame *src)
1332 {
1333  /* ref the buffers */
1334  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
1335  if (!src->buf[i])
1336  continue;
1337  dst->buf[i] = av_buffer_ref(src->buf[i]);
1338  if (!dst->buf[i])
1339  return AVERROR(ENOMEM);
1340  }
1341 
1342  memcpy(dst->data, src->data, sizeof(src->data));
1343  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
1344  return 0;
1345 }
1346 
1347 int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
1348 {
1349  int ret;
1350  SwsInternal *c = sws_internal(sws);
1351  if (!src || !dst)
1352  return AVERROR(EINVAL);
1353 
1354  if (c->frame_src) {
1355  /* Context has been initialized with explicit values, fall back to
1356  * legacy API */
1357  ret = sws_frame_start(sws, dst, src);
1358  if (ret < 0)
1359  return ret;
1360 
1361  ret = sws_send_slice(sws, 0, src->height);
1362  if (ret >= 0)
1363  ret = sws_receive_slice(sws, 0, dst->height);
1364 
1365  sws_frame_end(sws);
1366 
1367  return ret;
1368  }
1369 
1370  ret = sws_frame_setup(sws, dst, src);
1371  if (ret < 0)
1372  return ret;
1373 
1374  if (!src->data[0])
1375  return 0;
1376 
1377  if (c->graph[FIELD_TOP]->noop &&
1378  (!c->graph[FIELD_BOTTOM] || c->graph[FIELD_BOTTOM]->noop) &&
1379  src->buf[0] && !dst->buf[0] && !dst->data[0])
1380  {
1381  /* Lightweight refcopy */
1382  ret = frame_ref(dst, src);
1383  if (ret < 0)
1384  return ret;
1385  } else {
1386  if (!dst->data[0]) {
1387  ret = av_frame_get_buffer(dst, 0);
1388  if (ret < 0)
1389  return ret;
1390  }
1391 
1392  for (int field = 0; field < 2; field++) {
1393  SwsGraph *graph = c->graph[field];
1394  uint8_t *dst_data[4], *src_data[4];
1395  int dst_linesize[4], src_linesize[4];
1396  get_frame_pointers(dst, dst_data, dst_linesize, field);
1397  get_frame_pointers(src, src_data, src_linesize, field);
1398  sws_graph_run(graph, dst_data, dst_linesize,
1399  (const uint8_t **) src_data, src_linesize);
1400  if (!graph->dst.interlaced)
1401  break;
1402  }
1403  }
1404 
1405  return 0;
1406 }
1407 
1408 static int validate_params(SwsContext *ctx)
1409 {
1410 #define VALIDATE(field, min, max) \
1411  if (ctx->field < min || ctx->field > max) { \
1412  av_log(ctx, AV_LOG_ERROR, "'%s' (%d) out of range [%d, %d]\n", \
1413  #field, (int) ctx->field, min, max); \
1414  return AVERROR(EINVAL); \
1415  }
1416 
1417  VALIDATE(threads, 0, SWS_MAX_THREADS);
1418  VALIDATE(dither, 0, SWS_DITHER_NB - 1)
1419  VALIDATE(alpha_blend, 0, SWS_ALPHA_BLEND_NB - 1)
1420  return 0;
1421 }
1422 
1423 int sws_frame_setup(SwsContext *ctx, const AVFrame *dst, const AVFrame *src)
1424 {
1425  SwsInternal *s = sws_internal(ctx);
1426  const char *err_msg;
1427  int ret;
1428 
1429  if (!src || !dst)
1430  return AVERROR(EINVAL);
1431  if ((ret = validate_params(ctx)) < 0)
1432  return ret;
1433 
1434  for (int field = 0; field < 2; field++) {
1435  SwsFormat src_fmt = ff_fmt_from_frame(src, field);
1436  SwsFormat dst_fmt = ff_fmt_from_frame(dst, field);
1437 
1438  if ((src->flags ^ dst->flags) & AV_FRAME_FLAG_INTERLACED) {
1439  err_msg = "Cannot convert interlaced to progressive frames or vice versa.\n";
1440  ret = AVERROR(EINVAL);
1441  goto fail;
1442  }
1443 
1444  /* TODO: remove once implemented */
1445  if ((dst_fmt.prim != src_fmt.prim || dst_fmt.trc != src_fmt.trc) &&
1446  !s->color_conversion_warned)
1447  {
1448  av_log(ctx, AV_LOG_WARNING, "Conversions between different primaries / "
1449  "transfer functions are not currently implemented, expect "
1450  "wrong results.\n");
1451  s->color_conversion_warned = 1;
1452  }
1453 
1454  if (!ff_test_fmt(&src_fmt, 0)) {
1455  err_msg = "Unsupported input";
1456  ret = AVERROR(ENOTSUP);
1457  goto fail;
1458  }
1459 
1460  if (!ff_test_fmt(&dst_fmt, 1)) {
1461  err_msg = "Unsupported output";
1462  ret = AVERROR(ENOTSUP);
1463  goto fail;
1464  }
1465 
1466  ret = sws_graph_reinit(ctx, &dst_fmt, &src_fmt, field, &s->graph[field]);
1467  if (ret < 0) {
1468  err_msg = "Failed initializing scaling graph";
1469  goto fail;
1470  }
1471 
1472  if (s->graph[field]->incomplete && ctx->flags & SWS_STRICT) {
1473  err_msg = "Incomplete scaling graph";
1474  ret = AVERROR(EINVAL);
1475  goto fail;
1476  }
1477 
1478  if (!src_fmt.interlaced) {
1479  sws_graph_free(&s->graph[FIELD_BOTTOM]);
1480  break;
1481  }
1482 
1483  continue;
1484 
1485  fail:
1486  av_log(ctx, AV_LOG_ERROR, "%s (%s): fmt:%s csp:%s prim:%s trc:%s ->"
1487  " fmt:%s csp:%s prim:%s trc:%s\n",
1488  err_msg, av_err2str(ret),
1489  av_get_pix_fmt_name(src_fmt.format), av_color_space_name(src_fmt.csp),
1490  av_color_primaries_name(src_fmt.prim), av_color_transfer_name(src_fmt.trc),
1491  av_get_pix_fmt_name(dst_fmt.format), av_color_space_name(dst_fmt.csp),
1492  av_color_primaries_name(dst_fmt.prim), av_color_transfer_name(dst_fmt.trc));
1493 
1494  for (int i = 0; i < FF_ARRAY_ELEMS(s->graph); i++)
1495  sws_graph_free(&s->graph[i]);
1496 
1497  return ret;
1498  }
1499 
1500  return 0;
1501 }
1502 
1503 /**
1504  * swscale wrapper, so we don't need to export the SwsContext.
1505  * Assumes planar YUV to be in YUV order instead of YVU.
1506  */
1507 int attribute_align_arg sws_scale(SwsContext *sws,
1508  const uint8_t * const srcSlice[],
1509  const int srcStride[], int srcSliceY,
1510  int srcSliceH, uint8_t *const dst[],
1511  const int dstStride[])
1512 {
1513  SwsInternal *c = sws_internal(sws);
1514  if (c->nb_slice_ctx) {
1515  sws = c->slice_ctx[0];
1516  c = sws_internal(sws);
1517  }
1518 
1519  return scale_internal(sws, srcSlice, srcStride, srcSliceY, srcSliceH,
1520  dst, dstStride, 0, sws->dst_h);
1521 }
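/*
 * [Editor's note, not part of the original source] Minimal sketch of the legacy
 * API wrapped above; the helper name and pixel formats are illustrative only.
 * A context is created with sws_getContext(), the whole picture is converted in
 * one sws_scale() call (srcSliceY = 0, srcSliceH = full source height), and the
 * context is freed afterwards.
 */
static void example_legacy_scale(const uint8_t *const src_data[4], const int src_linesize[4],
                                 int src_w, int src_h,
                                 uint8_t *const dst_data[4], const int dst_linesize[4],
                                 int dst_w, int dst_h)
{
    struct SwsContext *ctx = sws_getContext(src_w, src_h, AV_PIX_FMT_YUV420P,
                                            dst_w, dst_h, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!ctx)
        return;

    sws_scale(ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
    sws_freeContext(ctx);
}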
1522 
1523 void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
1524  int nb_jobs, int nb_threads)
1525 {
1526  SwsInternal *parent = priv;
1527  SwsContext *sws = parent->slice_ctx[threadnr];
1528  SwsInternal *c = sws_internal(sws);
1529 
1530  const int slice_height = FFALIGN(FFMAX((parent->dst_slice_height + nb_jobs - 1) / nb_jobs, 1),
1531  c->dst_slice_align);
1532  const int slice_start = jobnr * slice_height;
1533  const int slice_end = FFMIN((jobnr + 1) * slice_height, parent->dst_slice_height);
1534  int err = 0;
1535 
1536  if (slice_end > slice_start) {
1537  uint8_t *dst[4] = { NULL };
1538 
1539  for (int i = 0; i < FF_ARRAY_ELEMS(dst) && parent->frame_dst->data[i]; i++) {
1540  const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
1541  const ptrdiff_t offset = parent->frame_dst->linesize[i] *
1542  (ptrdiff_t)((slice_start + parent->dst_slice_start) >> vshift);
1543 
1544  dst[i] = parent->frame_dst->data[i] + offset;
1545  }
1546 
1547  err = scale_internal(sws, (const uint8_t * const *)parent->frame_src->data,
1548  parent->frame_src->linesize, 0, sws->src_h,
1549  dst, parent->frame_dst->linesize,
1550  parent->dst_slice_start + slice_start, slice_end - slice_start);
1551  }
1552 
1553  parent->slice_err[threadnr] = err;
1554 }
sws_init_swscale
static av_cold void sws_init_swscale(SwsInternal *c)
Definition: swscale.c:658
isBayer
static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:842
_dst
uint8_t * _dst
Definition: dsp.h:52
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:108
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
SwsPlane::sliceH
int sliceH
number of lines
Definition: swscale_internal.h:1094
isPacked
static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:887
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
mem_internal.h
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:304
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:477
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
ff_rgb48Toxyz12
void ff_rgb48Toxyz12(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:798
SwsFormat::interlaced
int interlaced
Definition: utils.h:36
lumRangeToJpeg16_c
static void lumRangeToJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:233
lumRangeToJpeg_c
static void lumRangeToJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:185
ff_sws_init_range_convert_aarch64
av_cold void ff_sws_init_range_convert_aarch64(SwsInternal *c)
Definition: swscale.c:238
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3170
SwsContext::src_w
int src_w
Deprecated frame property overrides, for the legacy API only.
Definition: swscale.h:220
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
sws_graph_free
void sws_graph_free(SwsGraph **pgraph)
Uninitialize any state associate with this filter graph and free it.
Definition: graph.c:548
ff_rotate_slice
int ff_rotate_slice(SwsSlice *s, int lum, int chr)
Definition: slice.c:120
int64_t
long long int64_t
Definition: coverity.c:34
AV_PIX_FMT_FLAG_FLOAT
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
Definition: pixdesc.h:158
SwsSlice::plane
SwsPlane plane[MAX_SLICE_PLANES]
color planes
Definition: swscale_internal.h:1112
avpriv_slicethread_execute
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
Execute slice threading.
Definition: slicethread.c:271
ff_sws_init_range_convert_loongarch
av_cold void ff_sws_init_range_convert_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:27
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
isGray
static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:787
GV
#define GV
SWS_BITEXACT
@ SWS_BITEXACT
Definition: swscale.h:156
b
#define b
Definition: input.c:41
SwsFilterDescriptor
Struct which holds all necessary data for processing a slice.
Definition: swscale_internal.h:1119
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
data
const char data[16]
Definition: mxf.c:149
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:82
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:476
SwsContext::flags
unsigned flags
Bitmask of SWS_*.
Definition: swscale.h:187
base
uint8_t base
Definition: vp3data.h:128
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
sws_receive_slice
int sws_receive_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Request a horizontal slice of the output data to be written into the frame previously provided to sws...
Definition: swscale.c:1247
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
ff_sws_init_swscale_riscv
av_cold void ff_sws_init_swscale_riscv(SwsInternal *c)
Definition: swscale.c:74
RV
#define RV
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_get_cpu_flags
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
Definition: cpu.c:109
DEBUG_BUFFERS
#define DEBUG_BUFFERS(...)
Definition: swscale.c:255
SwsFormat::trc
enum AVColorTransferCharacteristic trc
Definition: utils.h:40
bit_depth
static void bit_depth(AudioStatsContext *s, const uint64_t *const mask, uint8_t *depth)
Definition: af_astats.c:246
cpu_flags
static atomic_int cpu_flags
Definition: cpu.c:56
hScale16To15_c
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:95
_src
uint8_t ptrdiff_t const uint8_t * _src
Definition: dsp.h:52
SwsInternal::frame_dst
AVFrame * frame_dst
Definition: swscale_internal.h:358
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
SWS_FAST_BILINEAR
@ SWS_FAST_BILINEAR
Scaler selection options.
Definition: swscale.h:98
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:727
ff_sws_init_input_funcs
void ff_sws_init_input_funcs(SwsInternal *c, planar1_YV12_fn *lumToYV12, planar1_YV12_fn *alpToYV12, planar2_YV12_fn *chrToYV12, planarX_YV12_fn *readLumPlanar, planarX_YV12_fn *readAlpPlanar, planarX2_YV12_fn *readChrPlanar)
validate_params
static int validate_params(SwsContext *ctx)
Definition: swscale.c:1408
chrRangeToJpeg16_c
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:207
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1720
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3546
chrRangeFromJpeg_c
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:173
frame_start
static void frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1778
SWS_MAX_THREADS
#define SWS_MAX_THREADS
Definition: swscale_internal.h:52
fail
#define fail()
Definition: checkasm.h:193
chrRangeFromJpeg16_c
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:221
FIELD_BOTTOM
@ FIELD_BOTTOM
Definition: utils.h:30
SwsInternal::frame_src
AVFrame * frame_src
Definition: swscale_internal.h:357
sws_frame_setup
int sws_frame_setup(SwsContext *ctx, const AVFrame *dst, const AVFrame *src)
Like sws_scale_frame, but without actually scaling.
Definition: swscale.c:1423
val
static double val(void *priv, double ch)
Definition: aeval.c:77
SWS_ALPHA_BLEND_NB
@ SWS_ALPHA_BLEND_NB
Definition: swscale.h:91
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:741
init_range_convert_constants
static void init_range_convert_constants(SwsInternal *c)
Definition: swscale.c:587
RY
#define RY
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:90
avassert.h
hScale8To19_c
static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:140
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FIELD_TOP
@ FIELD_TOP
Definition: utils.h:29
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
yuv2packed2_fn
void(* yuv2packed2_fn)(SwsInternal *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scaling between two input lines.
Definition: swscale_internal.h:214
SwsContext::dither
SwsDither dither
Dither mode.
Definition: swscale.h:202
SwsInternal::slice_err
int * slice_err
Definition: swscale_internal.h:326
emms_c
#define emms_c()
Definition: emms.h:63
intreadwrite.h
SwsInternal::slice_ctx
SwsContext ** slice_ctx
Definition: swscale_internal.h:325
s
#define s(width, name)
Definition: cbs_vp9.c:198
GU
#define GU
ff_update_palette
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
Definition: swscale.c:857
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
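AV_CEIL_RSHIFT is a right shift that rounds up, which is what scaler code uses to derive chroma-plane dimensions from luma dimensions and the subsampling shifts. A small sketch; plane_dims is a hypothetical helper, not an FFmpeg function:

/* Sketch: compute per-plane width/height from the chroma subsampling shifts
 * with AV_CEIL_RSHIFT, so odd luma sizes still round up. */
#include <libavutil/common.h>
#include <libavutil/pixdesc.h>

static void plane_dims(enum AVPixelFormat fmt, int w, int h, int plane,
                       int *pw, int *ph)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    int is_chroma = plane == 1 || plane == 2;

    *pw = is_chroma ? AV_CEIL_RSHIFT(w, desc->log2_chroma_w) : w;
    *ph = is_chroma ? AV_CEIL_RSHIFT(h, desc->log2_chroma_h) : h;
}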
ff_sws_init_swscale_arm
av_cold void ff_sws_init_swscale_arm(SwsInternal *c)
Definition: swscale.c:33
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1451
format
In filter terminology the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists; they are references to shared objects.
g
const char * g
Definition: vf_curves.c:128
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
SwsSlice::width
int width
Slice line width.
Definition: swscale_internal.h:1106
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
VALIDATE
#define VALIDATE(field, min, max)
ctx
AVFormatContext * ctx
Definition: movenc.c:49
AV_RL16
Read an unaligned 16-bit little-endian value (macro generated from a byte-read template).
Definition: bytestream.h:94
chrRangeToJpeg_c
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:159
ff_hcscale_fast_c
void ff_hcscale_fast_c(SwsInternal *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:38
field
It's the only field you need to keep; assuming you have a context, there is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
ff_xyz12Torgb48
void ff_xyz12Torgb48(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:739
ff_sws_init_range_convert_riscv
av_cold void ff_sws_init_range_convert_riscv(SwsInternal *c)
Definition: swscale.c:29
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:478
sws_frame_end
void sws_frame_end(SwsContext *sws)
Finish the scaling process for a pair of source/destination frames previously submitted with sws_frame_start().
Definition: swscale.c:1186
FFABS
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
Definition: common.h:74
ff_sws_init_range_convert_x86
av_cold void ff_sws_init_range_convert_x86(SwsInternal *c)
Definition: swscale.c:479
AV_PIX_FMT_GRAY8A
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:143
scale_internal
static int scale_internal(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:991
fillPlane
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width, int height, int y, uint8_t val)
Definition: swscale.c:54
NULL
#define NULL
Definition: coverity.c:32
SwsPlane::available_lines
int available_lines
Maximum number of lines that can be held by this plane.
Definition: swscale_internal.h:1092
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
SwsContext::gamma_flag
int gamma_flag
Use gamma correct scaling.
Definition: swscale.h:212
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
sws_graph_reinit
int sws_graph_reinit(SwsContext *ctx, const SwsFormat *dst, const SwsFormat *src, int field, SwsGraph **out_graph)
Wrapper around sws_graph_create that does nothing if the format is unchanged.
Definition: graph.c:586
V
#define V
Definition: avdct.c:31
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3504
sws_graph_run
void sws_graph_run(SwsGraph *graph, uint8_t *const out_data[4], const int out_linesize[4], const uint8_t *const in_data[4], const int in_linesize[4])
Dispatch the filter graph on a single field.
Definition: graph.c:600
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
hScale8To15_c
static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:124
ff_sws_init_range_convert
av_cold void ff_sws_init_range_convert(SwsInternal *c)
Definition: swscale.c:622
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
hScale16To19_c
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:65
c
Undefined behavior: in the C language some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that the output of the computation does not fit in the signed type.
Definition: undefined.txt:32
SwsInternal::dstY
int dstY
Last destination vertical line output from last slice.
Definition: swscale_internal.h:440
SwsFormat::prim
enum AVColorPrimaries prim
Definition: utils.h:39
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:92
ff_range_add
int ff_range_add(RangeList *r, unsigned int start, unsigned int len)
Definition: utils.c:2589
attribute_align_arg
#define attribute_align_arg
Definition: internal.h:50
yuv2packedX_fn
void(* yuv2packedX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point vertical scaling between input pixels.
Definition: swscale_internal.h:246
AV_CPU_FLAG_SSE2
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
Definition: cpu.h:35
ff_sws_slice_worker
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
Definition: swscale.c:1523
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:748
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
ff_sws_init_swscale_loongarch
av_cold void ff_sws_init_swscale_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:62
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:400
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:102
get_frame_pointers
static void get_frame_pointers(const AVFrame *frame, uint8_t *data[4], int linesize[4], int field)
Definition: swscale.c:1302
fillPlane16
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian)
Definition: swscale_internal.h:1052
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
usePal
static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:918
BV
#define BV
cpu.h
ff_test_fmt
int ff_test_fmt(const SwsFormat *fmt, int output)
Definition: utils.c:2755
isAnyRGB
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:856
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
SwsContext::src_h
int src_h
Width and height of the source frame.
Definition: swscale.h:220
SwsFormat
Definition: utils.h:34
RGB2YUV_SHIFT
#define RGB2YUV_SHIFT
align
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
Definition: bitstream_template.h:411
is32BPS
static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:734
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:475
a
The reader does not expect b to be semantically signed here, and if the code is changed, by maybe adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a.
Definition: undefined.txt:41
offset
It's the only field you need to keep; assuming you have a context, there is some magic you don't need to care about around this, just let it be.
Definition: writing_filters.txt:86
lumRangeFromJpeg_c
static void lumRangeFromJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:197
SWS_DITHER_NB
@ SWS_DITHER_NB
Definition: swscale.h:86
ff_sws_init_swscale_ppc
av_cold void ff_sws_init_swscale_ppc(SwsInternal *c)
Definition: swscale_altivec.c:232
SwsContext::dst_format
int dst_format
Destination pixel format.
Definition: swscale.h:223
fillPlane32
static void fillPlane32(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian, int is_float)
Definition: swscale_internal.h:1066
GY
#define GY
Y
#define Y
Definition: boxblur.h:37
yuv2anyX_fn
void(* yuv2anyX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling between input pixels.
Definition: swscale_internal.h:280
ff_sws_init_swscale_x86
av_cold void ff_sws_init_swscale_x86(SwsInternal *c)
Definition: swscale.c:495
scale_cascaded
static int scale_cascaded(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:972
emms.h
SwsInternal::dst_slice_align
unsigned int dst_slice_align
Definition: swscale_internal.h:687
sws
static SwsContext * sws[3]
Definition: swscale.c:69
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
SwsGraph::dst
SwsFormat dst
Definition: graph.h:114
SwsFormat::format
enum AVPixelFormat format
Definition: utils.h:37
sws_send_slice
int sws_send_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Indicate that a horizontal slice of input data is available in the source frame previously provided to sws_frame_start().
Definition: swscale.c:1225
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
ff_sws_init_scale
void ff_sws_init_scale(SwsInternal *c)
Definition: swscale.c:691
src2
const pixel * src2
Definition: h264pred_template.c:422
common.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
check_image_pointers
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt, const int linesizes[4])
Definition: swscale.c:722
av_always_inline
#define av_always_inline
Definition: attributes.h:49
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling between input pixels.
Definition: swscale_internal.h:144
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:116
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
SwsContext::dst_h
int dst_h
Width and height of the destination frame.
Definition: swscale.h:221
ff_updateMMXDitherTables
void ff_updateMMXDitherTables(SwsInternal *c, int dstY)
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:95
SwsSlice
Struct which defines a slice of an image to be scaled or an output for a scaled slice.
Definition: swscale_internal.h:1104
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:648
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:736
stride
#define stride
Definition: h264pred_template.c:537
ff_init_slice_from_src
int ff_init_slice_from_src(SwsSlice *s, uint8_t *const src[4], const int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative)
Definition: slice.c:148
frame_ref
static int frame_ref(AVFrame *dst, const AVFrame *src)
Definition: swscale.c:1331
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale.h:83
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
yuv2packed1_fn
void(* yuv2packed1_fn)(SwsInternal *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional vertical scaling (or point-scaling).
Definition: swscale_internal.h:181
vshift
static int vshift(enum AVPixelFormat fmt, int plane)
Definition: graph.c:97
SwsInternal
Definition: swscale_internal.h:317
ret
ret
Definition: filter_design.txt:187
sws_receive_slice_alignment
unsigned int sws_receive_slice_alignment(const SwsContext *sws)
Get the alignment required for slices.
Definition: swscale.c:1238
__asm__
__asm__(".macro parse_r var r\n\t" "\\var = -1\n\t" _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) ".iflt \\var\n\t" ".error \"Unable to parse register name \\r\"\n\t" ".endif\n\t" ".endm")
ff_fmt_from_frame
SwsFormat ff_fmt_from_frame(const AVFrame *frame, int field)
This function also sanitizes and strips the input data, removing irrelevant fields for certain formats.
Definition: utils.c:2653
bswap.h
sws_frame_start
int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Initialize the scaling process for a given pair of source/destination frames.
Definition: swscale.c:1194
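The frame/slice API entries in this index (sws_frame_start, sws_send_slice, sws_receive_slice, sws_receive_slice_alignment, sws_frame_end) are used together; the following is a minimal sketch of that workflow, assuming the context has already been configured for the frames' formats and sizes, with error handling abbreviated. scale_in_slices is a hypothetical caller:

/* Minimal sketch of the slice-based scaling workflow; assumes "sws" was
 * configured for the formats of "src" and "dst" elsewhere. */
#include <libswscale/swscale.h>
#include <libavutil/frame.h>

static int scale_in_slices(SwsContext *sws, AVFrame *dst, const AVFrame *src)
{
    int ret = sws_frame_start(sws, dst, src);
    if (ret < 0)
        return ret;

    /* Feed all of the input up front; callers may also send it in chunks. */
    ret = sws_send_slice(sws, 0, src->height);
    if (ret >= 0) {
        /* Output slice starts/sizes must be multiples of this alignment
         * (the final slice may be shorter). */
        unsigned align = sws_receive_slice_alignment(sws);
        unsigned step  = align * ((64 + align - 1) / align);

        for (unsigned y = 0; y < (unsigned)dst->height && ret >= 0; y += step) {
            unsigned h = (unsigned)dst->height - y;
            if (h > step)
                h = step;
            ret = sws_receive_slice(sws, y, h);
        }
    }

    sws_frame_end(sws);
    return ret < 0 ? ret : 0;
}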
frame
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more, it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
Definition: filter_design.txt:264
sws_pb_64
static const uint8_t sws_pb_64[8]
Definition: swscale.c:50
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between input pixels.
Definition: swscale_internal.h:124
reset_ptr
static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
Definition: swscale.c:710
ff_init_vscale_pfn
void ff_init_vscale_pfn(SwsInternal *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx)
setup vertical scaler functions
Definition: vscale.c:258
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1507
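A minimal usage sketch for the classic pointer-based call above, assuming a YUV420P-to-RGB24 conversion of equal size and already-allocated source/destination buffers; convert_frame is a hypothetical caller:

/* Sketch: convert one YUV420P image to RGB24 with the classic API.
 * src_data/src_linesize and dst_data/dst_linesize are assumed to describe
 * already-allocated buffers of the right size. */
#include <libswscale/swscale.h>

int convert_frame(const uint8_t *const src_data[4], const int src_linesize[4],
                  uint8_t *const dst_data[4], const int dst_linesize[4],
                  int w, int h)
{
    SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                     w, h, AV_PIX_FMT_RGB24,
                                     SWS_BILINEAR, NULL, NULL, NULL);
    int out_h;

    if (!sws)
        return AVERROR(EINVAL);

    /* Process the whole image as a single slice starting at row 0. */
    out_h = sws_scale(sws, src_data, src_linesize, 0, h,
                      dst_data, dst_linesize);

    sws_freeContext(sws);
    return out_h < 0 ? out_h : 0;
}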
SWS_PRINT_INFO
@ SWS_PRINT_INFO
Emit verbose log of scaling parameters.
Definition: swscale.h:119
lumRangeFromJpeg16_c
static void lumRangeFromJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:244
atomic_exchange_explicit
#define atomic_exchange_explicit(object, desired, order)
Definition: stdatomic.h:106
SWS_STRICT
@ SWS_STRICT
Return an error on underspecified conversions.
Definition: swscale.h:114
ff_dither_8x8_128
const uint8_t ff_dither_8x8_128[9][8]
Definition: swscale.c:38
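Tables like ff_dither_8x8_128 are 8x8 ordered-dither thresholds; as a standalone illustration of the principle (not the exact code path swscale runs), adding the position-dependent threshold before truncating to fewer bits spreads the quantization error spatially:

/* Ordered-dither sketch: reduce 8-bit samples to 4 bits using a classic
 * 4x4 Bayer matrix tiled to 8x8 (values 0..15, one step of the coarser
 * scale). Same principle as the ff_dither_8x8_* tables, shown standalone. */
#include <stdint.h>

static void dither_8to4(const uint8_t *src, uint8_t *dst,
                        int width, int height, int stride)
{
    static const uint8_t bayer[8][8] = {
        {  0,  8,  2, 10,  0,  8,  2, 10 },
        { 12,  4, 14,  6, 12,  4, 14,  6 },
        {  3, 11,  1,  9,  3, 11,  1,  9 },
        { 15,  7, 13,  5, 15,  7, 13,  5 },
        {  0,  8,  2, 10,  0,  8,  2, 10 },
        { 12,  4, 14,  6, 12,  4, 14,  6 },
        {  3, 11,  1,  9,  3, 11,  1,  9 },
        { 15,  7, 13,  5, 15,  7, 13,  5 },
    };

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int v = src[y * stride + x] + bayer[y & 7][x & 7]; /* add threshold */
            if (v > 255)
                v = 255;                                        /* avoid overflow */
            dst[y * stride + x] = v >> 4;                       /* keep top 4 bits */
        }
    }
}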
AV_CPU_FLAG_MMXEXT
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
Definition: cpu.h:31
ff_swscale
int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:259
SwsFormat::csp
enum AVColorSpace csp
Definition: utils.h:41
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
RU
#define RU
BU
#define BU
desc
const char * desc
Definition: libsvtav1.c:79
scale_gamma
static int scale_gamma(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:938
mem.h
SwsPlane::sliceY
int sliceY
index of first line
Definition: swscale_internal.h:1093
SwsContext::dst_w
int dst_w
Definition: swscale.h:221
SwsInternal::dst_slice_height
int dst_slice_height
Definition: swscale_internal.h:334
SwsGraph
Filter graph, which represents a 'baked' pixel format conversion.
Definition: graph.h:94
SwsContext::src_format
int src_format
Source pixel format.
Definition: swscale.h:222
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
ff_hyscale_fast_c
void ff_hyscale_fast_c(SwsInternal *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:23
BY
#define BY
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsInternal *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:3169
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
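av_fast_malloc() is the usual pattern for a scratch buffer that grows on demand and is reused across calls; a minimal sketch of that pattern (Scratch, get_scratch and free_scratch are hypothetical names):

/* Sketch of the av_fast_malloc() reuse pattern for a per-call scratch buffer.
 * The pointer and the allocated size travel together; on allocation failure
 * the pointer is set to NULL. */
#include <stddef.h>
#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct Scratch {
    uint8_t     *buf;
    unsigned int size;   /* current allocated size, updated by av_fast_malloc */
} Scratch;

static int get_scratch(Scratch *s, size_t needed)
{
    /* Only reallocates (and updates s->size) when "needed" exceeds s->size. */
    av_fast_malloc(&s->buf, &s->size, needed);
    if (!s->buf)
        return AVERROR(ENOMEM);
    return 0;
}

static void free_scratch(Scratch *s)
{
    av_freep(&s->buf);
    s->size = 0;
}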
ff_sws_init_swscale_aarch64
av_cold void ff_sws_init_swscale_aarch64(SwsInternal *c)
Definition: swscale.c:263
SwsInternal::dst_slice_start
int dst_slice_start
Definition: swscale_internal.h:333
int32_t
int32_t
Definition: audioconvert.c:56
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:434
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sws_internal
static SwsInternal * sws_internal(const SwsContext *sws)
Definition: swscale_internal.h:74
sws_scale_frame
int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
Definition: swscale.c:1347
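A minimal sketch for the frame-based call above, assuming a recent libswscale where a freshly allocated context takes its conversion parameters from the frames themselves; on older releases the context must be initialized for the formats first. rescale is a hypothetical caller:

/* Sketch: scale one AVFrame into another with the frame API. "dst" is
 * assumed to have format/width/height filled in; its data buffers may be
 * left unallocated, in which case the scaler allocates them. */
#include <libswscale/swscale.h>
#include <libavutil/frame.h>

int rescale(AVFrame *dst, const AVFrame *src)
{
    SwsContext *sws = sws_alloc_context();
    int ret;

    if (!sws)
        return AVERROR(ENOMEM);

    ret = sws_scale_frame(sws, dst, src);   /* parameters taken from the frames */
    sws_freeContext(sws);
    return ret < 0 ? ret : 0;
}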
h
h
Definition: vp9dsp_template.c:2070
solve_range_convert
static void solve_range_convert(uint16_t src_min, uint16_t src_max, uint16_t dst_min, uint16_t dst_max, int src_bits, int src_shift, int mult_shift, uint32_t *coeff, int64_t *offset)
Definition: swscale.c:574
isPlanar
static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:896
width
#define width
Definition: dsp.h:85
SwsContext
Main external API structure.
Definition: swscale.h:174
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3525
src
#define src
Definition: vp8dsp.c:248
swscale.h
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3090
AV_RB16
Read an unaligned 16-bit big-endian value (macro generated from a byte-read template).
Definition: bytestream.h:98
isALPHA
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:878
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:62