FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/opt.h"
39 #include "avcodec.h"
40 #include "blockdsp.h"
41 #include "codec_internal.h"
42 #include "copy_block.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "idctdsp.h"
46 #include "internal.h"
47 #include "jpegtables.h"
48 #include "mjpeg.h"
49 #include "mjpegdec.h"
50 #include "jpeglsdec.h"
51 #include "profiles.h"
52 #include "put_bits.h"
53 #include "tiff.h"
54 #include "exif.h"
55 #include "bytestream.h"
56 #include "tiff_common.h"
57 
58 
59 static int init_default_huffman_tables(MJpegDecodeContext *s)
60 {
61  static const struct {
62  int class;
63  int index;
64  const uint8_t *bits;
65  const uint8_t *values;
66  int length;
67  } ht[] = {
68  { 0, 0, ff_mjpeg_bits_dc_luminance,
69  ff_mjpeg_val_dc, 12 },
70  { 0, 1, ff_mjpeg_bits_dc_chrominance,
71  ff_mjpeg_val_dc, 12 },
72  { 1, 0, ff_mjpeg_bits_ac_luminance,
73  ff_mjpeg_val_ac_luminance, 162 },
74  { 1, 1, ff_mjpeg_bits_ac_chrominance,
75  ff_mjpeg_val_ac_chrominance, 162 },
76  { 2, 0, ff_mjpeg_bits_ac_luminance,
77  ff_mjpeg_val_ac_luminance, 162 },
78  { 2, 1, ff_mjpeg_bits_ac_chrominance,
79  ff_mjpeg_val_ac_chrominance, 162 },
80  };
81  int i, ret;
82 
83  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
84  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
85  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
86  ht[i].bits, ht[i].values,
87  ht[i].class == 1, s->avctx);
88  if (ret < 0)
89  return ret;
90 
91  if (ht[i].class < 2) {
92  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
93  ht[i].bits + 1, 16);
94  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
95  ht[i].values, ht[i].length);
96  }
97  }
98 
99  return 0;
100 }
101 
102 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
103 {
104  s->buggy_avid = 1;
105  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
106  s->interlace_polarity = 1;
107  if (len > 14 && buf[12] == 2) /* 2 - PAL */
108  s->interlace_polarity = 0;
109  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
110  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
111 }
112 
113 static void init_idct(AVCodecContext *avctx)
114 {
115  MJpegDecodeContext *s = avctx->priv_data;
116 
117  ff_idctdsp_init(&s->idsp, avctx);
118  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
119  s->idsp.idct_permutation);
120 }
121 
122 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
123 {
124  MJpegDecodeContext *s = avctx->priv_data;
125  int ret;
126 
127  if (!s->picture_ptr) {
128  s->picture = av_frame_alloc();
129  if (!s->picture)
130  return AVERROR(ENOMEM);
131  s->picture_ptr = s->picture;
132  }
133 
134  s->avctx = avctx;
135  ff_blockdsp_init(&s->bdsp);
136  ff_hpeldsp_init(&s->hdsp, avctx->flags);
137  init_idct(avctx);
138  s->buffer_size = 0;
139  s->buffer = NULL;
140  s->start_code = -1;
141  s->first_picture = 1;
142  s->got_picture = 0;
143  s->orig_height = avctx->coded_height;
144  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
145  avctx->colorspace = AVCOL_SPC_BT470BG;
146  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
147 
148  if ((ret = init_default_huffman_tables(s)) < 0)
149  return ret;
150 
151  if (s->extern_huff) {
152  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
153  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
154  return ret;
155  if (ff_mjpeg_decode_dht(s)) {
156  av_log(avctx, AV_LOG_ERROR,
157  "error using external huffman table, switching back to internal\n");
158  if ((ret = init_default_huffman_tables(s)) < 0)
159  return ret;
160  }
161  }
162  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
163  s->interlace_polarity = 1; /* bottom field first */
164  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
165  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
166  if (avctx->codec_tag == AV_RL32("MJPG"))
167  s->interlace_polarity = 1;
168  }
169 
170  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
171  if (avctx->extradata_size >= 4)
172  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
173 
174  if (s->smv_frames_per_jpeg <= 0) {
175  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
176  return AVERROR_INVALIDDATA;
177  }
178 
179  s->smv_frame = av_frame_alloc();
180  if (!s->smv_frame)
181  return AVERROR(ENOMEM);
182  } else if (avctx->extradata_size > 8
183  && AV_RL32(avctx->extradata) == 0x2C
184  && AV_RL32(avctx->extradata+4) == 0x18) {
185  parse_avid(s, avctx->extradata, avctx->extradata_size);
186  }
187 
188  if (avctx->codec->id == AV_CODEC_ID_AMV)
189  s->flipped = 1;
190 
191  return 0;
192 }
193 
194 
195 /* quantize tables */
196 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
197 {
198  int len, index, i;
199 
200  len = get_bits(&s->gb, 16) - 2;
201 
202  if (8*len > get_bits_left(&s->gb)) {
203  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
204  return AVERROR_INVALIDDATA;
205  }
206 
207  while (len >= 65) {
208  int pr = get_bits(&s->gb, 4);
209  if (pr > 1) {
210  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
211  return AVERROR_INVALIDDATA;
212  }
213  index = get_bits(&s->gb, 4);
214  if (index >= 4)
215  return -1;
216  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
217  /* read quant table */
218  for (i = 0; i < 64; i++) {
219  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
220  if (s->quant_matrixes[index][i] == 0) {
221  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
222  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
223  if (s->avctx->err_recognition & AV_EF_EXPLODE)
224  return AVERROR_INVALIDDATA;
225  }
226  }
227 
228  // XXX FIXME fine-tune, and perhaps add dc too
229  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
230  s->quant_matrixes[index][8]) >> 1;
231  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
232  index, s->qscale[index]);
233  len -= 1 + 64 * (1+pr);
234  }
235  return 0;
236 }
237 
238 /* decode huffman tables and build VLC decoders */
239 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
240 {
241  int len, index, i, class, n, v;
242  uint8_t bits_table[17];
243  uint8_t val_table[256];
244  int ret = 0;
245 
246  len = get_bits(&s->gb, 16) - 2;
247 
248  if (8*len > get_bits_left(&s->gb)) {
249  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
250  return AVERROR_INVALIDDATA;
251  }
252 
253  while (len > 0) {
254  if (len < 17)
255  return AVERROR_INVALIDDATA;
256  class = get_bits(&s->gb, 4);
257  if (class >= 2)
258  return AVERROR_INVALIDDATA;
259  index = get_bits(&s->gb, 4);
260  if (index >= 4)
261  return AVERROR_INVALIDDATA;
262  n = 0;
263  for (i = 1; i <= 16; i++) {
264  bits_table[i] = get_bits(&s->gb, 8);
265  n += bits_table[i];
266  }
267  len -= 17;
268  if (len < n || n > 256)
269  return AVERROR_INVALIDDATA;
270 
271  for (i = 0; i < n; i++) {
272  v = get_bits(&s->gb, 8);
273  val_table[i] = v;
274  }
275  len -= n;
276 
277  /* build VLC and flush previous vlc if present */
278  ff_free_vlc(&s->vlcs[class][index]);
279  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
280  class, index, n);
281  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
282  val_table, class > 0, s->avctx)) < 0)
283  return ret;
284 
285  if (class > 0) {
286  ff_free_vlc(&s->vlcs[2][index]);
287  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
288  val_table, 0, s->avctx)) < 0)
289  return ret;
290  }
291 
292  for (i = 0; i < 16; i++)
293  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
294  for (i = 0; i < 256; i++)
295  s->raw_huffman_values[class][index][i] = val_table[i];
296  }
297  return 0;
298 }
299 
300 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
301 {
302  int len, nb_components, i, width, height, bits, ret, size_change;
303  unsigned pix_fmt_id;
304  int h_count[MAX_COMPONENTS] = { 0 };
305  int v_count[MAX_COMPONENTS] = { 0 };
306 
307  s->cur_scan = 0;
308  memset(s->upscale_h, 0, sizeof(s->upscale_h));
309  memset(s->upscale_v, 0, sizeof(s->upscale_v));
310 
311  len = get_bits(&s->gb, 16);
312  bits = get_bits(&s->gb, 8);
313 
314  if (bits > 16 || bits < 1) {
315  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
316  return AVERROR_INVALIDDATA;
317  }
318 
319  if (s->avctx->bits_per_raw_sample != bits) {
320  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
321  s->avctx->bits_per_raw_sample = bits;
322  init_idct(s->avctx);
323  }
324  if (s->pegasus_rct)
325  bits = 9;
326  if (bits == 9 && !s->pegasus_rct)
327  s->rct = 1; // FIXME ugly
328 
329  if(s->lossless && s->avctx->lowres){
330  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
331  return -1;
332  }
333 
334  height = get_bits(&s->gb, 16);
335  width = get_bits(&s->gb, 16);
336 
337  // HACK for odd_height.mov
338  if (s->interlaced && s->width == width && s->height == height + 1)
339  height= s->height;
340 
341  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
342  if (av_image_check_size(width, height, 0, s->avctx) < 0)
343  return AVERROR_INVALIDDATA;
344  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
345  return AVERROR_INVALIDDATA;
346 
347  nb_components = get_bits(&s->gb, 8);
348  if (nb_components <= 0 ||
349  nb_components > MAX_COMPONENTS)
350  return -1;
351  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
352  if (nb_components != s->nb_components) {
353  av_log(s->avctx, AV_LOG_ERROR,
354  "nb_components changing in interlaced picture\n");
355  return AVERROR_INVALIDDATA;
356  }
357  }
358  if (s->ls && !(bits <= 8 || nb_components == 1)) {
359  avpriv_report_missing_feature(s->avctx,
360  "JPEG-LS that is not <= 8 "
361  "bits/component or 16-bit gray");
362  return AVERROR_PATCHWELCOME;
363  }
364  if (len != 8 + 3 * nb_components) {
365  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
366  return AVERROR_INVALIDDATA;
367  }
368 
369  s->nb_components = nb_components;
370  s->h_max = 1;
371  s->v_max = 1;
372  for (i = 0; i < nb_components; i++) {
373  /* component id */
374  s->component_id[i] = get_bits(&s->gb, 8);
375  h_count[i] = get_bits(&s->gb, 4);
376  v_count[i] = get_bits(&s->gb, 4);
377  /* compute hmax and vmax (only used in interleaved case) */
378  if (h_count[i] > s->h_max)
379  s->h_max = h_count[i];
380  if (v_count[i] > s->v_max)
381  s->v_max = v_count[i];
382  s->quant_index[i] = get_bits(&s->gb, 8);
383  if (s->quant_index[i] >= 4) {
384  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
385  return AVERROR_INVALIDDATA;
386  }
387  if (!h_count[i] || !v_count[i]) {
388  av_log(s->avctx, AV_LOG_ERROR,
389  "Invalid sampling factor in component %d %d:%d\n",
390  i, h_count[i], v_count[i]);
391  return AVERROR_INVALIDDATA;
392  }
393 
394  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
395  i, h_count[i], v_count[i],
396  s->component_id[i], s->quant_index[i]);
397  }
398  if ( nb_components == 4
399  && s->component_id[0] == 'C'
400  && s->component_id[1] == 'M'
401  && s->component_id[2] == 'Y'
402  && s->component_id[3] == 'K')
403  s->adobe_transform = 0;
404 
405  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
406  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
407  return AVERROR_PATCHWELCOME;
408  }
409 
410  if (s->bayer) {
411  if (nb_components == 2) {
412  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
413  width stored in their SOF3 markers is the width of each one. We only output
414  a single component, therefore we need to adjust the output image width. We
415  handle the deinterleaving (but not the debayering) in this file. */
416  width *= 2;
417  }
418  /* They can also contain 1 component, which is double the width and half the height
419  of the final image (rows are interleaved). We don't handle the decoding in this
420  file, but leave that to the TIFF/DNG decoder. */
421  }
422 
423  /* if different size, realloc/alloc picture */
424  if (width != s->width || height != s->height || bits != s->bits ||
425  memcmp(s->h_count, h_count, sizeof(h_count)) ||
426  memcmp(s->v_count, v_count, sizeof(v_count))) {
427  size_change = 1;
428 
429  s->width = width;
430  s->height = height;
431  s->bits = bits;
432  memcpy(s->h_count, h_count, sizeof(h_count));
433  memcpy(s->v_count, v_count, sizeof(v_count));
434  s->interlaced = 0;
435  s->got_picture = 0;
436 
437  /* test interlaced mode */
438  if (s->first_picture &&
439  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
440  s->orig_height != 0 &&
441  s->height < ((s->orig_height * 3) / 4)) {
442  s->interlaced = 1;
443  s->bottom_field = s->interlace_polarity;
444  s->picture_ptr->interlaced_frame = 1;
445  s->picture_ptr->top_field_first = !s->interlace_polarity;
446  height *= 2;
447  }
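 /* Interlace heuristic: if this is the first picture and its height is well
  * below the container's coded height, assume field-coded input and double
  * the allocated height; bottom_field/top_field_first are derived from the
  * detected interlace polarity. */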
448 
449  ret = ff_set_dimensions(s->avctx, width, height);
450  if (ret < 0)
451  return ret;
452 
453  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
454  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
455  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
456  s->orig_height < height)
457  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
458 
459  s->first_picture = 0;
460  } else {
461  size_change = 0;
462  }
463 
464  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
465  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
466  if (s->avctx->height <= 0)
467  return AVERROR_INVALIDDATA;
468  }
469 
470  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
471  if (s->progressive) {
472  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
473  return AVERROR_INVALIDDATA;
474  }
475  } else {
476  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
477  s->rgb = 1;
478  else if (!s->lossless)
479  s->rgb = 0;
480  /* XXX: not complete test ! */
481  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
482  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
483  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
484  (s->h_count[3] << 4) | s->v_count[3];
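 /* pix_fmt_id packs the per-component sampling factors into one 32-bit value,
  * one nibble per factor (h0 v0 h1 v1 h2 v2 h3 v3): e.g. 0x22111100 is 2x2
  * luma with two 1x1 chroma planes (4:2:0), 0x21111100 is 4:2:2. */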
485  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
486  /* NOTE we do not allocate pictures large enough for the possible
487  * padding of h/v_count being 4 */
488  if (!(pix_fmt_id & 0xD0D0D0D0))
489  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
490  if (!(pix_fmt_id & 0x0D0D0D0D))
491  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
492 
493  for (i = 0; i < 8; i++) {
494  int j = 6 + (i&1) - (i&6);
495  int is = (pix_fmt_id >> (4*i)) & 0xF;
496  int js = (pix_fmt_id >> (4*j)) & 0xF;
497 
498  if (is == 1 && js != 2 && (i < 2 || i > 5))
499  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
500  if (is == 1 && js != 2 && (i < 2 || i > 5))
501  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
502 
503  if (is == 1 && js == 2) {
504  if (i & 1) s->upscale_h[j/2] = 1;
505  else s->upscale_v[j/2] = 1;
506  }
507  }
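 /* Components sampled at half the rate of a counterpart in one direction are
  * flagged in upscale_h/upscale_v so those planes can be upscaled to the
  * selected pixel format after decoding. */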
508 
509  if (s->bayer) {
510  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
511  goto unk_pixfmt;
512  }
513 
514  switch (pix_fmt_id) {
515  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
516  if (!s->bayer)
517  goto unk_pixfmt;
518  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
519  break;
520  case 0x11111100:
521  if (s->rgb)
522  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
523  else {
524  if ( s->adobe_transform == 0
525  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
526  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
527  } else {
528  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
529  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
530  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
531  }
532  }
533  av_assert0(s->nb_components == 3);
534  break;
535  case 0x11111111:
536  if (s->rgb)
537  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
538  else {
539  if (s->adobe_transform == 0 && s->bits <= 8) {
540  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
541  } else {
542  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
543  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
544  }
545  }
546  av_assert0(s->nb_components == 4);
547  break;
548  case 0x22111122:
549  case 0x22111111:
550  if (s->adobe_transform == 0 && s->bits <= 8) {
551  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
552  s->upscale_v[1] = s->upscale_v[2] = 1;
553  s->upscale_h[1] = s->upscale_h[2] = 1;
554  } else if (s->adobe_transform == 2 && s->bits <= 8) {
555  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
556  s->upscale_v[1] = s->upscale_v[2] = 1;
557  s->upscale_h[1] = s->upscale_h[2] = 1;
558  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
559  } else {
560  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
561  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
562  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
563  }
564  av_assert0(s->nb_components == 4);
565  break;
566  case 0x12121100:
567  case 0x22122100:
568  case 0x21211100:
569  case 0x21112100:
570  case 0x22211200:
571  case 0x22221100:
572  case 0x22112200:
573  case 0x11222200:
574  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
575  else
576  goto unk_pixfmt;
577  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
578  break;
579  case 0x11000000:
580  case 0x13000000:
581  case 0x14000000:
582  case 0x31000000:
583  case 0x33000000:
584  case 0x34000000:
585  case 0x41000000:
586  case 0x43000000:
587  case 0x44000000:
588  if(s->bits <= 8)
589  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
590  else
591  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
592  break;
593  case 0x12111100:
594  case 0x14121200:
595  case 0x14111100:
596  case 0x22211100:
597  case 0x22112100:
598  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
599  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
600  else
601  goto unk_pixfmt;
602  s->upscale_v[0] = s->upscale_v[1] = 1;
603  } else {
604  if (pix_fmt_id == 0x14111100)
605  s->upscale_v[1] = s->upscale_v[2] = 1;
606  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
607  else
608  goto unk_pixfmt;
609  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
610  }
611  break;
612  case 0x21111100:
613  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
614  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
615  else
616  goto unk_pixfmt;
617  s->upscale_h[0] = s->upscale_h[1] = 1;
618  } else {
619  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
620  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
621  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
622  }
623  break;
624  case 0x31111100:
625  if (s->bits > 8)
626  goto unk_pixfmt;
627  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
628  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
629  s->upscale_h[1] = s->upscale_h[2] = 2;
630  break;
631  case 0x22121100:
632  case 0x22111200:
633  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
634  else
635  goto unk_pixfmt;
636  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
637  break;
638  case 0x22111100:
639  case 0x23111100:
640  case 0x42111100:
641  case 0x24111100:
642  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
643  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
644  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
645  if (pix_fmt_id == 0x42111100) {
646  if (s->bits > 8)
647  goto unk_pixfmt;
648  s->upscale_h[1] = s->upscale_h[2] = 1;
649  } else if (pix_fmt_id == 0x24111100) {
650  if (s->bits > 8)
651  goto unk_pixfmt;
652  s->upscale_v[1] = s->upscale_v[2] = 1;
653  } else if (pix_fmt_id == 0x23111100) {
654  if (s->bits > 8)
655  goto unk_pixfmt;
656  s->upscale_v[1] = s->upscale_v[2] = 2;
657  }
658  break;
659  case 0x41111100:
660  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
661  else
662  goto unk_pixfmt;
663  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
664  break;
665  default:
666  unk_pixfmt:
667  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
668  memset(s->upscale_h, 0, sizeof(s->upscale_h));
669  memset(s->upscale_v, 0, sizeof(s->upscale_v));
670  return AVERROR_PATCHWELCOME;
671  }
672  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
673  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
674  return AVERROR_PATCHWELCOME;
675  }
676  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
677  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
678  return AVERROR_PATCHWELCOME;
679  }
680  if (s->ls) {
681  memset(s->upscale_h, 0, sizeof(s->upscale_h));
682  memset(s->upscale_v, 0, sizeof(s->upscale_v));
683  if (s->nb_components == 3) {
684  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
685  } else if (s->nb_components != 1) {
686  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
687  return AVERROR_PATCHWELCOME;
688  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
689  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
690  else if (s->bits <= 8)
691  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
692  else
693  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
694  }
695 
696  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
697  if (!s->pix_desc) {
698  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
699  return AVERROR_BUG;
700  }
701 
702  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
703  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
704  } else {
705  enum AVPixelFormat pix_fmts[] = {
706 #if CONFIG_MJPEG_NVDEC_HWACCEL
707  AV_PIX_FMT_CUDA,
708 #endif
709 #if CONFIG_MJPEG_VAAPI_HWACCEL
710  AV_PIX_FMT_VAAPI,
711 #endif
712  s->avctx->pix_fmt,
713  AV_PIX_FMT_NONE,
714  };
715  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
716  if (s->hwaccel_pix_fmt < 0)
717  return AVERROR(EINVAL);
718 
719  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
720  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
721  }
722 
723  if (s->avctx->skip_frame == AVDISCARD_ALL) {
724  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
725  s->picture_ptr->key_frame = 1;
726  s->got_picture = 1;
727  return 0;
728  }
729 
730  av_frame_unref(s->picture_ptr);
731  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
732  return -1;
733  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
734  s->picture_ptr->key_frame = 1;
735  s->got_picture = 1;
736 
737  // Let's clear the palette to avoid leaving uninitialized values in it
738  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
739  memset(s->picture_ptr->data[1], 0, 1024);
740 
741  for (i = 0; i < 4; i++)
742  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
743 
744  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
745  s->width, s->height, s->linesize[0], s->linesize[1],
746  s->interlaced, s->avctx->height);
747 
748  }
749 
750  if ((s->rgb && !s->lossless && !s->ls) ||
751  (!s->rgb && s->ls && s->nb_components > 1) ||
752  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
753  ) {
754  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
755  return AVERROR_PATCHWELCOME;
756  }
757 
758  /* totally blank picture as progressive JPEG will only add details to it */
759  if (s->progressive) {
760  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
761  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
762  for (i = 0; i < s->nb_components; i++) {
763  int size = bw * bh * s->h_count[i] * s->v_count[i];
764  av_freep(&s->blocks[i]);
765  av_freep(&s->last_nnz[i]);
766  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
767  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
768  if (!s->blocks[i] || !s->last_nnz[i])
769  return AVERROR(ENOMEM);
770  s->block_stride[i] = bw * s->h_count[i];
771  }
772  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
773  }
774 
775  if (s->avctx->hwaccel) {
776  s->hwaccel_picture_private =
777  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
778  if (!s->hwaccel_picture_private)
779  return AVERROR(ENOMEM);
780 
781  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
782  s->raw_image_buffer_size);
783  if (ret < 0)
784  return ret;
785  }
786 
787  return 0;
788 }
789 
790 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
791 {
792  int code;
793  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
794  if (code < 0 || code > 16) {
795  av_log(s->avctx, AV_LOG_WARNING,
796  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
797  0, dc_index, &s->vlcs[0][dc_index]);
798  return 0xfffff;
799  }
800 
801  if (code)
802  return get_xbits(&s->gb, code);
803  else
804  return 0;
805 }
806 
807 /* decode block and dequantize */
808 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
809  int dc_index, int ac_index, uint16_t *quant_matrix)
810 {
811  int code, i, j, level, val;
812 
813  /* DC coef */
814  val = mjpeg_decode_dc(s, dc_index);
815  if (val == 0xfffff) {
816  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
817  return AVERROR_INVALIDDATA;
818  }
819  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
820  val = av_clip_int16(val);
821  s->last_dc[component] = val;
822  block[0] = val;
823  /* AC coefs */
824  i = 0;
825  {OPEN_READER(re, &s->gb);
826  do {
827  UPDATE_CACHE(re, &s->gb);
828  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
829 
830  i += ((unsigned)code) >> 4;
831  code &= 0xf;
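 /* Each AC Huffman symbol is run/size: the upper nibble is the number of zero
  * coefficients to skip, the lower nibble the number of magnitude bits to read. */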
832  if (code) {
833  if (code > MIN_CACHE_BITS - 16)
834  UPDATE_CACHE(re, &s->gb);
835 
836  {
837  int cache = GET_CACHE(re, &s->gb);
838  int sign = (~cache) >> 31;
839  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
840  }
841 
842  LAST_SKIP_BITS(re, &s->gb, code);
843 
844  if (i > 63) {
845  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
846  return AVERROR_INVALIDDATA;
847  }
848  j = s->permutated_scantable[i];
849  block[j] = level * quant_matrix[i];
850  }
851  } while (i < 63);
852  CLOSE_READER(re, &s->gb);}
853 
854  return 0;
855 }
856 
857 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
858  int component, int dc_index,
859  uint16_t *quant_matrix, int Al)
860 {
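 /* First DC scan of a progressive image: only the DC coefficient is coded,
  * dequantized with the quantizer step scaled by 2^Al (successive
  * approximation); the AC bands follow in later scans. */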
861  unsigned val;
862  s->bdsp.clear_block(block);
863  val = mjpeg_decode_dc(s, dc_index);
864  if (val == 0xfffff) {
865  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
866  return AVERROR_INVALIDDATA;
867  }
868  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
869  s->last_dc[component] = val;
870  block[0] = val;
871  return 0;
872 }
873 
874 /* decode block and dequantize - progressive JPEG version */
875 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
876  uint8_t *last_nnz, int ac_index,
877  uint16_t *quant_matrix,
878  int ss, int se, int Al, int *EOBRUN)
879 {
880  int code, i, j, val, run;
881  unsigned level;
882 
883  if (*EOBRUN) {
884  (*EOBRUN)--;
885  return 0;
886  }
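 /* EOBRUN counts additional blocks whose coefficients in the ss..se band are
  * all zero; while it is non-zero the bitstream carries nothing for them. */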
887 
888  {
889  OPEN_READER(re, &s->gb);
890  for (i = ss; ; i++) {
891  UPDATE_CACHE(re, &s->gb);
892  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
893 
894  run = ((unsigned) code) >> 4;
895  code &= 0xF;
896  if (code) {
897  i += run;
898  if (code > MIN_CACHE_BITS - 16)
899  UPDATE_CACHE(re, &s->gb);
900 
901  {
902  int cache = GET_CACHE(re, &s->gb);
903  int sign = (~cache) >> 31;
904  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
905  }
906 
907  LAST_SKIP_BITS(re, &s->gb, code);
908 
909  if (i >= se) {
910  if (i == se) {
911  j = s->permutated_scantable[se];
912  block[j] = level * (quant_matrix[se] << Al);
913  break;
914  }
915  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
916  return AVERROR_INVALIDDATA;
917  }
918  j = s->permutated_scantable[i];
919  block[j] = level * (quant_matrix[i] << Al);
920  } else {
921  if (run == 0xF) {// ZRL - skip 15 coefficients
922  i += 15;
923  if (i >= se) {
924  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
925  return AVERROR_INVALIDDATA;
926  }
927  } else {
928  val = (1 << run);
929  if (run) {
930  UPDATE_CACHE(re, &s->gb);
931  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
932  LAST_SKIP_BITS(re, &s->gb, run);
933  }
934  *EOBRUN = val - 1;
935  break;
936  }
937  }
938  }
939  CLOSE_READER(re, &s->gb);
940  }
941 
942  if (i > *last_nnz)
943  *last_nnz = i;
944 
945  return 0;
946 }
947 
948 #define REFINE_BIT(j) { \
949  UPDATE_CACHE(re, &s->gb); \
950  sign = block[j] >> 15; \
951  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
952  ((quant_matrix[i] ^ sign) - sign) << Al; \
953  LAST_SKIP_BITS(re, &s->gb, 1); \
954 }
955 
956 #define ZERO_RUN \
957 for (; ; i++) { \
958  if (i > last) { \
959  i += run; \
960  if (i > se) { \
961  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
962  return -1; \
963  } \
964  break; \
965  } \
966  j = s->permutated_scantable[i]; \
967  if (block[j]) \
968  REFINE_BIT(j) \
969  else if (run-- == 0) \
970  break; \
971 }
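 /* REFINE_BIT reads one correction bit for an already non-zero coefficient and,
  * if set, adds one quantizer step (scaled by Al) in the direction of the
  * coefficient's sign. ZERO_RUN skips `run` zero-valued coefficients, refining
  * the non-zero ones it passes, and stops at the position where the caller
  * places the newly coded coefficient. */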
972 
973 /* decode block and dequantize - progressive JPEG refinement pass */
974 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
975  uint8_t *last_nnz,
976  int ac_index, uint16_t *quant_matrix,
977  int ss, int se, int Al, int *EOBRUN)
978 {
979  int code, i = ss, j, sign, val, run;
980  int last = FFMIN(se, *last_nnz);
981 
982  OPEN_READER(re, &s->gb);
983  if (*EOBRUN) {
984  (*EOBRUN)--;
985  } else {
986  for (; ; i++) {
987  UPDATE_CACHE(re, &s->gb);
988  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
989 
990  if (code & 0xF) {
991  run = ((unsigned) code) >> 4;
992  UPDATE_CACHE(re, &s->gb);
993  val = SHOW_UBITS(re, &s->gb, 1);
994  LAST_SKIP_BITS(re, &s->gb, 1);
995  ZERO_RUN;
996  j = s->permutated_scantable[i];
997  val--;
998  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
999  if (i == se) {
1000  if (i > *last_nnz)
1001  *last_nnz = i;
1002  CLOSE_READER(re, &s->gb);
1003  return 0;
1004  }
1005  } else {
1006  run = ((unsigned) code) >> 4;
1007  if (run == 0xF) {
1008  ZERO_RUN;
1009  } else {
1010  val = run;
1011  run = (1 << run);
1012  if (val) {
1013  UPDATE_CACHE(re, &s->gb);
1014  run += SHOW_UBITS(re, &s->gb, val);
1015  LAST_SKIP_BITS(re, &s->gb, val);
1016  }
1017  *EOBRUN = run - 1;
1018  break;
1019  }
1020  }
1021  }
1022 
1023  if (i > *last_nnz)
1024  *last_nnz = i;
1025  }
1026 
1027  for (; i <= last; i++) {
1028  j = s->permutated_scantable[i];
1029  if (block[j])
1030  REFINE_BIT(j)
1031  }
1032  CLOSE_READER(re, &s->gb);
1033 
1034  return 0;
1035 }
1036 #undef REFINE_BIT
1037 #undef ZERO_RUN
1038 
1039 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1040 {
1041  int i;
1042  int reset = 0;
1043 
1044  if (s->restart_interval) {
1045  s->restart_count--;
1046  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1047  align_get_bits(&s->gb);
1048  for (i = 0; i < nb_components; i++) /* reset dc */
1049  s->last_dc[i] = (4 << s->bits);
1050  }
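 /* For THP, realign to a byte boundary and reset the DC predictors each time
  * the restart interval elapses, without requiring an RSTn marker. */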
1051 
1052  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1053  /* skip RSTn */
1054  if (s->restart_count == 0) {
1055  if( show_bits(&s->gb, i) == (1 << i) - 1
1056  || show_bits(&s->gb, i) == 0xFF) {
1057  int pos = get_bits_count(&s->gb);
1058  align_get_bits(&s->gb);
1059  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1060  skip_bits(&s->gb, 8);
1061  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1062  for (i = 0; i < nb_components; i++) /* reset dc */
1063  s->last_dc[i] = (4 << s->bits);
1064  reset = 1;
1065  } else
1066  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1067  }
1068  }
1069  }
1070  return reset;
1071 }
1072 
1073 /* Handles 1 to 4 components */
1074 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1075 {
1076  int i, mb_x, mb_y;
1077  unsigned width;
1078  uint16_t (*buffer)[4];
1079  int left[4], top[4], topleft[4];
1080  const int linesize = s->linesize[0];
1081  const int mask = ((1 << s->bits) - 1) << point_transform;
1082  int resync_mb_y = 0;
1083  int resync_mb_x = 0;
1084  int vpred[6];
1085 
1086  if (!s->bayer && s->nb_components < 3)
1087  return AVERROR_INVALIDDATA;
1088  if (s->bayer && s->nb_components > 2)
1089  return AVERROR_INVALIDDATA;
1090  if (s->nb_components <= 0 || s->nb_components > 4)
1091  return AVERROR_INVALIDDATA;
1092  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1093  return AVERROR_INVALIDDATA;
1094  if (s->bayer) {
1095  if (s->rct || s->pegasus_rct)
1096  return AVERROR_INVALIDDATA;
1097  }
1098 
1099 
1100  s->restart_count = s->restart_interval;
1101 
1102  if (s->restart_interval == 0)
1103  s->restart_interval = INT_MAX;
1104 
1105  if (s->bayer)
1106  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1107  else
1108  width = s->mb_width;
1109 
1110  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1111  if (!s->ljpeg_buffer)
1112  return AVERROR(ENOMEM);
1113 
1114  buffer = s->ljpeg_buffer;
1115 
1116  for (i = 0; i < 4; i++)
1117  buffer[0][i] = 1 << (s->bits - 1);
1118 
1119  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1120  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1121 
1122  if (s->interlaced && s->bottom_field)
1123  ptr += linesize >> 1;
1124 
1125  for (i = 0; i < 4; i++)
1126  top[i] = left[i] = topleft[i] = buffer[0][i];
1127 
1128  if ((mb_y * s->width) % s->restart_interval == 0) {
1129  for (i = 0; i < 6; i++)
1130  vpred[i] = 1 << (s->bits-1);
1131  }
1132 
1133  for (mb_x = 0; mb_x < width; mb_x++) {
1134  int modified_predictor = predictor;
1135 
1136  if (get_bits_left(&s->gb) < 1) {
1137  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1138  return AVERROR_INVALIDDATA;
1139  }
1140 
1141  if (s->restart_interval && !s->restart_count){
1142  s->restart_count = s->restart_interval;
1143  resync_mb_x = mb_x;
1144  resync_mb_y = mb_y;
1145  for(i=0; i<4; i++)
1146  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1147  }
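 /* After a restart (and on the first column of a row) the neighbours used for
  * prediction are unreliable, so the predictors are reset to mid-range and
  * predictor 1 (left) is forced below. */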
1148  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1149  modified_predictor = 1;
1150 
1151  for (i=0;i<nb_components;i++) {
1152  int pred, dc;
1153 
1154  topleft[i] = top[i];
1155  top[i] = buffer[mb_x][i];
1156 
1157  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1158  if(dc == 0xFFFFF)
1159  return -1;
1160 
1161  if (!s->bayer || mb_x) {
1162  pred = left[i];
1163  } else { /* This path runs only for the first line in bayer images */
1164  vpred[i] += dc;
1165  pred = vpred[i] - dc;
1166  }
1167 
1168  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1169 
1170  left[i] = buffer[mb_x][i] =
1171  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1172  }
1173 
1174  if (s->restart_interval && !--s->restart_count) {
1175  align_get_bits(&s->gb);
1176  skip_bits(&s->gb, 16); /* skip RSTn */
1177  }
1178  }
1179  if (s->rct && s->nb_components == 4) {
1180  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1181  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1182  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1183  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1184  ptr[4*mb_x + 0] = buffer[mb_x][3];
1185  }
1186  } else if (s->nb_components == 4) {
1187  for(i=0; i<nb_components; i++) {
1188  int c= s->comp_index[i];
1189  if (s->bits <= 8) {
1190  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1191  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1192  }
1193  } else if(s->bits == 9) {
1194  return AVERROR_PATCHWELCOME;
1195  } else {
1196  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1197  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1198  }
1199  }
1200  }
1201  } else if (s->rct) {
1202  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1203  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1204  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1205  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1206  }
1207  } else if (s->pegasus_rct) {
1208  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1209  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1210  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1211  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1212  }
1213  } else if (s->bayer) {
1214  if (s->bits <= 8)
1215  return AVERROR_PATCHWELCOME;
1216  if (nb_components == 1) {
1217  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1218  for (mb_x = 0; mb_x < width; mb_x++)
1219  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1220  } else if (nb_components == 2) {
1221  for (mb_x = 0; mb_x < width; mb_x++) {
1222  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1223  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1224  }
1225  }
1226  } else {
1227  for(i=0; i<nb_components; i++) {
1228  int c= s->comp_index[i];
1229  if (s->bits <= 8) {
1230  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1231  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1232  }
1233  } else if(s->bits == 9) {
1234  return AVERROR_PATCHWELCOME;
1235  } else {
1236  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1237  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1238  }
1239  }
1240  }
1241  }
1242  }
1243  return 0;
1244 }
1245 
1246 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1247  int point_transform, int nb_components)
1248 {
1249  int i, mb_x, mb_y, mask;
1250  int bits= (s->bits+7)&~7;
1251  int resync_mb_y = 0;
1252  int resync_mb_x = 0;
1253 
1254  point_transform += bits - s->bits;
1255  mask = ((1 << s->bits) - 1) << point_transform;
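 /* bits is s->bits rounded up to a whole number of bytes; widening
  * point_transform by the difference places the decoded samples at the top of
  * the 8- or 16-bit output range. */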
1256 
1257  av_assert0(nb_components>=1 && nb_components<=4);
1258 
1259  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1260  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1261  if (get_bits_left(&s->gb) < 1) {
1262  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1263  return AVERROR_INVALIDDATA;
1264  }
1265  if (s->restart_interval && !s->restart_count){
1266  s->restart_count = s->restart_interval;
1267  resync_mb_x = mb_x;
1268  resync_mb_y = mb_y;
1269  }
1270 
1271  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1272  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1273  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1274  for (i = 0; i < nb_components; i++) {
1275  uint8_t *ptr;
1276  uint16_t *ptr16;
1277  int n, h, v, x, y, c, j, linesize;
1278  n = s->nb_blocks[i];
1279  c = s->comp_index[i];
1280  h = s->h_scount[i];
1281  v = s->v_scount[i];
1282  x = 0;
1283  y = 0;
1284  linesize= s->linesize[c];
1285 
1286  if(bits>8) linesize /= 2;
1287 
1288  for(j=0; j<n; j++) {
1289  int pred, dc;
1290 
1291  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1292  if(dc == 0xFFFFF)
1293  return -1;
1294  if ( h * mb_x + x >= s->width
1295  || v * mb_y + y >= s->height) {
1296  // Nothing to do
1297  } else if (bits<=8) {
1298  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1299  if(y==0 && toprow){
1300  if(x==0 && leftcol){
1301  pred= 1 << (bits - 1);
1302  }else{
1303  pred= ptr[-1];
1304  }
1305  }else{
1306  if(x==0 && leftcol){
1307  pred= ptr[-linesize];
1308  }else{
1309  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1310  }
1311  }
1312 
1313  if (s->interlaced && s->bottom_field)
1314  ptr += linesize >> 1;
1315  pred &= mask;
1316  *ptr= pred + ((unsigned)dc << point_transform);
1317  }else{
1318  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1319  if(y==0 && toprow){
1320  if(x==0 && leftcol){
1321  pred= 1 << (bits - 1);
1322  }else{
1323  pred= ptr16[-1];
1324  }
1325  }else{
1326  if(x==0 && leftcol){
1327  pred= ptr16[-linesize];
1328  }else{
1329  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1330  }
1331  }
1332 
1333  if (s->interlaced && s->bottom_field)
1334  ptr16 += linesize >> 1;
1335  pred &= mask;
1336  *ptr16= pred + ((unsigned)dc << point_transform);
1337  }
1338  if (++x == h) {
1339  x = 0;
1340  y++;
1341  }
1342  }
1343  }
1344  } else {
1345  for (i = 0; i < nb_components; i++) {
1346  uint8_t *ptr;
1347  uint16_t *ptr16;
1348  int n, h, v, x, y, c, j, linesize, dc;
1349  n = s->nb_blocks[i];
1350  c = s->comp_index[i];
1351  h = s->h_scount[i];
1352  v = s->v_scount[i];
1353  x = 0;
1354  y = 0;
1355  linesize = s->linesize[c];
1356 
1357  if(bits>8) linesize /= 2;
1358 
1359  for (j = 0; j < n; j++) {
1360  int pred;
1361 
1362  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1363  if(dc == 0xFFFFF)
1364  return -1;
1365  if ( h * mb_x + x >= s->width
1366  || v * mb_y + y >= s->height) {
1367  // Nothing to do
1368  } else if (bits<=8) {
1369  ptr = s->picture_ptr->data[c] +
1370  (linesize * (v * mb_y + y)) +
1371  (h * mb_x + x); //FIXME optimize this crap
1372  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1373 
1374  pred &= mask;
1375  *ptr = pred + ((unsigned)dc << point_transform);
1376  }else{
1377  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1378  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1379 
1380  pred &= mask;
1381  *ptr16= pred + ((unsigned)dc << point_transform);
1382  }
1383 
1384  if (++x == h) {
1385  x = 0;
1386  y++;
1387  }
1388  }
1389  }
1390  }
1391  if (s->restart_interval && !--s->restart_count) {
1392  align_get_bits(&s->gb);
1393  skip_bits(&s->gb, 16); /* skip RSTn */
1394  }
1395  }
1396  }
1397  return 0;
1398 }
1399 
1400 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1401  uint8_t *dst, const uint8_t *src,
1402  int linesize, int lowres)
1403 {
1404  switch (lowres) {
1405  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1406  break;
1407  case 1: copy_block4(dst, src, linesize, linesize, 4);
1408  break;
1409  case 2: copy_block2(dst, src, linesize, linesize, 2);
1410  break;
1411  case 3: *dst = *src;
1412  break;
1413  }
1414 }
1415 
1416 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1417 {
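 /* For bit depths that are not a multiple of 8, left-shift the IDCT output so
  * the samples span the full 8- or 16-bit range of the output pixel format. */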
1418  int block_x, block_y;
1419  int size = 8 >> s->avctx->lowres;
1420  if (s->bits > 8) {
1421  for (block_y=0; block_y<size; block_y++)
1422  for (block_x=0; block_x<size; block_x++)
1423  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1424  } else {
1425  for (block_y=0; block_y<size; block_y++)
1426  for (block_x=0; block_x<size; block_x++)
1427  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1428  }
1429 }
1430 
1431 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1432  int Al, const uint8_t *mb_bitmask,
1433  int mb_bitmask_size,
1434  const AVFrame *reference)
1435 {
1436  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1437  uint8_t *data[MAX_COMPONENTS];
1438  const uint8_t *reference_data[MAX_COMPONENTS];
1439  int linesize[MAX_COMPONENTS];
1440  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1441  int bytes_per_pixel = 1 + (s->bits > 8);
1442 
1443  if (mb_bitmask) {
1444  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1445  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1446  return AVERROR_INVALIDDATA;
1447  }
1448  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1449  }
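 /* When a reference frame and mb_bitmask are supplied (e.g. by the MxPEG
  * decoder), only macroblocks whose bit is set are decoded; the others are
  * copied unchanged from the reference frame. */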
1450 
1451  s->restart_count = 0;
1452 
1453  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1454  &chroma_v_shift);
1455  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1456  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1457 
1458  for (i = 0; i < nb_components; i++) {
1459  int c = s->comp_index[i];
1460  data[c] = s->picture_ptr->data[c];
1461  reference_data[c] = reference ? reference->data[c] : NULL;
1462  linesize[c] = s->linesize[c];
1463  s->coefs_finished[c] |= 1;
1464  }
1465 
1466  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1467  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1468  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1469 
1470  if (s->restart_interval && !s->restart_count)
1471  s->restart_count = s->restart_interval;
1472 
1473  if (get_bits_left(&s->gb) < 0) {
1474  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1475  -get_bits_left(&s->gb));
1476  return AVERROR_INVALIDDATA;
1477  }
1478  for (i = 0; i < nb_components; i++) {
1479  uint8_t *ptr;
1480  int n, h, v, x, y, c, j;
1481  int block_offset;
1482  n = s->nb_blocks[i];
1483  c = s->comp_index[i];
1484  h = s->h_scount[i];
1485  v = s->v_scount[i];
1486  x = 0;
1487  y = 0;
1488  for (j = 0; j < n; j++) {
1489  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1490  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1491 
1492  if (s->interlaced && s->bottom_field)
1493  block_offset += linesize[c] >> 1;
1494  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1495  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1496  ptr = data[c] + block_offset;
1497  } else
1498  ptr = NULL;
1499  if (!s->progressive) {
1500  if (copy_mb) {
1501  if (ptr)
1502  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1503  linesize[c], s->avctx->lowres);
1504 
1505  } else {
1506  s->bdsp.clear_block(s->block);
1507  if (decode_block(s, s->block, i,
1508  s->dc_index[i], s->ac_index[i],
1509  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1510  av_log(s->avctx, AV_LOG_ERROR,
1511  "error y=%d x=%d\n", mb_y, mb_x);
1512  return AVERROR_INVALIDDATA;
1513  }
1514  if (ptr && linesize[c]) {
1515  s->idsp.idct_put(ptr, linesize[c], s->block);
1516  if (s->bits & 7)
1517  shift_output(s, ptr, linesize[c]);
1518  }
1519  }
1520  } else {
1521  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1522  (h * mb_x + x);
1523  int16_t *block = s->blocks[c][block_idx];
1524  if (Ah)
1525  block[0] += get_bits1(&s->gb) *
1526  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1527  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1528  s->quant_matrixes[s->quant_sindex[i]],
1529  Al) < 0) {
1530  av_log(s->avctx, AV_LOG_ERROR,
1531  "error y=%d x=%d\n", mb_y, mb_x);
1532  return AVERROR_INVALIDDATA;
1533  }
1534  }
1535  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1536  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1537  mb_x, mb_y, x, y, c, s->bottom_field,
1538  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1539  if (++x == h) {
1540  x = 0;
1541  y++;
1542  }
1543  }
1544  }
1545 
1546  handle_rstn(s, nb_components);
1547  }
1548  }
1549  return 0;
1550 }
1551 
1552 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1553  int se, int Ah, int Al)
1554 {
1555  int mb_x, mb_y;
1556  int EOBRUN = 0;
1557  int c = s->comp_index[0];
1558  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1559 
1560  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1561  if (se < ss || se > 63) {
1562  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1563  return AVERROR_INVALIDDATA;
1564  }
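 /* ss..se select the band of AC coefficients coded in this scan; Ah/Al are the
  * successive-approximation bit positions (Ah != 0 selects the refinement pass
  * below). */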
1565 
1566  // s->coefs_finished is a bitmask for coefficients coded
1567  // ss and se are parameters telling start and end coefficients
1568  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1569 
1570  s->restart_count = 0;
1571 
1572  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1573  int block_idx = mb_y * s->block_stride[c];
1574  int16_t (*block)[64] = &s->blocks[c][block_idx];
1575  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1576  if (get_bits_left(&s->gb) <= 0) {
1577  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1578  return AVERROR_INVALIDDATA;
1579  }
1580  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1581  int ret;
1582  if (s->restart_interval && !s->restart_count)
1583  s->restart_count = s->restart_interval;
1584 
1585  if (Ah)
1586  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1587  quant_matrix, ss, se, Al, &EOBRUN);
1588  else
1589  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1590  quant_matrix, ss, se, Al, &EOBRUN);
1591 
1592  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1593  ret = AVERROR_INVALIDDATA;
1594  if (ret < 0) {
1595  av_log(s->avctx, AV_LOG_ERROR,
1596  "error y=%d x=%d\n", mb_y, mb_x);
1597  return AVERROR_INVALIDDATA;
1598  }
1599 
1600  if (handle_rstn(s, 0))
1601  EOBRUN = 0;
1602  }
1603  }
1604  return 0;
1605 }
1606 
1607 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1608 {
1609  int mb_x, mb_y;
1610  int c;
1611  const int bytes_per_pixel = 1 + (s->bits > 8);
1612  const int block_size = s->lossless ? 1 : 8;
1613 
1614  for (c = 0; c < s->nb_components; c++) {
1615  uint8_t *data = s->picture_ptr->data[c];
1616  int linesize = s->linesize[c];
1617  int h = s->h_max / s->h_count[c];
1618  int v = s->v_max / s->v_count[c];
1619  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1620  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1621 
1622  if (~s->coefs_finished[c])
1623  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1624 
1625  if (s->interlaced && s->bottom_field)
1626  data += linesize >> 1;
1627 
1628  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1629  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1630  int block_idx = mb_y * s->block_stride[c];
1631  int16_t (*block)[64] = &s->blocks[c][block_idx];
1632  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1633  s->idsp.idct_put(ptr, linesize, *block);
1634  if (s->bits & 7)
1635  shift_output(s, ptr, linesize);
1636  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1637  }
1638  }
1639  }
1640 }
1641 
1642 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1643  int mb_bitmask_size, const AVFrame *reference)
1644 {
1645  int len, nb_components, i, h, v, predictor, point_transform;
1646  int index, id, ret;
1647  const int block_size = s->lossless ? 1 : 8;
1648  int ilv, prev_shift;
1649 
1650  if (!s->got_picture) {
1651  av_log(s->avctx, AV_LOG_WARNING,
1652  "Can not process SOS before SOF, skipping\n");
1653  return -1;
1654  }
1655 
1656  if (reference) {
1657  if (reference->width != s->picture_ptr->width ||
1658  reference->height != s->picture_ptr->height ||
1659  reference->format != s->picture_ptr->format) {
1660  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1661  return AVERROR_INVALIDDATA;
1662  }
1663  }
1664 
1665  /* XXX: verify len field validity */
1666  len = get_bits(&s->gb, 16);
1667  nb_components = get_bits(&s->gb, 8);
1668  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1669  avpriv_report_missing_feature(s->avctx,
1670  "decode_sos: nb_components (%d)",
1671  nb_components);
1672  return AVERROR_PATCHWELCOME;
1673  }
1674  if (len != 6 + 2 * nb_components) {
1675  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1676  return AVERROR_INVALIDDATA;
1677  }
1678  for (i = 0; i < nb_components; i++) {
1679  id = get_bits(&s->gb, 8);
1680  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1681  /* find component index */
1682  for (index = 0; index < s->nb_components; index++)
1683  if (id == s->component_id[index])
1684  break;
1685  if (index == s->nb_components) {
1686  av_log(s->avctx, AV_LOG_ERROR,
1687  "decode_sos: index(%d) out of components\n", index);
1688  return AVERROR_INVALIDDATA;
1689  }
1690  /* Metasoft MJPEG codec has Cb and Cr swapped */
1691  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1692  && nb_components == 3 && s->nb_components == 3 && i)
1693  index = 3 - i;
1694 
1695  s->quant_sindex[i] = s->quant_index[index];
1696  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1697  s->h_scount[i] = s->h_count[index];
1698  s->v_scount[i] = s->v_count[index];
1699 
1700  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1701  index = (index+2)%3;
1702 
1703  s->comp_index[i] = index;
1704 
1705  s->dc_index[i] = get_bits(&s->gb, 4);
1706  s->ac_index[i] = get_bits(&s->gb, 4);
1707 
1708  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1709  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1710  goto out_of_range;
1711  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1712  goto out_of_range;
1713  }
1714 
1715  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1716  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1717  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1718  prev_shift = get_bits(&s->gb, 4); /* Ah */
1719  point_transform = get_bits(&s->gb, 4); /* Al */
1720  }else
1721  prev_shift = point_transform = 0;
1722 
1723  if (nb_components > 1) {
1724  /* interleaved stream */
1725  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1726  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1727  } else if (!s->ls) { /* skip this for JPEG-LS */
1728  h = s->h_max / s->h_scount[0];
1729  v = s->v_max / s->v_scount[0];
1730  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1731  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1732  s->nb_blocks[0] = 1;
1733  s->h_scount[0] = 1;
1734  s->v_scount[0] = 1;
1735  }
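 /* Non-interleaved scan: the MCU is a single 8x8 block (or one sample when
  * lossless), so the MB grid is computed per component and the per-MCU block
  * counts are forced to 1. */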
1736 
1737  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1738  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1739  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1740  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1741  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1742 
1743 
1744  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1745  for (i = s->mjpb_skiptosod; i > 0; i--)
1746  skip_bits(&s->gb, 8);
1747 
1748 next_field:
1749  for (i = 0; i < nb_components; i++)
1750  s->last_dc[i] = (4 << s->bits);
1751 
1752  if (s->avctx->hwaccel) {
1753  int bytes_to_start = get_bits_count(&s->gb) / 8;
1754  av_assert0(bytes_to_start >= 0 &&
1755  s->raw_scan_buffer_size >= bytes_to_start);
1756 
1757  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1758  s->raw_scan_buffer + bytes_to_start,
1759  s->raw_scan_buffer_size - bytes_to_start);
1760  if (ret < 0)
1761  return ret;
1762 
1763  } else if (s->lossless) {
1764  av_assert0(s->picture_ptr == s->picture);
1765  if (CONFIG_JPEGLS_DECODER && s->ls) {
1766 // for () {
1767 // reset_ls_coding_parameters(s, 0);
1768 
1769  if ((ret = ff_jpegls_decode_picture(s, predictor,
1770  point_transform, ilv)) < 0)
1771  return ret;
1772  } else {
1773  if (s->rgb || s->bayer) {
1774  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1775  return ret;
1776  } else {
1777  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1778  point_transform,
1779  nb_components)) < 0)
1780  return ret;
1781  }
1782  }
1783  } else {
1784  if (s->progressive && predictor) {
1785  av_assert0(s->picture_ptr == s->picture);
1786  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1787  ilv, prev_shift,
1788  point_transform)) < 0)
1789  return ret;
1790  } else {
1791  if ((ret = mjpeg_decode_scan(s, nb_components,
1792  prev_shift, point_transform,
1793  mb_bitmask, mb_bitmask_size, reference)) < 0)
1794  return ret;
1795  }
1796  }
1797 
1798  if (s->interlaced &&
1799  get_bits_left(&s->gb) > 32 &&
1800  show_bits(&s->gb, 8) == 0xFF) {
1801  GetBitContext bak = s->gb;
1802  align_get_bits(&bak);
1803  if (show_bits(&bak, 16) == 0xFFD1) {
1804  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1805  s->gb = bak;
1806  skip_bits(&s->gb, 16);
1807  s->bottom_field ^= 1;
1808 
1809  goto next_field;
1810  }
1811  }
1812 
1813  emms_c();
1814  return 0;
1815  out_of_range:
1816  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1817  return AVERROR_INVALIDDATA;
1818 }
1819 
1820 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1821 {
1822  if (get_bits(&s->gb, 16) != 4)
1823  return AVERROR_INVALIDDATA;
1824  s->restart_interval = get_bits(&s->gb, 16);
1825  s->restart_count = 0;
1826  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1827  s->restart_interval);
1828 
1829  return 0;
1830 }
1831 
1832 static int mjpeg_decode_app(MJpegDecodeContext *s)
1833 {
1834  int len, id, i;
1835 
1836  len = get_bits(&s->gb, 16);
1837  if (len < 6) {
1838  if (s->bayer) {
1839  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1840  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1841  skip_bits(&s->gb, len);
1842  return 0;
1843  } else
1844  return AVERROR_INVALIDDATA;
1845  }
1846  if (8 * len > get_bits_left(&s->gb))
1847  return AVERROR_INVALIDDATA;
1848 
1849  id = get_bits_long(&s->gb, 32);
1850  len -= 6;
1851 
1852  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1853  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1854  av_fourcc2str(av_bswap32(id)), id, len);
1855 
1856  /* Buggy AVID, it puts EOI only at every 10th frame. */
1857  /* Also, this fourcc is used by non-AVID files too; it holds some
1858  information, but it is always present in AVID-created files. */
1859  if (id == AV_RB32("AVI1")) {
1860  /* structure:
1861  4bytes AVI1
1862  1bytes polarity
1863  1bytes always zero
1864  4bytes field_size
1865  4bytes field_size_less_padding
1866  */
1867  s->buggy_avid = 1;
1868  i = get_bits(&s->gb, 8); len--;
1869  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1870  goto out;
1871  }
1872 
1873  if (id == AV_RB32("JFIF")) {
1874  int t_w, t_h, v1, v2;
1875  if (len < 8)
1876  goto out;
1877  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1878  v1 = get_bits(&s->gb, 8);
1879  v2 = get_bits(&s->gb, 8);
1880  skip_bits(&s->gb, 8);
1881 
1882  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1883  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1884  if ( s->avctx->sample_aspect_ratio.num <= 0
1885  || s->avctx->sample_aspect_ratio.den <= 0) {
1886  s->avctx->sample_aspect_ratio.num = 0;
1887  s->avctx->sample_aspect_ratio.den = 1;
1888  }
1889 
1890  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1891  av_log(s->avctx, AV_LOG_INFO,
1892  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1893  v1, v2,
1894  s->avctx->sample_aspect_ratio.num,
1895  s->avctx->sample_aspect_ratio.den);
1896 
1897  len -= 8;
1898  if (len >= 2) {
1899  t_w = get_bits(&s->gb, 8);
1900  t_h = get_bits(&s->gb, 8);
1901  if (t_w && t_h) {
1902  /* skip thumbnail */
1903  if (len - 10 - (t_w * t_h * 3) > 0)
1904  len -= t_w * t_h * 3;
1905  }
1906  len -= 2;
1907  }
1908  goto out;
1909  }
1910 
1911  if ( id == AV_RB32("Adob")
1912  && len >= 7
1913  && show_bits(&s->gb, 8) == 'e'
1914  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1915  skip_bits(&s->gb, 8); /* 'e' */
1916  skip_bits(&s->gb, 16); /* version */
1917  skip_bits(&s->gb, 16); /* flags0 */
1918  skip_bits(&s->gb, 16); /* flags1 */
1919  s->adobe_transform = get_bits(&s->gb, 8);
1920  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1921  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1922  len -= 7;
1923  goto out;
1924  }
1925 
1926  if (id == AV_RB32("LJIF")) {
1927  int rgb = s->rgb;
1928  int pegasus_rct = s->pegasus_rct;
1929  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1930  av_log(s->avctx, AV_LOG_INFO,
1931  "Pegasus lossless jpeg header found\n");
1932  skip_bits(&s->gb, 16); /* version ? */
1933  skip_bits(&s->gb, 16); /* unknown always 0? */
1934  skip_bits(&s->gb, 16); /* unknown always 0? */
1935  skip_bits(&s->gb, 16); /* unknown always 0? */
1936  switch (i=get_bits(&s->gb, 8)) {
1937  case 1:
1938  rgb = 1;
1939  pegasus_rct = 0;
1940  break;
1941  case 2:
1942  rgb = 1;
1943  pegasus_rct = 1;
1944  break;
1945  default:
1946  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1947  }
1948 
1949  len -= 9;
1950  if (s->bayer)
1951  goto out;
1952  if (s->got_picture)
1953  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1954  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1955  goto out;
1956  }
1957 
1958  s->rgb = rgb;
1959  s->pegasus_rct = pegasus_rct;
1960 
1961  goto out;
1962  }
1963  if (id == AV_RL32("colr") && len > 0) {
1964  s->colr = get_bits(&s->gb, 8);
1965  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1966  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1967  len --;
1968  goto out;
1969  }
1970  if (id == AV_RL32("xfrm") && len > 0) {
1971  s->xfrm = get_bits(&s->gb, 8);
1972  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1973  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1974  len --;
1975  goto out;
1976  }
1977 
1978  /* JPS extension by VRex */
1979  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1980  int flags, layout, type;
1981  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1982  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1983 
1984  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1985  skip_bits(&s->gb, 16); len -= 2; /* block length */
1986  skip_bits(&s->gb, 8); /* reserved */
1987  flags = get_bits(&s->gb, 8);
1988  layout = get_bits(&s->gb, 8);
1989  type = get_bits(&s->gb, 8);
1990  len -= 4;
1991 
1992  av_freep(&s->stereo3d);
1993  s->stereo3d = av_stereo3d_alloc();
1994  if (!s->stereo3d) {
1995  goto out;
1996  }
1997  if (type == 0) {
1998  s->stereo3d->type = AV_STEREO3D_2D;
1999  } else if (type == 1) {
2000  switch (layout) {
2001  case 0x01:
2002  s->stereo3d->type = AV_STEREO3D_LINES;
2003  break;
2004  case 0x02:
2005  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2006  break;
2007  case 0x03:
2008  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2009  break;
2010  }
2011  if (!(flags & 0x04)) {
2012  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2013  }
2014  }
2015  goto out;
2016  }
2017 
2018  /* EXIF metadata */
2019  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2020  GetByteContext gbytes;
2021  int ret, le, ifd_offset, bytes_read;
2022  const uint8_t *aligned;
2023 
2024  skip_bits(&s->gb, 16); // skip padding
2025  len -= 2;
2026 
2027  // init byte wise reading
2028  aligned = align_get_bits(&s->gb);
2029  bytestream2_init(&gbytes, aligned, len);
2030 
2031  // read TIFF header
2032  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2033  if (ret) {
2034  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2035  } else {
2036  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2037 
2038  // read 0th IFD and store the metadata
2039  // (return values > 0 indicate the presence of subimage metadata)
2040  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2041  if (ret < 0) {
2042  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2043  }
2044  }
2045 
2046  bytes_read = bytestream2_tell(&gbytes);
2047  skip_bits(&s->gb, bytes_read << 3);
2048  len -= bytes_read;
2049 
2050  goto out;
2051  }
2052 
2053  /* Apple MJPEG-A */
2054  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2055  id = get_bits_long(&s->gb, 32);
2056  len -= 4;
2057  /* Apple MJPEG-A */
2058  if (id == AV_RB32("mjpg")) {
2059  /* structure:
2060  4bytes field size
2061  4bytes pad field size
2062  4bytes next off
2063  4bytes quant off
2064  4bytes huff off
2065  4bytes image off
2066  4bytes scan off
2067  4bytes data off
2068  */
2069  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2070  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2071  }
2072  }
2073 
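 /* An ICC profile may be split across several APP2 markers; each chunk carries
  * its sequence number and the total chunk count so the parts can be
  * reassembled once all of them have been read. */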
2074  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2075  int id2;
2076  unsigned seqno;
2077  unsigned nummarkers;
2078 
2079  id = get_bits_long(&s->gb, 32);
2080  id2 = get_bits(&s->gb, 24);
2081  len -= 7;
2082  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2083  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2084  goto out;
2085  }
2086 
2087  skip_bits(&s->gb, 8);
2088  seqno = get_bits(&s->gb, 8);
2089  len -= 2;
2090  if (seqno == 0) {
2091  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2092  goto out;
2093  }
2094 
2095  nummarkers = get_bits(&s->gb, 8);
2096  len -= 1;
2097  if (nummarkers == 0) {
2098  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2099  goto out;
2100  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2101  av_log(s->avctx, AV_LOG_WARNING, "Mistmatch in coded number of ICC markers between markers\n");
2102  goto out;
2103  } else if (seqno > nummarkers) {
2104  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2105  goto out;
2106  }
2107 
2108  /* Allocate if this is the first APP2 we've seen. */
2109  if (s->iccnum == 0) {
2110  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2111  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2112  return AVERROR(ENOMEM);
2113  }
2114  s->iccnum = nummarkers;
2115  }
2116 
2117  if (s->iccentries[seqno - 1].data) {
2118  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2119  goto out;
2120  }
2121 
2122  s->iccentries[seqno - 1].length = len;
2123  s->iccentries[seqno - 1].data = av_malloc(len);
2124  if (!s->iccentries[seqno - 1].data) {
2125  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2126  return AVERROR(ENOMEM);
2127  }
2128 
2129  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2130  skip_bits(&s->gb, len << 3);
2131  len = 0;
2132  s->iccread++;
2133 
2134  if (s->iccread > s->iccnum)
2135  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2136  }
2137 
2138 out:
2139  /* slow but needed for extreme adobe jpegs */
2140  if (len < 0)
2141  av_log(s->avctx, AV_LOG_ERROR,
2142  "mjpeg: error, decode_app parser read over the end\n");
2143  while (--len > 0)
2144  skip_bits(&s->gb, 8);
2145 
2146  return 0;
2147 }
2148 
2149 static int mjpeg_decode_com(MJpegDecodeContext *s)
2150 {
2151  int len = get_bits(&s->gb, 16);
2152  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2153  int i;
2154  char *cbuf = av_malloc(len - 1);
2155  if (!cbuf)
2156  return AVERROR(ENOMEM);
2157 
2158  for (i = 0; i < len - 2; i++)
2159  cbuf[i] = get_bits(&s->gb, 8);
2160  if (i > 0 && cbuf[i - 1] == '\n')
2161  cbuf[i - 1] = 0;
2162  else
2163  cbuf[i] = 0;
2164 
2165  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2166  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2167 
2168  /* buggy avid, it puts EOI only at every 10th frame */
2169  if (!strncmp(cbuf, "AVID", 4)) {
2170  parse_avid(s, cbuf, len);
2171  } else if (!strcmp(cbuf, "CS=ITU601"))
2172  s->cs_itu601 = 1;
2173  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2174  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2175  s->flipped = 1;
2176  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2177  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2178  s->multiscope = 2;
2179  }
2180 
2181  av_free(cbuf);
2182  }
2183 
2184  return 0;
2185 }
2186 
2187 /* return the 8 bit start code value and update the search
2188  state. Return -1 if no start code found */
2189 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2190 {
2191  const uint8_t *buf_ptr;
2192  unsigned int v, v2;
2193  int val;
2194  int skipped = 0;
2195 
2196  buf_ptr = *pbuf_ptr;
2197  while (buf_end - buf_ptr > 1) {
2198  v = *buf_ptr++;
2199  v2 = *buf_ptr;
2200  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2201  val = *buf_ptr++;
2202  goto found;
2203  }
2204  skipped++;
2205  }
2206  buf_ptr = buf_end;
2207  val = -1;
2208 found:
2209  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2210  *pbuf_ptr = buf_ptr;
2211  return val;
2212 }
2213 
2214 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2215  const uint8_t **buf_ptr, const uint8_t *buf_end,
2216  const uint8_t **unescaped_buf_ptr,
2217  int *unescaped_buf_size)
2218 {
2219  int start_code;
2220  start_code = find_marker(buf_ptr, buf_end);
2221 
2222  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2223  if (!s->buffer)
2224  return AVERROR(ENOMEM);
2225 
2226  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2227  if (start_code == SOS && !s->ls) {
2228  const uint8_t *src = *buf_ptr;
2229  const uint8_t *ptr = src;
2230  uint8_t *dst = s->buffer;
2231 
2232  #define copy_data_segment(skip) do { \
2233  ptrdiff_t length = (ptr - src) - (skip); \
2234  if (length > 0) { \
2235  memcpy(dst, src, length); \
2236  dst += length; \
2237  src = ptr; \
2238  } \
2239  } while (0)
2240 
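 /* Strip JPEG byte stuffing: a 0x00 following 0xFF is dropped, RSTn markers
  * are kept, and any other marker terminates the scan data. THP stores the
  * scan unescaped, so it is copied as-is. */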
2241  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2242  ptr = buf_end;
2243  copy_data_segment(0);
2244  } else {
2245  while (ptr < buf_end) {
2246  uint8_t x = *(ptr++);
2247 
2248  if (x == 0xff) {
2249  ptrdiff_t skip = 0;
2250  while (ptr < buf_end && x == 0xff) {
2251  x = *(ptr++);
2252  skip++;
2253  }
2254 
2255  /* 0xFF, 0xFF, ... */
2256  if (skip > 1) {
2257  copy_data_segment(skip);
2258 
2259  /* decrement src as it is equal to ptr after the
2260  * copy_data_segment macro and we might want to
2261  * copy the current value of x later on */
2262  src--;
2263  }
2264 
2265  if (x < RST0 || x > RST7) {
2266  copy_data_segment(1);
2267  if (x)
2268  break;
2269  }
2270  }
2271  }
2272  if (src < ptr)
2273  copy_data_segment(0);
2274  }
2275  #undef copy_data_segment
2276 
2277  *unescaped_buf_ptr = s->buffer;
2278  *unescaped_buf_size = dst - s->buffer;
2279  memset(s->buffer + *unescaped_buf_size, 0,
2280  AV_INPUT_BUFFER_PADDING_SIZE);
2281 
2282  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2283  (buf_end - *buf_ptr) - (dst - s->buffer));
2284  } else if (start_code == SOS && s->ls) {
2285  const uint8_t *src = *buf_ptr;
2286  uint8_t *dst = s->buffer;
2287  int bit_count = 0;
2288  int t = 0, b = 0;
2289  PutBitContext pb;
2290 
2291  /* find marker */
2292  while (src + t < buf_end) {
2293  uint8_t x = src[t++];
2294  if (x == 0xff) {
2295  while ((src + t < buf_end) && x == 0xff)
2296  x = src[t++];
2297  if (x & 0x80) {
2298  t -= FFMIN(2, t);
2299  break;
2300  }
2301  }
2302  }
2303  bit_count = t * 8;
2304  init_put_bits(&pb, dst, t);
2305 
2306  /* unescape bitstream */
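 /* JPEG-LS stuffs a 0 bit after every 0xFF byte so no marker value can appear
  * in the entropy-coded data; re-pack the bytes here, writing only 7 bits for
  * the byte that follows an 0xFF. */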
2307  while (b < t) {
2308  uint8_t x = src[b++];
2309  put_bits(&pb, 8, x);
2310  if (x == 0xFF && b < t) {
2311  x = src[b++];
2312  if (x & 0x80) {
2313  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2314  x &= 0x7f;
2315  }
2316  put_bits(&pb, 7, x);
2317  bit_count--;
2318  }
2319  }
2320  flush_put_bits(&pb);
2321 
2322  *unescaped_buf_ptr = dst;
2323  *unescaped_buf_size = (bit_count + 7) >> 3;
2324  memset(s->buffer + *unescaped_buf_size, 0,
2325  AV_INPUT_BUFFER_PADDING_SIZE);
2326  } else {
2327  *unescaped_buf_ptr = *buf_ptr;
2328  *unescaped_buf_size = buf_end - *buf_ptr;
2329  }
2330 
2331  return start_code;
2332 }
2333 
2334 static void reset_icc_profile(MJpegDecodeContext *s)
2335 {
2336  int i;
2337 
2338  if (s->iccentries) {
2339  for (i = 0; i < s->iccnum; i++)
2340  av_freep(&s->iccentries[i].data);
2341  av_freep(&s->iccentries);
2342  }
2343 
2344  s->iccread = 0;
2345  s->iccnum = 0;
2346 }
2347 
2348 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2349  int *got_frame, const AVPacket *avpkt,
2350  const uint8_t *buf, const int buf_size)
2351 {
2352  MJpegDecodeContext *s = avctx->priv_data;
2353  const uint8_t *buf_end, *buf_ptr;
2354  const uint8_t *unescaped_buf_ptr;
2355  int hshift, vshift;
2356  int unescaped_buf_size;
2357  int start_code;
2358  int i, index;
2359  int ret = 0;
2360  int is16bit;
2361  AVDictionaryEntry *e = NULL;
2362 
2363  s->force_pal8 = 0;
2364 
2365  s->buf_size = buf_size;
2366 
2367  av_dict_free(&s->exif_metadata);
2368  av_freep(&s->stereo3d);
2369  s->adobe_transform = -1;
2370 
2371  if (s->iccnum != 0)
2372  reset_icc_profile(s);
2373 
2374 redo_for_pal8:
2375  buf_ptr = buf;
2376  buf_end = buf + buf_size;
2377  while (buf_ptr < buf_end) {
2378  /* find next start marker */
2379  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2380  &unescaped_buf_ptr,
2381  &unescaped_buf_size);
2382  /* EOF */
2383  if (start_code < 0) {
2384  break;
2385  } else if (unescaped_buf_size > INT_MAX / 8) {
2386  av_log(avctx, AV_LOG_ERROR,
2387  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2388  start_code, unescaped_buf_size, buf_size);
2389  return AVERROR_INVALIDDATA;
2390  }
2391  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2392  start_code, buf_end - buf_ptr);
2393 
2394  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2395 
2396  if (ret < 0) {
2397  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2398  goto fail;
2399  }
2400 
2401  s->start_code = start_code;
2402  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2403  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2404 
2405  /* process markers */
2406  if (start_code >= RST0 && start_code <= RST7) {
2407  av_log(avctx, AV_LOG_DEBUG,
2408  "restart marker: %d\n", start_code & 0x0f);
2409  /* APP fields */
2410  } else if (start_code >= APP0 && start_code <= APP15) {
2411  if ((ret = mjpeg_decode_app(s)) < 0)
2412  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2413  av_err2str(ret));
2414  /* Comment */
2415  } else if (start_code == COM) {
2416  ret = mjpeg_decode_com(s);
2417  if (ret < 0)
2418  return ret;
2419  } else if (start_code == DQT) {
2420  ret = ff_mjpeg_decode_dqt(s);
2421  if (ret < 0)
2422  return ret;
2423  }
2424 
2425  ret = -1;
2426 
2427  if (!CONFIG_JPEGLS_DECODER &&
2428  (start_code == SOF48 || start_code == LSE)) {
2429  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2430  return AVERROR(ENOSYS);
2431  }
2432 
2433  if (avctx->skip_frame == AVDISCARD_ALL) {
2434  switch(start_code) {
2435  case SOF0:
2436  case SOF1:
2437  case SOF2:
2438  case SOF3:
2439  case SOF48:
2440  case SOI:
2441  case SOS:
2442  case EOI:
2443  break;
2444  default:
2445  goto skip;
2446  }
2447  }
2448 
2449  switch (start_code) {
2450  case SOI:
2451  s->restart_interval = 0;
2452  s->restart_count = 0;
2453  s->raw_image_buffer = buf_ptr;
2454  s->raw_image_buffer_size = buf_end - buf_ptr;
2455  /* nothing to do on SOI */
2456  break;
2457  case DHT:
2458  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2459  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2460  goto fail;
2461  }
2462  break;
2463  case SOF0:
2464  case SOF1:
2465  if (start_code == SOF0)
2466  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2467  else
2468  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2469  s->lossless = 0;
2470  s->ls = 0;
2471  s->progressive = 0;
2472  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2473  goto fail;
2474  break;
2475  case SOF2:
2476  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2477  s->lossless = 0;
2478  s->ls = 0;
2479  s->progressive = 1;
2480  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2481  goto fail;
2482  break;
2483  case SOF3:
2484  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2485  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2486  s->lossless = 1;
2487  s->ls = 0;
2488  s->progressive = 0;
2489  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2490  goto fail;
2491  break;
2492  case SOF48:
2493  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2494  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2495  s->lossless = 1;
2496  s->ls = 1;
2497  s->progressive = 0;
2498  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2499  goto fail;
2500  break;
2501  case LSE:
2502  if (!CONFIG_JPEGLS_DECODER ||
2503  (ret = ff_jpegls_decode_lse(s)) < 0)
2504  goto fail;
2505  if (ret == 1)
2506  goto redo_for_pal8;
2507  break;
2508  case EOI:
2509 eoi_parser:
2510  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2511  s->progressive && s->cur_scan && s->got_picture)
2512  mjpeg_idct_scan_progressive_ac(s);
2513  s->cur_scan = 0;
2514  if (!s->got_picture) {
2515  av_log(avctx, AV_LOG_WARNING,
2516  "Found EOI before any SOF, ignoring\n");
2517  break;
2518  }
2519  if (s->interlaced) {
2520  s->bottom_field ^= 1;
2521  /* if not bottom field, do not output image yet */
2522  if (s->bottom_field == !s->interlace_polarity)
2523  break;
2524  }
2525  if (avctx->skip_frame == AVDISCARD_ALL) {
2526  s->got_picture = 0;
2527  goto the_end_no_picture;
2528  }
2529  if (s->avctx->hwaccel) {
2530  ret = s->avctx->hwaccel->end_frame(s->avctx);
2531  if (ret < 0)
2532  return ret;
2533 
2534  av_freep(&s->hwaccel_picture_private);
2535  }
2536  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2537  return ret;
2538  *got_frame = 1;
2539  s->got_picture = 0;
2540 
2541  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2542  int qp = FFMAX3(s->qscale[0],
2543  s->qscale[1],
2544  s->qscale[2]);
2545 
2546  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2547  }
2548 
2549  goto the_end;
2550  case SOS:
2551  s->raw_scan_buffer = buf_ptr;
2552  s->raw_scan_buffer_size = buf_end - buf_ptr;
2553 
2554  s->cur_scan++;
2555  if (avctx->skip_frame == AVDISCARD_ALL) {
2556  skip_bits(&s->gb, get_bits_left(&s->gb));
2557  break;
2558  }
2559 
2560  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2561  (avctx->err_recognition & AV_EF_EXPLODE))
2562  goto fail;
2563  break;
2564  case DRI:
2565  if ((ret = mjpeg_decode_dri(s)) < 0)
2566  return ret;
2567  break;
2568  case SOF5:
2569  case SOF6:
2570  case SOF7:
2571  case SOF9:
2572  case SOF10:
2573  case SOF11:
2574  case SOF13:
2575  case SOF14:
2576  case SOF15:
2577  case JPG:
2578  av_log(avctx, AV_LOG_ERROR,
2579  "mjpeg: unsupported coding type (%x)\n", start_code);
2580  break;
2581  }
2582 
2583 skip:
2584  /* eof process start code */
2585  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2586  av_log(avctx, AV_LOG_DEBUG,
2587  "marker parser used %d bytes (%d bits)\n",
2588  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2589  }
2590  if (s->got_picture && s->cur_scan) {
2591  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2592  goto eoi_parser;
2593  }
2594  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2595  return AVERROR_INVALIDDATA;
2596 fail:
2597  s->got_picture = 0;
2598  return ret;
2599 the_end:
2600 
2601  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2602 
2603  if (AV_RB32(s->upscale_h)) {
2604  int p;
2605  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2606  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2607  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2608  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2609  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2610  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2611  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2612  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2613  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2614  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2615  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2616  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2617  );
2618  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2619  if (ret)
2620  return ret;
2621 
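 /* Expand horizontally subsampled planes in place: each row is widened from
  * right to left so source samples are not overwritten before they are read. */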
2622  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2623  for (p = 0; p<s->nb_components; p++) {
2624  uint8_t *line = s->picture_ptr->data[p];
2625  int w = s->width;
2626  int h = s->height;
2627  if (!s->upscale_h[p])
2628  continue;
2629  if (p==1 || p==2) {
2630  w = AV_CEIL_RSHIFT(w, hshift);
2631  h = AV_CEIL_RSHIFT(h, vshift);
2632  }
2633  if (s->upscale_v[p] == 1)
2634  h = (h+1)>>1;
2635  av_assert0(w > 0);
2636  for (i = 0; i < h; i++) {
2637  if (s->upscale_h[p] == 1) {
2638  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2639  else line[w - 1] = line[(w - 1) / 2];
2640  for (index = w - 2; index > 0; index--) {
2641  if (is16bit)
2642  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2643  else
2644  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2645  }
2646  } else if (s->upscale_h[p] == 2) {
2647  if (is16bit) {
2648  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2649  if (w > 1)
2650  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2651  } else {
2652  line[w - 1] = line[(w - 1) / 3];
2653  if (w > 1)
2654  line[w - 2] = line[w - 1];
2655  }
2656  for (index = w - 3; index > 0; index--) {
2657  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2658  }
2659  }
2660  line += s->linesize[p];
2661  }
2662  }
2663  }
2664  if (AV_RB32(s->upscale_v)) {
2665  int p;
2666  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2667  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2668  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2669  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2670  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2671  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2672  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2673  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2674  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2675  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2676  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2677  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2678  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2679  );
2680  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2681  if (ret)
2682  return ret;
2683 
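 /* Expand vertically subsampled planes in place, walking from the bottom row
  * upwards and averaging the two nearest source rows. */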
2684  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2685  for (p = 0; p < s->nb_components; p++) {
2686  uint8_t *dst;
2687  int w = s->width;
2688  int h = s->height;
2689  if (!s->upscale_v[p])
2690  continue;
2691  if (p==1 || p==2) {
2692  w = AV_CEIL_RSHIFT(w, hshift);
2693  h = AV_CEIL_RSHIFT(h, vshift);
2694  }
2695  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2696  for (i = h - 1; i; i--) {
2697  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2698  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2699  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2700  memcpy(dst, src1, w);
2701  } else {
2702  for (index = 0; index < w; index++)
2703  dst[index] = (src1[index] + src2[index]) >> 1;
2704  }
2705  dst -= s->linesize[p];
2706  }
2707  }
2708  }
2709  if (s->flipped && !s->rgb) {
2710  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2711  if (ret)
2712  return ret;
2713 
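 /* Flip the output vertically by pointing each plane at its last row and
  * negating the stride; no pixel data is moved. */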
2714  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2715  for (index=0; index<s->nb_components; index++) {
2716  int h = frame->height;
2717  if (index && index < 3)
2718  h = AV_CEIL_RSHIFT(h, vshift);
2719  if (frame->data[index]) {
2720  frame->data[index] += (h - 1) * frame->linesize[index];
2721  frame->linesize[index] *= -1;
2722  }
2723  }
2724  }
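 /* Adobe APP14 transform 0 with four components: fold the fourth (K) channel
  * into the colour planes (value * K / 255, computed as *257 >> 16) and set
  * alpha fully opaque. */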
2725  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2726  int w = s->picture_ptr->width;
2727  int h = s->picture_ptr->height;
2728  av_assert0(s->nb_components == 4);
2729  for (i=0; i<h; i++) {
2730  int j;
2731  uint8_t *dst[4];
2732  for (index=0; index<4; index++) {
2733  dst[index] = s->picture_ptr->data[index]
2734  + s->picture_ptr->linesize[index]*i;
2735  }
2736  for (j=0; j<w; j++) {
2737  int k = dst[3][j];
2738  int r = dst[0][j] * k;
2739  int g = dst[1][j] * k;
2740  int b = dst[2][j] * k;
2741  dst[0][j] = g*257 >> 16;
2742  dst[1][j] = b*257 >> 16;
2743  dst[2][j] = r*257 >> 16;
2744  dst[3][j] = 255;
2745  }
2746  }
2747  }
2748  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2749  int w = s->picture_ptr->width;
2750  int h = s->picture_ptr->height;
2751  av_assert0(s->nb_components == 4);
2752  for (i=0; i<h; i++) {
2753  int j;
2754  uint8_t *dst[4];
2755  for (index=0; index<4; index++) {
2756  dst[index] = s->picture_ptr->data[index]
2757  + s->picture_ptr->linesize[index]*i;
2758  }
2759  for (j=0; j<w; j++) {
2760  int k = dst[3][j];
2761  int r = (255 - dst[0][j]) * k;
2762  int g = (128 - dst[1][j]) * k;
2763  int b = (128 - dst[2][j]) * k;
2764  dst[0][j] = r*257 >> 16;
2765  dst[1][j] = (g*257 >> 16) + 128;
2766  dst[2][j] = (b*257 >> 16) + 128;
2767  dst[3][j] = 255;
2768  }
2769  }
2770  }
2771 
2772  if (s->stereo3d) {
2773  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2774  if (stereo) {
2775  stereo->type = s->stereo3d->type;
2776  stereo->flags = s->stereo3d->flags;
2777  }
2778  av_freep(&s->stereo3d);
2779  }
2780 
2781  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2782  AVFrameSideData *sd;
2783  size_t offset = 0;
2784  int total_size = 0;
2785  int i;
2786 
2787  /* Sum size of all parts. */
2788  for (i = 0; i < s->iccnum; i++)
2789  total_size += s->iccentries[i].length;
2790 
2791  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2792  if (!sd) {
2793  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2794  return AVERROR(ENOMEM);
2795  }
2796 
2797  /* Reassemble the parts, which are now in-order. */
2798  for (i = 0; i < s->iccnum; i++) {
2799  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2800  offset += s->iccentries[i].length;
2801  }
2802  }
2803 
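 /* Map the EXIF Orientation tag (values 2..8) onto a display-matrix side-data
  * entry so players can rotate/flip the frame accordingly. */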
2804  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2805  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2806  int orientation = strtol(value, &endptr, 0);
2807 
2808  if (!*endptr) {
2809  AVFrameSideData *sd = NULL;
2810 
2811  if (orientation >= 2 && orientation <= 8) {
2812  int32_t *matrix;
2813 
2814  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2815  if (!sd) {
2816  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2817  return AVERROR(ENOMEM);
2818  }
2819 
2820  matrix = (int32_t *)sd->data;
2821 
2822  switch (orientation) {
2823  case 2:
2824  av_display_rotation_set(matrix, 0.0);
2825  av_display_matrix_flip(matrix, 1, 0);
2826  break;
2827  case 3:
2828  av_display_rotation_set(matrix, 180.0);
2829  break;
2830  case 4:
2831  av_display_rotation_set(matrix, 180.0);
2832  av_display_matrix_flip(matrix, 1, 0);
2833  break;
2834  case 5:
2835  av_display_rotation_set(matrix, 90.0);
2836  av_display_matrix_flip(matrix, 1, 0);
2837  break;
2838  case 6:
2839  av_display_rotation_set(matrix, 90.0);
2840  break;
2841  case 7:
2842  av_display_rotation_set(matrix, -90.0);
2843  av_display_matrix_flip(matrix, 1, 0);
2844  break;
2845  case 8:
2846  av_display_rotation_set(matrix, -90.0);
2847  break;
2848  default:
2849  av_assert0(0);
2850  }
2851  }
2852  }
2853  }
2854 
2855  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2856  av_dict_free(&s->exif_metadata);
2857 
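 /* AVRn/AVDJ streams can code more lines than the display height; report the
  * full coded height and crop the excess rows from the top. */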
2858  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2859  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2860  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2861  avctx->coded_height > s->orig_height) {
2862  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2863  frame->crop_top = frame->height - avctx->height;
2864  }
2865 
2866 the_end_no_picture:
2867  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2868  buf_end - buf_ptr);
2869  return buf_ptr - buf;
2870 }
2871 
2872 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2873  AVPacket *avpkt)
2874 {
2875  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2876  avpkt, avpkt->data, avpkt->size);
2877 }
2878 
2879 
2880 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2881  * even without having called ff_mjpeg_decode_init(). */
2882 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2883 {
2884  MJpegDecodeContext *s = avctx->priv_data;
2885  int i, j;
2886 
2887  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2888  av_log(avctx, AV_LOG_INFO, "Single field\n");
2889  }
2890 
2891  if (s->picture) {
2892  av_frame_free(&s->picture);
2893  s->picture_ptr = NULL;
2894  } else if (s->picture_ptr)
2895  av_frame_unref(s->picture_ptr);
2896 
2897  av_frame_free(&s->smv_frame);
2898 
2899  av_freep(&s->buffer);
2900  av_freep(&s->stereo3d);
2901  av_freep(&s->ljpeg_buffer);
2902  s->ljpeg_buffer_size = 0;
2903 
2904  for (i = 0; i < 3; i++) {
2905  for (j = 0; j < 4; j++)
2906  ff_free_vlc(&s->vlcs[i][j]);
2907  }
2908  for (i = 0; i < MAX_COMPONENTS; i++) {
2909  av_freep(&s->blocks[i]);
2910  av_freep(&s->last_nnz[i]);
2911  }
2912  av_dict_free(&s->exif_metadata);
2913 
2914  reset_icc_profile(s);
2915 
2916  av_freep(&s->hwaccel_picture_private);
2917  av_freep(&s->jls_state);
2918 
2919  return 0;
2920 }
2921 
2922 static void decode_flush(AVCodecContext *avctx)
2923 {
2924  MJpegDecodeContext *s = avctx->priv_data;
2925  s->got_picture = 0;
2926 
2927  s->smv_next_frame = 0;
2928  av_frame_unref(s->smv_frame);
2929 }
2930 
2931 #if CONFIG_MJPEG_DECODER
2932 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2933 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2934 static const AVOption options[] = {
2935  { "extern_huff", "Use external huffman table.",
2936  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2937  { NULL },
2938 };
2939 
2940 static const AVClass mjpegdec_class = {
2941  .class_name = "MJPEG decoder",
2942  .item_name = av_default_item_name,
2943  .option = options,
2944  .version = LIBAVUTIL_VERSION_INT,
2945 };
2946 
2947 const FFCodec ff_mjpeg_decoder = {
2948  .p.name = "mjpeg",
2949  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2950  .p.type = AVMEDIA_TYPE_VIDEO,
2951  .p.id = AV_CODEC_ID_MJPEG,
2952  .priv_data_size = sizeof(MJpegDecodeContext),
2953  .init = ff_mjpeg_decode_init,
2954  .close = ff_mjpeg_decode_end,
2955  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2956  .flush = decode_flush,
2957  .p.capabilities = AV_CODEC_CAP_DR1,
2958  .p.max_lowres = 3,
2959  .p.priv_class = &mjpegdec_class,
2960  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2961  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2962  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM |
2963  FF_CODEC_CAP_ICC_PROFILES,
2964  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2965 #if CONFIG_MJPEG_NVDEC_HWACCEL
2966  HWACCEL_NVDEC(mjpeg),
2967 #endif
2968 #if CONFIG_MJPEG_VAAPI_HWACCEL
2969  HWACCEL_VAAPI(mjpeg),
2970 #endif
2971  NULL
2972  },
2973 };
2974 #endif
2975 #if CONFIG_THP_DECODER
2976 const FFCodec ff_thp_decoder = {
2977  .p.name = "thp",
2978  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2979  .p.type = AVMEDIA_TYPE_VIDEO,
2980  .p.id = AV_CODEC_ID_THP,
2981  .priv_data_size = sizeof(MJpegDecodeContext),
2982  .init = ff_mjpeg_decode_init,
2983  .close = ff_mjpeg_decode_end,
2984  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2985  .flush = decode_flush,
2986  .p.capabilities = AV_CODEC_CAP_DR1,
2987  .p.max_lowres = 3,
2988  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2989 };
2990 #endif
2991 
2992 #if CONFIG_SMVJPEG_DECODER
2993 // SMV JPEG just stacks several output frames into one JPEG picture
2994 // we handle that by setting up the cropping parameters appropriately
2995 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2996 {
2997  MJpegDecodeContext *s = avctx->priv_data;
2998 
2999  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3000 
3001  frame->width = avctx->coded_width;
3002  frame->height = avctx->coded_height;
3003  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3004  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3005 
3006  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3007 
3008  if (s->smv_next_frame == 0)
3009  av_frame_unref(s->smv_frame);
3010 }
3011 
3012 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3013 {
3014  MJpegDecodeContext *s = avctx->priv_data;
3015  AVPacket *const pkt = avctx->internal->in_pkt;
3016  int64_t pkt_dts;
3017  int got_frame = 0;
3018  int ret;
3019 
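 /* Sub-frames after the first are cropped out of the cached decoded picture;
  * a new packet is only pulled once the whole stack has been handed out. */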
3020  if (s->smv_next_frame > 0) {
3021  av_assert0(s->smv_frame->buf[0]);
3022  ret = av_frame_ref(frame, s->smv_frame);
3023  if (ret < 0)
3024  return ret;
3025 
3026  smv_process_frame(avctx, frame);
3027  return 0;
3028  }
3029 
3030  ret = ff_decode_get_packet(avctx, pkt);
3031  if (ret < 0)
3032  return ret;
3033 
3034  ret = ff_mjpeg_decode_frame(avctx, frame, &got_frame, pkt);
3035  pkt_dts = pkt->dts;
3036  av_packet_unref(pkt);
3037  if (ret < 0)
3038  return ret;
3039 
3040  if (!got_frame)
3041  return AVERROR(EAGAIN);
3042 
3043  frame->pkt_dts = pkt_dts;
3044 
3045  av_assert0(frame->buf[0]);
3046  av_frame_unref(s->smv_frame);
3047  ret = av_frame_ref(s->smv_frame, frame);
3048  if (ret < 0)
3049  return ret;
3050 
3051  smv_process_frame(avctx, frame);
3052  return 0;
3053 }
3054 
3055 const FFCodec ff_smvjpeg_decoder = {
3056  .p.name = "smvjpeg",
3057  CODEC_LONG_NAME("SMV JPEG"),
3058  .p.type = AVMEDIA_TYPE_VIDEO,
3059  .p.id = AV_CODEC_ID_SMVJPEG,
3060  .priv_data_size = sizeof(MJpegDecodeContext),
3061  .init = ff_mjpeg_decode_init,
3062  .close = ff_mjpeg_decode_end,
3063  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3064  .flush = decode_flush,
3065  .p.capabilities = AV_CODEC_CAP_DR1,
3066  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3067  FF_CODEC_CAP_INIT_CLEANUP,
3068 };
3069 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:268
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:183
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:474
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:664
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1002
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1147
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1400
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:670
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2922
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:956
SOF0
@ SOF0
Definition: mjpeg.h:39
matrix
Definition: vc1dsp.c:42
src1
const pixel * src1
Definition: h264pred_template.c:421
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1382
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:553
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:411
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:273
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:256
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:113
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:198
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
AVFrame::width
int width
Definition: frame.h:402
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:501
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1669
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:66
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1031
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:790
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:146
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:216
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:493
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1359
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:253
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:371
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2928
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:593
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
rgb
Definition: rpzaenc.c:59
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:239
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1246
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1416
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:435
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:122
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1713
fail
#define fail()
Definition: checkasm.h:134
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:503
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1665
GetBitContext
Definition: get_bits.h:107
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2348
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2149
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:59
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2916
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:39
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:613
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:443
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:87
av_bswap32
#define av_bswap32
Definition: bswap.h:33
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:276
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
aligned
static int aligned(int val)
Definition: dashdec.c:168
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:857
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:471
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:524
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1852
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1667
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1039
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:187
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:528
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:102
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:472
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1668
g
const char * g
Definition: vf_curves.c:127
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:404
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:354
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:470
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2334
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2882
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:436
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:182
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:478
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:449
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:110
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:450
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1607
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:196
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:461
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
tiff_common.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:237
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1431
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:974
lowres
static int lowres
Definition: ffplay.c:335
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1552
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:631
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1494
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:507
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1473
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1074
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:375
dc
(tooltip excerpt from the Snow bitstream description, doc/snow.txt: intra DC prediction, block[y][x] dc[1]; full text omitted)
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
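A generic usage sketch (not specific to this decoder; the helper name is made up): av_frame_ref() adds a new reference to the source frame's buffers instead of copying pixels, and is paired with av_frame_unref()/av_frame_free() to release it:
#include "libavutil/error.h"
#include "libavutil/frame.h"

/* Hypothetical helper: give the caller an extra reference to src. */
static int make_frame_ref(AVFrame **out, const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);   /* dst now shares src's data buffers */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    *out = dst;
    return 0;
}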
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:50
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2872
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:875
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
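A brief usage note (generic, not from this file): av_err2str() expands to a compound literal whose storage only lives for the enclosing expression, which is why it should be passed directly to a call such as av_log():
#include "libavutil/error.h"
#include "libavutil/log.h"

static void log_decode_error(void *logctx, int errnum)
{
    /* do not store the returned pointer; the buffer dies at the end of the statement */
    av_log(logctx, AV_LOG_ERROR, "decoding failed: %s\n", av_err2str(errnum));
}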
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1642
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:267
AV_RB32
Read an unsigned 32-bit big-endian value (macro generated from a byte-access template).
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:417
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:176
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1234
offset
(excerpt from doc/writing_filters.txt describing option-table/context boilerplate around the offset field; see the OFFSET() entry)
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:282
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2189
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:164
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:198
layout
The word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, a filter lists the supported formats: for video that means the pixel format, for audio the channel layout (from doc/filter_design.txt).
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:178
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2046
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:808
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Test the status of the outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY; a filter that stores a few frames internally can treat them as part of the FIFO and delay acknowledging a status change accordingly (example code from doc/filter_design.txt).
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:527
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:361
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1666
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1820
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:168
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:49
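A generic sketch of the usual pattern (buffer and helper names are placeholders): the call reallocates only when the requested size grows, and the zeroed AV_INPUT_BUFFER_PADDING_SIZE tail lets bitstream readers over-read safely:
#include <stdint.h>
#include "libavcodec/avcodec.h"  /* av_fast_padded_malloc() */
#include "libavutil/error.h"

/* Hypothetical scratch buffer grown on demand, e.g. for unescaped JPEG data. */
static int ensure_scratch(uint8_t **buf, unsigned *buf_size, size_t needed)
{
    av_fast_padded_malloc(buf, buf_size, needed);
    return *buf ? 0 : AVERROR(ENOMEM);
}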
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
(excerpt from doc/writing_filters.txt describing the option's default value and surrounding context boilerplate)
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1366
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:478
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1016
len
int len
Definition: vorbis_enc_data.h:426
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: codec_internal.h:49
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:598
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:2065
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:948
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:29
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
(excerpt from doc/filter_design.txt on request_frame: a filter should push already-queued frames or at least make progress towards producing a frame)
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:177
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:540
pos
unsigned int pos
Definition: spdifenc.c:413
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1363
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
(tooltip excerpt from the Snow bitstream description, doc/snow.txt: motion vector prediction takes the median of the scaled left, top and top-right vectors; full text omitted)
Definition: snow.txt:386
AV_RL32
Read an unsigned 32-bit little-endian value (macro generated from a byte-access template).
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2214
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
id
enum AVCodecID id
Definition: dts2pts_bsf.c:364
OFFSET
Option-table fields from doc/writing_filters.txt: name (keep it simple and lowercase), description (e.g. “set the foo of the bar”), the offset of the field in your private context (see the OFFSET() macro), plus default, minimum, maximum and flags.
AVCodecContext
main external API structure.
Definition: avcodec.h:426
AVFrame::height
int height
Definition: frame.h:402
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:312
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:249
buffer
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of the data while still allowing filters to produce correct results; the data is stored in buffers represented by AVFrame structures, and several references can point to the same frame buffer (from doc/filter_design.txt).
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:683
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
(excerpt from doc/filter_design.txt on request_frame return values and frame queuing)
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1358
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:300
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:613
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1832
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
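A hedged sketch of how such side data is typically filled in (generic helper, not this decoder's actual code); the type and flag values below are simply the ones named elsewhere in this listing:
#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "libavutil/stereo3d.h"

static int tag_top_bottom_stereo(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;
    stereo->flags = AV_STEREO3D_FLAG_INVERT; /* right/bottom view represents the left eye */
    return 0;
}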
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:451
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
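A small generic sketch (helper name made up): copying a metadata dictionary and releasing it again on failure:
#include "libavutil/dict.h"

static int clone_metadata(AVDictionary **dst, const AVDictionary *src)
{
    int ret = av_dict_copy(dst, src, 0); /* flags as for av_dict_set() */
    if (ret < 0)
        av_dict_free(dst);               /* drop any partially copied entries */
    return ret;
}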
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: codec_par.h:42
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
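For orientation, a sketch (lavc-internal header, included as from inside libavcodec) of the bounds-checked byte reader these entries refer to; the payload-walking helper itself is hypothetical:
#include "bytestream.h"

static int count_app_payload_bytes(const uint8_t *buf, int size)
{
    GetByteContext gb;
    bytestream2_init(&gb, buf, size);
    while (bytestream2_get_bytes_left(&gb) > 0)
        (void)bytestream2_get_byte(&gb); /* reads past the end return 0 instead of overflowing */
    return bytestream2_tell(&gb);        /* bytes consumed so far */
}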
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
AVDictionaryEntry::value
char * value
Definition: dict.h:91
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
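A generic sketch (wrapper name made up): dimensions are validated before any buffer is allocated for them:
#include "libavutil/imgutils.h"

static int validate_dimensions(void *logctx, unsigned w, unsigned h)
{
    /* returns a negative AVERROR code if an image of w x h could not be addressed safely */
    return av_image_check_size(w, h, 0 /* log_offset */, logctx);
}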
AV_RB24
Read an unsigned 24-bit big-endian value (macro generated from a byte-access template).
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
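PREDICT() selects among the standard lossless-JPEG predictors of ITU-T T.81 (Table H.1). As an illustration of that predictor set only (this is not the macro's actual expansion), a plain function form looks like:
/* Standard lossless-JPEG predictors; predictor 0 (no prediction) is only
 * meaningful in hierarchical mode, so it falls back to `left` here. */
static int jpeg_lossless_predict(int predictor, int left, int top, int topleft)
{
    switch (predictor) {
    case 1: return left;
    case 2: return top;
    case 3: return topleft;
    case 4: return left + top - topleft;
    case 5: return left + ((top  - topleft) >> 1);
    case 6: return top  + ((left - topleft) >> 1);
    case 7: return (left + top) >> 1;
    default: return left;
    }
}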
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:354
re
float re
Definition: fft.c:79