FFmpeg
mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 #include <stdatomic.h>
33 
34 #include "libavutil/attributes.h"
35 #include "libavutil/emms.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/mem_internal.h"
39 #include "libavutil/reverse.h"
40 #include "libavutil/stereo3d.h"
41 #include "libavutil/timecode.h"
42 
43 #include "avcodec.h"
44 #include "codec_internal.h"
45 #include "decode.h"
46 #include "error_resilience.h"
47 #include "hwaccel_internal.h"
48 #include "hwconfig.h"
49 #include "idctdsp.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
63 #define A53_MAX_CC_COUNT 2000
64 
71 };
72 
73 typedef struct Mpeg1Context {
75  AVPanScan pan_scan; /* some temporary storage for the panscan */
80  uint8_t afd;
81  int has_afd;
86  AVRational frame_rate_ext; /* MPEG-2 specific framerate modificator */
87  unsigned frame_rate_index;
88  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
90  int tmpgexs;
93  int vbv_delay;
95  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
96 } Mpeg1Context;
97 
98 /* as H.263, but only 17 codes */
99 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
100 {
101  int code, sign, val, shift;
102 
103  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
104  if (code == 0)
105  return pred;
106  if (code < 0)
107  return 0xffff;
108 
109  sign = get_bits1(&s->gb);
110  shift = fcode - 1;
111  val = code;
112  if (shift) {
113  val = (val - 1) << shift;
114  val |= get_bits(&s->gb, shift);
115  val++;
116  }
117  if (sign)
118  val = -val;
119  val += pred;
120 
121  /* modulo decoding */
122  return sign_extend(val, 5 + shift);
123 }
124 
125 #define MAX_INDEX (64 - 1)
126 #define check_scantable_index(ctx, x) \
127  do { \
128  if ((x) > MAX_INDEX) { \
129  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
130  ctx->mb_x, ctx->mb_y); \
131  return AVERROR_INVALIDDATA; \
132  } \
133  } while (0)
134 
 135 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
 136  int16_t *block, int n)
137 {
138  int level, i, j, run;
139  const uint8_t *const scantable = s->intra_scantable.permutated;
140  const uint16_t *quant_matrix = s->inter_matrix;
141  const int qscale = s->qscale;
142 
143  {
144  OPEN_READER(re, &s->gb);
145  i = -1;
146  // special case for first coefficient, no need to add second VLC table
147  UPDATE_CACHE(re, &s->gb);
148  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
149  level = (3 * qscale * quant_matrix[0]) >> 5;
150  level = (level - 1) | 1;
151  if (GET_CACHE(re, &s->gb) & 0x40000000)
152  level = -level;
153  block[0] = level;
154  i++;
155  SKIP_BITS(re, &s->gb, 2);
156  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
157  goto end;
158  }
159  /* now quantify & encode AC coefficients */
160  for (;;) {
161  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
162  TEX_VLC_BITS, 2, 0);
163 
164  if (level != 0) {
165  i += run;
166  if (i > MAX_INDEX)
167  break;
168  j = scantable[i];
169  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
170  level = (level - 1) | 1;
171  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
172  SHOW_SBITS(re, &s->gb, 1);
173  SKIP_BITS(re, &s->gb, 1);
174  } else {
175  /* escape */
176  run = SHOW_UBITS(re, &s->gb, 6) + 1;
177  LAST_SKIP_BITS(re, &s->gb, 6);
178  UPDATE_CACHE(re, &s->gb);
179  level = SHOW_SBITS(re, &s->gb, 8);
180  SKIP_BITS(re, &s->gb, 8);
181  if (level == -128) {
182  level = SHOW_UBITS(re, &s->gb, 8) - 256;
183  SKIP_BITS(re, &s->gb, 8);
184  } else if (level == 0) {
185  level = SHOW_UBITS(re, &s->gb, 8);
186  SKIP_BITS(re, &s->gb, 8);
187  }
188  i += run;
189  if (i > MAX_INDEX)
190  break;
191  j = scantable[i];
192  if (level < 0) {
193  level = -level;
194  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
195  level = (level - 1) | 1;
196  level = -level;
197  } else {
198  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
199  level = (level - 1) | 1;
200  }
201  }
202 
203  block[j] = level;
204  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
205  break;
206  UPDATE_CACHE(re, &s->gb);
207  }
208 end:
209  LAST_SKIP_BITS(re, &s->gb, 2);
210  CLOSE_READER(re, &s->gb);
211  }
212 
 213  check_scantable_index(s, i);
 214 
215  s->block_last_index[n] = i;
216  return 0;
217 }
218 
 219 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
 220  int16_t *block, int n)
221 {
222  int level, i, j, run;
223  const uint8_t *const scantable = s->intra_scantable.permutated;
224  const uint16_t *quant_matrix;
225  const int qscale = s->qscale;
226  int mismatch;
227 
228  mismatch = 1;
229 
230  {
231  OPEN_READER(re, &s->gb);
232  i = -1;
233  if (n < 4)
234  quant_matrix = s->inter_matrix;
235  else
236  quant_matrix = s->chroma_inter_matrix;
237 
238  // Special case for first coefficient, no need to add second VLC table.
239  UPDATE_CACHE(re, &s->gb);
240  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
241  level = (3 * qscale * quant_matrix[0]) >> 5;
242  if (GET_CACHE(re, &s->gb) & 0x40000000)
243  level = -level;
244  block[0] = level;
245  mismatch ^= level;
246  i++;
247  SKIP_BITS(re, &s->gb, 2);
248  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
249  goto end;
250  }
251 
252  /* now quantify & encode AC coefficients */
253  for (;;) {
254  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
255  TEX_VLC_BITS, 2, 0);
256 
257  if (level != 0) {
258  i += run;
259  if (i > MAX_INDEX)
260  break;
261  j = scantable[i];
262  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
263  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
264  SHOW_SBITS(re, &s->gb, 1);
265  SKIP_BITS(re, &s->gb, 1);
266  } else {
267  /* escape */
268  run = SHOW_UBITS(re, &s->gb, 6) + 1;
269  LAST_SKIP_BITS(re, &s->gb, 6);
270  UPDATE_CACHE(re, &s->gb);
271  level = SHOW_SBITS(re, &s->gb, 12);
272  SKIP_BITS(re, &s->gb, 12);
273 
274  i += run;
275  if (i > MAX_INDEX)
276  break;
277  j = scantable[i];
278  if (level < 0) {
279  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
280  level = -level;
281  } else {
282  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
283  }
284  }
285 
286  mismatch ^= level;
287  block[j] = level;
288  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
289  break;
290  UPDATE_CACHE(re, &s->gb);
291  }
292 end:
293  LAST_SKIP_BITS(re, &s->gb, 2);
294  CLOSE_READER(re, &s->gb);
295  }
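 /* MPEG-2 mismatch control: the spec requires the sum of the dequantized
  * coefficients in a block to be odd; "mismatch" tracks the parity, and the
  * LSB of the last coefficient is toggled here if the sum would be even. */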
296  block[63] ^= (mismatch & 1);
297 
 298  check_scantable_index(s, i);
 299 
300  s->block_last_index[n] = i;
301  return 0;
302 }
303 
 304 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
 305  int16_t *block, int n)
306 {
307  int level, dc, diff, i, j, run;
308  int component;
309  const RL_VLC_ELEM *rl_vlc;
310  const uint8_t *const scantable = s->intra_scantable.permutated;
311  const uint16_t *quant_matrix;
312  const int qscale = s->qscale;
313  int mismatch;
314 
315  /* DC coefficient */
316  if (n < 4) {
317  quant_matrix = s->intra_matrix;
318  component = 0;
319  } else {
320  quant_matrix = s->chroma_intra_matrix;
321  component = (n & 1) + 1;
322  }
323  diff = decode_dc(&s->gb, component);
324  dc = s->last_dc[component];
325  dc += diff;
326  s->last_dc[component] = dc;
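 /* intra_dc_precision selects 8..11-bit DC coding (values 0..3); shift the
  * predicted + differential DC up to the common 11-bit scale. */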
327  block[0] = dc * (1 << (3 - s->intra_dc_precision));
328  ff_tlog(s->avctx, "dc=%d\n", block[0]);
329  mismatch = block[0] ^ 1;
330  i = 0;
 331  if (s->intra_vlc_format)
 332  rl_vlc = ff_mpeg2_rl_vlc;
 333  else
 334  rl_vlc = ff_mpeg1_rl_vlc;
 335 
336  {
337  OPEN_READER(re, &s->gb);
338  /* now quantify & encode AC coefficients */
339  for (;;) {
340  UPDATE_CACHE(re, &s->gb);
341  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
342  TEX_VLC_BITS, 2, 0);
343 
344  if (level == 127) {
345  break;
346  } else if (level != 0) {
347  i += run;
348  if (i > MAX_INDEX)
349  break;
350  j = scantable[i];
351  level = (level * qscale * quant_matrix[j]) >> 4;
352  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
353  SHOW_SBITS(re, &s->gb, 1);
354  LAST_SKIP_BITS(re, &s->gb, 1);
355  } else {
356  /* escape */
357  run = SHOW_UBITS(re, &s->gb, 6) + 1;
358  SKIP_BITS(re, &s->gb, 6);
359  level = SHOW_SBITS(re, &s->gb, 12);
360  LAST_SKIP_BITS(re, &s->gb, 12);
361  i += run;
362  if (i > MAX_INDEX)
363  break;
364  j = scantable[i];
365  if (level < 0) {
366  level = (-level * qscale * quant_matrix[j]) >> 4;
367  level = -level;
368  } else {
369  level = (level * qscale * quant_matrix[j]) >> 4;
370  }
371  }
372 
373  mismatch ^= level;
374  block[j] = level;
375  }
376  CLOSE_READER(re, &s->gb);
377  }
378  block[63] ^= mismatch & 1;
379 
 380  check_scantable_index(s, i);
 381 
382  s->block_last_index[n] = i;
383  return 0;
384 }
385 
386 /******************************************/
387 /* decoding */
388 
389 static inline int get_dmv(MpegEncContext *s)
390 {
391  if (get_bits1(&s->gb))
392  return 1 - (get_bits1(&s->gb) << 1);
393  else
394  return 0;
395 }
396 
397 /* motion type (for MPEG-2) */
398 #define MT_FIELD 1
399 #define MT_FRAME 2
400 #define MT_16X8 2
401 #define MT_DMV 3
402 
403 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
404 {
405  int i, j, k, cbp, val, mb_type, motion_type;
406  const int mb_block_count = 4 + (1 << s->chroma_format);
407  int ret;
408 
409  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
410 
411  av_assert2(s->mb_skipped == 0);
412 
413  if (s->mb_skip_run-- != 0) {
414  if (s->pict_type == AV_PICTURE_TYPE_P) {
415  s->mb_skipped = 1;
 416  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
 417  MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
418  } else {
419  int mb_type;
420 
421  if (s->mb_x)
422  mb_type = s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
423  else
424  // FIXME not sure if this is allowed in MPEG at all
425  mb_type = s->cur_pic.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
426  if (IS_INTRA(mb_type)) {
427  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
428  return AVERROR_INVALIDDATA;
429  }
430  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
431  mb_type | MB_TYPE_SKIP;
432 
433  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
434  s->mb_skipped = 1;
435  }
436 
437  return 0;
438  }
439 
440  switch (s->pict_type) {
441  default:
442  case AV_PICTURE_TYPE_I:
443  if (get_bits1(&s->gb) == 0) {
444  if (get_bits1(&s->gb) == 0) {
445  av_log(s->avctx, AV_LOG_ERROR,
446  "Invalid mb type in I-frame at %d %d\n",
447  s->mb_x, s->mb_y);
448  return AVERROR_INVALIDDATA;
449  }
450  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
451  } else {
452  mb_type = MB_TYPE_INTRA;
453  }
454  break;
455  case AV_PICTURE_TYPE_P:
456  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
457  if (mb_type < 0) {
458  av_log(s->avctx, AV_LOG_ERROR,
459  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
460  return AVERROR_INVALIDDATA;
461  }
462  break;
463  case AV_PICTURE_TYPE_B:
464  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
465  if (mb_type < 0) {
466  av_log(s->avctx, AV_LOG_ERROR,
467  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
468  return AVERROR_INVALIDDATA;
469  }
470  break;
471  }
472  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
473 // motion_type = 0; /* avoid warning */
474  if (IS_INTRA(mb_type)) {
475  s->bdsp.clear_blocks(s->block[0]);
476 
477  if (!s->chroma_y_shift)
478  s->bdsp.clear_blocks(s->block[6]);
479 
480  /* compute DCT type */
481  // FIXME: add an interlaced_dct coded var?
482  if (s->picture_structure == PICT_FRAME &&
483  !s->frame_pred_frame_dct)
484  s->interlaced_dct = get_bits1(&s->gb);
485 
486  if (IS_QUANT(mb_type))
487  s->qscale = mpeg_get_qscale(s);
488 
489  if (s->concealment_motion_vectors) {
490  /* just parse them */
491  if (s->picture_structure != PICT_FRAME)
492  skip_bits1(&s->gb); /* field select */
493 
494  s->mv[0][0][0] =
495  s->last_mv[0][0][0] =
496  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
497  s->last_mv[0][0][0]);
498  s->mv[0][0][1] =
499  s->last_mv[0][0][1] =
500  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
501  s->last_mv[0][0][1]);
502 
503  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
504  } else {
505  /* reset mv prediction */
506  memset(s->last_mv, 0, sizeof(s->last_mv));
507  }
508  s->mb_intra = 1;
509 
510  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
511  for (i = 0; i < mb_block_count; i++)
512  if ((ret = mpeg2_decode_block_intra(s, s->block[i], i)) < 0)
513  return ret;
514  } else {
515  for (i = 0; i < 6; i++) {
 516  ret = ff_mpeg1_decode_block_intra(&s->gb,
 517  s->intra_matrix,
518  s->intra_scantable.permutated,
519  s->last_dc, s->block[i],
520  i, s->qscale);
521  if (ret < 0) {
522  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
523  s->mb_x, s->mb_y);
524  return ret;
525  }
526 
527  s->block_last_index[i] = ret;
528  }
529  }
530  } else {
531  if (mb_type & MB_TYPE_ZERO_MV) {
532  av_assert2(mb_type & MB_TYPE_CBP);
533 
534  s->mv_dir = MV_DIR_FORWARD;
535  if (s->picture_structure == PICT_FRAME) {
536  if (s->picture_structure == PICT_FRAME
537  && !s->frame_pred_frame_dct)
538  s->interlaced_dct = get_bits1(&s->gb);
539  s->mv_type = MV_TYPE_16X16;
540  } else {
541  s->mv_type = MV_TYPE_FIELD;
542  mb_type |= MB_TYPE_INTERLACED;
543  s->field_select[0][0] = s->picture_structure - 1;
544  }
545 
546  if (IS_QUANT(mb_type))
547  s->qscale = mpeg_get_qscale(s);
548 
549  s->last_mv[0][0][0] = 0;
550  s->last_mv[0][0][1] = 0;
551  s->last_mv[0][1][0] = 0;
552  s->last_mv[0][1][1] = 0;
553  s->mv[0][0][0] = 0;
554  s->mv[0][0][1] = 0;
555  } else {
556  av_assert2(mb_type & MB_TYPE_BIDIR_MV);
557  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
558  /* get additional motion vector type */
559  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
560  motion_type = MT_FRAME;
561  } else {
562  motion_type = get_bits(&s->gb, 2);
563  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
564  s->interlaced_dct = get_bits1(&s->gb);
565  }
566 
567  if (IS_QUANT(mb_type))
568  s->qscale = mpeg_get_qscale(s);
569 
570  /* motion vectors */
571  s->mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
572  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
573  switch (motion_type) {
574  case MT_FRAME: /* or MT_16X8 */
575  if (s->picture_structure == PICT_FRAME) {
576  mb_type |= MB_TYPE_16x16;
577  s->mv_type = MV_TYPE_16X16;
578  for (i = 0; i < 2; i++) {
579  if (HAS_MV(mb_type, i)) {
580  /* MT_FRAME */
581  s->mv[i][0][0] =
582  s->last_mv[i][0][0] =
583  s->last_mv[i][1][0] =
584  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
585  s->last_mv[i][0][0]);
586  s->mv[i][0][1] =
587  s->last_mv[i][0][1] =
588  s->last_mv[i][1][1] =
589  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
590  s->last_mv[i][0][1]);
591  /* full_pel: only for MPEG-1 */
592  if (s->full_pel[i]) {
593  s->mv[i][0][0] *= 2;
594  s->mv[i][0][1] *= 2;
595  }
596  }
597  }
598  } else {
599  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
600  s->mv_type = MV_TYPE_16X8;
601  for (i = 0; i < 2; i++) {
602  if (HAS_MV(mb_type, i)) {
603  /* MT_16X8 */
604  for (j = 0; j < 2; j++) {
605  s->field_select[i][j] = get_bits1(&s->gb);
606  for (k = 0; k < 2; k++) {
607  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
608  s->last_mv[i][j][k]);
609  s->last_mv[i][j][k] = val;
610  s->mv[i][j][k] = val;
611  }
612  }
613  }
614  }
615  }
616  break;
617  case MT_FIELD:
618  s->mv_type = MV_TYPE_FIELD;
619  if (s->picture_structure == PICT_FRAME) {
620  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
621  for (i = 0; i < 2; i++) {
622  if (HAS_MV(mb_type, i)) {
623  for (j = 0; j < 2; j++) {
624  s->field_select[i][j] = get_bits1(&s->gb);
625  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
626  s->last_mv[i][j][0]);
627  s->last_mv[i][j][0] = val;
628  s->mv[i][j][0] = val;
629  ff_tlog(s->avctx, "fmx=%d\n", val);
630  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
631  s->last_mv[i][j][1] >> 1);
632  s->last_mv[i][j][1] = 2 * val;
633  s->mv[i][j][1] = val;
634  ff_tlog(s->avctx, "fmy=%d\n", val);
635  }
636  }
637  }
638  } else {
639  av_assert0(!s->progressive_sequence);
640  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
641  for (i = 0; i < 2; i++) {
642  if (HAS_MV(mb_type, i)) {
643  s->field_select[i][0] = get_bits1(&s->gb);
644  for (k = 0; k < 2; k++) {
645  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
646  s->last_mv[i][0][k]);
647  s->last_mv[i][0][k] = val;
648  s->last_mv[i][1][k] = val;
649  s->mv[i][0][k] = val;
650  }
651  }
652  }
653  }
654  break;
655  case MT_DMV:
656  if (s->progressive_sequence){
657  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
658  return AVERROR_INVALIDDATA;
659  }
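 /* Dual prime: a single vector plus a small differential (dmvector) is used
  * to derive predictions from both reference fields, which the motion
  * compensation code then averages. */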
660  s->mv_type = MV_TYPE_DMV;
661  for (i = 0; i < 2; i++) {
662  if (HAS_MV(mb_type, i)) {
663  int dmx, dmy, mx, my, m;
664  const int my_shift = s->picture_structure == PICT_FRAME;
665 
666  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
667  s->last_mv[i][0][0]);
668  s->last_mv[i][0][0] = mx;
669  s->last_mv[i][1][0] = mx;
670  dmx = get_dmv(s);
671  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
672  s->last_mv[i][0][1] >> my_shift);
673  dmy = get_dmv(s);
674 
675 
676  s->last_mv[i][0][1] = my * (1 << my_shift);
677  s->last_mv[i][1][1] = my * (1 << my_shift);
678 
679  s->mv[i][0][0] = mx;
680  s->mv[i][0][1] = my;
681  s->mv[i][1][0] = mx; // not used
682  s->mv[i][1][1] = my; // not used
683 
684  if (s->picture_structure == PICT_FRAME) {
685  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
686 
687  // m = 1 + 2 * s->top_field_first;
688  m = s->top_field_first ? 1 : 3;
689 
690  /* top -> top pred */
691  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
692  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
693  m = 4 - m;
694  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
695  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
696  } else {
697  mb_type |= MB_TYPE_16x16;
698 
699  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
700  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
701  if (s->picture_structure == PICT_TOP_FIELD)
702  s->mv[i][2][1]--;
703  else
704  s->mv[i][2][1]++;
705  }
706  }
707  }
708  break;
709  default:
710  av_log(s->avctx, AV_LOG_ERROR,
711  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
712  return AVERROR_INVALIDDATA;
713  }
714  }
715 
716  s->mb_intra = 0;
717  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision;
718  if (HAS_CBP(mb_type)) {
719  s->bdsp.clear_blocks(s->block[0]);
720 
721  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
722  if (mb_block_count > 6) {
723  cbp *= 1 << mb_block_count - 6;
724  cbp |= get_bits(&s->gb, mb_block_count - 6);
725  s->bdsp.clear_blocks(s->block[6]);
726  }
727  if (cbp <= 0) {
728  av_log(s->avctx, AV_LOG_ERROR,
729  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
730  return AVERROR_INVALIDDATA;
731  }
732 
733  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
734  cbp <<= 12 - mb_block_count;
735 
736  for (i = 0; i < mb_block_count; i++) {
737  if (cbp & (1 << 11)) {
738  if ((ret = mpeg2_decode_block_non_intra(s, s->block[i], i)) < 0)
739  return ret;
740  } else {
741  s->block_last_index[i] = -1;
742  }
743  cbp += cbp;
744  }
745  } else {
746  for (i = 0; i < 6; i++) {
747  if (cbp & 32) {
748  if ((ret = mpeg1_decode_block_inter(s, s->block[i], i)) < 0)
749  return ret;
750  } else {
751  s->block_last_index[i] = -1;
752  }
753  cbp += cbp;
754  }
755  }
756  } else {
757  for (i = 0; i < 12; i++)
758  s->block_last_index[i] = -1;
759  }
760  }
761 
762  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
763 
764  return 0;
765 }
766 
 767 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 768 {
769  Mpeg1Context *s = avctx->priv_data;
770  MpegEncContext *s2 = &s->mpeg_enc_ctx;
771  int ret;
772 
773  s2->out_format = FMT_MPEG1;
774 
775  if ( avctx->codec_tag != AV_RL32("VCR2")
776  && avctx->codec_tag != AV_RL32("BW10"))
777  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
778  ret = ff_mpv_decode_init(s2, avctx);
779  if (ret < 0)
780  return ret;
781 
783 
785  avctx->color_range = AVCOL_RANGE_MPEG;
786  return 0;
787 }
788 
789 #if HAVE_THREADS
790 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
791  const AVCodecContext *avctx_from)
792 {
793  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
794  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
795  int err;
796 
797  if (avctx == avctx_from || !s1->context_initialized)
798  return 0;
799 
800  err = ff_mpeg_update_thread_context(avctx, avctx_from);
801  if (err)
802  return err;
803 
804  if (!s->context_initialized)
805  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
806 
807  return 0;
808 }
809 #endif
810 
812 #if CONFIG_MPEG1_NVDEC_HWACCEL
814 #endif
815 #if CONFIG_MPEG1_VDPAU_HWACCEL
817 #endif
820 };
821 
823 #if CONFIG_MPEG2_NVDEC_HWACCEL
825 #endif
826 #if CONFIG_MPEG2_VDPAU_HWACCEL
828 #endif
829 #if CONFIG_MPEG2_DXVA2_HWACCEL
831 #endif
832 #if CONFIG_MPEG2_D3D11VA_HWACCEL
835 #endif
836 #if CONFIG_MPEG2_D3D12VA_HWACCEL
838 #endif
839 #if CONFIG_MPEG2_VAAPI_HWACCEL
841 #endif
842 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
844 #endif
847 };
848 
849 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
 850  AV_PIX_FMT_YUV422P,
 851  AV_PIX_FMT_NONE
 852 };
853 
854 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
 855  AV_PIX_FMT_YUV444P,
 856  AV_PIX_FMT_NONE
 857 };
858 
 859 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
 860 {
861  Mpeg1Context *s1 = avctx->priv_data;
862  MpegEncContext *s = &s1->mpeg_enc_ctx;
863  const enum AVPixelFormat *pix_fmts;
864 
865  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
866  return AV_PIX_FMT_GRAY8;
867 
 868  if (s->chroma_format < CHROMA_422)
 869  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
 870  mpeg1_hwaccel_pixfmt_list_420 :
 871  mpeg2_hwaccel_pixfmt_list_420;
 872  else if (s->chroma_format == CHROMA_422)
 873  pix_fmts = mpeg12_pixfmt_list_422;
 874  else
 875  pix_fmts = mpeg12_pixfmt_list_444;
 876 
877  return ff_get_format(avctx, pix_fmts);
878 }
879 
880 /* Call this function when we know all parameters.
881  * It may be called in different places for MPEG-1 and MPEG-2. */
 882 static int mpeg_decode_postinit(AVCodecContext *avctx)
 883 {
884  Mpeg1Context *s1 = avctx->priv_data;
885  MpegEncContext *s = &s1->mpeg_enc_ctx;
886  int ret;
887 
888  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
889  // MPEG-1 aspect
890  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
891  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
892  } else { // MPEG-2
893  // MPEG-2 aspect
894  if (s1->aspect_ratio_info > 1) {
 895  AVRational dar =
 896  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
897  (AVRational) { s1->pan_scan.width,
898  s1->pan_scan.height }),
899  (AVRational) { s->width, s->height });
900 
901  /* We ignore the spec here and guess a bit as reality does not
902  * match the spec, see for example res_change_ffmpeg_aspect.ts
903  * and sequence-display-aspect.mpg.
904  * issue1613, 621, 562 */
905  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
906  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
907  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
 908  s->avctx->sample_aspect_ratio =
 909  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
910  (AVRational) { s->width, s->height });
911  } else {
 912  s->avctx->sample_aspect_ratio =
 913  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
914  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
915 // issue1613 4/3 16/9 -> 16/9
916 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
917 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
918 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
 919  ff_dlog(avctx, "aspect A %d/%d\n",
 920  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
 921  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
922  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
923  s->avctx->sample_aspect_ratio.den);
924  }
925  } else {
 926  s->avctx->sample_aspect_ratio =
 927  ff_mpeg2_aspect[s1->aspect_ratio_info];
928  }
929  } // MPEG-2
930 
931  if (av_image_check_sar(s->width, s->height,
932  avctx->sample_aspect_ratio) < 0) {
933  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
934  avctx->sample_aspect_ratio.num,
935  avctx->sample_aspect_ratio.den);
936  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
937  }
938 
939  if (!s->context_initialized ||
940  avctx->coded_width != s->width ||
941  avctx->coded_height != s->height ||
942  s1->save_width != s->width ||
943  s1->save_height != s->height ||
944  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
945  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
946  0) {
947  if (s->context_initialized)
 948  ff_mpv_common_end(s);
 949 
950  ret = ff_set_dimensions(avctx, s->width, s->height);
951  if (ret < 0)
952  return ret;
953 
954  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s1->bit_rate &&
955  (s1->bit_rate != 0x3FFFF*400)) {
956  avctx->rc_max_rate = s1->bit_rate;
957  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s1->bit_rate &&
958  (s1->bit_rate != 0x3FFFF*400 || s1->vbv_delay != 0xFFFF)) {
959  avctx->bit_rate = s1->bit_rate;
960  }
961  s1->save_aspect = s->avctx->sample_aspect_ratio;
962  s1->save_width = s->width;
963  s1->save_height = s->height;
964  s1->save_progressive_seq = s->progressive_sequence;
965 
966  /* low_delay may be forced, in this case we will have B-frames
967  * that behave like P-frames. */
968  avctx->has_b_frames = !s->low_delay;
969 
970  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
971  // MPEG-1 fps
974  } else { // MPEG-2
975  // MPEG-2 fps
976  av_reduce(&s->avctx->framerate.num,
 977  &s->avctx->framerate.den,
 978  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
 979  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
980  1 << 30);
981 
 982  switch (s->chroma_format) {
 983  case CHROMA_420: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
 984  case CHROMA_422:
 985  case CHROMA_444: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
 986  default: av_assert0(0);
987  }
988  } // MPEG-2
989 
990  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
991 
992  if ((ret = ff_mpv_common_init(s)) < 0)
993  return ret;
994  if (!s->avctx->lowres)
995  for (int i = 0; i < s->slice_context_count; i++)
996  ff_mpv_framesize_disable(&s->thread_context[i]->sc);
997  }
998  return 0;
999 }
1000 
1001 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1002  int buf_size)
1003 {
1004  Mpeg1Context *s1 = avctx->priv_data;
1005  MpegEncContext *s = &s1->mpeg_enc_ctx;
1006  int ref, f_code, vbv_delay, ret;
1007 
1008  ret = init_get_bits8(&s->gb, buf, buf_size);
1009  if (ret < 0)
1010  return ret;
1011 
1012  ref = get_bits(&s->gb, 10); /* temporal ref */
1013  s->pict_type = get_bits(&s->gb, 3);
1014  if (s->pict_type == 0 || s->pict_type > 3)
1015  return AVERROR_INVALIDDATA;
1016 
1017  vbv_delay = get_bits(&s->gb, 16);
1018  s1->vbv_delay = vbv_delay;
1019  if (s->pict_type == AV_PICTURE_TYPE_P ||
1020  s->pict_type == AV_PICTURE_TYPE_B) {
1021  s->full_pel[0] = get_bits1(&s->gb);
1022  f_code = get_bits(&s->gb, 3);
1023  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1024  return AVERROR_INVALIDDATA;
1025  f_code += !f_code;
1026  s->mpeg_f_code[0][0] = f_code;
1027  s->mpeg_f_code[0][1] = f_code;
1028  }
1029  if (s->pict_type == AV_PICTURE_TYPE_B) {
1030  s->full_pel[1] = get_bits1(&s->gb);
1031  f_code = get_bits(&s->gb, 3);
1032  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1033  return AVERROR_INVALIDDATA;
1034  f_code += !f_code;
1035  s->mpeg_f_code[1][0] = f_code;
1036  s->mpeg_f_code[1][1] = f_code;
1037  }
1038 
1039  if (avctx->debug & FF_DEBUG_PICT_INFO)
1040  av_log(avctx, AV_LOG_DEBUG,
1041  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1042 
1043  return 0;
1044 }
1045 
 1046 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
 1047 {
1048  MpegEncContext *s = &s1->mpeg_enc_ctx;
1049  int horiz_size_ext, vert_size_ext;
1050  int bit_rate_ext;
1051 
1052  skip_bits(&s->gb, 1); /* profile and level esc*/
1053  s->avctx->profile = get_bits(&s->gb, 3);
1054  s->avctx->level = get_bits(&s->gb, 4);
1055  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1056  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1057 
1058  if (!s->chroma_format) {
1059  s->chroma_format = CHROMA_420;
1060  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1061  }
1062 
1063  horiz_size_ext = get_bits(&s->gb, 2);
1064  vert_size_ext = get_bits(&s->gb, 2);
1065  s->width |= (horiz_size_ext << 12);
1066  s->height |= (vert_size_ext << 12);
1067  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1068  s1->bit_rate += (bit_rate_ext << 18) * 400LL;
1069  check_marker(s->avctx, &s->gb, "after bit rate extension");
1070  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1071 
1072  s->low_delay = get_bits1(&s->gb);
1073  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1074  s->low_delay = 1;
1075 
1076  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1077  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1078 
1079  ff_dlog(s->avctx, "sequence extension\n");
1080  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1081 
1082  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1083  av_log(s->avctx, AV_LOG_DEBUG,
1084  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1085  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1086  s->avctx->rc_buffer_size, s1->bit_rate);
1087 }
1088 
 1089 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
 1090 {
1091  MpegEncContext *s = &s1->mpeg_enc_ctx;
1092  int color_description, w, h;
1093 
1094  skip_bits(&s->gb, 3); /* video format */
1095  color_description = get_bits1(&s->gb);
1096  if (color_description) {
1097  s->avctx->color_primaries = get_bits(&s->gb, 8);
1098  s->avctx->color_trc = get_bits(&s->gb, 8);
1099  s->avctx->colorspace = get_bits(&s->gb, 8);
1100  }
1101  w = get_bits(&s->gb, 14);
1102  skip_bits(&s->gb, 1); // marker
1103  h = get_bits(&s->gb, 14);
1104  // remaining 3 bits are zero padding
1105 
1106  s1->pan_scan.width = 16 * w;
1107  s1->pan_scan.height = 16 * h;
1108 
1109  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1110  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1111 }
1112 
 1113 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
 1114 {
1115  MpegEncContext *s = &s1->mpeg_enc_ctx;
1116  int i, nofco;
1117 
1118  nofco = 1;
1119  if (s->progressive_sequence) {
1120  if (s->repeat_first_field) {
1121  nofco++;
1122  if (s->top_field_first)
1123  nofco++;
1124  }
1125  } else {
1126  if (s->picture_structure == PICT_FRAME) {
1127  nofco++;
1128  if (s->repeat_first_field)
1129  nofco++;
1130  }
1131  }
1132  for (i = 0; i < nofco; i++) {
1133  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1134  skip_bits(&s->gb, 1); // marker
1135  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1136  skip_bits(&s->gb, 1); // marker
1137  }
1138 
1139  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1140  av_log(s->avctx, AV_LOG_DEBUG,
1141  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1142  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1143  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1144  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1145 }
1146 
1147 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1148  uint16_t matrix1[64], int intra)
1149 {
1150  int i;
1151 
1152  for (i = 0; i < 64; i++) {
1153  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1154  int v = get_bits(&s->gb, 8);
1155  if (v == 0) {
1156  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1157  return AVERROR_INVALIDDATA;
1158  }
1159  if (intra && i == 0 && v != 8) {
1160  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1161  v = 8; // needed by pink.mpg / issue1046
1162  }
1163  matrix0[j] = v;
1164  if (matrix1)
1165  matrix1[j] = v;
1166  }
1167  return 0;
1168 }
1169 
 1170 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
 1171 {
1172  ff_dlog(s->avctx, "matrix extension\n");
1173 
1174  if (get_bits1(&s->gb))
1175  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1176  if (get_bits1(&s->gb))
1177  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1178  if (get_bits1(&s->gb))
1179  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1180  if (get_bits1(&s->gb))
1181  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1182 }
1183 
 1184 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
 1185 {
1186  MpegEncContext *s = &s1->mpeg_enc_ctx;
1187 
1188  s->full_pel[0] = s->full_pel[1] = 0;
1189  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1190  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1191  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1192  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1193  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1194  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1195  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1196  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1197  if (!s->pict_type && s->context_initialized) {
1198  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1199  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1200  return AVERROR_INVALIDDATA;
1201  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1202  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1203  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1204  s->pict_type = AV_PICTURE_TYPE_I;
1205  else
1206  s->pict_type = AV_PICTURE_TYPE_P;
1207  } else
1208  s->pict_type = AV_PICTURE_TYPE_B;
1209  }
1210 
1211  s->intra_dc_precision = get_bits(&s->gb, 2);
1212  s->picture_structure = get_bits(&s->gb, 2);
1213  s->top_field_first = get_bits1(&s->gb);
1214  s->frame_pred_frame_dct = get_bits1(&s->gb);
1215  s->concealment_motion_vectors = get_bits1(&s->gb);
1216  s->q_scale_type = get_bits1(&s->gb);
1217  s->intra_vlc_format = get_bits1(&s->gb);
1218  s->alternate_scan = get_bits1(&s->gb);
1219  s->repeat_first_field = get_bits1(&s->gb);
1220  s->chroma_420_type = get_bits1(&s->gb);
1221  s->progressive_frame = get_bits1(&s->gb);
1222 
1223  // We only initialize intra_scantable.permutated, as this is all we use.
1224  ff_permute_scantable(s->intra_scantable.permutated,
1225  s->alternate_scan ? ff_alternate_vertical_scan : ff_zigzag_direct,
1226  s->idsp.idct_permutation);
1227 
1228  /* composite display not parsed */
1229  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1230  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1231  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1232  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1233  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1234  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1235  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1236  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1237  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1238 
1239  return 0;
1240 }
1241 
1242 static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
1243 {
1244  MpegEncContext *s = &s1->mpeg_enc_ctx;
1245  AVCodecContext *avctx = s->avctx;
1246  int second_field = 0;
1247  int ret;
1248 
1249  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
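 /* Rough sanity check: reject buffers far too small to hold even a
  * minimally coded picture (lower bound on bits per macroblock). */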
1250  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1251  return AVERROR_INVALIDDATA;
1252  }
1253 
1254  /* start frame decoding */
1255  if (s->first_field || s->picture_structure == PICT_FRAME) {
1256  AVFrameSideData *pan_scan;
1257 
1258  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1259  return ret;
1260 
1261  if (s->picture_structure != PICT_FRAME) {
1262  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
1263  (s->picture_structure == PICT_TOP_FIELD);
1264 
1265  for (int i = 0; i < 3; i++) {
1266  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1267  s->cur_pic.data[i] = FF_PTR_ADD(s->cur_pic.data[i],
1268  s->cur_pic.linesize[i]);
1269  }
1270  s->cur_pic.linesize[i] *= 2;
1271  }
1272  }
1273 
1275 
1276  /* first check if we must repeat the frame */
1277  s->cur_pic.ptr->f->repeat_pict = 0;
1278  if (s->repeat_first_field) {
1279  if (s->progressive_sequence) {
1280  if (s->top_field_first)
1281  s->cur_pic.ptr->f->repeat_pict = 4;
1282  else
1283  s->cur_pic.ptr->f->repeat_pict = 2;
1284  } else if (s->progressive_frame) {
1285  s->cur_pic.ptr->f->repeat_pict = 1;
1286  }
1287  }
1288 
1289  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1290  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1291  &pan_scan);
1292  if (ret < 0)
1293  return ret;
1294  if (pan_scan)
1295  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1296 
1297  if (s1->a53_buf_ref) {
 1298  ret = ff_frame_new_side_data_from_buf(
 1299  s->avctx, s->cur_pic.ptr->f, AV_FRAME_DATA_A53_CC,
1300  &s1->a53_buf_ref);
1301  if (ret < 0)
1302  return ret;
1303  }
1304 
1305  if (s1->has_stereo3d) {
1306  AVStereo3D *stereo = av_stereo3d_create_side_data(s->cur_pic.ptr->f);
1307  if (!stereo)
1308  return AVERROR(ENOMEM);
1309 
1310  stereo->type = s1->stereo3d_type;
1311  s1->has_stereo3d = 0;
1312  }
1313 
1314  if (s1->has_afd) {
1315  AVFrameSideData *sd;
1316  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1317  AV_FRAME_DATA_AFD, 1, &sd);
1318  if (ret < 0)
1319  return ret;
1320  if (sd)
1321  *sd->data = s1->afd;
1322  s1->has_afd = 0;
1323  }
1324 
1325  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1326  ff_thread_finish_setup(avctx);
1327  } else { // second field
1328  second_field = 1;
1329  if (!s->cur_pic.ptr) {
1330  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1331  return AVERROR_INVALIDDATA;
1332  }
1333 
1334  if (s->avctx->hwaccel) {
1335  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1336  av_log(avctx, AV_LOG_ERROR,
1337  "hardware accelerator failed to decode first field\n");
1338  return ret;
1339  }
1340  }
1342  if (ret < 0)
1343  return ret;
1344 
1345  for (int i = 0; i < 3; i++) {
1346  s->cur_pic.data[i] = s->cur_pic.ptr->f->data[i];
1347  if (s->picture_structure == PICT_BOTTOM_FIELD)
1348  s->cur_pic.data[i] +=
1349  s->cur_pic.ptr->f->linesize[i];
1350  }
1351  }
1352 
1353  if (avctx->hwaccel) {
1354  if ((ret = FF_HW_CALL(avctx, start_frame, NULL, buf, buf_size)) < 0)
1355  return ret;
1356  } else if (s->codec_tag == MKTAG('V', 'C', 'R', '2')) {
1357  // Exchange UV
1358  FFSWAP(uint8_t*, s->cur_pic.data[1], s->cur_pic.data[2]);
1359  FFSWAP(ptrdiff_t, s->cur_pic.linesize[1], s->cur_pic.linesize[2]);
1360  if (!second_field) {
1361  FFSWAP(uint8_t*, s->next_pic.data[1], s->next_pic.data[2]);
1362  FFSWAP(ptrdiff_t, s->next_pic.linesize[1], s->next_pic.linesize[2]);
1363  FFSWAP(uint8_t*, s->last_pic.data[1], s->last_pic.data[2]);
1364  FFSWAP(ptrdiff_t, s->last_pic.linesize[1], s->last_pic.linesize[2]);
1365  }
1366  }
1367 
1368  return 0;
1369 }
1370 
1371 #define DECODE_SLICE_ERROR -1
1372 #define DECODE_SLICE_OK 0
1373 
1374 /**
1375  * Decode a slice.
1376  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1377  * @return DECODE_SLICE_ERROR if the slice is damaged,
1378  * DECODE_SLICE_OK if this slice is OK
1379  */
1380 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1381  const uint8_t **buf, int buf_size)
1382 {
1383  AVCodecContext *avctx = s->avctx;
1384  const int lowres = s->avctx->lowres;
1385  const int field_pic = s->picture_structure != PICT_FRAME;
1386  int ret;
1387 
1388  s->resync_mb_x =
1389  s->resync_mb_y = -1;
1390 
1391  av_assert0(mb_y < s->mb_height);
1392 
1393  ret = init_get_bits8(&s->gb, *buf, buf_size);
1394  if (ret < 0)
1395  return ret;
1396 
1397  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1398  skip_bits(&s->gb, 3);
1399 
 1400  ff_mpeg1_clean_buffers(s);
 1401  s->interlaced_dct = 0;
1402 
1403  s->qscale = mpeg_get_qscale(s);
1404 
1405  if (s->qscale == 0) {
1406  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1407  return AVERROR_INVALIDDATA;
1408  }
1409 
1410  /* extra slice info */
1411  if (skip_1stop_8data_bits(&s->gb) < 0)
1412  return AVERROR_INVALIDDATA;
1413 
1414  s->mb_x = 0;
1415 
1416  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1417  skip_bits1(&s->gb);
1418  } else {
1419  while (get_bits_left(&s->gb) > 0) {
1420  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1421  MBINCR_VLC_BITS, 2);
1422  if (code < 0) {
1423  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1424  return AVERROR_INVALIDDATA;
1425  }
1426  if (code >= 33) {
1427  if (code == 33)
1428  s->mb_x += 33;
1429  /* otherwise, stuffing, nothing to do */
1430  } else {
1431  s->mb_x += code;
1432  break;
1433  }
1434  }
1435  }
1436 
1437  if (s->mb_x >= (unsigned) s->mb_width) {
1438  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1439  return AVERROR_INVALIDDATA;
1440  }
1441 
1442  if (avctx->hwaccel) {
1443  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1444  int start_code = -1;
1445  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1446  if (buf_end < *buf + buf_size)
1447  buf_end -= 4;
1448  s->mb_y = mb_y;
1449  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1450  return DECODE_SLICE_ERROR;
1451  *buf = buf_end;
1452  return DECODE_SLICE_OK;
1453  }
1454 
1455  s->resync_mb_x = s->mb_x;
1456  s->resync_mb_y = s->mb_y = mb_y;
1457  s->mb_skip_run = 0;
 1458  memset(s->last_mv, 0, sizeof(s->last_mv));
 1459 
1460  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1461  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1462  av_log(s->avctx, AV_LOG_DEBUG,
1463  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1464  s->qscale,
1465  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1466  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1467  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1468  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1469  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1470  s->progressive_sequence ? "ps" : "",
1471  s->progressive_frame ? "pf" : "",
1472  s->alternate_scan ? "alt" : "",
1473  s->top_field_first ? "top" : "",
1474  s->intra_dc_precision, s->picture_structure,
1475  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1476  s->q_scale_type, s->intra_vlc_format,
1477  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1478  }
1479  }
1480 
1481  for (;;) {
1482  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1483  return ret;
1484 
1485  // Note motion_val is normally NULL unless we want to extract the MVs.
1486  if (s->cur_pic.motion_val[0]) {
1487  const int wrap = s->b8_stride;
1488  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1489  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1490  int motion_x, motion_y, dir, i;
1491 
1492  for (i = 0; i < 2; i++) {
1493  for (dir = 0; dir < 2; dir++) {
1494  if (s->mb_intra ||
1495  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1496  motion_x = motion_y = 0;
1497  } else if (s->mv_type == MV_TYPE_16X16 ||
1498  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1499  motion_x = s->mv[dir][0][0];
1500  motion_y = s->mv[dir][0][1];
1501  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1502  motion_x = s->mv[dir][i][0];
1503  motion_y = s->mv[dir][i][1];
1504  }
1505 
1506  s->cur_pic.motion_val[dir][xy][0] = motion_x;
1507  s->cur_pic.motion_val[dir][xy][1] = motion_y;
1508  s->cur_pic.motion_val[dir][xy + 1][0] = motion_x;
1509  s->cur_pic.motion_val[dir][xy + 1][1] = motion_y;
1510  s->cur_pic.ref_index [dir][b8_xy] =
1511  s->cur_pic.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1512  av_assert2(s->field_select[dir][i] == 0 ||
1513  s->field_select[dir][i] == 1);
1514  }
1515  xy += wrap;
1516  b8_xy += 2;
1517  }
1518  }
1519 
1520  s->dest[0] += 16 >> lowres;
1521  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1522  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1523 
1524  ff_mpv_reconstruct_mb(s, s->block);
1525 
1526  if (++s->mb_x >= s->mb_width) {
1527  const int mb_size = 16 >> s->avctx->lowres;
1528  int left;
1529 
1530  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
 1531  ff_mpv_report_decode_progress(s);
 1532 
1533  s->mb_x = 0;
1534  s->mb_y += 1 << field_pic;
1535 
1536  if (s->mb_y >= s->mb_height) {
1537  int left = get_bits_left(&s->gb);
1538  int is_d10 = s->chroma_format == CHROMA_422 &&
1539  s->pict_type == AV_PICTURE_TYPE_I &&
1540  avctx->profile == 0 && avctx->level == 5 &&
1541  s->intra_dc_precision == 2 &&
1542  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1543  s->progressive_frame == 0
1544  /* vbv_delay == 0xBBB || 0xE10 */;
1545 
1546  if (left >= 32 && !is_d10) {
1547  GetBitContext gb = s->gb;
1548  align_get_bits(&gb);
1549  if (show_bits(&gb, 24) == 0x060E2B) {
1550  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1551  is_d10 = 1;
1552  }
1553  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1554  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1555  goto eos;
1556  }
1557  }
1558 
1559  if (left < 0 ||
1560  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1561  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1562  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1563  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1564  return AVERROR_INVALIDDATA;
1565  } else
1566  goto eos;
1567  }
1568  // There are some files out there which are missing the last slice
1569  // in cases where the slice is completely outside the visible
1570  // area, we detect this here instead of running into the end expecting
1571  // more data
1572  left = get_bits_left(&s->gb);
1573  if (s->mb_y >= ((s->height + 15) >> 4) &&
1574  !s->progressive_sequence &&
1575  left <= 25 &&
1576  left >= 0 &&
1577  s->mb_skip_run == -1 &&
1578  (!left || show_bits(&s->gb, left) == 0))
1579  goto eos;
1580 
1582  }
1583 
1584  /* skip mb handling */
1585  if (s->mb_skip_run == -1) {
1586  /* read increment again */
1587  s->mb_skip_run = 0;
1588  for (;;) {
1589  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1590  MBINCR_VLC_BITS, 2);
1591  if (code < 0) {
1592  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1593  return AVERROR_INVALIDDATA;
1594  }
1595  if (code >= 33) {
1596  if (code == 33) {
1597  s->mb_skip_run += 33;
1598  } else if (code == 35) {
1599  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1600  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1601  return AVERROR_INVALIDDATA;
1602  }
1603  goto eos; /* end of slice */
1604  }
1605  /* otherwise, stuffing, nothing to do */
1606  } else {
1607  s->mb_skip_run += code;
1608  break;
1609  }
1610  }
1611  if (s->mb_skip_run) {
1612  int i;
1613  if (s->pict_type == AV_PICTURE_TYPE_I) {
1614  av_log(s->avctx, AV_LOG_ERROR,
1615  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1616  return AVERROR_INVALIDDATA;
1617  }
1618 
1619  /* skip mb */
1620  s->mb_intra = 0;
1621  for (i = 0; i < 12; i++)
1622  s->block_last_index[i] = -1;
1623  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision;
1624  if (s->picture_structure == PICT_FRAME)
1625  s->mv_type = MV_TYPE_16X16;
1626  else
1627  s->mv_type = MV_TYPE_FIELD;
1628  if (s->pict_type == AV_PICTURE_TYPE_P) {
1629  /* if P type, zero motion vector is implied */
1630  s->mv_dir = MV_DIR_FORWARD;
1631  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1632  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1633  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1634  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1635  } else {
1636  /* if B type, reuse previous vectors and directions */
1637  s->mv[0][0][0] = s->last_mv[0][0][0];
1638  s->mv[0][0][1] = s->last_mv[0][0][1];
1639  s->mv[1][0][0] = s->last_mv[1][0][0];
1640  s->mv[1][0][1] = s->last_mv[1][0][1];
1641  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1642  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1643  }
1644  }
1645  }
1646  }
1647 eos: // end of slice
1648  if (get_bits_left(&s->gb) < 0) {
1649  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1650  return AVERROR_INVALIDDATA;
1651  }
1652  *buf += (get_bits_count(&s->gb) - 1) / 8;
1653  ff_dlog(s->avctx, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1654  return 0;
1655 }
1656 
 1657 static int slice_decode_thread(AVCodecContext *c, void *arg)
 1658 {
1659  MpegEncContext *s = *(void **) arg;
1660  const uint8_t *buf = s->gb.buffer;
1661  int mb_y = s->start_mb_y;
1662  const int field_pic = s->picture_structure != PICT_FRAME;
1663 
1664  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1665 
1666  for (;;) {
1667  uint32_t start_code;
1668  int ret;
1669 
1670  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1671  emms_c();
1672  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1673  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1674  s->start_mb_y, s->end_mb_y, s->er.error_count);
1675  if (ret < 0) {
1676  if (c->err_recognition & AV_EF_EXPLODE)
1677  return ret;
1678  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1679  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
 1680  s->mb_x, s->mb_y,
 1681  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1682  } else {
1683  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
 1684  s->mb_x - 1, s->mb_y,
 1685  ER_AC_END | ER_DC_END | ER_MV_END);
1686  }
1687 
1688  if (s->mb_y == s->end_mb_y)
1689  return 0;
1690 
1691  start_code = -1;
1692  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1693  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
 1694  return AVERROR_INVALIDDATA;
 1695  mb_y = start_code - SLICE_MIN_START_CODE;
1696  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1697  mb_y += (*buf&0xE0)<<2;
1698  mb_y <<= field_pic;
1699  if (s->picture_structure == PICT_BOTTOM_FIELD)
1700  mb_y++;
1701  if (mb_y >= s->end_mb_y)
1702  return AVERROR_INVALIDDATA;
1703  }
1704 }
1705 
1706 /**
1707  * Handle slice ends.
1708  * @return 1 if it seems to be the last slice
1709  */
1710 static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
1711 {
1712  Mpeg1Context *s1 = avctx->priv_data;
1713  MpegEncContext *s = &s1->mpeg_enc_ctx;
1714 
1715  if (!s->context_initialized || !s->cur_pic.ptr)
1716  return 0;
1717 
1718  if (s->avctx->hwaccel) {
1719  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1720  if (ret < 0) {
1721  av_log(avctx, AV_LOG_ERROR,
1722  "hardware accelerator failed to decode picture\n");
1723  return ret;
1724  }
1725  }
1726 
1727  /* end of slice reached */
1728  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1729  /* end of image */
1730 
1731  ff_er_frame_end(&s->er, NULL);
1732 
 1732 
 1733  ff_mpv_frame_end(s);
 1734 
1735  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1736  int ret = av_frame_ref(pict, s->cur_pic.ptr->f);
1737  if (ret < 0)
1738  return ret;
1739  ff_print_debug_info(s, s->cur_pic.ptr, pict);
1740  ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1741  *got_output = 1;
1742  } else {
1743  /* latency of 1 frame for I- and P-frames */
1744  if (s->last_pic.ptr && !s->last_pic.ptr->dummy) {
1745  int ret = av_frame_ref(pict, s->last_pic.ptr->f);
1746  if (ret < 0)
1747  return ret;
1748  ff_print_debug_info(s, s->last_pic.ptr, pict);
1749  ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1750  *got_output = 1;
1751  }
1752  }
1753 
1754  return 1;
1755  } else {
1756  return 0;
1757  }
1758 }
1759 
 1760 static int mpeg1_decode_sequence(AVCodecContext *avctx,
 1761  const uint8_t *buf, int buf_size)
1762 {
1763  Mpeg1Context *s1 = avctx->priv_data;
1764  MpegEncContext *s = &s1->mpeg_enc_ctx;
1765  int width, height;
1766  int i, v, j;
1767 
1768  int ret = init_get_bits8(&s->gb, buf, buf_size);
1769  if (ret < 0)
1770  return ret;
1771 
1772  width = get_bits(&s->gb, 12);
1773  height = get_bits(&s->gb, 12);
1774  if (width == 0 || height == 0) {
1775  av_log(avctx, AV_LOG_WARNING,
1776  "Invalid horizontal or vertical size value.\n");
1778  return AVERROR_INVALIDDATA;
1779  }
1780  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1781  if (s1->aspect_ratio_info == 0) {
1782  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1784  return AVERROR_INVALIDDATA;
1785  }
1786  s1->frame_rate_index = get_bits(&s->gb, 4);
1787  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1788  av_log(avctx, AV_LOG_WARNING,
1789  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1790  s1->frame_rate_index = 1;
1791  }
1792  s1->bit_rate = get_bits(&s->gb, 18) * 400;
1793  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1794  return AVERROR_INVALIDDATA;
1795  }
1796 
1797  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1798  skip_bits(&s->gb, 1);
1799 
1800  /* get matrix */
1801  if (get_bits1(&s->gb)) {
1802  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1803  } else {
1804  for (i = 0; i < 64; i++) {
1805  j = s->idsp.idct_permutation[i];
 1806  v = ff_mpeg1_default_intra_matrix[i];
 1807  s->intra_matrix[j] = v;
1808  s->chroma_intra_matrix[j] = v;
1809  }
1810  }
1811  if (get_bits1(&s->gb)) {
1812  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1813  } else {
1814  for (i = 0; i < 64; i++) {
1815  int j = s->idsp.idct_permutation[i];
 1816  v = ff_mpeg1_default_non_intra_matrix[i];
 1817  s->inter_matrix[j] = v;
1818  s->chroma_inter_matrix[j] = v;
1819  }
1820  }
1821 
1822  if (show_bits(&s->gb, 23) != 0) {
1823  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1824  return AVERROR_INVALIDDATA;
1825  }
1826 
1827  s->width = width;
1828  s->height = height;
1829 
1830  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1831  s->progressive_sequence = 1;
1832  s->progressive_frame = 1;
1833  s->picture_structure = PICT_FRAME;
1834  s->first_field = 0;
1835  s->frame_pred_frame_dct = 1;
1836  s->chroma_format = CHROMA_420;
1837  s->codec_id =
1838  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1839  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1840  s->low_delay = 1;
1841 
1842  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1843  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1844  s->avctx->rc_buffer_size, s1->bit_rate, s1->aspect_ratio_info);
1845 
1846  return 0;
1847 }
1848 
 1849 static int vcr2_init_sequence(AVCodecContext *avctx)
 1850 {
1851  Mpeg1Context *s1 = avctx->priv_data;
1852  MpegEncContext *s = &s1->mpeg_enc_ctx;
1853  int i, v, ret;
1854 
1855  /* start new MPEG-1 context decoding */
1856  if (s->context_initialized)
 1857  ff_mpv_common_end(s);
 1858 
1859  s->width = avctx->coded_width;
1860  s->height = avctx->coded_height;
1861  avctx->has_b_frames = 0; // true?
1862  s->low_delay = 1;
1863 
1864  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1865 
1866  if ((ret = ff_mpv_common_init(s)) < 0)
1867  return ret;
1868  if (!s->avctx->lowres)
1869  for (int i = 0; i < s->slice_context_count; i++)
1870  ff_mpv_framesize_disable(&s->thread_context[i]->sc);
1871 
1872  for (i = 0; i < 64; i++) {
1873  int j = s->idsp.idct_permutation[i];
 1874  v = ff_mpeg1_default_intra_matrix[i];
 1875  s->intra_matrix[j] = v;
1876  s->chroma_intra_matrix[j] = v;
1877 
 1878  v = ff_mpeg1_default_non_intra_matrix[i];
 1879  s->inter_matrix[j] = v;
1880  s->chroma_inter_matrix[j] = v;
1881  }
1882 
1883  s->progressive_sequence = 1;
1884  s->progressive_frame = 1;
1885  s->picture_structure = PICT_FRAME;
1886  s->first_field = 0;
1887  s->frame_pred_frame_dct = 1;
1888  s->chroma_format = CHROMA_420;
1889  if (s->codec_tag == AV_RL32("BW10")) {
1890  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1891  } else {
1892  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1893  }
1894  s1->save_width = s->width;
1895  s1->save_height = s->height;
1896  s1->save_progressive_seq = s->progressive_sequence;
1897  return 0;
1898 }
1899 
1901  const char *label)
1902 {
1903  Mpeg1Context *s1 = avctx->priv_data;
1904 
1906 
1907  if (!s1->cc_format) {
1908  s1->cc_format = format;
1909 
1910  av_log(avctx, AV_LOG_DEBUG, "CC: first seen substream is %s format\n", label);
1911  }
1912 
 1913 #if FF_API_CODEC_PROPS
 1914 FF_DISABLE_DEPRECATION_WARNINGS
 1915  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
 1916 FF_ENABLE_DEPRECATION_WARNINGS
 1917 #endif
1918 }
1919 
 1920 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
 1921  const uint8_t *p, int buf_size)
1922 {
1923  Mpeg1Context *s1 = avctx->priv_data;
1924 
1925  if ((!s1->cc_format || s1->cc_format == CC_FORMAT_A53_PART4) &&
1926  buf_size >= 6 &&
1927  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1928  p[4] == 3 && (p[5] & 0x40)) {
1929  /* extract A53 Part 4 CC data */
1930  int cc_count = p[5] & 0x1f;
1931  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1932  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1933  const uint64_t new_size = (old_size + cc_count
1934  * UINT64_C(3));
1935  int ret;
1936 
1937  if (new_size > 3*A53_MAX_CC_COUNT)
1938  return AVERROR(EINVAL);
1939 
1940  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1941  if (ret >= 0)
1942  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1943 
1944  mpeg_set_cc_format(avctx, CC_FORMAT_A53_PART4, "A/53 Part 4");
1945  }
1946  return 1;
1947  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_SCTE20) &&
1948  buf_size >= 2 &&
1949  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1950  /* extract SCTE-20 CC data */
1951  GetBitContext gb;
1952  int cc_count = 0;
1953  int i, ret;
1954 
1955  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1956  if (ret < 0)
1957  return ret;
1958  cc_count = get_bits(&gb, 5);
1959  if (cc_count > 0) {
1960  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1961  const uint64_t new_size = (old_size + cc_count
1962  * UINT64_C(3));
1963  if (new_size > 3*A53_MAX_CC_COUNT)
1964  return AVERROR(EINVAL);
1965 
1966  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1967  if (ret >= 0) {
1968  uint8_t field, cc1, cc2;
1969  uint8_t *cap = s1->a53_buf_ref->data + old_size;
1970 
1971  memset(cap, 0, cc_count * 3);
1972  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1973  skip_bits(&gb, 2); // priority
1974  field = get_bits(&gb, 2);
1975  skip_bits(&gb, 5); // line_offset
1976  cc1 = get_bits(&gb, 8);
1977  cc2 = get_bits(&gb, 8);
1978  skip_bits(&gb, 1); // marker
1979 
1980  if (!field) { // forbidden
1981  cap[0] = cap[1] = cap[2] = 0x00;
1982  } else {
1983  field = (field == 2 ? 1 : 0);
1984  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
1985  cap[0] = 0x04 | field;
1986  cap[1] = ff_reverse[cc1];
1987  cap[2] = ff_reverse[cc2];
1988  }
1989  cap += 3;
1990  }
1991  }
1992 
1993  mpeg_set_cc_format(avctx, CC_FORMAT_SCTE20, "SCTE-20");
1994  }
1995  return 1;
1996  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVD) &&
1997  buf_size >= 11 &&
1998  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
1999  /* extract DVD CC data
2000  *
2001  * uint32_t user_data_start_code 0x000001B2 (big endian)
2002  * uint16_t user_identifier 0x4343 "CC"
2003  * uint8_t user_data_type_code 0x01
2004  * uint8_t caption_block_size 0xF8
2005  * uint8_t
2006  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2007  * bit 6 caption_filler 0
2008  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2009  * bit 0 caption_extra_field_added 1=one additional caption word
2010  *
2011  * struct caption_field_block {
2012  * uint8_t
2013  * bit 7:1 caption_filler 0x7F (all 1s)
2014  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2015  * uint8_t caption_first_byte
2016  * uint8_t caption_second_byte
2017  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2018  *
2019  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2020  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2021  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2022  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2023  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2024  int cc_count = 0;
2025  int i, ret;
2026  // There is a caption count field in the data, but it is often
2027  // incorrect. So count the number of captions present.
2028  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2029  cc_count++;
2030  // Transform the DVD format into A53 Part 4 format
2031  if (cc_count > 0) {
2032  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2033  const uint64_t new_size = (old_size + cc_count
2034  * UINT64_C(6));
2035  if (new_size > 3*A53_MAX_CC_COUNT)
2036  return AVERROR(EINVAL);
2037 
2038  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2039  if (ret >= 0) {
2040  uint8_t field1 = !!(p[4] & 0x80);
2041  uint8_t *cap = s1->a53_buf_ref->data + old_size;
2042  p += 5;
2043  for (i = 0; i < cc_count; i++) {
2044  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2045  cap[1] = p[1];
2046  cap[2] = p[2];
2047  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2048  cap[4] = p[4];
2049  cap[5] = p[5];
2050  cap += 6;
2051  p += 6;
2052  }
2053  }
2054 
2055  mpeg_set_cc_format(avctx, CC_FORMAT_DVD, "DVD");
2056  }
2057  return 1;
2058  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DISH) &&
2059  buf_size >= 12 &&
2060  p[0] == 0x05 && p[1] == 0x02) {
2061  /* extract Dish Network CC data */
2062  const uint8_t cc_header = 0xf8 | 0x04 /* valid */ | 0x00 /* line 21 field 1 */;
2063  uint8_t cc_data[4] = {0};
2064  int cc_count = 0;
2065  uint8_t cc_type = p[7];
2066  p += 8;
2067  buf_size -= 8;
2068 
2069  if (cc_type == 0x05 && buf_size >= 7) {
2070  cc_type = p[6];
2071  p += 7;
2072  buf_size -= 7;
2073  }
2074 
2075  if (cc_type == 0x02 && buf_size >= 4) { /* 2-byte caption, can be repeated */
2076  cc_count = 1;
2077  cc_data[0] = p[1];
2078  cc_data[1] = p[2];
2079  cc_type = p[3];
2080 
2081  /* Only repeat characters when the next type flag
2082  * is 0x04 and the characters are repeatable (i.e., less than
2083  * 32 with the parity stripped).
2084  */
2085  if (cc_type == 0x04 && (cc_data[0] & 0x7f) < 32) {
2086  cc_count = 2;
2087  cc_data[2] = cc_data[0];
2088  cc_data[3] = cc_data[1];
2089  }
2090  } else if (cc_type == 0x04 && buf_size >= 5) { /* 4-byte caption, not repeated */
2091  cc_count = 2;
2092  cc_data[0] = p[1];
2093  cc_data[1] = p[2];
2094  cc_data[2] = p[3];
2095  cc_data[3] = p[4];
2096  }
2097 
2098  if (cc_count > 0) {
2099  int ret;
2100  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2101  const uint64_t new_size = (old_size + cc_count * UINT64_C(3));
2102  if (new_size > 3 * A53_MAX_CC_COUNT)
2103  return AVERROR(EINVAL);
2104 
2105  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2106  if (ret >= 0) {
2107  uint8_t *cap = s1->a53_buf_ref->data + old_size;
2108  cap[0] = cc_header;
2109  cap[1] = cc_data[0];
2110  cap[2] = cc_data[1];
2111  if (cc_count == 2) {
2112  cap[3] = cc_header;
2113  cap[4] = cc_data[2];
2114  cap[5] = cc_data[3];
2115  }
2116  }
2117 
2118  mpeg_set_cc_format(avctx, CC_FORMAT_DISH, "Dish Network");
2119  }
2120  return 1;
2121  }
2122  return 0;
2123 }
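/* Illustrative sketch (hypothetical helper, not decoder API): the first
 * branch above recognizes the ATSC A/53 Part 4 wrapper "GA94" with
 * user_data_type_code 3 and a set process_cc_data_flag, then copies
 * cc_count 3-byte constructs starting at offset 7.  The standalone check
 * below mirrors that header test and payload-size validation. */
#include <stddef.h>
#include <stdint.h>

static int count_a53_cc_triplets(const uint8_t *p, size_t size)
{
    if (size < 6 || p[0] != 'G' || p[1] != 'A' || p[2] != '9' || p[3] != '4' ||
        p[4] != 3 || !(p[5] & 0x40))
        return 0;                                  /* not an A/53 CC payload */

    int cc_count = p[5] & 0x1f;                    /* 5-bit cc_count */
    if (cc_count <= 0 || size < 7 + (size_t)cc_count * 3)
        return 0;                                  /* truncated payload */
    return cc_count;                               /* triplets start at p + 7 */
}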
2124 
2125 static void mpeg_decode_user_data(AVCodecContext *avctx,
2126  const uint8_t *p, int buf_size)
2127 {
2128  Mpeg1Context *s = avctx->priv_data;
2129  const uint8_t *buf_end = p + buf_size;
2130  Mpeg1Context *s1 = avctx->priv_data;
2131 
2132 #if 0
2133  int i;
2134  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2135  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2136  }
2137  av_log(avctx, AV_LOG_ERROR, "\n");
2138 #endif
2139 
2140  if (buf_size > 29){
2141  int i;
2142  for(i=0; i<20; i++)
2143  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2144  s->tmpgexs= 1;
2145  }
2146  }
2147  /* we parse the DTG active format information */
2148  if (buf_end - p >= 5 &&
2149  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2150  int flags = p[4];
2151  p += 5;
2152  if (flags & 0x80) {
2153  /* skip event id */
2154  p += 2;
2155  }
2156  if (flags & 0x40) {
2157  if (buf_end - p < 1)
2158  return;
2159  s1->has_afd = 1;
2160  s1->afd = p[0] & 0x0f;
2161  }
2162  } else if (buf_end - p >= 6 &&
2163  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2164  p[4] == 0x03) { // S3D_video_format_length
2165  // the 0x7F mask ignores the reserved_bit value
2166  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2167 
2168  if (S3D_video_format_type == 0x03 ||
2169  S3D_video_format_type == 0x04 ||
2170  S3D_video_format_type == 0x08 ||
2171  S3D_video_format_type == 0x23) {
2172 
2173  s1->has_stereo3d = 1;
2174 
2175  switch (S3D_video_format_type) {
2176  case 0x03:
2177  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE;
2178  break;
2179  case 0x04:
2180  s1->stereo3d_type = AV_STEREO3D_TOPBOTTOM;
2181  break;
2182  case 0x08:
2183  s1->stereo3d_type = AV_STEREO3D_2D;
2184  break;
2185  case 0x23:
2186  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2187  break;
2188  }
2189  }
2190  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2191  return;
2192  }
2193 }
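/* Illustrative sketch (hypothetical helper, not decoder API): the "DTG1"
 * branch above extracts the 4-bit active format descriptor.  The parser
 * below repeats the same walk: an optional 2-byte event id when bit 7 of
 * the flags byte is set, then the AFD byte when bit 6 is set. */
#include <stdint.h>
#include <string.h>

static int parse_dtg1_afd(const uint8_t *p, int size)
{
    const uint8_t *end = p + size;

    if (size < 5 || memcmp(p, "DTG1", 4))
        return -1;
    int flags = p[4];
    p += 5;
    if (flags & 0x80)              /* skip the 2-byte event id */
        p += 2;
    if (!(flags & 0x40) || end - p < 1)
        return -1;                 /* no AFD present */
    return p[0] & 0x0f;            /* active_format */
}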
2194 
2195 static int mpeg_decode_gop(AVCodecContext *avctx,
2196  const uint8_t *buf, int buf_size)
2197 {
2198  Mpeg1Context *s1 = avctx->priv_data;
2199  MpegEncContext *s = &s1->mpeg_enc_ctx;
2200  int broken_link;
2201  int64_t tc;
2202 
2203  int ret = init_get_bits8(&s->gb, buf, buf_size);
2204  if (ret < 0)
2205  return ret;
2206 
2207  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2208 
2209  s1->closed_gop = get_bits1(&s->gb);
2210  /* broken_link indicates that, after editing, the reference
2211  * frames of the first B-frames following the GOP I-frame
2212  * are missing (open GOP) */
2213  broken_link = get_bits1(&s->gb);
2214 
2215  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2216  char tcbuf[AV_TIMECODE_STR_SIZE];
2217  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2218  av_log(s->avctx, AV_LOG_DEBUG,
2219  "GOP (%s) closed_gop=%d broken_link=%d\n",
2220  tcbuf, s1->closed_gop, broken_link);
2221  }
2222 
2223  return 0;
2224 }
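/* Illustrative sketch (hypothetical helper, not decoder API): the 25 bits
 * read above pack the GOP header timecode as drop_frame_flag(1), hours(5),
 * minutes(6), marker_bit(1), seconds(6), pictures(6); this is the layout
 * av_timecode_make_mpeg_tc_string() expects.  The function below splits the
 * packed value into its fields. */
#include <stdint.h>

static void unpack_gop_timecode(uint32_t tc, int *drop,
                                int *hh, int *mm, int *ss, int *ff)
{
    *drop = (tc >> 24) & 0x01;
    *hh   = (tc >> 19) & 0x1f;
    *mm   = (tc >> 13) & 0x3f;
    /* bit 12 is the marker bit */
    *ss   = (tc >>  6) & 0x3f;
    *ff   =  tc        & 0x3f;
}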
2225 
2226 static void mpeg12_execute_slice_threads(AVCodecContext *avctx,
2227  Mpeg1Context *const s)
2228 {
2229  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2230  !avctx->hwaccel) {
2231  MpegEncContext *const s2 = &s->mpeg_enc_ctx;
2232  int error_count = 0;
2233 
2234  avctx->execute(avctx, slice_decode_thread,
2235  s2->thread_context, NULL,
2236  s->slice_count, sizeof(void *));
2237 
2238  for (int i = 0; i < s->slice_count; i++) {
2239  MpegEncContext *const slice = s2->thread_context[i];
2240  int slice_err = atomic_load_explicit(&slice->er.error_count,
2241  memory_order_relaxed);
2242  // error_count can get set to INT_MAX on serious errors.
2243  // So use saturated addition.
2244  if ((unsigned)slice_err > INT_MAX - error_count) {
2245  error_count = INT_MAX;
2246  break;
2247  }
2248  error_count += slice_err;
2249  }
2250  atomic_store_explicit(&s2->er.error_count, error_count,
2251  memory_order_relaxed);
2252  }
2253 }
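/* Illustrative sketch (hypothetical helper, not decoder API): the loop
 * above merges per-slice error counts with saturating addition so a slice
 * that reports INT_MAX cannot overflow the running total.  The function
 * below isolates that arithmetic. */
#include <limits.h>

static int saturated_add_error_count(int total, int slice_err)
{
    if ((unsigned)slice_err > (unsigned)(INT_MAX - total))
        return INT_MAX;            /* clamp instead of wrapping */
    return total + slice_err;
}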
2254 
2255 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2256  int *got_output, const uint8_t *buf, int buf_size)
2257 {
2258  Mpeg1Context *s = avctx->priv_data;
2259  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2260  const uint8_t *buf_ptr = buf;
2261  const uint8_t *buf_end = buf + buf_size;
2262  int ret, input_size;
2263  int last_code = 0, skip_frame = 0;
2264  int picture_start_code_seen = 0;
2265 
2266  for (;;) {
2267  /* find next start code */
2268  uint32_t start_code = -1;
2269  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2270  if (start_code > 0x1ff) {
2271  if (!skip_frame) {
2272  mpeg12_execute_slice_threads(avctx, s);
2273 
2274  ret = slice_end(avctx, picture, got_output);
2275  if (ret < 0)
2276  return ret;
2277  }
2278  s2->pict_type = 0;
2279 
2280  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2281  return AVERROR_INVALIDDATA;
2282 
2283  return FFMAX(0, buf_ptr - buf);
2284  }
2285 
2286  input_size = buf_end - buf_ptr;
2287 
2288  if (avctx->debug & FF_DEBUG_STARTCODE)
2289  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2290  start_code, buf_ptr - buf, input_size);
2291 
2292  /* prepare data for next start code */
2293  switch (start_code) {
2294  case SEQ_START_CODE:
2295  if (last_code == 0) {
2296  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2297  if (buf != avctx->extradata)
2298  s->sync = 1;
2299  } else {
2300  av_log(avctx, AV_LOG_ERROR,
2301  "ignoring SEQ_START_CODE after %X\n", last_code);
2302  if (avctx->err_recognition & AV_EF_EXPLODE)
2303  return AVERROR_INVALIDDATA;
2304  }
2305  break;
2306 
2307  case PICTURE_START_CODE:
2308  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2309  /* If it's a frame picture, there can't be more than one picture header.
2310  Yet, it does happen and we need to handle it. */
2311  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2312  break;
2313  }
2314  picture_start_code_seen = 1;
2315 
2316  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2317  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2318  break;
2319  }
2320 
2321  if (s2->width <= 0 || s2->height <= 0) {
2322  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2323  s2->width, s2->height);
2324  return AVERROR_INVALIDDATA;
2325  }
2326 
2327  if (s->tmpgexs){
2328  s2->intra_dc_precision= 3;
2329  s2->intra_matrix[0]= 1;
2330  }
2331  if (s->slice_count) {
2332  mpeg12_execute_slice_threads(avctx, s);
2333  s->slice_count = 0;
2334  }
2335  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2336  ret = mpeg_decode_postinit(avctx);
2337  if (ret < 0) {
2338  av_log(avctx, AV_LOG_ERROR,
2339  "mpeg_decode_postinit() failure\n");
2340  return ret;
2341  }
2342 
2343  /* We have a complete image: we try to decompress it. */
2344  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2345  s2->pict_type = 0;
2346  s->first_slice = 1;
2347  last_code = PICTURE_START_CODE;
2348  } else {
2349  av_log(avctx, AV_LOG_ERROR,
2350  "ignoring pic after %X\n", last_code);
2351  if (avctx->err_recognition & AV_EF_EXPLODE)
2352  return AVERROR_INVALIDDATA;
2353  }
2354  break;
2355  case EXT_START_CODE:
2356  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2357  if (ret < 0)
2358  return ret;
2359 
2360  switch (get_bits(&s2->gb, 4)) {
2361  case 0x1:
2362  if (last_code == 0) {
2363  mpeg_decode_sequence_extension(s);
2364  } else {
2365  av_log(avctx, AV_LOG_ERROR,
2366  "ignoring seq ext after %X\n", last_code);
2367  if (avctx->err_recognition & AV_EF_EXPLODE)
2368  return AVERROR_INVALIDDATA;
2369  }
2370  break;
2371  case 0x2:
2372  mpeg_decode_sequence_display_extension(s);
2373  break;
2374  case 0x3:
2375  mpeg_decode_quant_matrix_extension(s2);
2376  break;
2377  case 0x7:
2378  mpeg_decode_picture_display_extension(s);
2379  break;
2380  case 0x8:
2381  if (last_code == PICTURE_START_CODE) {
2382  ret = mpeg_decode_picture_coding_extension(s);
2383  if (ret < 0)
2384  return ret;
2385  } else {
2386  av_log(avctx, AV_LOG_ERROR,
2387  "ignoring pic cod ext after %X\n", last_code);
2388  if (avctx->err_recognition & AV_EF_EXPLODE)
2389  return AVERROR_INVALIDDATA;
2390  }
2391  break;
2392  }
2393  break;
2394  case USER_START_CODE:
2395  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2396  break;
2397  case GOP_START_CODE:
2398  if (last_code == 0) {
2399  s2->first_field = 0;
2400  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2401  if (ret < 0)
2402  return ret;
2403  s->sync = 1;
2404  } else {
2405  av_log(avctx, AV_LOG_ERROR,
2406  "ignoring GOP_START_CODE after %X\n", last_code);
2407  if (avctx->err_recognition & AV_EF_EXPLODE)
2408  return AVERROR_INVALIDDATA;
2409  }
2410  break;
2411  default:
2412  if (start_code >= SLICE_MIN_START_CODE &&
2413  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2414  if (s2->progressive_sequence && !s2->progressive_frame) {
2415  s2->progressive_frame = 1;
2416  av_log(s2->avctx, AV_LOG_ERROR,
2417  "interlaced frame in progressive sequence, ignoring\n");
2418  }
2419 
2420  if (s2->picture_structure == 0 ||
2421  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2422  av_log(s2->avctx, AV_LOG_ERROR,
2423  "picture_structure %d invalid, ignoring\n",
2424  s2->picture_structure);
2425  s2->picture_structure = PICT_FRAME;
2426  }
2427 
2428  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2429  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2430 
2431  if (s2->picture_structure == PICT_FRAME) {
2432  s2->first_field = 0;
2433  s2->v_edge_pos = 16 * s2->mb_height;
2434  } else {
2435  s2->first_field ^= 1;
2436  s2->v_edge_pos = 8 * s2->mb_height;
2437  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2438  }
2439  }
2440  if (start_code >= SLICE_MIN_START_CODE &&
2441  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2442  const int field_pic = s2->picture_structure != PICT_FRAME;
2443  int mb_y = start_code - SLICE_MIN_START_CODE;
2444  last_code = SLICE_MIN_START_CODE;
2445  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2446  mb_y += (*buf_ptr&0xE0)<<2;
2447 
2448  mb_y <<= field_pic;
2449  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2450  mb_y++;
2451 
2452  if (buf_end - buf_ptr < 2) {
2453  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2454  return AVERROR_INVALIDDATA;
2455  }
2456 
2457  if (mb_y >= s2->mb_height) {
2458  av_log(s2->avctx, AV_LOG_ERROR,
2459  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2460  return AVERROR_INVALIDDATA;
2461  }
2462 
2463  if (!s2->last_pic.ptr) {
2464  /* Skip B-frames if we do not have reference frames and
2465  * GOP is not closed. */
2466  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2467  if (!s->closed_gop) {
2468  skip_frame = 1;
2469  av_log(s2->avctx, AV_LOG_DEBUG,
2470  "Skipping B slice due to open GOP\n");
2471  break;
2472  }
2473  }
2474  }
2475  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2476  s->sync = 1;
2477  if (!s2->next_pic.ptr) {
2478  /* Skip P-frames if we do not have a reference frame or
2479  * we have an invalid header. */
2480  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2481  skip_frame = 1;
2482  av_log(s2->avctx, AV_LOG_DEBUG,
2483  "Skipping P slice due to !sync\n");
2484  break;
2485  }
2486  }
2487  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2488  s2->pict_type == AV_PICTURE_TYPE_B) ||
2489  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2490  s2->pict_type != AV_PICTURE_TYPE_I) ||
2491  avctx->skip_frame >= AVDISCARD_ALL) {
2492  skip_frame = 1;
2493  break;
2494  }
2495 
2496  if (!s2->context_initialized)
2497  break;
2498 
2499  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2500  if (mb_y < avctx->skip_top ||
2501  mb_y >= s2->mb_height - avctx->skip_bottom)
2502  break;
2503  }
2504 
2505  if (!s2->pict_type) {
2506  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2507  if (avctx->err_recognition & AV_EF_EXPLODE)
2508  return AVERROR_INVALIDDATA;
2509  break;
2510  }
2511 
2512  if (s->first_slice) {
2513  skip_frame = 0;
2514  s->first_slice = 0;
2515  if ((ret = mpeg_field_start(s, buf, buf_size)) < 0)
2516  return ret;
2517  }
2518  if (!s2->cur_pic.ptr) {
2519  av_log(avctx, AV_LOG_ERROR,
2520  "current_picture not initialized\n");
2521  return AVERROR_INVALIDDATA;
2522  }
2523 
2524  if (HAVE_THREADS &&
2525  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2526  !avctx->hwaccel) {
2527  int threshold = (s2->mb_height * s->slice_count +
2528  s2->slice_context_count / 2) /
2529  s2->slice_context_count;
2530  if (threshold <= mb_y) {
2531  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2532 
2533  thread_context->start_mb_y = mb_y;
2534  thread_context->end_mb_y = s2->mb_height;
2535  if (s->slice_count) {
2536  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2537  ret = ff_update_duplicate_context(thread_context, s2);
2538  if (ret < 0)
2539  return ret;
2540  }
2541  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2542  if (ret < 0)
2543  return ret;
2544  s->slice_count++;
2545  }
2546  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2547  } else {
2548  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2549  emms_c();
2550 
2551  if (ret < 0) {
2552  if (avctx->err_recognition & AV_EF_EXPLODE)
2553  return ret;
2554  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2555  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2556  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2557  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2558  } else {
2559  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2560  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2561  ER_AC_END | ER_DC_END | ER_MV_END);
2562  }
2563  }
2564  }
2565  break;
2566  }
2567  }
2568 }
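/* Illustrative sketch (hypothetical helper, not decoder API): decode_chunks()
 * walks the packet from one MPEG start code (00 00 01 xx) to the next using
 * avpriv_find_start_code(), which keeps state across calls.  The simplified
 * stateless scanner below shows the pattern being searched for. */
#include <stdint.h>

static const uint8_t *find_next_start_code(const uint8_t *p, const uint8_t *end,
                                           uint32_t *start_code)
{
    for (; end - p >= 4; p++) {
        if (p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x01) {
            *start_code = 0x100 | p[3];  /* e.g. 0x1B3 for a sequence header */
            return p + 4;                /* continue parsing after the code */
        }
    }
    *start_code = (uint32_t)-1;          /* no complete start code left */
    return NULL;
}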
2569 
2570 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2571  int *got_output, AVPacket *avpkt)
2572 {
2573  const uint8_t *buf = avpkt->data;
2574  int ret;
2575  int buf_size = avpkt->size;
2576  Mpeg1Context *s = avctx->priv_data;
2577  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2578 
2579  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2580  /* special case for last picture */
2581  if (s2->low_delay == 0 && s2->next_pic.ptr) {
2582  int ret = av_frame_ref(picture, s2->next_pic.ptr->f);
2583  if (ret < 0)
2584  return ret;
2585 
2586  ff_mpv_unref_picture(&s2->next_pic);
2587 
2588  *got_output = 1;
2589  }
2590  return buf_size;
2591  }
2592 
2593  if (!s2->context_initialized &&
2594  (s2->codec_tag == AV_RL32("VCR2") || s2->codec_tag == AV_RL32("BW10")))
2595  vcr2_init_sequence(avctx);
2596 
2597  s->slice_count = 0;
2598 
2599  if (avctx->extradata && !s->extradata_decoded) {
2600  ret = decode_chunks(avctx, picture, got_output,
2601  avctx->extradata, avctx->extradata_size);
2602  if (*got_output) {
2603  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2604  av_frame_unref(picture);
2605  *got_output = 0;
2606  }
2607  s->extradata_decoded = 1;
2608  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2610  return ret;
2611  }
2612  }
2613 
2614  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2615  if (ret<0 || *got_output) {
2617 
2618  if (s->timecode_frame_start != -1 && *got_output) {
2619  char tcbuf[AV_TIMECODE_STR_SIZE];
2620  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2621  AV_FRAME_DATA_GOP_TIMECODE,
2622  sizeof(int64_t));
2623  if (!tcside)
2624  return AVERROR(ENOMEM);
2625  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2626 
2627  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2628  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2629 
2630  s->timecode_frame_start = -1;
2631  }
2632  }
2633 
2634  return ret;
2635 }
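/* Illustrative sketch (hypothetical helper, not decoder API): the "last
 * picture" path above triggers when the packet is empty or consists of a
 * single sequence_end_code, i.e. the four bytes 00 00 01 B7.  The predicate
 * below restates that check without the AV_RB32 macro. */
#include <stdint.h>

static int is_sequence_end_packet(const uint8_t *buf, int size)
{
    return size == 4 &&
           buf[0] == 0x00 && buf[1] == 0x00 &&
           buf[2] == 0x01 && buf[3] == 0xb7;
}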
2636 
2637 static av_cold void flush(AVCodecContext *avctx)
2638 {
2639  Mpeg1Context *s = avctx->priv_data;
2640 
2641  s->sync = 0;
2642  s->closed_gop = 0;
2643 
2644  av_buffer_unref(&s->a53_buf_ref);
2645  ff_mpeg_flush(avctx);
2646 }
2647 
2648 static av_cold int mpeg_decode_close(AVCodecContext *avctx)
2649 {
2650  Mpeg1Context *s = avctx->priv_data;
2651 
2652  av_buffer_unref(&s->a53_buf_ref);
2653  return ff_mpv_decode_close(avctx);
2654 }
2655 
2656 const FFCodec ff_mpeg1video_decoder = {
2657  .p.name = "mpeg1video",
2658  CODEC_LONG_NAME("MPEG-1 video"),
2659  .p.type = AVMEDIA_TYPE_VIDEO,
2660  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2661  .priv_data_size = sizeof(Mpeg1Context),
2665  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2667  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2668  .flush = flush,
2669  .p.max_lowres = 3,
2670  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2671  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2672 #if CONFIG_MPEG1_NVDEC_HWACCEL
2673  HWACCEL_NVDEC(mpeg1),
2674 #endif
2675 #if CONFIG_MPEG1_VDPAU_HWACCEL
2676  HWACCEL_VDPAU(mpeg1),
2677 #endif
2678 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2679  HWACCEL_VIDEOTOOLBOX(mpeg1),
2680 #endif
2681  NULL
2682  },
2683 };
2684 
2685 #define M2V_OFFSET(x) offsetof(Mpeg1Context, x)
2686 #define M2V_PARAM AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2687 
2688 static const AVOption mpeg2video_options[] = {
2689  { "cc_format", "extract a specific Closed Captions format",
2690  M2V_OFFSET(cc_format), AV_OPT_TYPE_INT, { .i64 = CC_FORMAT_AUTO },
2691  CC_FORMAT_AUTO, CC_FORMAT_DISH, M2V_PARAM, .unit = "cc_format" },
2692 
2693  { "auto", "pick first seen CC substream", 0, AV_OPT_TYPE_CONST,
2694  { .i64 = CC_FORMAT_AUTO }, .flags = M2V_PARAM, .unit = "cc_format" },
2695  { "a53", "pick A/53 Part 4 CC substream", 0, AV_OPT_TYPE_CONST,
2696  { .i64 = CC_FORMAT_A53_PART4 }, .flags = M2V_PARAM, .unit = "cc_format" },
2697  { "scte20", "pick SCTE-20 CC substream", 0, AV_OPT_TYPE_CONST,
2698  { .i64 = CC_FORMAT_SCTE20 }, .flags = M2V_PARAM, .unit = "cc_format" },
2699  { "dvd", "pick DVD CC substream", 0, AV_OPT_TYPE_CONST,
2700  { .i64 = CC_FORMAT_DVD }, .flags = M2V_PARAM, .unit = "cc_format" },
2701  { "dish", "pick Dish Network CC substream", 0, AV_OPT_TYPE_CONST,
2702  { .i64 = CC_FORMAT_DISH }, .flags = M2V_PARAM, .unit = "cc_format" },
2703  { NULL }
2704 };
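/* Usage sketch (assumption: standard libavutil/libavcodec API, not code from
 * this file): cc_format is a decoder-private AVOption, so an application can
 * pin the extracted CC substream before opening the decoder, using
 * AV_OPT_SEARCH_CHILDREN so the option is found on priv_data.  On the ffmpeg
 * command line the equivalent would be an input option such as
 * "-cc_format dvd" placed before -i. */
#include "avcodec.h"
#include "libavutil/opt.h"

static int select_dvd_captions(AVCodecContext *avctx)
{
    /* error handling left to the caller; returns 0 on success */
    return av_opt_set(avctx, "cc_format", "dvd", AV_OPT_SEARCH_CHILDREN);
}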
2705 
2706 static const AVClass mpeg2video_class = {
2707  .class_name = "MPEG-2 video",
2708  .item_name = av_default_item_name,
2709  .option = mpeg2video_options,
2710  .version = LIBAVUTIL_VERSION_INT,
2711  .category = AV_CLASS_CATEGORY_DECODER,
2712 };
2713 
2714 const FFCodec ff_mpeg2video_decoder = {
2715  .p.name = "mpeg2video",
2716  CODEC_LONG_NAME("MPEG-2 video"),
2717  .p.type = AVMEDIA_TYPE_VIDEO,
2718  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2719  .p.priv_class = &mpeg2video_class,
2720  .priv_data_size = sizeof(Mpeg1Context),
2724  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2726  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2727  .flush = flush,
2728  .p.max_lowres = 3,
2730  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2731 #if CONFIG_MPEG2_DXVA2_HWACCEL
2732  HWACCEL_DXVA2(mpeg2),
2733 #endif
2734 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2735  HWACCEL_D3D11VA(mpeg2),
2736 #endif
2737 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2738  HWACCEL_D3D11VA2(mpeg2),
2739 #endif
2740 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2741  HWACCEL_D3D12VA(mpeg2),
2742 #endif
2743 #if CONFIG_MPEG2_NVDEC_HWACCEL
2744  HWACCEL_NVDEC(mpeg2),
2745 #endif
2746 #if CONFIG_MPEG2_VAAPI_HWACCEL
2747  HWACCEL_VAAPI(mpeg2),
2748 #endif
2749 #if CONFIG_MPEG2_VDPAU_HWACCEL
2750  HWACCEL_VDPAU(mpeg2),
2751 #endif
2752 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2753  HWACCEL_VIDEOTOOLBOX(mpeg2),
2754 #endif
2755  NULL
2756  },
2757 };
2758 
2759 //legacy decoder
2760 const FFCodec ff_mpegvideo_decoder = {
2761  .p.name = "mpegvideo",
2762  CODEC_LONG_NAME("MPEG-1 video"),
2763  .p.type = AVMEDIA_TYPE_VIDEO,
2764  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2765  .priv_data_size = sizeof(Mpeg1Context),
2769  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2771  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2772  .flush = flush,
2773  .p.max_lowres = 3,
2774 };
2775 
2776 typedef struct IPUContext {
2777  MpegEncContext m;
2778 
2779  int flags;
2780  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2781 } IPUContext;
2782 
2783 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2784  int *got_frame, AVPacket *avpkt)
2785 {
2786  IPUContext *s = avctx->priv_data;
2787  MpegEncContext *m = &s->m;
2788  GetBitContext *gb = &m->gb;
2789  int ret;
2790 
2791  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
2792  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6))
2793  return AVERROR_INVALIDDATA;
2794 
2795  ret = ff_get_buffer(avctx, frame, 0);
2796  if (ret < 0)
2797  return ret;
2798 
2799  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2800  if (ret < 0)
2801  return ret;
2802 
2803  s->flags = get_bits(gb, 8);
2804  m->intra_dc_precision = s->flags & 3;
2805  m->q_scale_type = !!(s->flags & 0x40);
2806  m->intra_vlc_format = !!(s->flags & 0x20);
2807  m->alternate_scan = !!(s->flags & 0x10);
2808 
2809  ff_permute_scantable(m->intra_scantable.permutated,
2810  s->flags & 0x10 ? ff_alternate_vertical_scan : ff_zigzag_direct,
2811  m->idsp.idct_permutation);
2812 
2813  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2814  m->qscale = 1;
2815 
2816  for (int y = 0; y < avctx->height; y += 16) {
2817  int intraquant;
2818 
2819  for (int x = 0; x < avctx->width; x += 16) {
2820  if (x || y) {
2821  if (!get_bits1(gb))
2822  return AVERROR_INVALIDDATA;
2823  }
2824  if (get_bits1(gb)) {
2825  intraquant = 0;
2826  } else {
2827  if (!get_bits1(gb))
2828  return AVERROR_INVALIDDATA;
2829  intraquant = 1;
2830  }
2831 
2832  if (s->flags & 4)
2833  skip_bits1(gb);
2834 
2835  if (intraquant)
2836  m->qscale = mpeg_get_qscale(m);
2837 
2838  memset(s->block, 0, sizeof(s->block));
2839 
2840  for (int n = 0; n < 6; n++) {
2841  if (s->flags & 0x80) {
2843  m->intra_matrix,
2845  m->last_dc, s->block[n],
2846  n, m->qscale);
2847  } else {
2848  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2849  }
2850 
2851  if (ret < 0)
2852  return ret;
2853  }
2854 
2855  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2856  frame->linesize[0], s->block[0]);
2857  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2858  frame->linesize[0], s->block[1]);
2859  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2860  frame->linesize[0], s->block[2]);
2861  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2862  frame->linesize[0], s->block[3]);
2863  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2864  frame->linesize[1], s->block[4]);
2865  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2866  frame->linesize[2], s->block[5]);
2867  }
2868  }
2869 
2870  align_get_bits(gb);
2871  if (get_bits_left(gb) != 32)
2872  return AVERROR_INVALIDDATA;
2873 
2874  *got_frame = 1;
2875 
2876  return avpkt->size;
2877 }
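/* Illustrative sketch (hypothetical helper, not decoder API): the six
 * idct_put() calls above place one 4:2:0 macroblock; blocks 0-3 are the four
 * 8x8 luma blocks of a 16x16 area and blocks 4/5 are the 8x8 Cb/Cr blocks at
 * half resolution.  The function below computes the destination offsets used
 * above, assuming Cb and Cr share the same stride. */
#include <stddef.h>

static void mb_420_offsets(int x, int y, ptrdiff_t luma_stride,
                           ptrdiff_t chroma_stride, ptrdiff_t off[6])
{
    off[0] = (ptrdiff_t)y * luma_stride + x;                 /* top-left luma */
    off[1] = off[0] + 8;                                     /* top-right luma */
    off[2] = (ptrdiff_t)(y + 8) * luma_stride + x;           /* bottom-left luma */
    off[3] = off[2] + 8;                                     /* bottom-right luma */
    off[4] = (ptrdiff_t)(y >> 1) * chroma_stride + (x >> 1); /* Cb */
    off[5] = off[4];                                         /* Cr, on its own plane */
}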
2878 
2879 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2880 {
2881  IPUContext *s = avctx->priv_data;
2882  MpegEncContext *m = &s->m;
2883 
2884  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2885  m->avctx = avctx;
2886 
2887  ff_idctdsp_init(&m->idsp, avctx);
2889 
2890  for (int i = 0; i < 64; i++) {
2891  int j = m->idsp.idct_permutation[i];
2893  m->intra_matrix[j] = v;
2894  m->chroma_intra_matrix[j] = v;
2895  }
2896 
2897  return 0;
2898 }
2899 
2900 const FFCodec ff_ipu_decoder = {
2901  .p.name = "ipu",
2902  CODEC_LONG_NAME("IPU Video"),
2903  .p.type = AVMEDIA_TYPE_VIDEO,
2904  .p.id = AV_CODEC_ID_IPU,
2905  .priv_data_size = sizeof(IPUContext),
2906  .init = ipu_decode_init,
2908  .p.capabilities = AV_CODEC_CAP_DR1,
2909 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1849
flags
const SwsFlags flags[]
Definition: swscale.c:61
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:422
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1408
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:192
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:81
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:293
M2V_OFFSET
#define M2V_OFFSET(x)
Definition: mpeg12dec.c:2685
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:145
level
uint8_t level
Definition: svq3.c:205
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:78
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
AVPanScan::position
int16_t position[3][2]
position of the top left corner in 1/16 pel for up to 3 fields/frames
Definition: defs.h:263
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:478
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1920
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:678
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:347
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:47
AV_CLASS_CATEGORY_DECODER
@ AV_CLASS_CATEGORY_DECODER
Definition: log.h:35
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1200
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2570
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:290
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
MpegEncContext::top_field_first
int top_field_first
Definition: mpegvideo.h:301
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:649
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:126
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:398
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:194
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:242
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1401
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
int64_t
long long int64_t
Definition: coverity.c:34
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:42
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:249
Mpeg1Context::vbv_delay
int vbv_delay
Definition: mpeg12dec.c:93
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2879
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:206
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:444
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2760
AVPacket::data
uint8_t * data
Definition: packet.h:535
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:403
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:89
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:304
AVOption
AVOption.
Definition: opt.h:429
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:147
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:41
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
ff_mpv_framesize_disable
static void ff_mpv_framesize_disable(ScratchpadContext *sc)
Disable allocating the ScratchpadContext's buffers in future calls to ff_mpv_framesize_alloc().
Definition: mpegpicture.h:143
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:552
reverse.h
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:208
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:91
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:33
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:882
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:91
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:31
thread.h
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1378
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:196
CC_FORMAT_DISH
@ CC_FORMAT_DISH
Definition: mpeg12dec.c:70
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:92
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:246
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:364
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:148
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:84
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:78
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:554
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:318
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:401
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1087
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:109
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:142
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:168
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1710
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2255
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1665
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1170
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
MpegEncContext::picture_structure
int picture_structure
Definition: mpegvideo.h:297
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
AVPanScan::width
int width
width and height in 1/16 pel
Definition: defs.h:255
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1657
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:47
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:90
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:854
MpegEncContext::width
int width
Definition: mpegvideo.h:91
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:610
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1760
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:87
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:300
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:2043
IPUContext
Definition: mpeg12dec.c:2776
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:811
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2714
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:87
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2783
HAS_MV
#define HAS_MV(a, dir)
Definition: mpegutils.h:91
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:30
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:822
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:528
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1001
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:85
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:184
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:518
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:700
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
Mpeg1Context::stereo3d_type
enum AVStereo3DType stereo3d_type
Definition: mpeg12dec.c:76
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:896
ff_mpeg_flush
av_cold void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:431
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:137
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:310
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.h:28
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:243
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:49
flush
static av_cold void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2637
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:83
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1089
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:75
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:303
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ctx
AVFormatContext * ctx
Definition: movenc.c:49
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:849
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:224
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1273
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:144
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:278
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:509
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:110
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:262
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:392
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:204
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:672
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:29
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:211
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
mpeg12_execute_slice_threads
static void mpeg12_execute_slice_threads(AVCodecContext *avctx, Mpeg1Context *const s)
Definition: mpeg12dec.c:2226
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:88
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:138
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:771
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:145
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:773
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1113
M2V_PARAM
#define M2V_PARAM
Definition: mpeg12dec.c:2686
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:239
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:371
profiles.h
CC_FORMAT_A53_PART4
@ CC_FORMAT_A53_PART4
Definition: mpeg12dec.c:67
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:230
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:48
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:51
lowres
static int lowres
Definition: ffplay.c:330
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:147
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:335
CC_FORMAT_AUTO
@ CC_FORMAT_AUTO
Definition: mpeg12dec.c:66
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
MpegEncContext::slice_context_count
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:126
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:635
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
close
av_cold void CBS_FUNC() close(CodedBitstreamContext **ctx_ptr)
Close a context and free all internal state.
Definition: cbs.c:140
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1631
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:85
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:178
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:283
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
CC_FORMAT_DVD
@ CC_FORMAT_DVD
Definition: mpeg12dec.c:69
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:220
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:81
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:67
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1608
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
mpeg2video_options
static const AVOption mpeg2video_options[]
Definition: mpeg12dec.c:2688
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:536
dc
Excerpt from the Snow bitstream description (doc/snow.txt) covering the half-pel filter coefficients, range coder and intra DC prediction; the full text is omitted here.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:166
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:310
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
CC_FORMAT_SCTE20
@ CC_FORMAT_SCTE20
Definition: mpeg12dec.c:68
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
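A minimal usage sketch for av_frame_ref()/av_frame_unref(), using only the public libavutil frame API referenced above; error handling is reduced to the essentials and the helper name is illustrative.

#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Create a second reference to the data of an existing frame. */
static int make_frame_ref(AVFrame *src, AVFrame **out)
{
    int ret;
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);       /* dst now shares src's buffers */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    *out = dst;
    return 0;
}
/* Later: av_frame_unref(dst) or av_frame_free(&dst) drops the reference. */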
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:399
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
shift
static int shift(int a, int b)
Definition: bonk.c:261
IPUContext::flags
int flags
Definition: mpeg12dec.c:2779
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:222
mpeg_field_start
static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1242
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:112
Mpeg1Context::bit_rate
int64_t bit_rate
Definition: mpeg12dec.c:94
VLCElem
Definition: vlc.h:32
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2656
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2005
AV_RB32
Read a 32-bit big-endian value; generated from the byte-stream read/write template macros.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:61
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1576
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:423
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1693
AVCodecHWConfigInternal
Definition: hwconfig.h:25
MpegEncContext::mbskip_table
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encoding & decoding (contains skip table of next P-frame)
Definition: mpegvideo.h:158
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:107
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:326
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:45
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:173
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:77
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:767
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:193
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:195
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:396
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
mpeg2video_class
static const AVClass mpeg2video_class
Definition: mpeg12dec.c:2706
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
AVBufferRef::size
size_t size
Size of data in bytes.
Definition: buffer.h:94
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1575
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:116
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:311
emms.h
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:385
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:315
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:309
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Excerpt from the libavfilter design notes (doc/filter_design.txt) on forwarding output status and FFERROR_NOT_READY; the full text is omitted here.
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:360
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:517
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:354
internal.h
mpeg_set_cc_format
static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format, const char *label)
Definition: mpeg12dec.c:1900
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:85
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:211
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:185
atomic_store_explicit
#define atomic_store_explicit(object, desired, order)
Definition: stdatomic.h:90
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1385
MpegEncContext::thread_context
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:123
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
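A hedged sketch of using av_d2q() to turn a floating-point rate into an AVRational; the exact reduced fraction returned depends on the max argument, so the expected value below is indicative rather than guaranteed.

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* Approximate 23.976... fps as a rational with denominator bounded by max. */
    AVRational r = av_d2q(24000.0 / 1001.0, 100000);
    printf("%d/%d\n", r.num, r.den);   /* expected to be close to 24000/1001 */
    return 0;
}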
MB_TYPE_MV_2_MV_DIR
#define MB_TYPE_MV_2_MV_DIR(a)
Definition: mpegutils.h:93
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:135
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2777
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:132
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:304
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:679
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:125
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:341
AVCodecContext::height
int height
Definition: avcodec.h:595
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:634
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:733
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:589
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2195
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:80
frame
Excerpt from the libavfilter design notes (doc/filter_design.txt) on request_frame() handling and queued frames; the full text is omitted here.
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:544
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
Excerpt from doc/multithreading.txt: the pkt_dts and pkt_pts fields in AVFrame work as usual; codecs whose streams do not reset across frames cannot be decoded in parallel; the contents of buffers must not be read before ff_thread_finish_setup() is called.
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
left
Excerpt from the Snow bitstream description (doc/snow.txt) covering neighboring-block selection and motion vector prediction; the full text is omitted here.
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:859
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:351
AV_RL32
Read a 32-bit little-endian value; generated from the byte-stream read/write template macros.
Definition: bytestream.h:92
mpeg12data.h
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:78
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:683
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1583
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:147
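A small sketch, assuming the public libavutil timecode API and its AV_TIMECODE_STR_SIZE buffer size, of rendering the 25-bit GOP timecode (as carried in the AV_FRAME_DATA_GOP_TIMECODE side data) as a string.

#include <stdint.h>
#include <stdio.h>
#include <libavutil/timecode.h>

/* Print the 25-bit MPEG GOP timecode as HH:MM:SS[:;]FF. */
static void print_gop_timecode(uint32_t tc25bit)
{
    char buf[AV_TIMECODE_STR_SIZE];
    printf("GOP timecode: %s\n", av_timecode_make_mpeg_tc_string(buf, tc25bit));
}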
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:299
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1594
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:242
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:772
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1621
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:688
Mpeg1Context::cc_format
enum Mpeg2ClosedCaptionsFormat cc_format
Definition: mpeg12dec.c:79
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:131
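sign_extend() is an internal helper (libavutil/mathops.h), so the following is a self-contained sketch of the same technique rather than the exact FFmpeg implementation. It recovers a signed value from the low bits of an integer, which is how the modulo-decoded motion delta in mpeg_decode_motion() is wrapped back into range.

#include <stdio.h>

/* Sign-extend the low `bits` bits of val (sketch; assumes 0 < bits < 32). */
static int sign_extend_sketch(int val, unsigned bits)
{
    const int sign_bit = 1 << (bits - 1);
    const int mask     = (1 << bits) - 1;
    val &= mask;
    return (val ^ sign_bit) - sign_bit;   /* wrap around the sign bit */
}

int main(void)
{
    /* With fcode = 1 (shift = 0) the motion delta is wrapped into [-16, 15]. */
    printf("%d\n", sign_extend_sketch(17, 5));   /* prints -15 */
    return 0;
}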
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:377
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:82
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
MpegEncContext::resync_mb_x
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:228
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1642
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
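A hedged sketch of the kind of rational arithmetic used when a base frame rate from ff_mpeg12_frame_rate_tab is combined with the MPEG-2 frame_rate_ext modifier; the values below are purely illustrative.

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational base = { 24000, 1001 };     /* e.g. a frame-rate table entry     */
    AVRational ext  = { 2, 1 };            /* illustrative frame_rate_ext value */
    AVRational fps  = av_mul_q(base, ext); /* 48000/1001, returned reduced      */
    printf("%d/%d\n", fps.num, fps.den);
    return 0;
}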
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:143
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1377
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:610
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:389
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2648
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2900
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
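A minimal sketch, assuming a decoded AVFrame is already available, of attaching stereo 3D side data in the way a decoder might when the bitstream signals a top-bottom packing; the helper name is illustrative.

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

static int tag_top_bottom(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;   /* views stacked vertically */
    return 0;
}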
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:34
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture, 0 otherwise
Definition: mpegvideo.h:318
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:303
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:74
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
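FFALIGN() rounds a value up to the next multiple of a power-of-two alignment; a short sketch of typical use (the width below is purely illustrative).

#include <stdio.h>
#include <libavutil/macros.h>

int main(void)
{
    int w = 1918;
    /* Round the width up to a 16-sample macroblock boundary. */
    printf("%d -> %d\n", w, FFALIGN(w, 16));   /* 1918 -> 1920 */
    return 0;
}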
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:188
ff_tlog
#define ff_tlog(a,...)
Definition: tableprint_vlc.h:29
AVPacket
This structure stores compressed data.
Definition: packet.h:512
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:49
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
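A sketch of setting an entry in a metadata dictionary, similar in spirit to how a decoder can record a GOP timecode string on a frame; the key name and helper are illustrative assumptions.

#include <libavutil/dict.h>

static int set_timecode_metadata(AVDictionary **metadata, const char *tc)
{
    /* With flags = 0 both key and value are copied; returns < 0 on failure. */
    return av_dict_set(metadata, "timecode", tc, 0);
}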
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:72
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1046
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:595
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1640
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
AVStereo3DType
AVStereo3DType
List of possible 3D Types.
Definition: stereo3d.h:48
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:86
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:99
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2780
AVPanScan::height
int height
Definition: defs.h:256
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
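MKTAG() builds a fourcc packed LSB-first, matching the codec_tag convention described above ("ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'); a short sketch.

#include <stdio.h>
#include <libavutil/macros.h>

int main(void)
{
    /* "ABCD" packed LSB-first, as described for AVCodecContext::codec_tag. */
    unsigned tag = MKTAG('A', 'B', 'C', 'D');
    printf("0x%08X\n", tag);   /* 0x44434241 */
    return 0;
}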
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:144
MpegEncContext::resync_mb_y
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:229
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2125
h
h
Definition: vp9dsp_template.c:2070
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:121
Mpeg2ClosedCaptionsFormat
Mpeg2ClosedCaptionsFormat
Definition: mpeg12dec.c:65
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:32
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:95
width
#define width
Definition: dsp.h:85
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:120
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:217
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:305
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1372
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1371
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1147
MpegEncContext::codec_id
enum AVCodecID codec_id
Definition: mpegvideo.h:100
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:619
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:49
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:30
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:80
Mpeg1Context
Definition: mpeg12dec.c:73
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:223
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1184
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:92
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:219
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:64
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1380
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:308
MpegEncContext::codec_tag
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:103