FFmpeg
mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mem_internal.h"
38 #include "libavutil/reverse.h"
39 #include "libavutil/stereo3d.h"
40 #include "libavutil/timecode.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "error_resilience.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
63 #define A53_MAX_CC_COUNT 2000
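 /* Each closed-caption construct is 3 bytes, so capping the count at 2000 keeps
  * the accumulated A53 side-data buffer at no more than 6000 bytes. */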
64 
65 typedef struct Mpeg1Context {
66  MpegEncContext mpeg_enc_ctx;
67  int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
68  int repeat_field; /* true if we must repeat the field */
69  AVPanScan pan_scan; /* some temporary storage for the panscan */
70  AVStereo3D stereo3d;
71  int has_stereo3d;
72  AVBufferRef *a53_buf_ref;
73  uint8_t afd;
74  int has_afd;
75  int slice_count;
76  unsigned aspect_ratio_info;
77  AVRational save_aspect;
78  int save_width, save_height, save_progressive_seq;
79  AVRational frame_rate_ext; /* MPEG-2 specific framerate modifier */
80  unsigned frame_rate_index;
81  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
82  int closed_gop;
83  int tmpgexs;
84  int first_slice;
85  int extradata_decoded;
86  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
87 } Mpeg1Context;
88 
89 #define MB_TYPE_ZERO_MV 0x20000000
90 
91 static const uint32_t ptype2mb_type[7] = {
92  MB_TYPE_INTRA,
93  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
94  MB_TYPE_L0,
95  MB_TYPE_L0 | MB_TYPE_CBP,
96  MB_TYPE_QUANT | MB_TYPE_INTRA,
97  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
98  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
99 };
100 
101 static const uint32_t btype2mb_type[11] = {
102  MB_TYPE_INTRA,
103  MB_TYPE_L1,
104  MB_TYPE_L1 | MB_TYPE_CBP,
105  MB_TYPE_L0,
106  MB_TYPE_L0 | MB_TYPE_CBP,
107  MB_TYPE_L0L1,
108  MB_TYPE_L0L1 | MB_TYPE_CBP,
109  MB_TYPE_QUANT | MB_TYPE_INTRA,
110  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
111  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
112  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
113 };
114 
115 /* as H.263, but only 17 codes */
116 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
117 {
118  int code, sign, val, shift;
119 
120  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
121  if (code == 0)
122  return pred;
123  if (code < 0)
124  return 0xffff;
125 
126  sign = get_bits1(&s->gb);
127  shift = fcode - 1;
128  val = code;
129  if (shift) {
130  val = (val - 1) << shift;
131  val |= get_bits(&s->gb, shift);
132  val++;
133  }
134  if (sign)
135  val = -val;
136  val += pred;
137 
138  /* modulo decoding */
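 /* The prediction may push val outside the legal range for this f_code,
  * [-16 << shift, (16 << shift) - 1]; sign_extend() over 5 + shift bits wraps
  * it back into that range, which is the "modulo" decoding the spec requires. */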
139  return sign_extend(val, 5 + shift);
140 }
141 
142 #define MAX_INDEX (64 - 1)
143 #define check_scantable_index(ctx, x) \
144  do { \
145  if ((x) > MAX_INDEX) { \
146  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
147  ctx->mb_x, ctx->mb_y); \
148  return AVERROR_INVALIDDATA; \
149  } \
150  } while (0)
151 
152 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
153  int16_t *block, int n)
154 {
155  int level, i, j, run;
156  uint8_t *const scantable = s->intra_scantable.permutated;
157  const uint16_t *quant_matrix = s->inter_matrix;
158  const int qscale = s->qscale;
159 
160  {
161  OPEN_READER(re, &s->gb);
162  i = -1;
163  // special case for first coefficient, no need to add second VLC table
164  UPDATE_CACHE(re, &s->gb);
165  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
166  level = (3 * qscale * quant_matrix[0]) >> 5;
167  level = (level - 1) | 1;
168  if (GET_CACHE(re, &s->gb) & 0x40000000)
169  level = -level;
170  block[0] = level;
171  i++;
172  SKIP_BITS(re, &s->gb, 2);
173  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
174  goto end;
175  }
176  /* now decode and dequantize the AC coefficients */
177  for (;;) {
178  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
179  TEX_VLC_BITS, 2, 0);
180 
181  if (level != 0) {
182  i += run;
183  if (i > MAX_INDEX)
184  break;
185  j = scantable[i];
186  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
187  level = (level - 1) | 1;
188  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
189  SHOW_SBITS(re, &s->gb, 1);
190  SKIP_BITS(re, &s->gb, 1);
191  } else {
192  /* escape */
193  run = SHOW_UBITS(re, &s->gb, 6) + 1;
194  LAST_SKIP_BITS(re, &s->gb, 6);
195  UPDATE_CACHE(re, &s->gb);
196  level = SHOW_SBITS(re, &s->gb, 8);
197  SKIP_BITS(re, &s->gb, 8);
198  if (level == -128) {
199  level = SHOW_UBITS(re, &s->gb, 8) - 256;
200  SKIP_BITS(re, &s->gb, 8);
201  } else if (level == 0) {
202  level = SHOW_UBITS(re, &s->gb, 8);
203  SKIP_BITS(re, &s->gb, 8);
204  }
205  i += run;
206  if (i > MAX_INDEX)
207  break;
208  j = scantable[i];
209  if (level < 0) {
210  level = -level;
211  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
212  level = (level - 1) | 1;
213  level = -level;
214  } else {
215  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
216  level = (level - 1) | 1;
217  }
218  }
219 
220  block[j] = level;
221  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
222  break;
223  UPDATE_CACHE(re, &s->gb);
224  }
225 end:
226  LAST_SKIP_BITS(re, &s->gb, 2);
227  CLOSE_READER(re, &s->gb);
228  }
229 
230  check_scantable_index(s, i);
231 
232  s->block_last_index[n] = i;
233  return 0;
234 }
235 
236 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
237  int16_t *block, int n)
238 {
239  int level, i, j, run;
240  uint8_t *const scantable = s->intra_scantable.permutated;
241  const uint16_t *quant_matrix;
242  const int qscale = s->qscale;
243  int mismatch;
244 
245  mismatch = 1;
246 
247  {
248  OPEN_READER(re, &s->gb);
249  i = -1;
250  if (n < 4)
251  quant_matrix = s->inter_matrix;
252  else
253  quant_matrix = s->chroma_inter_matrix;
254 
255  // Special case for first coefficient, no need to add second VLC table.
256  UPDATE_CACHE(re, &s->gb);
257  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
258  level = (3 * qscale * quant_matrix[0]) >> 5;
259  if (GET_CACHE(re, &s->gb) & 0x40000000)
260  level = -level;
261  block[0] = level;
262  mismatch ^= level;
263  i++;
264  SKIP_BITS(re, &s->gb, 2);
265  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
266  goto end;
267  }
268 
269  /* now decode and dequantize the AC coefficients */
270  for (;;) {
271  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
272  TEX_VLC_BITS, 2, 0);
273 
274  if (level != 0) {
275  i += run;
276  if (i > MAX_INDEX)
277  break;
278  j = scantable[i];
279  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
280  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
281  SHOW_SBITS(re, &s->gb, 1);
282  SKIP_BITS(re, &s->gb, 1);
283  } else {
284  /* escape */
285  run = SHOW_UBITS(re, &s->gb, 6) + 1;
286  LAST_SKIP_BITS(re, &s->gb, 6);
287  UPDATE_CACHE(re, &s->gb);
288  level = SHOW_SBITS(re, &s->gb, 12);
289  SKIP_BITS(re, &s->gb, 12);
290 
291  i += run;
292  if (i > MAX_INDEX)
293  break;
294  j = scantable[i];
295  if (level < 0) {
296  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
297  level = -level;
298  } else {
299  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
300  }
301  }
302 
303  mismatch ^= level;
304  block[j] = level;
305  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
306  break;
307  UPDATE_CACHE(re, &s->gb);
308  }
309 end:
310  LAST_SKIP_BITS(re, &s->gb, 2);
311  CLOSE_READER(re, &s->gb);
312  }
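 /* MPEG-2 mismatch control (ISO/IEC 13818-2, 7.4.4): the parity of the sum of
  * all reconstructed coefficients is tracked in 'mismatch', and the LSB of
  * coefficient 63 is toggled so the sum ends up odd, which bounds IDCT drift
  * between encoder and decoder. */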
313  block[63] ^= (mismatch & 1);
314 
315  check_scantable_index(s, i);
316 
317  s->block_last_index[n] = i;
318  return 0;
319 }
320 
321 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
322  int16_t *block, int n)
323 {
324  int level, dc, diff, i, j, run;
325  int component;
326  const RL_VLC_ELEM *rl_vlc;
327  uint8_t *const scantable = s->intra_scantable.permutated;
328  const uint16_t *quant_matrix;
329  const int qscale = s->qscale;
330  int mismatch;
331 
332  /* DC coefficient */
333  if (n < 4) {
334  quant_matrix = s->intra_matrix;
335  component = 0;
336  } else {
337  quant_matrix = s->chroma_intra_matrix;
338  component = (n & 1) + 1;
339  }
340  diff = decode_dc(&s->gb, component);
341  dc = s->last_dc[component];
342  dc += diff;
343  s->last_dc[component] = dc;
344  block[0] = dc * (1 << (3 - s->intra_dc_precision));
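 /* intra_dc_precision 0..3 selects 8..11-bit DC; shifting by (3 - precision)
  * rescales the predicted + differential DC to the common 11-bit range used
  * for reconstruction. */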
345  ff_tlog(s->avctx, "dc=%d\n", block[0]);
346  mismatch = block[0] ^ 1;
347  i = 0;
348  if (s->intra_vlc_format)
349  rl_vlc = ff_mpeg2_rl_vlc;
350  else
351  rl_vlc = ff_mpeg1_rl_vlc;
352 
353  {
354  OPEN_READER(re, &s->gb);
355  /* now decode and dequantize the AC coefficients */
356  for (;;) {
357  UPDATE_CACHE(re, &s->gb);
358  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
359  TEX_VLC_BITS, 2, 0);
360 
361  if (level == 127) {
362  break;
363  } else if (level != 0) {
364  i += run;
365  if (i > MAX_INDEX)
366  break;
367  j = scantable[i];
368  level = (level * qscale * quant_matrix[j]) >> 4;
369  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
370  SHOW_SBITS(re, &s->gb, 1);
371  LAST_SKIP_BITS(re, &s->gb, 1);
372  } else {
373  /* escape */
374  run = SHOW_UBITS(re, &s->gb, 6) + 1;
375  SKIP_BITS(re, &s->gb, 6);
376  level = SHOW_SBITS(re, &s->gb, 12);
377  LAST_SKIP_BITS(re, &s->gb, 12);
378  i += run;
379  if (i > MAX_INDEX)
380  break;
381  j = scantable[i];
382  if (level < 0) {
383  level = (-level * qscale * quant_matrix[j]) >> 4;
384  level = -level;
385  } else {
386  level = (level * qscale * quant_matrix[j]) >> 4;
387  }
388  }
389 
390  mismatch ^= level;
391  block[j] = level;
392  }
393  CLOSE_READER(re, &s->gb);
394  }
395  block[63] ^= mismatch & 1;
396 
397  check_scantable_index(s, i);
398 
399  s->block_last_index[n] = i;
400  return 0;
401 }
402 
403 /******************************************/
404 /* decoding */
405 
406 static inline int get_dmv(MpegEncContext *s)
407 {
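 /* Differential motion value for dual-prime prediction: bit pattern '0'
  * decodes to 0, '10' to +1 and '11' to -1. */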
408  if (get_bits1(&s->gb))
409  return 1 - (get_bits1(&s->gb) << 1);
410  else
411  return 0;
412 }
413 
414 /* motion type (for MPEG-2) */
415 #define MT_FIELD 1
416 #define MT_FRAME 2
417 #define MT_16X8 2
418 #define MT_DMV 3
419 
420 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
421 {
422  int i, j, k, cbp, val, mb_type, motion_type;
423  const int mb_block_count = 4 + (1 << s->chroma_format);
424  int ret;
425 
426  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
427 
428  av_assert2(s->mb_skipped == 0);
429 
430  if (s->mb_skip_run-- != 0) {
431  if (s->pict_type == AV_PICTURE_TYPE_P) {
432  s->mb_skipped = 1;
433  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
434  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
435  } else {
436  int mb_type;
437 
438  if (s->mb_x)
439  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
440  else
441  // FIXME not sure if this is allowed in MPEG at all
442  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
443  if (IS_INTRA(mb_type)) {
444  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
445  return AVERROR_INVALIDDATA;
446  }
447  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
448  mb_type | MB_TYPE_SKIP;
449 
450  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
451  s->mb_skipped = 1;
452  }
453 
454  return 0;
455  }
456 
457  switch (s->pict_type) {
458  default:
459  case AV_PICTURE_TYPE_I:
460  if (get_bits1(&s->gb) == 0) {
461  if (get_bits1(&s->gb) == 0) {
462  av_log(s->avctx, AV_LOG_ERROR,
463  "Invalid mb type in I-frame at %d %d\n",
464  s->mb_x, s->mb_y);
465  return AVERROR_INVALIDDATA;
466  }
467  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
468  } else {
469  mb_type = MB_TYPE_INTRA;
470  }
471  break;
472  case AV_PICTURE_TYPE_P:
473  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
474  if (mb_type < 0) {
475  av_log(s->avctx, AV_LOG_ERROR,
476  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
477  return AVERROR_INVALIDDATA;
478  }
479  mb_type = ptype2mb_type[mb_type];
480  break;
481  case AV_PICTURE_TYPE_B:
482  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
483  if (mb_type < 0) {
484  av_log(s->avctx, AV_LOG_ERROR,
485  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
486  return AVERROR_INVALIDDATA;
487  }
488  mb_type = btype2mb_type[mb_type];
489  break;
490  }
491  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
492 // motion_type = 0; /* avoid warning */
493  if (IS_INTRA(mb_type)) {
494  s->bdsp.clear_blocks(s->block[0]);
495 
496  if (!s->chroma_y_shift)
497  s->bdsp.clear_blocks(s->block[6]);
498 
499  /* compute DCT type */
500  // FIXME: add an interlaced_dct coded var?
501  if (s->picture_structure == PICT_FRAME &&
502  !s->frame_pred_frame_dct)
503  s->interlaced_dct = get_bits1(&s->gb);
504 
505  if (IS_QUANT(mb_type))
506  s->qscale = mpeg_get_qscale(s);
507 
508  if (s->concealment_motion_vectors) {
509  /* just parse them */
510  if (s->picture_structure != PICT_FRAME)
511  skip_bits1(&s->gb); /* field select */
512 
513  s->mv[0][0][0] =
514  s->last_mv[0][0][0] =
515  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
516  s->last_mv[0][0][0]);
517  s->mv[0][0][1] =
518  s->last_mv[0][0][1] =
519  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
520  s->last_mv[0][0][1]);
521 
522  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
523  } else {
524  /* reset mv prediction */
525  memset(s->last_mv, 0, sizeof(s->last_mv));
526  }
527  s->mb_intra = 1;
528 
529  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
530  for (i = 0; i < mb_block_count; i++)
531  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
532  return ret;
533  } else {
534  for (i = 0; i < 6; i++) {
535  ret = ff_mpeg1_decode_block_intra(&s->gb,
536  s->intra_matrix,
537  s->intra_scantable.permutated,
538  s->last_dc, *s->pblocks[i],
539  i, s->qscale);
540  if (ret < 0) {
541  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
542  s->mb_x, s->mb_y);
543  return ret;
544  }
545 
546  s->block_last_index[i] = ret;
547  }
548  }
549  } else {
550  if (mb_type & MB_TYPE_ZERO_MV) {
551  av_assert2(mb_type & MB_TYPE_CBP);
552 
553  s->mv_dir = MV_DIR_FORWARD;
554  if (s->picture_structure == PICT_FRAME) {
555  if (s->picture_structure == PICT_FRAME
556  && !s->frame_pred_frame_dct)
557  s->interlaced_dct = get_bits1(&s->gb);
558  s->mv_type = MV_TYPE_16X16;
559  } else {
560  s->mv_type = MV_TYPE_FIELD;
561  mb_type |= MB_TYPE_INTERLACED;
562  s->field_select[0][0] = s->picture_structure - 1;
563  }
564 
565  if (IS_QUANT(mb_type))
566  s->qscale = mpeg_get_qscale(s);
567 
568  s->last_mv[0][0][0] = 0;
569  s->last_mv[0][0][1] = 0;
570  s->last_mv[0][1][0] = 0;
571  s->last_mv[0][1][1] = 0;
572  s->mv[0][0][0] = 0;
573  s->mv[0][0][1] = 0;
574  } else {
575  av_assert2(mb_type & MB_TYPE_L0L1);
576  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
577  /* get additional motion vector type */
578  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
579  motion_type = MT_FRAME;
580  } else {
581  motion_type = get_bits(&s->gb, 2);
582  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
583  s->interlaced_dct = get_bits1(&s->gb);
584  }
585 
586  if (IS_QUANT(mb_type))
587  s->qscale = mpeg_get_qscale(s);
588 
589  /* motion vectors */
590  s->mv_dir = (mb_type >> 13) & 3;
591  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
592  switch (motion_type) {
593  case MT_FRAME: /* or MT_16X8 */
594  if (s->picture_structure == PICT_FRAME) {
595  mb_type |= MB_TYPE_16x16;
596  s->mv_type = MV_TYPE_16X16;
597  for (i = 0; i < 2; i++) {
598  if (USES_LIST(mb_type, i)) {
599  /* MT_FRAME */
600  s->mv[i][0][0] =
601  s->last_mv[i][0][0] =
602  s->last_mv[i][1][0] =
603  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
604  s->last_mv[i][0][0]);
605  s->mv[i][0][1] =
606  s->last_mv[i][0][1] =
607  s->last_mv[i][1][1] =
608  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
609  s->last_mv[i][0][1]);
610  /* full_pel: only for MPEG-1 */
611  if (s->full_pel[i]) {
612  s->mv[i][0][0] *= 2;
613  s->mv[i][0][1] *= 2;
614  }
615  }
616  }
617  } else {
618  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
619  s->mv_type = MV_TYPE_16X8;
620  for (i = 0; i < 2; i++) {
621  if (USES_LIST(mb_type, i)) {
622  /* MT_16X8 */
623  for (j = 0; j < 2; j++) {
624  s->field_select[i][j] = get_bits1(&s->gb);
625  for (k = 0; k < 2; k++) {
626  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
627  s->last_mv[i][j][k]);
628  s->last_mv[i][j][k] = val;
629  s->mv[i][j][k] = val;
630  }
631  }
632  }
633  }
634  }
635  break;
636  case MT_FIELD:
637  s->mv_type = MV_TYPE_FIELD;
638  if (s->picture_structure == PICT_FRAME) {
639  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
640  for (i = 0; i < 2; i++) {
641  if (USES_LIST(mb_type, i)) {
642  for (j = 0; j < 2; j++) {
643  s->field_select[i][j] = get_bits1(&s->gb);
644  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
645  s->last_mv[i][j][0]);
646  s->last_mv[i][j][0] = val;
647  s->mv[i][j][0] = val;
648  ff_tlog(s->avctx, "fmx=%d\n", val);
649  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
650  s->last_mv[i][j][1] >> 1);
651  s->last_mv[i][j][1] = 2 * val;
652  s->mv[i][j][1] = val;
653  ff_tlog(s->avctx, "fmy=%d\n", val);
654  }
655  }
656  }
657  } else {
658  av_assert0(!s->progressive_sequence);
659  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
660  for (i = 0; i < 2; i++) {
661  if (USES_LIST(mb_type, i)) {
662  s->field_select[i][0] = get_bits1(&s->gb);
663  for (k = 0; k < 2; k++) {
664  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
665  s->last_mv[i][0][k]);
666  s->last_mv[i][0][k] = val;
667  s->last_mv[i][1][k] = val;
668  s->mv[i][0][k] = val;
669  }
670  }
671  }
672  }
673  break;
674  case MT_DMV:
675  if (s->progressive_sequence){
676  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
677  return AVERROR_INVALIDDATA;
678  }
679  s->mv_type = MV_TYPE_DMV;
680  for (i = 0; i < 2; i++) {
681  if (USES_LIST(mb_type, i)) {
682  int dmx, dmy, mx, my, m;
683  const int my_shift = s->picture_structure == PICT_FRAME;
684 
685  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
686  s->last_mv[i][0][0]);
687  s->last_mv[i][0][0] = mx;
688  s->last_mv[i][1][0] = mx;
689  dmx = get_dmv(s);
690  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
691  s->last_mv[i][0][1] >> my_shift);
692  dmy = get_dmv(s);
693 
694 
695  s->last_mv[i][0][1] = my * (1 << my_shift);
696  s->last_mv[i][1][1] = my * (1 << my_shift);
697 
698  s->mv[i][0][0] = mx;
699  s->mv[i][0][1] = my;
700  s->mv[i][1][0] = mx; // not used
701  s->mv[i][1][1] = my; // not used
702 
703  if (s->picture_structure == PICT_FRAME) {
704  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
705 
706  // m = 1 + 2 * s->top_field_first;
707  m = s->top_field_first ? 1 : 3;
708 
709  /* top -> top pred */
710  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
711  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
712  m = 4 - m;
713  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
714  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
715  } else {
716  mb_type |= MB_TYPE_16x16;
717 
718  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
719  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
720  if (s->picture_structure == PICT_TOP_FIELD)
721  s->mv[i][2][1]--;
722  else
723  s->mv[i][2][1]++;
724  }
725  }
726  }
727  break;
728  default:
729  av_log(s->avctx, AV_LOG_ERROR,
730  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
731  return AVERROR_INVALIDDATA;
732  }
733  }
734 
735  s->mb_intra = 0;
736  if (HAS_CBP(mb_type)) {
737  s->bdsp.clear_blocks(s->block[0]);
738 
739  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
740  if (mb_block_count > 6) {
741  cbp *= 1 << mb_block_count - 6;
742  cbp |= get_bits(&s->gb, mb_block_count - 6);
743  s->bdsp.clear_blocks(s->block[6]);
744  }
745  if (cbp <= 0) {
746  av_log(s->avctx, AV_LOG_ERROR,
747  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
748  return AVERROR_INVALIDDATA;
749  }
750 
751  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
752  cbp <<= 12 - mb_block_count;
753 
754  for (i = 0; i < mb_block_count; i++) {
755  if (cbp & (1 << 11)) {
756  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
757  return ret;
758  } else {
759  s->block_last_index[i] = -1;
760  }
761  cbp += cbp;
762  }
763  } else {
764  for (i = 0; i < 6; i++) {
765  if (cbp & 32) {
766  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
767  return ret;
768  } else {
769  s->block_last_index[i] = -1;
770  }
771  cbp += cbp;
772  }
773  }
774  } else {
775  for (i = 0; i < 12; i++)
776  s->block_last_index[i] = -1;
777  }
778  }
779 
780  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
781 
782  return 0;
783 }
784 
785 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
786 {
787  Mpeg1Context *s = avctx->priv_data;
788  MpegEncContext *s2 = &s->mpeg_enc_ctx;
789 
790  if ( avctx->codec_tag != AV_RL32("VCR2")
791  && avctx->codec_tag != AV_RL32("BW10"))
792  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
793  ff_mpv_decode_init(s2, avctx);
794 
795  ff_mpeg12_init_vlcs();
796 
797  s2->chroma_format = 1;
798  s->mpeg_enc_ctx_allocated = 0;
799  s->repeat_field = 0;
800  avctx->color_range = AVCOL_RANGE_MPEG;
801  return 0;
802 }
803 
804 #if HAVE_THREADS
805 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
806  const AVCodecContext *avctx_from)
807 {
808  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
809  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
810  int err;
811 
812  if (avctx == avctx_from ||
813  !ctx_from->mpeg_enc_ctx_allocated ||
814  !s1->context_initialized)
815  return 0;
816 
817  err = ff_mpeg_update_thread_context(avctx, avctx_from);
818  if (err)
819  return err;
820 
821  if (!ctx->mpeg_enc_ctx_allocated)
822  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
823 
824  return 0;
825 }
826 #endif
827 
828 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
829 #if CONFIG_MPEG1_NVDEC_HWACCEL
830  AV_PIX_FMT_CUDA,
831 #endif
832 #if CONFIG_MPEG1_VDPAU_HWACCEL
833  AV_PIX_FMT_VDPAU,
834 #endif
835  AV_PIX_FMT_YUV420P,
836  AV_PIX_FMT_NONE
837 };
838 
839 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
840 #if CONFIG_MPEG2_NVDEC_HWACCEL
841  AV_PIX_FMT_CUDA,
842 #endif
843 #if CONFIG_MPEG2_VDPAU_HWACCEL
844  AV_PIX_FMT_VDPAU,
845 #endif
846 #if CONFIG_MPEG2_DXVA2_HWACCEL
847  AV_PIX_FMT_DXVA2_VLD,
848 #endif
849 #if CONFIG_MPEG2_D3D11VA_HWACCEL
850  AV_PIX_FMT_D3D11VA_VLD,
851  AV_PIX_FMT_D3D11,
852 #endif
853 #if CONFIG_MPEG2_VAAPI_HWACCEL
854  AV_PIX_FMT_VAAPI,
855 #endif
856 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
857  AV_PIX_FMT_VIDEOTOOLBOX,
858 #endif
859  AV_PIX_FMT_YUV420P,
860  AV_PIX_FMT_NONE
861 };
862 
863 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
864  AV_PIX_FMT_YUV422P,
865  AV_PIX_FMT_NONE
866 };
867 
868 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
869  AV_PIX_FMT_YUV444P,
870  AV_PIX_FMT_NONE
871 };
872 
873 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
874 {
875  Mpeg1Context *s1 = avctx->priv_data;
876  MpegEncContext *s = &s1->mpeg_enc_ctx;
877  const enum AVPixelFormat *pix_fmts;
878 
879  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
880  return AV_PIX_FMT_GRAY8;
881 
882  if (s->chroma_format < 2)
883  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
884  mpeg1_hwaccel_pixfmt_list_420 :
885  mpeg2_hwaccel_pixfmt_list_420;
886  else if (s->chroma_format == 2)
887  pix_fmts = mpeg12_pixfmt_list_422;
888  else
889  pix_fmts = mpeg12_pixfmt_list_444;
890 
891  return ff_get_format(avctx, pix_fmts);
892 }
893 
894 /* Call this function when we know all parameters.
895  * It may be called in different places for MPEG-1 and MPEG-2. */
896 static int mpeg_decode_postinit(AVCodecContext *avctx)
897 {
898  Mpeg1Context *s1 = avctx->priv_data;
899  MpegEncContext *s = &s1->mpeg_enc_ctx;
900  int ret;
901 
902  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
903  // MPEG-1 aspect
904  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
905  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
906  } else { // MPEG-2
907  // MPEG-2 aspect
908  if (s1->aspect_ratio_info > 1) {
909  AVRational dar =
910  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
911  (AVRational) { s1->pan_scan.width,
912  s1->pan_scan.height }),
913  (AVRational) { s->width, s->height });
914 
915  /* We ignore the spec here and guess a bit as reality does not
916  * match the spec, see for example res_change_ffmpeg_aspect.ts
917  * and sequence-display-aspect.mpg.
918  * issue1613, 621, 562 */
919  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
920  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
921  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
922  s->avctx->sample_aspect_ratio =
923  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
924  (AVRational) { s->width, s->height });
925  } else {
926  s->avctx->sample_aspect_ratio =
927  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
928  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
929 // issue1613 4/3 16/9 -> 16/9
930 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
931 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
932 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
933  ff_dlog(avctx, "aspect A %d/%d\n",
934  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
935  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
936  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
937  s->avctx->sample_aspect_ratio.den);
938  }
939  } else {
940  s->avctx->sample_aspect_ratio =
941  ff_mpeg2_aspect[s1->aspect_ratio_info];
942  }
943  } // MPEG-2
944 
945  if (av_image_check_sar(s->width, s->height,
946  avctx->sample_aspect_ratio) < 0) {
947  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
948  avctx->sample_aspect_ratio.num,
949  avctx->sample_aspect_ratio.den);
950  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
951  }
952 
953  if ((s1->mpeg_enc_ctx_allocated == 0) ||
954  avctx->coded_width != s->width ||
955  avctx->coded_height != s->height ||
956  s1->save_width != s->width ||
957  s1->save_height != s->height ||
958  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
959  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
960  0) {
961  if (s1->mpeg_enc_ctx_allocated) {
962  ff_mpv_common_end(s);
963  s1->mpeg_enc_ctx_allocated = 0;
964  }
965 
966  ret = ff_set_dimensions(avctx, s->width, s->height);
967  if (ret < 0)
968  return ret;
969 
970  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate &&
971  (s->bit_rate != 0x3FFFF*400)) {
972  avctx->rc_max_rate = s->bit_rate;
973  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
974  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
975  avctx->bit_rate = s->bit_rate;
976  }
977  s1->save_aspect = s->avctx->sample_aspect_ratio;
978  s1->save_width = s->width;
979  s1->save_height = s->height;
980  s1->save_progressive_seq = s->progressive_sequence;
981 
982  /* low_delay may be forced, in this case we will have B-frames
983  * that behave like P-frames. */
984  avctx->has_b_frames = !s->low_delay;
985 
986  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
987  // MPEG-1 fps
988  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
989 #if FF_API_TICKS_PER_FRAME
991  avctx->ticks_per_frame = 1;
993 #endif
994 
995  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
996  } else { // MPEG-2
997  // MPEG-2 fps
998  av_reduce(&s->avctx->framerate.num,
999  &s->avctx->framerate.den,
1000  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1001  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1002  1 << 30);
1003 #if FF_API_TICKS_PER_FRAME
1005  avctx->ticks_per_frame = 2;
1007 #endif
1008 
1009  switch (s->chroma_format) {
1010  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1011  case 2:
1012  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1013  default: av_assert0(0);
1014  }
1015  } // MPEG-2
1016 
1017  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1018 
1019  if ((ret = ff_mpv_common_init(s)) < 0)
1020  return ret;
1021 
1022  s1->mpeg_enc_ctx_allocated = 1;
1023  }
1024  return 0;
1025 }
1026 
1027 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1028  int buf_size)
1029 {
1030  Mpeg1Context *s1 = avctx->priv_data;
1031  MpegEncContext *s = &s1->mpeg_enc_ctx;
1032  int ref, f_code, vbv_delay, ret;
1033 
1034  ret = init_get_bits8(&s->gb, buf, buf_size);
1035  if (ret < 0)
1036  return ret;
1037 
1038  ref = get_bits(&s->gb, 10); /* temporal ref */
1039  s->pict_type = get_bits(&s->gb, 3);
1040  if (s->pict_type == 0 || s->pict_type > 3)
1041  return AVERROR_INVALIDDATA;
1042 
1043  vbv_delay = get_bits(&s->gb, 16);
1044  s->vbv_delay = vbv_delay;
1045  if (s->pict_type == AV_PICTURE_TYPE_P ||
1046  s->pict_type == AV_PICTURE_TYPE_B) {
1047  s->full_pel[0] = get_bits1(&s->gb);
1048  f_code = get_bits(&s->gb, 3);
1049  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1050  return AVERROR_INVALIDDATA;
1051  f_code += !f_code;
1052  s->mpeg_f_code[0][0] = f_code;
1053  s->mpeg_f_code[0][1] = f_code;
1054  }
1055  if (s->pict_type == AV_PICTURE_TYPE_B) {
1056  s->full_pel[1] = get_bits1(&s->gb);
1057  f_code = get_bits(&s->gb, 3);
1058  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1059  return AVERROR_INVALIDDATA;
1060  f_code += !f_code;
1061  s->mpeg_f_code[1][0] = f_code;
1062  s->mpeg_f_code[1][1] = f_code;
1063  }
1064 
1065  if (avctx->debug & FF_DEBUG_PICT_INFO)
1066  av_log(avctx, AV_LOG_DEBUG,
1067  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1068 
1069  s->y_dc_scale = 8;
1070  s->c_dc_scale = 8;
1071  return 0;
1072 }
1073 
1074 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1075 {
1076  MpegEncContext *s = &s1->mpeg_enc_ctx;
1077  int horiz_size_ext, vert_size_ext;
1078  int bit_rate_ext;
1079 
1080  skip_bits(&s->gb, 1); /* profile and level esc*/
1081  s->avctx->profile = get_bits(&s->gb, 3);
1082  s->avctx->level = get_bits(&s->gb, 4);
1083  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1084  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1085 
1086  if (!s->chroma_format) {
1087  s->chroma_format = 1;
1088  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1089  }
1090 
1091  horiz_size_ext = get_bits(&s->gb, 2);
1092  vert_size_ext = get_bits(&s->gb, 2);
1093  s->width |= (horiz_size_ext << 12);
1094  s->height |= (vert_size_ext << 12);
1095  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1096  s->bit_rate += (bit_rate_ext << 18) * 400LL;
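 /* bit_rate_extension holds the upper 12 bits of the 30-bit bit_rate field;
  * combined with the 18 bits from the sequence header, the value is still
  * expressed in units of 400 bit/s. */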
1097  check_marker(s->avctx, &s->gb, "after bit rate extension");
1098  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1099 
1100  s->low_delay = get_bits1(&s->gb);
1101  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1102  s->low_delay = 1;
1103 
1104  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1105  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
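 /* The effective MPEG-2 frame rate is the frame_rate_index table entry scaled
  * by frame_rate_ext.num / frame_rate_ext.den (each stored as value + 1); the
  * scaling is applied later where avctx->framerate is derived from
  * ff_mpeg12_frame_rate_tab. */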
1106 
1107  ff_dlog(s->avctx, "sequence extension\n");
1108  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1109 
1110  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1111  av_log(s->avctx, AV_LOG_DEBUG,
1112  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1113  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1114  s->avctx->rc_buffer_size, s->bit_rate);
1115 }
1116 
1117 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1118 {
1119  MpegEncContext *s = &s1->mpeg_enc_ctx;
1120  int color_description, w, h;
1121 
1122  skip_bits(&s->gb, 3); /* video format */
1123  color_description = get_bits1(&s->gb);
1124  if (color_description) {
1125  s->avctx->color_primaries = get_bits(&s->gb, 8);
1126  s->avctx->color_trc = get_bits(&s->gb, 8);
1127  s->avctx->colorspace = get_bits(&s->gb, 8);
1128  }
1129  w = get_bits(&s->gb, 14);
1130  skip_bits(&s->gb, 1); // marker
1131  h = get_bits(&s->gb, 14);
1132  // remaining 3 bits are zero padding
1133 
1134  s1->pan_scan.width = 16 * w;
1135  s1->pan_scan.height = 16 * h;
1136 
1137  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1138  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1139 }
1140 
1141 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1142 {
1143  MpegEncContext *s = &s1->mpeg_enc_ctx;
1144  int i, nofco;
1145 
1146  nofco = 1;
1147  if (s->progressive_sequence) {
1148  if (s->repeat_first_field) {
1149  nofco++;
1150  if (s->top_field_first)
1151  nofco++;
1152  }
1153  } else {
1154  if (s->picture_structure == PICT_FRAME) {
1155  nofco++;
1156  if (s->repeat_first_field)
1157  nofco++;
1158  }
1159  }
1160  for (i = 0; i < nofco; i++) {
1161  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1162  skip_bits(&s->gb, 1); // marker
1163  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1164  skip_bits(&s->gb, 1); // marker
1165  }
1166 
1167  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1168  av_log(s->avctx, AV_LOG_DEBUG,
1169  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1170  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1171  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1172  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1173 }
1174 
1175 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1176  uint16_t matrix1[64], int intra)
1177 {
1178  int i;
1179 
1180  for (i = 0; i < 64; i++) {
1181  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1182  int v = get_bits(&s->gb, 8);
1183  if (v == 0) {
1184  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1185  return AVERROR_INVALIDDATA;
1186  }
1187  if (intra && i == 0 && v != 8) {
1188  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1189  v = 8; // needed by pink.mpg / issue1046
1190  }
1191  matrix0[j] = v;
1192  if (matrix1)
1193  matrix1[j] = v;
1194  }
1195  return 0;
1196 }
1197 
1198 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1199 {
1200  ff_dlog(s->avctx, "matrix extension\n");
1201 
1202  if (get_bits1(&s->gb))
1203  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1204  if (get_bits1(&s->gb))
1205  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1206  if (get_bits1(&s->gb))
1207  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1208  if (get_bits1(&s->gb))
1209  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1210 }
1211 
1212 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1213 {
1214  MpegEncContext *s = &s1->mpeg_enc_ctx;
1215 
1216  s->full_pel[0] = s->full_pel[1] = 0;
1217  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1218  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1219  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1220  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1221  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1222  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1223  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1224  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1225  if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1226  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1227  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1228  return AVERROR_INVALIDDATA;
1229  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1230  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1231  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1232  s->pict_type = AV_PICTURE_TYPE_I;
1233  else
1234  s->pict_type = AV_PICTURE_TYPE_P;
1235  } else
1236  s->pict_type = AV_PICTURE_TYPE_B;
1237  }
1238 
1239  s->intra_dc_precision = get_bits(&s->gb, 2);
1240  s->picture_structure = get_bits(&s->gb, 2);
1241  s->top_field_first = get_bits1(&s->gb);
1242  s->frame_pred_frame_dct = get_bits1(&s->gb);
1243  s->concealment_motion_vectors = get_bits1(&s->gb);
1244  s->q_scale_type = get_bits1(&s->gb);
1245  s->intra_vlc_format = get_bits1(&s->gb);
1246  s->alternate_scan = get_bits1(&s->gb);
1247  s->repeat_first_field = get_bits1(&s->gb);
1248  s->chroma_420_type = get_bits1(&s->gb);
1249  s->progressive_frame = get_bits1(&s->gb);
1250 
1251  if (s->alternate_scan) {
1252  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1253  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1254  } else {
1255  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1256  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1257  }
1258 
1259  /* composite display not parsed */
1260  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1261  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1262  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1263  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1264  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1265  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1266  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1267  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1268  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1269 
1270  return 0;
1271 }
1272 
1273 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1274 {
1275  AVCodecContext *avctx = s->avctx;
1276  Mpeg1Context *s1 = (Mpeg1Context *) s;
1277  int ret;
1278 
1279  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1280  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1281  return AVERROR_INVALIDDATA;
1282  }
1283 
1284  /* start frame decoding */
1285  if (s->first_field || s->picture_structure == PICT_FRAME) {
1286  AVFrameSideData *pan_scan;
1287 
1288  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1289  return ret;
1290 
1291  ff_mpeg_er_frame_start(s);
1292 
1293  /* first check if we must repeat the frame */
1294  s->current_picture_ptr->f->repeat_pict = 0;
1295  if (s->repeat_first_field) {
1296  if (s->progressive_sequence) {
1297  if (s->top_field_first)
1298  s->current_picture_ptr->f->repeat_pict = 4;
1299  else
1300  s->current_picture_ptr->f->repeat_pict = 2;
1301  } else if (s->progressive_frame) {
1302  s->current_picture_ptr->f->repeat_pict = 1;
1303  }
1304  }
1305 
1306  pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
1307  AV_FRAME_DATA_PANSCAN,
1308  sizeof(s1->pan_scan));
1309  if (!pan_scan)
1310  return AVERROR(ENOMEM);
1311  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1312 
1313  if (s1->a53_buf_ref) {
1314  AVFrameSideData *sd = av_frame_new_side_data_from_buf(
1315  s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1316  s1->a53_buf_ref);
1317  if (!sd)
1318  av_buffer_unref(&s1->a53_buf_ref);
1319  s1->a53_buf_ref = NULL;
1320  }
1321 
1322  if (s1->has_stereo3d) {
1323  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1324  if (!stereo)
1325  return AVERROR(ENOMEM);
1326 
1327  *stereo = s1->stereo3d;
1328  s1->has_stereo3d = 0;
1329  }
1330 
1331  if (s1->has_afd) {
1332  AVFrameSideData *sd =
1333  av_frame_new_side_data(s->current_picture_ptr->f,
1334  AV_FRAME_DATA_AFD, 1);
1335  if (!sd)
1336  return AVERROR(ENOMEM);
1337 
1338  *sd->data = s1->afd;
1339  s1->has_afd = 0;
1340  }
1341 
1342  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1343  ff_thread_finish_setup(avctx);
1344  } else { // second field
1345  int i;
1346 
1347  if (!s->current_picture_ptr) {
1348  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1349  return AVERROR_INVALIDDATA;
1350  }
1351 
1352  if (s->avctx->hwaccel) {
1353  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1354  av_log(avctx, AV_LOG_ERROR,
1355  "hardware accelerator failed to decode first field\n");
1356  return ret;
1357  }
1358  }
1359 
1360  for (i = 0; i < 4; i++) {
1361  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1362  if (s->picture_structure == PICT_BOTTOM_FIELD)
1363  s->current_picture.f->data[i] +=
1364  s->current_picture_ptr->f->linesize[i];
1365  }
1366  }
1367 
1368  if (avctx->hwaccel) {
1369  if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
1370  return ret;
1371  }
1372 
1373  return 0;
1374 }
1375 
1376 #define DECODE_SLICE_ERROR -1
1377 #define DECODE_SLICE_OK 0
1378 
1379 /**
1380  * Decode a slice.
1381  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1382  * @return DECODE_SLICE_ERROR if the slice is damaged,
1383  * DECODE_SLICE_OK if this slice is OK
1384  */
1385 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1386  const uint8_t **buf, int buf_size)
1387 {
1388  AVCodecContext *avctx = s->avctx;
1389  const int lowres = s->avctx->lowres;
1390  const int field_pic = s->picture_structure != PICT_FRAME;
1391  int ret;
1392 
1393  s->resync_mb_x =
1394  s->resync_mb_y = -1;
1395 
1396  av_assert0(mb_y < s->mb_height);
1397 
1398  ret = init_get_bits8(&s->gb, *buf, buf_size);
1399  if (ret < 0)
1400  return ret;
1401 
1402  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1403  skip_bits(&s->gb, 3);
1404 
1406  s->interlaced_dct = 0;
1407 
1408  s->qscale = mpeg_get_qscale(s);
1409 
1410  if (s->qscale == 0) {
1411  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1412  return AVERROR_INVALIDDATA;
1413  }
1414 
1415  /* extra slice info */
1416  if (skip_1stop_8data_bits(&s->gb) < 0)
1417  return AVERROR_INVALIDDATA;
1418 
1419  s->mb_x = 0;
1420 
1421  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1422  skip_bits1(&s->gb);
1423  } else {
1424  while (get_bits_left(&s->gb) > 0) {
1425  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1426  MBINCR_VLC_BITS, 2);
1427  if (code < 0) {
1428  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1429  return AVERROR_INVALIDDATA;
1430  }
1431  if (code >= 33) {
1432  if (code == 33)
1433  s->mb_x += 33;
1434  /* otherwise, stuffing, nothing to do */
1435  } else {
1436  s->mb_x += code;
1437  break;
1438  }
1439  }
1440  }
1441 
1442  if (s->mb_x >= (unsigned) s->mb_width) {
1443  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1444  return AVERROR_INVALIDDATA;
1445  }
1446 
1447  if (avctx->hwaccel) {
1448  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1449  int start_code = -1;
1450  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1451  if (buf_end < *buf + buf_size)
1452  buf_end -= 4;
1453  s->mb_y = mb_y;
1454  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1455  return DECODE_SLICE_ERROR;
1456  *buf = buf_end;
1457  return DECODE_SLICE_OK;
1458  }
1459 
1460  s->resync_mb_x = s->mb_x;
1461  s->resync_mb_y = s->mb_y = mb_y;
1462  s->mb_skip_run = 0;
1464 
1465  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1466  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1467  av_log(s->avctx, AV_LOG_DEBUG,
1468  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1469  s->qscale,
1470  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1471  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1472  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1473  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1474  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1475  s->progressive_sequence ? "ps" : "",
1476  s->progressive_frame ? "pf" : "",
1477  s->alternate_scan ? "alt" : "",
1478  s->top_field_first ? "top" : "",
1479  s->intra_dc_precision, s->picture_structure,
1480  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1481  s->q_scale_type, s->intra_vlc_format,
1482  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1483  }
1484  }
1485 
1486  for (;;) {
1487  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1488  return ret;
1489 
1490  // Note motion_val is normally NULL unless we want to extract the MVs.
1491  if (s->current_picture.motion_val[0]) {
1492  const int wrap = s->b8_stride;
1493  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1494  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1495  int motion_x, motion_y, dir, i;
1496 
1497  for (i = 0; i < 2; i++) {
1498  for (dir = 0; dir < 2; dir++) {
1499  if (s->mb_intra ||
1500  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1501  motion_x = motion_y = 0;
1502  } else if (s->mv_type == MV_TYPE_16X16 ||
1503  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1504  motion_x = s->mv[dir][0][0];
1505  motion_y = s->mv[dir][0][1];
1506  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1507  motion_x = s->mv[dir][i][0];
1508  motion_y = s->mv[dir][i][1];
1509  }
1510 
1511  s->current_picture.motion_val[dir][xy][0] = motion_x;
1512  s->current_picture.motion_val[dir][xy][1] = motion_y;
1513  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1514  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1515  s->current_picture.ref_index [dir][b8_xy] =
1516  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1517  av_assert2(s->field_select[dir][i] == 0 ||
1518  s->field_select[dir][i] == 1);
1519  }
1520  xy += wrap;
1521  b8_xy += 2;
1522  }
1523  }
1524 
1525  s->dest[0] += 16 >> lowres;
1526  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1527  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1528 
1529  ff_mpv_reconstruct_mb(s, s->block);
1530 
1531  if (++s->mb_x >= s->mb_width) {
1532  const int mb_size = 16 >> s->avctx->lowres;
1533  int left;
1534 
1535  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1536  ff_mpv_report_decode_progress(s);
1537 
1538  s->mb_x = 0;
1539  s->mb_y += 1 << field_pic;
1540 
1541  if (s->mb_y >= s->mb_height) {
1542  int left = get_bits_left(&s->gb);
1543  int is_d10 = s->chroma_format == 2 &&
1544  s->pict_type == AV_PICTURE_TYPE_I &&
1545  avctx->profile == 0 && avctx->level == 5 &&
1546  s->intra_dc_precision == 2 &&
1547  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1548  s->progressive_frame == 0
1549  /* vbv_delay == 0xBBB || 0xE10 */;
1550 
1551  if (left >= 32 && !is_d10) {
1552  GetBitContext gb = s->gb;
1553  align_get_bits(&gb);
1554  if (show_bits(&gb, 24) == 0x060E2B) {
1555  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1556  is_d10 = 1;
1557  }
1558  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1559  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1560  goto eos;
1561  }
1562  }
1563 
1564  if (left < 0 ||
1565  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1566  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1567  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1568  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1569  return AVERROR_INVALIDDATA;
1570  } else
1571  goto eos;
1572  }
1573  // There are some files out there which are missing the last slice
1574  // in cases where the slice is completely outside the visible
1575  // area, we detect this here instead of running into the end expecting
1576  // more data
1577  left = get_bits_left(&s->gb);
1578  if (s->mb_y >= ((s->height + 15) >> 4) &&
1579  !s->progressive_sequence &&
1580  left <= 25 &&
1581  left >= 0 &&
1582  s->mb_skip_run == -1 &&
1583  (!left || show_bits(&s->gb, left) == 0))
1584  goto eos;
1585 
1587  }
1588 
1589  /* skip mb handling */
1590  if (s->mb_skip_run == -1) {
1591  /* read increment again */
1592  s->mb_skip_run = 0;
1593  for (;;) {
1594  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1595  MBINCR_VLC_BITS, 2);
1596  if (code < 0) {
1597  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1598  return AVERROR_INVALIDDATA;
1599  }
1600  if (code >= 33) {
1601  if (code == 33) {
1602  s->mb_skip_run += 33;
1603  } else if (code == 35) {
1604  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1605  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1606  return AVERROR_INVALIDDATA;
1607  }
1608  goto eos; /* end of slice */
1609  }
1610  /* otherwise, stuffing, nothing to do */
1611  } else {
1612  s->mb_skip_run += code;
1613  break;
1614  }
1615  }
1616  if (s->mb_skip_run) {
1617  int i;
1618  if (s->pict_type == AV_PICTURE_TYPE_I) {
1619  av_log(s->avctx, AV_LOG_ERROR,
1620  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1621  return AVERROR_INVALIDDATA;
1622  }
1623 
1624  /* skip mb */
1625  s->mb_intra = 0;
1626  for (i = 0; i < 12; i++)
1627  s->block_last_index[i] = -1;
1628  if (s->picture_structure == PICT_FRAME)
1629  s->mv_type = MV_TYPE_16X16;
1630  else
1631  s->mv_type = MV_TYPE_FIELD;
1632  if (s->pict_type == AV_PICTURE_TYPE_P) {
1633  /* if P type, zero motion vector is implied */
1634  s->mv_dir = MV_DIR_FORWARD;
1635  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1636  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1637  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1638  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1639  } else {
1640  /* if B type, reuse previous vectors and directions */
1641  s->mv[0][0][0] = s->last_mv[0][0][0];
1642  s->mv[0][0][1] = s->last_mv[0][0][1];
1643  s->mv[1][0][0] = s->last_mv[1][0][0];
1644  s->mv[1][0][1] = s->last_mv[1][0][1];
1645  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1646  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1647  }
1648  }
1649  }
1650  }
1651 eos: // end of slice
1652  if (get_bits_left(&s->gb) < 0) {
1653  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1654  return AVERROR_INVALIDDATA;
1655  }
1656  *buf += (get_bits_count(&s->gb) - 1) / 8;
1657  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1658  return 0;
1659 }
1660 
1661 static int slice_decode_thread(AVCodecContext *c, void *arg)
1662 {
1663  MpegEncContext *s = *(void **) arg;
1664  const uint8_t *buf = s->gb.buffer;
1665  int mb_y = s->start_mb_y;
1666  const int field_pic = s->picture_structure != PICT_FRAME;
1667 
1668  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1669 
1670  for (;;) {
1671  uint32_t start_code;
1672  int ret;
1673 
1674  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1675  emms_c();
1676  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1677  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1678  s->start_mb_y, s->end_mb_y, s->er.error_count);
1679  if (ret < 0) {
1680  if (c->err_recognition & AV_EF_EXPLODE)
1681  return ret;
1682  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1683  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1684  s->mb_x, s->mb_y,
1685  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1686  } else {
1687  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1688  s->mb_x - 1, s->mb_y,
1689  ER_AC_END | ER_DC_END | ER_MV_END);
1690  }
1691 
1692  if (s->mb_y == s->end_mb_y)
1693  return 0;
1694 
1695  start_code = -1;
1696  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1697  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1698  return AVERROR_INVALIDDATA;
1699  mb_y = start_code - SLICE_MIN_START_CODE;
1700  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1701  mb_y += (*buf&0xE0)<<2;
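 /* For pictures taller than 2800 lines, the 3-bit slice_vertical_position_extension
  * (the top bits of the byte following the slice start code) supplies the mb_y
  * bits above 127. */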
1702  mb_y <<= field_pic;
1703  if (s->picture_structure == PICT_BOTTOM_FIELD)
1704  mb_y++;
1705  if (mb_y >= s->end_mb_y)
1706  return AVERROR_INVALIDDATA;
1707  }
1708 }
1709 
1710 /**
1711  * Handle slice ends.
1712  * @return 1 if it seems to be the last slice
1713  */
1714 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
1715 {
1716  Mpeg1Context *s1 = avctx->priv_data;
1717  MpegEncContext *s = &s1->mpeg_enc_ctx;
1718 
1719  if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
1720  return 0;
1721 
1722  if (s->avctx->hwaccel) {
1723  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1724  if (ret < 0) {
1725  av_log(avctx, AV_LOG_ERROR,
1726  "hardware accelerator failed to decode picture\n");
1727  return ret;
1728  }
1729  }
1730 
1731  /* end of slice reached */
1732  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1733  /* end of image */
1734 
1735  ff_er_frame_end(&s->er, NULL);
1736 
1737  ff_mpv_frame_end(s);
1738 
1739  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1740  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
1741  if (ret < 0)
1742  return ret;
1743  ff_print_debug_info(s, s->current_picture_ptr, pict);
1744  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1745  } else {
1746  /* latency of 1 frame for I- and P-frames */
1747  if (s->last_picture_ptr) {
1748  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
1749  if (ret < 0)
1750  return ret;
1751  ff_print_debug_info(s, s->last_picture_ptr, pict);
1752  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1753  }
1754  }
1755 
1756  return 1;
1757  } else {
1758  return 0;
1759  }
1760 }
1761 
1762 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1763  const uint8_t *buf, int buf_size)
1764 {
1765  Mpeg1Context *s1 = avctx->priv_data;
1766  MpegEncContext *s = &s1->mpeg_enc_ctx;
1767  int width, height;
1768  int i, v, j;
1769 
1770  int ret = init_get_bits8(&s->gb, buf, buf_size);
1771  if (ret < 0)
1772  return ret;
1773 
1774  width = get_bits(&s->gb, 12);
1775  height = get_bits(&s->gb, 12);
1776  if (width == 0 || height == 0) {
1777  av_log(avctx, AV_LOG_WARNING,
1778  "Invalid horizontal or vertical size value.\n");
1779  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1780  return AVERROR_INVALIDDATA;
1781  }
1782  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1783  if (s1->aspect_ratio_info == 0) {
1784  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1785  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1786  return AVERROR_INVALIDDATA;
1787  }
1788  s1->frame_rate_index = get_bits(&s->gb, 4);
1789  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1790  av_log(avctx, AV_LOG_WARNING,
1791  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1792  s1->frame_rate_index = 1;
1793  }
1794  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
1795  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1796  return AVERROR_INVALIDDATA;
1797  }
1798 
1799  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1800  skip_bits(&s->gb, 1);
1801 
1802  /* get matrix */
1803  if (get_bits1(&s->gb)) {
1804  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1805  } else {
1806  for (i = 0; i < 64; i++) {
1807  j = s->idsp.idct_permutation[i];
1808  v = ff_mpeg1_default_intra_matrix[i];
1809  s->intra_matrix[j] = v;
1810  s->chroma_intra_matrix[j] = v;
1811  }
1812  }
1813  if (get_bits1(&s->gb)) {
1814  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1815  } else {
1816  for (i = 0; i < 64; i++) {
1817  int j = s->idsp.idct_permutation[i];
1818  v = ff_mpeg1_default_non_intra_matrix[i];
1819  s->inter_matrix[j] = v;
1820  s->chroma_inter_matrix[j] = v;
1821  }
1822  }
1823 
1824  if (show_bits(&s->gb, 23) != 0) {
1825  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1826  return AVERROR_INVALIDDATA;
1827  }
1828 
1829  s->width = width;
1830  s->height = height;
1831 
1832  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1833  s->progressive_sequence = 1;
1834  s->progressive_frame = 1;
1835  s->picture_structure = PICT_FRAME;
1836  s->first_field = 0;
1837  s->frame_pred_frame_dct = 1;
1838  s->chroma_format = 1;
1839  s->codec_id =
1840  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1841  s->out_format = FMT_MPEG1;
1842  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1843  s->low_delay = 1;
1844 
1845  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1846  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1847  s->avctx->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
1848 
1849  return 0;
1850 }
1851 
1852 static int vcr2_init_sequence(AVCodecContext *avctx)
1853 {
1854  Mpeg1Context *s1 = avctx->priv_data;
1855  MpegEncContext *s = &s1->mpeg_enc_ctx;
1856  int i, v, ret;
1857 
1858  /* start new MPEG-1 context decoding */
1859  s->out_format = FMT_MPEG1;
1860  if (s1->mpeg_enc_ctx_allocated) {
1861  ff_mpv_common_end(s);
1862  s1->mpeg_enc_ctx_allocated = 0;
1863  }
1864  s->width = avctx->coded_width;
1865  s->height = avctx->coded_height;
1866  avctx->has_b_frames = 0; // true?
1867  s->low_delay = 1;
1868 
1869  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1870 
1871  if ((ret = ff_mpv_common_init(s)) < 0)
1872  return ret;
1873  s1->mpeg_enc_ctx_allocated = 1;
1874 
1875  for (i = 0; i < 64; i++) {
1876  int j = s->idsp.idct_permutation[i];
1877  v = ff_mpeg1_default_intra_matrix[i];
1878  s->intra_matrix[j] = v;
1879  s->chroma_intra_matrix[j] = v;
1880 
1881  v = ff_mpeg1_default_non_intra_matrix[i];
1882  s->inter_matrix[j] = v;
1883  s->chroma_inter_matrix[j] = v;
1884  }
1885 
1886  s->progressive_sequence = 1;
1887  s->progressive_frame = 1;
1888  s->picture_structure = PICT_FRAME;
1889  s->first_field = 0;
1890  s->frame_pred_frame_dct = 1;
1891  s->chroma_format = 1;
1892  if (s->codec_tag == AV_RL32("BW10")) {
1893  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1894  } else {
1895  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1896  }
1897  s1->save_width = s->width;
1898  s1->save_height = s->height;
1899  s1->save_progressive_seq = s->progressive_sequence;
1900  return 0;
1901 }
1902 
1903 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1904  const uint8_t *p, int buf_size)
1905 {
1906  Mpeg1Context *s1 = avctx->priv_data;
1907 
1908  if (buf_size >= 6 &&
1909  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1910  p[4] == 3 && (p[5] & 0x40)) {
1911  /* extract A53 Part 4 CC data */
1912  int cc_count = p[5] & 0x1f;
1913  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1914  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1915  const uint64_t new_size = (old_size + cc_count
1916  * UINT64_C(3));
1917  int ret;
1918 
1919  if (new_size > 3*A53_MAX_CC_COUNT)
1920  return AVERROR(EINVAL);
1921 
1922  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1923  if (ret >= 0)
1924  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1925 
1926  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1927  }
1928  return 1;
1929  } else if (buf_size >= 2 &&
1930  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1931  /* extract SCTE-20 CC data */
1932  GetBitContext gb;
1933  int cc_count = 0;
1934  int i, ret;
1935 
1936  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1937  if (ret < 0)
1938  return ret;
1939  cc_count = get_bits(&gb, 5);
1940  if (cc_count > 0) {
1941  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1942  const uint64_t new_size = (old_size + cc_count
1943  * UINT64_C(3));
1944  if (new_size > 3*A53_MAX_CC_COUNT)
1945  return AVERROR(EINVAL);
1946 
1947  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1948  if (ret >= 0) {
1949  uint8_t field, cc1, cc2;
1950  uint8_t *cap = s1->a53_buf_ref->data;
1951 
1952  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
1953  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1954  skip_bits(&gb, 2); // priority
1955  field = get_bits(&gb, 2);
1956  skip_bits(&gb, 5); // line_offset
1957  cc1 = get_bits(&gb, 8);
1958  cc2 = get_bits(&gb, 8);
1959  skip_bits(&gb, 1); // marker
1960 
1961  if (!field) { // forbidden
1962  cap[0] = cap[1] = cap[2] = 0x00;
1963  } else {
1964  field = (field == 2 ? 1 : 0);
1965  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
1966  cap[0] = 0x04 | field;
1967  cap[1] = ff_reverse[cc1];
1968  cap[2] = ff_reverse[cc2];
1969  }
1970  cap += 3;
1971  }
1972  }
1973  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1974  }
1975  return 1;
1976  } else if (buf_size >= 11 &&
1977  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
1978  /* extract DVD CC data
1979  *
1980  * uint32_t user_data_start_code 0x000001B2 (big endian)
1981  * uint16_t user_identifier 0x4343 "CC"
1982  * uint8_t user_data_type_code 0x01
1983  * uint8_t caption_block_size 0xF8
1984  * uint8_t
1985  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
1986  * bit 6 caption_filler 0
1987  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
1988  * bit 0 caption_extra_field_added 1=one additional caption word
1989  *
1990  * struct caption_field_block {
1991  * uint8_t
1992  * bit 7:1 caption_filler 0x7F (all 1s)
1993  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
1994  * uint8_t caption_first_byte
1995  * uint8_t caption_second_byte
1996  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
1997  *
1998  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
1999  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2000  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2001  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2002  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2003  int cc_count = 0;
2004  int i, ret;
2005  // There is a caption count field in the data, but it is often
2006  // incorrect. So count the number of captions present.
2007  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2008  cc_count++;
2009  // Transform the DVD format into A53 Part 4 format
2010  if (cc_count > 0) {
2011  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2012  const uint64_t new_size = (old_size + cc_count
2013  * UINT64_C(6));
2014  if (new_size > 3*A53_MAX_CC_COUNT)
2015  return AVERROR(EINVAL);
2016 
2017  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2018  if (ret >= 0) {
2019  uint8_t field1 = !!(p[4] & 0x80);
2020  uint8_t *cap = s1->a53_buf_ref->data;
2021  p += 5;
2022  for (i = 0; i < cc_count; i++) {
2023  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2024  cap[1] = p[1];
2025  cap[2] = p[2];
2026  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2027  cap[4] = p[4];
2028  cap[5] = p[5];
2029  cap += 6;
2030  p += 6;
2031  }
2032  }
2033  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2034  }
2035  return 1;
2036  }
2037  return 0;
2038 }
2039 
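[Editor's note, not part of mpeg12dec.c] The caption bytes collected into a53_buf_ref above are exported on decoded frames as AV_FRAME_DATA_A53_CC side data (the buffer is attached to the output frame elsewhere in this file, not shown in this excerpt). A rough, illustrative sketch of the consumer side only:

    #include <stdio.h>
    #include <libavutil/frame.h>

    /* Illustrative only: dump the A53 Part 4 CC packets attached to a decoded frame. */
    static void dump_cc_packets(const AVFrame *frame)
    {
        const AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
        if (!sd)
            return;
        /* Each packet is 3 bytes: cc_valid/cc_type byte, cc_data_1, cc_data_2. */
        for (size_t i = 0; i + 2 < sd->size; i += 3)
            printf("cc: %02x %02x %02x\n", sd->data[i], sd->data[i + 1], sd->data[i + 2]);
    }
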
2040 static void mpeg_decode_user_data(AVCodecContext *avctx,
2041  const uint8_t *p, int buf_size)
2042 {
2043  Mpeg1Context *s = avctx->priv_data;
2044  const uint8_t *buf_end = p + buf_size;
2045  Mpeg1Context *s1 = avctx->priv_data;
2046 
2047 #if 0
2048  int i;
2049  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2050  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2051  }
2052  av_log(avctx, AV_LOG_ERROR, "\n");
2053 #endif
2054 
2055  if (buf_size > 29){
2056  int i;
2057  for(i=0; i<20; i++)
2058  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2059  s->tmpgexs= 1;
2060  }
2061  }
2062  /* we parse the DTG active format information */
2063  if (buf_end - p >= 5 &&
2064  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2065  int flags = p[4];
2066  p += 5;
2067  if (flags & 0x80) {
2068  /* skip event id */
2069  p += 2;
2070  }
2071  if (flags & 0x40) {
2072  if (buf_end - p < 1)
2073  return;
2074  s1->has_afd = 1;
2075  s1->afd = p[0] & 0x0f;
2076  }
2077  } else if (buf_end - p >= 6 &&
2078  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2079  p[4] == 0x03) { // S3D_video_format_length
2080  // the 0x7F mask ignores the reserved_bit value
2081  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2082 
2083  if (S3D_video_format_type == 0x03 ||
2084  S3D_video_format_type == 0x04 ||
2085  S3D_video_format_type == 0x08 ||
2086  S3D_video_format_type == 0x23) {
2087 
2088  s1->has_stereo3d = 1;
2089 
2090  switch (S3D_video_format_type) {
2091  case 0x03:
2092  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2093  break;
2094  case 0x04:
2095  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2096  break;
2097  case 0x08:
2098  s1->stereo3d.type = AV_STEREO3D_2D;
2099  break;
2100  case 0x23:
2101  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2102  break;
2103  }
2104  }
2105  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2106  return;
2107  }
2108 }
2109 
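[Editor's note, not part of mpeg12dec.c] The has_stereo3d / stereo3d fields filled in from the JP3D user data above are later exported as AV_FRAME_DATA_STEREO3D side data on the output frame (in frame-start code not shown in this excerpt). A hedged, consumer-side sketch of reading that side data:

    #include <stdio.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    /* Illustrative only: report the stereo packing signalled by the decoder. */
    static void report_stereo3d(const AVFrame *frame)
    {
        const AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_STEREO3D);
        if (sd) {
            const AVStereo3D *s3d = (const AVStereo3D *)sd->data;
            printf("stereo3d type: %s\n", av_stereo3d_type_name(s3d->type));
        }
    }
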
2110 static int mpeg_decode_gop(AVCodecContext *avctx,
2111  const uint8_t *buf, int buf_size)
2112 {
2113  Mpeg1Context *s1 = avctx->priv_data;
2114  MpegEncContext *s = &s1->mpeg_enc_ctx;
2115  int broken_link;
2116  int64_t tc;
2117 
2118  int ret = init_get_bits8(&s->gb, buf, buf_size);
2119  if (ret < 0)
2120  return ret;
2121 
2122  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2123 
2124  s1->closed_gop = get_bits1(&s->gb);
2125  /* broken_link indicates that, after editing, the
2126  * reference frames of the first B-frames following the GOP I-frame
2127  * are missing (open GOP) */
2128  broken_link = get_bits1(&s->gb);
2129 
2130  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2131  char tcbuf[AV_TIMECODE_STR_SIZE];
2132  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2133  av_log(s->avctx, AV_LOG_DEBUG,
2134  "GOP (%s) closed_gop=%d broken_link=%d\n",
2135  tcbuf, s1->closed_gop, broken_link);
2136  }
2137 
2138  return 0;
2139 }
2140 
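[Editor's note, not part of mpeg12dec.c] The 25-bit time_code word read above follows the ISO/IEC 13818-2 GOP header layout (drop_frame_flag, hours, minutes, marker, seconds, pictures), which is also what av_timecode_make_mpeg_tc_string() expects. A small sketch of unpacking it by hand, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: split the 25-bit GOP time_code into its fields. */
    static void print_gop_timecode(uint32_t tc)
    {
        int drop    = (tc >> 24) & 0x01; /* drop_frame_flag    */
        int hours   = (tc >> 19) & 0x1f; /* time_code_hours    */
        int minutes = (tc >> 13) & 0x3f; /* time_code_minutes  */
        /* bit 12 is the marker_bit */
        int seconds = (tc >>  6) & 0x3f; /* time_code_seconds  */
        int frames  =  tc        & 0x3f; /* time_code_pictures */

        printf("%02d:%02d:%02d%c%02d\n", hours, minutes, seconds, drop ? ';' : ':', frames);
    }
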
2141 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2142  int *got_output, const uint8_t *buf, int buf_size)
2143 {
2144  Mpeg1Context *s = avctx->priv_data;
2145  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2146  const uint8_t *buf_ptr = buf;
2147  const uint8_t *buf_end = buf + buf_size;
2148  int ret, input_size;
2149  int last_code = 0, skip_frame = 0;
2150  int picture_start_code_seen = 0;
2151 
2152  for (;;) {
2153  /* find next start code */
2154  uint32_t start_code = -1;
2155  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2156  if (start_code > 0x1ff) {
2157  if (!skip_frame) {
2158  if (HAVE_THREADS &&
2159  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2160  !avctx->hwaccel) {
2161  int i;
2162  av_assert0(avctx->thread_count > 1);
2163 
2164  avctx->execute(avctx, slice_decode_thread,
2165  &s2->thread_context[0], NULL,
2166  s->slice_count, sizeof(void *));
2167  for (i = 0; i < s->slice_count; i++)
2168  s2->er.error_count += s2->thread_context[i]->er.error_count;
2169  }
2170 
2171  ret = slice_end(avctx, picture);
2172  if (ret < 0)
2173  return ret;
2174  else if (ret) {
2175  // FIXME: merge with the stuff in mpeg_decode_slice
2176  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2177  *got_output = 1;
2178  }
2179  }
2180  s2->pict_type = 0;
2181 
2182  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2183  return AVERROR_INVALIDDATA;
2184 
2185  return FFMAX(0, buf_ptr - buf);
2186  }
2187 
2188  input_size = buf_end - buf_ptr;
2189 
2190  if (avctx->debug & FF_DEBUG_STARTCODE)
2191  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2192  start_code, buf_ptr - buf, input_size);
2193 
2194  /* prepare data for next start code */
2195  switch (start_code) {
2196  case SEQ_START_CODE:
2197  if (last_code == 0) {
2198  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2199  if (buf != avctx->extradata)
2200  s->sync = 1;
2201  } else {
2202  av_log(avctx, AV_LOG_ERROR,
2203  "ignoring SEQ_START_CODE after %X\n", last_code);
2204  if (avctx->err_recognition & AV_EF_EXPLODE)
2205  return AVERROR_INVALIDDATA;
2206  }
2207  break;
2208 
2209  case PICTURE_START_CODE:
2210  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2211  /* If it's a frame picture, there can't be more than one picture header.
2212  Yet, it does happen and we need to handle it. */
2213  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2214  break;
2215  }
2216  picture_start_code_seen = 1;
2217 
2218  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2219  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2220  break;
2221  }
2222 
2223  if (s2->width <= 0 || s2->height <= 0) {
2224  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2225  s2->width, s2->height);
2226  return AVERROR_INVALIDDATA;
2227  }
2228 
2229  if (s->tmpgexs){
2230  s2->intra_dc_precision= 3;
2231  s2->intra_matrix[0]= 1;
2232  }
2233  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2234  !avctx->hwaccel && s->slice_count) {
2235  int i;
2236 
2237  avctx->execute(avctx, slice_decode_thread,
2238  s2->thread_context, NULL,
2239  s->slice_count, sizeof(void *));
2240  for (i = 0; i < s->slice_count; i++)
2241  s2->er.error_count += s2->thread_context[i]->er.error_count;
2242  s->slice_count = 0;
2243  }
2244  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2245  ret = mpeg_decode_postinit(avctx);
2246  if (ret < 0) {
2247  av_log(avctx, AV_LOG_ERROR,
2248  "mpeg_decode_postinit() failure\n");
2249  return ret;
2250  }
2251 
2252  /* We have a complete image: we try to decompress it. */
2253  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2254  s2->pict_type = 0;
2255  s->first_slice = 1;
2256  last_code = PICTURE_START_CODE;
2257  } else {
2258  av_log(avctx, AV_LOG_ERROR,
2259  "ignoring pic after %X\n", last_code);
2260  if (avctx->err_recognition & AV_EF_EXPLODE)
2261  return AVERROR_INVALIDDATA;
2262  }
2263  break;
2264  case EXT_START_CODE:
2265  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2266  if (ret < 0)
2267  return ret;
2268 
2269  switch (get_bits(&s2->gb, 4)) {
2270  case 0x1:
2271  if (last_code == 0) {
2272  mpeg_decode_sequence_extension(s);
2273  } else {
2274  av_log(avctx, AV_LOG_ERROR,
2275  "ignoring seq ext after %X\n", last_code);
2276  if (avctx->err_recognition & AV_EF_EXPLODE)
2277  return AVERROR_INVALIDDATA;
2278  }
2279  break;
2280  case 0x2:
2281  mpeg_decode_sequence_display_extension(s);
2282  break;
2283  case 0x3:
2284  mpeg_decode_quant_matrix_extension(s2);
2285  break;
2286  case 0x7:
2287  mpeg_decode_picture_display_extension(s);
2288  break;
2289  case 0x8:
2290  if (last_code == PICTURE_START_CODE) {
2291  int ret = mpeg_decode_picture_coding_extension(s);
2292  if (ret < 0)
2293  return ret;
2294  } else {
2295  av_log(avctx, AV_LOG_ERROR,
2296  "ignoring pic cod ext after %X\n", last_code);
2297  if (avctx->err_recognition & AV_EF_EXPLODE)
2298  return AVERROR_INVALIDDATA;
2299  }
2300  break;
2301  }
2302  break;
2303  case USER_START_CODE:
2304  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2305  break;
2306  case GOP_START_CODE:
2307  if (last_code == 0) {
2308  s2->first_field = 0;
2309  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2310  if (ret < 0)
2311  return ret;
2312  s->sync = 1;
2313  } else {
2314  av_log(avctx, AV_LOG_ERROR,
2315  "ignoring GOP_START_CODE after %X\n", last_code);
2316  if (avctx->err_recognition & AV_EF_EXPLODE)
2317  return AVERROR_INVALIDDATA;
2318  }
2319  break;
2320  default:
2321  if (start_code >= SLICE_MIN_START_CODE &&
2322  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2323  if (s2->progressive_sequence && !s2->progressive_frame) {
2324  s2->progressive_frame = 1;
2325  av_log(s2->avctx, AV_LOG_ERROR,
2326  "interlaced frame in progressive sequence, ignoring\n");
2327  }
2328 
2329  if (s2->picture_structure == 0 ||
2330  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2331  av_log(s2->avctx, AV_LOG_ERROR,
2332  "picture_structure %d invalid, ignoring\n",
2333  s2->picture_structure);
2334  s2->picture_structure = PICT_FRAME;
2335  }
2336 
2337  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2338  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2339 
2340  if (s2->picture_structure == PICT_FRAME) {
2341  s2->first_field = 0;
2342  s2->v_edge_pos = 16 * s2->mb_height;
2343  } else {
2344  s2->first_field ^= 1;
2345  s2->v_edge_pos = 8 * s2->mb_height;
2346  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2347  }
2348  }
2349  if (start_code >= SLICE_MIN_START_CODE &&
2350  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2351  const int field_pic = s2->picture_structure != PICT_FRAME;
2352  int mb_y = start_code - SLICE_MIN_START_CODE;
2353  last_code = SLICE_MIN_START_CODE;
2354  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2355  mb_y += (*buf_ptr&0xE0)<<2;
2356 
2357  mb_y <<= field_pic;
2358  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2359  mb_y++;
2360 
2361  if (buf_end - buf_ptr < 2) {
2362  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2363  return AVERROR_INVALIDDATA;
2364  }
2365 
2366  if (mb_y >= s2->mb_height) {
2367  av_log(s2->avctx, AV_LOG_ERROR,
2368  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2369  return AVERROR_INVALIDDATA;
2370  }
2371 
2372  if (!s2->last_picture_ptr) {
2373  /* Skip B-frames if we do not have reference frames and
2374  * GOP is not closed. */
2375  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2376  if (!s->closed_gop) {
2377  skip_frame = 1;
2378  av_log(s2->avctx, AV_LOG_DEBUG,
2379  "Skipping B slice due to open GOP\n");
2380  break;
2381  }
2382  }
2383  }
2384  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2385  s->sync = 1;
2386  if (!s2->next_picture_ptr) {
2387  /* Skip P-frames if we do not have a reference frame or
2388  * we have an invalid header. */
2389  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2390  skip_frame = 1;
2391  av_log(s2->avctx, AV_LOG_DEBUG,
2392  "Skipping P slice due to !sync\n");
2393  break;
2394  }
2395  }
2396  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2397  s2->pict_type == AV_PICTURE_TYPE_B) ||
2398  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2399  s2->pict_type != AV_PICTURE_TYPE_I) ||
2400  avctx->skip_frame >= AVDISCARD_ALL) {
2401  skip_frame = 1;
2402  break;
2403  }
2404 
2405  if (!s->mpeg_enc_ctx_allocated)
2406  break;
2407 
2408  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2409  if (mb_y < avctx->skip_top ||
2410  mb_y >= s2->mb_height - avctx->skip_bottom)
2411  break;
2412  }
2413 
2414  if (!s2->pict_type) {
2415  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2416  if (avctx->err_recognition & AV_EF_EXPLODE)
2417  return AVERROR_INVALIDDATA;
2418  break;
2419  }
2420 
2421  if (s->first_slice) {
2422  skip_frame = 0;
2423  s->first_slice = 0;
2424  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2425  return ret;
2426  }
2427  if (!s2->current_picture_ptr) {
2428  av_log(avctx, AV_LOG_ERROR,
2429  "current_picture not initialized\n");
2430  return AVERROR_INVALIDDATA;
2431  }
2432 
2433  if (HAVE_THREADS &&
2434  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2435  !avctx->hwaccel) {
2436  int threshold = (s2->mb_height * s->slice_count +
2437  s2->slice_context_count / 2) /
2438  s2->slice_context_count;
2439  av_assert0(avctx->thread_count > 1);
2440  if (threshold <= mb_y) {
2441  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2442 
2443  thread_context->start_mb_y = mb_y;
2444  thread_context->end_mb_y = s2->mb_height;
2445  if (s->slice_count) {
2446  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2447  ret = ff_update_duplicate_context(thread_context, s2);
2448  if (ret < 0)
2449  return ret;
2450  }
2451  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2452  if (ret < 0)
2453  return ret;
2454  s->slice_count++;
2455  }
2456  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2457  } else {
2458  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2459  emms_c();
2460 
2461  if (ret < 0) {
2462  if (avctx->err_recognition & AV_EF_EXPLODE)
2463  return ret;
2464  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2465  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2466   s2->resync_mb_y, s2->mb_x, s2->mb_y,
2467   ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2468  } else {
2469  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2470   s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2471   ER_AC_END | ER_DC_END | ER_MV_END);
2472  }
2473  }
2474  }
2475  break;
2476  }
2477  }
2478 }
2479 
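[Editor's note, not part of mpeg12dec.c] decode_chunks() is only reached through mpeg_decode_frame() below; from an application's point of view the decoder is driven with the ordinary send/receive API. A minimal usage sketch, assuming the caller has already opened an AVCodecContext for AV_CODEC_ID_MPEG2VIDEO (error handling mostly omitted):

    #include <libavcodec/avcodec.h>

    /* Minimal, illustrative decode loop for one packet. */
    static int decode_one_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(dec, pkt); /* pkt == NULL flushes the decoder */
        if (ret < 0)
            return ret;
        while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
            /* frame now holds a decoded picture (plus any side data set above) */
            av_frame_unref(frame);
        }
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }
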
2480 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2481  int *got_output, AVPacket *avpkt)
2482 {
2483  const uint8_t *buf = avpkt->data;
2484  int ret;
2485  int buf_size = avpkt->size;
2486  Mpeg1Context *s = avctx->priv_data;
2487  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2488 
2489  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2490  /* special case for last picture */
2491  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2492  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2493  if (ret < 0)
2494  return ret;
2495 
2496  s2->next_picture_ptr = NULL;
2497 
2498  *got_output = 1;
2499  }
2500  return buf_size;
2501  }
2502 
2503  if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
2504  || s2->codec_tag == AV_RL32("BW10")
2505  ))
2506  vcr2_init_sequence(avctx);
2507 
2508  s->slice_count = 0;
2509 
2510  if (avctx->extradata && !s->extradata_decoded) {
2511  ret = decode_chunks(avctx, picture, got_output,
2512  avctx->extradata, avctx->extradata_size);
2513  if (*got_output) {
2514  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2515  av_frame_unref(picture);
2516  *got_output = 0;
2517  }
2518  s->extradata_decoded = 1;
2519  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2520  s2->current_picture_ptr = NULL;
2521  return ret;
2522  }
2523  }
2524 
2525  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2526  if (ret<0 || *got_output) {
2527  s2->current_picture_ptr = NULL;
2528 
2529  if (s->timecode_frame_start != -1 && *got_output) {
2530  char tcbuf[AV_TIMECODE_STR_SIZE];
2531  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2532   AV_FRAME_DATA_GOP_TIMECODE,
2533  sizeof(int64_t));
2534  if (!tcside)
2535  return AVERROR(ENOMEM);
2536  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2537 
2538  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2539  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2540 
2541  s->timecode_frame_start = -1;
2542  }
2543  }
2544 
2545  return ret;
2546 }
2547 
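[Editor's note, not part of mpeg12dec.c] When a GOP header was seen, mpeg_decode_frame() above attaches the timecode to the output frame twice: as AV_FRAME_DATA_GOP_TIMECODE side data and as a "timecode" metadata string. A hedged sketch of reading both on the caller's side:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <libavutil/dict.h>
    #include <libavutil/frame.h>
    #include <libavutil/timecode.h>

    /* Illustrative only: print the GOP timecode exported on a decoded frame. */
    static void show_gop_timecode(const AVFrame *frame)
    {
        const AVDictionaryEntry *e = av_dict_get(frame->metadata, "timecode", NULL, 0);
        if (e)
            printf("timecode (metadata): %s\n", e->value);

        const AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_GOP_TIMECODE);
        if (sd && sd->size >= sizeof(int64_t)) {
            char buf[AV_TIMECODE_STR_SIZE];
            int64_t tc;
            memcpy(&tc, sd->data, sizeof(tc));
            printf("timecode (side data): %s\n",
                   av_timecode_make_mpeg_tc_string(buf, (uint32_t)tc));
        }
    }
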
2548 static void flush(AVCodecContext *avctx)
2549 {
2550  Mpeg1Context *s = avctx->priv_data;
2551 
2552  s->sync = 0;
2553  s->closed_gop = 0;
2554 
2555  av_buffer_unref(&s->a53_buf_ref);
2556  ff_mpeg_flush(avctx);
2557 }
2558 
2559 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2560 {
2561  Mpeg1Context *s = avctx->priv_data;
2562 
2563  if (s->mpeg_enc_ctx_allocated)
2564  ff_mpv_common_end(&s->mpeg_enc_ctx);
2565  av_buffer_unref(&s->a53_buf_ref);
2566  return 0;
2567 }
2568 
2569 const FFCodec ff_mpeg1video_decoder = {
2570  .p.name = "mpeg1video",
2571  CODEC_LONG_NAME("MPEG-1 video"),
2572  .p.type = AVMEDIA_TYPE_VIDEO,
2573  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2574  .priv_data_size = sizeof(Mpeg1Context),
2575  .init = mpeg_decode_init,
2576  .close = mpeg_decode_end,
2577  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2578  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2579  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2580  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2581  .flush = flush,
2582  .p.max_lowres = 3,
2583  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2584  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2585 #if CONFIG_MPEG1_NVDEC_HWACCEL
2586  HWACCEL_NVDEC(mpeg1),
2587 #endif
2588 #if CONFIG_MPEG1_VDPAU_HWACCEL
2589  HWACCEL_VDPAU(mpeg1),
2590 #endif
2591 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2592  HWACCEL_VIDEOTOOLBOX(mpeg1),
2593 #endif
2594  NULL
2595  },
2596 };
2597 
2598 const FFCodec ff_mpeg2video_decoder = {
2599  .p.name = "mpeg2video",
2600  CODEC_LONG_NAME("MPEG-2 video"),
2601  .p.type = AVMEDIA_TYPE_VIDEO,
2602  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2603  .priv_data_size = sizeof(Mpeg1Context),
2604  .init = mpeg_decode_init,
2605  .close = mpeg_decode_end,
2606  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2607  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2608  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2609  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2610  .flush = flush,
2611  .p.max_lowres = 3,
2612  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
2613  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2614 #if CONFIG_MPEG2_DXVA2_HWACCEL
2615  HWACCEL_DXVA2(mpeg2),
2616 #endif
2617 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2618  HWACCEL_D3D11VA(mpeg2),
2619 #endif
2620 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2621  HWACCEL_D3D11VA2(mpeg2),
2622 #endif
2623 #if CONFIG_MPEG2_NVDEC_HWACCEL
2624  HWACCEL_NVDEC(mpeg2),
2625 #endif
2626 #if CONFIG_MPEG2_VAAPI_HWACCEL
2627  HWACCEL_VAAPI(mpeg2),
2628 #endif
2629 #if CONFIG_MPEG2_VDPAU_HWACCEL
2630  HWACCEL_VDPAU(mpeg2),
2631 #endif
2632 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2633  HWACCEL_VIDEOTOOLBOX(mpeg2),
2634 #endif
2635  NULL
2636  },
2637 };
2638 
2639 //legacy decoder
2640 const FFCodec ff_mpegvideo_decoder = {
2641  .p.name = "mpegvideo",
2642  CODEC_LONG_NAME("MPEG-1 video"),
2643  .p.type = AVMEDIA_TYPE_VIDEO,
2644  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2645  .priv_data_size = sizeof(Mpeg1Context),
2646  .init = mpeg_decode_init,
2647  .close = mpeg_decode_end,
2648  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2649  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2650  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2651  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2652  .flush = flush,
2653  .p.max_lowres = 3,
2654 };
2655 
2656 typedef struct IPUContext {
2657  MpegEncContext m;
2658 
2659  int flags;
2660  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2661 } IPUContext;
2662 
2663 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2664  int *got_frame, AVPacket *avpkt)
2665 {
2666  IPUContext *s = avctx->priv_data;
2667  MpegEncContext *m = &s->m;
2668  GetBitContext *gb = &m->gb;
2669  int ret;
2670 
2671  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
2672  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2 + 3*4 + 2*2 + 2*6))
2673  return AVERROR_INVALIDDATA;
2674 
2675  ret = ff_get_buffer(avctx, frame, 0);
2676  if (ret < 0)
2677  return ret;
2678 
2679  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2680  if (ret < 0)
2681  return ret;
2682 
2683  s->flags = get_bits(gb, 8);
2684  m->intra_dc_precision = s->flags & 3;
2685  m->q_scale_type = !!(s->flags & 0x40);
2686  m->intra_vlc_format = !!(s->flags & 0x20);
2687  m->alternate_scan = !!(s->flags & 0x10);
2688 
2689  if (s->flags & 0x10) {
2690  ff_init_scantable(m->idsp.idct_permutation,
2691  &m->intra_scantable, ff_alternate_vertical_scan);
2692  } else {
2693  ff_init_scantable(m->idsp.idct_permutation,
2694  &m->intra_scantable, ff_zigzag_direct);
2695  }
2696 
2697  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2698  m->qscale = 1;
2699 
2700  for (int y = 0; y < avctx->height; y += 16) {
2701  int intraquant;
2702 
2703  for (int x = 0; x < avctx->width; x += 16) {
2704  if (x || y) {
2705  if (!get_bits1(gb))
2706  return AVERROR_INVALIDDATA;
2707  }
2708  if (get_bits1(gb)) {
2709  intraquant = 0;
2710  } else {
2711  if (!get_bits1(gb))
2712  return AVERROR_INVALIDDATA;
2713  intraquant = 1;
2714  }
2715 
2716  if (s->flags & 4)
2717  skip_bits1(gb);
2718 
2719  if (intraquant)
2720  m->qscale = mpeg_get_qscale(m);
2721 
2722  memset(s->block, 0, sizeof(s->block));
2723 
2724  for (int n = 0; n < 6; n++) {
2725  if (s->flags & 0x80) {
2726  ret = ff_mpeg1_decode_block_intra(&m->gb,
2727   m->intra_matrix,
2728   m->intra_scantable.permutated,
2729  m->last_dc, s->block[n],
2730  n, m->qscale);
2731  if (ret >= 0)
2732  m->block_last_index[n] = ret;
2733  } else {
2734  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2735  }
2736 
2737  if (ret < 0)
2738  return ret;
2739  }
2740 
2741  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2742  frame->linesize[0], s->block[0]);
2743  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2744  frame->linesize[0], s->block[1]);
2745  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2746  frame->linesize[0], s->block[2]);
2747  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2748  frame->linesize[0], s->block[3]);
2749  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2750  frame->linesize[1], s->block[4]);
2751  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2752  frame->linesize[2], s->block[5]);
2753  }
2754  }
2755 
2756  align_get_bits(gb);
2757  if (get_bits_left(gb) != 32)
2758  return AVERROR_INVALIDDATA;
2759 
2760  frame->pict_type = AV_PICTURE_TYPE_I;
2761  frame->flags |= AV_FRAME_FLAG_KEY;
2762  *got_frame = 1;
2763 
2764  return avpkt->size;
2765 }
2766 
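[Editor's note, not part of mpeg12dec.c] The size check at the top of ipu_decode_frame() budgets at least 2 + 3*4 + 2*2 + 2*6 = 30 bits per 16x16 macroblock (macroblock header, luma/chroma DC VLCs, AC end-of-block codes). As a worked example with hypothetical dimensions, a 720x576 IPU frame has 45*36 = 1620 macroblocks, so packets shorter than 1620*30 bits (about 6075 bytes) are rejected up front:

    #include <stdint.h>

    /* Illustrative only: the lower bound used by the packet-size check above. */
    static int64_t ipu_min_packet_bits(int width, int height)
    {
        int64_t mbs = ((width + 15) / 16) * (int64_t)((height + 15) / 16);
        return mbs * (2 + 3*4 + 2*2 + 2*6); /* 30 bits per intra macroblock */
    }
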
2767 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2768 {
2769  IPUContext *s = avctx->priv_data;
2770  MpegEncContext *m = &s->m;
2771 
2772  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2773 
2774  ff_mpv_decode_init(m, avctx);
2775  ff_mpeg12_init_vlcs();
2776 
2777  for (int i = 0; i < 64; i++) {
2778  int j = m->idsp.idct_permutation[i];
2779  int v = ff_mpeg1_default_intra_matrix[i];
2780  m->intra_matrix[j] = v;
2781  m->chroma_intra_matrix[j] = v;
2782  }
2783 
2784  for (int i = 0; i < 64; i++) {
2785  int j = m->idsp.idct_permutation[i];
2786  int v = ff_mpeg1_default_non_intra_matrix[i];
2787  m->inter_matrix[j] = v;
2788  m->chroma_inter_matrix[j] = v;
2789  }
2790 
2791  return 0;
2792 }
2793 
2794 static av_cold int ipu_decode_end(AVCodecContext *avctx)
2795 {
2796  IPUContext *s = avctx->priv_data;
2797 
2798  ff_mpv_common_end(&s->m);
2799 
2800  return 0;
2801 }
2802 
2803 const FFCodec ff_ipu_decoder = {
2804  .p.name = "ipu",
2805  CODEC_LONG_NAME("IPU Video"),
2806  .p.type = AVMEDIA_TYPE_VIDEO,
2807  .p.id = AV_CODEC_ID_IPU,
2808  .priv_data_size = sizeof(IPUContext),
2809  .init = ipu_decode_init,
2810  FF_CODEC_DECODE_CB(ipu_decode_frame),
2811  .close = ipu_decode_end,
2812  .p.capabilities = AV_CODEC_CAP_DR1,
2813  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2814 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1852
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1435
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:261
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:74
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:126
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:72
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1903
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:511
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1264
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2480
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:425
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:143
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:415
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:263
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:240
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1412
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2767
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:220
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:570
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2640
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:491
ipu_decode_end
static av_cold int ipu_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2794
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:420
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:82
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:321
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:175
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:127
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:845
reverse.h
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:649
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:84
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:896
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:32
thread.h
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1389
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:265
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:129
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:77
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:81
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1803
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:89
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:418
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1014
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:123
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2141
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1766
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1198
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1532
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1661
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:64
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:64
val
static double val(void *priv, double ch)
Definition: aeval.c:78
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:83
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:330
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:868
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:636
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:504
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1762
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:94
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
IPUContext
Definition: mpeg12dec.c:2656
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:828
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:782
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2598
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:80
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2663
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:31
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:839
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1027
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2548
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:78
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:543
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:744
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:892
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:68
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:118
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
s1
#define s1
Definition: regdef.h:38
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1714
Mpeg1Context::mpeg_enc_ctx_allocated
int mpeg_enc_ctx_allocated
Definition: mpeg12dec.c:67
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:260
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:47
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:76
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1117
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:69
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:320
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:863
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:241
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1284
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
frame
static AVFrame * frame
Definition: demux_decode.c:54
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:451
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:277
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:78
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:140
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1039
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:30
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:81
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:694
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:696
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:491
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1141
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:297
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
profiles.h
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:63
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:333
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:780
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:128
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
mpeg12codecs.h
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1740
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:78
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:217
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
s2
#define s2
Definition: regdef.h:39
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:73
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:528
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:442
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1617
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:318
AVPacket::size
int size
Definition: packet.h:492
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:194
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:309
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
RL_VLC_ELEM
Definition: vlc.h:53
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:416
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:87
shift
static int shift(int a, int b)
Definition: bonk.c:262
IPUContext::flags
int flags
Definition: mpeg12dec.c:2659
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:295
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2569
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1544
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:541
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:967
AVCodecHWConfigInternal
Definition: hwconfig.h:25
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:295
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:51
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:549
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:71
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:785
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:172
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:264
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:413
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1543
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:113
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
emms.h
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:321
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:72
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::chroma_inter_matrix
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:298
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:245
code
From the filter design documentation: test the status of the outputs and forward it, returning FFERROR_NOT_READY where appropriate; a filter that internally stores one or a few frames may treat them as part of the FIFO and delay acknowledging a status change accordingly (example code).
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1904
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:101
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:542
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:88
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:164
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1396
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
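A small illustrative use (values are only an example): turning a floating-point frame rate into a rational.

    #include "libavutil/rational.h"

    /* Sketch: 23.976... is recovered as (approximately) 24000/1001
     * when the numerator/denominator bound is large enough. */
    static AVRational guess_fps(double fps)
    {
        return av_d2q(fps, 100000);
    }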
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:152
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:91
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2657
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:444
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1046
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:142
AVCodecContext::height
int height
Definition: avcodec.h:621
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:656
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:70
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:606
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2110
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
From the frame-threading documentation: pkt_dts and pkt_pts in AVFrame work as usual; codecs whose streams do not reset across frames cannot have their bitstreams decoded in parallel; header parsing and other setup should be moved to before the decode process starts, and ff_thread_finish_setup() should be called afterwards.
left
Excerpt from the Snow bitstream description (half-pel interpolation filter coefficients, high-level bitstream structure, range coder, and motion-vector prediction); see snow.txt.
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:873
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:367
AV_RL32
Read a 32-bit little-endian value (bytestream reader/writer template).
Definition: bytestream.h:92
mpeg12data.h
mpeg_field_start
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1273
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:699
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1551
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
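A minimal sketch of turning the 25-bit GOP timecode into a string; tc25bit is a placeholder value taken from the GOP header.

    #include <stdint.h>
    #include <stdio.h>
    #include "libavutil/timecode.h"

    /* Sketch: format a 25-bit MPEG GOP timecode as HH:MM:SS[:;]FF. */
    static void print_gop_timecode(uint32_t tc25bit)
    {
        char buf[AV_TIMECODE_STR_SIZE];
        av_timecode_make_mpeg_tc_string(buf, tc25bit);
        printf("GOP timecode: %s\n", buf);
    }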
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:438
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1562
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:695
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1596
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:708
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:133
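A worked example of what this helper computes (a standalone sketch, not the FFmpeg implementation): interpret the low bits of val as a signed two's-complement number.

    /* sign_extend_example(0x1F, 5) == -1, sign_extend_example(0x0F, 5) == 15 */
    static int sign_extend_example(int val, unsigned bits)
    {
        unsigned shift = 8 * sizeof(int) - bits;
        return (int)((unsigned)val << shift) >> shift;
    }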
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:496
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:75
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:579
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1906
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
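In this decoder the frame-rate table entry is combined with the MPEG-2 extension factor in this fashion; a hedged sketch, assuming frame_rate_ext already holds the (n+1)/(d+1) factor:

    #include "libavutil/rational.h"

    /* Sketch: effective frame rate = table entry * extension factor. */
    static AVRational effective_framerate(AVRational base, AVRational frame_rate_ext)
    {
        return av_mul_q(base, frame_rate_ext);
    }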
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:124
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1388
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output.
Definition: avcodec.h:636
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:406
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2559
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
MpegEncContext::inter_scantable
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:76
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:88
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2803
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
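A minimal sketch of attaching stereo 3D side data to a frame (the chosen packing type is just an example):

    #include "libavutil/error.h"
    #include "libavutil/stereo3d.h"

    /* Sketch: allocate AV_FRAME_DATA_STEREO3D side data and fill the packing type. */
    static int attach_stereo3d(AVFrame *frame)
    {
        AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
        if (!s3d)
            return AVERROR(ENOMEM);
        s3d->type = AV_STEREO3D_TOPBOTTOM;
        return 0;
    }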
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:442
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:466
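Equivalently, the tag is what MKTAG() builds; a tiny sketch:

    #include "libavutil/common.h"   /* MKTAG */

    /* 'A' ends up in the least significant byte, 'D' in the most significant. */
    static const unsigned tag_abcd = MKTAG('A', 'B', 'C', 'D');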
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:66
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:153
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:257
AVPacket
This structure stores compressed data.
Definition: packet.h:468
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:60
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
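This is the pattern behind exposing, for example, the GOP timecode as frame metadata; a hedged sketch with a placeholder key/value:

    #include "libavutil/dict.h"
    #include "libavutil/frame.h"

    /* Sketch: store a string entry in a frame's metadata dictionary. */
    static int tag_frame(AVFrame *frame, const char *tc_str)
    {
        return av_dict_set(&frame->metadata, "timecode", tc_str, 0);
    }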
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:64
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1074
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:621
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:385
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:79
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:116
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2660
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:125
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2040
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:143
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
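The usual pattern is to fall back to an unspecified aspect ratio when the signaled one is rejected; a small sketch:

    #include "libavutil/imgutils.h"
    #include "libavutil/rational.h"

    /* Sketch: keep the signaled SAR only if it passes the sanity check. */
    static AVRational sanitize_sar(unsigned w, unsigned h, AVRational sar)
    {
        if (av_image_check_sar(w, h, sar) < 0)
            return (AVRational){ 0, 1 };   /* unknown */
        return sar;
    }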
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:86
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:142
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:445
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1377
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1376
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1175
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:822
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:28
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:73
Mpeg1Context
Definition: mpeg12dec.c:65
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:296
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1212
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:85
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:236
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1385