FFmpeg
mpegvideo.c
/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mem.h"
35 
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "idctdsp.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "refstruct.h"

static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only MPEG-1 */
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
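
/* Illustrative note: the inter path reconstructs |level| as
 * ((2*|level| + 1) * qscale * quant_matrix[j]) >> 4, i.e. it adds the half
 * quantization step that the intra path above does not, and then applies the
 * same oddification. For example level = 2, qscale = 4, quant_matrix[j] = 16:
 * (5 * 4 * 16) >> 4 = 20, and (20 - 1) | 1 = 19. */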

static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}
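
/* Illustrative note: block[63] ^= sum & 1 is MPEG-2 mismatch control.
 * sum starts at -1, so after accumulating every reconstructed coefficient
 * the expression toggles the least significant bit of the last coefficient
 * exactly when the total of all coefficients is even, which is the parity
 * correction a bit-exact reference decoder requires. The non-bitexact
 * variant above omits this for speed. */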

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}

static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n] >= 0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    } else {
        qadd = 0;
    }
    if (s->ac_pred)
        nCoeffs = 63;
    else
        nCoeffs = s->intra_scantable.raster_end[ s->block_last_index[n] ];

    for (i = 1; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
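
/* Illustrative example: the H.263/MPEG-4 reconstruction is
 * |level| * 2 * qscale + ((qscale - 1) | 1) with the sign reapplied; with
 * h263_aic (advanced intra coding) qadd is 0 and the DC scale is not applied
 * here. For qscale = 6 and level = -3: qmul = 12, qadd = 5, and the result
 * is -(3 * 12 + 5) = -41. */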

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n] >= 0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs = s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for (i = 0; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}


static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h * linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h * linesize, 128, 8);
}

/* init common dct for both encoder and decoder */
static av_cold void dsp_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i = 0; i < 4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }
}
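
/* Note: with the FF_DEBUG_NOMC debug flag set (typically via the avcodec
 * "debug" option value "nomc", where the build exposes it), every halfpel
 * put/avg motion-compensation routine is replaced by gray16()/gray8() above,
 * so inter-predicted blocks become flat mid-gray and only residual and intra
 * data remain visible. */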

av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
{
    st->scantable = src_scantable;

    for (int i = 0, end = -1; i < 64; i++) {
        int j = src_scantable[i];
        st->permutated[i] = permutation[j];
        if (permutation[j] > end)
            end = permutation[j];
        st->raster_end[i] = end;
    }
}
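
/* Illustrative example: with an identity IDCT permutation, permutated[]
 * simply equals src_scantable[], and raster_end[i] is the largest permuted
 * (raster) index among the first i+1 scan positions. The H.263 unquantizers
 * above use raster_end[block_last_index] as their loop bound because they
 * walk the block in raster order rather than in scan order. */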

av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_permute_scantable(s->permutated_intra_h_scantable, ff_alternate_horizontal_scan,
                         s->idsp.idct_permutation);
    ff_permute_scantable(s->permutated_intra_v_scantable, ff_alternate_vertical_scan,
                         s->idsp.idct_permutation);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if HAVE_INTRINSICS_NEON
    ff_mpv_common_init_neon(s);
#endif

#if ARCH_ARM
    ff_mpv_common_init_arm(s);
#elif ARCH_PPC
    ff_mpv_common_init_ppc(s);
#elif ARCH_X86
    ff_mpv_common_init_x86(s);
#elif ARCH_MIPS
    ff_mpv_common_init_mips(s);
#endif
}

static int init_duplicate_context(MpegEncContext *s)
{
    if (s->encoding) {
        s->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->me.map));
        if (!s->me.map)
            return AVERROR(ENOMEM);
        s->me.score_map = s->me.map + ME_MAP_SIZE;

        if (s->noise_reduction) {
            if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
                return AVERROR(ENOMEM);
        }
    }
    if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 1 + s->encoding))
        return AVERROR(ENOMEM);
    s->block = s->blocks[0];

    if (s->out_format == FMT_H263) {
        int mb_height = s->msmpeg4_version == MSMP4_VC1 ?
                            FFALIGN(s->mb_height, 2) : s->mb_height;
        int y_size  = s->b8_stride * (2 * mb_height + 1);
        int c_size  = s->mb_stride * (mb_height + 1);
        int yc_size = y_size + 2 * c_size;
        /* ac values */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
}

int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
{
    int nb_slices = s->slice_context_count, ret;

    /* We initialize the copies before the original so that
     * fields allocated in init_duplicate_context are NULL after
     * copying. This prevents double-frees upon allocation error. */
    for (int i = 1; i < nb_slices; i++) {
        s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
        if (!s->thread_context[i])
            return AVERROR(ENOMEM);
        if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
            return ret;
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i    ) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y   =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
    s->start_mb_y = 0;
    s->end_mb_y   = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
                                  : s->mb_height;
    return init_duplicate_context(s);
}
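
/* Illustrative example of the slice split above: with mb_height = 36 and
 * nb_slices = 4, the rounded boundaries (36 * i + 2) / 4 evaluate to
 * 0, 9, 18, 27 and 36, so the slice contexts cover macroblock rows 0-9,
 * 9-18, 18-27 and 27-36, an (almost) equal share each. */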

static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->sc.scratchpad_buf);
    s->me.temp = s->me.scratchpad =
    s->sc.obmc_scratchpad = NULL;
    s->sc.linesize = 0;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    s->me.score_map = NULL;
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void free_duplicate_contexts(MpegEncContext *s)
{
    for (int i = 1; i < s->slice_context_count; i++) {
        free_duplicate_context(s->thread_context[i]);
        av_freep(&s->thread_context[i]);
    }
    free_duplicate_context(s);
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(sc);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
{
    MpegEncContext bak;
    int ret;
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);

    ret = ff_mpv_framesize_alloc(dst->avctx, &dst->sc, dst->linesize);
    if (ret < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    return 0;
}
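
/* Note: the backup/copy/restore sequence above overwrites the whole slice
 * context with the main context and then restores the per-thread fields
 * (scratch buffers, ME maps, block pointers, mb row range) saved by
 * backup_duplicate_context(), so each slice thread picks up current frame
 * state without losing its private allocations. */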

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table     =
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->picture_number = 0;

    s->f_code = 1;
    s->b_code = 1;

    s->slice_context_count = 1;
}

static void free_buffer_pools(BufferPoolContext *pools)
{
    ff_refstruct_pool_uninit(&pools->mbskip_table_pool);
    ff_refstruct_pool_uninit(&pools->qscale_table_pool);
    ff_refstruct_pool_uninit(&pools->mb_type_pool);
    ff_refstruct_pool_uninit(&pools->motion_val_pool);
    ff_refstruct_pool_uninit(&pools->ref_index_pool);
    pools->alloc_mb_height = pools->alloc_mb_width = pools->alloc_mb_stride = 0;
}

int ff_mpv_init_context_frame(MpegEncContext *s)
{
    BufferPoolContext *const pools = &s->buffer_pools;
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    int mb_height;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    /* VC-1 can change from being progressive to interlaced on a per-frame
     * basis. We therefore allocate certain buffers so big that they work
     * in both instances. */
    mb_height = s->msmpeg4_version == MSMP4_VC1 ?
                    FFALIGN(s->mb_height, 2) : s->mb_height;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = mb_height * s->mb_stride;
    mv_table_size = (mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * mb_height + 1);
    c_size  = s->mb_stride * (mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
        return AVERROR(ENOMEM);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

#define ALLOC_POOL(name, size, flags) do { \
    pools->name ##_pool = ff_refstruct_pool_alloc((size), (flags)); \
    if (!pools->name ##_pool) \
        return AVERROR(ENOMEM); \
} while (0)

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp));
        if (!tmp)
            return AVERROR(ENOMEM);
        s->p_field_mv_table_base = tmp;
        tmp += s->mb_stride + 1;
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                s->p_field_mv_table[i][j] = tmp;
                tmp += mv_table_size;
            }
        }
        if (s->codec_id == AV_CODEC_ID_MPEG4) {
            ALLOC_POOL(mbskip_table, mb_array_size + 2,
                       !s->encoding ? FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME : 0);
            if (!s->encoding) {
                /* cbp, pred_dir */
                if (!(s->cbp_table      = av_mallocz(mb_array_size)) ||
                    !(s->pred_dir_table = av_mallocz(mb_array_size)))
                    return AVERROR(ENOMEM);
            }
        }
    }

    if (s->msmpeg4_version >= MSMP4_V3) {
        s->coded_block_base = av_mallocz(y_size);
        if (!s->coded_block_base)
            return AVERROR(ENOMEM);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    // Note the + 1 is for a quicker MPEG-4 slice_end detection
    if (!(s->mbskip_table = av_mallocz(mb_array_size + 2)) ||
        /* which mb is an intra block, init macroblock skip table */
        !(s->mbintra_table = av_malloc(mb_array_size)))
        return AVERROR(ENOMEM);
    memset(s->mbintra_table, 1, mb_array_size);

    ALLOC_POOL(qscale_table, mv_table_size, 0);
    ALLOC_POOL(mb_type, mv_table_size * sizeof(uint32_t), 0);

    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS)) {
        const int b8_array_size = s->b8_stride * mb_height * 2;
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        /* FIXME: The output of H.263 with OBMC depends upon
         * the earlier content of the buffer; therefore we set
         * the flags to always reset returned buffers here. */
        ALLOC_POOL(motion_val, mv_size, FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME);
        ALLOC_POOL(ref_index, ref_index_size, 0);
    }
#undef ALLOC_POOL
    pools->alloc_mb_width  = s->mb_width;
    pools->alloc_mb_height = mb_height;
    pools->alloc_mb_stride = s->mb_stride;

    return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
}
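
/* Illustrative example of the sizes above: for a 1920x1080 MPEG-2 stream
 * that is not progressive_sequence, mb_height = (1080 + 31) / 32 * 2 = 68,
 * mb_width = 120, mb_stride = 121 and b8_stride = 241; the extra column in
 * the strides leaves room for the out-of-frame neighbours used by the
 * prediction and error-resilience code. */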

static void clear_context(MpegEncContext *s)
{
    memset(&s->buffer_pools, 0, sizeof(s->buffer_pools));
    memset(&s->next_pic, 0, sizeof(s->next_pic));
    memset(&s->last_pic, 0, sizeof(s->last_pic));
    memset(&s->cur_pic,  0, sizeof(s->cur_pic));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map        = NULL;
    s->me.score_map  = NULL;
    s->dct_error_sum = NULL;
    s->block         = NULL;
    s->blocks        = NULL;
    s->ac_val_base   = NULL;
    s->ac_val[0] =
    s->ac_val[1] =
    s->ac_val[2] = NULL;
    s->me.scratchpad = NULL;
    s->me.temp       = NULL;
    memset(&s->sc, 0, sizeof(s->sc));

    s->bitstream_buffer = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->p_field_mv_table_base = NULL;
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            s->p_field_mv_table[i][j] = NULL;

    s->dc_val_base      = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table    = NULL;
    s->cbp_table        = NULL;
    s->pred_dir_table   = NULL;

    s->mbskip_table = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer     = NULL;
    s->mb_index2xy           = NULL;
}

/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;
    int ret;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    dsp_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    if ((ret = ff_mpv_init_context_frame(s)))
        goto fail;

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;
    s->slice_context_count = nb_slices;

//     if (s->width && s->height) {
    ret = ff_mpv_init_duplicate_contexts(s);
    if (ret < 0)
        goto fail;
//     }

    return 0;
 fail:
    ff_mpv_common_end(s);
    return ret;
}

void ff_mpv_free_context_frame(MpegEncContext *s)
{
    free_duplicate_contexts(s);

    free_buffer_pools(&s->buffer_pools);
    av_freep(&s->p_field_mv_table_base);
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            s->p_field_mv_table[i][j] = NULL;

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);

    s->linesize = s->uvlinesize = 0;
}

void ff_mpv_common_end(MpegEncContext *s)
{
    ff_mpv_free_context_frame(s);
    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    ff_mpv_unref_picture(&s->last_pic);
    ff_mpv_unref_picture(&s->cur_pic);
    ff_mpv_unref_picture(&s->next_pic);

    s->context_initialized = 0;
    s->context_reinit      = 0;
    s->linesize = s->uvlinesize = 0;
}


/**
 * Clean dc, ac for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy + wrap    ] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy] = 0;
}
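
/* Note: 1024 is the DC predictor reset value (128 << 3, i.e. mid-gray before
 * the DC scaler is applied), so resetting dc_val[] to 1024 and ac_val[] to 0
 * makes neighbouring intra blocks predict from neutral values once this
 * macroblock is coded as non-intra. */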

void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->cur_pic.linesize[1];
    const int width_of_mb  = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb = 4 - s->avctx->lowres;

    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->cur_pic.data[0] + (int)((s->mb_x - 1U) <<  width_of_mb);
    s->dest[1] = s->cur_pic.data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->cur_pic.data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if (s->picture_structure == PICT_FRAME) {
        s->dest[0] += s->mb_y *   linesize << height_of_mb;
        s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
    } else {
        s->dest[0] += (s->mb_y>>1) *   linesize << height_of_mb;
        s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
    }
}
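
/* Illustrative note: width_of_mb/height_of_mb are the log2 of the macroblock
 * size in bytes/lines: 16 luma pixels occupy 1 << 4 bytes at 8 bits per
 * sample and 1 << 5 bytes at higher bit depths (2 bytes per pixel), and each
 * lowres step halves both, which is why the dest[] pointers above advance by
 * shifts rather than multiplications. */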

/**
 * set qscale and update qscale dependent variables.
 */
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[ qscale ];
    s->c_dc_scale = s->c_dc_scale_table[ s->chroma_qscale ];
}
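
/* Illustrative example: the clamp keeps qscale in the 5-bit 1..31 range used
 * by MPEG-1/2/4 and H.263. With qscale = 10 and the default tables,
 * chroma_qscale comes from chroma_qscale_table[10] and the luma/chroma DC
 * scale factors from the corresponding *_dc_scale_table entries, which the
 * dct_unquantize_*_intra functions then apply to block[0]. */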