FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h264chroma.h"
36 #include "internal.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "mpeg4videodec.h"
41 #include "libavutil/refstruct.h"
42 #include "thread.h"
43 #include "threadprogress.h"
44 #include "wmv2dec.h"
45 
/* NOTE(review): doxygen-extraction artifact — the function signature (doxygen
 * line 46) and lines 50/61 were hyperlinks and are missing. From the body this
 * is presumably ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx),
 * the common decoder-side init: copies codec parameters from avctx into the
 * context and sets up the shared picture pool — confirm against the FFmpeg tree. */
47 {
48  enum ThreadingStatus thread_status;
49 
51 
52  s->avctx = avctx;
53  s->width = avctx->coded_width;
54  s->height = avctx->coded_height;
55  s->codec_id = avctx->codec->id;
56  s->workaround_bugs = avctx->workaround_bugs;
57 
58  /* convert fourcc to upper case */
59  s->codec_tag = ff_toupper4(avctx->codec_tag);
60 
62 
63  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
64 
65  if (s->picture_pool) // VC-1 can call this multiple times
66  return 0;
67 
/* Frame-threading: only the first (non-copy) context allocates the pool;
 * copies share it via ff_thread_sync_ref(). */
68  thread_status = ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool));
69  if (thread_status != FF_THREAD_IS_COPY) {
70  s->picture_pool = ff_mpv_alloc_pic_pool(thread_status != FF_THREAD_NO_FRAME_THREADING);
71  if (!s->picture_pool)
72  return AVERROR(ENOMEM);
73  }
74  return 0;
75 }
76 
/* NOTE(review): extraction artifact — the first signature line (doxygen 77) is
 * missing; presumably int ff_mpv_update_thread_context(AVCodecContext *dst,
 * const AVCodecContext *src). Copies decoder state from the source thread
 * context into dst for frame-threaded decoding. Lines 112 (the frame-size
 * change call whose result goes into ret), 146 and 153 (size arguments of the
 * bitstream-buffer copy) are also missing — confirm against the FFmpeg tree. */
78  const AVCodecContext *src)
79 {
80  MpegEncContext *const s1 = src->priv_data;
81  MpegEncContext *const s = dst->priv_data;
82  int ret;
83 
84  if (dst == src)
85  return 0;
86 
87  av_assert0(s != s1);
88 
89  // FIXME can parameters change on I-frames?
90  // in that case dst may need a reinit
91  if (!s->context_initialized) {
/* First update: clone the whole context, then restore the fields that must
 * stay per-thread (private_ctx, avctx, bitstream buffer ownership). */
92  void *private_ctx = s->private_ctx;
93  int err;
94  memcpy(s, s1, sizeof(*s));
95 
96  s->context_initialized = 0;
97  s->context_reinit = 0;
98  s->avctx = dst;
99  s->private_ctx = private_ctx;
100  s->bitstream_buffer = NULL;
101  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
102 
103  if (s1->context_initialized) {
104  if ((err = ff_mpv_common_init(s)) < 0)
105  return err;
106  }
107  }
108 
109  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
110  s->height = s1->height;
111  s->width = s1->width;
/* NOTE(review): the call assigning ret (doxygen line 112) is missing here. */
113  return ret;
114  }
115 
116  s->quarter_sample = s1->quarter_sample;
117 
118  s->picture_number = s1->picture_number;
119 
120  ff_mpv_replace_picture(&s->cur_pic, &s1->cur_pic);
121  ff_mpv_replace_picture(&s->last_pic, &s1->last_pic);
122  ff_mpv_replace_picture(&s->next_pic, &s1->next_pic);
123 
124  s->linesize = s1->linesize;
125  s->uvlinesize = s1->uvlinesize;
126 
127  // Error/bug resilience
128  s->workaround_bugs = s1->workaround_bugs;
129  s->padding_bug_score = s1->padding_bug_score;
130 
131  // MPEG-4 timing info
/* Bulk-copies the struct field range [last_time_base, pb_field_time]; relies
 * on the member layout of MpegEncContext staying contiguous. */
132  memcpy(&s->last_time_base, &s1->last_time_base,
133  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
134  (char *) &s1->last_time_base);
135 
136  // B-frame info
137  s->max_b_frames = s1->max_b_frames;
138  s->low_delay = s1->low_delay;
139 
140  // DivX handling (doesn't work)
141  s->divx_packed = s1->divx_packed;
142 
143  if (s1->bitstream_buffer) {
144  av_fast_padded_malloc(&s->bitstream_buffer,
145  &s->allocated_bitstream_buffer_size,
/* NOTE(review): the size argument line (doxygen 146) is missing. */
147  if (!s->bitstream_buffer) {
148  s->bitstream_buffer_size = 0;
149  return AVERROR(ENOMEM);
150  }
151  s->bitstream_buffer_size = s1->bitstream_buffer_size;
152  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
/* NOTE(review): the length argument line (doxygen 153) is missing. */
154  }
155 
156  // MPEG-2/interlacing info
/* Same contiguous-field-range copy trick for [progressive_sequence, rtp_mode). */
157  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
158  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
159 
160  return 0;
161 }
162 
/* NOTE(review): signature line (doxygen 163) missing — presumably
 * av_cold int ff_mpv_decode_close(AVCodecContext *avctx). Releases the shared
 * picture pool; line 165 and 168 (likely the priv_data fetch and a common-end
 * call) are also missing from the extraction — confirm against the FFmpeg tree. */
164 {
166 
167  av_refstruct_pool_uninit(&s->picture_pool);
169  return 0;
170 }
171 
/* NOTE(review): signature line (doxygen 172) missing — presumably
 * int ff_mpv_common_frame_size_change(MpegEncContext *s). Re-initializes the
 * per-resolution context state after a mid-stream dimension change. Lines 179,
 * 203 and 211 (a free call, the ret-assigning init call for the slice contexts,
 * and a cleanup call in the fail path) are missing from the extraction. */
173 {
174  int err = 0;
175 
176  if (!s->context_initialized)
177  return AVERROR(EINVAL);
178 
180 
/* Drop all references to the old-size frames before reallocating. */
181  ff_mpv_unref_picture(&s->last_pic);
182  ff_mpv_unref_picture(&s->next_pic);
183  ff_mpv_unref_picture(&s->cur_pic);
184 
185  if ((s->width || s->height) &&
186  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
187  goto fail;
188 
189  /* set chroma shifts */
190  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
191  &s->chroma_x_shift,
192  &s->chroma_y_shift);
193  if (err < 0)
194  goto fail;
195 
196  if ((err = ff_mpv_init_context_frame(s)))
197  goto fail;
198 
199  memset(s->thread_context, 0, sizeof(s->thread_context));
200  s->thread_context[0] = s;
201 
202  if (s->width && s->height) {
/* NOTE(review): the call assigning err (doxygen line 203) is missing. */
204  if (err < 0)
205  goto fail;
206  }
207  s->context_reinit = 0;
208 
209  return 0;
210  fail:
/* On failure, mark the context for re-initialization on the next attempt. */
212  s->context_reinit = 1;
213  return err;
214 }
215 
/**
 * Get a picture from the pool and allocate its frame buffer and accessories.
 *
 * NOTE(review): extraction artifact — doxygen lines 231–234 (the codec-tag
 * check and the ff_thread_get_buffer call assigning ret), 240 (the
 * avcodec_default_get_buffer2 call for the WM image/screen codecs branch),
 * 249 (the hwaccel frame-priv alloc assigning ret) and 264 (the fail-path
 * unref) are missing — confirm against the FFmpeg tree.
 */
216 static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
217 {
218  AVCodecContext *avctx = s->avctx;
219  MPVPicture *pic = av_refstruct_pool_get(s->picture_pool);
220  int ret;
221 
222  if (!pic)
223  return AVERROR(ENOMEM);
224 
225  dst->ptr = pic;
226 
227  pic->reference = reference;
228 
229  /* WM Image / Screen codecs allocate internal buffers with different
230  * dimensions / colorspaces; ignore user-defined callbacks for these. */
235  reference ? AV_GET_BUFFER_FLAG_REF : 0);
236  } else {
237  pic->f->width = avctx->width;
238  pic->f->height = avctx->height;
239  pic->f->format = avctx->pix_fmt;
241  }
242  if (ret < 0)
243  goto fail;
244 
245  ret = ff_mpv_pic_check_linesize(avctx, pic->f, &s->linesize, &s->uvlinesize);
246  if (ret < 0)
247  goto fail;
248 
250  if (ret < 0)
251  goto fail;
252 
/* The buffer pools were sized for the current mb geometry; assert they match. */
253  av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
254  av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
255  FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
256  av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
257  ret = ff_mpv_alloc_pic_accessories(s->avctx, dst, &s->sc,
258  &s->buffer_pools, s->mb_height);
259  if (ret < 0)
260  goto fail;
261 
262  return 0;
263 fail:
265  return ret;
266 }
267 
/* NOTE(review): signature line (doxygen 268) missing — presumably
 * static int alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst).
 * Allocates a placeholder reference picture (e.g. when a stream starts on a
 * non-keyframe), marks it as dummy so it is never output, and reports full
 * decode progress so no thread ever waits on it. */
269 {
270  MPVPicture *pic;
271  int ret = alloc_picture(s, dst, 1);
272  if (ret < 0)
273  return ret;
274 
275  pic = dst->ptr;
276  pic->dummy = 1;
277 
278  ff_thread_progress_report(&pic->progress, INT_MAX);
279 
280  return 0;
281 }
282 
283 static void color_frame(AVFrame *frame, int luma)
284 {
285  int h_chroma_shift, v_chroma_shift;
286 
287  for (int i = 0; i < frame->height; i++)
288  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
289 
290  if (!frame->data[1])
291  return;
292  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
293  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
294  memset(frame->data[1] + frame->linesize[1] * i,
295  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
296  memset(frame->data[2] + frame->linesize[2] * i,
297  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
298  }
299 }
300 
/* NOTE(review): signature line (doxygen 301) missing — presumably
 * int ff_mpv_alloc_dummy_frames(MpegEncContext *s). Ensures last/next
 * reference pictures exist before decoding a predicted frame, creating grey
 * dummy frames when the real references are absent. The av_log call heads
 * (doxygen lines 310 and 314) are missing; only their string arguments remain. */
302 {
303  AVCodecContext *avctx = s->avctx;
304  int ret;
305 
306  av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
307  av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);
308  if (!s->last_pic.ptr && s->pict_type != AV_PICTURE_TYPE_I) {
309  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_pic.ptr)
311  "allocating dummy last picture for B frame\n");
312  else if (s->codec_id != AV_CODEC_ID_H261 /* H.261 has no keyframes */ &&
313  (s->picture_structure == PICT_FRAME || s->first_field))
315  "warning: first frame is no keyframe\n");
316 
317  /* Allocate a dummy frame */
318  ret = alloc_dummy_frame(s, &s->last_pic);
319  if (ret < 0)
320  return ret;
321 
/* Software path only: fill the dummy with a neutral colour. FLV1/H.263 use
 * luma 16 (studio black); everything else mid-grey 0x80. */
322  if (!avctx->hwaccel) {
323  int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
324  color_frame(s->last_pic.ptr->f, luma_val);
325  }
326  }
327  if (!s->next_pic.ptr && s->pict_type == AV_PICTURE_TYPE_B) {
328  /* Allocate a dummy frame */
329  ret = alloc_dummy_frame(s, &s->next_pic);
330  if (ret < 0)
331  return ret;
332  }
333 
334  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_pic.ptr &&
335  s->last_pic.ptr->f->buf[0]));
336 
337  return 0;
338 }
339 
340 /**
341  * generic function called after decoding
342  * the header and before a frame is decoded.
343  */
/* NOTE(review): signature line (doxygen 344) missing — presumably
 * int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx).
 * Lines 350 (the ff_thread_can_start_frame check guarding the error below)
 * and 384 (the ff_mpv_alloc_dummy_frames call assigning ret) are also
 * missing from the extraction — confirm against the FFmpeg tree. */
345 {
346  int ret;
347 
348  s->mb_skipped = 0;
349 
351  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
352  return AVERROR_BUG;
353  }
354 
/* Allocate the new current picture; B-frames and droppable frames are not
 * kept as references. */
355  ff_mpv_unref_picture(&s->cur_pic);
356  ret = alloc_picture(s, &s->cur_pic,
357  s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
358  if (ret < 0)
359  return ret;
360 
361  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
362  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
363  (!s->progressive_frame && !s->progressive_sequence);
364  s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;
365 
366  s->cur_pic.ptr->f->pict_type = s->pict_type;
367  if (s->pict_type == AV_PICTURE_TYPE_I)
368  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
369  else
370  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
371 
/* Rotate the reference pictures: next becomes last, current becomes next. */
372  if (s->pict_type != AV_PICTURE_TYPE_B) {
373  ff_mpv_workpic_from_pic(&s->last_pic, s->next_pic.ptr);
374  if (!s->droppable)
375  ff_mpv_workpic_from_pic(&s->next_pic, s->cur_pic.ptr);
376  }
377  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
378  (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
379  s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
380  s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
381  s->cur_pic.ptr ? s->cur_pic.ptr->f->data[0] : NULL,
382  s->pict_type, s->droppable);
383 
385  if (ret < 0)
386  return ret;
387 
388  if (s->avctx->debug & FF_DEBUG_NOMC)
389  color_frame(s->cur_pic.ptr->f, 0x80);
390 
391  return 0;
392 }
393 
394 /* called after a frame has been decoded. */
/* NOTE(review): signature line (doxygen 395) missing — presumably
 * void ff_mpv_frame_end(MpegEncContext *s). Clears the MMX state and marks
 * the finished reference picture fully decoded for waiting threads. */
396 {
397  emms_c();
398 
399  if (s->cur_pic.reference)
400  ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
401 }
402 
/* NOTE(review): signature line (doxygen 403) missing — presumably
 * void ff_print_debug_info(MpegEncContext *s, const MPVPicture *p,
 * AVFrame *pict). Thin wrapper forwarding per-MB debug tables to
 * ff_print_debug_info2(). */
404 {
405  ff_print_debug_info2(s->avctx, pict, p->mb_type,
406  p->qscale_table, p->motion_val,
407  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
408 }
409 
/* NOTE(review): the first signature line (doxygen 410) is missing —
 * presumably int ff_mpv_export_qp_table(const MpegEncContext *s,
 * AVFrame *f, ...). Exports the per-macroblock quantizer table as
 * AVVideoEncParams side data when the caller requested it. Line 420
 * (the av_video_enc_params_create_side_data call assigning par, using
 * nb_mb) is also missing from the extraction. */
411  const MPVPicture *p, int qp_type)
412 {
413  AVVideoEncParams *par;
/* MPEG-1 qscale is stored halved relative to MPEG-2 semantics; scale up. */
414  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
415  unsigned int nb_mb = p->mb_height * p->mb_width;
416 
417  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
418  return 0;
419 
421  if (!par)
422  return AVERROR(ENOMEM);
423 
424  for (unsigned y = 0; y < p->mb_height; y++)
425  for (unsigned x = 0; x < p->mb_width; x++) {
426  const unsigned int block_idx = y * p->mb_width + x;
427  const unsigned int mb_xy = y * p->mb_stride + x;
428  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
429 
430  b->src_x = x * 16;
431  b->src_y = y * 16;
432  b->w = 16;
433  b->h = 16;
434 
435  b->delta_qp = p->qscale_table[mb_xy] * mult;
436  }
437 
438  return 0;
439 }
440 
/* NOTE(review): signature line (doxygen 441) missing — presumably
 * void ff_mpv_draw_horiz_band(MpegEncContext *s, int y, int h).
 * Forwards the completed band to the generic draw_horiz_band helper. */
442 {
443  ff_draw_horiz_band(s->avctx, s->cur_pic.ptr->f,
444  s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
445  y, h, s->picture_structure,
446  s->first_field, s->low_delay);
447 }
448 
/* NOTE(review): signature line (doxygen 449) missing — presumably
 * void ff_mpeg_flush(AVCodecContext *avctx). Decoder flush callback:
 * drops all reference pictures and resets per-frame decoding state. */
450 {
451  MpegEncContext *const s = avctx->priv_data;
452 
453  ff_mpv_unref_picture(&s->cur_pic);
454  ff_mpv_unref_picture(&s->last_pic);
455  ff_mpv_unref_picture(&s->next_pic);
456 
457  s->mb_x = s->mb_y = 0;
458 
459  s->bitstream_buffer_size = 0;
460  s->pp_time = 0;
461 }
462 
/* NOTE(review): signature line (doxygen 463) missing — presumably
 * void ff_mpv_report_decode_progress(MpegEncContext *s). Publishes the
 * current MB row as decoded, unless the frame state makes row-granular
 * progress unsafe (B-frames, partitioned frames, after an error). */
464 {
465  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
466  ff_thread_progress_report(&s->cur_pic.ptr->progress, s->mb_y);
467 }
468 
469 
/* NOTE(review): the first signature line (doxygen 470) is missing —
 * per the doxygen index: static int hpel_motion_lowres(MpegEncContext *s, ...).
 * Half-pel motion compensation of one block in lowres mode; returns 1 if the
 * edge-emulation buffer was used, else 0. Doxygen line 505 (the trailing
 * h_edge_pos/v_edge_pos arguments of emulated_edge_mc) is also missing. */
471  uint8_t *dest, const uint8_t *src,
472  int field_based, int field_select,
473  int src_x, int src_y,
474  int width, int height, ptrdiff_t stride,
475  int h_edge_pos, int v_edge_pos,
476  int w, int h, const h264_chroma_mc_func *pix_op,
477  int motion_x, int motion_y)
478 {
479  const int lowres = s->avctx->lowres;
480  const int op_index = lowres;
481  const int s_mask = (2 << lowres) - 1;
482  int emu = 0;
483  int sx, sy;
484 
485  av_assert2(op_index <= 3);
486 
487  if (s->quarter_sample) {
488  motion_x /= 2;
489  motion_y /= 2;
490  }
491 
/* Split the MV into an integer source offset and a sub-pel phase (sx/sy). */
492  sx = motion_x & s_mask;
493  sy = motion_y & s_mask;
494  src_x += motion_x >> lowres + 1;
495  src_y += motion_y >> lowres + 1;
496 
497  src += src_y * stride + src_x;
498 
/* If the read would cross the picture edge, go through the emu-edge buffer. */
499  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
500  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
501  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
502  s->linesize, s->linesize,
503  w + 1, (h + 1) << field_based,
504  src_x, src_y * (1 << field_based),
506  src = s->sc.edge_emu_buffer;
507  emu = 1;
508  }
509 
510  sx = (sx << 2) >> lowres;
511  sy = (sy << 2) >> lowres;
512  if (field_select)
513  src += s->linesize;
514  pix_op[op_index](dest, src, stride, h, sx, sy);
515  return emu;
516 }
517 
518 /* apply one mpeg motion vector to the three components */
/* NOTE(review): the first signature line (doxygen 519) is missing —
 * presumably static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
 * followed by the parameters below. Confirm against the FFmpeg tree. */
520  uint8_t *dest_y,
521  uint8_t *dest_cb,
522  uint8_t *dest_cr,
523  int field_based,
524  int bottom_field,
525  int field_select,
526  uint8_t *const *ref_picture,
527  const h264_chroma_mc_func *pix_op,
528  int motion_x, int motion_y,
529  int h, int mb_y)
530 {
531  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
532  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
533  ptrdiff_t uvlinesize, linesize;
534  const int lowres = s->avctx->lowres;
535  const int op_index = lowres - 1 + s->chroma_x_shift;
536  const int block_s = 8 >> lowres;
537  const int s_mask = (2 << lowres) - 1;
538  const int h_edge_pos = s->h_edge_pos >> lowres;
539  const int v_edge_pos = s->v_edge_pos >> lowres;
540  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
541 
542  av_assert2(op_index <= 3);
543 
544  linesize = s->cur_pic.linesize[0] << field_based;
545  uvlinesize = s->cur_pic.linesize[1] << field_based;
546 
547  // FIXME obviously not perfect but qpel will not work in lowres anyway
548  if (s->quarter_sample) {
549  motion_x /= 2;
550  motion_y /= 2;
551  }
552 
553  if (field_based) {
554  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
555  }
556 
/* Split luma MV into integer source position and sub-pel phase. */
557  sx = motion_x & s_mask;
558  sy = motion_y & s_mask;
559  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
560  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
561 
/* Chroma MV derivation differs per output format (H.263 / H.261 / MPEG 4:2:0,
 * 4:2:2 and 4:4:4). */
562  if (s->out_format == FMT_H263) {
563  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
564  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
565  uvsrc_x = src_x >> 1;
566  uvsrc_y = src_y >> 1;
567  } else if (s->out_format == FMT_H261) {
568  // even chroma mv's are full pel in H261
569  mx = motion_x / 4;
570  my = motion_y / 4;
571  uvsx = (2 * mx) & s_mask;
572  uvsy = (2 * my) & s_mask;
573  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
574  uvsrc_y = mb_y * block_s + (my >> lowres);
575  } else {
576  if (s->chroma_y_shift) {
577  mx = motion_x / 2;
578  my = motion_y / 2;
579  uvsx = mx & s_mask;
580  uvsy = my & s_mask;
581  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
582  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
583  } else {
584  if (s->chroma_x_shift) {
585  //Chroma422
586  mx = motion_x / 2;
587  uvsx = mx & s_mask;
588  uvsy = motion_y & s_mask;
589  uvsrc_y = src_y;
590  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
591  } else {
592  //Chroma444
593  uvsx = motion_x & s_mask;
594  uvsy = motion_y & s_mask;
595  uvsrc_x = src_x;
596  uvsrc_y = src_y;
597  }
598  }
599  }
600 
601  ptr_y = ref_picture[0] + src_y * linesize + src_x;
602  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
603  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
604 
/* Out-of-picture reads are redirected through the edge-emulation buffer:
 * luma first, then both chroma planes (skipped in grey-only decoding). */
605  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
606  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
607  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
608  linesize >> field_based, linesize >> field_based,
609  17, 17 + field_based,
610  src_x, src_y * (1 << field_based), h_edge_pos,
611  v_edge_pos);
612  ptr_y = s->sc.edge_emu_buffer;
613  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
614  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
615  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
616  if (s->workaround_bugs & FF_BUG_IEDGE)
617  vbuf -= s->uvlinesize;
618  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
619  uvlinesize >> field_based, uvlinesize >> field_based,
620  9, 9 + field_based,
621  uvsrc_x, uvsrc_y * (1 << field_based),
622  h_edge_pos >> 1, v_edge_pos >> 1);
623  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
624  uvlinesize >> field_based,uvlinesize >> field_based,
625  9, 9 + field_based,
626  uvsrc_x, uvsrc_y * (1 << field_based),
627  h_edge_pos >> 1, v_edge_pos >> 1);
628  ptr_cb = ubuf;
629  ptr_cr = vbuf;
630  }
631  }
632 
633  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
634  if (bottom_field) {
635  dest_y += s->linesize;
636  dest_cb += s->uvlinesize;
637  dest_cr += s->uvlinesize;
638  }
639 
640  if (field_select) {
641  ptr_y += s->linesize;
642  ptr_cb += s->uvlinesize;
643  ptr_cr += s->uvlinesize;
644  }
645 
646  sx = (sx << 2) >> lowres;
647  sy = (sy << 2) >> lowres;
648  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
649 
650  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
651  uvsx = (uvsx << 2) >> lowres;
652  uvsy = (uvsy << 2) >> lowres;
653  if (hc) {
654  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
655  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
656  }
657  }
658  // FIXME h261 lowres loop filter
659 }
660 
/* NOTE(review): the first signature line (doxygen 661) is missing —
 * per the doxygen index: static inline void chroma_4mv_motion_lowres(
 * MpegEncContext *s, ...). Chroma motion compensation for 4MV (8x8) mode in
 * lowres. Doxygen lines 686–687 (the ff_h263_round_chroma() calls that derive
 * mx/my from the summed luma MVs) are also missing from the extraction. */
662  uint8_t *dest_cb, uint8_t *dest_cr,
663  uint8_t *const *ref_picture,
664  const h264_chroma_mc_func * pix_op,
665  int mx, int my)
666 {
667  const int lowres = s->avctx->lowres;
668  const int op_index = lowres;
669  const int block_s = 8 >> lowres;
670  const int s_mask = (2 << lowres) - 1;
671  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
672  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
673  int emu = 0, src_x, src_y, sx, sy;
674  ptrdiff_t offset;
675  const uint8_t *ptr;
676 
677  av_assert2(op_index <= 3);
678 
679  if (s->quarter_sample) {
680  mx /= 2;
681  my /= 2;
682  }
683 
684  /* In case of 8X8, we construct a single chroma motion vector
685  with a special rounding */
688 
689  sx = mx & s_mask;
690  sy = my & s_mask;
691  src_x = s->mb_x * block_s + (mx >> lowres + 1);
692  src_y = s->mb_y * block_s + (my >> lowres + 1);
693 
694  offset = src_y * s->uvlinesize + src_x;
695  ptr = ref_picture[1] + offset;
696  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
697  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
698  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
699  s->uvlinesize, s->uvlinesize,
700  9, 9,
701  src_x, src_y, h_edge_pos, v_edge_pos);
702  ptr = s->sc.edge_emu_buffer;
703  emu = 1;
704  }
705  sx = (sx << 2) >> lowres;
706  sy = (sy << 2) >> lowres;
707  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
708 
/* Cr uses the same offset/edge decision computed for Cb above. */
709  ptr = ref_picture[2] + offset;
710  if (emu) {
711  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
712  s->uvlinesize, s->uvlinesize,
713  9, 9,
714  src_x, src_y, h_edge_pos, v_edge_pos);
715  ptr = s->sc.edge_emu_buffer;
716  }
717  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
718 }
719 
720 /**
721  * motion compensation of a single macroblock
722  * @param s context
723  * @param dest_y luma destination pointer
724  * @param dest_cb chroma cb/u destination pointer
725  * @param dest_cr chroma cr/v destination pointer
726  * @param dir direction (0->forward, 1->backward)
727  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
728  * @param pix_op halfpel motion compensation function (average or put normally)
729  * the motion vectors are taken from s->mv and the MV type from s->mv_type
730  */
731 static inline void MPV_motion_lowres(MpegEncContext *s,
732  uint8_t *dest_y, uint8_t *dest_cb,
733  uint8_t *dest_cr,
734  int dir, uint8_t *const *ref_picture,
735  const h264_chroma_mc_func *pix_op)
736 {
737  int mx, my;
738  int mb_x, mb_y;
739  const int lowres = s->avctx->lowres;
740  const int block_s = 8 >>lowres;
741 
742  mb_x = s->mb_x;
743  mb_y = s->mb_y;
744 
/* Dispatch on the MV partitioning of this macroblock. */
745  switch (s->mv_type) {
746  case MV_TYPE_16X16:
747  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
748  0, 0, 0,
749  ref_picture, pix_op,
750  s->mv[dir][0][0], s->mv[dir][0][1],
751  2 * block_s, mb_y);
752  break;
753  case MV_TYPE_8X8:
/* Four luma 8x8 blocks; the luma MVs are summed to derive the single
 * chroma vector used below. */
754  mx = 0;
755  my = 0;
756  for (int i = 0; i < 4; i++) {
757  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
758  s->linesize) * block_s,
759  ref_picture[0], 0, 0,
760  (2 * mb_x + (i & 1)) * block_s,
761  (2 * mb_y + (i >> 1)) * block_s,
762  s->width, s->height, s->linesize,
763  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
764  block_s, block_s, pix_op,
765  s->mv[dir][i][0], s->mv[dir][i][1]);
766 
767  mx += s->mv[dir][i][0];
768  my += s->mv[dir][i][1];
769  }
770 
771  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
772  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
773  pix_op, mx, my);
774  break;
775  case MV_TYPE_FIELD:
776  if (s->picture_structure == PICT_FRAME) {
777  /* top field */
778  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
779  1, 0, s->field_select[dir][0],
780  ref_picture, pix_op,
781  s->mv[dir][0][0], s->mv[dir][0][1],
782  block_s, mb_y);
783  /* bottom field */
784  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
785  1, 1, s->field_select[dir][1],
786  ref_picture, pix_op,
787  s->mv[dir][1][0], s->mv[dir][1][1],
788  block_s, mb_y);
789  } else {
/* Field picture referencing the opposite parity of the current frame:
 * read from the partially decoded current picture instead. */
790  if (s->picture_structure != s->field_select[dir][0] + 1 &&
791  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
792  ref_picture = s->cur_pic.ptr->f->data;
793  }
794  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
795  0, 0, s->field_select[dir][0],
796  ref_picture, pix_op,
797  s->mv[dir][0][0],
798  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
799  }
800  break;
801  case MV_TYPE_16X8:
802  for (int i = 0; i < 2; i++) {
803  uint8_t *const *ref2picture;
804 
805  if (s->picture_structure == s->field_select[dir][i] + 1 ||
806  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
807  ref2picture = ref_picture;
808  } else {
809  ref2picture = s->cur_pic.ptr->f->data;
810  }
811 
812  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
813  0, 0, s->field_select[dir][i],
814  ref2picture, pix_op,
815  s->mv[dir][i][0], s->mv[dir][i][1] +
816  2 * block_s * i, block_s, mb_y >> 1);
817 
818  dest_y += 2 * block_s * s->linesize;
819  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
820  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
821  }
822  break;
823  case MV_TYPE_DMV:
824  if (s->picture_structure == PICT_FRAME) {
825  for (int i = 0; i < 2; i++) {
826  for (int j = 0; j < 2; j++) {
827  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
828  1, j, j ^ i,
829  ref_picture, pix_op,
830  s->mv[dir][2 * i + j][0],
831  s->mv[dir][2 * i + j][1],
832  block_s, mb_y);
833  }
834  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
835  }
836  } else {
837  for (int i = 0; i < 2; i++) {
838  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
839  0, 0, s->picture_structure != i + 1,
840  ref_picture, pix_op,
841  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
842  2 * block_s, mb_y >> 1);
843 
844  // after put we make avg of the same block
845  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
846 
847  // opposite parity is always in the same
848  // frame if this is second field
849  if (!s->first_field) {
850  ref_picture = s->cur_pic.ptr->f->data;
851  }
852  }
853  }
854  break;
855  default:
856  av_assert2(0);
857  }
858 }
859 
860 /**
861  * find the lowest MB row referenced in the MVs
862  */
/* NOTE(review): signature line (doxygen 863) missing — presumably
 * static int lowest_referenced_row(MpegEncContext *s, int dir). Returns the
 * lowest macroblock row of the reference picture that the current MB's motion
 * vectors can touch, used to wait on reference-frame decode progress. */
864 {
865  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
866  int off, mvs;
867 
/* Field pictures / GMC are not handled: conservatively report the last row. */
868  if (s->picture_structure != PICT_FRAME || s->mcsel)
869  goto unhandled;
870 
871  switch (s->mv_type) {
872  case MV_TYPE_16X16:
873  mvs = 1;
874  break;
875  case MV_TYPE_16X8:
876  mvs = 2;
877  break;
878  case MV_TYPE_8X8:
879  mvs = 4;
880  break;
881  default:
882  goto unhandled;
883  }
884 
885  for (int i = 0; i < mvs; i++) {
886  int my = s->mv[dir][i][1];
887  my_max = FFMAX(my_max, my);
888  my_min = FFMIN(my_min, my);
889  }
890 
/* Convert the worst-case vertical displacement to whole MB rows (ceil /64,
 * after normalizing half-pel MVs to quarter-pel units). */
891  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
892 
893  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
894 unhandled:
895  return s->mb_height - 1;
896 }
897 
898 /* add block[] to dest[] */
899 static inline void add_dct(MpegEncContext *s,
900  int16_t *block, int i, uint8_t *dest, int line_size)
901 {
902  if (s->block_last_index[i] >= 0) {
903  s->idsp.idct_add(dest, line_size, block);
904  }
905 }
906 
907 #define IS_ENCODER 0
/* NOTE(review): extraction artifact — doxygen line 908 (the #include of
 * mpv_reconstruct_mb_template.c that IS_ENCODER configures) and line 910
 * (the signature, per the index: void ff_mpv_reconstruct_mb(MpegEncContext *s,
 * int16_t block[12][64])) are missing, as are lines 928/930/932/935 — the
 * calls into the template-generated reconstruction variants selected below. */
909 
911 {
/* Optional debug dump of all 6 blocks' DCT coefficients in IDCT-permuted order. */
912  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
913  /* print DCT coefficients */
914  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
915  for (int i = 0; i < 6; i++) {
916  for (int j = 0; j < 64; j++) {
917  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
918  block[i][s->idsp.idct_permutation[j]]);
919  }
920  av_log(s->avctx, AV_LOG_DEBUG, "\n");
921  }
922  }
923 
924  av_assert2((s->out_format <= FMT_H261) == (s->out_format == FMT_H261 || s->out_format == FMT_MPEG1));
/* Select the reconstruction variant: full-resolution (split by out_format
 * unless built with CONFIG_SMALL) or the lowres path. */
925  if (!s->avctx->lowres) {
926 #if !CONFIG_SMALL
927  if (s->out_format <= FMT_H261)
929  else
931 #else
933 #endif
934  } else
936 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:107
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:687
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1445
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:442
av_clip
#define av_clip
Definition: common.h:100
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1367
threadprogress.h
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:510
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:483
MpegEncContext::max_b_frames
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:111
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:267
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:1013
mpv_reconstruct_mb_template.c
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:403
MpegEncContext::workaround_bugs
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:114
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:463
AVFrame::width
int width
Definition: frame.h:475
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
b
#define b
Definition: input.c:41
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:294
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:96
mpegutils.h
thread.h
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:269
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:667
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:910
MPVPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:68
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:460
fail
#define fail()
Definition: checkasm.h:193
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:394
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:129
MPVPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:65
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3198
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:470
MpegEncContext::width
int width
Definition: mpegvideo.h:96
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:647
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MPVPicture::dummy
int dummy
Picture is a dummy and should not be output.
Definition: mpegpicture.h:81
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:774
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:654
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:172
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2251
s
#define s(width, name)
Definition: cbs_vp9.c:198
MpegEncContext::last_time_base
int last_time_base
Definition: mpegvideo.h:378
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:127
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1049
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
decode.h
limits.h
NOT_MPEG12_H261
#define NOT_MPEG12_H261
Definition: mpv_reconstruct_mb_template.c:23
MpegEncContext::divx_packed
int divx_packed
Definition: mpegvideo.h:397
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:177
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
MpegEncContext::bitstream_buffer_size
int bitstream_buffer_size
Definition: mpegvideo.h:399
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:390
MpegEncContext::picture_pool
struct AVRefStructPool * picture_pool
Pool for MPVPictures.
Definition: mpegvideo.h:131
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:276
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:410
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:307
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:165
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1382
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
lowres
static int lowres
Definition: ffplay.c:330
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpv_reconstruct_mb_template.c:24
alloc_dummy_frame
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
Definition: mpegvideo_dec.c:268
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:94
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:266
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:301
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
MpegEncContext::pb_field_time
uint16_t pb_field_time
like above, just for interlaced
Definition: mpegvideo.h:385
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:899
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1420
f
f
Definition: af_crystalizer.c:122
MPVPicture::mb_stride
int mb_stride
mb_stride of the tables
Definition: mpegpicture.h:79
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:155
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
height
#define height
Definition: dsp.h:85
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:441
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:490
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:387
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:119
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:344
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:449
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
FF_THREAD_NO_FRAME_THREADING
@ FF_THREAD_NO_FRAME_THREADING
Definition: thread.h:63
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:283
MPVPicture::mb_width
int mb_width
mb_width of the tables
Definition: mpegpicture.h:77
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:863
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:221
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
MPVPicture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:75
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:250
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:403
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:130
DEFINITELY_MPEG12_H261
#define DEFINITELY_MPEG12_H261
Definition: mpv_reconstruct_mb_template.c:25
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
MPVPicture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:62
internal.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:519
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:159
MPVPicture::mb_height
int mb_height
mb_height of the tables
Definition: mpegpicture.h:78
AVCodecContext::height
int height
Definition: avcodec.h:632
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:671
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:731
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:662
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:122
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1429
avcodec.h
ff_mpv_workpic_from_pic
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
Definition: mpegpicture.c:128
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:661
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpv_decode_init
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:46
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1904
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:415
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:749
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:77
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVFrame::height
int height
Definition: frame.h:475
alloc_picture
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
Definition: mpegvideo_dec.c:216
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:395
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:647
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:386
MPVWorkPicture
Definition: mpegpicture.h:95
ThreadingStatus
ThreadingStatus
Definition: thread.h:60
MPVPicture::progress
ThreadProgress progress
Definition: mpegpicture.h:92
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:476
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:632
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2070
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
MpegEncContext::bitstream_buffer
uint8_t * bitstream_buffer
Definition: mpegvideo.h:398
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
video_enc_params.h
ff_mpv_decode_close
int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:163
MpegEncContext::rtp_mode
int rtp_mode
Definition: mpegvideo.h:471