FFmpeg
h264dec.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "config_components.h"
31 
32 #include "libavutil/avassert.h"
33 #include "libavutil/emms.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/thread.h"
38 #include "libavutil/video_enc_params.h"
39 
40 #include "codec_internal.h"
41 #include "internal.h"
42 #include "error_resilience.h"
43 #include "avcodec.h"
44 #include "h264.h"
45 #include "h264dec.h"
46 #include "h2645_parse.h"
47 #include "h264data.h"
48 #include "h264_ps.h"
49 #include "golomb.h"
50 #include "hwaccel_internal.h"
51 #include "hwconfig.h"
52 #include "mpegutils.h"
53 #include "profiles.h"
54 #include "rectangle.h"
55 #include "refstruct.h"
56 #include "thread.h"
57 #include "threadframe.h"
58 
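/* 16x16 luma plus chroma samples per macroblock: 256, 384, 512 and 768
 * for 4:0:0, 4:2:0, 4:2:2 and 4:4:4 respectively. */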
59 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
60 
61 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
62 {
63  H264Context *h = avctx->priv_data;
64  return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
65 }
66 
67 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
68  int (*mv)[2][4][2],
69  int mb_x, int mb_y, int mb_intra, int mb_skipped)
70 {
71  const H264Context *h = opaque;
72  H264SliceContext *sl = &h->slice_ctx[0];
73 
74  sl->mb_x = mb_x;
75  sl->mb_y = mb_y;
76  sl->mb_xy = mb_x + mb_y * h->mb_stride;
77  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
78  av_assert1(ref >= 0);
79  /* FIXME: It is possible albeit uncommon that slice references
80  * differ between slices. We take the easy approach and ignore
81  * it for now. If this turns out to have any relevance in
82  * practice then correct remapping should be added. */
83  if (ref >= sl->ref_count[0])
84  ref = 0;
85  if (!sl->ref_list[0][ref].data[0]) {
86  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
87  ref = 0;
88  }
89  if ((sl->ref_list[0][ref].reference&3) != 3) {
90  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
91  return;
92  }
93  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
94  2, 2, 2, ref, 1);
95  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
96  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
97  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
98  sl->mb_mbaff =
99  sl->mb_field_decoding_flag = 0;
100  ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
101 }
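/* Pass a completed row of macroblocks to the user via AVCodecContext.draw_horiz_band(),
 * converting the row position to per-plane byte offsets into the current frame. */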
102 
103 void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
104  int y, int height)
105 {
106  AVCodecContext *avctx = h->avctx;
107  const AVFrame *src = h->cur_pic.f;
108  const AVPixFmtDescriptor *desc;
109  int offset[AV_NUM_DATA_POINTERS];
110  int vshift;
111  const int field_pic = h->picture_structure != PICT_FRAME;
112 
113  if (!avctx->draw_horiz_band)
114  return;
115 
116  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
117  return;
118 
119  if (field_pic) {
120  height <<= 1;
121  y <<= 1;
122  }
123 
124  height = FFMIN(height, avctx->height - y);
125 
126  desc = av_pix_fmt_desc_get(avctx->pix_fmt);
127  vshift = desc->log2_chroma_h;
128 
129  offset[0] = y * src->linesize[0];
130  offset[1] =
131  offset[2] = (y >> vshift) * src->linesize[1];
132  for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
133  offset[i] = 0;
134 
135  emms_c();
136 
137  avctx->draw_horiz_band(avctx, src, offset,
138  y, h->picture_structure, height);
139 }
140 
141 void ff_h264_free_tables(H264Context *h)
142 {
143  int i;
144 
145  av_freep(&h->intra4x4_pred_mode);
146  av_freep(&h->chroma_pred_mode_table);
147  av_freep(&h->cbp_table);
148  av_freep(&h->mvd_table[0]);
149  av_freep(&h->mvd_table[1]);
150  av_freep(&h->direct_table);
151  av_freep(&h->non_zero_count);
152  av_freep(&h->slice_table_base);
153  h->slice_table = NULL;
154  av_freep(&h->list_counts);
155 
156  av_freep(&h->mb2b_xy);
157  av_freep(&h->mb2br_xy);
158 
159  ff_refstruct_pool_uninit(&h->qscale_table_pool);
160  ff_refstruct_pool_uninit(&h->mb_type_pool);
161  ff_refstruct_pool_uninit(&h->motion_val_pool);
162  ff_refstruct_pool_uninit(&h->ref_index_pool);
163 
164 #if CONFIG_ERROR_RESILIENCE
165  av_freep(&h->er.mb_index2xy);
166  av_freep(&h->er.error_status_table);
167  av_freep(&h->er.er_temp_buffer);
168  av_freep(&h->dc_val_base);
169 #endif
170 
171  for (i = 0; i < h->nb_slice_ctx; i++) {
172  H264SliceContext *sl = &h->slice_ctx[i];
173 
174  av_freep(&sl->bipred_scratchpad);
175  av_freep(&sl->edge_emu_buffer);
176  av_freep(&sl->top_borders[0]);
177  av_freep(&sl->top_borders[1]);
178 
179  sl->bipred_scratchpad_allocated = 0;
180  sl->edge_emu_buffer_allocated = 0;
181  sl->top_borders_allocated[0] = 0;
182  sl->top_borders_allocated[1] = 0;
183  }
184 }
185 
186 int ff_h264_alloc_tables(H264Context *h)
187 {
188  ERContext *const er = &h->er;
189  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
190  const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
191  const int st_size = big_mb_num + h->mb_stride;
192  int x, y;
193 
194  if (!FF_ALLOCZ_TYPED_ARRAY(h->intra4x4_pred_mode, row_mb_num * 8) ||
195  !FF_ALLOCZ_TYPED_ARRAY(h->non_zero_count, big_mb_num) ||
196  !FF_ALLOCZ_TYPED_ARRAY(h->slice_table_base, st_size) ||
197  !FF_ALLOCZ_TYPED_ARRAY(h->cbp_table, big_mb_num) ||
198  !FF_ALLOCZ_TYPED_ARRAY(h->chroma_pred_mode_table, big_mb_num) ||
199  !FF_ALLOCZ_TYPED_ARRAY(h->mvd_table[0], row_mb_num * 8) ||
200  !FF_ALLOCZ_TYPED_ARRAY(h->mvd_table[1], row_mb_num * 8) ||
201  !FF_ALLOCZ_TYPED_ARRAY(h->direct_table, big_mb_num * 4) ||
202  !FF_ALLOCZ_TYPED_ARRAY(h->list_counts, big_mb_num) ||
203  !FF_ALLOCZ_TYPED_ARRAY(h->mb2b_xy, big_mb_num) ||
204  !FF_ALLOCZ_TYPED_ARRAY(h->mb2br_xy, big_mb_num))
205  return AVERROR(ENOMEM);
206  h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
207  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
208  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
209  memset(h->slice_table_base, -1,
210  st_size * sizeof(*h->slice_table_base));
211  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
212  for (y = 0; y < h->mb_height; y++)
213  for (x = 0; x < h->mb_width; x++) {
214  const int mb_xy = x + y * h->mb_stride;
215  const int b_xy = 4 * x + 4 * y * h->b_stride;
216 
217  h->mb2b_xy[mb_xy] = b_xy;
218  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
219  }
220 
221  if (CONFIG_ERROR_RESILIENCE) {
222  const int er_size = h->mb_height * h->mb_stride * (4*sizeof(int) + 1);
223  int mb_array_size = h->mb_height * h->mb_stride;
224  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
225  int yc_size = y_size + 2 * big_mb_num;
226 
227  /* init ER */
228  er->avctx = h->avctx;
229  er->decode_mb = h264_er_decode_mb;
230  er->opaque = h;
231  er->quarter_sample = 1;
232 
233  er->mb_num = h->mb_num;
234  er->mb_width = h->mb_width;
235  er->mb_height = h->mb_height;
236  er->mb_stride = h->mb_stride;
237  er->b8_stride = h->mb_width * 2 + 1;
238 
239  // error resilience code looks cleaner with this
240  if (!FF_ALLOCZ_TYPED_ARRAY(er->mb_index2xy, h->mb_num + 1) ||
241  !FF_ALLOCZ_TYPED_ARRAY(er->error_status_table, mb_array_size) ||
242  !FF_ALLOCZ_TYPED_ARRAY(er->er_temp_buffer, er_size) ||
243  !FF_ALLOCZ_TYPED_ARRAY(h->dc_val_base, yc_size))
244  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
245 
246  for (y = 0; y < h->mb_height; y++)
247  for (x = 0; x < h->mb_width; x++)
248  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
249 
250  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
251  h->mb_stride + h->mb_width;
252  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
253  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
254  er->dc_val[2] = er->dc_val[1] + big_mb_num;
255  for (int i = 0; i < yc_size; i++)
256  h->dc_val_base[i] = 1024;
257  }
258 
259  return 0;
260 }
261 
262 /**
263  * Init slice context
264  */
265 void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
266 {
267  sl->ref_cache[0][scan8[5] + 1] =
268  sl->ref_cache[0][scan8[7] + 1] =
269  sl->ref_cache[0][scan8[13] + 1] =
270  sl->ref_cache[1][scan8[5] + 1] =
271  sl->ref_cache[1][scan8[7] + 1] =
272  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
273 
274  sl->er = &h->er;
275 }
276 
277 static int h264_init_pic(H264Picture *pic)
278 {
279  pic->f = av_frame_alloc();
280  if (!pic->f)
281  return AVERROR(ENOMEM);
282 
283  pic->f_grain = av_frame_alloc();
284  if (!pic->f_grain)
285  return AVERROR(ENOMEM);
286 
287  return 0;
288 }
289 
290 static int h264_init_context(AVCodecContext *avctx, H264Context *h)
291 {
292  int i, ret;
293 
294  h->avctx = avctx;
295  h->cur_chroma_format_idc = -1;
296 
297  h->width_from_caller = avctx->width;
298  h->height_from_caller = avctx->height;
299 
300  h->workaround_bugs = avctx->workaround_bugs;
301  h->flags = avctx->flags;
302  h->poc.prev_poc_msb = 1 << 16;
303  h->recovery_frame = -1;
304  h->frame_recovered = 0;
305  h->poc.prev_frame_num = -1;
306  h->sei.common.frame_packing.arrangement_cancel_flag = -1;
307  h->sei.common.unregistered.x264_build = -1;
308 
309  h->next_outputed_poc = INT_MIN;
310  for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
311  h->last_pocs[i] = INT_MIN;
312 
313  ff_h264_sei_uninit(&h->sei);
314 
315  if (avctx->active_thread_type & FF_THREAD_FRAME) {
316  h->decode_error_flags_pool = ff_refstruct_pool_alloc(sizeof(atomic_int), 0);
317  if (!h->decode_error_flags_pool)
318  return AVERROR(ENOMEM);
319  }
320 
321  h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
322  h->slice_ctx = av_calloc(h->nb_slice_ctx, sizeof(*h->slice_ctx));
323  if (!h->slice_ctx) {
324  h->nb_slice_ctx = 0;
325  return AVERROR(ENOMEM);
326  }
327 
328  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
329  if ((ret = h264_init_pic(&h->DPB[i])) < 0)
330  return ret;
331  }
332 
333  if ((ret = h264_init_pic(&h->cur_pic)) < 0)
334  return ret;
335 
336  if ((ret = h264_init_pic(&h->last_pic_for_ec)) < 0)
337  return ret;
338 
339  for (i = 0; i < h->nb_slice_ctx; i++)
340  h->slice_ctx[i].h264 = h;
341 
342  return 0;
343 }
344 
345 static void h264_free_pic(H264Context *h, H264Picture *pic)
346 {
347  ff_h264_unref_picture(pic);
348  av_frame_free(&pic->f);
349  av_frame_free(&pic->f_grain);
350 }
351 
352 static av_cold int h264_decode_end(AVCodecContext *avctx)
353 {
354  H264Context *h = avctx->priv_data;
355  int i;
356 
357  ff_h264_remove_all_refs(h);
358  ff_h264_free_tables(h);
359 
360  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
361  h264_free_pic(h, &h->DPB[i]);
362  }
363  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
364 
365  h->cur_pic_ptr = NULL;
366 
367  ff_refstruct_pool_uninit(&h->decode_error_flags_pool);
368 
369  av_freep(&h->slice_ctx);
370  h->nb_slice_ctx = 0;
371 
372  ff_h264_sei_uninit(&h->sei);
373  ff_h264_ps_uninit(&h->ps);
374 
375  ff_h2645_packet_uninit(&h->pkt);
376 
377  h264_free_pic(h, &h->cur_pic);
378  h264_free_pic(h, &h->last_pic_for_ec);
379 
380  return 0;
381 }
382 
383 static AVOnce h264_vlc_init = AV_ONCE_INIT;
384 
385 static av_cold int h264_decode_init(AVCodecContext *avctx)
386 {
387  H264Context *h = avctx->priv_data;
388  int ret;
389 
390  ret = h264_init_context(avctx, h);
391  if (ret < 0)
392  return ret;
393 
394  ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
395  if (ret != 0) {
396  av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
397  return AVERROR_UNKNOWN;
398  }
399 
400 #if FF_API_TICKS_PER_FRAME
401 FF_DISABLE_DEPRECATION_WARNINGS
402  avctx->ticks_per_frame = 2;
403 FF_ENABLE_DEPRECATION_WARNINGS
404 #endif
405 
406  if (!avctx->internal->is_copy) {
407  if (avctx->extradata_size > 0 && avctx->extradata) {
408  ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
409  &h->ps, &h->is_avc, &h->nal_length_size,
410  avctx->err_recognition, avctx);
411  if (ret < 0) {
412  int explode = avctx->err_recognition & AV_EF_EXPLODE;
413  av_log(avctx, explode ? AV_LOG_ERROR: AV_LOG_WARNING,
414  "Error decoding the extradata\n");
415  if (explode) {
416  return ret;
417  }
418  ret = 0;
419  }
420  }
421  }
422 
423  if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
424  h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
425  h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
426  }
427 
428  ff_h264_flush_change(h);
429 
430  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
431  h->enable_er = 0;
432 
433  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
434  av_log(avctx, AV_LOG_WARNING,
435  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
436  "Use it at your own risk\n");
437  }
438 
439  return 0;
440 }
441 
442 /**
443  * instantaneous decoder refresh.
444  */
445 static void idr(H264Context *h)
446 {
447  int i;
448  ff_h264_remove_all_refs(h);
449  h->poc.prev_frame_num =
450  h->poc.prev_frame_num_offset = 0;
451  h->poc.prev_poc_msb = 1<<16;
452  h->poc.prev_poc_lsb = -1;
453  for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
454  h->last_pocs[i] = INT_MIN;
455 }
456 
457 /* forget old pics after a seek */
458 void ff_h264_flush_change(H264Context *h)
459 {
460  int i, j;
461 
462  h->next_outputed_poc = INT_MIN;
463  h->prev_interlaced_frame = 1;
464  idr(h);
465 
466  h->poc.prev_frame_num = -1;
467  if (h->cur_pic_ptr) {
468  h->cur_pic_ptr->reference = 0;
469  for (j=i=0; h->delayed_pic[i]; i++)
470  if (h->delayed_pic[i] != h->cur_pic_ptr)
471  h->delayed_pic[j++] = h->delayed_pic[i];
472  h->delayed_pic[j] = NULL;
473  }
474  ff_h264_unref_picture(&h->last_pic_for_ec);
475 
476  h->first_field = 0;
477  h->recovery_frame = -1;
478  h->frame_recovered = 0;
479  h->current_slice = 0;
480  h->mmco_reset = 1;
481 }
482 
483 static void h264_decode_flush(AVCodecContext *avctx)
484 {
485  H264Context *h = avctx->priv_data;
486  int i;
487 
488  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
489 
490  ff_h264_flush_change(h);
491  ff_h264_sei_uninit(&h->sei);
492 
493  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
494  ff_h264_unref_picture(&h->DPB[i]);
495  h->cur_pic_ptr = NULL;
496  ff_h264_unref_picture(&h->cur_pic);
497 
498  h->mb_y = 0;
499  h->non_gray = 0;
500 
501  ff_h264_free_tables(h);
502  h->context_initialized = 0;
503 
504  if (FF_HW_HAS_CB(avctx, flush))
505  FF_HW_SIMPLE_CALL(avctx, flush);
506 }
507 
508 static int get_last_needed_nal(H264Context *h)
509 {
510  int nals_needed = 0;
511  int slice_type = 0;
512  int picture_intra_only = 1;
513  int first_slice = 0;
514  int i, ret;
515 
516  for (i = 0; i < h->pkt.nb_nals; i++) {
517  H2645NAL *nal = &h->pkt.nals[i];
518  GetBitContext gb;
519 
520  /* packets can sometimes contain multiple PPS/SPS,
521  * e.g. two PAFF field pictures in one packet, or a demuxer
522  * which splits NALs strangely. If so, when frame threading we
523  * can't start the next thread until we've read all of them */
524  switch (nal->type) {
525  case H264_NAL_SPS:
526  case H264_NAL_PPS:
527  nals_needed = i;
528  break;
529  case H264_NAL_DPA:
530  case H264_NAL_IDR_SLICE:
531  case H264_NAL_SLICE:
532  ret = init_get_bits8(&gb, nal->data + 1, nal->size - 1);
533  if (ret < 0) {
534  av_log(h->avctx, AV_LOG_ERROR, "Invalid zero-sized VCL NAL unit\n");
535  if (h->avctx->err_recognition & AV_EF_EXPLODE)
536  return ret;
537 
538  break;
539  }
540  if (!get_ue_golomb_long(&gb) || // first_mb_in_slice
541  !first_slice ||
542  first_slice != nal->type)
543  nals_needed = i;
544  slice_type = get_ue_golomb_31(&gb);
545  if (slice_type > 9)
546  slice_type = 0;
547  if (slice_type > 4)
548  slice_type -= 5;
549 
550  slice_type = ff_h264_golomb_to_pict_type[slice_type];
551  picture_intra_only &= (slice_type & 3) == AV_PICTURE_TYPE_I;
552  if (!first_slice)
553  first_slice = nal->type;
554  }
555  }
556 
557  h->picture_intra_only = picture_intra_only;
558 
559  return nals_needed;
560 }
561 
562 static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
563 {
564  av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
565  av_log(logctx, AV_LOG_DEBUG, " green_metadata_type: %d\n", gm->green_metadata_type);
566 
567  if (gm->green_metadata_type == 0) {
568  av_log(logctx, AV_LOG_DEBUG, " green_metadata_period_type: %d\n", gm->period_type);
569 
570  if (gm->period_type == 2)
571  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_seconds: %d\n", gm->num_seconds);
572  else if (gm->period_type == 3)
573  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_pictures: %d\n", gm->num_pictures);
574 
575  av_log(logctx, AV_LOG_DEBUG, " SEI GREEN Complexity Metrics: %f %f %f %f\n",
576  (float)gm->percent_non_zero_macroblocks/255,
577  (float)gm->percent_intra_coded_macroblocks/255,
578  (float)gm->percent_six_tap_filtering/255,
579  (float)gm->percent_alpha_point_deblocking_instance/255);
580 
581  } else if (gm->green_metadata_type == 1) {
582  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_type: %d\n", gm->xsd_metric_type);
583 
584  if (gm->xsd_metric_type == 0)
585  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_value: %f\n",
586  (float)gm->xsd_metric_value/100);
587  }
588 }
589 
590 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
591 {
592  AVCodecContext *const avctx = h->avctx;
593  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
594  int idr_cleared=0;
595  int i, ret = 0;
596 
597  h->has_slice = 0;
598  h->nal_unit_type= 0;
599 
600  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
601  h->current_slice = 0;
602  if (!h->first_field) {
603  h->cur_pic_ptr = NULL;
604  ff_h264_sei_uninit(&h->sei);
605  }
606  }
607 
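/* Re-detect the NAL framing when the declared length size is 4: an Annex B start code
 * (00 00 00 01) followed by an implausibly large 32-bit value means start-code framing,
 * while a leading 32-bit value that fits inside the packet is taken as an AVCC NAL length. */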
608  if (h->nal_length_size == 4) {
609  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
610  h->is_avc = 0;
611  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
612  h->is_avc = 1;
613  }
614 
615  ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->nal_length_size,
616  avctx->codec_id, !!h->is_avc * H2645_FLAG_IS_NALFF);
617  if (ret < 0) {
618  av_log(avctx, AV_LOG_ERROR,
619  "Error splitting the input into NAL units.\n");
620  return ret;
621  }
622 
623  if (avctx->active_thread_type & FF_THREAD_FRAME)
624  nals_needed = get_last_needed_nal(h);
625  if (nals_needed < 0)
626  return nals_needed;
627 
628  for (i = 0; i < h->pkt.nb_nals; i++) {
629  H2645NAL *nal = &h->pkt.nals[i];
630  int max_slice_ctx, err;
631 
632  if (avctx->skip_frame >= AVDISCARD_NONREF &&
633  nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
634  continue;
635 
636  // FIXME these should stop being context-global variables
637  h->nal_ref_idc = nal->ref_idc;
638  h->nal_unit_type = nal->type;
639 
640  err = 0;
641  switch (nal->type) {
642  case H264_NAL_IDR_SLICE:
643  if ((nal->data[1] & 0xFC) == 0x98) {
644  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
645  h->next_outputed_poc = INT_MIN;
646  ret = -1;
647  goto end;
648  }
649  if(!idr_cleared) {
650  idr(h); // FIXME ensure we don't lose some frames if there is reordering
651  }
652  idr_cleared = 1;
653  h->has_recovery_point = 1;
654  case H264_NAL_SLICE:
655  h->has_slice = 1;
656 
657  if ((err = ff_h264_queue_decode_slice(h, nal))) {
658  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
659  sl->ref_count[0] = sl->ref_count[1] = 0;
660  break;
661  }
662 
663  if (h->current_slice == 1) {
664  if (avctx->active_thread_type & FF_THREAD_FRAME &&
665  i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
666  ff_thread_finish_setup(avctx);
667  h->setup_finished = 1;
668  }
669 
670  if (h->avctx->hwaccel &&
671  (ret = FF_HW_CALL(h->avctx, start_frame, buf, buf_size)) < 0)
672  goto end;
673  }
674 
675  max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
676  if (h->nb_slice_ctx_queued == max_slice_ctx) {
677  if (h->avctx->hwaccel) {
678  ret = FF_HW_CALL(avctx, decode_slice, nal->raw_data, nal->raw_size);
679  h->nb_slice_ctx_queued = 0;
680  } else
681  ret = ff_h264_execute_decode_slices(h);
682  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
683  goto end;
684  }
685  break;
686  case H264_NAL_DPA:
687  case H264_NAL_DPB:
688  case H264_NAL_DPC:
689  avpriv_request_sample(avctx, "data partitioning");
690  break;
691  case H264_NAL_SEI:
692  if (h->setup_finished) {
693  avpriv_request_sample(avctx, "Late SEI");
694  break;
695  }
696  ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
697  h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
698  if (avctx->debug & FF_DEBUG_GREEN_MD)
699  debug_green_metadata(&h->sei.green_metadata, h->avctx);
700  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
701  goto end;
702  break;
703  case H264_NAL_SPS: {
704  GetBitContext tmp_gb = nal->gb;
705  if (FF_HW_HAS_CB(avctx, decode_params)) {
706  ret = FF_HW_CALL(avctx, decode_params,
707  nal->type, nal->raw_data, nal->raw_size);
708  if (ret < 0)
709  goto end;
710  }
711  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
712  break;
713  av_log(h->avctx, AV_LOG_DEBUG,
714  "SPS decoding failure, trying again with the complete NAL\n");
715  init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
716  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
717  break;
718  ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
719  break;
720  }
721  case H264_NAL_PPS:
722  if (FF_HW_HAS_CB(avctx, decode_params)) {
723  ret = FF_HW_CALL(avctx, decode_params,
724  nal->type, nal->raw_data, nal->raw_size);
725  if (ret < 0)
726  goto end;
727  }
728  ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
729  nal->size_bits);
730  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
731  goto end;
732  break;
733  case H264_NAL_AUD:
734  case H264_NAL_END_SEQUENCE:
735  case H264_NAL_END_STREAM:
736  case H264_NAL_FILLER_DATA:
737  case H264_NAL_SPS_EXT:
738  case H264_NAL_AUXILIARY_SLICE:
739  break;
740  default:
741  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
742  nal->type, nal->size_bits);
743  }
744 
745  if (err < 0) {
746  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
747  }
748  }
749 
750  ret = ff_h264_execute_decode_slices(h);
751  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
752  goto end;
753 
754  // set decode_error_flags to allow users to detect concealed decoding errors
755  if ((ret < 0 || h->er.error_occurred) && h->cur_pic_ptr) {
756  if (h->cur_pic_ptr->decode_error_flags) {
757  /* Frame-threading in use */
758  atomic_int *decode_error = h->cur_pic_ptr->decode_error_flags;
759  /* Using atomics here is not supposed to provide synchronisation;
760  * they are merely used to allow setting decode_error from both
761  * decoding threads in case of coded slices. */
762  atomic_fetch_or_explicit(decode_error, FF_DECODE_ERROR_DECODE_SLICES,
763  memory_order_relaxed);
764  } else
765  h->cur_pic_ptr->f->decode_error_flags |= FF_DECODE_ERROR_DECODE_SLICES;
766  }
767 
768  ret = 0;
769 end:
770 
771 #if CONFIG_ERROR_RESILIENCE
772  /*
773  * FIXME: Error handling code does not seem to support interlaced
774  * when slices span multiple rows
775  * The ff_er_add_slice calls don't work right for bottom
776  * fields; they cause massive erroneous error concealing
777  * Error marking covers both fields (top and bottom).
778  * This causes a mismatched s->error_count
779  * and a bad error table. Further, the error count goes to
780  * INT_MAX when called for bottom field, because mb_y is
781  * past end by one (caller's fault) and resync_mb_y != 0
782  * causes problems for the first MB line, too.
783  */
784  if (!FIELD_PICTURE(h) && h->current_slice && h->enable_er) {
785 
786  H264SliceContext *sl = h->slice_ctx;
787  int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
788  int decode_error_flags = 0;
789 
790  ff_h264_set_erpic(&h->er.cur_pic, h->cur_pic_ptr);
791 
792  if (use_last_pic) {
793  ff_h264_set_erpic(&h->er.last_pic, &h->last_pic_for_ec);
794  sl->ref_list[0][0].parent = &h->last_pic_for_ec;
795  memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
796  memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
797  sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
798  } else if (sl->ref_count[0]) {
799  ff_h264_set_erpic(&h->er.last_pic, sl->ref_list[0][0].parent);
800  } else
801  ff_h264_set_erpic(&h->er.last_pic, NULL);
802 
803  if (sl->ref_count[1])
804  ff_h264_set_erpic(&h->er.next_pic, sl->ref_list[1][0].parent);
805 
806  ff_er_frame_end(&h->er, &decode_error_flags);
807  if (decode_error_flags) {
808  if (h->cur_pic_ptr->decode_error_flags) {
809  atomic_int *decode_error = h->cur_pic_ptr->decode_error_flags;
810  atomic_fetch_or_explicit(decode_error, decode_error_flags,
811  memory_order_relaxed);
812  } else
813  h->cur_pic_ptr->f->decode_error_flags |= decode_error_flags;
814  }
815  if (use_last_pic)
816  memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
817  }
818 #endif /* CONFIG_ERROR_RESILIENCE */
819  /* clean up */
820  if (h->cur_pic_ptr && !h->droppable && h->has_slice) {
821  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
822  h->picture_structure == PICT_BOTTOM_FIELD);
823  }
824 
825  return (ret < 0) ? ret : buf_size;
826 }
827 
828 /**
829  * Return the number of bytes consumed for building the current frame.
830  */
831 static int get_consumed_bytes(int pos, int buf_size)
832 {
833  if (pos == 0)
834  pos = 1; // avoid infinite loops (I doubt that is needed but...)
835  if (pos + 10 > buf_size)
836  pos = buf_size; // oops ;)
837 
838  return pos;
839 }
840 
841 static int h264_export_enc_params(AVFrame *f, const H264Picture *p)
842 {
843  AVVideoEncParams *par;
844  unsigned int nb_mb = p->mb_height * p->mb_width;
845  unsigned int x, y;
846 
847  par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_H264, nb_mb);
848  if (!par)
849  return AVERROR(ENOMEM);
850 
851  par->qp = p->pps->init_qp;
852 
853  par->delta_qp[1][0] = p->pps->chroma_qp_index_offset[0];
854  par->delta_qp[1][1] = p->pps->chroma_qp_index_offset[0];
855  par->delta_qp[2][0] = p->pps->chroma_qp_index_offset[1];
856  par->delta_qp[2][1] = p->pps->chroma_qp_index_offset[1];
857 
858  for (y = 0; y < p->mb_height; y++)
859  for (x = 0; x < p->mb_width; x++) {
860  const unsigned int block_idx = y * p->mb_width + x;
861  const unsigned int mb_xy = y * p->mb_stride + x;
862  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
863 
864  b->src_x = x * 16;
865  b->src_y = y * 16;
866  b->w = 16;
867  b->h = 16;
868 
869  b->delta_qp = p->qscale_table[mb_xy] - par->qp;
870  }
871 
872  return 0;
873 }
874 
875 static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
876 {
877  int ret;
878 
879  ret = av_frame_ref(dst, srcp->needs_fg ? srcp->f_grain : srcp->f);
880  if (ret < 0)
881  return ret;
882 
883  if (srcp->needs_fg && (ret = av_frame_copy_props(dst, srcp->f)) < 0)
884  return ret;
885 
886  if (srcp->decode_error_flags) {
887  atomic_int *decode_error = srcp->decode_error_flags;
888  /* The following is not supposed to provide synchronisation at all:
889  * given that srcp has already finished decoding, decode_error
890  * has already been set to its final value. */
891  dst->decode_error_flags |= atomic_load_explicit(decode_error, memory_order_relaxed);
892  }
893 
894  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.common.frame_packing), 0);
895 
896  if (srcp->sei_recovery_frame_cnt == 0)
897  dst->flags |= AV_FRAME_FLAG_KEY;
898 
899  if (h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
900  ret = h264_export_enc_params(dst, srcp);
901  if (ret < 0)
902  goto fail;
903  }
904 
905  if (!(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))
906  av_frame_remove_side_data(dst, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
907 
908  return 0;
909 fail:
910  av_frame_unref(dst);
911  return ret;
912 }
913 
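/* Heuristic for avcC-style extradata arriving as packet data: buf[5] carries the SPS count
 * in its low 5 bits, followed by 16-bit big-endian length-prefixed SPS NAL units (type 7),
 * then a PPS count byte and length-prefixed PPS NAL units (type 8). */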
914 static int is_avcc_extradata(const uint8_t *buf, int buf_size)
915 {
916  int cnt= buf[5]&0x1f;
917  const uint8_t *p= buf+6;
918  if (!cnt)
919  return 0;
920  while(cnt--){
921  int nalsize= AV_RB16(p) + 2;
922  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
923  return 0;
924  p += nalsize;
925  }
926  cnt = *(p++);
927  if(!cnt)
928  return 0;
929  while(cnt--){
930  int nalsize= AV_RB16(p) + 2;
931  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
932  return 0;
933  p += nalsize;
934  }
935  return 1;
936 }
937 
938 static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
939 {
940  int ret;
941 
942  if (((h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
943  (h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
944  out->recovered)) {
945 
946  if (h->skip_gray > 0 &&
947  h->non_gray && out->gray &&
948  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)
949  )
950  return 0;
951 
952  if (!h->avctx->hwaccel &&
953  (out->field_poc[0] == INT_MAX ||
954  out->field_poc[1] == INT_MAX)
955  ) {
956  int p;
957  AVFrame *f = out->f;
958  int field = out->field_poc[0] == INT_MAX;
959  uint8_t *dst_data[4];
960  int linesizes[4];
961  const uint8_t *src_data[4];
962 
963  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
964 
965  for (p = 0; p<4; p++) {
966  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
967  src_data[p] = f->data[p] + field *f->linesize[p];
968  linesizes[p] = 2*f->linesize[p];
969  }
970 
971  av_image_copy(dst_data, linesizes, src_data, linesizes,
972  f->format, f->width, f->height>>1);
973  }
974 
975  ret = output_frame(h, dst, out);
976  if (ret < 0)
977  return ret;
978 
979  *got_frame = 1;
980 
981  if (CONFIG_MPEGVIDEODEC) {
982  ff_print_debug_info2(h->avctx, dst,
983  out->mb_type,
984  out->qscale_table,
985  out->motion_val,
986  out->mb_width, out->mb_height, out->mb_stride, 1);
987  }
988  }
989 
990  return 0;
991 }
992 
993 static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
994  int *got_frame, int buf_index)
995 {
996  int ret, i, out_idx;
997  H264Picture *out;
998 
999  h->cur_pic_ptr = NULL;
1000  h->first_field = 0;
1001 
1002  while (h->delayed_pic[0]) {
1003  out = h->delayed_pic[0];
1004  out_idx = 0;
1005  for (i = 1;
1006  h->delayed_pic[i] &&
1007  !(h->delayed_pic[i]->f->flags & AV_FRAME_FLAG_KEY) &&
1008  !h->delayed_pic[i]->mmco_reset;
1009  i++)
1010  if (h->delayed_pic[i]->poc < out->poc) {
1011  out = h->delayed_pic[i];
1012  out_idx = i;
1013  }
1014 
1015  for (i = out_idx; h->delayed_pic[i]; i++)
1016  h->delayed_pic[i] = h->delayed_pic[i + 1];
1017 
1018  if (out) {
1019  h->frame_recovered |= out->recovered;
1020  out->recovered |= h->frame_recovered & FRAME_RECOVERED_SEI;
1021 
1022  out->reference &= ~DELAYED_PIC_REF;
1023  ret = finalize_frame(h, dst_frame, out, got_frame);
1024  if (ret < 0)
1025  return ret;
1026  if (*got_frame)
1027  break;
1028  }
1029  }
1030 
1031  return buf_index;
1032 }
1033 
1034 static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict,
1035  int *got_frame, AVPacket *avpkt)
1036 {
1037  const uint8_t *buf = avpkt->data;
1038  int buf_size = avpkt->size;
1039  H264Context *h = avctx->priv_data;
1040  int buf_index;
1041  int ret;
1042 
1043  h->flags = avctx->flags;
1044  h->setup_finished = 0;
1045  h->nb_slice_ctx_queued = 0;
1046 
1047  ff_h264_unref_picture(&h->last_pic_for_ec);
1048 
1049  /* end of stream, output what is still in the buffers */
1050  if (buf_size == 0)
1051  return send_next_delayed_frame(h, pict, got_frame, 0);
1052 
1053  if (av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1054  size_t side_size;
1055  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1056  ff_h264_decode_extradata(side, side_size,
1057  &h->ps, &h->is_avc, &h->nal_length_size,
1058  avctx->err_recognition, avctx);
1059  }
1060  if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
1061  if (is_avcc_extradata(buf, buf_size))
1062  return ff_h264_decode_extradata(buf, buf_size,
1063  &h->ps, &h->is_avc, &h->nal_length_size,
1064  avctx->err_recognition, avctx);
1065  }
1066 
1067  buf_index = decode_nal_units(h, buf, buf_size);
1068  if (buf_index < 0)
1069  return AVERROR_INVALIDDATA;
1070 
1071  if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
1072  av_assert0(buf_index <= buf_size);
1073  return send_next_delayed_frame(h, pict, got_frame, buf_index);
1074  }
1075 
1076  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
1077  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1078  buf_size >= 4 && !memcmp("Q264", buf, 4))
1079  return buf_size;
1080  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1081  return AVERROR_INVALIDDATA;
1082  }
1083 
1084  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1085  (h->mb_y >= h->mb_height && h->mb_height)) {
1086  if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1087  return ret;
1088 
1089  /* Wait for second field. */
1090  if (h->next_output_pic) {
1091  ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
1092  if (ret < 0)
1093  return ret;
1094  }
1095  }
1096 
1097  av_assert0(pict->buf[0] || !*got_frame);
1098 
1099  ff_h264_unref_picture(&h->last_pic_for_ec);
1100 
1101  return get_consumed_bytes(buf_index, buf_size);
1102 }
1103 
1104 #define OFFSET(x) offsetof(H264Context, x)
1105 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1106 #define VDX VD | AV_OPT_FLAG_EXPORT
1107 static const AVOption h264_options[] = {
1108  { "is_avc", "is avc", OFFSET(is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VDX },
1109  { "nal_length_size", "nal_length_size", OFFSET(nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, VDX },
1110  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1111  { "x264_build", "Assume this x264 version if no x264 version found in any SEI", OFFSET(x264_build), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VD },
1112  { "skip_gray", "Do not return gray gap frames", OFFSET(skip_gray), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VD },
1113  { "noref_gray", "Avoid using gray gap frames as references", OFFSET(noref_gray), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VD },
1114  { NULL },
1115 };
1116 
1117 static const AVClass h264_class = {
1118  .class_name = "H264 Decoder",
1119  .item_name = av_default_item_name,
1120  .option = h264_options,
1121  .version = LIBAVUTIL_VERSION_INT,
1122 };
1123 
1124 const FFCodec ff_h264_decoder = {
1125  .p.name = "h264",
1126  CODEC_LONG_NAME("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1127  .p.type = AVMEDIA_TYPE_VIDEO,
1128  .p.id = AV_CODEC_ID_H264,
1129  .priv_data_size = sizeof(H264Context),
1130  .init = h264_decode_init,
1131  .close = h264_decode_end,
1132  FF_CODEC_DECODE_CB(h264_decode_frame),
1133  .p.capabilities = AV_CODEC_CAP_DR1 |
1134  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
1135  AV_CODEC_CAP_FRAME_THREADS,
1136  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1137 #if CONFIG_H264_DXVA2_HWACCEL
1138  HWACCEL_DXVA2(h264),
1139 #endif
1140 #if CONFIG_H264_D3D11VA_HWACCEL
1141  HWACCEL_D3D11VA(h264),
1142 #endif
1143 #if CONFIG_H264_D3D11VA2_HWACCEL
1144  HWACCEL_D3D11VA2(h264),
1145 #endif
1146 #if CONFIG_H264_D3D12VA_HWACCEL
1147  HWACCEL_D3D12VA(h264),
1148 #endif
1149 #if CONFIG_H264_NVDEC_HWACCEL
1150  HWACCEL_NVDEC(h264),
1151 #endif
1152 #if CONFIG_H264_VAAPI_HWACCEL
1153  HWACCEL_VAAPI(h264),
1154 #endif
1155 #if CONFIG_H264_VDPAU_HWACCEL
1156  HWACCEL_VDPAU(h264),
1157 #endif
1158 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
1159  HWACCEL_VIDEOTOOLBOX(h264),
1160 #endif
1161 #if CONFIG_H264_VULKAN_HWACCEL
1162  HWACCEL_VULKAN(h264),
1163 #endif
1164  NULL
1165  },
1166  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
1167  FF_CODEC_CAP_INIT_CLEANUP,
1168  .flush = h264_decode_flush,
1169  UPDATE_THREAD_CONTEXT(ff_h264_update_thread_context),
1170  UPDATE_THREAD_CONTEXT_FOR_USER(ff_h264_update_thread_context_for_user),
1171  .p.profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1172  .p.priv_class = &h264_class,
1173 };
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1437
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:234
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:302
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:141
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
ff_h264_sei_uninit
void ff_h264_sei_uninit(H264SEIContext *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:48
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
h2645_parse.h
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:266
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1359
opt.h
ff_h264_mb_sizes
const uint16_t ff_h264_mb_sizes[4]
Definition: h264dec.c:59
H264Picture::f
AVFrame * f
Definition: h264dec.h:115
ff_h264_ps_uninit
void ff_h264_ps_uninit(H264ParamSets *ps)
Uninit H264 param sets structure.
Definition: h264_ps.c:270
idr
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264dec.c:445
ff_refstruct_pool_alloc
FFRefStructPool * ff_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to ff_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
out
FILE * out
Definition: movenc.c:55
thread.h
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: packet.h:56
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3170
h264_decode_init
static av_cold int h264_decode_init(AVCodecContext *avctx)
Definition: h264dec.c:385
SLICE_FLAG_ALLOW_FIELD
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG-2 field pics)
Definition: avcodec.h:738
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1430
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
AV_FRAME_DATA_FILM_GRAIN_PARAMS
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
Definition: frame.h:188
H264Picture::pps
const PPS * pps
Definition: h264dec.h:158
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:265
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:59
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
H264_NAL_AUD
@ H264_NAL_AUD
Definition: h264.h:43
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:330
AVPacket::data
uint8_t * data
Definition: packet.h:539
AVOption
AVOption.
Definition: opt.h:429
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:602
b
#define b
Definition: input.c:41
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:270
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
H264SEIGreenMetaData::xsd_metric_value
uint16_t xsd_metric_value
Definition: h264_sei.h:116
H264SEIGreenMetaData::period_type
uint8_t period_type
Definition: h264_sei.h:108
PPS::chroma_qp_index_offset
int chroma_qp_index_offset[2]
Definition: h264_ps.h:122
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:121
FFCodec
Definition: codec_internal.h:127
ERContext
Definition: error_resilience.h:53
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:463
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
decode_nal_units
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264dec.c:590
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:530
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:602
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:233
H2645NAL::size_bits
int size_bits
Size, in bits, of just the data, excluding the stop bit and any trailing padding.
Definition: h2645_parse.h:42
ff_h264_decode_picture_parameter_set
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int bit_length)
Decode PPS.
Definition: h264_ps.c:696
FF_DECODE_ERROR_DECODE_SLICES
#define FF_DECODE_ERROR_DECODE_SLICES
Definition: frame.h:720
H264SliceContext
Definition: h264dec.h:180
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:301
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:232
debug_green_metadata
static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
Definition: h264dec.c:562
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
H264SEIGreenMetaData::percent_non_zero_macroblocks
uint8_t percent_non_zero_macroblocks
Definition: h264_sei.h:111
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1830
fail
#define fail()
Definition: checkasm.h:193
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1593
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
ff_h264_sei_decode
int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb, const H264ParamSets *ps, void *logctx)
Definition: h264_sei.c:230
GetBitContext
Definition: get_bits.h:108
AV_VIDEO_ENC_PARAMS_H264
@ AV_VIDEO_ENC_PARAMS_H264
H.264 stores:
Definition: video_enc_params.h:57
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:161
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:458
finalize_frame
static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
Definition: h264dec.c:938
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:118
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
H264Ref::data
uint8_t * data[3]
Definition: h264dec.h:170
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
H2645_FLAG_IS_NALFF
@ H2645_FLAG_IS_NALFF
Definition: h2645_parse.h:97
ERContext::mb_num
int mb_num
Definition: error_resilience.h:60
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
ERContext::avctx
AVCodecContext * avctx
Definition: error_resilience.h:54
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:800
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
h264_decode_frame
static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_frame, AVPacket *avpkt)
Definition: h264dec.c:1034
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
h264_free_pic
static void h264_free_pic(H264Context *h, H264Picture *pic)
Definition: h264dec.c:345
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:627
H2645NAL::size
int size
Definition: h2645_parse.h:36
emms_c
#define emms_c()
Definition: emms.h:63
get_last_needed_nal
static int get_last_needed_nal(H264Context *h)
Definition: h264dec.c:508
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:530
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:896
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2053
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
h264_init_pic
static int h264_init_pic(H264Picture *pic)
Definition: h264dec.c:277
h264_decode_end
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264dec.c:352
get_consumed_bytes
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264dec.c:831
ff_h264_decode_extradata
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps, int *is_avc, int *nal_length_size, int err_recognition, void *logctx)
Definition: h264_parse.c:466
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:67
avpriv_h264_has_num_reorder_frames
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264dec.c:61
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
h264data.h
ff_h264_remove_all_refs
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:564
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ERContext::dc_val
int16_t * dc_val[3]
Definition: error_resilience.h:69
H264_NAL_SPS_EXT
@ H264_NAL_SPS_EXT
Definition: h264.h:47
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:35
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_h264_decoder
const FFCodec ff_h264_decoder
Definition: h264dec.c:1124
H264_NAL_SLICE
@ H264_NAL_SLICE
Definition: h264.h:35
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:155
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
threadframe.h
H264SEIGreenMetaData::percent_alpha_point_deblocking_instance
uint8_t percent_alpha_point_deblocking_instance
Definition: h264_sei.h:114
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
h264_class
static const AVClass h264_class
Definition: h264dec.c:1117
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
h264_er_decode_mb
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264dec.c:67
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:725
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:286
hwaccel_internal.h
AVCodecContext::slice_flags
int slice_flags
slice flags
Definition: avcodec.h:736
h264_init_context
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264dec.c:290
H264Ref::parent
const H264Picture * parent
Definition: h264dec.h:177
H264Ref::linesize
int linesize[3]
Definition: h264dec.h:171
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
profiles.h
ff_h264_profiles
const AVProfile ff_h264_profiles[]
Definition: profiles.c:72
PPS::init_qp
int init_qp
pic_init_qp_minus26 + 26
Definition: h264_ps.h:120
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts.c:370
H264Picture::mb_height
int mb_height
Definition: h264dec.h:160
H264SEIGreenMetaData
Definition: h264_sei.h:106
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:305
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:177
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:290
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
AVOnce
#define AVOnce
Definition: thread.h:202
H264SEIGreenMetaData::percent_intra_coded_macroblocks
uint8_t percent_intra_coded_macroblocks
Definition: h264_sei.h:112
h264_ps.h
H264_NAL_FILLER_DATA
@ H264_NAL_FILLER_DATA
Definition: h264.h:46
H264_NAL_AUXILIARY_SLICE
@ H264_NAL_AUXILIARY_SLICE
Definition: h264.h:53
h264_vlc_init
static AVOnce h264_vlc_init
Definition: h264dec.c:383
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:69
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
ERContext::opaque
void * opaque
Definition: error_resilience.h:89
f
f
Definition: af_crystalizer.c:122
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:155
H264SEIGreenMetaData::percent_six_tap_filtering
uint8_t percent_six_tap_filtering
Definition: h264_sei.h:113
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:515
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:540
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:400
h264_options
static const AVOption h264_options[]
Definition: h264dec.c:1107
codec_internal.h
H264SEIGreenMetaData::num_seconds
uint16_t num_seconds
Definition: h264_sei.h:109
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:61
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:390
ERContext::b8_stride
ptrdiff_t b8_stride
Definition: error_resilience.h:63
H264Picture::decode_error_flags
atomic_int * decode_error_flags
RefStruct reference; its pointee is shared between decoding threads.
Definition: h264dec.h:164
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
H2645NAL
Definition: h2645_parse.h:34
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:287
H264_NAL_END_STREAM
@ H264_NAL_END_STREAM
Definition: h264.h:45
H264SEIGreenMetaData::green_metadata_type
uint8_t green_metadata_type
Definition: h264_sei.h:107
ERContext::mb_stride
ptrdiff_t mb_stride
Definition: error_resilience.h:62
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1605
AVCodecHWConfigInternal
Definition: hwconfig.h:25
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:233
H264_NAL_DPB
@ H264_NAL_DPB
Definition: h264.h:37
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1029
ERContext::decode_mb
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: error_resilience.h:86
VD
#define VD
Definition: av1dec.c:1560
output_frame
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264dec.c:875
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1604
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:245
emms.h
h264dec.h
ff_h264_decode_seq_parameter_set
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:284
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:156
is_avcc_extradata
static int is_avcc_extradata(const uint8_t *buf, int buf_size)
Definition: h264dec.c:914
H264Context
H264Context.
Definition: h264dec.h:340
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
H264_NAL_DPC
@ H264_NAL_DPC
Definition: h264.h:38
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:529
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: packet.c:252
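A short sketch of the public API (helper name is mine): querying a packet for replacement extradata, one of the side-data types the H.264 decoder actually consumes.
#include <libavcodec/packet.h>

/* Sketch: check whether a packet carries replacement extradata
 * (e.g. updated SPS/PPS delivered mid-stream). */
static void check_new_extradata(const AVPacket *pkt)
{
    size_t side_size = 0;
    const uint8_t *side =
        av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
    if (side && side_size > 0) {
        /* parse the new parameter sets before decoding this packet */
    }
}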
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2760
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
ERContext::mb_width
int mb_width
Definition: error_resilience.h:61
H264_NAL_SEI
@ H264_NAL_SEI
Definition: h264.h:40
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
ff_h264_decode_init_vlc
av_cold void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:315
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
AVCodecContext::height
int height
Definition: avcodec.h:624
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:186
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
h264_export_enc_params
static int h264_export_enc_params(AVFrame *f, const H264Picture *p)
Definition: h264dec.c:841
vshift
static int vshift(enum AVPixelFormat fmt, int plane)
Definition: graph.c:97
ret
ret
Definition: filter_design.txt:187
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
ERContext::mb_height
int mb_height
Definition: error_resilience.h:61
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is attached.
Definition: log.h:80
FMO
#define FMO
Definition: h264dec.h:55
FF_DEBUG_GREEN_MD
#define FF_DEBUG_GREEN_MD
Definition: avcodec.h:1420
pos
unsigned int pos
Definition: spdifenc.c:414
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this once setup for the current frame is complete, so the next decoding thread may start; variables read by update_thread_context() must not change afterwards.
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:415
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:371
AVCodecContext::draw_horiz_band
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band.
Definition: avcodec.h:764
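A hedged sketch of a draw_horiz_band callback matching the signature shown above; this one only logs the rows that became available, where a real caller would render or copy the band.
#include <libavcodec/avcodec.h>
#include <libavutil/log.h>

/* Sketch: report which rows of the picture just finished decoding. */
static void on_band(AVCodecContext *s, const AVFrame *src,
                    int offset[AV_NUM_DATA_POINTERS],
                    int y, int type, int height)
{
    (void)src; (void)offset; (void)type;
    av_log(s, AV_LOG_DEBUG, "band ready: rows %d..%d\n", y, y + height - 1);
}

/* during setup, before avcodec_open2():
 *     dec_ctx->draw_horiz_band = on_band;    (dec_ctx assumed to exist) */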
HWACCEL_VULKAN
#define HWACCEL_VULKAN(codec)
Definition: hwconfig.h:76
OFFSET
#define OFFSET(x)
Byte offset of an option field within the decoder's private context, used to build the h264_options AVOption table.
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1612
H264_NAL_END_SEQUENCE
@ H264_NAL_END_SEQUENCE
Definition: h264.h:44
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
Read an unsigned Exp-Golomb code, constrained to a maximum of 31.
Definition: golomb.h:120
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:285
error_resilience.h
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
H264Picture::mb_width
int mb_width
Definition: h264dec.h:160
ff_h264_unref_picture
void ff_h264_unref_picture(H264Picture *pic)
Definition: h264_picture.c:39
atomic_fetch_or_explicit
#define atomic_fetch_or_explicit(object, operand, order)
Definition: stdatomic.h:155
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:825
H264Picture
Definition: h264dec.h:114
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:67
ERContext::quarter_sample
int quarter_sample
Definition: error_resilience.h:83
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:582
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
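A hedged sketch of the draining loop this capability implies (the helper name and the assumption that dec_ctx and frame are already set up are mine): sending a NULL packet enters draining mode, and delayed frames are pulled until AVERROR_EOF.
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Sketch: drain a decoder that sets AV_CODEC_CAP_DELAY. */
static int drain_decoder(AVCodecContext *dec_ctx, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, NULL);  /* enter draining mode */
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(dec_ctx, frame)) >= 0) {
        /* ... consume the delayed frame ... */
        av_frame_unref(frame);
    }
    return ret == AVERROR_EOF ? 0 : ret;
}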
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264pred.h:89
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:189
H264_NAL_PPS
@ H264_NAL_PPS
Definition: h264.h:42
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1406
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:272
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:296
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
ERContext::er_temp_buffer
uint8_t * er_temp_buffer
Definition: error_resilience.h:68
h264_decode_flush
static void h264_decode_flush(AVCodecContext *avctx)
Definition: h264dec.c:483
desc
const char * desc
Definition: libsvtav1.c:79
H264_NAL_DPA
@ H264_NAL_DPA
Definition: h264.h:36
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mem.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
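A sketch using this reader together with get_ue_golomb_31 listed above (FFmpeg-internal headers, so it compiles only inside the source tree; the helper name and the choice of the first two slice-header fields are illustrative).
/* Sketch: read the first two ue(v) slice-header syntax elements. */
#include "get_bits.h"
#include "golomb.h"

static int read_slice_header_start(const uint8_t *buf, int size,
                                   unsigned *first_mb, int *slice_type)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size);
    if (ret < 0)
        return ret;
    *first_mb   = get_ue_golomb_long(&gb); /* ue(v), full 32-bit range */
    *slice_type = get_ue_golomb_31(&gb);   /* ue(v), capped at 31 */
    return 0;
}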
ff_h264_sei_stereo_mode
const char * ff_h264_sei_stereo_mode(const H2645SEIFramePacking *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:305
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
H264SliceContext::er
ERContext * er
Definition: h264dec.h:183
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
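A short sketch of the public dictionary API (the helper name and the option key/value pairs are illustrative): the dictionary is created on first use and must be freed by the caller.
#include <libavutil/dict.h>

/* Sketch: build an options dictionary, e.g. to pass to avcodec_open2(). */
static AVDictionary *make_decoder_opts(void)
{
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "threads", "4", 0);   /* keys shown are illustrative */
    av_dict_set(&opts, "refcounted_frames", "1", 0);
    return opts;                             /* free with av_dict_free(&opts) */
}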
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
h264.h
imgutils.h
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:289
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
UPDATE_THREAD_CONTEXT_FOR_USER
#define UPDATE_THREAD_CONTEXT_FOR_USER(func)
Definition: codec_internal.h:307
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified index; idx must be between 0 and nb_blocks - 1.
Definition: video_enc_params.h:143
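A hedged sketch of reading the per-block data this accessor exposes, as exported on decoded frames when AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS is enabled (the helper name is mine).
#include <libavutil/frame.h>
#include <libavutil/video_enc_params.h>

/* Sketch: walk the per-block quantizer info attached to a decoded frame. */
static void dump_block_qp(const AVFrame *frame)
{
    AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_VIDEO_ENC_PARAMS);
    if (!sd)
        return;
    AVVideoEncParams *par = (AVVideoEncParams *)sd->data;
    for (unsigned i = 0; i < par->nb_blocks; i++) {
        AVVideoBlockParams *b = av_video_enc_params_block(par, i);
        int qp = par->qp + b->delta_qp;  /* per-block QP = frame QP + delta */
        (void)qp;
    }
}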
h
h
Definition: vp9dsp_template.c:2070
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:49
H264SEIGreenMetaData::num_pictures
uint16_t num_pictures
Definition: h264_sei.h:110
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int nal_length_size, enum AVCodecID codec_id, int flags)
Split an input packet into NAL units.
Definition: h2645_parse.c:465
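A hedged sketch of using this FFmpeg-internal API with the signature listed above (builds only inside the tree; the helper name, the nal_length_size of 0 for Annex B input, and the zero flags value are assumptions based on typical callers).
#include "h2645_parse.h"

/* Sketch: split an Annex B buffer into NAL units and walk them. */
static int walk_nals(const uint8_t *buf, int buf_size, void *logctx)
{
    H2645Packet pkt = { 0 };
    int ret = ff_h2645_packet_split(&pkt, buf, buf_size, logctx,
                                    0 /* no AVCC length prefix */,
                                    AV_CODEC_ID_H264, 0);
    if (ret >= 0) {
        for (int i = 0; i < pkt.nb_nals; i++) {
            const H2645NAL *nal = &pkt.nals[i];
            /* nal->type, nal->gb and nal->raw_data are ready for parsing */
            (void)nal;
        }
    }
    ff_h2645_packet_uninit(&pkt);
    return ret;
}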
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:288
av_image_copy
void av_image_copy(uint8_t *const dst_data[4], const int dst_linesizes[4], const uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
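A sketch of the public API (helper name is mine, and frame->format is assumed to be a valid pixel format): deep-copy a frame's pixel data into freshly allocated planes.
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

/* Sketch: copy all planes of a frame into caller-owned buffers. */
static int copy_frame_pixels(const AVFrame *frame,
                             uint8_t *dst_data[4], int dst_linesize[4])
{
    int ret = av_image_alloc(dst_data, dst_linesize,
                             frame->width, frame->height, frame->format, 32);
    if (ret < 0)
        return ret;
    av_image_copy(dst_data, dst_linesize,
                  (const uint8_t *const *)frame->data, frame->linesize,
                  frame->format, frame->width, frame->height);
    return 0;  /* caller frees with av_freep(&dst_data[0]) */
}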
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non-reference frames
Definition: defs.h:217
send_next_delayed_frame
static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame, int *got_frame, int buf_index)
Definition: h264dec.c:993
H264Ref::reference
int reference
Definition: h264dec.h:173
src
#define src
Definition: vp8dsp.c:248
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:420
video_enc_params.h
AV_RB16
#define AV_RB16(x)
Read a big-endian 16-bit value from memory.
Definition: bytestream.h:98
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:244
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, const H264Picture *src)
Definition: h264_picture.c:166
H264SEIGreenMetaData::xsd_metric_type
uint8_t xsd_metric_type
Definition: h264_sei.h:115