[FFmpeg-cvslog] h264: use properly allocated AVFrames

Anton Khirnov git at videolan.org
Wed Apr 29 16:48:25 CEST 2015


ffmpeg | branch: master | Anton Khirnov <anton at khirnov.net> | Fri Apr 17 12:09:50 2015 +0200| [a0f2946068c62e18cb05ac25c0df3d86077251a6] | committer: Anton Khirnov

h264: use properly allocated AVFrames

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=a0f2946068c62e18cb05ac25c0df3d86077251a6
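
For readers less familiar with the libavutil frame API, the pattern behind most of
the churn below can be sketched as follows. This is an illustrative, simplified
example only (the Picture struct and helper names are stand-ins, not the commit's
code): each H264Picture now holds a pointer to an AVFrame, allocated once with
av_frame_alloc() and released with av_frame_free(), instead of embedding the
AVFrame by value. Field accesses accordingly change from pic->f.data to
pic->f->data, which accounts for the bulk of the mechanical edits in the diff.

    /* Illustrative sketch only -- not part of the commit. The struct and
     * helper names are simplified stand-ins for H264Picture and its
     * init/uninit paths. */
    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    typedef struct Picture {
        AVFrame *f;                 /* was: "AVFrame f;" embedded by value */
    } Picture;

    static int picture_init(Picture *pic)
    {
        pic->f = av_frame_alloc();  /* heap-allocate the AVFrame */
        if (!pic->f)
            return AVERROR(ENOMEM);
        return 0;
    }

    static void picture_uninit(Picture *pic)
    {
        /* av_frame_free() unrefs any frame data, frees the AVFrame itself
         * and sets the pointer to NULL. */
        av_frame_free(&pic->f);
    }
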
---

 libavcodec/dxva2_h264.c       |   10 ++---
 libavcodec/h264.c             |   81 +++++++++++++++++++++------------------
 libavcodec/h264.h             |    2 +-
 libavcodec/h264_mb_template.c |    8 ++--
 libavcodec/h264_picture.c     |   12 +++---
 libavcodec/h264_refs.c        |   18 ++++-----
 libavcodec/h264_slice.c       |   54 +++++++++++++-------------
 libavcodec/svq3.c             |   84 ++++++++++++++++++++++-------------------
 libavcodec/vaapi_h264.c       |    6 +--
 libavcodec/vda_h264.c         |    4 +-
 libavcodec/vdpau_h264.c       |    6 +--
 11 files changed, 151 insertions(+), 134 deletions(-)

diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
index 093db99..a5e9705 100644
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -51,7 +51,7 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
     memset(pp, 0, sizeof(*pp));
     /* Configure current picture */
     fill_picture_entry(&pp->CurrPic,
-                       ff_dxva2_get_surface_index(ctx, &current_picture->f),
+                       ff_dxva2_get_surface_index(ctx, current_picture->f),
                        h->picture_structure == PICT_BOTTOM_FIELD);
     /* Configure the set of references */
     pp->UsedForReferenceFlags  = 0;
@@ -67,7 +67,7 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
         }
         if (r) {
             fill_picture_entry(&pp->RefFrameList[i],
-                               ff_dxva2_get_surface_index(ctx, &r->f),
+                               ff_dxva2_get_surface_index(ctx, r->f),
                                r->long_ref != 0);
 
             if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
@@ -244,9 +244,9 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
                 unsigned plane;
                 unsigned index;
                 if (ctx->workaround & FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO)
-                    index = ff_dxva2_get_surface_index(ctx, &r->f);
+                    index = ff_dxva2_get_surface_index(ctx, r->f);
                 else
-                    index = get_refpic_index(pp, ff_dxva2_get_surface_index(ctx, &r->f));
+                    index = get_refpic_index(pp, ff_dxva2_get_surface_index(ctx, r->f));
                 fill_picture_entry(&slice->RefPicList[list][i], index,
                                    r->reference == PICT_BOTTOM_FIELD);
                 for (plane = 0; plane < 3; plane++) {
@@ -454,7 +454,7 @@ static int dxva2_h264_end_frame(AVCodecContext *avctx)
 
     if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
         return -1;
-    ret = ff_dxva2_common_end_frame(avctx, &h->cur_pic_ptr->f,
+    ret = ff_dxva2_common_end_frame(avctx, h->cur_pic_ptr->f,
                                     &ctx_pic->pp, sizeof(ctx_pic->pp),
                                     &ctx_pic->qm, sizeof(ctx_pic->qm),
                                     commit_bitstream_and_slice_buffer);
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 2756edb..e39a119 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -83,7 +83,7 @@ void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
                              int y, int height)
 {
     AVCodecContext *avctx = h->avctx;
-    const AVFrame   *src  = &h->cur_pic.f;
+    const AVFrame   *src  = h->cur_pic.f;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
     int vshift = desc->log2_chroma_h;
     const int field_pic = h->picture_structure != PICT_FRAME;
@@ -590,9 +590,15 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
         return AVERROR(ENOMEM);
     }
 
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
-        av_frame_unref(&h->DPB[i].f);
-    av_frame_unref(&h->cur_pic.f);
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
+        h->DPB[i].f = av_frame_alloc();
+        if (!h->DPB[i].f)
+            return AVERROR(ENOMEM);
+    }
+
+    h->cur_pic.f = av_frame_alloc();
+    if (!h->cur_pic.f)
+        return AVERROR(ENOMEM);
 
     for (i = 0; i < h->nb_slice_ctx; i++)
         h->slice_ctx[i].h264 = h;
@@ -682,7 +688,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
     int i, pics, out_of_order, out_idx;
     int invalid = 0, cnt = 0;
 
-    h->cur_pic_ptr->f.pict_type = h->pict_type;
+    h->cur_pic_ptr->f->pict_type = h->pict_type;
 
     if (h->next_output_pic)
         return;
@@ -697,8 +703,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
         return;
     }
 
-    cur->f.interlaced_frame = 0;
-    cur->f.repeat_pict      = 0;
+    cur->f->interlaced_frame = 0;
+    cur->f->repeat_pict      = 0;
 
     /* Signal interlacing information externally. */
     /* Prioritize picture timing SEI information over used
@@ -710,55 +716,55 @@ static void decode_postinit(H264Context *h, int setup_finished)
             break;
         case SEI_PIC_STRUCT_TOP_FIELD:
         case SEI_PIC_STRUCT_BOTTOM_FIELD:
-            cur->f.interlaced_frame = 1;
+            cur->f->interlaced_frame = 1;
             break;
         case SEI_PIC_STRUCT_TOP_BOTTOM:
         case SEI_PIC_STRUCT_BOTTOM_TOP:
             if (FIELD_OR_MBAFF_PICTURE(h))
-                cur->f.interlaced_frame = 1;
+                cur->f->interlaced_frame = 1;
             else
                 // try to flag soft telecine progressive
-                cur->f.interlaced_frame = h->prev_interlaced_frame;
+                cur->f->interlaced_frame = h->prev_interlaced_frame;
             break;
         case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
         case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
             /* Signal the possibility of telecined film externally
              * (pic_struct 5,6). From these hints, let the applications
              * decide if they apply deinterlacing. */
-            cur->f.repeat_pict = 1;
+            cur->f->repeat_pict = 1;
             break;
         case SEI_PIC_STRUCT_FRAME_DOUBLING:
-            cur->f.repeat_pict = 2;
+            cur->f->repeat_pict = 2;
             break;
         case SEI_PIC_STRUCT_FRAME_TRIPLING:
-            cur->f.repeat_pict = 4;
+            cur->f->repeat_pict = 4;
             break;
         }
 
         if ((h->sei_ct_type & 3) &&
             h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
-            cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
+            cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
     } else {
         /* Derive interlacing flag from used decoding process. */
-        cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
+        cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
     }
-    h->prev_interlaced_frame = cur->f.interlaced_frame;
+    h->prev_interlaced_frame = cur->f->interlaced_frame;
 
     if (cur->field_poc[0] != cur->field_poc[1]) {
         /* Derive top_field_first from field pocs. */
-        cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
+        cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
     } else {
-        if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
+        if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) {
             /* Use picture timing SEI information. Even if it is a
              * information of a past frame, better than nothing. */
             if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
                 h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
-                cur->f.top_field_first = 1;
+                cur->f->top_field_first = 1;
             else
-                cur->f.top_field_first = 0;
+                cur->f->top_field_first = 0;
         } else {
             /* Most likely progressive */
-            cur->f.top_field_first = 0;
+            cur->f->top_field_first = 0;
         }
     }
 
@@ -767,7 +773,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
         h->frame_packing_arrangement_type <= 6 &&
         h->content_interpretation_type > 0 &&
         h->content_interpretation_type < 3) {
-        AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f);
+        AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
         if (!stereo)
             return;
 
@@ -805,7 +811,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
     if (h->sei_display_orientation_present &&
         (h->sei_anticlockwise_rotation || h->sei_hflip || h->sei_vflip)) {
         double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
-        AVFrameSideData *rotation = av_frame_new_side_data(&cur->f,
+        AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
                                                            AV_FRAME_DATA_DISPLAYMATRIX,
                                                            sizeof(int32_t) * 9);
         if (!rotation)
@@ -859,13 +865,13 @@ static void decode_postinit(H264Context *h, int setup_finished)
         cnt     += out->poc < h->last_pocs[i];
         invalid += out->poc == INT_MIN;
     }
-    if (!h->mmco_reset && !cur->f.key_frame &&
+    if (!h->mmco_reset && !cur->f->key_frame &&
         cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
         h->mmco_reset = 2;
         if (pics > 1)
             h->delayed_pic[pics - 2]->mmco_reset = 2;
     }
-    if (h->mmco_reset || cur->f.key_frame) {
+    if (h->mmco_reset || cur->f->key_frame) {
         for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
             h->last_pocs[i] = INT_MIN;
         cnt     = 0;
@@ -876,16 +882,16 @@ static void decode_postinit(H264Context *h, int setup_finished)
     for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
                 h->delayed_pic[i] &&
                 !h->delayed_pic[i - 1]->mmco_reset &&
-                !h->delayed_pic[i]->f.key_frame;
+                !h->delayed_pic[i]->f->key_frame;
          i++)
         if (h->delayed_pic[i]->poc < out->poc) {
             out     = h->delayed_pic[i];
             out_idx = i;
         }
     if (h->avctx->has_b_frames == 0 &&
-        (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
+        (h->delayed_pic[0]->f->key_frame || h->mmco_reset))
         h->next_outputed_poc = INT_MIN;
-    out_of_order = !out->f.key_frame && !h->mmco_reset &&
+    out_of_order = !out->f->key_frame && !h->mmco_reset &&
                    (out->poc < h->next_outputed_poc);
 
     if (h->sps.bitstream_restriction_flag &&
@@ -899,7 +905,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
     } else if (h->low_delay &&
                ((h->next_outputed_poc != INT_MIN &&
                  out->poc > h->next_outputed_poc + 2) ||
-                cur->f.pict_type == AV_PICTURE_TYPE_B)) {
+                cur->f->pict_type == AV_PICTURE_TYPE_B)) {
         h->low_delay = 0;
         h->avctx->has_b_frames++;
     }
@@ -924,7 +930,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
                 h->next_outputed_poc = INT_MIN;
             }
         } else {
-            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
+            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) {
                 h->next_outputed_poc = INT_MIN;
             } else {
                 h->next_outputed_poc = out->poc;
@@ -1445,7 +1451,7 @@ again:
                                         ((1 << h->sps.log2_max_frame_num) - 1);
                 }
 
-                h->cur_pic_ptr->f.key_frame |=
+                h->cur_pic_ptr->f->key_frame |=
                     (h->nal_unit_type == NAL_IDR_SLICE) ||
                     (h->sei_recovery_frame_cnt >= 0);
 
@@ -1626,7 +1632,7 @@ out:
         out_idx = 0;
         for (i = 1;
              h->delayed_pic[i] &&
-             !h->delayed_pic[i]->f.key_frame &&
+             !h->delayed_pic[i]->f->key_frame &&
              !h->delayed_pic[i]->mmco_reset;
              i++)
             if (h->delayed_pic[i]->poc < out->poc) {
@@ -1638,7 +1644,7 @@ out:
             h->delayed_pic[i] = h->delayed_pic[i + 1];
 
         if (out) {
-            ret = output_frame(h, pict, &out->f);
+            ret = output_frame(h, pict, out->f);
             if (ret < 0)
                 return ret;
             *got_frame = 1;
@@ -1674,9 +1680,9 @@ out:
         if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) ||
                                    h->next_output_pic->recovered)) {
             if (!h->next_output_pic->recovered)
-                h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT;
+                h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
 
-            ret = output_frame(h, pict, &h->next_output_pic->f);
+            ret = output_frame(h, pict, h->next_output_pic->f);
             if (ret < 0)
                 return ret;
             *got_frame = 1;
@@ -1694,8 +1700,10 @@ av_cold void ff_h264_free_context(H264Context *h)
 
     ff_h264_free_tables(h);
 
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
         ff_h264_unref_picture(h, &h->DPB[i]);
+        av_frame_free(&h->DPB[i].f);
+    }
 
     h->cur_pic_ptr = NULL;
 
@@ -1718,6 +1726,7 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
     ff_h264_free_context(h);
 
     ff_h264_unref_picture(h, &h->cur_pic);
+    av_frame_free(&h->cur_pic.f);
 
     return 0;
 }
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 635c4a4..12172ac 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -261,7 +261,7 @@ typedef struct MMCO {
 } MMCO;
 
 typedef struct H264Picture {
-    struct AVFrame f;
+    AVFrame *f;
     ThreadFrame tf;
 
     AVBufferRef *qscale_table_buf;
diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c
index 75757a6..968c55e 100644
--- a/libavcodec/h264_mb_template.c
+++ b/libavcodec/h264_mb_template.c
@@ -57,9 +57,9 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
     const int block_h   = 16 >> h->chroma_y_shift;
     const int chroma422 = CHROMA422(h);
 
-    dest_y  = h->cur_pic.f.data[0] + ((mb_x << PIXEL_SHIFT)     + mb_y * sl->linesize)  * 16;
-    dest_cb = h->cur_pic.f.data[1] +  (mb_x << PIXEL_SHIFT) * 8 + mb_y * sl->uvlinesize * block_h;
-    dest_cr = h->cur_pic.f.data[2] +  (mb_x << PIXEL_SHIFT) * 8 + mb_y * sl->uvlinesize * block_h;
+    dest_y  = h->cur_pic.f->data[0] + ((mb_x << PIXEL_SHIFT)     + mb_y * sl->linesize)  * 16;
+    dest_cb = h->cur_pic.f->data[1] +  (mb_x << PIXEL_SHIFT) * 8 + mb_y * sl->uvlinesize * block_h;
+    dest_cr = h->cur_pic.f->data[2] +  (mb_x << PIXEL_SHIFT) * 8 + mb_y * sl->uvlinesize * block_h;
 
     h->vdsp.prefetch(dest_y  + (sl->mb_x & 3) * 4 * sl->linesize   + (64 << PIXEL_SHIFT), sl->linesize,       4);
     h->vdsp.prefetch(dest_cb + (sl->mb_x & 7)     * sl->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2);
@@ -286,7 +286,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
     const int plane_count      = (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) ? 3 : 1;
 
     for (p = 0; p < plane_count; p++) {
-        dest[p] = h->cur_pic.f.data[p] +
+        dest[p] = h->cur_pic.f->data[p] +
                   ((mb_x << PIXEL_SHIFT) + mb_y * sl->linesize) * 16;
         h->vdsp.prefetch(dest[p] + (sl->mb_x & 3) * 4 * sl->linesize + (64 << PIXEL_SHIFT),
                          sl->linesize, 4);
diff --git a/libavcodec/h264_picture.c b/libavcodec/h264_picture.c
index 814599d..3e2c84e 100644
--- a/libavcodec/h264_picture.c
+++ b/libavcodec/h264_picture.c
@@ -48,7 +48,7 @@ void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
     int off = offsetof(H264Picture, tf) + sizeof(pic->tf);
     int i;
 
-    if (!pic->f.buf[0])
+    if (!pic->f || !pic->f->buf[0])
         return;
 
     ff_thread_release_buffer(h->avctx, &pic->tf);
@@ -68,11 +68,11 @@ int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
 {
     int ret, i;
 
-    av_assert0(!dst->f.buf[0]);
-    av_assert0(src->f.buf[0]);
+    av_assert0(!dst->f->buf[0]);
+    av_assert0(src->f->buf[0]);
 
-    src->tf.f = &src->f;
-    dst->tf.f = &dst->f;
+    src->tf.f = src->f;
+    dst->tf.f = dst->f;
     ret = ff_thread_ref_frame(&dst->tf, &src->tf);
     if (ret < 0)
         goto fail;
@@ -130,7 +130,7 @@ static void h264_set_erpic(ERPicture *dst, H264Picture *src)
     if (!src)
         return;
 
-    dst->f = &src->f;
+    dst->f = src->f;
     dst->tf = &src->tf;
 
     for (i = 0; i < 2; i++) {
diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
index 1ee43b7..2ddbdb7 100644
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@ -49,8 +49,8 @@ static void pic_as_field(H264Ref *pic, const int parity)
 
 static void ref_from_h264pic(H264Ref *dst, H264Picture *src)
 {
-    memcpy(dst->data,     src->f.data,     sizeof(dst->data));
-    memcpy(dst->linesize, src->f.linesize, sizeof(dst->linesize));
+    memcpy(dst->data,     src->f->data,     sizeof(dst->data));
+    memcpy(dst->linesize, src->f->linesize, sizeof(dst->linesize));
     dst->reference = src->reference;
     dst->poc       = src->poc;
     dst->pic_id    = src->pic_id;
@@ -151,8 +151,8 @@ int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
 
         if (lens[0] == lens[1] && lens[1] > 1) {
             for (i = 0; i < lens[0] &&
-                        h->default_ref_list[0][i].parent->f.buf[0]->buffer ==
-                        h->default_ref_list[1][i].parent->f.buf[0]->buffer; i++);
+                        h->default_ref_list[0][i].parent->f->buf[0]->buffer ==
+                        h->default_ref_list[1][i].parent->f->buf[0]->buffer; i++);
             if (i == lens[0]) {
                 FFSWAP(H264Ref, h->default_ref_list[1][0], h->default_ref_list[1][1]);
             }
@@ -172,14 +172,14 @@ int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
         ff_tlog(h->avctx, "List0: %s fn:%d 0x%p\n",
                 (h->default_ref_list[0][i].long_ref ? "LT" : "ST"),
                 h->default_ref_list[0][i].pic_id,
-                h->default_ref_list[0][i].f.data[0]);
+                h->default_ref_list[0][i].f->data[0]);
     }
     if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
         for (i = 0; i < sl->ref_count[1]; i++) {
             ff_tlog(h->avctx, "List1: %s fn:%d 0x%p\n",
                     (h->default_ref_list[1][i].long_ref ? "LT" : "ST"),
                     h->default_ref_list[1][i].pic_id,
-                    h->default_ref_list[1][i].f.data[0]);
+                    h->default_ref_list[1][i].f->data[0]);
         }
     }
 #endif
@@ -355,7 +355,7 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h, H264SliceContext *sl)
             field[1] = field[0];
 
             for (j = 0; j < 3; j++)
-                field[1].data[j] += frame->parent->f.linesize[j];
+                field[1].data[j] += frame->parent->f->linesize[j];
             field[1].reference = PICT_BOTTOM_FIELD;
             field[1].poc       = field[1].parent->field_poc[1];
 
@@ -504,7 +504,7 @@ static void print_short_term(H264Context *h)
         for (i = 0; i < h->short_ref_count; i++) {
             H264Picture *pic = h->short_ref[i];
             av_log(h->avctx, AV_LOG_DEBUG, "%"PRIu32" fn:%d poc:%d %p\n",
-                   i, pic->frame_num, pic->poc, pic->f.data[0]);
+                   i, pic->frame_num, pic->poc, pic->f->data[0]);
         }
     }
 }
@@ -521,7 +521,7 @@ static void print_long_term(H264Context *h)
             H264Picture *pic = h->long_ref[i];
             if (pic) {
                 av_log(h->avctx, AV_LOG_DEBUG, "%"PRIu32" fn:%d poc:%d %p\n",
-                       i, pic->frame_num, pic->poc, pic->f.data[0]);
+                       i, pic->frame_num, pic->poc, pic->f->data[0]);
             }
         }
     }
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 425dab9..a250237 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -150,7 +150,7 @@ static void release_unused_pictures(H264Context *h, int remove_current)
 
     /* release non reference frames */
     for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
-        if (h->DPB[i].f.buf[0] && !h->DPB[i].reference &&
+        if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
             (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
             ff_h264_unref_picture(h, &h->DPB[i]);
         }
@@ -220,9 +220,9 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
 {
     int i, ret = 0;
 
-    av_assert0(!pic->f.data[0]);
+    av_assert0(!pic->f->data[0]);
 
-    pic->tf.f = &pic->f;
+    pic->tf.f = pic->f;
     ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
                                                    AV_GET_BUFFER_FLAG_REF : 0);
     if (ret < 0)
@@ -271,7 +271,7 @@ fail:
 
 static inline int pic_is_unused(H264Context *h, H264Picture *pic)
 {
-    if (!pic->f.buf[0])
+    if (!pic->f->buf[0])
         return 1;
     return 0;
 }
@@ -473,14 +473,14 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
 
     for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
         ff_h264_unref_picture(h, &h->DPB[i]);
-        if (h1->DPB[i].f.buf[0] &&
+        if (h1->DPB[i].f->buf[0] &&
             (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
             return ret;
     }
 
     h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
     ff_h264_unref_picture(h, &h->cur_pic);
-    if (h1->cur_pic.f.buf[0]) {
+    if (h1->cur_pic.f->buf[0]) {
         ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
         if (ret < 0)
             return ret;
@@ -556,14 +556,14 @@ static int h264_frame_start(H264Context *h)
     pic = &h->DPB[i];
 
     pic->reference              = h->droppable ? 0 : h->picture_structure;
-    pic->f.coded_picture_number = h->coded_picture_number++;
+    pic->f->coded_picture_number = h->coded_picture_number++;
     pic->field_picture          = h->picture_structure != PICT_FRAME;
     /*
      * Zero key_frame here; IDR markings per slice in frame or fields are ORed
      * in later.
      * See decode_nal_units().
      */
-    pic->f.key_frame = 0;
+    pic->f->key_frame = 0;
     pic->mmco_reset  = 0;
     pic->recovered   = 0;
 
@@ -579,14 +579,14 @@ static int h264_frame_start(H264Context *h)
         ff_er_frame_start(&h->slice_ctx[0].er);
 
     for (i = 0; i < 16; i++) {
-        h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f.linesize[0] * ((scan8[i] - scan8[0]) >> 3);
-        h->block_offset[48 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f.linesize[0] * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[48 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
     }
     for (i = 0; i < 16; i++) {
         h->block_offset[16 + i]      =
-        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f.linesize[1] * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
         h->block_offset[48 + 16 + i] =
-        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f.linesize[1] * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
     }
 
     /* Some macroblocks can be accessed before they're available in case
@@ -1278,7 +1278,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
          * since that can modify s->current_picture_ptr. */
         if (h->first_field) {
             assert(h->cur_pic_ptr);
-            assert(h->cur_pic_ptr->f.buf[0]);
+            assert(h->cur_pic_ptr->f->buf[0]);
             assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
             /* figure out if we have a complementary field pair */
@@ -1353,10 +1353,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
              * is not noticeable by comparison, but it should be fixed. */
             if (h->short_ref_count) {
                 if (prev) {
-                    av_image_copy(h->short_ref[0]->f.data,
-                                  h->short_ref[0]->f.linesize,
-                                  (const uint8_t **)prev->f.data,
-                                  prev->f.linesize,
+                    av_image_copy(h->short_ref[0]->f->data,
+                                  h->short_ref[0]->f->linesize,
+                                  (const uint8_t **)prev->f->data,
+                                  prev->f->linesize,
                                   h->avctx->pix_fmt,
                                   h->mb_width  * 16,
                                   h->mb_height * 16);
@@ -1371,7 +1371,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
          * frame, or to allocate a new one. */
         if (h->first_field) {
             assert(h->cur_pic_ptr);
-            assert(h->cur_pic_ptr->f.buf[0]);
+            assert(h->cur_pic_ptr->f->buf[0]);
             assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
             /* figure out if we have a complementary field pair */
@@ -1612,16 +1612,16 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         for (i = 0; i < 16; i++) {
             id_list[i] = 60;
             if (j < sl->list_count && i < sl->ref_count[j] &&
-                sl->ref_list[j][i].parent->f.buf[0]) {
+                sl->ref_list[j][i].parent->f->buf[0]) {
                 int k;
-                AVBuffer *buf = sl->ref_list[j][i].parent->f.buf[0]->buffer;
+                AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
                 for (k = 0; k < h->short_ref_count; k++)
-                    if (h->short_ref[k]->f.buf[0]->buffer == buf) {
+                    if (h->short_ref[k]->f->buf[0]->buffer == buf) {
                         id_list[i] = k;
                         break;
                     }
                 for (k = 0; k < h->long_ref_count; k++)
-                    if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
+                    if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
                         id_list[i] = h->short_ref_count + k;
                         break;
                     }
@@ -1935,12 +1935,12 @@ static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x,
 
                 sl->mb_x = mb_x;
                 sl->mb_y = mb_y;
-                dest_y  = h->cur_pic.f.data[0] +
+                dest_y  = h->cur_pic.f->data[0] +
                           ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
-                dest_cb = h->cur_pic.f.data[1] +
+                dest_cb = h->cur_pic.f->data[1] +
                           (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                           mb_y * sl->uvlinesize * block_h;
-                dest_cr = h->cur_pic.f.data[2] +
+                dest_cr = h->cur_pic.f->data[2] +
                           (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                           mb_y * sl->uvlinesize * block_h;
                 // FIXME simplify above
@@ -2046,8 +2046,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
     int lf_x_start = sl->mb_x;
     int ret;
 
-    sl->linesize   = h->cur_pic_ptr->f.linesize[0];
-    sl->uvlinesize = h->cur_pic_ptr->f.linesize[1];
+    sl->linesize   = h->cur_pic_ptr->f->linesize[0];
+    sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
 
     ret = alloc_scratch_buffers(sl, sl->linesize);
     if (ret < 0)
diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
index 6c877bd..5f8eb0e 100644
--- a/libavcodec/svq3.c
+++ b/libavcodec/svq3.c
@@ -313,8 +313,8 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
     }
 
     /* form component predictions */
-    dest = h->cur_pic.f.data[0] + x + y * sl->linesize;
-    src  = pic->f.data[0] + mx + my * sl->linesize;
+    dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
+    src  = pic->f->data[0] + mx + my * sl->linesize;
 
     if (emu) {
         h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
@@ -340,8 +340,8 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
         blocksize++;
 
         for (i = 1; i < 3; i++) {
-            dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
-            src  = pic->f.data[i] + mx + my * sl->uvlinesize;
+            dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
+            src  = pic->f->data[i] + mx + my * sl->uvlinesize;
 
             if (emu) {
                 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
@@ -884,9 +884,18 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }
 
+    s->cur_pic->f  = av_frame_alloc();
+    s->last_pic->f = av_frame_alloc();
+    s->next_pic->f = av_frame_alloc();
+    if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
+        return AVERROR(ENOMEM);
+
     if (ff_h264_decode_init(avctx) < 0)
         return -1;
 
+    // we will overwrite it later during decoding
+    av_frame_free(&h->cur_pic.f);
+
     ff_h264dsp_init(&h->h264dsp, 8, 1);
     ff_h264chroma_init(&h->h264chroma, 8);
     ff_h264qpel_init(&h->h264qpel, 8);
@@ -1066,7 +1075,7 @@ static void free_picture(AVCodecContext *avctx, H264Picture *pic)
     }
     av_buffer_unref(&pic->mb_type_buf);
 
-    av_frame_unref(&pic->f);
+    av_frame_unref(pic->f);
 }
 
 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
@@ -1102,19 +1111,19 @@ static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
     }
     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
 
-    ret = ff_get_buffer(avctx, &pic->f,
+    ret = ff_get_buffer(avctx, pic->f,
                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
     if (ret < 0)
         goto fail;
 
     if (!sl->edge_emu_buffer) {
-        sl->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
+        sl->edge_emu_buffer = av_mallocz(pic->f->linesize[0] * 17);
         if (!sl->edge_emu_buffer)
             return AVERROR(ENOMEM);
     }
 
-    sl->linesize   = pic->f.linesize[0];
-    sl->uvlinesize = pic->f.linesize[1];
+    sl->linesize   = pic->f->linesize[0];
+    sl->uvlinesize = pic->f->linesize[1];
 
     return 0;
 fail:
@@ -1134,8 +1143,8 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
 
     /* special case for last picture */
     if (buf_size == 0) {
-        if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
-            ret = av_frame_ref(data, &s->next_pic->f);
+        if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
+            ret = av_frame_ref(data, s->next_pic->f);
             if (ret < 0)
                 return ret;
             s->last_frame_output = 1;
@@ -1156,22 +1165,18 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
     if (h->pict_type != AV_PICTURE_TYPE_B)
         FFSWAP(H264Picture*, s->next_pic, s->last_pic);
 
-    av_frame_unref(&s->cur_pic->f);
+    av_frame_unref(s->cur_pic->f);
 
     /* for skipping the frame */
-    s->cur_pic->f.pict_type = h->pict_type;
-    s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
+    s->cur_pic->f->pict_type = h->pict_type;
+    s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
 
     ret = get_buffer(avctx, s->cur_pic);
     if (ret < 0)
         return ret;
 
     h->cur_pic_ptr = s->cur_pic;
-    av_frame_unref(&h->cur_pic.f);
     h->cur_pic     = *s->cur_pic;
-    ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
-    if (ret < 0)
-        return ret;
 
     for (i = 0; i < 16; i++) {
         h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
@@ -1185,28 +1190,28 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     if (h->pict_type != AV_PICTURE_TYPE_I) {
-        if (!s->last_pic->f.data[0]) {
+        if (!s->last_pic->f->data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
             ret = get_buffer(avctx, s->last_pic);
             if (ret < 0)
                 return ret;
-            memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
-            memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
-                   s->last_pic->f.linesize[1]);
-            memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
-                   s->last_pic->f.linesize[2]);
+            memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
+            memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
+                   s->last_pic->f->linesize[1]);
+            memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
+                   s->last_pic->f->linesize[2]);
         }
 
-        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
+        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
             ret = get_buffer(avctx, s->next_pic);
             if (ret < 0)
                 return ret;
-            memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
-            memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
-                   s->next_pic->f.linesize[1]);
-            memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
-                   s->next_pic->f.linesize[2]);
+            memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
+            memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
+                   s->next_pic->f->linesize[1]);
+            memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
+                   s->next_pic->f->linesize[2]);
         }
     }
 
@@ -1296,27 +1301,27 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
         }
 
-        ff_draw_horiz_band(avctx, &s->cur_pic->f,
-                           s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
+        ff_draw_horiz_band(avctx, s->cur_pic->f,
+                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                            16 * sl->mb_y, 16, h->picture_structure, 0,
                            h->low_delay);
     }
 
     if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
-        ret = av_frame_ref(data, &s->cur_pic->f);
-    else if (s->last_pic->f.data[0])
-        ret = av_frame_ref(data, &s->last_pic->f);
+        ret = av_frame_ref(data, s->cur_pic->f);
+    else if (s->last_pic->f->data[0])
+        ret = av_frame_ref(data, s->last_pic->f);
     if (ret < 0)
         return ret;
 
     /* Do not output the last pic after seeking. */
-    if (s->last_pic->f.data[0] || h->low_delay)
+    if (s->last_pic->f->data[0] || h->low_delay)
         *got_frame = 1;
 
     if (h->pict_type != AV_PICTURE_TYPE_B) {
         FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
     } else {
-        av_frame_unref(&s->cur_pic->f);
+        av_frame_unref(s->cur_pic->f);
     }
 
     return buf_size;
@@ -1330,11 +1335,14 @@ static av_cold int svq3_decode_end(AVCodecContext *avctx)
     free_picture(avctx, s->cur_pic);
     free_picture(avctx, s->next_pic);
     free_picture(avctx, s->last_pic);
+    av_frame_free(&s->cur_pic->f);
+    av_frame_free(&s->next_pic->f);
+    av_frame_free(&s->last_pic->f);
     av_freep(&s->cur_pic);
     av_freep(&s->next_pic);
     av_freep(&s->last_pic);
 
-    av_frame_unref(&h->cur_pic.f);
+    memset(&h->cur_pic, 0, sizeof(h->cur_pic));
 
     ff_h264_free_context(h);
 
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index 54f4d13..82a49f6 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -59,7 +59,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic,
         pic_structure = pic->reference;
     pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */
 
-    va_pic->picture_id = ff_vaapi_get_surface_id(&pic->f);
+    va_pic->picture_id = ff_vaapi_get_surface_id(pic->f);
     va_pic->frame_idx  = pic->long_ref ? pic->pic_id : pic->frame_num;
 
     va_pic->flags      = 0;
@@ -99,7 +99,7 @@ static int dpb_add(DPB *dpb, H264Picture *pic)
 
     for (i = 0; i < dpb->size; i++) {
         VAPictureH264 * const va_pic = &dpb->va_pics[i];
-        if (va_pic->picture_id == ff_vaapi_get_surface_id(&pic->f)) {
+        if (va_pic->picture_id == ff_vaapi_get_surface_id(pic->f)) {
             VAPictureH264 temp_va_pic;
             fill_vaapi_pic(&temp_va_pic, pic, 0);
 
@@ -301,7 +301,7 @@ static int vaapi_h264_end_frame(AVCodecContext *avctx)
     if (ret < 0)
         goto finish;
 
-    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(&h->cur_pic_ptr->f));
+    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->cur_pic_ptr->f));
     if (ret < 0)
         goto finish;
 
diff --git a/libavcodec/vda_h264.c b/libavcodec/vda_h264.c
index acefde6..3c0775b 100644
--- a/libavcodec/vda_h264.c
+++ b/libavcodec/vda_h264.c
@@ -129,7 +129,7 @@ static int vda_old_h264_end_frame(AVCodecContext *avctx)
     H264Context *h                      = avctx->priv_data;
     VDAContext *vda                     = avctx->internal->hwaccel_priv_data;
     struct vda_context *vda_ctx         = avctx->hwaccel_context;
-    AVFrame *frame                      = &h->cur_pic_ptr->f;
+    AVFrame *frame                      = h->cur_pic_ptr->f;
     int status;
 
     if (!vda_ctx->decoder || !vda->bitstream)
@@ -325,7 +325,7 @@ static int vda_h264_end_frame(AVCodecContext *avctx)
     H264Context *h        = avctx->priv_data;
     VDAContext *vda       = avctx->internal->hwaccel_priv_data;
     AVVDAContext *vda_ctx = avctx->hwaccel_context;
-    AVFrame *frame        = &h->cur_pic_ptr->f;
+    AVFrame *frame        = h->cur_pic_ptr->f;
     uint32_t flush_flags  = 1 << 0; ///< kVDADecoderFlush_emitFrames
     CFDataRef coded_frame;
     OSStatus status;
diff --git a/libavcodec/vdpau_h264.c b/libavcodec/vdpau_h264.c
index f8eab14..d03d127 100644
--- a/libavcodec/vdpau_h264.c
+++ b/libavcodec/vdpau_h264.c
@@ -51,7 +51,7 @@ static void vdpau_h264_clear_rf(VdpReferenceFrameH264 *rf)
 static void vdpau_h264_set_rf(VdpReferenceFrameH264 *rf, H264Picture *pic,
                               int pic_structure)
 {
-    VdpVideoSurface surface = ff_vdpau_get_surface_id(&pic->f);
+    VdpVideoSurface surface = ff_vdpau_get_surface_id(pic->f);
 
     if (pic_structure == 0)
         pic_structure = pic->reference;
@@ -88,7 +88,7 @@ static void vdpau_h264_set_reference_frames(AVCodecContext *avctx)
             if (!pic || !pic->reference)
                 continue;
             pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
-            surface_ref = ff_vdpau_get_surface_id(&pic->f);
+            surface_ref = ff_vdpau_get_surface_id(pic->f);
 
             rf2 = &info->referenceFrames[0];
             while (rf2 != rf) {
@@ -203,7 +203,7 @@ static int vdpau_h264_end_frame(AVCodecContext *avctx)
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     int val;
 
-    val = ff_vdpau_common_end_frame(avctx, &pic->f, pic_ctx);
+    val = ff_vdpau_common_end_frame(avctx, pic->f, pic_ctx);
     if (val < 0)
         return val;
 



