[FFmpeg-cvslog] mpegvideo: operate with pointers to AVFrames instead of whole structs

wm4 git at videolan.org
Wed Apr 9 15:30:05 CEST 2014


ffmpeg | branch: master | wm4 <nfxjfg at googlemail.com> | Mon Mar 31 17:46:29 2014 +0000| [f6774f905fb3cfdc319523ac640be30b14c1bc55] | committer: Vittorio Giovara

mpegvideo: operate with pointers to AVFrames instead of whole structs

The most interesting parts are initialization in ff_MPV_common_init() and
uninitialization in ff_MPV_common_end().
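
(For illustration, a condensed sketch of the new per-Picture allocation and
teardown, simplified from the ff_MPV_common_init() / ff_MPV_common_end()
hunks in the diff below; error handling is abbreviated:)

    /* ff_MPV_common_init(): every Picture now owns a heap-allocated AVFrame */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail;
    }

    /* ff_MPV_common_end(): unref the frame data, then free the AVFrame itself */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_free_picture_tables(&s->picture[i]);
        ff_mpeg_unref_picture(s, &s->picture[i]);
        av_frame_free(&s->picture[i].f);
    }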

ff_mpeg_unref_picture() and ff_thread_release_buffer() have additional NULL
checks for Picture.f, because these functions can be called on
uninitialized or partially initialized Pictures.

NULL pointer checks are added to the ff_thread_release_buffer() stub function.
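
(Again a simplified sketch, taken from the ff_mpeg_unref_picture() hunk
below; the guard added to the ff_thread_release_buffer() stub in utils.c is
analogous:)

    pic->tf.f = pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else if (pic->f)   /* Picture.f may be NULL if init failed part-way */
        av_frame_unref(pic->f);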

Signed-off-by: Vittorio Giovara <vittorio.giovara at gmail.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=f6774f905fb3cfdc319523ac640be30b14c1bc55
---

 libavcodec/dxva2_mpeg2.c      |    8 +-
 libavcodec/dxva2_vc1.c        |    8 +-
 libavcodec/h261dec.c          |   10 +--
 libavcodec/h263dec.c          |   18 ++--
 libavcodec/intrax8.c          |   22 ++---
 libavcodec/motion_est.c       |   22 ++---
 libavcodec/mpeg12dec.c        |   34 +++----
 libavcodec/mpeg12enc.c        |   10 +--
 libavcodec/mpeg4videoenc.c    |   12 +--
 libavcodec/mpegvideo.c        |  185 +++++++++++++++++++++-----------------
 libavcodec/mpegvideo.h        |    2 +-
 libavcodec/mpegvideo_enc.c    |  200 ++++++++++++++++++++---------------------
 libavcodec/mpegvideo_motion.c |   10 +--
 libavcodec/mpegvideo_xvmc.c   |   12 +--
 libavcodec/msmpeg4.c          |    4 +-
 libavcodec/mss2.c             |    2 +-
 libavcodec/pthread_frame.c    |    2 +-
 libavcodec/ratecontrol.c      |   10 +--
 libavcodec/rv10.c             |    6 +-
 libavcodec/rv30.c             |    8 +-
 libavcodec/rv34.c             |   14 +--
 libavcodec/rv40.c             |    4 +-
 libavcodec/svq1enc.c          |   20 +++--
 libavcodec/utils.c            |    3 +-
 libavcodec/vaapi.c            |    2 +-
 libavcodec/vaapi_mpeg2.c      |    4 +-
 libavcodec/vaapi_mpeg4.c      |    6 +-
 libavcodec/vaapi_vc1.c        |    4 +-
 libavcodec/vc1.c              |    2 +-
 libavcodec/vc1dec.c           |  134 +++++++++++++--------------
 libavcodec/vdpau.c            |    2 +-
 libavcodec/vdpau_mpeg12.c     |    4 +-
 libavcodec/vdpau_mpeg4.c      |    4 +-
 libavcodec/vdpau_vc1.c        |    4 +-
 34 files changed, 411 insertions(+), 381 deletions(-)
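
(For orientation, the type change at the heart of the patch, from the
libavcodec/mpegvideo.h hunk below:)

    typedef struct Picture {
        struct AVFrame *f;   /* was "struct AVFrame f;": the frame is now
                              * allocated on the heap with av_frame_alloc() */
        ThreadFrame tf;
        ...
    } Picture;

Every access site accordingly changes from pic->f.xxx (and &pic->f) to
pic->f->xxx (and plain pic->f).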

diff --git a/libavcodec/dxva2_mpeg2.c b/libavcodec/dxva2_mpeg2.c
index 2620f6f..c214e45 100644
--- a/libavcodec/dxva2_mpeg2.c
+++ b/libavcodec/dxva2_mpeg2.c
@@ -44,14 +44,14 @@ static void fill_picture_parameters(AVCodecContext *avctx,
     int is_field = s->picture_structure != PICT_FRAME;
 
     memset(pp, 0, sizeof(*pp));
-    pp->wDecodedPictureIndex         = ff_dxva2_get_surface_index(ctx, &current_picture->f);
+    pp->wDecodedPictureIndex         = ff_dxva2_get_surface_index(ctx, current_picture->f);
     pp->wDeblockedPictureIndex       = 0;
     if (s->pict_type != AV_PICTURE_TYPE_I)
-        pp->wForwardRefPictureIndex  = ff_dxva2_get_surface_index(ctx, &s->last_picture.f);
+        pp->wForwardRefPictureIndex  = ff_dxva2_get_surface_index(ctx, s->last_picture.f);
     else
         pp->wForwardRefPictureIndex  = 0xffff;
     if (s->pict_type == AV_PICTURE_TYPE_B)
-        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture.f);
+        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, s->next_picture.f);
     else
         pp->wBackwardRefPictureIndex = 0xffff;
     pp->wPicWidthInMBminus1          = s->mb_width  - 1;
@@ -259,7 +259,7 @@ static int dxva2_mpeg2_end_frame(AVCodecContext *avctx)
 
     if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
         return -1;
-    ret = ff_dxva2_common_end_frame(avctx, &s->current_picture_ptr->f,
+    ret = ff_dxva2_common_end_frame(avctx, s->current_picture_ptr->f,
                                     &ctx_pic->pp, sizeof(ctx_pic->pp),
                                     &ctx_pic->qm, sizeof(ctx_pic->qm),
                                     commit_bitstream_and_slice_buffer);
diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c
index 2e28ad3..94fbbe4 100644
--- a/libavcodec/dxva2_vc1.c
+++ b/libavcodec/dxva2_vc1.c
@@ -42,13 +42,13 @@ static void fill_picture_parameters(AVCodecContext *avctx,
 
     memset(pp, 0, sizeof(*pp));
     pp->wDecodedPictureIndex    =
-    pp->wDeblockedPictureIndex  = ff_dxva2_get_surface_index(ctx, &current_picture->f);
+    pp->wDeblockedPictureIndex  = ff_dxva2_get_surface_index(ctx, current_picture->f);
     if (s->pict_type != AV_PICTURE_TYPE_I && !v->bi_type)
-        pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture.f);
+        pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, s->last_picture.f);
     else
         pp->wForwardRefPictureIndex = 0xffff;
     if (s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type)
-        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture.f);
+        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, s->next_picture.f);
     else
         pp->wBackwardRefPictureIndex = 0xffff;
     if (v->profile == PROFILE_ADVANCED) {
@@ -261,7 +261,7 @@ static int dxva2_vc1_end_frame(AVCodecContext *avctx)
     if (ctx_pic->bitstream_size <= 0)
         return -1;
 
-    ret = ff_dxva2_common_end_frame(avctx, &v->s.current_picture_ptr->f,
+    ret = ff_dxva2_common_end_frame(avctx, v->s.current_picture_ptr->f,
                                     &ctx_pic->pp, sizeof(ctx_pic->pp),
                                     NULL, 0,
                                     commit_bitstream_and_slice_buffer);
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index d6e5272..0d996eb 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -608,8 +608,8 @@ retry:
     }
 
     // for skipping the frame
-    s->current_picture.f.pict_type = s->pict_type;
-    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f->pict_type = s->pict_type;
+    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
         (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
@@ -632,10 +632,10 @@ retry:
     }
     ff_MPV_frame_end(s);
 
-    assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
-    assert(s->current_picture.f.pict_type == s->pict_type);
+    assert(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
+    assert(s->current_picture.f->pict_type == s->pict_type);
 
-    if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+    if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
         return ret;
     ff_print_debug_info(s, s->current_picture_ptr);
 
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index f6c79af..ad1b31d 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -374,7 +374,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (buf_size == 0) {
         /* special case for last picture */
         if (s->low_delay == 0 && s->next_picture_ptr) {
-            if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
                 return ret;
             s->next_picture_ptr = NULL;
 
@@ -419,7 +419,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     /* We need to set current_picture_ptr before reading the header,
      * otherwise we cannot store anyting in there */
-    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
+    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f->data[0]) {
         int i = ff_find_unused_picture(s, 0);
         if (i < 0)
             return i;
@@ -506,8 +506,8 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         s->gob_index = ff_h263_get_gob_height(s);
 
     // for skipping the frame
-    s->current_picture.f.pict_type = s->pict_type;
-    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f->pict_type = s->pict_type;
+    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     /* skip B-frames if we don't have reference frames */
     if (s->last_picture_ptr == NULL &&
@@ -611,15 +611,15 @@ intrax8_decoded:
     if (!s->divx_packed && avctx->hwaccel)
         ff_thread_finish_setup(avctx);
 
-    assert(s->current_picture.f.pict_type ==
-           s->current_picture_ptr->f.pict_type);
-    assert(s->current_picture.f.pict_type == s->pict_type);
+    assert(s->current_picture.f->pict_type ==
+           s->current_picture_ptr->f->pict_type);
+    assert(s->current_picture.f->pict_type == s->pict_type);
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-        if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+        if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->current_picture_ptr);
     } else if (s->last_picture_ptr != NULL) {
-        if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+        if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->last_picture_ptr);
     }
diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c
index 0cad9da..962c460 100644
--- a/libavcodec/intrax8.c
+++ b/libavcodec/intrax8.c
@@ -306,7 +306,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma
     int quant;
 
     w->dsp.setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
-                                      s->current_picture.f.linesize[chroma>0],
+                                      s->current_picture.f->linesize[chroma>0],
                                       &range, &sum, w->edges);
     if(chroma){
         w->orient=w->chroma_orient;
@@ -615,7 +615,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
             dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;
 
             dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
-                                   s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
+                                   s->dest[chroma], s->current_picture.f->linesize[!!chroma]);
 
             goto block_placed;
         }
@@ -639,15 +639,15 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
     }
 
     if(w->flat_dc){
-        dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
+        dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f->linesize[!!chroma]);
     }else{
         w->dsp.spatial_compensation[w->orient]( s->edge_emu_buffer,
                                             s->dest[chroma],
-                                            s->current_picture.f.linesize[!!chroma] );
+                                            s->current_picture.f->linesize[!!chroma] );
     }
     if(!zeros_only)
         s->dsp.idct_add ( s->dest[chroma],
-                          s->current_picture.f.linesize[!!chroma],
+                          s->current_picture.f->linesize[!!chroma],
                           s->block[0] );
 
 block_placed:
@@ -658,7 +658,7 @@ block_placed:
 
     if(s->loop_filter){
         uint8_t* ptr = s->dest[chroma];
-        int linesize = s->current_picture.f.linesize[!!chroma];
+        int linesize = s->current_picture.f->linesize[!!chroma];
 
         if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
             w->dsp.h_loop_filter(ptr, linesize, w->quant);
@@ -673,12 +673,12 @@ block_placed:
 static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_*
 //not s->linesize as this would be wrong for field pics
 //not that IntraX8 has interlacing support ;)
-    const int linesize   = s->current_picture.f.linesize[0];
-    const int uvlinesize = s->current_picture.f.linesize[1];
+    const int linesize   = s->current_picture.f->linesize[0];
+    const int uvlinesize = s->current_picture.f->linesize[1];
 
-    s->dest[0] = s->current_picture.f.data[0];
-    s->dest[1] = s->current_picture.f.data[1];
-    s->dest[2] = s->current_picture.f.data[2];
+    s->dest[0] = s->current_picture.f->data[0];
+    s->dest[1] = s->current_picture.f->data[1];
+    s->dest[2] = s->current_picture.f->data[2];
 
     s->dest[0] +=   s->mb_y        *   linesize << 3;
     s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index 8b597e8..6a4c3e6 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -656,7 +656,7 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
         return INT_MAX;
 
     if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){
-        dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16);
+        dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16);
     }
 
     if(c->avctx->mb_cmp&FF_CMP_CHROMA){
@@ -671,15 +671,15 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
         offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize;
 
         if(s->no_rounding){
-            s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad    , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
-            s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad    , s->last_picture.f->data[1] + offset, s->uvlinesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f->data[2] + offset, s->uvlinesize, 8);
         }else{
-            s->hdsp.put_pixels_tab       [1][dxy](c->scratchpad    , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
-            s->hdsp.put_pixels_tab       [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
+            s->hdsp.put_pixels_tab       [1][dxy](c->scratchpad    , s->last_picture.f->data[1] + offset, s->uvlinesize, 8);
+            s->hdsp.put_pixels_tab       [1][dxy](c->scratchpad + 8, s->last_picture.f->data[2] + offset, s->uvlinesize, 8);
         }
 
-        dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad  , s->uvlinesize, 8);
-        dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8);
+        dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad  , s->uvlinesize, 8);
+        dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8);
     }
 
     c->pred_x= mx;
@@ -863,7 +863,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     int mb_type=0;
     Picture * const pic= &s->current_picture;
 
-    init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
+    init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
 
     assert(s->quarter_sample==0 || s->quarter_sample==1);
     assert(s->linesize == c->stride);
@@ -1062,7 +1062,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
     int P[10][2];
     const int shift= 1+s->quarter_sample;
     const int xy= mb_x + mb_y*s->mb_stride;
-    init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
+    init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
 
     assert(s->quarter_sample==0 || s->quarter_sample==1);
 
@@ -1501,8 +1501,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
     int fmin, bmin, dmin, fbmin, bimin, fimin;
     int type=0;
     const int xy = mb_y*s->mb_stride + mb_x;
-    init_ref(c, s->new_picture.f.data, s->last_picture.f.data,
-             s->next_picture.f.data, 16 * mb_x, 16 * mb_y, 2);
+    init_ref(c, s->new_picture.f->data, s->last_picture.f->data,
+             s->next_picture.f->data, 16 * mb_x, 16 * mb_y, 2);
 
     get_limits(s, 16*mb_x, 16*mb_y);
 
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index 2095eea..97c82ce 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -1354,8 +1354,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
         s->mpeg_f_code[1][0] = f_code;
         s->mpeg_f_code[1][1] = f_code;
     }
-    s->current_picture.f.pict_type = s->pict_type;
-    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f->pict_type = s->pict_type;
+    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     if (avctx->debug & FF_DEBUG_PICT_INFO)
         av_log(avctx, AV_LOG_DEBUG,
@@ -1517,8 +1517,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
                 s->pict_type = AV_PICTURE_TYPE_P;
         } else
             s->pict_type = AV_PICTURE_TYPE_B;
-        s->current_picture.f.pict_type = s->pict_type;
-        s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+        s->current_picture.f->pict_type = s->pict_type;
+        s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
     }
     s->intra_dc_precision         = get_bits(&s->gb, 2);
     s->picture_structure          = get_bits(&s->gb, 2);
@@ -1593,19 +1593,19 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         ff_mpeg_er_frame_start(s);
 
         /* first check if we must repeat the frame */
-        s->current_picture_ptr->f.repeat_pict = 0;
+        s->current_picture_ptr->f->repeat_pict = 0;
         if (s->repeat_first_field) {
             if (s->progressive_sequence) {
                 if (s->top_field_first)
-                    s->current_picture_ptr->f.repeat_pict = 4;
+                    s->current_picture_ptr->f->repeat_pict = 4;
                 else
-                    s->current_picture_ptr->f.repeat_pict = 2;
+                    s->current_picture_ptr->f->repeat_pict = 2;
             } else if (s->progressive_frame) {
-                s->current_picture_ptr->f.repeat_pict = 1;
+                s->current_picture_ptr->f->repeat_pict = 1;
             }
         }
 
-        pan_scan = av_frame_new_side_data(&s->current_picture_ptr->f,
+        pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
                                           AV_FRAME_DATA_PANSCAN,
                                           sizeof(s1->pan_scan));
         if (!pan_scan)
@@ -1614,7 +1614,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
 
         if (s1->a53_caption) {
             AVFrameSideData *sd = av_frame_new_side_data(
-                &s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
+                s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
                 s1->a53_caption_size);
             if (sd)
                 memcpy(sd->data, s1->a53_caption, s1->a53_caption_size);
@@ -1622,7 +1622,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         }
 
         if (s1->has_stereo3d) {
-            AVStereo3D *stereo = av_stereo3d_create_side_data(&s->current_picture_ptr->f);
+            AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
             if (!stereo)
                 return AVERROR(ENOMEM);
 
@@ -1647,10 +1647,10 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         }
 
         for (i = 0; i < 4; i++) {
-            s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
+            s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
             if (s->picture_structure == PICT_BOTTOM_FIELD)
-                s->current_picture.f.data[i] +=
-                    s->current_picture_ptr->f.linesize[i];
+                s->current_picture.f->data[i] +=
+                    s->current_picture_ptr->f->linesize[i];
         }
     }
 
@@ -2004,7 +2004,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         ff_MPV_frame_end(s);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-            int ret = av_frame_ref(pict, &s->current_picture_ptr->f);
+            int ret = av_frame_ref(pict, s->current_picture_ptr->f);
             if (ret < 0)
                 return ret;
             ff_print_debug_info(s, s->current_picture_ptr);
@@ -2014,7 +2014,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
             /* latency of 1 frame for I- and P-frames */
             /* XXX: use another variable than picture_number */
             if (s->last_picture_ptr != NULL) {
-                int ret = av_frame_ref(pict, &s->last_picture_ptr->f);
+                int ret = av_frame_ref(pict, s->last_picture_ptr->f);
                 if (ret < 0)
                     return ret;
                 ff_print_debug_info(s, s->last_picture_ptr);
@@ -2581,7 +2581,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
         /* special case for last picture */
         if (s2->low_delay == 0 && s2->next_picture_ptr) {
-            int ret = av_frame_ref(picture, &s2->next_picture_ptr->f);
+            int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
             if (ret < 0)
                 return ret;
 
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index 6c4ef6d..c12491e 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -203,7 +203,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
     if (aspect_ratio == 0.0)
         aspect_ratio = 1.0;             // pixel aspect 1.1 (VGA)
 
-    if (s->current_picture.f.key_frame) {
+    if (s->current_picture.f->key_frame) {
         AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];
 
         /* mpeg1 header repeated every gop */
@@ -293,10 +293,10 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
         /* time code: we must convert from the real frame rate to a
          * fake MPEG frame rate in case of low frame rate */
         fps       = (framerate.num + framerate.den / 2) / framerate.den;
-        time_code = s->current_picture_ptr->f.coded_picture_number +
+        time_code = s->current_picture_ptr->f->coded_picture_number +
                     s->avctx->timecode_frame_start;
 
-        s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
+        s->gop_picture_number = s->current_picture_ptr->f->coded_picture_number;
         if (s->drop_frame_timecode) {
             /* only works for NTSC 29.97 */
             int d = time_code / 17982;
@@ -412,7 +412,7 @@ void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
         if (s->progressive_sequence)
             put_bits(&s->pb, 1, 0);             /* no repeat */
         else
-            put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
+            put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
         /* XXX: optimize the generation of this flag with entropy measures */
         s->frame_pred_frame_dct = s->progressive_sequence;
 
@@ -436,7 +436,7 @@ void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
         for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++)
             put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
     }
-    side_data = av_frame_get_side_data(&s->current_picture_ptr->f,
+    side_data = av_frame_get_side_data(s->current_picture_ptr->f,
                                        AV_FRAME_DATA_STEREO3D);
     if (side_data) {
         AVStereo3D *stereo = (AVStereo3D *)side_data->data;
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index 3ddc3e4..189664d 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -675,7 +675,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                         y = s->height - 16;
 
                     offset = x + y * s->linesize;
-                    p_pic  = s->new_picture.f.data[0] + offset;
+                    p_pic  = s->new_picture.f->data[0] + offset;
 
                     s->mb_skipped = 1;
                     for (i = 0; i < s->max_b_frames; i++) {
@@ -683,10 +683,10 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                         int diff;
                         Picture *pic = s->reordered_input_picture[i + 1];
 
-                        if (!pic || pic->f.pict_type != AV_PICTURE_TYPE_B)
+                        if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
                             break;
 
-                        b_pic = pic->f.data[0] + offset;
+                        b_pic = pic->f->data[0] + offset;
                         if (!pic->shared)
                             b_pic += INPLACE_OFFSET;
                         diff = s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
@@ -906,9 +906,9 @@ static void mpeg4_encode_gop_header(MpegEncContext *s)
     put_bits(&s->pb, 16, 0);
     put_bits(&s->pb, 16, GOP_STARTCODE);
 
-    time = s->current_picture_ptr->f.pts;
+    time = s->current_picture_ptr->f->pts;
     if (s->reordered_input_picture[1])
-        time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
+        time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
     time = time * s->avctx->time_base.num;
 
     seconds  = time / s->avctx->time_base.den;
@@ -1118,7 +1118,7 @@ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
     }
     put_bits(&s->pb, 3, 0);     /* intra dc VLC threshold */
     if (!s->progressive_sequence) {
-        put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
+        put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
         put_bits(&s->pb, 1, s->alternate_scan);
     }
     // FIXME sprite stuff
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 0fc77e8..3363ea0 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -342,9 +342,9 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
 
     s->dsp.clear_blocks(s->block[0]);
 
-    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
-    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
-    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[0] = s->current_picture.f->data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
+    s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 
     assert(ref == 0);
     ff_MPV_decode_mb(s, s->block);
@@ -422,40 +422,40 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     int edges_needed = av_codec_is_encoder(s->avctx->codec);
     int r, ret;
 
-    pic->tf.f = &pic->f;
+    pic->tf.f = pic->f;
     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
         s->codec_id != AV_CODEC_ID_MSS2) {
         if (edges_needed) {
-            pic->f.width  = s->avctx->width  + 2 * EDGE_WIDTH;
-            pic->f.height = s->avctx->height + 2 * EDGE_WIDTH;
+            pic->f->width  = s->avctx->width  + 2 * EDGE_WIDTH;
+            pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
         }
 
         r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
     } else {
-        pic->f.width  = s->avctx->width;
-        pic->f.height = s->avctx->height;
-        pic->f.format = s->avctx->pix_fmt;
-        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
+        pic->f->width  = s->avctx->width;
+        pic->f->height = s->avctx->height;
+        pic->f->format = s->avctx->pix_fmt;
+        r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
     }
 
-    if (r < 0 || !pic->f.buf[0]) {
+    if (r < 0 || !pic->f->buf[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
-               r, pic->f.data[0]);
+               r, pic->f->data[0]);
         return -1;
     }
 
     if (edges_needed) {
         int i;
-        for (i = 0; pic->f.data[i]; i++) {
+        for (i = 0; pic->f->data[i]; i++) {
             int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
-                         pic->f.linesize[i] +
+                         pic->f->linesize[i] +
                          (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
-            pic->f.data[i] += offset;
+            pic->f->data[i] += offset;
         }
-        pic->f.width  = s->avctx->width;
-        pic->f.height = s->avctx->height;
+        pic->f->width  = s->avctx->width;
+        pic->f->height = s->avctx->height;
     }
 
     if (s->avctx->hwaccel) {
@@ -470,15 +470,15 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
         }
     }
 
-    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
-                        s->uvlinesize != pic->f.linesize[1])) {
+    if (s->linesize && (s->linesize   != pic->f->linesize[0] ||
+                        s->uvlinesize != pic->f->linesize[1])) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed (stride changed)\n");
         ff_mpeg_unref_picture(s, pic);
         return -1;
     }
 
-    if (pic->f.linesize[1] != pic->f.linesize[2]) {
+    if (pic->f->linesize[1] != pic->f->linesize[2]) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed (uv stride mismatch)\n");
         ff_mpeg_unref_picture(s, pic);
@@ -486,7 +486,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     }
 
     if (!s->edge_emu_buffer &&
-        (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
+        (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed to allocate context scratch buffers.\n");
         ff_mpeg_unref_picture(s, pic);
@@ -585,16 +585,16 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
     int i, ret;
 
     if (shared) {
-        assert(pic->f.data[0]);
+        assert(pic->f->data[0]);
         pic->shared = 1;
     } else {
-        assert(!pic->f.buf[0]);
+        assert(!pic->f->buf[0]);
 
         if (alloc_frame_buffer(s, pic) < 0)
             return -1;
 
-        s->linesize   = pic->f.linesize[0];
-        s->uvlinesize = pic->f.linesize[1];
+        s->linesize   = pic->f->linesize[0];
+        s->uvlinesize = pic->f->linesize[1];
     }
 
     if (!pic->qscale_table_buf)
@@ -636,15 +636,15 @@ void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
 {
     int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
 
-    pic->tf.f = &pic->f;
+    pic->tf.f = pic->f;
     /* WM Image / Screen codecs allocate internal buffers with different
      * dimensions / colorspaces; ignore user-defined callbacks for these. */
     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
         s->codec_id != AV_CODEC_ID_MSS2)
         ff_thread_release_buffer(s->avctx, &pic->tf);
-    else
-        av_frame_unref(&pic->f);
+    else if (pic->f)
+        av_frame_unref(pic->f);
 
     av_buffer_unref(&pic->hwaccel_priv_buf);
 
@@ -700,11 +700,11 @@ int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
 {
     int ret;
 
-    av_assert0(!dst->f.buf[0]);
-    av_assert0(src->f.buf[0]);
+    av_assert0(!dst->f->buf[0]);
+    av_assert0(src->f->buf[0]);
 
-    src->tf.f = &src->f;
-    dst->tf.f = &dst->f;
+    src->tf.f = src->f;
+    dst->tf.f = dst->f;
     ret = ff_thread_ref_frame(&dst->tf, &src->tf);
     if (ret < 0)
         goto fail;
@@ -902,7 +902,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
         ff_mpeg_unref_picture(s, &s->picture[i]);
-        if (s1->picture[i].f.buf[0] &&
+        if (s1->picture[i].f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
             return ret;
     }
@@ -910,7 +910,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 #define UPDATE_PICTURE(pic)\
 do {\
     ff_mpeg_unref_picture(s, &s->pic);\
-    if (s1->pic.f.buf[0])\
+    if (s1->pic.f->buf[0])\
         ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
     else\
         ret = update_picture_tables(&s->pic, &s1->pic);\
@@ -977,7 +977,7 @@ do {\
     if (!s1->first_field) {
         s->last_pict_type = s1->pict_type;
         if (s1->current_picture_ptr)
-            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
+            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
     }
 
     return 0;
@@ -1252,14 +1252,26 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                       MAX_PICTURE_COUNT * sizeof(Picture), fail);
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        av_frame_unref(&s->picture[i].f);
+        s->picture[i].f = av_frame_alloc();
+        if (!s->picture[i].f)
+            goto fail;
     }
     memset(&s->next_picture, 0, sizeof(s->next_picture));
     memset(&s->last_picture, 0, sizeof(s->last_picture));
     memset(&s->current_picture, 0, sizeof(s->current_picture));
-    av_frame_unref(&s->next_picture.f);
-    av_frame_unref(&s->last_picture.f);
-    av_frame_unref(&s->current_picture.f);
+    memset(&s->new_picture, 0, sizeof(s->new_picture));
+    s->next_picture.f = av_frame_alloc();
+    if (!s->next_picture.f)
+        goto fail;
+    s->last_picture.f = av_frame_alloc();
+    if (!s->last_picture.f)
+        goto fail;
+    s->current_picture.f = av_frame_alloc();
+    if (!s->current_picture.f)
+        goto fail;
+    s->new_picture.f = av_frame_alloc();
+    if (!s->new_picture.f)
+        goto fail;
 
     if (s->width && s->height) {
         if (init_context_frame(s))
@@ -1453,15 +1465,22 @@ void ff_MPV_common_end(MpegEncContext *s)
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
             ff_free_picture_tables(&s->picture[i]);
             ff_mpeg_unref_picture(s, &s->picture[i]);
+            av_frame_free(&s->picture[i].f);
         }
     }
     av_freep(&s->picture);
     ff_free_picture_tables(&s->last_picture);
     ff_mpeg_unref_picture(s, &s->last_picture);
+    av_frame_free(&s->last_picture.f);
     ff_free_picture_tables(&s->current_picture);
     ff_mpeg_unref_picture(s, &s->current_picture);
+    av_frame_free(&s->current_picture.f);
     ff_free_picture_tables(&s->next_picture);
     ff_mpeg_unref_picture(s, &s->next_picture);
+    av_frame_free(&s->next_picture.f);
+    ff_free_picture_tables(&s->new_picture);
+    ff_mpeg_unref_picture(s, &s->new_picture);
+    av_frame_free(&s->new_picture.f);
 
     free_context_frame(s);
 
@@ -1577,7 +1596,7 @@ static void release_unused_pictures(MpegEncContext *s)
 
 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
 {
-    if (pic->f.buf[0] == NULL)
+    if (pic->f->buf[0] == NULL)
         return 1;
     if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
         return 1;
@@ -1590,7 +1609,7 @@ static int find_unused_picture(MpegEncContext *s, int shared)
 
     if (shared) {
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (s->picture[i].f.buf[0] == NULL)
+            if (s->picture[i].f->buf[0] == NULL)
                 return i;
         }
     } else {
@@ -1630,7 +1649,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     /* mark & release old frames */
     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
         s->last_picture_ptr != s->next_picture_ptr &&
-        s->last_picture_ptr->f.buf[0]) {
+        s->last_picture_ptr->f->buf[0]) {
         ff_mpeg_unref_picture(s, s->last_picture_ptr);
     }
 
@@ -1652,7 +1671,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     release_unused_pictures(s);
 
     if (s->current_picture_ptr &&
-        s->current_picture_ptr->f.buf[0] == NULL) {
+        s->current_picture_ptr->f->buf[0] == NULL) {
         // we already have a unused image
         // (maybe it was set before reading the header)
         pic = s->current_picture_ptr;
@@ -1671,28 +1690,28 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             pic->reference = 3;
     }
 
-    pic->f.coded_picture_number = s->coded_picture_number++;
+    pic->f->coded_picture_number = s->coded_picture_number++;
 
     if (ff_alloc_picture(s, pic, 0) < 0)
         return -1;
 
     s->current_picture_ptr = pic;
     // FIXME use only the vars from current_pic
-    s->current_picture_ptr->f.top_field_first = s->top_field_first;
+    s->current_picture_ptr->f->top_field_first = s->top_field_first;
     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
         if (s->picture_structure != PICT_FRAME)
-            s->current_picture_ptr->f.top_field_first =
+            s->current_picture_ptr->f->top_field_first =
                 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
     }
-    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
+    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
 
-    s->current_picture_ptr->f.pict_type = s->pict_type;
+    s->current_picture_ptr->f->pict_type = s->pict_type;
     // if (s->flags && CODEC_FLAG_QSCALE)
     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
-    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                    s->current_picture_ptr)) < 0)
@@ -1705,13 +1724,13 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     }
     av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
-            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
-            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
-            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
+            s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
+            s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
+            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
             s->pict_type, s->droppable);
 
     if ((s->last_picture_ptr == NULL ||
-         s->last_picture_ptr->f.buf[0] == NULL) &&
+         s->last_picture_ptr->f->buf[0] == NULL) &&
         (s->pict_type != AV_PICTURE_TYPE_I ||
          s->picture_structure != PICT_FRAME)) {
         int h_chroma_shift, v_chroma_shift;
@@ -1733,27 +1752,27 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         s->last_picture_ptr = &s->picture[i];
 
         s->last_picture_ptr->reference   = 3;
-        s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
+        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
 
         if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
             s->last_picture_ptr = NULL;
             return -1;
         }
 
-        memset(s->last_picture_ptr->f.data[0], 0,
-               avctx->height * s->last_picture_ptr->f.linesize[0]);
-        memset(s->last_picture_ptr->f.data[1], 0x80,
+        memset(s->last_picture_ptr->f->data[0], 0,
+               avctx->height * s->last_picture_ptr->f->linesize[0]);
+        memset(s->last_picture_ptr->f->data[1], 0x80,
                (avctx->height >> v_chroma_shift) *
-               s->last_picture_ptr->f.linesize[1]);
-        memset(s->last_picture_ptr->f.data[2], 0x80,
+               s->last_picture_ptr->f->linesize[1]);
+        memset(s->last_picture_ptr->f->data[2], 0x80,
                (avctx->height >> v_chroma_shift) *
-               s->last_picture_ptr->f.linesize[2]);
+               s->last_picture_ptr->f->linesize[2]);
 
         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
     }
     if ((s->next_picture_ptr == NULL ||
-         s->next_picture_ptr->f.buf[0] == NULL) &&
+         s->next_picture_ptr->f->buf[0] == NULL) &&
         s->pict_type == AV_PICTURE_TYPE_B) {
         /* Allocate a dummy frame */
         i = ff_find_unused_picture(s, 0);
@@ -1764,7 +1783,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         s->next_picture_ptr = &s->picture[i];
 
         s->next_picture_ptr->reference   = 3;
-        s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
+        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
 
         if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
             s->next_picture_ptr = NULL;
@@ -1776,21 +1795,21 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 
     if (s->last_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->last_picture);
-        if (s->last_picture_ptr->f.buf[0] &&
+        if (s->last_picture_ptr->f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                        s->last_picture_ptr)) < 0)
             return ret;
     }
     if (s->next_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->next_picture);
-        if (s->next_picture_ptr->f.buf[0] &&
+        if (s->next_picture_ptr->f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                        s->next_picture_ptr)) < 0)
             return ret;
     }
 
     if (s->pict_type != AV_PICTURE_TYPE_I &&
-        !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
+        !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
         av_log(s, AV_LOG_ERROR,
                "Non-reference picture received and no reference available\n");
         return AVERROR_INVALIDDATA;
@@ -1800,12 +1819,12 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         int i;
         for (i = 0; i < 4; i++) {
             if (s->picture_structure == PICT_BOTTOM_FIELD) {
-                s->current_picture.f.data[i] +=
-                    s->current_picture.f.linesize[i];
+                s->current_picture.f->data[i] +=
+                    s->current_picture.f->linesize[i];
             }
-            s->current_picture.f.linesize[i] *= 2;
-            s->last_picture.f.linesize[i]    *= 2;
-            s->next_picture.f.linesize[i]    *= 2;
+            s->current_picture.f->linesize[i] *= 2;
+            s->last_picture.f->linesize[i]    *= 2;
+            s->next_picture.f->linesize[i]    *= 2;
         }
     }
 
@@ -1862,7 +1881,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
     AVFrame *pict;
     if (s->avctx->hwaccel || !p || !p->mb_type)
         return;
-    pict = &p->f;
+    pict = p->f;
 
     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
         int x,y;
@@ -2110,8 +2129,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
         int dct_linesize, dct_offset;
         op_pixels_func (*op_pix)[4];
         qpel_mc_func (*op_qpix)[16];
-        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
-        const int uvlinesize = s->current_picture.f.linesize[1];
+        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+        const int uvlinesize = s->current_picture.f->linesize[1];
         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
         const int block_size = 8;
 
@@ -2169,12 +2188,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
                     op_pix = s->hdsp.put_no_rnd_pixels_tab;
                 }
                 if (s->mv_dir & MV_DIR_FORWARD) {
-                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
+                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                     op_pix = s->hdsp.avg_pixels_tab;
                     op_qpix= s->me.qpel_avg;
                 }
                 if (s->mv_dir & MV_DIR_BACKWARD) {
-                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
+                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                 }
             }
 
@@ -2308,14 +2327,14 @@ void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
 
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
 {
-    ff_draw_horiz_band(s->avctx, &s->current_picture.f,
-                       &s->last_picture.f, y, h, s->picture_structure,
+    ff_draw_horiz_band(s->avctx, s->current_picture.f,
+                       s->last_picture.f, y, h, s->picture_structure,
                        s->first_field, s->low_delay);
 }
 
 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
-    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
-    const int uvlinesize = s->current_picture.f.linesize[1];
+    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+    const int uvlinesize = s->current_picture.f->linesize[1];
     const int mb_size= 4;
 
     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
@@ -2326,9 +2345,9 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
     //block_index is not used by mpeg2, so it is not affected by chroma_format
 
-    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
-    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
-    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+    s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) <<  mb_size);
+    s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+    s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 
     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
     {
@@ -2432,7 +2451,7 @@ void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
     if (!src)
         return;
 
-    dst->f = &src->f;
+    dst->f = src->f;
     dst->tf = &src->tf;
 
     for (i = 0; i < 2; i++) {
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index af2281e..e1ee40f 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -91,7 +91,7 @@ struct MpegEncContext;
  * Picture.
  */
 typedef struct Picture{
-    struct AVFrame f;
+    struct AVFrame *f;
     ThreadFrame tf;
 
     AVBufferRef *qscale_table_buf;
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index ec49db7..4e72a25 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -687,7 +687,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
     if (ARCH_X86)
         ff_MPV_encode_init_x86(s);
 
-    s->avctx->coded_frame = &s->current_picture.f;
+    s->avctx->coded_frame = s->current_picture.f;
 
     if (s->msmpeg4_version) {
         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
@@ -937,7 +937,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
             pic = &s->picture[i];
             pic->reference = 3;
 
-            if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
+            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                 return ret;
             if (ff_alloc_picture(s, pic, 1) < 0) {
                 return -1;
@@ -954,9 +954,9 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
                 return -1;
             }
 
-            if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
-                pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
-                pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
+            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
+                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
+                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                 // empty
             } else {
                 int h_chroma_shift, v_chroma_shift;
@@ -972,7 +972,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
                     int w = s->width  >> h_shift;
                     int h = s->height >> v_shift;
                     uint8_t *src = pic_arg->data[i];
-                    uint8_t *dst = pic->f.data[i];
+                    uint8_t *dst = pic->f->data[i];
 
                     if (!s->avctx->rc_buffer_size)
                         dst += INPLACE_OFFSET;
@@ -989,12 +989,12 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
                 }
             }
         }
-        ret = av_frame_copy_props(&pic->f, pic_arg);
+        ret = av_frame_copy_props(pic->f, pic_arg);
         if (ret < 0)
             return ret;
 
-        pic->f.display_picture_number = display_picture_number;
-        pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
+        pic->f->display_picture_number = display_picture_number;
+        pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
     }
 
     /* shift buffer entries */
@@ -1013,13 +1013,13 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
     int64_t score64 = 0;
 
     for (plane = 0; plane < 3; plane++) {
-        const int stride = p->f.linesize[plane];
+        const int stride = p->f->linesize[plane];
         const int bw = plane ? 1 : 2;
         for (y = 0; y < s->mb_height * bw; y++) {
             for (x = 0; x < s->mb_width * bw; x++) {
                 int off = p->shared ? 0 : 16;
-                uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
-                uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
+                uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
+                uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
 
                 switch (s->avctx->frame_skip_exp) {
@@ -1103,19 +1103,19 @@ static int estimate_best_b_count(MpegEncContext *s)
             pre_input = *pre_input_ptr;
 
             if (!pre_input.shared && i) {
-                pre_input.f.data[0] += INPLACE_OFFSET;
-                pre_input.f.data[1] += INPLACE_OFFSET;
-                pre_input.f.data[2] += INPLACE_OFFSET;
+                pre_input.f->data[0] += INPLACE_OFFSET;
+                pre_input.f->data[1] += INPLACE_OFFSET;
+                pre_input.f->data[2] += INPLACE_OFFSET;
             }
 
             s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
-                                 pre_input.f.data[0], pre_input.f.linesize[0],
+                                 pre_input.f->data[0], pre_input.f->linesize[0],
                                  c->width,      c->height);
             s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
-                                 pre_input.f.data[1], pre_input.f.linesize[1],
+                                 pre_input.f->data[1], pre_input.f->linesize[1],
                                  c->width >> 1, c->height >> 1);
             s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
-                                 pre_input.f.data[2], pre_input.f.linesize[2],
+                                 pre_input.f->data[2], pre_input.f->linesize[2],
                                  c->width >> 1, c->height >> 1);
         }
     }
@@ -1180,8 +1180,8 @@ static int select_input_picture(MpegEncContext *s)
         if (/*s->picture_in_gop_number >= s->gop_size ||*/
             s->next_picture_ptr == NULL || s->intra_only) {
             s->reordered_input_picture[0] = s->input_picture[0];
-            s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
-            s->reordered_input_picture[0]->f.coded_picture_number =
+            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
+            s->reordered_input_picture[0]->f->coded_picture_number =
                 s->coded_picture_number++;
         } else {
             int b_frames;
@@ -1190,7 +1190,7 @@ static int select_input_picture(MpegEncContext *s)
                 if (s->picture_in_gop_number < s->gop_size &&
                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                     // FIXME check that te gop check above is +-1 correct
-                    av_frame_unref(&s->input_picture[0]->f);
+                    av_frame_unref(s->input_picture[0]->f);
 
                     emms_c();
                     ff_vbv_update(s, 0);
@@ -1201,7 +1201,7 @@ static int select_input_picture(MpegEncContext *s)
 
             if (s->flags & CODEC_FLAG_PASS2) {
                 for (i = 0; i < s->max_b_frames + 1; i++) {
-                    int pict_num = s->input_picture[0]->f.display_picture_number + i;
+                    int pict_num = s->input_picture[0]->f->display_picture_number + i;
 
                     if (pict_num >= s->rc_context.num_entries)
                         break;
@@ -1210,7 +1210,7 @@ static int select_input_picture(MpegEncContext *s)
                         break;
                     }
 
-                    s->input_picture[i]->f.pict_type =
+                    s->input_picture[i]->f->pict_type =
                         s->rc_context.entry[pict_num].new_pict_type;
                 }
             }
@@ -1225,8 +1225,8 @@ static int select_input_picture(MpegEncContext *s)
                         s->input_picture[i]->b_frame_score == 0) {
                         s->input_picture[i]->b_frame_score =
                             get_intra_count(s,
-                                            s->input_picture[i    ]->f.data[0],
-                                            s->input_picture[i - 1]->f.data[0],
+                                            s->input_picture[i    ]->f->data[0],
+                                            s->input_picture[i - 1]->f->data[0],
                                             s->linesize) + 1;
                     }
                 }
@@ -1253,11 +1253,11 @@ static int select_input_picture(MpegEncContext *s)
             emms_c();
 
             for (i = b_frames - 1; i >= 0; i--) {
-                int type = s->input_picture[i]->f.pict_type;
+                int type = s->input_picture[i]->f->pict_type;
                 if (type && type != AV_PICTURE_TYPE_B)
                     b_frames = i;
             }
-            if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
+            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                 b_frames == s->max_b_frames) {
                 av_log(s->avctx, AV_LOG_ERROR,
                        "warning, too many b frames in a row\n");
@@ -1270,24 +1270,24 @@ static int select_input_picture(MpegEncContext *s)
                 } else {
                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
                         b_frames = 0;
-                    s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
+                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                 }
             }
 
             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
-                s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
+                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                 b_frames--;
 
             s->reordered_input_picture[0] = s->input_picture[b_frames];
-            if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
-                s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
-            s->reordered_input_picture[0]->f.coded_picture_number =
+            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
+                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
+            s->reordered_input_picture[0]->f->coded_picture_number =
                 s->coded_picture_number++;
             for (i = 0; i < b_frames; i++) {
                 s->reordered_input_picture[i + 1] = s->input_picture[i];
-                s->reordered_input_picture[i + 1]->f.pict_type =
+                s->reordered_input_picture[i + 1]->f->pict_type =
                     AV_PICTURE_TYPE_B;
-                s->reordered_input_picture[i + 1]->f.coded_picture_number =
+                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                     s->coded_picture_number++;
             }
         }
@@ -1295,7 +1295,7 @@ static int select_input_picture(MpegEncContext *s)
 no_output_pic:
     if (s->reordered_input_picture[0]) {
         s->reordered_input_picture[0]->reference =
-           s->reordered_input_picture[0]->f.pict_type !=
+           s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;
 
         ff_mpeg_unref_picture(s, &s->new_picture);
@@ -1317,12 +1317,12 @@ no_output_pic:
                 return -1;
             }
 
-            ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
+            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
             if (ret < 0)
                 return ret;
 
             /* mark us unused / free shared pic */
-            av_frame_unref(&s->reordered_input_picture[0]->f);
+            av_frame_unref(s->reordered_input_picture[0]->f);
             s->reordered_input_picture[0]->shared = 0;
 
             s->current_picture_ptr = pic;
@@ -1330,7 +1330,7 @@ no_output_pic:
             // input is not a shared pix -> reuse buffer for current_pix
             s->current_picture_ptr = s->reordered_input_picture[0];
             for (i = 0; i < 4; i++) {
-                s->new_picture.f.data[i] += INPLACE_OFFSET;
+                s->new_picture.f->data[i] += INPLACE_OFFSET;
             }
         }
         ff_mpeg_unref_picture(s, &s->current_picture);
@@ -1338,7 +1338,7 @@ no_output_pic:
                                        s->current_picture_ptr)) < 0)
             return ret;
 
-        s->picture_number = s->new_picture.f.display_picture_number;
+        s->picture_number = s->new_picture.f->display_picture_number;
     } else {
         ff_mpeg_unref_picture(s, &s->new_picture);
     }
@@ -1355,15 +1355,15 @@ static void frame_end(MpegEncContext *s)
         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
         int hshift = desc->log2_chroma_w;
         int vshift = desc->log2_chroma_h;
-        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
+        s->dsp.draw_edges(s->current_picture.f->data[0], s->linesize,
                           s->h_edge_pos, s->v_edge_pos,
                           EDGE_WIDTH, EDGE_WIDTH,
                           EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
+        s->dsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                           EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
+        s->dsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                           EDGE_TOP | EDGE_BOTTOM);
@@ -1372,7 +1372,7 @@ static void frame_end(MpegEncContext *s)
     emms_c();
 
     s->last_pict_type                 = s->pict_type;
-    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
+    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
     if (s->pict_type!= AV_PICTURE_TYPE_B)
         s->last_non_b_pict_type = s->pict_type;
 
@@ -1384,7 +1384,7 @@ static void frame_end(MpegEncContext *s)
         }
     }
 
-    s->avctx->coded_frame = &s->current_picture_ptr->f;
+    s->avctx->coded_frame = s->current_picture_ptr->f;
 
 }
 
@@ -1416,12 +1416,12 @@ static int frame_start(MpegEncContext *s)
     /* mark & release old frames */
     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
         s->last_picture_ptr != s->next_picture_ptr &&
-        s->last_picture_ptr->f.buf[0]) {
+        s->last_picture_ptr->f->buf[0]) {
         ff_mpeg_unref_picture(s, s->last_picture_ptr);
     }
 
-    s->current_picture_ptr->f.pict_type = s->pict_type;
-    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture_ptr->f->pict_type = s->pict_type;
+    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     ff_mpeg_unref_picture(s, &s->current_picture);
     if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
@@ -1436,14 +1436,14 @@ static int frame_start(MpegEncContext *s)
 
     if (s->last_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->last_picture);
-        if (s->last_picture_ptr->f.buf[0] &&
+        if (s->last_picture_ptr->f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                        s->last_picture_ptr)) < 0)
             return ret;
     }
     if (s->next_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->next_picture);
-        if (s->next_picture_ptr->f.buf[0] &&
+        if (s->next_picture_ptr->f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                        s->next_picture_ptr)) < 0)
             return ret;
@@ -1453,12 +1453,12 @@ static int frame_start(MpegEncContext *s)
         int i;
         for (i = 0; i < 4; i++) {
             if (s->picture_structure == PICT_BOTTOM_FIELD) {
-                s->current_picture.f.data[i] +=
-                    s->current_picture.f.linesize[i];
+                s->current_picture.f->data[i] +=
+                    s->current_picture.f->linesize[i];
             }
-            s->current_picture.f.linesize[i] *= 2;
-            s->last_picture.f.linesize[i]    *= 2;
-            s->next_picture.f.linesize[i]    *= 2;
+            s->current_picture.f->linesize[i] *= 2;
+            s->last_picture.f->linesize[i]    *= 2;
+            s->next_picture.f->linesize[i]    *= 2;
         }
     }
 
@@ -1498,7 +1498,7 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
     }
 
     /* output? */
-    if (s->new_picture.f.data[0]) {
+    if (s->new_picture.f->data[0]) {
         if (!pkt->data &&
             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
             return ret;
@@ -1519,7 +1519,7 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
             init_put_bits(&s->thread_context[i]->pb, start, end - start);
         }
 
-        s->pict_type = s->new_picture.f.pict_type;
+        s->pict_type = s->new_picture.f->pict_type;
         //emms_c();
         ret = frame_start(s);
         if (ret < 0)
@@ -1585,8 +1585,8 @@ vbv_retry:
             ff_write_pass1_stats(s);
 
         for (i = 0; i < 4; i++) {
-            s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
-            avctx->error[i] += s->current_picture_ptr->f.error[i];
+            s->current_picture_ptr->f->error[i] = s->current_picture.f->error[i];
+            avctx->error[i] += s->current_picture_ptr->f->error[i];
         }
 
         if (s->flags & CODEC_FLAG_PASS1)
@@ -1663,16 +1663,16 @@ vbv_retry:
         s->total_bits     += s->frame_bits;
         avctx->frame_bits  = s->frame_bits;
 
-        pkt->pts = s->current_picture.f.pts;
+        pkt->pts = s->current_picture.f->pts;
         if (!s->low_delay) {
-            if (!s->current_picture.f.coded_picture_number)
+            if (!s->current_picture.f->coded_picture_number)
                 pkt->dts = pkt->pts - s->dts_delta;
             else
                 pkt->dts = s->reordered_pts;
-            s->reordered_pts = s->input_picture[0]->f.pts;
+            s->reordered_pts = s->input_picture[0]->f->pts;
         } else
             pkt->dts = pkt->pts;
-        if (s->current_picture.f.key_frame)
+        if (s->current_picture.f->key_frame)
             pkt->flags |= AV_PKT_FLAG_KEY;
         if (s->mb_info)
             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
@@ -1850,11 +1850,11 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
 
     wrap_y = s->linesize;
     wrap_c = s->uvlinesize;
-    ptr_y  = s->new_picture.f.data[0] +
+    ptr_y  = s->new_picture.f->data[0] +
              (mb_y * 16 * wrap_y)              + mb_x * 16;
-    ptr_cb = s->new_picture.f.data[1] +
+    ptr_cb = s->new_picture.f->data[1] +
              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
-    ptr_cr = s->new_picture.f.data[2] +
+    ptr_cr = s->new_picture.f->data[2] +
              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
 
     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
@@ -1939,14 +1939,14 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
 
         if (s->mv_dir & MV_DIR_FORWARD) {
             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
-                          s->last_picture.f.data,
+                          s->last_picture.f->data,
                           op_pix, op_qpix);
             op_pix  = s->hdsp.avg_pixels_tab;
             op_qpix = s->dsp.avg_qpel_pixels_tab;
         }
         if (s->mv_dir & MV_DIR_BACKWARD) {
             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
-                          s->next_picture.f.data,
+                          s->next_picture.f->data,
                           op_pix, op_qpix);
         }
 
@@ -2322,18 +2322,18 @@ static int sse_mb(MpegEncContext *s){
 
     if(w==16 && h==16)
       if(s->avctx->mb_cmp == FF_CMP_NSSE){
-        return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
-               +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
-               +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
+        return  s->dsp.nsse[0](s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
+               +s->dsp.nsse[1](s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
+               +s->dsp.nsse[1](s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
       }else{
-        return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
-               +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
-               +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
+        return  s->dsp.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
+               +s->dsp.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
+               +s->dsp.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
       }
     else
-        return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
-               +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
-               +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
+        return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
+               +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
+               +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
 }
 
 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
@@ -2388,7 +2388,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){
         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
             int xx = mb_x * 16;
             int yy = mb_y * 16;
-            uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
+            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
             int varc;
             int sum = s->dsp.pix_sum(pix, s->linesize);
 
@@ -2495,7 +2495,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
         /* note: quant matrix value (8) is implied here */
         s->last_dc[i] = 128 << s->intra_dc_precision;
 
-        s->current_picture.f.error[i] = 0;
+        s->current_picture.f->error[i] = 0;
     }
     s->mb_skip_run = 0;
     memset(s->last_mv, 0, sizeof(s->last_mv));
@@ -3053,14 +3053,14 @@ static int encode_thread(AVCodecContext *c, void *arg){
                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
 
-                s->current_picture.f.error[0] += sse(
-                    s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
+                s->current_picture.f->error[0] += sse(
+                    s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
                     s->dest[0], w, h, s->linesize);
-                s->current_picture.f.error[1] += sse(
-                    s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
+                s->current_picture.f->error[1] += sse(
+                    s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
-                s->current_picture.f.error[2] += sse(
-                    s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
+                s->current_picture.f->error[2] += sse(
+                    s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
             }
             if(s->loop_filter){
@@ -3112,9 +3112,9 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
     MERGE(misc_bits);
     MERGE(er.error_count);
     MERGE(padding_bug_score);
-    MERGE(current_picture.f.error[0]);
-    MERGE(current_picture.f.error[1]);
-    MERGE(current_picture.f.error[2]);
+    MERGE(current_picture.f->error[0]);
+    MERGE(current_picture.f->error[1]);
+    MERGE(current_picture.f->error[2]);
 
     if(dst->avctx->noise_reduction){
         for(i=0; i<64; i++){
@@ -3131,13 +3131,13 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
 
 static int estimate_qp(MpegEncContext *s, int dry_run){
     if (s->next_lambda){
-        s->current_picture_ptr->f.quality =
-        s->current_picture.f.quality = s->next_lambda;
+        s->current_picture_ptr->f->quality =
+        s->current_picture.f->quality = s->next_lambda;
         if(!dry_run) s->next_lambda= 0;
     } else if (!s->fixed_qscale) {
-        s->current_picture_ptr->f.quality =
-        s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
-        if (s->current_picture.f.quality < 0)
+        s->current_picture_ptr->f->quality =
+        s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
+        if (s->current_picture.f->quality < 0)
             return -1;
     }
 
@@ -3160,15 +3160,15 @@ static int estimate_qp(MpegEncContext *s, int dry_run){
         s->lambda= s->lambda_table[0];
         //FIXME broken
     }else
-        s->lambda = s->current_picture.f.quality;
+        s->lambda = s->current_picture.f->quality;
     update_qscale(s);
     return 0;
 }
 
 /* must be called before writing the header */
 static void set_frame_distances(MpegEncContext * s){
-    assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
-    s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
+    assert(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
+    s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
 
     if(s->pict_type==AV_PICTURE_TYPE_B){
         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
@@ -3345,12 +3345,12 @@ static int encode_picture(MpegEncContext *s, int picture_number)
     }
 
     //FIXME var duplication
-    s->current_picture_ptr->f.key_frame =
-    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
-    s->current_picture_ptr->f.pict_type =
-    s->current_picture.f.pict_type = s->pict_type;
+    s->current_picture_ptr->f->key_frame =
+    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
+    s->current_picture_ptr->f->pict_type =
+    s->current_picture.f->pict_type = s->pict_type;
 
-    if (s->current_picture.f.key_frame)
+    if (s->current_picture.f->key_frame)
         s->picture_in_gop_number=0;
 
     s->last_bits= put_bits_count(&s->pb);
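
The mpegvideo_enc.c hunks above are the bulk of the mechanical change: Picture.f goes from an embedded AVFrame to a heap-allocated AVFrame *, so every s->...picture.f.<field> access becomes s->...picture.f-><field>, and call sites that took the frame's address drop their &. A minimal self-contained sketch of the before/after layout, using toy types rather than the real FFmpeg structs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for AVFrame; illustrative only. */
    typedef struct Frame {
        unsigned char *data[4];
        int linesize[4];
    } Frame;

    /* Before: the frame was embedded by value,
     *     struct Picture { Frame f; ... };   accessed as pic.f.data[0]
     * After: it is a pointer that must be allocated and freed: */
    typedef struct Picture {
        Frame *f;
        int reference;
    } Picture;

    static int picture_init(Picture *pic)
    {
        pic->f = calloc(1, sizeof(*pic->f));
        return pic->f ? 0 : -1;
    }

    static void picture_free(Picture *pic)
    {
        /* A partially initialized Picture may have f == NULL, which is
         * why the release paths later in this diff grow NULL checks. */
        free(pic->f);
        pic->f = NULL;
    }

    int main(void)
    {
        Picture pic = { 0 };
        if (picture_init(&pic) < 0)
            return 1;
        pic.f->linesize[0] = 640;   /* was: pic.f.linesize[0] */
        printf("linesize %d\n", pic.f->linesize[0]);
        picture_free(&pic);
        return 0;
    }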
diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c
index 997a722..1289da1 100644
--- a/libavcodec/mpegvideo_motion.c
+++ b/libavcodec/mpegvideo_motion.c
@@ -254,8 +254,8 @@ void mpeg_motion_internal(MpegEncContext *s,
 #endif
 
     v_edge_pos = s->v_edge_pos >> field_based;
-    linesize   = s->current_picture.f.linesize[0] << field_based;
-    uvlinesize = s->current_picture.f.linesize[1] << field_based;
+    linesize   = s->current_picture.f->linesize[0] << field_based;
+    uvlinesize = s->current_picture.f->linesize[1] << field_based;
 
     dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
     src_x = s->mb_x * 16 + (motion_x >> 1);
@@ -900,7 +900,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
         } else {
             if (s->picture_structure != s->field_select[dir][0] + 1 &&
                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
-                ref_picture = s->current_picture_ptr->f.data;
+                ref_picture = s->current_picture_ptr->f->data;
             }
 
             mpeg_motion(s, dest_y, dest_cb, dest_cr,
@@ -917,7 +917,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                 ref2picture = ref_picture;
             } else {
-                ref2picture = s->current_picture_ptr->f.data;
+                ref2picture = s->current_picture_ptr->f->data;
             }
 
             mpeg_motion(s, dest_y, dest_cb, dest_cr,
@@ -956,7 +956,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                 /* opposite parity is always in the same frame if this is
                  * second field */
                 if (!s->first_field) {
-                    ref_picture = s->current_picture_ptr->f.data;
+                    ref_picture = s->current_picture_ptr->f->data;
                 }
             }
         }
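
One detail worth noting in the mpeg_motion_internal() hunk: the stride is read from the frame and shifted by field_based, because doubling the linesize makes consecutive "rows" of a field picture skip over the opposite field in the interleaved frame. A toy sketch of that addressing, not FFmpeg code:

    #include <stdio.h>

    int main(void)
    {
        enum { W = 4, H = 4 };
        unsigned char frame[H][W];
        int linesize       = W;
        int field_based    = 1;                  /* field prediction */
        int field_linesize = linesize << field_based;

        for (int y = 0; y < H; y++)
            for (int x = 0; x < W; x++)
                frame[y][x] = (unsigned char)(y * 10 + x);

        /* Bottom field: start one real row down, step two rows at a time. */
        unsigned char *bottom = &frame[0][0] + linesize;
        for (int y = 0; y < H / 2; y++)
            printf("bottom-field row %d starts at value %d\n",
                   y, bottom[y * field_linesize]);
        return 0;
    }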
diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c
index 4a2982f..aa6f49a 100644
--- a/libavcodec/mpegvideo_xvmc.c
+++ b/libavcodec/mpegvideo_xvmc.c
@@ -44,7 +44,7 @@
  */
 void ff_xvmc_init_block(MpegEncContext *s)
 {
-    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
+    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     assert(render && render->xvmc_id == AV_XVMC_ID);
 
     s->block = (int16_t (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
@@ -76,7 +76,7 @@ void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
  */
 int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
 {
-    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
+    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     const int mb_block_count = 4 + (1 << s->chroma_format);
 
     assert(avctx);
@@ -116,7 +116,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
         case  AV_PICTURE_TYPE_I:
             return 0; // no prediction from other frames
         case  AV_PICTURE_TYPE_B:
-            next = (struct xvmc_pix_fmt*)s->next_picture.f.data[2];
+            next = (struct xvmc_pix_fmt*)s->next_picture.f->data[2];
             if (!next)
                 return -1;
             if (next->xvmc_id != AV_XVMC_ID)
@@ -124,7 +124,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
             render->p_future_surface = next->p_surface;
             // no return here, going to set forward prediction
         case  AV_PICTURE_TYPE_P:
-            last = (struct xvmc_pix_fmt*)s->last_picture.f.data[2];
+            last = (struct xvmc_pix_fmt*)s->last_picture.f->data[2];
             if (!last)
                 last = render; // predict second field from the first
             if (last->xvmc_id != AV_XVMC_ID)
@@ -144,7 +144,7 @@ return -1;
  */
 void ff_xvmc_field_end(MpegEncContext *s)
 {
-    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
+    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     assert(render);
 
     if (render->filled_mv_blocks_num > 0)
@@ -185,7 +185,7 @@ void ff_xvmc_decode_mb(MpegEncContext *s)
     s->current_picture.qscale_table[mb_xy] = s->qscale;
 
     // start of XVMC-specific code
-    render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
+    render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     assert(render);
     assert(render->xvmc_id == AV_XVMC_ID);
     assert(render->mv_blocks);
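
All of the mpegvideo_xvmc.c hunks cast f->data[2] to struct xvmc_pix_fmt *: for this hwaccel the third data pointer carries the render structure rather than pixel data, and each callback re-derives and validates it. A hedged sketch of that convention with toy types (the magic value below is invented; the real code checks against AV_XVMC_ID):

    #include <assert.h>
    #include <stdint.h>

    typedef struct Frame { uint8_t *data[4]; } Frame;

    /* Toy render structure standing in for xvmc_pix_fmt. */
    struct render {
        int magic;                   /* stands in for xvmc_id */
        int filled_mv_blocks_num;
    };

    #define TOY_MAGIC 0x12345678     /* invented; real code uses AV_XVMC_ID */

    int main(void)
    {
        static struct render r = { TOY_MAGIC, 0 };
        Frame f = { { 0 } };

        /* The hwaccel smuggles the struct through data[2] ... */
        f.data[2] = (uint8_t *)&r;

        /* ... and every decode callback recovers and sanity-checks it. */
        struct render *render = (struct render *)f.data[2];
        assert(render && render->magic == TOY_MAGIC);
        return 0;
    }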
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index c76a14b..f0eaa9b 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -277,10 +277,10 @@ int ff_msmpeg4_pred_dc(MpegEncContext *s, int n,
             }else{
                 if(n<4){
                     wrap= s->linesize;
-                    dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8*  wrap ) + ((n & 1) + 2*s->mb_x) * 8;
+                    dest= s->current_picture.f->data[0] + (((n >> 1) + 2*s->mb_y) * 8*  wrap ) + ((n & 1) + 2*s->mb_x) * 8;
                 }else{
                     wrap= s->uvlinesize;
-                    dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
+                    dest= s->current_picture.f->data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
                 }
                 if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
                 else           a= get_dc(dest-8, wrap, scale*8);
diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index 025c229..6f9391f 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -416,7 +416,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 
     ff_MPV_frame_end(s);
 
-    f = &s->current_picture.f;
+    f = s->current_picture.f;
 
     if (v->respic == 3) {
         ctx->dsp.upsample_plane(f->data[0], f->linesize[0], w,      h);
diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
index 1af8ff5..ca36be2 100644
--- a/libavcodec/pthread_frame.c
+++ b/libavcodec/pthread_frame.c
@@ -767,7 +767,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
                            avctx->get_buffer2 == avcodec_default_get_buffer2);
 FF_ENABLE_DEPRECATION_WARNINGS
 
-    if (!f->f->buf[0])
+    if (!f->f || !f->f->buf[0])
         return;
 
     if (avctx->debug & FF_DEBUG_BUFFERS)
diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index f3f19dd..dc7f087 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -48,10 +48,10 @@ void ff_write_pass1_stats(MpegEncContext *s)
     snprintf(s->avctx->stats_out, 256,
              "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
              "fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
-             s->current_picture_ptr->f.display_picture_number,
-             s->current_picture_ptr->f.coded_picture_number,
+             s->current_picture_ptr->f->display_picture_number,
+             s->current_picture_ptr->f->coded_picture_number,
              s->pict_type,
-             s->current_picture.f.quality,
+             s->current_picture.f->quality,
              s->i_tex_bits,
              s->p_tex_bits,
              s->mv_bits,
@@ -777,10 +777,10 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
         else
             dts_pic = s->last_picture_ptr;
 
-        if (!dts_pic || dts_pic->f.pts == AV_NOPTS_VALUE)
+        if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE)
             wanted_bits = (uint64_t)(s->bit_rate * (double)picture_number / fps);
         else
-            wanted_bits = (uint64_t)(s->bit_rate * (double)dts_pic->f.pts / fps);
+            wanted_bits = (uint64_t)(s->bit_rate * (double)dts_pic->f->pts / fps);
     }
 
     diff = s->total_bits - wanted_bits;
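
For context on the ratecontrol.c hunk: wanted_bits is the bit budget the encoder should have spent by now, taken as bit_rate times the DTS picture's timestamp scaled by fps (falling back to picture_number when no valid pts exists), and diff then drives the quantizer up or down. A worked example with invented numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        double   bit_rate    = 800000.0;   /* 800 kb/s, invented */
        double   fps         = 25.0;
        int64_t  pts         = 50;         /* time-base ticks: 2 s at 25 fps */
        uint64_t wanted_bits = (uint64_t)(bit_rate * (double)pts / fps);
        uint64_t total_bits  = 1700000;    /* bits actually produced so far */
        int64_t  diff        = (int64_t)(total_bits - wanted_bits);

        /* 800000 * 50 / 25 = 1600000 wanted; 100000 bits over budget,
         * so the rate controller would raise qscale to compensate. */
        printf("wanted=%llu spent=%llu diff=%lld\n",
               (unsigned long long)wanted_bits,
               (unsigned long long)total_bits, (long long)diff);
        return 0;
    }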
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index d0e61f0..f5533df 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -565,7 +565,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
             return ret;
         ff_mpeg_er_frame_start(s);
     } else {
-        if (s->current_picture_ptr->f.pict_type != s->pict_type) {
+        if (s->current_picture_ptr->f->pict_type != s->pict_type) {
             av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
             return AVERROR_INVALIDDATA;
         }
@@ -739,11 +739,11 @@ static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         ff_MPV_frame_end(s);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-            if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
                 return ret;
             ff_print_debug_info(s, s->current_picture_ptr);
         } else if (s->last_picture_ptr != NULL) {
-            if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
                 return ret;
             ff_print_debug_info(s, s->last_picture_ptr);
         }
diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c
index 6b76651..1675ea2 100644
--- a/libavcodec/rv30.c
+++ b/libavcodec/rv30.c
@@ -166,7 +166,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
         if(mb_x)
             left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
         for(j = 0; j < 16; j += 4){
-            Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
+            Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
             for(i = !mb_x; i < 4; i++, Y += 4){
                 int ij = i + j;
                 loc_lim = 0;
@@ -186,7 +186,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
             if(mb_x)
                 left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
             for(j = 0; j < 8; j += 4){
-                C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
+                C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
                 for(i = !mb_x; i < 2; i++, C += 4){
                     int ij = i + (j >> 1);
                     loc_lim = 0;
@@ -208,7 +208,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
         if(row)
             top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
         for(j = 4*!row; j < 16; j += 4){
-            Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+            Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
             for(i = 0; i < 4; i++, Y += 4){
                 int ij = i + j;
                 loc_lim = 0;
@@ -228,7 +228,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
             if(row)
                 top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
             for(j = 4*!row; j < 8; j += 4){
-                C = s->current_picture_ptr->f.data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
+                C = s->current_picture_ptr->f->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
                 for(i = 0; i < 2; i++, C += 4){
                     int ij = i + (j >> 1);
                     loc_lim = 0;
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index b3f2a23..0720ffb 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -708,9 +708,9 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
     }
 
     dxy = ly*4 + lx;
-    srcY = dir ? s->next_picture_ptr->f.data[0] : s->last_picture_ptr->f.data[0];
-    srcU = dir ? s->next_picture_ptr->f.data[1] : s->last_picture_ptr->f.data[1];
-    srcV = dir ? s->next_picture_ptr->f.data[2] : s->last_picture_ptr->f.data[2];
+    srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
+    srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
+    srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
     src_x = s->mb_x * 16 + xoff + mx;
     src_y = s->mb_y * 16 + yoff + my;
     uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
@@ -1583,12 +1583,12 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
 
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-        if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+        if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->current_picture_ptr);
         got_picture = 1;
     } else if (s->last_picture_ptr != NULL) {
-        if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+        if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->last_picture_ptr);
         got_picture = 1;
@@ -1616,7 +1616,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
     if (buf_size == 0) {
         /* special case for last picture */
         if (s->low_delay==0 && s->next_picture_ptr) {
-            if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
                 return ret;
             s->next_picture_ptr = NULL;
 
@@ -1644,7 +1644,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
         return AVERROR_INVALIDDATA;
     }
-    if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) &&
+    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
         si.type == AV_PICTURE_TYPE_B) {
         av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
                "reference data.\n");
diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c
index e5b1821..b7e113c 100644
--- a/libavcodec/rv40.c
+++ b/libavcodec/rv40.c
@@ -453,7 +453,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
         }
 
         for(j = 0; j < 16; j += 4){
-            Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+            Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
             for(i = 0; i < 4; i++, Y += 4){
                 int ij = i + j;
                 int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
@@ -498,7 +498,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
         }
         for(k = 0; k < 2; k++){
             for(j = 0; j < 2; j++){
-                C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
+                C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                 for(i = 0; i < 2; i++, C += 4){
                     int ij = i + j*2;
                     int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index 7dc407f..b937b27 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -287,11 +287,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
         s->m.avctx                         = s->avctx;
         s->m.current_picture_ptr           = &s->m.current_picture;
         s->m.last_picture_ptr              = &s->m.last_picture;
-        s->m.last_picture.f.data[0]        = ref_plane;
+        s->m.last_picture.f->data[0]        = ref_plane;
         s->m.linesize                      =
-        s->m.last_picture.f.linesize[0]    =
-        s->m.new_picture.f.linesize[0]     =
-        s->m.current_picture.f.linesize[0] = stride;
+        s->m.last_picture.f->linesize[0]    =
+        s->m.new_picture.f->linesize[0]     =
+        s->m.current_picture.f->linesize[0] = stride;
         s->m.width                         = width;
         s->m.height                        = height;
         s->m.mb_width                      = block_width;
@@ -339,7 +339,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
         s->m.me.dia_size      = s->avctx->dia_size;
         s->m.first_slice_line = 1;
         for (y = 0; y < block_height; y++) {
-            s->m.new_picture.f.data[0] = src - y * 16 * stride; // ugly
+            s->m.new_picture.f->data[0] = src - y * 16 * stride; // ugly
             s->m.mb_y                  = y;
 
             for (i = 0; i < 16 && i + 16 * y < height; i++) {
@@ -509,6 +509,9 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx)
            s->rd_total / (double)(avctx->width * avctx->height *
                                   avctx->frame_number));
 
+    s->m.mb_type = NULL;
+    ff_MPV_common_end(&s->m);
+
     av_freep(&s->m.me.scratchpad);
     av_freep(&s->m.me.map);
     av_freep(&s->m.me.score_map);
@@ -531,6 +534,7 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx)
 static av_cold int svq1_encode_init(AVCodecContext *avctx)
 {
     SVQ1Context *const s = avctx->priv_data;
+    int ret;
 
     ff_dsputil_init(&s->dsp, avctx);
     ff_hpeldsp_init(&s->hdsp, avctx->flags);
@@ -554,6 +558,12 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
 
     s->avctx               = avctx;
     s->m.avctx             = avctx;
+
+    if ((ret = ff_MPV_common_init(&s->m)) < 0) {
+        svq1_encode_end(avctx);
+        return ret;
+    }
+
     s->m.picture_structure = PICT_FRAME;
     s->m.me.temp           =
     s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
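
The svq1enc.c hunks are the one behavioral addition in this stretch of the diff: svq1_encode_init() gains an ff_MPV_common_init() call, unwinding through svq1_encode_end() on failure, and svq1_encode_end() gains the matching ff_MPV_common_end(), apparently because the embedded MpegEncContext's pictures now need those routines to allocate and free their frames. A generic sketch of that paired init/teardown shape, with hypothetical names:

    #include <stdlib.h>

    /* Hypothetical context mirroring the pairing in svq1_encode_init()
     * and svq1_encode_end(): on a mid-init failure, run the full
     * teardown so partially initialized state is still released. */
    typedef struct Context {
        void *common;      /* stands in for the MpegEncContext resources */
        void *scratchpad;
    } Context;

    static void ctx_close(Context *c)
    {
        free(c->common);   /* free(NULL) is a safe no-op */
        free(c->scratchpad);
        c->common = c->scratchpad = NULL;
    }

    static int ctx_init(Context *c)
    {
        c->common = calloc(1, 64);
        if (!c->common) {
            ctx_close(c);  /* same pattern as calling svq1_encode_end() */
            return -1;
        }
        c->scratchpad = calloc(1, 64);
        if (!c->scratchpad) {
            ctx_close(c);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        Context c = { 0 };
        if (ctx_init(&c) < 0)
            return 1;
        ctx_close(&c);
        return 0;
    }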
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index e04f455..d9832e2 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -2292,7 +2292,8 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
 
 void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
 {
-    av_frame_unref(f->f);
+    if (f->f)
+        av_frame_unref(f->f);
 }
 
 void ff_thread_finish_setup(AVCodecContext *avctx)
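
The utils.c stub and the earlier pthread_frame.c hunk add the same guard: now that the frame is a separately allocated pointer, release paths can run on a ThreadFrame whose f was never allocated, so both check the pointer before dereferencing it. A minimal sketch with simplified types:

    #include <stddef.h>

    typedef struct Buf   { int refcount; } Buf;
    typedef struct Frame { Buf *buf[8]; } Frame;
    typedef struct ThreadFrame { Frame *f; } ThreadFrame;

    /* Simplified release path: f->f may still be NULL when release is
     * called on an uninitialized ThreadFrame, so check before touching
     * anything behind the pointer. */
    static void release_buffer(ThreadFrame *f)
    {
        if (!f->f || !f->f->buf[0])
            return;                 /* nothing allocated yet: no-op */
        f->f->buf[0]->refcount--;   /* stand-in for av_frame_unref() */
    }

    int main(void)
    {
        ThreadFrame tf = { NULL };
        release_buffer(&tf);        /* safe even before any allocation */
        return 0;
    }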
diff --git a/libavcodec/vaapi.c b/libavcodec/vaapi.c
index 6183d0b..b2dc41d 100644
--- a/libavcodec/vaapi.c
+++ b/libavcodec/vaapi.c
@@ -205,7 +205,7 @@ int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx)
         goto finish;
 
     ret = ff_vaapi_render_picture(vactx,
-                                  ff_vaapi_get_surface_id(&s->current_picture_ptr->f));
+                                  ff_vaapi_get_surface_id(s->current_picture_ptr->f));
     if (ret < 0)
         goto finish;
 
diff --git a/libavcodec/vaapi_mpeg2.c b/libavcodec/vaapi_mpeg2.c
index e0f9193..03fb670 100644
--- a/libavcodec/vaapi_mpeg2.c
+++ b/libavcodec/vaapi_mpeg2.c
@@ -73,10 +73,10 @@ static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_
 
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_B:
-        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture.f);
+        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
         // fall-through
     case AV_PICTURE_TYPE_P:
-        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture.f);
+        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
         break;
     }
 
diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c
index 098b37a..fc3a15e 100644
--- a/libavcodec/vaapi_mpeg4.c
+++ b/libavcodec/vaapi_mpeg4.c
@@ -81,7 +81,7 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
     pic_param->quant_precision                          = s->quant_precision;
     pic_param->vop_fields.value                         = 0; /* reset all bits */
     pic_param->vop_fields.bits.vop_coding_type          = s->pict_type - AV_PICTURE_TYPE_I;
-    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f.pict_type - AV_PICTURE_TYPE_I : 0;
+    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f->pict_type - AV_PICTURE_TYPE_I : 0;
     pic_param->vop_fields.bits.vop_rounding_type        = s->no_rounding;
     pic_param->vop_fields.bits.intra_dc_vlc_thr         = mpeg4_get_intra_dc_vlc_thr(ctx);
     pic_param->vop_fields.bits.top_field_first          = s->top_field_first;
@@ -95,9 +95,9 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
     pic_param->TRD                                      = s->pp_time;
 
     if (s->pict_type == AV_PICTURE_TYPE_B)
-        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture.f);
+        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
     if (s->pict_type != AV_PICTURE_TYPE_I)
-        pic_param->forward_reference_picture  = ff_vaapi_get_surface_id(&s->last_picture.f);
+        pic_param->forward_reference_picture  = ff_vaapi_get_surface_id(s->last_picture.f);
 
     /* Fill in VAIQMatrixBufferMPEG4 */
     /* Only the first inverse quantisation method uses the weighting matrices */
diff --git a/libavcodec/vaapi_vc1.c b/libavcodec/vaapi_vc1.c
index f50c5cf..8886b0b 100644
--- a/libavcodec/vaapi_vc1.c
+++ b/libavcodec/vaapi_vc1.c
@@ -258,10 +258,10 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t
 
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_B:
-        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture.f);
+        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
         // fall-through
     case AV_PICTURE_TYPE_P:
-        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture.f);
+        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
         break;
     }
 
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index a7f2aab..81635b1 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -848,7 +848,7 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
         v->s.pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
         if (v->fptype & 4)
             v->s.pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
-        v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
+        v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
         if (!v->pic_header_flag)
             goto parse_common_info;
     }
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index d697b8c..5d3ed80 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -78,9 +78,9 @@ static void init_block_index(VC1Context *v)
     MpegEncContext *s = &v->s;
     ff_init_block_index(s);
     if (v->field_mode && !(v->second_field ^ v->tff)) {
-        s->dest[0] += s->current_picture_ptr->f.linesize[0];
-        s->dest[1] += s->current_picture_ptr->f.linesize[1];
-        s->dest[2] += s->current_picture_ptr->f.linesize[2];
+        s->dest[0] += s->current_picture_ptr->f->linesize[0];
+        s->dest[1] += s->current_picture_ptr->f->linesize[1];
+        s->dest[2] += s->current_picture_ptr->f->linesize[2];
     }
 }
 
@@ -353,7 +353,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
-        !v->s.last_picture.f.data[0])
+        !v->s.last_picture.f->data[0])
         return;
 
     mx = s->mv[dir][0][0];
@@ -385,24 +385,24 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     }
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
-            srcY = s->current_picture.f.data[0];
-            srcU = s->current_picture.f.data[1];
-            srcV = s->current_picture.f.data[2];
+            srcY = s->current_picture.f->data[0];
+            srcU = s->current_picture.f->data[1];
+            srcV = s->current_picture.f->data[2];
             luty  = v->curr_luty;
             lutuv = v->curr_lutuv;
             use_ic = v->curr_use_ic;
         } else {
-            srcY = s->last_picture.f.data[0];
-            srcU = s->last_picture.f.data[1];
-            srcV = s->last_picture.f.data[2];
+            srcY = s->last_picture.f->data[0];
+            srcU = s->last_picture.f->data[1];
+            srcV = s->last_picture.f->data[2];
             luty  = v->last_luty;
             lutuv = v->last_lutuv;
             use_ic = v->last_use_ic;
         }
     } else {
-        srcY = s->next_picture.f.data[0];
-        srcU = s->next_picture.f.data[1];
-        srcV = s->next_picture.f.data[2];
+        srcY = s->next_picture.f->data[0];
+        srcU = s->next_picture.f->data[1];
+        srcV = s->next_picture.f->data[2];
         luty  = v->next_luty;
         lutuv = v->next_lutuv;
         use_ic = v->next_use_ic;
@@ -435,9 +435,9 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
 
     if (v->field_mode && v->ref_field_type[dir]) {
-        srcY += s->current_picture_ptr->f.linesize[0];
-        srcU += s->current_picture_ptr->f.linesize[1];
-        srcV += s->current_picture_ptr->f.linesize[2];
+        srcY += s->current_picture_ptr->f->linesize[0];
+        srcU += s->current_picture_ptr->f->linesize[1];
+        srcV += s->current_picture_ptr->f->linesize[2];
     }
 
     /* for grayscale we should not try to read from unknown area */
@@ -572,7 +572,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
-        !v->s.last_picture.f.data[0])
+        !v->s.last_picture.f->data[0])
         return;
 
     mx = s->mv[dir][n][0];
@@ -580,16 +580,16 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
-            srcY = s->current_picture.f.data[0];
+            srcY = s->current_picture.f->data[0];
             luty = v->curr_luty;
             use_ic = v->curr_use_ic;
         } else {
-            srcY = s->last_picture.f.data[0];
+            srcY = s->last_picture.f->data[0];
             luty = v->last_luty;
             use_ic = v->last_use_ic;
         }
     } else {
-        srcY = s->next_picture.f.data[0];
+        srcY = s->next_picture.f->data[0];
         luty = v->next_luty;
         use_ic = v->next_use_ic;
     }
@@ -687,7 +687,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 
     srcY += src_y * s->linesize + src_x;
     if (v->field_mode && v->ref_field_type[dir])
-        srcY += s->current_picture_ptr->f.linesize[0];
+        srcY += s->current_picture_ptr->f->linesize[0];
 
     if (fieldmv && !(src_y & 1))
         v_edge_pos--;
@@ -817,7 +817,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     uint8_t (*lutuv)[256];
     int use_ic;
 
-    if (!v->field_mode && !v->s.last_picture.f.data[0])
+    if (!v->field_mode && !v->s.last_picture.f->data[0])
         return;
     if (s->flags & CODEC_FLAG_GRAY)
         return;
@@ -848,7 +848,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
         if (dominant)
             chroma_ref_type = !v->cur_field_type;
     }
-    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
+    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
         return;
     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
@@ -879,19 +879,19 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
 
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
-            srcU = s->current_picture.f.data[1];
-            srcV = s->current_picture.f.data[2];
+            srcU = s->current_picture.f->data[1];
+            srcV = s->current_picture.f->data[2];
             lutuv = v->curr_lutuv;
             use_ic = v->curr_use_ic;
         } else {
-            srcU = s->last_picture.f.data[1];
-            srcV = s->last_picture.f.data[2];
+            srcU = s->last_picture.f->data[1];
+            srcV = s->last_picture.f->data[2];
             lutuv = v->last_lutuv;
             use_ic = v->last_use_ic;
         }
     } else {
-        srcU = s->next_picture.f.data[1];
-        srcV = s->next_picture.f.data[2];
+        srcU = s->next_picture.f->data[1];
+        srcV = s->next_picture.f->data[2];
         lutuv = v->next_lutuv;
         use_ic = v->next_use_ic;
     }
@@ -906,8 +906,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
 
     if (v->field_mode) {
         if (chroma_ref_type) {
-            srcU += s->current_picture_ptr->f.linesize[1];
-            srcV += s->current_picture_ptr->f.linesize[2];
+            srcU += s->current_picture_ptr->f->linesize[1];
+            srcV += s->current_picture_ptr->f->linesize[2];
         }
     }
 
@@ -1012,13 +1012,13 @@ static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
         uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
         uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
         if (i < 2 ? dir : dir2) {
-            srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-            srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
             lutuv  = v->next_lutuv;
             use_ic = v->next_use_ic;
         } else {
-            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
             lutuv  = v->last_lutuv;
             use_ic = v->last_use_ic;
         }
@@ -1908,7 +1908,7 @@ static void vc1_interp_mc(VC1Context *v)
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
     int use_ic = v->next_use_ic;
 
-    if (!v->field_mode && !v->s.next_picture.f.data[0])
+    if (!v->field_mode && !v->s.next_picture.f->data[0])
         return;
 
     mx   = s->mv[1][0][0];
@@ -1924,9 +1924,9 @@ static void vc1_interp_mc(VC1Context *v)
         uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
         uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
     }
-    srcY = s->next_picture.f.data[0];
-    srcU = s->next_picture.f.data[1];
-    srcV = s->next_picture.f.data[2];
+    srcY = s->next_picture.f->data[0];
+    srcU = s->next_picture.f->data[1];
+    srcV = s->next_picture.f->data[2];
 
     src_x   = s->mb_x * 16 + (mx   >> 2);
     src_y   = s->mb_y * 16 + (my   >> 2);
@@ -1950,9 +1950,9 @@ static void vc1_interp_mc(VC1Context *v)
     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
 
     if (v->field_mode && v->ref_field_type[1]) {
-        srcY += s->current_picture_ptr->f.linesize[0];
-        srcU += s->current_picture_ptr->f.linesize[1];
-        srcV += s->current_picture_ptr->f.linesize[2];
+        srcY += s->current_picture_ptr->f->linesize[0];
+        srcU += s->current_picture_ptr->f->linesize[1];
+        srcV += s->current_picture_ptr->f->linesize[2];
     }
 
     /* for grayscale we should not try to read from unknown area */
@@ -5160,7 +5160,7 @@ static void vc1_decode_skip_blocks(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
 
-    if (!v->s.last_picture.f.data[0])
+    if (!v->s.last_picture.f->data[0])
         return;
 
     ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
@@ -5169,9 +5169,9 @@ static void vc1_decode_skip_blocks(VC1Context *v)
         s->mb_x = 0;
         init_block_index(v);
         ff_update_block_index(s);
-        memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize,   s->linesize   * 16);
-        memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
-        memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
+        memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize,   s->linesize   * 16);
+        memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
+        memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
         ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         s->first_slice_line = 0;
     }
@@ -5370,15 +5370,15 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
                            v->sprite_output_frame->linesize[plane] * row;
 
             for (sprite = 0; sprite <= v->two_sprites; sprite++) {
-                uint8_t *iplane = s->current_picture.f.data[plane];
-                int      iline  = s->current_picture.f.linesize[plane];
+                uint8_t *iplane = s->current_picture.f->data[plane];
+                int      iline  = s->current_picture.f->linesize[plane];
                 int      ycoord = yoff[sprite] + yadv[sprite] * row;
                 int      yline  = ycoord >> 16;
                 int      next_line;
                 ysub[sprite] = ycoord & 0xFFFF;
                 if (sprite) {
-                    iplane = s->last_picture.f.data[plane];
-                    iline  = s->last_picture.f.linesize[plane];
+                    iplane = s->last_picture.f->data[plane];
+                    iline  = s->last_picture.f->linesize[plane];
                 }
                 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
                 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
@@ -5447,12 +5447,12 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 
     vc1_parse_sprites(v, gb, &sd);
 
-    if (!s->current_picture.f.data[0]) {
+    if (!s->current_picture.f->data[0]) {
         av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
         return -1;
     }
 
-    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
+    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
         av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
         v->two_sprites = 0;
     }
@@ -5472,7 +5472,7 @@ static void vc1_sprite_flush(AVCodecContext *avctx)
 {
     VC1Context *v     = avctx->priv_data;
     MpegEncContext *s = &v->s;
-    AVFrame *f = &s->current_picture.f;
+    AVFrame *f = s->current_picture.f;
     int plane, i;
 
     /* Windows Media Image codecs have a convergence interval of two keyframes.
@@ -5770,7 +5770,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
         /* special case for last picture */
         if (s->low_delay == 0 && s->next_picture_ptr) {
-            if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
                 return ret;
             s->next_picture_ptr = NULL;
 
@@ -5936,8 +5936,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     // for skipping the frame
-    s->current_picture.f.pict_type = s->pict_type;
-    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f->pict_type = s->pict_type;
+    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     /* skip B-frames if we don't have reference frames */
     if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
@@ -5961,15 +5961,15 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     // process pulldown flags
-    s->current_picture_ptr->f.repeat_pict = 0;
+    s->current_picture_ptr->f->repeat_pict = 0;
     // Pulldown flags are only valid when 'broadcast' has been set.
     // So ticks_per_frame will be 2
     if (v->rff) {
         // repeat field
-        s->current_picture_ptr->f.repeat_pict = 1;
+        s->current_picture_ptr->f->repeat_pict = 1;
     } else if (v->rptfrm) {
         // repeat frames
-        s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
+        s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
     }
 
     s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
@@ -5990,9 +5990,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         v->bits = buf_size * 8;
         v->end_mb_x = s->mb_width;
         if (v->field_mode) {
-            s->current_picture.f.linesize[0] <<= 1;
-            s->current_picture.f.linesize[1] <<= 1;
-            s->current_picture.f.linesize[2] <<= 1;
+            s->current_picture.f->linesize[0] <<= 1;
+            s->current_picture.f->linesize[1] <<= 1;
+            s->current_picture.f->linesize[2] <<= 1;
             s->linesize                      <<= 1;
             s->uvlinesize                    <<= 1;
         }
@@ -6051,9 +6051,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         }
         if (v->field_mode) {
             v->second_field = 0;
-            s->current_picture.f.linesize[0] >>= 1;
-            s->current_picture.f.linesize[1] >>= 1;
-            s->current_picture.f.linesize[2] >>= 1;
+            s->current_picture.f->linesize[0] >>= 1;
+            s->current_picture.f->linesize[1] >>= 1;
+            s->current_picture.f->linesize[2] >>= 1;
             s->linesize                      >>= 1;
             s->uvlinesize                    >>= 1;
             if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
@@ -6086,12 +6086,12 @@ image:
         *got_frame = 1;
     } else {
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-            if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
                 goto err;
             ff_print_debug_info(s, s->current_picture_ptr);
             *got_frame = 1;
         } else if (s->last_picture_ptr != NULL) {
-            if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
                 goto err;
             ff_print_debug_info(s, s->last_picture_ptr);
             *got_frame = 1;
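
The vc1dec.c field-mode hunks near the end show why these linesize writes had to follow the pointer too: the strides are shifted left for the duration of field decoding and shifted back afterwards, so the doubling and the restore must both hit the same AVFrame. A toy sketch of keeping that pair balanced:

    #include <assert.h>

    /* Toy model of the vc1dec.c pattern: linesize is doubled while a
     * field picture is decoded and halved back afterwards, so the two
     * shifts must stay balanced on every exit path. */
    int main(void)
    {
        int linesize[3] = { 640, 320, 320 };
        int field_mode = 1;

        if (field_mode)
            for (int i = 0; i < 3; i++)
                linesize[i] <<= 1;   /* address one field of the frame */

        /* ... per-field decoding would happen here ... */

        if (field_mode)
            for (int i = 0; i < 3; i++)
                linesize[i] >>= 1;   /* restore frame-sized strides */

        assert(linesize[0] == 640 && linesize[1] == 320);
        return 0;
    }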
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
index 5ad5d06..5406874 100644
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@ -57,7 +57,7 @@ int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
     MpegEncContext *s = avctx->priv_data;
     Picture *pic = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
-    VdpVideoSurface surf = ff_vdpau_get_surface_id(&pic->f);
+    VdpVideoSurface surf = ff_vdpau_get_surface_id(pic->f);
 
     hwctx->render(hwctx->decoder, surf, (void *)&pic_ctx->info,
                   pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
diff --git a/libavcodec/vdpau_mpeg12.c b/libavcodec/vdpau_mpeg12.c
index 0f92cef..6356293 100644
--- a/libavcodec/vdpau_mpeg12.c
+++ b/libavcodec/vdpau_mpeg12.c
@@ -43,12 +43,12 @@ static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
 
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_B:
-        ref = ff_vdpau_get_surface_id(&s->next_picture.f);
+        ref = ff_vdpau_get_surface_id(s->next_picture.f);
         assert(ref != VDP_INVALID_HANDLE);
         info->backward_reference = ref;
         /* fall through to forward prediction */
     case AV_PICTURE_TYPE_P:
-        ref = ff_vdpau_get_surface_id(&s->last_picture.f);
+        ref = ff_vdpau_get_surface_id(s->last_picture.f);
         info->forward_reference  = ref;
     }
 
diff --git a/libavcodec/vdpau_mpeg4.c b/libavcodec/vdpau_mpeg4.c
index 3c6c8b3..57df867 100644
--- a/libavcodec/vdpau_mpeg4.c
+++ b/libavcodec/vdpau_mpeg4.c
@@ -46,13 +46,13 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
 
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_B:
-        ref = ff_vdpau_get_surface_id(&s->next_picture.f);
+        ref = ff_vdpau_get_surface_id(s->next_picture.f);
         assert(ref != VDP_INVALID_HANDLE);
         info->backward_reference = ref;
         info->vop_coding_type    = 2;
         /* fall-through */
     case AV_PICTURE_TYPE_P:
-        ref = ff_vdpau_get_surface_id(&s->last_picture.f);
+        ref = ff_vdpau_get_surface_id(s->last_picture.f);
         assert(ref != VDP_INVALID_HANDLE);
         info->forward_reference  = ref;
     }
diff --git a/libavcodec/vdpau_vc1.c b/libavcodec/vdpau_vc1.c
index 392c511..b100daa 100644
--- a/libavcodec/vdpau_vc1.c
+++ b/libavcodec/vdpau_vc1.c
@@ -44,12 +44,12 @@ static int vdpau_vc1_start_frame(AVCodecContext *avctx,
 
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_B:
-        ref = ff_vdpau_get_surface_id(&s->next_picture.f);
+        ref = ff_vdpau_get_surface_id(s->next_picture.f);
         assert(ref != VDP_INVALID_HANDLE);
         info->backward_reference = ref;
         /* fall-through */
     case AV_PICTURE_TYPE_P:
-        ref = ff_vdpau_get_surface_id(&s->last_picture.f);
+        ref = ff_vdpau_get_surface_id(s->last_picture.f);
         assert(ref != VDP_INVALID_HANDLE);
         info->forward_reference  = ref;
     }
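
A recurring shape across the vaapi_* and vdpau_* hunks: the picture-type switch deliberately falls through from B to P, so B-frames register both a backward and a forward reference surface while P-frames register only the forward one, with the surface lookups now taking the AVFrame pointer directly. A compact sketch of that fallthrough with toy surface ids:

    #include <stdio.h>

    enum pict_type { TYPE_I, TYPE_P, TYPE_B };

    /* Toy stand-in for the next/last reference surface lookup. */
    static unsigned get_surface_id(const char *which)
    {
        return (unsigned)which[0];   /* any deterministic toy id */
    }

    /* Mirrors the switch in the vdpau/vaapi start_frame hunks: B falls
     * through to P so it also picks up the forward reference. */
    static void set_references(enum pict_type t,
                               unsigned *backward, unsigned *forward)
    {
        switch (t) {
        case TYPE_B:
            *backward = get_surface_id("next");
            /* fall through: B also needs the forward reference */
        case TYPE_P:
            *forward = get_surface_id("last");
            break;
        case TYPE_I:
            break;                   /* intra: no references */
        }
    }

    int main(void)
    {
        unsigned b = 0, f = 0;
        set_references(TYPE_B, &b, &f);
        printf("B: backward=%u forward=%u\n", b, f);
        b = f = 0;
        set_references(TYPE_P, &b, &f);
        printf("P: backward=%u forward=%u\n", b, f);
        return 0;
    }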


