[PATCH 2/3] Move the definition of the ffplay lavfi source to cmdutils.[hc], where it can be shared with ffmpeg.c.

Stefano Sabatini stefano.sabatini-lala
Mon Nov 1 17:43:45 CET 2010


---
 cmdutils.c |  167 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 cmdutils.h |   10 +++
 ffplay.c   |  187 +++++-------------------------------------------------------
 3 files changed, 192 insertions(+), 172 deletions(-)
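
Not part of the patch itself: below is a rough sketch of how a client such as
ffmpeg.c could wire up the shared ffsrc/ffsink pair once this lands. The names
setup_filters() and my_get_video_frame() are made up for illustration; the
caller is expected to supply its own frame-pulling callback, stream pointer and
opaque value, and error handling is reduced to the bare minimum.

    #include "libavformat/avformat.h"
    #include "libavfilter/avfilter.h"
    #include "cmdutils.h"

    /* Hypothetical caller-supplied frame puller; same contract as the
     * get_video_frame member of FFSrcContext.  A real client would decode
     * the next video frame from its own input here. */
    static int my_get_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                  int64_t *pts, int64_t *pos)
    {
        return -1; /* placeholder */
    }

    static int setup_filters(AVFilterGraph *graph, AVStream *st, void *opaque,
                             AVFilterContext **filt_src, AVFilterContext **filt_out)
    {
        FFSrcContext  src_ctx  = { .stream          = st,
                                   .get_video_frame = my_get_video_frame,
                                   .priv            = opaque };
        FFSinkContext sink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
        int ret;

        /* instantiate the shared source and sink filters */
        if ((ret = avfilter_open(filt_src, &ffsrc,  "src")) < 0 ||
            (ret = avfilter_open(filt_out, &ffsink, "out")) < 0)
            return ret;

        /* ffsrc_init() copies the context struct, so a stack object is fine */
        if ((ret = avfilter_init_filter(*filt_src, NULL, &src_ctx))  < 0 ||
            (ret = avfilter_init_filter(*filt_out, NULL, &sink_ctx)) < 0)
            return ret;

        if ((ret = avfilter_graph_add_filter(graph, *filt_src)) < 0 ||
            (ret = avfilter_graph_add_filter(graph, *filt_out)) < 0)
            return ret;

        return avfilter_link(*filt_src, 0, *filt_out, 0);
    }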

diff --git a/cmdutils.c b/cmdutils.c
index 49c6ad5..97f2ffb 100644
--- a/cmdutils.c
+++ b/cmdutils.c
@@ -38,6 +38,7 @@
 #include "libavutil/pixdesc.h"
 #include "libavutil/eval.h"
 #include "libavcodec/opt.h"
+#include "libavcore/imgutils.h"
 #include "libavcore/avcore.h"
 #include "cmdutils.h"
 #include "version.h"
@@ -809,4 +810,170 @@ int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
     return 1;
 }
 
+static int ffsrc_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    AVFilterContext *ctx = avctx->opaque;
+    AVFilterBufferRef *picref;
+    int perms = AV_PERM_WRITE;
+    int i, w, h, stride[4];
+    unsigned edge;
+
+    if (frame->buffer_hints & FF_BUFFER_HINTS_VALID) {
+        if (frame->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
+        if (frame->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
+        if (frame->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
+    }
+    if (frame->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
+
+    w = avctx->width;
+    h = avctx->height;
+    avcodec_align_dimensions2(avctx, &w, &h, stride);
+    edge = avctx->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
+    w += edge << 1;
+    h += edge << 1;
+
+    if (!(picref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
+        return AVERROR(ENOMEM);
+
+    picref->video->w = avctx->width;
+    picref->video->h = avctx->height;
+    for (i = 0; i < 4; i ++) {
+        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[picref->format].log2_chroma_w : 0;
+        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[picref->format].log2_chroma_h : 0;
+
+        if (picref->data[i]) {
+            picref->data[i] += (edge >> hshift) + ((edge * picref->linesize[i]) >> vshift);
+        }
+        frame->data[i]     = picref->data[i];
+        frame->linesize[i] = picref->linesize[i];
+    }
+    frame->opaque = picref;
+    frame->age    = INT_MAX;
+    frame->type   = FF_BUFFER_TYPE_USER;
+    frame->reordered_opaque = avctx->reordered_opaque;
+    return 0;
+}
+
+static void ffsrc_release_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    memset(frame->data, 0, sizeof(frame->data));
+    avfilter_unref_buffer(frame->opaque);
+}
+
+static int ffsrc_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    AVFilterBufferRef *picref = frame->opaque;
+
+    if (frame->data[0] == NULL) {
+        frame->buffer_hints |= FF_BUFFER_HINTS_READABLE;
+        return avctx->get_buffer(avctx, frame);
+    }
+
+    if ((avctx->width != picref->video->w) || (avctx->height != picref->video->h) ||
+        (avctx->pix_fmt != picref->format)) {
+        av_log(avctx, AV_LOG_ERROR, "Picture properties changed.\n");
+        return AVERROR(EINVAL);
+    }
+
+    frame->reordered_opaque = avctx->reordered_opaque;
+    return 0;
+}
+
+static int ffsrc_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+    FFSrcContext *priv = ctx->priv;
+    if (!opaque)
+        return AVERROR(EINVAL);
+
+    *priv = *(FFSrcContext *)opaque;
+    priv->stream->codec->opaque = ctx;
+
+    if (priv->stream->codec->codec->capabilities & CODEC_CAP_DR1) {
+        priv->use_dr1 = 1;
+        priv->stream->codec->get_buffer     = ffsrc_get_buffer;
+        priv->stream->codec->release_buffer = ffsrc_release_buffer;
+        priv->stream->codec->reget_buffer   = ffsrc_reget_buffer;
+    }
+
+    priv->frame = avcodec_alloc_frame();
+
+    return 0;
+}
+
+static void ffsrc_uninit(AVFilterContext *ctx)
+{
+    FFSrcContext *priv = ctx->priv;
+    av_free(priv->frame);
+}
+
+static int ffsrc_request_frame(AVFilterLink *outlink)
+{
+    FFSrcContext *priv = outlink->src->priv;
+    AVFilterBufferRef *picref;
+    int64_t pts = 0, pos = 0;
+    int ret;
+
+    if ((ret = priv->get_video_frame(outlink->src, priv->frame, &pts, &pos)) < 0)
+        return ret;
+
+    if (priv->use_dr1) {
+        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
+    } else {
+        picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+        av_image_copy(picref->data, picref->linesize,
+                      priv->frame->data, priv->frame->linesize,
+                      picref->format, outlink->w, outlink->h);
+    }
+
+    picref->pts = pts;
+    picref->pos = pos;
+    picref->video->pixel_aspect = priv->stream->codec->sample_aspect_ratio;
+    avfilter_start_frame(outlink, picref);
+    avfilter_draw_slice(outlink, 0, outlink->h, 1);
+    avfilter_end_frame(outlink);
+
+    return 0;
+}
+
+static int ffsrc_query_formats(AVFilterContext *ctx)
+{
+    FFSrcContext *priv = ctx->priv;
+    enum PixelFormat pix_fmts[] = {
+        priv->stream->codec->pix_fmt, PIX_FMT_NONE
+    };
+
+    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
+    return 0;
+}
+
+static int ffsrc_config_props(AVFilterLink *outlink)
+{
+    FFSrcContext *priv = outlink->src->priv;
+    AVCodecContext *avctx = priv->stream->codec;
+
+    outlink->w = avctx->width;
+    outlink->h = avctx->height;
+    outlink->time_base = priv->stream->time_base;
+
+    return 0;
+}
+
+AVFilter ffsrc = {
+    .name      = "ffsrc",
+
+    .priv_size = sizeof(FFSrcContext),
+
+    .init      = ffsrc_init,
+    .uninit    = ffsrc_uninit,
+
+    .query_formats = ffsrc_query_formats,
+
+    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
+    .outputs   = (AVFilterPad[]) {{ .name = "default",
+                                    .type = AVMEDIA_TYPE_VIDEO,
+                                    .request_frame = ffsrc_request_frame,
+                                    .config_props  = ffsrc_config_props, },
+                                  { .name = NULL }},
+};
+
 #endif /* CONFIG_AVFILTER */
diff --git a/cmdutils.h b/cmdutils.h
index b431b2e..4a21c50 100644
--- a/cmdutils.h
+++ b/cmdutils.h
@@ -279,6 +279,16 @@ extern AVFilter ffsink;
 int get_filtered_video_frame(AVFilterContext *sink, AVFrame *frame,
                              AVFilterBufferRef **picref, AVRational *pts_tb);
 
+typedef struct {
+    AVStream *stream;
+    AVFrame *frame;
+    int (*get_video_frame)(AVFilterContext *, AVFrame *, int64_t *pts, int64_t *pos);
+    int use_dr1;
+    void *priv;
+} FFSrcContext;
+
+extern AVFilter ffsrc;
+
 #endif /* CONFIG_AVFILTER */
 
 #endif /* FFMPEG_CMDUTILS_H */
diff --git a/ffplay.c b/ffplay.c
index eecf16a..7fb554d 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -1602,183 +1602,21 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
 }
 
 #if CONFIG_AVFILTER
-typedef struct {
-    VideoState *is;
-    AVFrame *frame;
-    int use_dr1;
-} FilterPriv;
-
-static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    AVFilterContext *ctx = codec->opaque;
-    AVFilterBufferRef  *ref;
-    int perms = AV_PERM_WRITE;
-    int i, w, h, stride[4];
-    unsigned edge;
-
-    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
-        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
-        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
-        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
-    }
-    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
-
-    w = codec->width;
-    h = codec->height;
-    avcodec_align_dimensions2(codec, &w, &h, stride);
-    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
-    w += edge << 1;
-    h += edge << 1;
-
-    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
-        return -1;
-
-    ref->video->w = codec->width;
-    ref->video->h = codec->height;
-    for(i = 0; i < 4; i ++) {
-        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
-        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
-
-        if (ref->data[i]) {
-            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
-        }
-        pic->data[i]     = ref->data[i];
-        pic->linesize[i] = ref->linesize[i];
-    }
-    pic->opaque = ref;
-    pic->age    = INT_MAX;
-    pic->type   = FF_BUFFER_TYPE_USER;
-    pic->reordered_opaque = codec->reordered_opaque;
-    return 0;
-}
-
-static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    memset(pic->data, 0, sizeof(pic->data));
-    avfilter_unref_buffer(pic->opaque);
-}
-
-static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    AVFilterBufferRef *ref = pic->opaque;
-
-    if (pic->data[0] == NULL) {
-        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
-        return codec->get_buffer(codec, pic);
-    }
-
-    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
-        (codec->pix_fmt != ref->format)) {
-        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
-        return -1;
-    }
-
-    pic->reordered_opaque = codec->reordered_opaque;
-    return 0;
-}
-
-static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
-{
-    FilterPriv *priv = ctx->priv;
-    AVCodecContext *codec;
-    if(!opaque) return -1;
-
-    priv->is = opaque;
-    codec    = priv->is->video_st->codec;
-    codec->opaque = ctx;
-    if(codec->codec->capabilities & CODEC_CAP_DR1) {
-        priv->use_dr1 = 1;
-        codec->get_buffer     = input_get_buffer;
-        codec->release_buffer = input_release_buffer;
-        codec->reget_buffer   = input_reget_buffer;
-    }
-
-    priv->frame = avcodec_alloc_frame();
-
-    return 0;
-}
-
-static void input_uninit(AVFilterContext *ctx)
+static int ffsrc_get_video_frame(AVFilterContext *ctx, AVFrame *frame,
+                                 int64_t *pts, int64_t *pos)
 {
-    FilterPriv *priv = ctx->priv;
-    av_free(priv->frame);
-}
-
-static int input_request_frame(AVFilterLink *link)
-{
-    FilterPriv *priv = link->src->priv;
-    AVFilterBufferRef *picref;
-    int64_t pts = 0;
+    FFSrcContext *priv = ctx->priv;
+    VideoState *is = priv->priv;
     AVPacket pkt;
     int ret;
 
-    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
+    while (!(ret = get_video_frame(is, frame, pts, &pkt)))
         av_free_packet(&pkt);
-    if (ret < 0)
-        return -1;
 
-    if(priv->use_dr1) {
-        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
-    } else {
-        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
-        av_image_copy(picref->data, picref->linesize,
-                      priv->frame->data, priv->frame->linesize,
-                      picref->format, link->w, link->h);
-    }
+    *pos = pkt.pos;
     av_free_packet(&pkt);
-
-    picref->pts = pts;
-    picref->pos = pkt.pos;
-    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
-    avfilter_start_frame(link, picref);
-    avfilter_draw_slice(link, 0, link->h, 1);
-    avfilter_end_frame(link);
-
-    return 0;
-}
-
-static int input_query_formats(AVFilterContext *ctx)
-{
-    FilterPriv *priv = ctx->priv;
-    enum PixelFormat pix_fmts[] = {
-        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
-    };
-
-    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
-    return 0;
-}
-
-static int input_config_props(AVFilterLink *link)
-{
-    FilterPriv *priv  = link->src->priv;
-    AVCodecContext *c = priv->is->video_st->codec;
-
-    link->w = c->width;
-    link->h = c->height;
-    link->time_base = priv->is->video_st->time_base;
-
-    return 0;
+    return ret;
 }
-
-static AVFilter input_filter =
-{
-    .name      = "ffplay_input",
-
-    .priv_size = sizeof(FilterPriv),
-
-    .init      = input_init,
-    .uninit    = input_uninit,
-
-    .query_formats = input_query_formats,
-
-    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
-    .outputs   = (AVFilterPad[]) {{ .name = "default",
-                                    .type = AVMEDIA_TYPE_VIDEO,
-                                    .request_frame = input_request_frame,
-                                    .config_props  = input_config_props, },
-                                  { .name = NULL }},
-};
-
 #endif  /* CONFIG_AVFILTER */
 
 static int video_thread(void *arg)
@@ -1793,15 +1631,20 @@ static int video_thread(void *arg)
     int64_t pos;
     char sws_flags_str[128];
     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
+    FFSrcContext ffsrc_ctx = {
+        .stream = is->video_st,
+        .get_video_frame = ffsrc_get_video_frame,
+        .priv = is,
+    };
     AVFilterContext *filt_src = NULL, *filt_out = NULL;
     AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
     graph->scale_sws_opts = av_strdup(sws_flags_str);
 
-    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
-    if (avfilter_open(&filt_out, &ffsink      ,  "out") < 0) goto the_end;
+    if (avfilter_open(&filt_src, &ffsrc,  "src") < 0) goto the_end;
+    if (avfilter_open(&filt_out, &ffsink, "out") < 0) goto the_end;
 
-    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
+    if(avfilter_init_filter(filt_src, NULL, &ffsrc_ctx))     goto the_end;
     if(avfilter_init_filter(filt_out, NULL, &ffsink_ctx))    goto the_end;
 
 
-- 
1.7.1
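
For completeness, a hypothetical consumer loop (again not part of the patch)
showing how filtered frames come back out of the ffsink end through
get_filtered_video_frame() declared in cmdutils.h, assuming it keeps the
behaviour visible above (positive on success, negative on error/end of stream):

    static void drain_filtered_frames(AVFilterContext *filt_out)
    {
        AVFrame *frame = avcodec_alloc_frame();
        AVFilterBufferRef *picref = NULL;
        AVRational tb;

        /* get_filtered_video_frame() fills frame and hands back the buffer
         * reference carrying pts/pos, expressed in the timebase tb */
        while (get_filtered_video_frame(filt_out, frame, &picref, &tb) >= 0) {
            if (picref) {
                /* ... display or encode the frame here ... */
                avfilter_unref_buffer(picref);
                picref = NULL;
            }
        }
        av_free(frame);
    }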

