[FFmpeg-cvslog] lavfi/overlay: add dynamic expression evaluation support

Stefano Sabatini git at videolan.org
Wed Apr 10 13:20:50 CEST 2013


ffmpeg | branch: master | Stefano Sabatini <stefasab at gmail.com> | Tue Feb 19 20:10:02 2013 +0100| [cc3edd99a678b9ecfe70cb3632029cb8db40e7f9] | committer: Stefano Sabatini

lavfi/overlay: add dynamic expression evaluation support

Add support for dynamic x, y expressions evaluation.

Also add support for an evaluation mode which allows disabling per-frame
evaluation, so that there is no speed loss in case the expression does not
depend on frame variables.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=cc3edd99a678b9ecfe70cb3632029cb8db40e7f9
---

 doc/filters.texi         |   79 +++++++++++++++++++++++-------
 libavfilter/version.h    |    2 +-
 libavfilter/vf_overlay.c |  120 +++++++++++++++++++++++++++++++++-------------
 3 files changed, 150 insertions(+), 51 deletions(-)

diff --git a/doc/filters.texi b/doc/filters.texi
index 512f943..4668b7f 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -4260,26 +4260,31 @@ arguments are interpreted according to the syntax @var{x}:@var{y}.
 A description of the accepted options follows.
 
 @table @option
-@item x, y
+@item x
+@item y
 Set the expression for the x and y coordinates of the overlayed video
-on the main video. Default value is 0.
-
-The @var{x} and @var{y} expressions can contain the following
-parameters:
-@table @option
-@item main_w, main_h
-main input width and height
+on the main video. Default value is "0" for both expressions. In case
+the expression is invalid, it is set to a huge value (meaning that the
+overlay will not be displayed within the output visible area).
 
-@item W, H
-same as @var{main_w} and @var{main_h}
+@item eval
+Set when the expressions for @option{x} and @option{y} are evaluated.
 
-@item overlay_w, overlay_h
-overlay input width and height
+It accepts the following values:
+@table @samp
+@item init
+only evaluate expressions once during the filter initialization
 
-@item w, h
-same as @var{overlay_w} and @var{overlay_h}
+@item frame
+evaluate expressions for each incoming frame
 @end table
 
+Default value is @samp{frame}.
+
+@item shortest
+If set to 1, force the output to terminate when the shortest input
+terminates. Default value is 0.
+
 @item format
 Set the format for the output video.
 
@@ -4301,12 +4306,45 @@ Default value is @samp{yuv420}.
 If set to 1, force the filter to accept inputs in the RGB
 color space. Default value is 0. This option is deprecated, use
 @option{format} instead.
+@end table
 
-@item shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
+The @option{x} and @option{y} expressions can contain the following
+parameters.
+
+@table @option
+@item main_w, W
+@item main_h, H
+main input width and height
+
+@item overlay_w, w
+@item overlay_h, h
+overlay input width and height
+
+@item x
+@item y
+the computed values for @var{x} and @var{y}. They are evaluated for
+each new frame.
+
+@item hsub
+@item vsub
+horizontal and vertical chroma subsample values of the output
+format. For example for the pixel format "yuv422p" @var{hsub} is 2 and
+@var{vsub} is 1.
+
+@item n
+the number of the input frame, starting from 0
+
+@item pos
+the position in the file of the input frame, NAN if unknown
+
+@item t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
 @end table
 
+Note that the @var{n}, @var{pos}, @var{t} variables are available only
+when evaluation is done @emph{per frame}, and will evaluate to NAN
+when @option{eval} is set to @samp{init}.
+
 Be aware that frames are taken from each input video in timestamp
 order, hence, if their initial timestamps differ, it is a good idea
 to pass the two inputs through a @var{setpts=PTS-STARTPTS} filter to
@@ -4365,6 +4403,13 @@ ffplay input.avi -vf 'split[b], pad=iw*2[src], [b]deshake, [src]overlay=w'
 @end example
 
 @item
+Make a sliding overlay appearing from the left to the right top part of the
+screen starting from time 2:
+@example
+overlay=x='if(gte(t,2), -w+(t-2)*20, NAN)':y=0
+@end example
+
+@item
 Compose output by putting two input videos side to side:
 @example
 ffmpeg -i left.avi -i right.avi -filter_complex "
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 2a70671..7e9784e 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
 
 #define LIBAVFILTER_VERSION_MAJOR  3
 #define LIBAVFILTER_VERSION_MINOR  50
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MICRO 101
 
 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
                                                LIBAVFILTER_VERSION_MINOR, \
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 0dcd0b8..62bdd4e 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -47,6 +47,13 @@ static const char *const var_names[] = {
     "main_h",    "H", ///< height of the main    video
     "overlay_w", "w", ///< width  of the overlay video
     "overlay_h", "h", ///< height of the overlay video
+    "hsub",
+    "vsub",
+    "x",
+    "y",
+    "n",            ///< number of frame
+    "pos",          ///< position in the file
+    "t",            ///< timestamp expressed in seconds
     NULL
 };
 
@@ -55,6 +62,13 @@ enum var_name {
     VAR_MAIN_H,    VAR_MH,
     VAR_OVERLAY_W, VAR_OW,
     VAR_OVERLAY_H, VAR_OH,
+    VAR_HSUB,
+    VAR_VSUB,
+    VAR_X,
+    VAR_Y,
+    VAR_N,
+    VAR_POS,
+    VAR_T,
     VAR_VARS_NB
 };
 
@@ -84,6 +98,7 @@ typedef struct {
     uint8_t overlay_rgba_map[4];
     uint8_t overlay_has_alpha;
     enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format;
+    enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
 
     AVFrame *overpicref;
     struct FFBufQueue queue_main;
@@ -94,7 +109,9 @@ typedef struct {
     int hsub, vsub;             ///< chroma subsampling values
     int shortest;               ///< terminate stream when the shortest input terminates
 
+    double var_values[VAR_VARS_NB];
     char *x_expr, *y_expr;
+    AVExpr *x_pexpr, *y_pexpr;
 } OverlayContext;
 
 #define OFFSET(x) offsetof(OverlayContext, x)
@@ -103,6 +120,11 @@ typedef struct {
 static const AVOption overlay_options[] = {
     { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
     { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+
+    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+    { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+    { "frame", "eval expressions per-frame",   0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+
     { "rgb", "force packed RGB in input and output (deprecated)", OFFSET(allow_packed_rgb), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
     { "shortest", "force termination when the shortest input terminates", OFFSET(shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
 
@@ -135,6 +157,8 @@ static av_cold void uninit(AVFilterContext *ctx)
     av_frame_free(&over->overpicref);
     ff_bufqueue_discard_all(&over->queue_main);
     ff_bufqueue_discard_all(&over->queue_over);
+    av_expr_free(over->x_pexpr); over->x_pexpr = NULL;
+    av_expr_free(over->y_pexpr); over->y_pexpr = NULL;
 }
 
 static int query_formats(AVFilterContext *ctx)
@@ -217,12 +241,29 @@ static int config_input_main(AVFilterLink *inlink)
     return 0;
 }
 
+static inline int normalize_xy(double d, int chroma_sub)
+{
+    if (isnan(d))
+        return INT_MAX;
+    return (int)d & ~((1 << chroma_sub) - 1);
+}
+
+static void eval_expr(AVFilterContext *ctx)
+{
+    OverlayContext  *over = ctx->priv;
+
+    over->var_values[VAR_X] = av_expr_eval(over->x_pexpr, over->var_values, NULL);
+    over->var_values[VAR_Y] = av_expr_eval(over->y_pexpr, over->var_values, NULL);
+    over->var_values[VAR_X] = av_expr_eval(over->x_pexpr, over->var_values, NULL);
+    over->x = normalize_xy(over->var_values[VAR_X], over->hsub);
+    over->y = normalize_xy(over->var_values[VAR_Y], over->vsub);
+}
+
 static int config_input_overlay(AVFilterLink *inlink)
 {
     AVFilterContext *ctx  = inlink->dst;
     OverlayContext  *over = inlink->dst->priv;
     char *expr;
-    double var_values[VAR_VARS_NB], res;
     int ret;
     const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
@@ -230,53 +271,49 @@ static int config_input_overlay(AVFilterLink *inlink)
 
     /* Finish the configuration by evaluating the expressions
        now when both inputs are configured. */
-    var_values[VAR_MAIN_W   ] = var_values[VAR_MW] = ctx->inputs[MAIN   ]->w;
-    var_values[VAR_MAIN_H   ] = var_values[VAR_MH] = ctx->inputs[MAIN   ]->h;
-    var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
-    var_values[VAR_OVERLAY_H] = var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
-
-    if ((ret = av_expr_parse_and_eval(&res, (expr = over->x_expr), var_names, var_values,
-                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
-        goto fail;
-    over->x = res;
-    if ((ret = av_expr_parse_and_eval(&res, (expr = over->y_expr), var_names, var_values,
-                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)))
+    over->var_values[VAR_MAIN_W   ] = over->var_values[VAR_MW] = ctx->inputs[MAIN   ]->w;
+    over->var_values[VAR_MAIN_H   ] = over->var_values[VAR_MH] = ctx->inputs[MAIN   ]->h;
+    over->var_values[VAR_OVERLAY_W] = over->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
+    over->var_values[VAR_OVERLAY_H] = over->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
+    over->var_values[VAR_HSUB]  = 1<<pix_desc->log2_chroma_w;
+    over->var_values[VAR_VSUB]  = 1<<pix_desc->log2_chroma_h;
+    over->var_values[VAR_X]     = NAN;
+    over->var_values[VAR_Y]     = NAN;
+    over->var_values[VAR_N]     = 0;
+    over->var_values[VAR_T]     = NAN;
+    over->var_values[VAR_POS]   = NAN;
+
+    expr = over->x_expr;
+    if ((ret = av_expr_parse(&over->x_pexpr, expr, var_names,
+                             NULL, NULL, NULL, NULL, 0, ctx)) < 0)
         goto fail;
-    over->y = res;
-    /* x may depend on y */
-    if ((ret = av_expr_parse_and_eval(&res, (expr = over->x_expr), var_names, var_values,
-                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+    expr = over->y_expr;
+    if ((ret = av_expr_parse(&over->y_pexpr, expr, var_names,
+                             NULL, NULL, NULL, NULL, 0, ctx)) < 0)
         goto fail;
-    over->x = res;
 
     over->overlay_is_packed_rgb =
         ff_fill_rgba_map(over->overlay_rgba_map, inlink->format) >= 0;
     over->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
 
+    if (over->eval_mode == EVAL_MODE_INIT) {
+        eval_expr(ctx);
+        av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+               over->var_values[VAR_X], over->x,
+               over->var_values[VAR_Y], over->y);
+    }
+
     av_log(ctx, AV_LOG_VERBOSE,
-           "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n",
+           "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
            ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
            av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
-           over->x, over->y,
            ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
            av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
-
-    if (over->x < 0 || over->y < 0 ||
-        over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
-        over->y + var_values[VAR_OVERLAY_H] > var_values[VAR_MAIN_H]) {
-        av_log(ctx, AV_LOG_WARNING,
-               "Overlay area with coordinates x1:%d y1:%d x2:%d y2:%d "
-               "is not completely contained within the output with size %dx%d\n",
-               over->x, over->y,
-               (int)(over->x + var_values[VAR_OVERLAY_W]),
-               (int)(over->y + var_values[VAR_OVERLAY_H]),
-               (int)var_values[VAR_MAIN_W], (int)var_values[VAR_MAIN_H]);
-    }
     return 0;
 
 fail:
     av_log(NULL, AV_LOG_ERROR,
-           "Error when evaluating the expression '%s'\n", expr);
+           "Error when parsing the expression '%s'\n", expr);
     return ret;
 }
 
@@ -495,6 +532,7 @@ static void blend_image(AVFilterContext *ctx,
 static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
 {
     OverlayContext *over = ctx->priv;
+    AVFilterLink *inlink = ctx->inputs[0];
     AVFrame *next_overpic;
     int ret;
 
@@ -526,8 +564,24 @@ static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
                 av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &ctx->inputs[OVERLAY]->time_base));
     av_dlog(ctx, "\n");
 
-    if (over->overpicref)
+    if (over->overpicref) {
+        if (over->eval_mode == EVAL_MODE_FRAME) {
+            int64_t pos = av_frame_get_pkt_pos(mainpic);
+
+            over->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
+                NAN : mainpic->pts * av_q2d(inlink->time_base);
+            over->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+            eval_expr(ctx);
+            av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
+                   over->var_values[VAR_N], over->var_values[VAR_T], over->var_values[VAR_POS],
+                   over->var_values[VAR_X], over->x,
+                   over->var_values[VAR_Y], over->y);
+        }
         blend_image(ctx, mainpic, over->overpicref, over->x, over->y);
+
+        over->var_values[VAR_N] += 1.0;
+    }
     ret = ff_filter_frame(ctx->outputs[0], mainpic);
     av_assert1(ret != AVERROR(EAGAIN));
     over->frame_requested = 0;



More information about the ffmpeg-cvslog mailing list