[FFmpeg-devel] [PATCH 1/2] lavfi: add astreamsync audio filter.

Nicolas George nicolas.george at normalesup.org
Fri Dec 23 14:17:13 CET 2011


Signed-off-by: Nicolas George <nicolas.george@normalesup.org>
---
 Changelog                    |    1 +
 doc/filters.texi             |   29 ++++++
 libavfilter/Makefile         |    1 +
 libavfilter/af_astreamsync.c |  204 ++++++++++++++++++++++++++++++++++++++++++
 libavfilter/allfilters.c     |    1 +
 5 files changed, 236 insertions(+), 0 deletions(-)
 create mode 100644 libavfilter/af_astreamsync.c

diff --git a/Changelog b/Changelog
index b4e7b58..7044b4c 100644
--- a/Changelog
+++ b/Changelog
@@ -7,6 +7,7 @@ version next:
 - SBaGen (SBG) binaural beats script demuxer
 - OpenMG Audio muxer
 - SMJPEG demuxer
+- astreamsync audio filter
 
 
 version 0.9:
diff --git a/doc/filters.texi b/doc/filters.texi
index 699e0c1..ce4a74f 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -224,6 +224,35 @@ expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3} @var{c4} @var{c5}
 @var{c6} @var{c7}]"
 @end table
 
+@section astreamsync
+
+Forward two audio streams and control the order in which the buffers are
+forwarded.
+
+The argument to the filter is an expression deciding which stream should be
+forwarded next: if the result is negative, the first stream is forwarded; if
+the result is positive or zero, the second stream is forwarded. It can use
+the following variables:
+
+@table @var
+@item b1 b2
+number of buffers forwarded so far on each stream
+@item s1 s2
+number of samples forwarded so far on each stream
+@item t1 t2
+current timestamp of each stream
+@end table
+
+The default value is @code{t1-t2}, which means to always forward the stream
+with the smaller timestamp.
+
+Example: stress-test @code{amerge} by randomly sending buffers on the wrong
+input, while avoiding excessive desynchronization:
+@example
+amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+@end example
+
 @section earwax
 
 Make audio easier to listen to on headphones.
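
For readers unfamiliar with libavutil's expression evaluator, here is a minimal
standalone sketch (not part of the patch; the sample variable values are made
up) of how an expression such as "t1-t2" can be parsed and evaluated against
the six variables documented above, and how its sign picks the next stream in
the way the new section describes.

#include <stdio.h>
#include <libavutil/eval.h>

/* Illustrative only, not part of the patch: parse the default expression
 * and map its sign to a stream index, mirroring the documented rule
 * (negative -> first stream, zero or positive -> second stream). */

static const char * const var_names[] = {
    "b1", "b2", "s1", "s2", "t1", "t2", NULL
};

int main(void)
{
    AVExpr *expr = NULL;
    /* b1 b2  s1    s2    t1    t2  -- sample values, t1 slightly ahead */
    double var_values[] = { 3, 3, 4096, 4096, 1.50, 1.48 };
    int next, ret;

    ret = av_expr_parse(&expr, "t1-t2", var_names,
                        NULL, NULL, NULL, NULL, 0, NULL);
    if (ret < 0)
        return 1;
    next = av_expr_eval(expr, var_values, NULL) >= 0; /* 0 or 1 */
    printf("forward input %d next\n", next + 1);      /* here: input 2 */
    av_expr_free(expr);
    return 0;
}

Built against an installed FFmpeg with something like "gcc demo.c -lavutil -lm",
it prints which input the expression would forward next.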
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 9977753..555767b 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -29,6 +29,7 @@ OBJS-$(CONFIG_AFORMAT_FILTER)                += af_aformat.o
 OBJS-$(CONFIG_ANULL_FILTER)                  += af_anull.o
 OBJS-$(CONFIG_ARESAMPLE_FILTER)              += af_aresample.o
 OBJS-$(CONFIG_ASHOWINFO_FILTER)              += af_ashowinfo.o
+OBJS-$(CONFIG_ASTREAMSYNC_FILTER)            += af_astreamsync.o
 OBJS-$(CONFIG_EARWAX_FILTER)                 += af_earwax.o
 OBJS-$(CONFIG_PAN_FILTER)                    += af_pan.o
 OBJS-$(CONFIG_VOLUME_FILTER)                 += af_volume.o
diff --git a/libavfilter/af_astreamsync.c b/libavfilter/af_astreamsync.c
new file mode 100644
index 0000000..66a8b63
--- /dev/null
+++ b/libavfilter/af_astreamsync.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Stream (de)synchronization filter
+ */
+
+#include <stdlib.h>
+#include "libavcodec/avcodec.h"
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libswresample/swresample.h" // only for SWR_CH_MAX
+#include "avfilter.h"
+#include "internal.h"
+
+#define QUEUE_SIZE 16
+
+static const char * const var_names[] = {
+    "b1", "b2",
+    "s1", "s2",
+    "t1", "t2",
+    NULL
+};
+
+enum var_name {
+    VAR_B1, VAR_B2,
+    VAR_S1, VAR_S2,
+    VAR_T1, VAR_T2,
+    VAR_NB
+};
+
+struct astreamsync_context {
+    AVExpr *expr;
+    double var_values[VAR_NB];
+    struct {
+        AVFilterBufferRef *buf[QUEUE_SIZE];
+        unsigned tail, nb;
+    } queue[2];
+    int req[2];
+    int next_out;
+    int eof;
+};
+
+static const char *default_expr = "t1-t2";
+
+static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
+{
+    struct astreamsync_context *as = ctx->priv;
+    const char *expr = args0 ? args0 : default_expr;
+    int r, i;
+
+    r = av_expr_parse(&as->expr, expr, var_names,
+                      NULL, NULL, NULL, NULL, 0, ctx);
+    if (r < 0) {
+        av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", expr);
+        return r;
+    }
+    for (i = 0; i < 42; i++)
+        av_expr_eval(as->expr, as->var_values, NULL); /* exercise the PRNG */
+    return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+    int i;
+    AVFilterFormats *formats;
+
+    for (i = 0; i < 2; i++) {
+        formats = ctx->inputs[i]->in_formats;
+        avfilter_formats_ref(formats, &ctx->inputs[i]->out_formats);
+        avfilter_formats_ref(formats, &ctx->outputs[i]->in_formats);
+        formats = ctx->inputs[i]->in_packing;
+        avfilter_formats_ref(formats, &ctx->inputs[i]->out_packing);
+        avfilter_formats_ref(formats, &ctx->outputs[i]->in_packing);
+        formats = ctx->inputs[i]->in_chlayouts;
+        avfilter_formats_ref(formats, &ctx->inputs[i]->out_chlayouts);
+        avfilter_formats_ref(formats, &ctx->outputs[i]->in_chlayouts);
+    }
+    return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+    return 0;
+}
+
+static void send_out(AVFilterContext *ctx, int id)
+{
+    struct astreamsync_context *as = ctx->priv;
+    AVFilterBufferRef *buf = as->queue[id].buf[as->queue[id].tail];
+
+    as->queue[id].buf[as->queue[id].tail] = NULL;
+    as->var_values[VAR_B1 + id]++;
+    as->var_values[VAR_S1 + id] += buf->audio->nb_samples;
+    if (buf->pts != AV_NOPTS_VALUE)
+        as->var_values[VAR_T1 + id] = av_q2d(ctx->inputs[id]->time_base) *
+                                      buf->pts;
+    as->var_values[VAR_T1 + id] += buf->audio->nb_samples /
+                                   (double)ctx->inputs[id]->sample_rate;
+    avfilter_filter_samples(ctx->outputs[id], buf);
+    as->queue[id].nb--;
+    as->queue[id].tail = (as->queue[id].tail + 1) % QUEUE_SIZE;
+    if (as->req[id])
+        as->req[id]--;
+}
+
+static void send_next(AVFilterContext *ctx)
+{
+    struct astreamsync_context *as = ctx->priv;
+    int i;
+
+    while (1) {
+        if (!as->queue[as->next_out].nb)
+            break;
+        send_out(ctx, as->next_out);
+        if (!as->eof)
+            as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
+    }
+    for (i = 0; i < 2; i++)
+        if (as->queue[i].nb == QUEUE_SIZE)
+            send_out(ctx, i);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    struct astreamsync_context *as = ctx->priv;
+    int id = outlink == ctx->outputs[1];
+
+    as->req[id]++;
+    while (as->req[id] && !(as->eof & (1 << id))) {
+        if (as->queue[as->next_out].nb) {
+            send_next(ctx);
+        } else {
+            as->eof |= 1 << as->next_out;
+            avfilter_request_frame(ctx->inputs[as->next_out]);
+            if (as->eof & (1 << as->next_out))
+                as->next_out = !as->next_out;
+        }
+    }
+    return 0;
+}
+
+static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+{
+    AVFilterContext *ctx = inlink->dst;
+    struct astreamsync_context *as = ctx->priv;
+    int id = inlink == ctx->inputs[1];
+
+    as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
+        insamples;
+    as->eof &= ~(1 << id);
+    send_next(ctx);
+}
+
+AVFilter avfilter_af_astreamsync = {
+    .name          = "astreamsync",
+    .description   = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
+                                          "in a configurable order"),
+    .priv_size     = sizeof(struct astreamsync_context),
+    .init          = init,
+    .query_formats = query_formats,
+
+    .inputs    = (const AVFilterPad[]) {
+        { .name             = "in1",
+          .type             = AVMEDIA_TYPE_AUDIO,
+          .filter_samples   = filter_samples,
+          .min_perms        = AV_PERM_READ, },
+        { .name             = "in2",
+          .type             = AVMEDIA_TYPE_AUDIO,
+          .filter_samples   = filter_samples,
+          .min_perms        = AV_PERM_READ, },
+        { .name = NULL }
+    },
+    .outputs   = (const AVFilterPad[]) {
+        { .name             = "out1",
+          .type             = AVMEDIA_TYPE_AUDIO,
+          .config_props     = config_output,
+          .request_frame    = request_frame, },
+        { .name             = "out2",
+          .type             = AVMEDIA_TYPE_AUDIO,
+          .config_props     = config_output,
+          .request_frame    = request_frame, },
+        { .name = NULL }
+    },
+};
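
As a companion to the buffer bookkeeping in filter_samples() and send_out()
above, here is a minimal standalone sketch (not part of the patch; struct and
helper names are illustrative only) of the same fixed-size ring-queue
arithmetic: a new element is stored at slot (tail + nb) % QUEUE_SIZE and the
oldest one is drained from tail.

#include <stdio.h>

#define QUEUE_SIZE 16

/* Illustrative only: fixed-size FIFO ring buffer, same indexing as the
 * filter's per-input queue. */
struct ring {
    int buf[QUEUE_SIZE];
    unsigned tail, nb;
};

static void push(struct ring *q, int v)
{
    q->buf[(q->tail + q->nb++) % QUEUE_SIZE] = v;   /* enqueue newest */
}

static int pop(struct ring *q)
{
    int v = q->buf[q->tail];                        /* oldest element */
    q->tail = (q->tail + 1) % QUEUE_SIZE;
    q->nb--;
    return v;
}

int main(void)
{
    struct ring q = { { 0 }, 0, 0 };
    int i;

    for (i = 1; i <= 3; i++)
        push(&q, i);
    while (q.nb)
        printf("%d\n", pop(&q));                    /* prints 1, 2, 3 */
    return 0;
}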
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 77a8a48..bf17a7d 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -39,6 +39,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER (ANULL,       anull,       af);
     REGISTER_FILTER (ARESAMPLE,   aresample,   af);
     REGISTER_FILTER (ASHOWINFO,   ashowinfo,   af);
+    REGISTER_FILTER (ASTREAMSYNC, astreamsync, af);
     REGISTER_FILTER (EARWAX,      earwax,      af);
     REGISTER_FILTER (PAN,         pan,         af);
     REGISTER_FILTER (VOLUME,      volume,      af);
-- 
1.7.2.5


