[FFmpeg-devel] [PATCH] lavfi/WIP: add haldclut filter.

Clément Bœsch ubitux at gmail.com
Sat May 25 01:17:35 CEST 2013


---

OK, so this version is working, but the multi-input code is ugly as
hell: I basically copy/pasted the ~150 lines of code from vf_overlay
verbatim and only slightly changed one chunk. I have no idea how I am
supposed to refactor this (yup, I'm calling for help).

Anyway, the patch is also missing some documentation, so here are a few
demos and hints on how to use it:

  # Generate a cool Hald CLUT stream:
  # preview for the lazy: http://lucy.pkh.me/clut.webm
  ./ffmpeg -f lavfi -i "haldclutsrc=8,hue='H=2*PI*t':'s=sin(2*PI*t)+1'" -t 5 -c:v ffv1 clut.nut

  # Then apply it to some random stream:
  # preview for the lazy: http://lucy.pkh.me/mandelclut.webm
  ./ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 5 mandelclut.mkv
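
  # A still Hald CLUT picture should also work as the second input
  # (untested with this WIP code; clut.png below is just a placeholder for
  # such an image, and repeatlast defaults to 1 so the single CLUT frame
  # keeps being applied until the main stream ends):
  ./ffmpeg -f lavfi -i mandelbrot -i clut.png -filter_complex '[0][1] haldclut' -t 5 out.mkv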

Feel free to test on github/ubitux/ffmpeg/haldclut (includes haldclutsrc
and haldclut).
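
For the record, here is the size convention the clut input is expected to
follow; this just restates the check in config_clut, the helper name is mine:

  /* A Hald CLUT picture of level N is a square of N*N*N pixels per side and
   * encodes a 3D LUT with N*N entries per component, e.g. a 512x512 picture
   * is level 8 and gives a 64x64x64 LUT. */
  static int hald_level(int width)
  {
      int level = 1;
      while (level * level * level < width)
          level++;
      return level * level * level == width ? level : -1;
  }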

Comments welcome!
---
 libavfilter/Makefile     |   1 +
 libavfilter/allfilters.c |   1 +
 libavfilter/vf_lut3d.c   | 377 +++++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 365 insertions(+), 14 deletions(-)

diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index f53afb2..2f19024 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -135,6 +135,7 @@ OBJS-$(CONFIG_FPS_FILTER)                    += vf_fps.o
 OBJS-$(CONFIG_FREI0R_FILTER)                 += vf_frei0r.o
 OBJS-$(CONFIG_GEQ_FILTER)                    += vf_geq.o
 OBJS-$(CONFIG_GRADFUN_FILTER)                += vf_gradfun.o
+OBJS-$(CONFIG_HALDCLUT_FILTER)               += vf_lut3d.o
 OBJS-$(CONFIG_HFLIP_FILTER)                  += vf_hflip.o
 OBJS-$(CONFIG_HISTEQ_FILTER)                 += vf_histeq.o
 OBJS-$(CONFIG_HISTOGRAM_FILTER)              += vf_histogram.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 075ecca..58bffc7 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -133,6 +133,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER(FREI0R,         frei0r,         vf);
     REGISTER_FILTER(GEQ,            geq,            vf);
     REGISTER_FILTER(GRADFUN,        gradfun,        vf);
+    REGISTER_FILTER(HALDCLUT,       haldclut,       vf);
     REGISTER_FILTER(HFLIP,          hflip,          vf);
     REGISTER_FILTER(HISTEQ,         histeq,         vf);
     REGISTER_FILTER(HISTOGRAM,      histogram,      vf);
diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c
index 21206ca..31bcaac 100644
--- a/libavfilter/vf_lut3d.c
+++ b/libavfilter/vf_lut3d.c
@@ -29,7 +29,9 @@
 #include "libavutil/avassert.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/avstring.h"
+#include "libavutil/timestamp.h"
 #include "avfilter.h"
+#include "bufferqueue.h"
 #include "drawutils.h"
 #include "formats.h"
 #include "internal.h"
@@ -51,7 +53,9 @@ struct rgbvec {
     float r, g, b;
 };
 
-#define MAX_LEVEL 36
+/* 3D LUTs don't often go up to level 32, but it is common to have a Hald CLUT
+ * of 512x512 (i.e. a 64x64x64 3D LUT) */
+#define MAX_LEVEL 64
 
 typedef struct LUT3DContext {
     const AVClass *class;
@@ -64,20 +68,29 @@ typedef struct LUT3DContext {
     struct rgbvec (*interp_16)(const struct LUT3DContext*, uint16_t, uint16_t, uint16_t);
     struct rgbvec lut[MAX_LEVEL][MAX_LEVEL][MAX_LEVEL];
     int lutsize;
+#if CONFIG_HALDCLUT_FILTER
+    uint8_t clut_rgba_map[4];
+    int clut_step;
+    int clut_is16bit;
+    int clut_eof;
+    int repeatlast;
+    int shortest;
+    int frame_requested;
+
+    AVFrame *clut_frame;
+    struct FFBufQueue queue_main;
+    struct FFBufQueue queue_clut;
+#endif
 } LUT3DContext;
 
 #define OFFSET(x) offsetof(LUT3DContext, x)
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption lut3d_options[] = {
-    { "file",   "set 3D LUT file name", OFFSET(file), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
-    { "interp", "select interpolation mode", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERPOLATE_TETRAHEDRAL}, 0, NB_INTERP_MODE-1, FLAGS, "interp_mode" },
-        { "nearest",     "use values from the nearest defined points",            0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_NEAREST},     INT_MIN, INT_MAX, FLAGS, "interp_mode" },
-        { "trilinear",   "interpolate values using the 8 points defining a cube", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TRILINEAR},   INT_MIN, INT_MAX, FLAGS, "interp_mode" },
-        { "tetrahedral", "interpolate values using a tetrahedron",                0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TETRAHEDRAL}, INT_MIN, INT_MAX, FLAGS, "interp_mode" },
+#define COMMON_OPTIONS \
+    { "interp", "select interpolation mode", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERPOLATE_TETRAHEDRAL}, 0, NB_INTERP_MODE-1, FLAGS, "interp_mode" }, \
+        { "nearest",     "use values from the nearest defined points",            0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_NEAREST},     INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+        { "trilinear",   "interpolate values using the 8 points defining a cube", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TRILINEAR},   INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+        { "tetrahedral", "interpolate values using a tetrahedron",                0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TETRAHEDRAL}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
     { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(lut3d);
 
 static inline float lerpf(float v0, float v1, float f)
 {
@@ -394,7 +407,9 @@ static void set_identity_matrix(LUT3DContext *lut3d, int size)
     }
 }
 
-static av_cold int init(AVFilterContext *ctx)
+#if CONFIG_LUT3D_FILTER
+/* TODO: move to the CONFIG_LUT3D_FILTER definition scope at the bottom */
+static av_cold int lut3d_init(AVFilterContext *ctx)
 {
     int ret;
     FILE *f;
@@ -444,6 +459,7 @@ end:
     fclose(f);
     return ret;
 }
+#endif
 
 static int query_formats(AVFilterContext *ctx)
 {
@@ -513,7 +529,7 @@ static int config_input(AVFilterLink *inlink)
     }                                                                                               \
 } while (0)
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static AVFrame *apply_lut(AVFilterLink *inlink, AVFrame *in)
 {
     int x, y, direct = 0;
     AVFilterContext *ctx = inlink->dst;
@@ -533,7 +549,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
         if (!out) {
             av_frame_free(&in);
-            return AVERROR(ENOMEM);
+            return NULL;
         }
         av_frame_copy_props(out, in);
     }
@@ -544,9 +560,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     if (!direct)
         av_frame_free(&in);
 
+    return out;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterLink *outlink = inlink->dst->outputs[0];
+    AVFrame *out = apply_lut(inlink, in);
+    if (!out)
+        return AVERROR(ENOMEM);
     return ff_filter_frame(outlink, out);
 }
 
+#if CONFIG_LUT3D_FILTER
+static const AVOption lut3d_options[] = {
+    { "file",   "set 3D LUT file name", OFFSET(file), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+    COMMON_OPTIONS
+};
+
+AVFILTER_DEFINE_CLASS(lut3d);
+
 static const AVFilterPad lut3d_inputs[] = {
     {
         .name         = "default",
@@ -569,10 +602,326 @@ AVFilter avfilter_vf_lut3d = {
     .name          = "lut3d",
     .description   = NULL_IF_CONFIG_SMALL("Adjust colors using a 3D LUT."),
     .priv_size     = sizeof(LUT3DContext),
-    .init          = init,
+    .init          = lut3d_init,
     .query_formats = query_formats,
     .inputs        = lut3d_inputs,
     .outputs       = lut3d_outputs,
     .priv_class    = &lut3d_class,
     .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
 };
+#endif
+
+#if CONFIG_HALDCLUT_FILTER
+
+#define MAIN 0
+#define CLUT 1
+
+static void update_clut(LUT3DContext *lut3d, const AVFrame *frame)
+{
+    const uint8_t *data = frame->data[0];
+    const int linesize  = frame->linesize[0];
+    const int w = frame->width;
+    const int step = lut3d->clut_step;
+    const uint8_t *rgba_map = lut3d->clut_rgba_map;
+    const int level = lut3d->lutsize;
+
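+/* Load the 3D LUT from the CLUT frame: pixels are read in raster order
+ * (x first, then y), one pixel per LUT entry, with the innermost index
+ * varying fastest. */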
+#define LOAD_CLUT(nbits) do {                                           \
+    int i, j, k, x = 0, y = 0;                                          \
+                                                                        \
+    for (k = 0; k < level; k++) {                                       \
+        for (j = 0; j < level; j++) {                                   \
+            for (i = 0; i < level; i++) {                               \
+                const uint##nbits##_t *src = (const uint##nbits##_t *)  \
+                    (data + y*linesize + x*step);                       \
+                struct rgbvec *vec = &lut3d->lut[k][j][i];              \
+                vec->r = src[rgba_map[0]] / (float)((1<<(nbits)) - 1);  \
+                vec->g = src[rgba_map[1]] / (float)((1<<(nbits)) - 1);  \
+                vec->b = src[rgba_map[2]] / (float)((1<<(nbits)) - 1);  \
+                if (++x == w) {                                         \
+                    x = 0;                                              \
+                    y++;                                                \
+                }                                                       \
+            }                                                           \
+        }                                                               \
+    }                                                                   \
+} while (0)
+
+    if (!lut3d->clut_is16bit) LOAD_CLUT(8);
+    else                      LOAD_CLUT(16);
+}
+
+
+/*******************************************************************************
+ *  FIXME following code is copy pasted from vf_overlay and needs refactoring  *
+ *******************************************************************************/
+
+/* HACK HACK HACK */
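+/* The #defines below rename vf_overlay's two-input scheduling code so that
+ * the CLUT stream plays the role of the overlay stream: main frames are
+ * queued until a CLUT frame covering their timestamp is available, then the
+ * LUT is reloaded from that frame and applied to the main frame. */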
+#define OverlayContext LUT3DContext
+#define OVERLAY CLUT
+#define queue_over queue_clut
+#define overpicref clut_frame
+#define filter_frame_over filter_frame_clut
+#define blend_image apply_clut
+#define overlay_eof clut_eof
+
+static int config_output(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+
+    outlink->w = ctx->inputs[MAIN]->w;
+    outlink->h = ctx->inputs[MAIN]->h;
+    outlink->time_base = ctx->inputs[MAIN]->time_base;
+
+    return 0;
+}
+
+static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
+{
+    OverlayContext *s = ctx->priv;
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFrame *next_overpic;
+    int ret;
+
+    /* Discard obsolete overlay frames: if there is a next overlay frame with pts
+     * before the main frame, we can drop the current overlay. */
+    while (1) {
+        next_overpic = ff_bufqueue_peek(&s->queue_over, 0);
+        if (!next_overpic && s->overlay_eof && !s->repeatlast) {
+            av_frame_free(&s->overpicref);
+            break;
+        }
+        if (!next_overpic || av_compare_ts(next_overpic->pts, ctx->inputs[OVERLAY]->time_base,
+                                           mainpic->pts     , ctx->inputs[MAIN]->time_base) > 0)
+            break;
+        ff_bufqueue_get(&s->queue_over);
+        av_frame_free(&s->overpicref);
+        s->overpicref = next_overpic;
+    }
+
+    /* If there is no next frame and no EOF and the overlay frame is before
+     * the main frame, we can not know yet if it will be superseded. */
+    if (!s->queue_over.available && !s->overlay_eof &&
+        (!s->overpicref || av_compare_ts(s->overpicref->pts, ctx->inputs[OVERLAY]->time_base,
+                                            mainpic->pts         , ctx->inputs[MAIN]->time_base) < 0))
+        return AVERROR(EAGAIN);
+
+    /* At this point, we know that the current overlay frame extends to the
+     * time of the main frame. */
+    av_dlog(ctx, "main_pts:%s main_pts_time:%s",
+            av_ts2str(mainpic->pts), av_ts2timestr(mainpic->pts, &ctx->inputs[MAIN]->time_base));
+    if (s->overpicref)
+        av_dlog(ctx, " over_pts:%s over_pts_time:%s",
+                av_ts2str(s->overpicref->pts), av_ts2timestr(s->overpicref->pts, &ctx->inputs[OVERLAY]->time_base));
+    av_dlog(ctx, "\n");
+
+    if (s->overpicref) {
+#if 0
+        if (s->eval_mode == EVAL_MODE_FRAME) {
+            int64_t pos = av_frame_get_pkt_pos(mainpic);
+
+            s->var_values[VAR_N] = inlink->frame_count;
+            s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
+                NAN : mainpic->pts * av_q2d(inlink->time_base);
+            s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+            eval_expr(ctx);
+            av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
+                   s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+                   s->var_values[VAR_X], s->x,
+                   s->var_values[VAR_Y], s->y);
+        }
+        if (!ctx->is_disabled)
+            blend_image(ctx, mainpic, s->overpicref, s->x, s->y);
+#else
+        /* XXX: the only different chunk */
+        if (!ctx->is_disabled) {
+            update_clut(s, s->overpicref);
+            mainpic = apply_lut(inlink, mainpic);
+        }
+#endif
+    }
+    ret = ff_filter_frame(ctx->outputs[0], mainpic);
+    av_assert1(ret != AVERROR(EAGAIN));
+    s->frame_requested = 0;
+    return ret;
+}
+
+static int try_filter_next_frame(AVFilterContext *ctx)
+{
+    OverlayContext *s = ctx->priv;
+    AVFrame *next_mainpic = ff_bufqueue_peek(&s->queue_main, 0);
+    int ret;
+
+    if (!next_mainpic)
+        return AVERROR(EAGAIN);
+    if ((ret = try_filter_frame(ctx, next_mainpic)) == AVERROR(EAGAIN))
+        return ret;
+    ff_bufqueue_get(&s->queue_main);
+    return ret;
+}
+
+static int flush_frames(AVFilterContext *ctx)
+{
+    int ret;
+
+    while (!(ret = try_filter_next_frame(ctx)));
+    return ret == AVERROR(EAGAIN) ? 0 : ret;
+}
+
+static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+{
+    AVFilterContext *ctx = inlink->dst;
+    OverlayContext *s = ctx->priv;
+    int ret;
+
+    if ((ret = flush_frames(ctx)) < 0)
+        return ret;
+    if ((ret = try_filter_frame(ctx, inpicref)) < 0) {
+        if (ret != AVERROR(EAGAIN))
+            return ret;
+        ff_bufqueue_add(ctx, &s->queue_main, inpicref);
+    }
+
+    if (!s->overpicref)
+        return 0;
+    flush_frames(ctx);
+
+    return 0;
+}
+
+static int filter_frame_over(AVFilterLink *inlink, AVFrame *inpicref)
+{
+    AVFilterContext *ctx = inlink->dst;
+    OverlayContext *s = ctx->priv;
+    int ret;
+
+    if ((ret = flush_frames(ctx)) < 0)
+        return ret;
+    ff_bufqueue_add(ctx, &s->queue_over, inpicref);
+    ret = try_filter_next_frame(ctx);
+    return ret == AVERROR(EAGAIN) ? 0 : ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    OverlayContext *s = ctx->priv;
+    int input, ret;
+
+    if (!try_filter_next_frame(ctx))
+        return 0;
+    s->frame_requested = 1;
+    while (s->frame_requested) {
+        /* TODO if we had a frame duration, we could guess more accurately */
+        input = !s->overlay_eof && (s->queue_main.available ||
+                                       s->queue_over.available < 2) ?
+                OVERLAY : MAIN;
+        ret = ff_request_frame(ctx->inputs[input]);
+        /* EOF on main is reported immediately */
+        if (ret == AVERROR_EOF && input == OVERLAY) {
+            s->overlay_eof = 1;
+            if (s->shortest)
+                return ret;
+            if ((ret = try_filter_next_frame(ctx)) != AVERROR(EAGAIN))
+                return ret;
+            ret = 0; /* continue requesting frames on main */
+        }
+        if (ret < 0)
+            return ret;
+    }
+    return 0;
+}
+
+/*******************************************************************************
+ *                            END FIXME                                        *
+ *******************************************************************************/
+
+static int config_clut(AVFilterLink *inlink)
+{
+    int size, level;
+    AVFilterContext *ctx = inlink->dst;
+    LUT3DContext *lut3d = ctx->priv;
+    const int w = inlink->w;
+    const int h = inlink->h;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+    lut3d->clut_is16bit = 0;
+    switch (inlink->format) {
+    case AV_PIX_FMT_RGB48:
+    case AV_PIX_FMT_BGR48:
+    case AV_PIX_FMT_RGBA64:
+    case AV_PIX_FMT_BGRA64:
+        lut3d->clut_is16bit = 1;
+    }
+
+    lut3d->clut_step = av_get_padded_bits_per_pixel(desc) >> 3;
+    ff_fill_rgba_map(lut3d->clut_rgba_map, inlink->format);
+
+    if (w != h) {
+        av_log(ctx, AV_LOG_ERROR, "The Hald CLUT must be a square image\n");
+        return AVERROR(EINVAL);
+    }
+
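+    /* Find the Hald level: the picture width must be level^3 (e.g. 512 for
+     * level 8), and the resulting 3D LUT then has level^2 (e.g. 64) entries
+     * per component. */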
+    for (level = 1; level*level*level < w; level++);
+    size = level*level*level;
+    if (size != w) {
+        av_log(ctx, AV_LOG_WARNING, "The Hald CLUT width does not match the level\n");
+        return AVERROR_INVALIDDATA;
+    }
+    av_assert0(w == h && w == size);
+    level *= level;
+    if (level > MAX_LEVEL) {
+        av_log(ctx, AV_LOG_ERROR, "Too large Hald CLUT\n");
+        return AVERROR(EINVAL);
+    }
+    lut3d->lutsize = level;
+
+    return 0;
+}
+
+static const AVOption haldclut_options[] = {
+    { "shortest", "force termination when the shortest input terminates", OFFSET(shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+    { "repeatlast", "continue applying the last clut after eos", OFFSET(repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+    COMMON_OPTIONS
+};
+
+AVFILTER_DEFINE_CLASS(haldclut);
+
+static const AVFilterPad haldclut_inputs[] = {
+    {
+        .name         = "main",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .get_video_buffer = ff_null_get_video_buffer,
+        .filter_frame = filter_frame_main,
+        .config_props = config_input,
+    },{
+        .name         = "clut",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame_clut,
+        .config_props = config_clut,
+    },
+    { NULL }
+};
+
+static const AVFilterPad haldclut_outputs[] = {
+    {
+        .name = "default",
+        .type = AVMEDIA_TYPE_VIDEO,
+        .request_frame = request_frame,
+        .config_props = config_output,
+    },
+    { NULL }
+};
+
+AVFilter avfilter_vf_haldclut = {
+    .name          = "haldclut",
+    .description   = NULL_IF_CONFIG_SMALL("Adjust colors using a Hald CLUT."),
+    .priv_size     = sizeof(LUT3DContext),
+    //.init          = init,
+    .query_formats = query_formats,
+    .inputs        = haldclut_inputs,
+    .outputs       = haldclut_outputs,
+    .priv_class    = &haldclut_class,
+    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
+#endif
-- 
1.8.2.3


