[FFmpeg-devel] [PATCH] lavfi: new colorspace conversion filter.

Ronald S. Bultje <rsbultje@gmail.com>
Wed Mar 30 22:35:18 CEST 2016


The intent here is similar to colormatrix, but this filter is
LGPLv2.1-or-later (instead of GPLv2.0) and additionally supports gamma
and chromaticity (primaries) correction.
---
 libavfilter/Makefile                 |   1 +
 libavfilter/allfilters.c             |   1 +
 libavfilter/colorspacedsp.c          | 130 ++++++
 libavfilter/colorspacedsp.h          |  51 +++
 libavfilter/colorspacedsp_template.c | 256 ++++++++++++
 libavfilter/vf_colorspace.c          | 773 +++++++++++++++++++++++++++++++++++
 6 files changed, 1212 insertions(+)
 create mode 100644 libavfilter/colorspacedsp.c
 create mode 100644 libavfilter/colorspacedsp.h
 create mode 100644 libavfilter/colorspacedsp_template.c
 create mode 100644 libavfilter/vf_colorspace.c
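
Two notes for reviewers on conventions used in the patch.

First, the coefficient tables used throughout (lrgb2lrgb_coeffs,
yuv2yuv_coeffs, the m[3][3][8] argument of multiply3x3) follow a Q14
fixed-point convention: each double-precision matrix entry c is stored as
lrint(16384.0 * c), splatted into all 8 int16_t lanes (presumably so SIMD
code can later broadcast-load a full register of identical coefficients),
and applied as (v * c_q14 + 8192) >> 14. Below is a minimal standalone
sketch of that arithmetic with arbitrary sample values; clip_int16() only
mirrors av_clip_int16() so the snippet builds outside the tree:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of av_clip_int16(), so this sketch has no FFmpeg dependency. */
static int16_t clip_int16(int v)
{
    return v < -32768 ? -32768 : v > 32767 ? 32767 : v;
}

int main(void)
{
    double  c = 0.6274;   /* arbitrary example coefficient */
    int16_t v = 12000;    /* arbitrary example sample value */
    int16_t lanes[8];
    int16_t r;
    int i;

    /* Quantize to Q14 and splat into all 8 lanes, as create_filtergraph()
     * does when filling e.g. lrgb2lrgb_coeffs. */
    for (i = 0; i < 8; i++)
        lanes[i] = (int16_t) lrint(16384.0 * c);

    /* Apply the coefficient the way multiply3x3_c() does: multiply, add the
     * 0.5 rounding term (8192 == 1 << 13), shift right by 14 and clip. */
    r = clip_int16((lanes[0] * v + 8192) >> 14);

    printf("%.4f * %d ~= %d (exact %.1f)\n", c, v, r, c * v);
    return 0;
}

This Q14 layout is also what limits coefficients to the [-2.0,2.0) range
mentioned in the FIXME list in vf_colorspace.c.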

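Second, the function pointer tables in ColorSpaceDSPContext are indexed first
by bit depth (0: 8-bit, 1: 10-bit, 2: 12-bit) and then by chroma subsampling
(0: 4:4:4, 1: 4:2:2, 2: 4:2:0); create_filtergraph() derives both indices from
the pixel format descriptor. A short sketch of that selection follows, meant
to be compiled inside the FFmpeg tree; pick_yuv2rgb() is an illustrative
helper name, not something this patch adds:

#include "libavfilter/colorspacedsp.h"

/* Illustrative helper (not part of this patch): select a yuv2rgb function
 * the same way create_filtergraph() does, from the component depth and the
 * log2 chroma subsampling factors of an AVPixFmtDescriptor. */
static yuv2rgb_fn pick_yuv2rgb(const ColorSpaceDSPContext *dsp,
                               int depth, int log2_chroma_w, int log2_chroma_h)
{
    int depth_idx = (depth - 8) >> 1;               /* 8 -> 0, 10 -> 1, 12 -> 2 */
    int ss_idx    = log2_chroma_w + log2_chroma_h;  /* 444 -> 0, 422 -> 1, 420 -> 2 */

    return dsp->yuv2rgb[depth_idx][ss_idx];
}

So yuv420p10, for example, maps to dsp->yuv2rgb[1][2], matching the layout
comments in colorspacedsp.h.
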
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index b6e1999..9b7546d 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -131,6 +131,7 @@ OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER)      += vf_colorchannelmixer.o
 OBJS-$(CONFIG_COLORKEY_FILTER)               += vf_colorkey.o
 OBJS-$(CONFIG_COLORLEVELS_FILTER)            += vf_colorlevels.o
 OBJS-$(CONFIG_COLORMATRIX_FILTER)            += vf_colormatrix.o
+OBJS-$(CONFIG_COLORSPACE_FILTER)             += vf_colorspace.o colorspacedsp.o
 OBJS-$(CONFIG_CONVOLUTION_FILTER)            += vf_convolution.o
 OBJS-$(CONFIG_COPY_FILTER)                   += vf_copy.o
 OBJS-$(CONFIG_COREIMAGE_FILTER)              += vf_coreimage.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 5c18fd1..3fc450f 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -152,6 +152,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER(COLORKEY,       colorkey,       vf);
     REGISTER_FILTER(COLORLEVELS,    colorlevels,    vf);
     REGISTER_FILTER(COLORMATRIX,    colormatrix,    vf);
+    REGISTER_FILTER(COLORSPACE,     colorspace,     vf);
     REGISTER_FILTER(CONVOLUTION,    convolution,    vf);
     REGISTER_FILTER(COPY,           copy,           vf);
     REGISTER_FILTER(COREIMAGE,      coreimage,      vf);
diff --git a/libavfilter/colorspacedsp.c b/libavfilter/colorspacedsp.c
new file mode 100644
index 0000000..e71bf1f
--- /dev/null
+++ b/libavfilter/colorspacedsp.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "colorspacedsp.h"
+
+#define SS_W 0
+#define SS_H 0
+
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+#undef SS_W
+#undef SS_H
+
+#define SS_W 1
+#define SS_H 0
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+#undef SS_W
+#undef SS_H
+
+#define SS_W 1
+#define SS_H 1
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+static void multiply3x3_c(int16_t *buf[3], ptrdiff_t stride,
+                          int w, int h, const int16_t m[3][3][8])
+{
+    int y, x;
+    int16_t *buf0 = buf[0], *buf1 = buf[1], *buf2 = buf[2];
+
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int v0 = buf0[x], v1 = buf1[x], v2 = buf2[x];
+
+            buf0[x] = av_clip_int16((m[0][0][0] * v0 + m[0][1][0] * v1 +
+                                     m[0][2][0] * v2 + 8192) >> 14);
+            buf1[x] = av_clip_int16((m[1][0][0] * v0 + m[1][1][0] * v1 +
+                                     m[1][2][0] * v2 + 8192) >> 14);
+            buf2[x] = av_clip_int16((m[2][0][0] * v0 + m[2][1][0] * v1 +
+                                     m[2][2][0] * v2 + 8192) >> 14);
+        }
+
+        buf0 += stride;
+        buf1 += stride;
+        buf2 += stride;
+    }
+}
+
+void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
+{
+    dsp->yuv2rgb[0][0] = yuv2rgb_444_8_c;
+    dsp->yuv2rgb[0][1] = yuv2rgb_422_8_c;
+    dsp->yuv2rgb[0][2] = yuv2rgb_420_8_c;
+    dsp->yuv2rgb[1][0] = yuv2rgb_444_10_c;
+    dsp->yuv2rgb[1][1] = yuv2rgb_422_10_c;
+    dsp->yuv2rgb[1][2] = yuv2rgb_420_10_c;
+    dsp->yuv2rgb[2][0] = yuv2rgb_444_12_c;
+    dsp->yuv2rgb[2][1] = yuv2rgb_422_12_c;
+    dsp->yuv2rgb[2][2] = yuv2rgb_420_12_c;
+
+    dsp->rgb2yuv[0][0] = rgb2yuv_444_8_c;
+    dsp->rgb2yuv[0][1] = rgb2yuv_422_8_c;
+    dsp->rgb2yuv[0][2] = rgb2yuv_420_8_c;
+    dsp->rgb2yuv[1][0] = rgb2yuv_444_10_c;
+    dsp->rgb2yuv[1][1] = rgb2yuv_422_10_c;
+    dsp->rgb2yuv[1][2] = rgb2yuv_420_10_c;
+    dsp->rgb2yuv[2][0] = rgb2yuv_444_12_c;
+    dsp->rgb2yuv[2][1] = rgb2yuv_422_12_c;
+    dsp->rgb2yuv[2][2] = rgb2yuv_420_12_c;
+
+    dsp->yuv2yuv[0][0] = yuv2yuv_444_8_c;
+    dsp->yuv2yuv[0][1] = yuv2yuv_422_8_c;
+    dsp->yuv2yuv[0][2] = yuv2yuv_420_8_c;
+    dsp->yuv2yuv[1][0] = yuv2yuv_444_10_c;
+    dsp->yuv2yuv[1][1] = yuv2yuv_422_10_c;
+    dsp->yuv2yuv[1][2] = yuv2yuv_420_10_c;
+    dsp->yuv2yuv[2][0] = yuv2yuv_444_12_c;
+    dsp->yuv2yuv[2][1] = yuv2yuv_422_12_c;
+    dsp->yuv2yuv[2][2] = yuv2yuv_420_12_c;
+
+    dsp->multiply3x3 = multiply3x3_c;
+}
diff --git a/libavfilter/colorspacedsp.h b/libavfilter/colorspacedsp.h
new file mode 100644
index 0000000..89c1e1b
--- /dev/null
+++ b/libavfilter/colorspacedsp.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_COLORSPACEDSP_H
+#define AVFILTER_COLORSPACEDSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef void (*yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride,
+                           uint8_t *yuv[3], ptrdiff_t yuv_stride[3],
+                           int w, int h, const int16_t yuv2rgb_coeffs[3][3][8],
+                           const int16_t yuv_offset[8]);
+typedef void (*rgb2yuv_fn)(uint8_t *yuv[3], ptrdiff_t yuv_stride[3],
+                           int16_t *rgb[3], ptrdiff_t rgb_stride,
+                           int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+                           const int16_t yuv_offset[8]);
+typedef void (*yuv2yuv_fn)(uint8_t *yuv_out[3], ptrdiff_t yuv_out_stride[3],
+                           uint8_t *yuv_in[3], ptrdiff_t yuv_in_stride[3],
+                           int w, int h, const int16_t yuv2yuv_coeffs[3][3][8],
+                           const int16_t yuv_offset[2][8]);
+
+typedef struct ColorSpaceDSPContext {
+    yuv2rgb_fn yuv2rgb[3 /* 0: 8bit, 1: 10bit, 2: 12bit */][3 /* 0: 444, 1: 422, 2: 420 */];
+    rgb2yuv_fn rgb2yuv[3 /* 0: 8bit, 1: 10bit, 2: 12bit */][3 /* 0: 444, 1: 422, 2: 420 */];
+    yuv2yuv_fn yuv2yuv[3 /* 0: 8bit, 1: 10bit, 2: 12bit */][3 /* 0: 444, 1: 422, 2: 420 */];
+
+    void (*multiply3x3)(int16_t *data[3], ptrdiff_t stride,
+                        int w, int h, const int16_t m[3][3][8]);
+} ColorSpaceDSPContext;
+
+void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp);
+
+#endif /* AVFILTER_COLORSPACEDSP_H */
diff --git a/libavfilter/colorspacedsp_template.c b/libavfilter/colorspacedsp_template.c
new file mode 100644
index 0000000..3f4547c
--- /dev/null
+++ b/libavfilter/colorspacedsp_template.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavcodec/bit_depth_template.c"
+
+#undef fn
+#undef avg
+#if SS_W == 0
+#define fn(x) FUNCC(x##_444)
+#define avg(a,b,c,d) (a)
+#elif SS_H == 0
+#define fn(x) FUNCC(x##_422)
+#define avg(a,b,c,d) ((a + b + 1) >> 1)
+#else
+#define fn(x) FUNCC(x##_420)
+#define avg(a,b,c,d) ((a + b + c + d + 2) >> 2)
+#endif
+
+static void fn(yuv2rgb)(int16_t *rgb[3], ptrdiff_t rgb_stride,
+                        uint8_t *_yuv[3], ptrdiff_t yuv_stride[3],
+                        int w, int h, const int16_t yuv2rgb_coeffs[3][3][8],
+                        const int16_t yuv_offset[8])
+{
+    pixel **yuv = (pixel **) _yuv;
+    const pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+    int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+    int y, x;
+    int cy = yuv2rgb_coeffs[0][0][0];
+    int crv = yuv2rgb_coeffs[0][2][0];
+    int cgu = yuv2rgb_coeffs[1][1][0];
+    int cgv = yuv2rgb_coeffs[1][2][0];
+    int cbu = yuv2rgb_coeffs[2][1][0];
+    int sh = BIT_DEPTH - 1;
+    int uv_offset = 128 << (BIT_DEPTH - 8);
+
+    av_assert2(yuv2rgb_coeffs[0][1][0] == 0);
+    av_assert2(yuv2rgb_coeffs[2][2][0] == 0);
+    av_assert2(yuv2rgb_coeffs[1][0][0] == cy && yuv2rgb_coeffs[2][0][0] == cy);
+
+#if SS_W == 1
+    w = (w + 1) >> 1;
+#if SS_H == 1
+    h = (h + 1) >> 1;
+#endif
+#endif
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int y00 = yuv0[x << SS_W] - yuv_offset[0];
+#if SS_W == 1
+            int y01 = yuv0[2 * x + 1] - yuv_offset[0];
+#if SS_H == 1
+            int y10 = yuv0[yuv_stride[0] / sizeof(pixel) + 2 * x] - yuv_offset[0];
+            int y11 = yuv0[yuv_stride[0] / sizeof(pixel) + 2 * x + 1] - yuv_offset[0];
+#endif
+#endif
+            int u = yuv1[x] - uv_offset, v = yuv2[x] - uv_offset;
+
+            rgb0[x << SS_W]              = av_clip_int16((y00 * cy + crv * v + 8192) >> sh);
+#if SS_W == 1
+            rgb0[2 * x + 1]              = av_clip_int16((y01 * cy + crv * v + 8192) >> sh);
+#if SS_H == 1
+            rgb0[2 * x + rgb_stride]     = av_clip_int16((y10 * cy + crv * v + 8192) >> sh);
+            rgb0[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + crv * v + 8192) >> sh);
+#endif
+#endif
+
+            rgb1[x << SS_W]              = av_clip_int16((y00 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+#if SS_W == 1
+            rgb1[2 * x + 1]              = av_clip_int16((y01 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+#if SS_H == 1
+            rgb1[2 * x + rgb_stride]     = av_clip_int16((y10 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+            rgb1[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+#endif
+#endif
+
+            rgb2[x << SS_W]              = av_clip_int16((y00 * cy + cbu * u + 8192) >> sh);
+#if SS_W == 1
+            rgb2[2 * x + 1]              = av_clip_int16((y01 * cy + cbu * u + 8192) >> sh);
+#if SS_H == 1
+            rgb2[2 * x + rgb_stride]     = av_clip_int16((y10 * cy + cbu * u + 8192) >> sh);
+            rgb2[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + cbu * u + 8192) >> sh);
+#endif
+#endif
+        }
+
+        yuv0 += (yuv_stride[0] << SS_H) / sizeof(pixel);
+        yuv1 += yuv_stride[1] / sizeof(pixel);
+        yuv2 += yuv_stride[2] / sizeof(pixel);
+        rgb0 += rgb_stride << SS_H;
+        rgb1 += rgb_stride << SS_H;
+        rgb2 += rgb_stride << SS_H;
+    }
+}
+
+static void fn(rgb2yuv)(uint8_t *_yuv[3], ptrdiff_t yuv_stride[3],
+                        int16_t *rgb[3], ptrdiff_t s,
+                        int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+                        const int16_t yuv_offset[8])
+{
+    pixel **yuv = (pixel **) _yuv;
+    pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+    const int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+    int y, x;
+    int sh = 29 - BIT_DEPTH;
+    int rnd = 1 << (sh - 1);
+    int cry = rgb2yuv_coeffs[0][0][0];
+    int cgy = rgb2yuv_coeffs[0][1][0];
+    int cby = rgb2yuv_coeffs[0][2][0];
+    int cru = rgb2yuv_coeffs[1][0][0];
+    int cgu = rgb2yuv_coeffs[1][1][0];
+    int cbu = rgb2yuv_coeffs[1][2][0];
+    int crv = rgb2yuv_coeffs[2][0][0];
+    int cgv = rgb2yuv_coeffs[2][1][0];
+    int cbv = rgb2yuv_coeffs[2][2][0];
+    ptrdiff_t s0 = yuv_stride[0] / sizeof(pixel);
+    int uv_offset = 128 << (BIT_DEPTH - 8);
+
+#if SS_W == 1
+    w = (w + 1) >> 1;
+#if SS_H == 1
+    h = (h + 1) >> 1;
+#endif
+#endif
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int r00 = rgb0[x << SS_W], g00 = rgb1[x << SS_W], b00 = rgb2[x << SS_W];
+#if SS_W == 1
+            int r01 = rgb0[x * 2 + 1], g01 = rgb1[x * 2 + 1], b01 = rgb2[x * 2 + 1];
+#if SS_H == 1
+            int r10 = rgb0[x * 2 + 0 + s], g10 = rgb1[x * 2 + 0 + s], b10 = rgb2[x * 2 + 0 + s];
+            int r11 = rgb0[x * 2 + 1 + s], g11 = rgb1[x * 2 + 1 + s], b11 = rgb2[x * 2 + 1 + s];
+#endif
+#endif
+
+            yuv0[x << SS_W]      = av_clip_pixel(yuv_offset[0] +
+                                                 ((r00 * cry + g00 * cgy +
+                                                   b00 * cby + rnd) >> sh));
+#if SS_W == 1
+            yuv0[x * 2 + 1]      = av_clip_pixel(yuv_offset[0] +
+                                                 ((r01 * cry + g01 * cgy +
+                                                   b01 * cby + rnd) >> sh));
+#if SS_H == 1
+            yuv0[x * 2 + 0 + s0] = av_clip_pixel(yuv_offset[0] +
+                                                 ((r10 * cry + g10 * cgy +
+                                                   b10 * cby + rnd) >> sh));
+            yuv0[x * 2 + 1 + s0] = av_clip_pixel(yuv_offset[0] +
+                                                 ((r11 * cry + g11 * cgy +
+                                                   b11 * cby + rnd) >> sh));
+#endif
+#endif
+
+            yuv1[x]      = av_clip_pixel(uv_offset +
+                                         ((avg(r00, r01, r10, r11) * cru +
+                                           avg(g00, g01, g10, g11) * cgu +
+                                           avg(b00, b01, b10, b11) * cbu + rnd) >> sh));
+            yuv2[x]      = av_clip_pixel(uv_offset +
+                                         ((avg(r00, r01, r10, r11) * crv +
+                                           avg(g00, g01, g10, g11) * cgv +
+                                           avg(b00, b01, b10, b11) * cbv + rnd) >> sh));
+        }
+
+        yuv0 += s0 << SS_H;
+        yuv1 += yuv_stride[1] / sizeof(pixel);
+        yuv2 += yuv_stride[2] / sizeof(pixel);
+        rgb0 += s << SS_H;
+        rgb1 += s << SS_H;
+        rgb2 += s << SS_H;
+    }
+}
+
+static void fn(yuv2yuv)(uint8_t *_dst[3], ptrdiff_t dst_stride[3],
+                        uint8_t *_src[3], ptrdiff_t src_stride[3],
+                        int w, int h, const int16_t c[3][3][8],
+                        const int16_t yuv_offset[2][8])
+{
+    pixel **dst = (pixel **) _dst, **src = (pixel **) _src;
+    const pixel *src0 = src[0], *src1 = src[1], *src2 = src[2];
+    pixel *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
+    int y, x;
+    int uv_offset = 128 << (BIT_DEPTH - 8);
+
+#if SS_W == 1
+    w = (w + 1) >> 1;
+#if SS_H == 1
+    h = (h + 1) >> 1;
+#endif
+#endif
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int y00 = src0[x << SS_W] - yuv_offset[0][0];
+#if SS_W == 1
+            int y01 = src0[2 * x + 1] - yuv_offset[0][0];
+#if SS_H == 1
+            int y10 = src0[src_stride[0] / sizeof(pixel) + 2 * x] - yuv_offset[0][0];
+            int y11 = src0[src_stride[0] / sizeof(pixel) + 2 * x + 1] - yuv_offset[0][0];
+#endif
+#endif
+            int u = src1[x] - uv_offset, v = src2[x] - uv_offset;
+
+            dst0[x << SS_W] = av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y00 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+#if SS_W == 1
+            dst0[x * 2 + 1] = av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y01 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+#if SS_H == 1
+            dst0[x * 2 + 0 + dst_stride[0] / sizeof(pixel)] =
+                              av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y10 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+            dst0[x * 2 + 1 + dst_stride[0] / sizeof(pixel)] =
+                              av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y11 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+#endif
+#endif
+
+            dst1[x] = av_clip_pixel(uv_offset +
+                                    ((avg(y00, y01, y10, y11) * c[1][0][0] +
+                                      u * c[1][1][0] + v * c[1][2][0] + 8192) >> 14));
+            dst2[x] = av_clip_pixel(uv_offset +
+                                    ((avg(y00, y01, y10, y11) * c[2][0][0] +
+                                      u * c[2][1][0] + v * c[2][2][0] + 8192) >> 14));
+        }
+
+        dst0 += (dst_stride[0] << SS_H) / sizeof(pixel);
+        dst1 += dst_stride[1] / sizeof(pixel);
+        dst2 += dst_stride[2] / sizeof(pixel);
+        src0 += (src_stride[0] << SS_H) / sizeof(pixel);
+        src1 += src_stride[1] / sizeof(pixel);
+        src2 += src_stride[2] / sizeof(pixel);
+    }
+}
diff --git a/libavfilter/vf_colorspace.c b/libavfilter/vf_colorspace.c
new file mode 100644
index 0000000..2183afc
--- /dev/null
+++ b/libavfilter/vf_colorspace.c
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Convert between colorspaces.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixfmt.h"
+
+#include "avfilter.h"
+#include "colorspacedsp.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+struct ColorPrimaries {
+    double xw, yw, xr, yr, xg, yg, xb, yb;
+};
+
+struct TransferCharacteristics {
+    double alpha, beta, gamma, delta;
+};
+
+struct LumaCoefficients {
+    double cr, cg, cb;
+};
+
+typedef struct ColorSpaceContext {
+    const AVClass *class;
+
+    ColorSpaceDSPContext dsp;
+
+    enum AVColorSpace in_csp, out_csp;
+    enum AVColorRange in_rng, out_rng;
+    enum AVColorTransferCharacteristic in_trc, out_trc;
+    enum AVColorPrimaries in_prm, out_prm;
+    enum AVPixelFormat in_format, out_format;
+    int fast_mode;
+
+    int16_t *rgb[3];
+    ptrdiff_t rgb_stride;
+    unsigned rgb_sz;
+
+    const struct ColorPrimaries *in_primaries, *out_primaries;
+    int lrgb2lrgb_passthrough;
+    int16_t lrgb2lrgb_coeffs[3][3][8];
+
+    const struct TransferCharacteristics *in_txchr, *out_txchr;
+    int rgb2rgb_passthrough;
+    int16_t *lin_lut, *delin_lut;
+
+    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
+    int yuv2yuv_passthrough, yuv2yuv_fastmode;
+    int16_t yuv2rgb_coeffs[3][3][8], rgb2yuv_coeffs[3][3][8], yuv2yuv_coeffs[3][3][8];
+    int16_t yuv_offset[2 /* in, out */][8];
+    yuv2rgb_fn yuv2rgb;
+    rgb2yuv_fn rgb2yuv;
+    yuv2yuv_fn yuv2yuv;
+    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
+    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
+} ColorSpaceContext;
+
+// FIXME deal with odd width/heights (or just forbid it)
+// FIXME if out_format is not set, it should default to the same as in_format
+// FIXME we should have a utility property to set all relevant colorspace/* variables
+// to the same (e.g. everything to bt709)
+// FIXME if range is not set, it should default to the same as input
+// FIXME simd
+// FIXME add some asserts in the table generation code to ensure that we never
+// overflow, e.g. if coeffs are 14bit, they can't exceed the [-2.0,2.0) range,
+// and I'm not entirely sure that's always true (e.g. for yuv2yuv between bt2020
+// and 601, blue might go off quite a bit); if it exceeds, change the bit range.
+// FIXME constant alignment in ColorSpaceContext
+// FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
+// FIXME test that the values in (de)lin_lut don't exceed their container storage
+// type size
+
+/*
+ * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
+ */
+static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB] = {
+    [AVCOL_SPC_FCC]        = { 0.30,   0.59,   0.11   },
+    [AVCOL_SPC_BT470BG]    = { 0.299,  0.587,  0.114  },
+    [AVCOL_SPC_SMPTE170M]  = { 0.299,  0.587,  0.114  },
+    [AVCOL_SPC_BT709]      = { 0.2126, 0.7152, 0.0722 },
+    [AVCOL_SPC_SMPTE240M]  = { 0.212,  0.701,  0.087  },
+    [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
+    [AVCOL_SPC_BT2020_CL]  = { 0.2627, 0.6780, 0.0593 },
+};
+
+static const struct LumaCoefficients *get_luma_coefficients(enum AVColorSpace csp)
+{
+    const struct LumaCoefficients *coeffs;
+
+    if (csp >= AVCOL_SPC_NB)
+        return NULL;
+    coeffs = &luma_coefficients[csp];
+    if (!coeffs->cr)
+        return NULL;
+
+    return coeffs;
+}
+
+static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
+                               double rgb2yuv[3][3])
+{
+    double bscale, rscale;
+
+    rgb2yuv[0][0] = coeffs->cr;
+    rgb2yuv[0][1] = coeffs->cg;
+    rgb2yuv[0][2] = coeffs->cb;
+    bscale = 0.5 / (coeffs->cb - 1.0);
+    rscale = 0.5 / (coeffs->cr - 1.0);
+    rgb2yuv[1][0] = bscale * coeffs->cr;
+    rgb2yuv[1][1] = bscale * coeffs->cg;
+    rgb2yuv[1][2] = 0.5;
+    rgb2yuv[2][0] = 0.5;
+    rgb2yuv[2][1] = rscale * coeffs->cg;
+    rgb2yuv[2][2] = rscale * coeffs->cb;
+}
+
+// FIXME I'm not actually sure whether gamma = 0.45 or gamma = 1.0 / 2.2 for
+// most of these entries
+static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
+    [AVCOL_TRC_BT709]     = { 1.099,  0.018,  0.45, 4.5 },
+    [AVCOL_TRC_GAMMA22]   = { 1.0,    0.0,    1.0 / 2.2, 0.0 },
+    [AVCOL_TRC_GAMMA28]   = { 1.0,    0.0,    1.0 / 2.8, 0.0 },
+    [AVCOL_TRC_SMPTE170M] = { 1.099,  0.018,  0.45, 4.5 },
+    [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
+    [AVCOL_TRC_BT2020_10] = { 1.099,  0.018,  0.45, 4.5 },
+    [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
+};
+
+static const struct TransferCharacteristics *
+    get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
+{
+    const struct TransferCharacteristics *coeffs;
+
+    if (trc >= AVCOL_TRC_NB)
+        return NULL;
+    coeffs = &transfer_characteristics[trc];
+    if (!coeffs->alpha)
+        return NULL;
+
+    return coeffs;
+}
+
+static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
+    [AVCOL_PRI_BT709]     = { 0.3127, 0.3290, 0.64,  0.33,  0.30,  0.60,  0.15,  0.06 },
+    [AVCOL_PRI_BT470M]    = { 0.310,  0.316,  0.670, 0.330, 0.210, 0.710, 0.140, 0.080 },
+    [AVCOL_PRI_BT470BG]   = { 0.3127, 0.3290, 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 },
+    [AVCOL_PRI_SMPTE170M] = { 0.3127, 0.3290, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+    [AVCOL_PRI_SMPTE240M] = { 0.3127, 0.3290, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+    [AVCOL_PRI_BT2020]    = { 0.3127, 0.3290, 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
+};
+
+static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
+{
+    const struct ColorPrimaries *coeffs;
+
+    if (prm >= AVCOL_PRI_NB)
+        return NULL;
+    coeffs = &color_primaries[prm];
+    if (!coeffs->xw)
+        return NULL;
+
+    return coeffs;
+}
+
+static void invert_matrix3x3(const double in[3][3], double out[3][3])
+{
+    double m00 = in[0][0], m01 = in[0][1], m02 = in[0][2],
+           m10 = in[1][0], m11 = in[1][1], m12 = in[1][2],
+           m20 = in[2][0], m21 = in[2][1], m22 = in[2][2];
+    int i, j;
+    double det;
+
+    out[0][0] =  (m11 * m22 - m21 * m12);
+    out[0][1] = -(m01 * m22 - m21 * m02);
+    out[0][2] =  (m01 * m12 - m11 * m02);
+    out[1][0] = -(m10 * m22 - m20 * m12);
+    out[1][1] =  (m00 * m22 - m20 * m02);
+    out[1][2] = -(m00 * m12 - m10 * m02);
+    out[2][0] =  (m10 * m21 - m20 * m11);
+    out[2][1] = -(m00 * m21 - m20 * m01);
+    out[2][2] =  (m00 * m11 - m10 * m01);
+
+    det = m00 * out[0][0] + m10 * out[0][1] + m20 * out[0][2];
+    det = 1.0 / det;
+
+    for (i = 0; i < 3; i++) {
+        for (j = 0; j < 3; j++)
+            out[i][j] *= det;
+    }
+}
+
+static int fill_gamma_table(ColorSpaceContext *s)
+{
+    int n;
+    double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
+    double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
+    double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
+    double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
+    double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
+
+    s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
+    if (!s->lin_lut)
+        return AVERROR(ENOMEM);
+    s->delin_lut = &s->lin_lut[32768];
+    for (n = 0; n < 32768; n++) {
+        double v = (n - 2048.0) / 28672.0, d, l;
+
+        // delinearize
+        if (v <= -out_beta) {
+            d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
+        } else if (v < out_beta) {
+            d = out_delta * v;
+        } else {
+            d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
+        }
+        s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
+
+        // linearize
+        if (v <= -in_beta) {
+            l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
+        } else if (v < in_beta) {
+            l = v * in_idelta;
+        } else {
+            l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
+        }
+        s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
+    }
+
+    return 0;
+}
+
+/*
+ * see e.g. http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
+ */
+static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs,
+                               double rgb2xyz[3][3])
+{
+    double i[3][3], sr, sg, sb, zw;
+
+    rgb2xyz[0][0] = coeffs->xr / coeffs->yr;
+    rgb2xyz[0][1] = coeffs->xg / coeffs->yg;
+    rgb2xyz[0][2] = coeffs->xb / coeffs->yb;
+    rgb2xyz[1][0] = rgb2xyz[1][1] = rgb2xyz[1][2] = 1.0;
+    rgb2xyz[2][0] = (1.0 - coeffs->xr - coeffs->yr) / coeffs->yr;
+    rgb2xyz[2][1] = (1.0 - coeffs->xg - coeffs->yg) / coeffs->yg;
+    rgb2xyz[2][2] = (1.0 - coeffs->xb - coeffs->yb) / coeffs->yb;
+    invert_matrix3x3(rgb2xyz, i);
+    zw = 1.0 - coeffs->xw - coeffs->yw;
+    sr = i[0][0] * coeffs->xw + i[0][1] * coeffs->yw + i[0][2] * zw;
+    sg = i[1][0] * coeffs->xw + i[1][1] * coeffs->yw + i[1][2] * zw;
+    sb = i[2][0] * coeffs->xw + i[2][1] * coeffs->yw + i[2][2] * zw;
+    rgb2xyz[0][0] *= sr;
+    rgb2xyz[0][1] *= sg;
+    rgb2xyz[0][2] *= sb;
+    rgb2xyz[1][0] *= sr;
+    rgb2xyz[1][1] *= sg;
+    rgb2xyz[1][2] *= sb;
+    rgb2xyz[2][0] *= sr;
+    rgb2xyz[2][1] *= sg;
+    rgb2xyz[2][2] *= sb;
+}
+
+static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
+{
+    int m, n;
+
+    for (m = 0; m < 3; m++)
+        for (n = 0; n < 3; n++)
+            dst[m][n] = src2[m][0] * src1[0][n] +
+                        src2[m][1] * src1[1][n] +
+                        src2[m][2] * src1[2][n];
+}
+
+static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
+                      int w, int h, const int16_t *lut)
+{
+    int y, x, n;
+
+    for (n = 0; n < 3; n++) {
+        int16_t *data = buf[n];
+
+        for (y = 0; y < h; y++) {
+            for (x = 0; x < w; x++)
+                data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
+
+            data += stride;
+        }
+    }
+}
+
+static void convert(ColorSpaceContext *s, AVFrame *in, AVFrame *out)
+{
+    int w = in->width, h = in->height;
+    ptrdiff_t in_linesize[3] = { in->linesize[0], in->linesize[1], in->linesize[2] };
+    ptrdiff_t out_linesize[3] = { out->linesize[0], out->linesize[1], out->linesize[2] };
+
+    // FIXME for simd, also make sure we process pictures with negative stride
+    // top-down, so we don't overwrite data lines with the padding that precedes
+    // them in the same buffer (same as swscale)
+
+    if (s->yuv2yuv_passthrough) {
+        av_frame_copy(out, in); // FIXME see comment in filter_frame()
+    } else if (s->yuv2yuv_fastmode) {
+        // FIXME possibly use a fast mode in case only the y range changes?
+        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
+        // are non-zero
+        s->yuv2yuv(out->data, out_linesize, in->data, in_linesize, w, h,
+                   s->yuv2yuv_coeffs, s->yuv_offset);
+    } else {
+        // FIXME maybe (for cache efficiency) run the pipeline per line instead
+        // of per full buffer per function? (Or, since yuv2rgb requires 2 lines: per
+        // 2 lines, for yuv420.)
+        /*
+         * General design:
+         * - yuv2rgb converts from whatever range the input was ([16-235/240] or
+         *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
+         *   of RGB in pseudo-restricted 15+sign bits. That means that the float
+         *   range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
+         *   range is used for overflow/underflow outside the representable
+         *   range of this RGB type. rgb2yuv is the exact opposite.
+         * - gamma correction is done using a LUT since that appears to work
+         *   fairly fast.
+         * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
+         *   (or rgb2yuv conversion) uses nearest-neighbour sampling to read
+         *   chroma pixels at luma resolution. If you want a fancier chroma
+         *   filter, you can use swscale to convert to yuv444p first.
+         * - all coefficients are 14bit (so in the [-2.0,2.0] range).
+         */
+        s->yuv2rgb(s->rgb, s->rgb_stride, in->data, in_linesize, w, h,
+                   s->yuv2rgb_coeffs, s->yuv_offset[0]);
+        if (!s->rgb2rgb_passthrough) {
+            apply_lut(s->rgb, s->rgb_stride, w, h, s->lin_lut);
+            if (!s->lrgb2lrgb_passthrough)
+                s->dsp.multiply3x3(s->rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
+            apply_lut(s->rgb, s->rgb_stride, w, h, s->delin_lut);
+        }
+        s->rgb2yuv(out->data, out_linesize, s->rgb, s->rgb_stride, w, h,
+                   s->rgb2yuv_coeffs, s->yuv_offset[1]);
+    }
+}
+
+static int get_range_off(int *off, int *y_rng, int *uv_rng,
+                         enum AVColorRange rng, int depth)
+{
+    switch (rng) {
+    case AVCOL_RANGE_MPEG:
+        *off = 16 << (depth - 8);
+        *y_rng = 219 << (depth - 8);
+        *uv_rng = 224 << (depth - 8);
+        break;
+    case AVCOL_RANGE_JPEG:
+        *off = 0;
+        *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
+        break;
+    default:
+        return AVERROR(EINVAL);
+    }
+
+    return 0;
+}
+
+static int create_filtergraph(ColorSpaceContext *s,
+                              const AVFrame *in, const AVFrame *out)
+{
+    const AVPixFmtDescriptor *in_desc  = av_pix_fmt_desc_get(in->format);
+    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
+    int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
+
+#define supported_depth(d) (d == 8 || d == 10 || d == 12)
+#define supported_subsampling(lcw, lch) \
+    ((lcw == 0 && lch == 0) || (lcw == 1 && lch == 0) || (lcw == 1 && lch == 1))
+#define supported_format(d) \
+    (d != NULL && d->nb_components == 3 && \
+     !(d->flags & AV_PIX_FMT_FLAG_RGB) && \
+     supported_depth(d->comp[0].depth) && \
+     supported_subsampling(d->log2_chroma_w, d->log2_chroma_h))
+
+    if (!supported_format(in_desc) || !supported_format(out_desc))
+        return AVERROR(EINVAL);
+
+    if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
+    if (in->color_trc       != s->in_trc) s->in_txchr     = NULL;
+    if (in->colorspace      != s->in_csp ||
+        in->color_range     != s->in_rng) s->in_lumacoef  = NULL;
+
+    if (!s->out_primaries || !s->in_primaries) {
+        s->in_prm = in->color_primaries;
+        s->in_primaries = get_color_primaries(s->in_prm);
+        if (!s->in_primaries)
+            return AVERROR(EINVAL);
+        s->out_primaries = get_color_primaries(s->out_prm);
+        if (!s->out_primaries)
+            return AVERROR(EINVAL);
+        s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
+                                           sizeof(*s->in_primaries));
+        if (!s->lrgb2lrgb_passthrough) {
+            double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
+
+            fill_rgb2xyz_table(s->out_primaries, rgb2xyz);
+            invert_matrix3x3(rgb2xyz, xyz2rgb);
+            fill_rgb2xyz_table(s->in_primaries, rgb2xyz);
+            mul3x3(rgb2rgb, rgb2xyz, xyz2rgb);
+            for (m = 0; m < 3; m++)
+                for (n = 0; n < 3; n++) {
+                    s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
+                    for (o = 1; o < 8; o++)
+                        s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
+                }
+
+            emms = 1;
+        }
+    }
+
+    if (!s->in_txchr) {
+        av_freep(&s->lin_lut);
+        s->in_trc = in->color_trc;
+        s->in_txchr = get_transfer_characteristics(s->in_trc);
+        if (!s->in_txchr)
+            return AVERROR(EINVAL);
+    }
+
+    if (!s->out_txchr) {
+        av_freep(&s->lin_lut);
+        s->out_txchr = get_transfer_characteristics(s->out_trc);
+        if (!s->out_txchr)
+            return AVERROR(EINVAL);
+    }
+
+    s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
+                             !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
+    if (!s->rgb2rgb_passthrough && !s->lin_lut) {
+        res = fill_gamma_table(s);
+        if (res < 0)
+            return res;
+        emms = 1;
+    }
+
+    if (!s->in_lumacoef) {
+        s->in_csp = in->colorspace;
+        s->in_rng = in->color_range;
+        s->in_lumacoef = get_luma_coefficients(s->in_csp);
+        if (!s->in_lumacoef)
+            return AVERROR(EINVAL);
+        redo_yuv2rgb = 1;
+    }
+
+    if (!s->out_lumacoef) {
+        s->out_lumacoef = get_luma_coefficients(s->out_csp);
+        if (!s->out_lumacoef)
+            return AVERROR(EINVAL);
+        redo_rgb2yuv = 1;
+    }
+
+    fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
+                    in_desc->log2_chroma_w == out_desc->log2_chroma_w &&
+                    in_desc->comp[0].depth == out_desc->comp[0].depth;
+    s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
+    s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
+                             !memcmp(s->in_lumacoef, s->out_lumacoef,
+                                     sizeof(*s->in_lumacoef));
+    if (!s->yuv2yuv_passthrough) {
+        if (redo_yuv2rgb) {
+            double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
+            int off, bits, in_rng;
+
+            res = get_range_off(&off, &s->in_y_rng, &s->in_uv_rng,
+                                s->in_rng, in_desc->comp[0].depth);
+            if (res < 0)
+                return res;
+            for (n = 0; n < 8; n++)
+                s->yuv_offset[0][n] = off;
+            fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
+            invert_matrix3x3(rgb2yuv, yuv2rgb);
+            bits = 1 << (in_desc->comp[0].depth - 1);
+            for (n = 0; n < 3; n++) {
+                for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
+                    s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
+                    for (o = 1; o < 8; o++)
+                        s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
+                }
+            }
+            s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
+                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
+            emms = 1;
+        }
+
+        if (redo_rgb2yuv) {
+            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
+            int off, out_rng, bits;
+
+            res = get_range_off(&off, &s->out_y_rng, &s->out_uv_rng,
+                                s->out_rng, out_desc->comp[0].depth);
+            if (res < 0)
+                return res;
+            for (n = 0; n < 8; n++)
+                s->yuv_offset[1][n] = off;
+            fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
+            bits = 1 << (29 - out_desc->comp[0].depth);
+            for (n = 0; n < 3; n++) {
+                for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
+                    s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
+                    for (o = 1; o < 8; o++)
+                        s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
+                }
+            }
+            s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
+                                       [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
+            emms = 1;
+        }
+
+        if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
+            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
+            double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
+            double yuv2yuv[3][3];
+            int in_rng, out_rng;
+
+            mul3x3(yuv2yuv, yuv2rgb, rgb2yuv);
+            for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
+                for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
+                    s->yuv2yuv_coeffs[m][n][0] =
+                        lrint(16384 * yuv2yuv[m][n] * out_rng / in_rng);
+                    for (o = 1; o < 8; o++)
+                        s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
+                }
+            }
+            s->yuv2yuv = s->dsp.yuv2yuv[(in_desc->comp[0].depth - 8) >> 1]
+                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
+        }
+    }
+
+    if (emms)
+        emms_c();
+
+    return 0;
+}
+
+static int init(AVFilterContext *ctx)
+{
+    ColorSpaceContext *s = ctx->priv;
+
+    ff_colorspacedsp_init(&s->dsp);
+
+    return 0;
+}
+
+static void uninit(AVFilterContext *ctx)
+{
+    ColorSpaceContext *s = ctx->priv;
+
+    av_freep(&s->rgb[0]);
+    av_freep(&s->rgb[1]);
+    av_freep(&s->rgb[2]);
+    s->rgb_sz = 0;
+
+    av_freep(&s->lin_lut);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+    AVFilterContext *ctx = link->dst;
+    AVFilterLink *outlink = ctx->outputs[0];
+    ColorSpaceContext *s = ctx->priv;
+    // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
+    // input one if it is writable *OR* the actual literal values of in_*
+    // and out_* are identical (not just their respective properties)
+    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+    int res;
+    ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
+    unsigned rgb_sz = rgb_stride * in->height;
+
+    out->color_primaries = s->out_prm;
+    out->color_trc       = s->out_trc;
+    out->colorspace      = s->out_csp;
+    out->color_range     = s->out_rng;
+    if (rgb_sz != s->rgb_sz) {
+        av_freep(&s->rgb[0]);
+        av_freep(&s->rgb[1]);
+        av_freep(&s->rgb[2]);
+        s->rgb_sz = 0;
+
+        s->rgb[0] = av_malloc(rgb_sz);
+        s->rgb[1] = av_malloc(rgb_sz);
+        s->rgb[2] = av_malloc(rgb_sz);
+        if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2]) {
+            uninit(ctx);
+            return AVERROR(ENOMEM);
+        }
+        s->rgb_sz = rgb_sz;
+    }
+    res = create_filtergraph(s, in, out);
+    if (res < 0)
+        return res;
+    s->rgb_stride = rgb_stride / sizeof(int16_t);
+    convert(s, in, out);
+
+    return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+    static const enum AVPixelFormat pix_fmts[] = {
+        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
+        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+        AV_PIX_FMT_NONE
+    };
+    int res;
+    ColorSpaceContext *s = ctx->priv;
+    AVFilterFormats *formats = ff_make_format_list(pix_fmts);
+
+    if (!formats)
+        return AVERROR(ENOMEM);
+    res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
+    if (res < 0)
+        return res;
+    formats = NULL;
+    res = ff_add_format(&formats, s->out_format);
+    if (res < 0)
+        return res;
+
+    return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+    AVFilterLink *inlink = outlink->src->inputs[0];
+
+    outlink->w = inlink->w;
+    outlink->h = inlink->h;
+    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+    outlink->time_base = inlink->time_base;
+
+    return 0;
+}
+
+#define OFFSET(x) offsetof(ColorSpaceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
+
+static const AVOption colorspace_options[] = {
+    { "space",      "Output colorspace",
+      OFFSET(out_csp),   AV_OPT_TYPE_INT, { .str = NULL },
+      AVCOL_SPC_RGB, AVCOL_SPC_BT2020_CL, FLAGS, "csp" },
+    ENUM("rgb",         AVCOL_SPC_RGB,         "csp"),
+    ENUM("bt709",       AVCOL_SPC_BT709,       "csp"),
+    ENUM("unspecified", AVCOL_SPC_UNSPECIFIED, "csp"),
+    ENUM("reserved",    AVCOL_SPC_RESERVED,    "csp"),
+    ENUM("bt470bg",     AVCOL_SPC_BT470BG,     "csp"),
+    ENUM("smpte170m",   AVCOL_SPC_SMPTE170M,   "csp"),
+    ENUM("smpte240m",   AVCOL_SPC_SMPTE240M,   "csp"),
+    ENUM("ycocg",       AVCOL_SPC_YCOCG,       "csp"),
+    ENUM("bt2020ncl",   AVCOL_SPC_BT2020_NCL,  "csp"),
+    ENUM("bt2020cl",    AVCOL_SPC_BT2020_CL,   "csp"),
+
+    { "range",      "Output color range",
+      OFFSET(out_rng),   AV_OPT_TYPE_INT, { .str = NULL },
+      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_JPEG, FLAGS, "rng" },
+    ENUM("unspecified", AVCOL_RANGE_UNSPECIFIED, "rng"),
+    ENUM("mpeg",        AVCOL_RANGE_MPEG,        "rng"),
+    ENUM("jpeg",        AVCOL_RANGE_JPEG,        "rng"),
+
+    { "primaries",  "Output color primaries",
+      OFFSET(out_prm),   AV_OPT_TYPE_INT, { .str = NULL },
+      AVCOL_PRI_RESERVED0, AVCOL_PRI_SMPTEST428_1, FLAGS, "prm" },
+    ENUM("reserved0",    AVCOL_PRI_RESERVED0,    "prm"),
+    ENUM("bt709",        AVCOL_PRI_BT709,        "prm"),
+    ENUM("unspecified",  AVCOL_PRI_UNSPECIFIED,  "prm"),
+    ENUM("reserved",     AVCOL_PRI_RESERVED,     "prm"),
+    ENUM("bt470m",       AVCOL_PRI_BT470M,       "prm"),
+    ENUM("bt470bg",      AVCOL_PRI_BT470BG,      "prm"),
+    ENUM("smpte170m",    AVCOL_PRI_SMPTE170M,    "prm"),
+    ENUM("smpte240m",    AVCOL_PRI_SMPTE240M,    "prm"),
+    ENUM("film",         AVCOL_PRI_FILM,         "prm"),
+    ENUM("bt2020",       AVCOL_PRI_BT2020,       "prm"),
+    ENUM("smptest428-1", AVCOL_PRI_SMPTEST428_1, "prm"),
+
+    { "trc",        "Output transfer characteristics",
+      OFFSET(out_trc),   AV_OPT_TYPE_INT, { .str = NULL },
+      AVCOL_TRC_RESERVED0, AVCOL_TRC_SMPTEST428_1, FLAGS, "trc" },
+    ENUM("reserved0",    AVCOL_TRC_RESERVED0,    "trc"),
+    ENUM("bt709",        AVCOL_TRC_BT709,        "trc"),
+    ENUM("unspecified",  AVCOL_TRC_UNSPECIFIED,  "trc"),
+    ENUM("reserved",     AVCOL_TRC_RESERVED,     "trc"),
+    ENUM("gamma22",      AVCOL_TRC_GAMMA22,      "trc"),
+    ENUM("gamma28",      AVCOL_TRC_GAMMA28,      "trc"),
+    ENUM("smpte170m",    AVCOL_TRC_SMPTE170M,    "trc"),
+    ENUM("smpte240m",    AVCOL_TRC_SMPTE240M,    "trc"),
+    ENUM("linear",       AVCOL_TRC_LINEAR,       "trc"),
+    ENUM("log",          AVCOL_TRC_LOG,          "trc"),
+    ENUM("log-sqrt",     AVCOL_TRC_LOG_SQRT,     "trc"),
+    ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
+    ENUM("bt1361-ecg",   AVCOL_TRC_BT1361_ECG,   "trc"),
+    ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
+    ENUM("bt2020-10",    AVCOL_TRC_BT2020_10,    "trc"),
+    ENUM("bt2020-12",    AVCOL_TRC_BT2020_12,    "trc"),
+    ENUM("smptest2084",  AVCOL_TRC_SMPTEST2084,  "trc"),
+    ENUM("smptest428-1", AVCOL_TRC_SMPTEST428_1, "trc"),
+
+    { "format",   "Output pixel format",
+      OFFSET(out_format), AV_OPT_TYPE_INT,  { .i64 = 0    },
+      AV_PIX_FMT_YUV420P, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
+    ENUM("yuv420p",   AV_PIX_FMT_YUV420P,   "fmt"),
+    ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
+    ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
+    ENUM("yuv422p",   AV_PIX_FMT_YUV422P,   "fmt"),
+    ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
+    ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
+    ENUM("yuv444p",   AV_PIX_FMT_YUV444P,   "fmt"),
+    ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
+    ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
+
+    { "fast",     "Ignore primary chromaticity and gamma correction",
+      OFFSET(fast_mode), AV_OPT_TYPE_BOOL,  { .i64 = 0    },
+      0, 1, FLAGS },
+    { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorspace);
+
+static const AVFilterPad inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+    },
+    { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_props,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_colorspace = {
+    .name            = "colorspace",
+    .description     = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
+    .init            = init,
+    .uninit          = uninit,
+    .query_formats   = query_formats,
+    .priv_size       = sizeof(ColorSpaceContext),
+    .priv_class      = &colorspace_class,
+    .inputs          = inputs,
+    .outputs         = outputs,
+};
-- 
2.1.2


