[FFmpeg-devel] [PATCH] lavfi: new colorspace conversion filter.

Ronald S. Bultje rsbultje at gmail.com
Fri Apr 1 02:29:37 CEST 2016


The intent here is similar to colormatrix, but it's LGPLv2.1-or-later
(instead of GPLv2.0) and supports gamma/chromaticity correction.
---
 doc/filters.texi                     | 183 +++++++
 libavfilter/Makefile                 |   1 +
 libavfilter/allfilters.c             |   1 +
 libavfilter/colorspacedsp.c          | 130 +++++
 libavfilter/colorspacedsp.h          |  51 ++
 libavfilter/colorspacedsp_template.c | 256 ++++++++++
 libavfilter/vf_colorspace.c          | 909 +++++++++++++++++++++++++++++++++++
 7 files changed, 1531 insertions(+)
 create mode 100644 libavfilter/colorspacedsp.c
 create mode 100644 libavfilter/colorspacedsp.h
 create mode 100644 libavfilter/colorspacedsp_template.c
 create mode 100644 libavfilter/vf_colorspace.c

diff --git a/doc/filters.texi b/doc/filters.texi
index 528e0f8..93b260b 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -4888,6 +4888,189 @@ For example to convert from BT.601 to SMPTE-240M, use the command:
 colormatrix=bt601:smpte240m
 @end example
 
+ at section colorspace
+
+Convert colorspace, transfer characteristics or color primaries.
+
+The filter accepts the following options:
+
+ at table @option
+ at item all
+Specify all color properties at once.
+
+The accepted values are:
+ at table @samp
+ at item bt470m
+BT.470M
+
+ at item bt470bg
+BT.470BG
+
+ at item bt601-6-525
+BT.601-6 525
+
+ at item bt601-6-625
+BT.601-6 625
+
+ at item bt709
+BT.709
+
+ at item smpte170m
+SMPTE-170M
+
+ at item smpte240m
+SMPTE-240M
+
+ at item bt2020
+BT.2020
+
+ at end table
+
+ at item space
+Specify output colorspace.
+
+The accepted values are:
+ at table @samp
+ at item bt709
+BT.709
+
+ at item fcc
+FCC
+
+ at item bt470bg
+BT.470BG or BT.601-6 625
+
+ at item smpte170m
+SMPTE-170M or BT.601-6 525
+
+ at item smpte240m
+SMPTE-240M
+
+ at item bt2020ncl
+BT.2020 with non-constant luminance
+
+ at end table
+
+ at item trc
+Specify output transfer characteristics.
+
+The accepted values are:
+ at table @samp
+ at item bt709
+BT.709
+
+ at item gamma22
+Constant gamma of 2.2
+
+ at item gamma28
+Constant gamma of 2.8
+
+ at item smpte170m
+SMPTE-170M, BT.601-6 625 or BT.601-6 525
+
+ at item smpte240m
+SMPTE-240M
+
+ at item bt2020-10
+BT.2020 for 10-bit content
+
+ at item bt2020-12
+BT.2020 for 12-bit content
+
+ at end table
+
+ at item prm
+Specify output color primaries.
+
+The accepted values are:
+ at table @samp
+ at item bt709
+BT.709
+
+ at item bt470m
+BT.470M
+
+ at item bt470bg
+BT.470BG or BT.601-6 625
+
+ at item smpte170m
+SMPTE-170M or BT.601-6 525
+
+ at item smpte240m
+SMPTE-240M
+
+ at item bt2020
+BT.2020
+
+ at end table
+
+ at item rng
+Specify output color range.
+
+The accepted values are:
+ at table @samp
+ at item mpeg
+MPEG (restricted) range
+
+ at item jpeg
+JPEG (full) range
+
+ at end table
+
+ at item format
+Specify output color format.
+
+The accepted values are:
+ at table @samp
+ at item yuv420p
+YUV 4:2:0 planar 8-bit
+
+ at item yuv420p10
+YUV 4:2:0 planar 10-bit
+
+ at item yuv420p12
+YUV 4:2:0 planar 12-bit
+
+ at item yuv422p
+YUV 4:2:2 planar 8-bit
+
+ at item yuv422p10
+YUV 4:2:2 planar 10-bit
+
+ at item yuv422p12
+YUV 4:2:2 planar 12-bit
+
+ at item yuv444p
+YUV 4:4:4 planar 8-bit
+
+ at item yuv444p10
+YUV 4:4:4 planar 10-bit
+
+ at item yuv444p12
+YUV 4:4:4 planar 12-bit
+
+ at end table
+
+ at item fast
+Do a fast conversion, which skips gamma/primary correction. This will take
+significantly less CPU, but will be mathematically incorrect. To get output
+compatible with that produced by the colormatrix filter, use fast=1.
+ at end table
+
+The filter converts the transfer characteristics, color space and color
+primaries to the specified user values. The output value, if not specified,
+is set to a default value based on the "all" property. If that property is
+also not specified, the filter will log an error. The output color range and
+format default to the same value as the input color range and format. The
+input transfer characteristics, color space, color primaries and color range
+should be set on the input data. If any of these are missing, the filter will
+log an error and no conversion will take place.
+
+For example to convert the input to SMPTE-240M, use the command:
+ at example
+colorspace=smpte240m
+ at end example
+
 @section convolution
 
 Apply convolution 3x3 or 5x5 filter.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index b6e1999..9b7546d 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -131,6 +131,7 @@ OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER)      += vf_colorchannelmixer.o
 OBJS-$(CONFIG_COLORKEY_FILTER)               += vf_colorkey.o
 OBJS-$(CONFIG_COLORLEVELS_FILTER)            += vf_colorlevels.o
 OBJS-$(CONFIG_COLORMATRIX_FILTER)            += vf_colormatrix.o
+OBJS-$(CONFIG_COLORSPACE_FILTER)             += vf_colorspace.o colorspacedsp.o
 OBJS-$(CONFIG_CONVOLUTION_FILTER)            += vf_convolution.o
 OBJS-$(CONFIG_COPY_FILTER)                   += vf_copy.o
 OBJS-$(CONFIG_COREIMAGE_FILTER)              += vf_coreimage.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 5c18fd1..3fc450f 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -152,6 +152,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER(COLORKEY,       colorkey,       vf);
     REGISTER_FILTER(COLORLEVELS,    colorlevels,    vf);
     REGISTER_FILTER(COLORMATRIX,    colormatrix,    vf);
+    REGISTER_FILTER(COLORSPACE,     colorspace,     vf);
     REGISTER_FILTER(CONVOLUTION,    convolution,    vf);
     REGISTER_FILTER(COPY,           copy,           vf);
     REGISTER_FILTER(COREIMAGE,      coreimage,      vf);
diff --git a/libavfilter/colorspacedsp.c b/libavfilter/colorspacedsp.c
new file mode 100644
index 0000000..e71bf1f
--- /dev/null
+++ b/libavfilter/colorspacedsp.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje at gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "colorspacedsp.h"
+
+#define SS_W 0
+#define SS_H 0
+
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+#undef SS_W
+#undef SS_H
+
+#define SS_W 1
+#define SS_H 0
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+#undef SS_W
+#undef SS_H
+
+#define SS_W 1
+#define SS_H 1
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+static void multiply3x3_c(int16_t *buf[3], ptrdiff_t stride,
+                          int w, int h, const int16_t m[3][3][8]) /* only lane 0 of each [8] coeff vector is used; presumably replicated for SIMD — confirm */
+{ /* in-place per-pixel 3x3 matrix multiply on three int16 planes */
+    int y, x;
+    int16_t *buf0 = buf[0], *buf1 = buf[1], *buf2 = buf[2];
+
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int v0 = buf0[x], v1 = buf1[x], v2 = buf2[x]; /* read all three inputs before overwriting any plane */
+
+            buf0[x] = av_clip_int16((m[0][0][0] * v0 + m[0][1][0] * v1 +
+                                     m[0][2][0] * v2 + 8192) >> 14); /* Q14 coefficients; +8192 = rounding to nearest */
+            buf1[x] = av_clip_int16((m[1][0][0] * v0 + m[1][1][0] * v1 +
+                                     m[1][2][0] * v2 + 8192) >> 14);
+            buf2[x] = av_clip_int16((m[2][0][0] * v0 + m[2][1][0] * v1 +
+                                     m[2][2][0] * v2 + 8192) >> 14);
+        }
+
+        buf0 += stride; /* stride counts int16 elements, not bytes */
+        buf1 += stride;
+        buf2 += stride;
+    }
+}
+
+void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
+{ /* install the C implementations; indices are [depth: 0=8bit,1=10bit,2=12bit][ss: 0=444,1=422,2=420] */
+    dsp->yuv2rgb[0][0] = yuv2rgb_444_8_c;
+    dsp->yuv2rgb[0][1] = yuv2rgb_422_8_c;
+    dsp->yuv2rgb[0][2] = yuv2rgb_420_8_c;
+    dsp->yuv2rgb[1][0] = yuv2rgb_444_10_c;
+    dsp->yuv2rgb[1][1] = yuv2rgb_422_10_c;
+    dsp->yuv2rgb[1][2] = yuv2rgb_420_10_c;
+    dsp->yuv2rgb[2][0] = yuv2rgb_444_12_c;
+    dsp->yuv2rgb[2][1] = yuv2rgb_422_12_c;
+    dsp->yuv2rgb[2][2] = yuv2rgb_420_12_c;
+
+    dsp->rgb2yuv[0][0] = rgb2yuv_444_8_c;
+    dsp->rgb2yuv[0][1] = rgb2yuv_422_8_c;
+    dsp->rgb2yuv[0][2] = rgb2yuv_420_8_c;
+    dsp->rgb2yuv[1][0] = rgb2yuv_444_10_c;
+    dsp->rgb2yuv[1][1] = rgb2yuv_422_10_c;
+    dsp->rgb2yuv[1][2] = rgb2yuv_420_10_c;
+    dsp->rgb2yuv[2][0] = rgb2yuv_444_12_c;
+    dsp->rgb2yuv[2][1] = rgb2yuv_422_12_c;
+    dsp->rgb2yuv[2][2] = rgb2yuv_420_12_c;
+
+    dsp->yuv2yuv[0][0] = yuv2yuv_444_8_c;
+    dsp->yuv2yuv[0][1] = yuv2yuv_422_8_c;
+    dsp->yuv2yuv[0][2] = yuv2yuv_420_8_c;
+    dsp->yuv2yuv[1][0] = yuv2yuv_444_10_c;
+    dsp->yuv2yuv[1][1] = yuv2yuv_422_10_c;
+    dsp->yuv2yuv[1][2] = yuv2yuv_420_10_c;
+    dsp->yuv2yuv[2][0] = yuv2yuv_444_12_c;
+    dsp->yuv2yuv[2][1] = yuv2yuv_422_12_c;
+    dsp->yuv2yuv[2][2] = yuv2yuv_420_12_c;
+
+    dsp->multiply3x3 = multiply3x3_c;
+}
diff --git a/libavfilter/colorspacedsp.h b/libavfilter/colorspacedsp.h
new file mode 100644
index 0000000..89c1e1b
--- /dev/null
+++ b/libavfilter/colorspacedsp.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje at gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_COLORSPACEDSP_H
+#define AVFILTER_COLORSPACEDSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef void (*yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, /* rgb_stride counts int16 elements; yuv_stride[] counts bytes (see template) */
+                           uint8_t *yuv[3], ptrdiff_t yuv_stride[3],
+                           int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], /* coeffs replicated x8, presumably for SIMD — C uses lane 0 */
+                           const int16_t yuv_offset[8]);
+typedef void (*rgb2yuv_fn)(uint8_t *yuv[3], ptrdiff_t yuv_stride[3], /* same stride conventions as yuv2rgb_fn */
+                           int16_t *rgb[3], ptrdiff_t rgb_stride,
+                           int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+                           const int16_t yuv_offset[8]);
+typedef void (*yuv2yuv_fn)(uint8_t *yuv_out[3], ptrdiff_t yuv_out_stride[3], /* direct yuv->yuv conversion, no rgb intermediate */
+                           uint8_t *yuv_in[3], ptrdiff_t yuv_in_stride[3],
+                           int w, int h, const int16_t yuv2yuv_coeffs[3][3][8],
+                           const int16_t yuv_offset[2][8]); /* [0] = input offset, [1] = output offset */
+
+typedef struct ColorSpaceDSPContext {
+    yuv2rgb_fn yuv2rgb[3 /* 0: 8bit, 1: 10bit, 2: 12bit */][3 /* 0: 444, 1: 422, 2: 420 */];
+    rgb2yuv_fn rgb2yuv[3 /* 0: 8bit, 1: 10bit, 2: 12bit */][3 /* 0: 444, 1: 422, 2: 420 */];
+    yuv2yuv_fn yuv2yuv[3 /* 0: 8bit, 1: 10bit, 2: 12bit */][3 /* 0: 444, 1: 422, 2: 420 */];
+
+    void (*multiply3x3)(int16_t *data[3], ptrdiff_t stride, /* in-place 3x3 matrix multiply; stride in elements */
+                        int w, int h, const int16_t m[3][3][8]);
+} ColorSpaceDSPContext;
+
+void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp);
+
+#endif /* AVFILTER_COLORSPACEDSP_H */
diff --git a/libavfilter/colorspacedsp_template.c b/libavfilter/colorspacedsp_template.c
new file mode 100644
index 0000000..a997583
--- /dev/null
+++ b/libavfilter/colorspacedsp_template.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje at gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavcodec/bit_depth_template.c"
+
+#undef fn
+#undef avg
+#if SS_W == 0
+#define fn(x) FUNCC(x##_444)
+#define avg(a,b,c,d) (a)
+#elif SS_H == 0
+#define fn(x) FUNCC(x##_422)
+#define avg(a,b,c,d) ((a + b + 1) >> 1)
+#else
+#define fn(x) FUNCC(x##_420)
+#define avg(a,b,c,d) ((a + b + c + d + 2) >> 2)
+#endif
+
+static void fn(yuv2rgb)(int16_t *rgb[3], ptrdiff_t rgb_stride,
+                        uint8_t *_yuv[3], ptrdiff_t yuv_stride[3],
+                        int w, int h, const int16_t yuv2rgb_coeffs[3][3][8],
+                        const int16_t yuv_offset[8])
+{ /* convert a subsampled yuv image to full-resolution int16 rgb planes; one iteration handles one chroma sample and its SS_W*SS_H luma samples */
+    pixel **yuv = (pixel **) _yuv;
+    const pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+    int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+    int y, x;
+    int cy = yuv2rgb_coeffs[0][0][0]; /* only lane 0 of each [8] vector is used in the C path */
+    int crv = yuv2rgb_coeffs[0][2][0];
+    int cgu = yuv2rgb_coeffs[1][1][0];
+    int cgv = yuv2rgb_coeffs[1][2][0];
+    int cbu = yuv2rgb_coeffs[2][1][0];
+    int sh = BIT_DEPTH - 1; /* depth-dependent coefficient scaling — see table setup in vf_colorspace.c */
+    int uv_offset = 128 << (BIT_DEPTH - 8); /* chroma zero point for this depth */
+
+    av_assert2(yuv2rgb_coeffs[0][1][0] == 0); /* R has no U term */
+    av_assert2(yuv2rgb_coeffs[2][2][0] == 0); /* B has no V term */
+    av_assert2(yuv2rgb_coeffs[1][0][0] == cy && yuv2rgb_coeffs[2][0][0] == cy); /* same Y coeff in all rows */
+
+#if SS_W == 1
+    w = (w + 1) >> 1; /* loop over chroma samples, so halve the counts for subsampled planes */
+#if SS_H == 1
+    h = (h + 1) >> 1;
+#endif
+#endif
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int y00 = yuv0[x << SS_W] - yuv_offset[0];
+#if SS_W == 1
+            int y01 = yuv0[2 * x + 1] - yuv_offset[0];
+#if SS_H == 1
+            int y10 = yuv0[yuv_stride[0] / sizeof(pixel) + 2 * x] - yuv_offset[0]; /* yuv_stride is in bytes */
+            int y11 = yuv0[yuv_stride[0] / sizeof(pixel) + 2 * x + 1] - yuv_offset[0];
+#endif
+#endif
+            int u = yuv1[x] - uv_offset, v = yuv2[x] - uv_offset; /* shared by all covered luma samples */
+
+            rgb0[x << SS_W]              = av_clip_int16((y00 * cy + crv * v + 8192) >> sh); /* R = cy*Y' + crv*V */
+#if SS_W == 1
+            rgb0[2 * x + 1]              = av_clip_int16((y01 * cy + crv * v + 8192) >> sh);
+#if SS_H == 1
+            rgb0[2 * x + rgb_stride]     = av_clip_int16((y10 * cy + crv * v + 8192) >> sh); /* rgb_stride is in int16 elements */
+            rgb0[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + crv * v + 8192) >> sh);
+#endif
+#endif
+
+            rgb1[x << SS_W]              = av_clip_int16((y00 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh); /* G = cy*Y' + cgu*U + cgv*V */
+#if SS_W == 1
+            rgb1[2 * x + 1]              = av_clip_int16((y01 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+#if SS_H == 1
+            rgb1[2 * x + rgb_stride]     = av_clip_int16((y10 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+            rgb1[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + cgu * u +
+                                                          cgv * v + 8192) >> sh);
+#endif
+#endif
+
+            rgb2[x << SS_W]              = av_clip_int16((y00 * cy + cbu * u + 8192) >> sh); /* B = cy*Y' + cbu*U */
+#if SS_W == 1
+            rgb2[2 * x + 1]              = av_clip_int16((y01 * cy + cbu * u + 8192) >> sh);
+#if SS_H == 1
+            rgb2[2 * x + rgb_stride]     = av_clip_int16((y10 * cy + cbu * u + 8192) >> sh);
+            rgb2[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + cbu * u + 8192) >> sh);
+#endif
+#endif
+        }
+
+        yuv0 += (yuv_stride[0] * (1 << SS_H)) / sizeof(pixel); /* luma/rgb advance 2 rows per iteration when SS_H == 1 */
+        yuv1 += yuv_stride[1] / sizeof(pixel);
+        yuv2 += yuv_stride[2] / sizeof(pixel);
+        rgb0 += rgb_stride * (1 << SS_H);
+        rgb1 += rgb_stride * (1 << SS_H);
+        rgb2 += rgb_stride * (1 << SS_H);
+    }
+}
+
+static void fn(rgb2yuv)(uint8_t *_yuv[3], ptrdiff_t yuv_stride[3],
+                        int16_t *rgb[3], ptrdiff_t s,
+                        int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+                        const int16_t yuv_offset[8])
+{ /* convert int16 rgb planes to subsampled yuv; chroma is computed from the avg() of the SS_W*SS_H covered rgb samples */
+    pixel **yuv = (pixel **) _yuv;
+    pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+    const int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+    int y, x;
+    int sh = 29 - BIT_DEPTH; /* depth-dependent down-shift back to pixel range */
+    int rnd = 1 << (sh - 1); /* round to nearest */
+    int cry = rgb2yuv_coeffs[0][0][0]; /* lane 0 of each [8] coeff vector; row 0 = Y, row 1 = U, row 2 = V */
+    int cgy = rgb2yuv_coeffs[0][1][0];
+    int cby = rgb2yuv_coeffs[0][2][0];
+    int cru = rgb2yuv_coeffs[1][0][0];
+    int cgu = rgb2yuv_coeffs[1][1][0];
+    int cbu = rgb2yuv_coeffs[1][2][0];
+    int crv = rgb2yuv_coeffs[2][0][0];
+    int cgv = rgb2yuv_coeffs[2][1][0];
+    int cbv = rgb2yuv_coeffs[2][2][0];
+    ptrdiff_t s0 = yuv_stride[0] / sizeof(pixel); /* luma stride in pixels (yuv_stride is in bytes; s is in int16 elements) */
+    int uv_offset = 128 << (BIT_DEPTH - 8); /* chroma zero point for this depth */
+
+#if SS_W == 1
+    w = (w + 1) >> 1; /* loop over chroma samples */
+#if SS_H == 1
+    h = (h + 1) >> 1;
+#endif
+#endif
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int r00 = rgb0[x << SS_W], g00 = rgb1[x << SS_W], b00 = rgb2[x << SS_W];
+#if SS_W == 1
+            int r01 = rgb0[x * 2 + 1], g01 = rgb1[x * 2 + 1], b01 = rgb2[x * 2 + 1];
+#if SS_H == 1
+            int r10 = rgb0[x * 2 + 0 + s], g10 = rgb1[x * 2 + 0 + s], b10 = rgb2[x * 2 + 0 + s]; /* second rgb row */
+            int r11 = rgb0[x * 2 + 1 + s], g11 = rgb1[x * 2 + 1 + s], b11 = rgb2[x * 2 + 1 + s];
+#endif
+#endif
+
+            yuv0[x << SS_W]      = av_clip_pixel(yuv_offset[0] +
+                                                 ((r00 * cry + g00 * cgy +
+                                                   b00 * cby + rnd) >> sh)); /* Y, per luma sample */
+#if SS_W == 1
+            yuv0[x * 2 + 1]      = av_clip_pixel(yuv_offset[0] +
+                                                 ((r01 * cry + g01 * cgy +
+                                                   b01 * cby + rnd) >> sh));
+#if SS_H == 1
+            yuv0[x * 2 + 0 + s0] = av_clip_pixel(yuv_offset[0] +
+                                                 ((r10 * cry + g10 * cgy +
+                                                   b10 * cby + rnd) >> sh));
+            yuv0[x * 2 + 1 + s0] = av_clip_pixel(yuv_offset[0] +
+                                                 ((r11 * cry + g11 * cgy +
+                                                   b11 * cby + rnd) >> sh));
+#endif
+#endif
+
+            yuv1[x]      = av_clip_pixel(uv_offset +
+                                         ((avg(r00, r01, r10, r11) * cru +
+                                           avg(g00, g01, g10, g11) * cgu +
+                                           avg(b00, b01, b10, b11) * cbu + rnd) >> sh)); /* U from box-averaged rgb */
+            yuv2[x]      = av_clip_pixel(uv_offset +
+                                         ((avg(r00, r01, r10, r11) * crv +
+                                           avg(g00, g01, g10, g11) * cgv +
+                                           avg(b00, b01, b10, b11) * cbv + rnd) >> sh)); /* V from box-averaged rgb */
+        }
+
+        yuv0 += s0 * (1 << SS_H); /* luma/rgb advance 2 rows per iteration when SS_H == 1 */
+        yuv1 += yuv_stride[1] / sizeof(pixel);
+        yuv2 += yuv_stride[2] / sizeof(pixel);
+        rgb0 += s * (1 << SS_H);
+        rgb1 += s * (1 << SS_H);
+        rgb2 += s * (1 << SS_H);
+    }
+}
+
+static void fn(yuv2yuv)(uint8_t *_dst[3], ptrdiff_t dst_stride[3],
+                        uint8_t *_src[3], ptrdiff_t src_stride[3],
+                        int w, int h, const int16_t c[3][3][8],
+                        const int16_t yuv_offset[2][8])
+{ /* direct yuv->yuv conversion through a Q14 3x3 matrix; yuv_offset[0]/[1] are the input/output luma offsets */
+    pixel **dst = (pixel **) _dst, **src = (pixel **) _src;
+    const pixel *src0 = src[0], *src1 = src[1], *src2 = src[2];
+    pixel *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
+    int y, x;
+    int uv_offset = 128 << (BIT_DEPTH - 8); /* chroma zero point for this depth */
+
+#if SS_W == 1
+    w = (w + 1) >> 1; /* loop over chroma samples */
+#if SS_H == 1
+    h = (h + 1) >> 1;
+#endif
+#endif
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < w; x++) {
+            int y00 = src0[x << SS_W] - yuv_offset[0][0];
+#if SS_W == 1
+            int y01 = src0[2 * x + 1] - yuv_offset[0][0];
+#if SS_H == 1
+            int y10 = src0[src_stride[0] / sizeof(pixel) + 2 * x] - yuv_offset[0][0]; /* strides are in bytes */
+            int y11 = src0[src_stride[0] / sizeof(pixel) + 2 * x + 1] - yuv_offset[0][0];
+#endif
+#endif
+            int u = src1[x] - uv_offset, v = src2[x] - uv_offset;
+
+            dst0[x << SS_W] = av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y00 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14)); /* Q14 coeffs; +8192 rounds */
+#if SS_W == 1
+            dst0[x * 2 + 1] = av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y01 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+#if SS_H == 1
+            dst0[x * 2 + 0 + dst_stride[0] / sizeof(pixel)] =
+                              av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y10 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+            dst0[x * 2 + 1 + dst_stride[0] / sizeof(pixel)] =
+                              av_clip_pixel(yuv_offset[1][0] +
+                                            ((c[0][0][0] * y11 + c[0][1][0] * u +
+                                              c[0][2][0] * v + 8192) >> 14));
+#endif
+#endif
+
+            dst1[x] = av_clip_pixel(uv_offset +
+                                    ((avg(y00, y01, y10, y11) * c[1][0][0] +
+                                      u * c[1][1][0] + v * c[1][2][0] + 8192) >> 14)); /* output chroma uses box-averaged luma */
+            dst2[x] = av_clip_pixel(uv_offset +
+                                    ((avg(y00, y01, y10, y11) * c[2][0][0] +
+                                      u * c[2][1][0] + v * c[2][2][0] + 8192) >> 14));
+        }
+
+        dst0 += (dst_stride[0] * (1 << SS_H)) / sizeof(pixel); /* luma advances 2 rows per iteration when SS_H == 1 */
+        dst1 += dst_stride[1] / sizeof(pixel);
+        dst2 += dst_stride[2] / sizeof(pixel);
+        src0 += (src_stride[0] * (1 << SS_H)) / sizeof(pixel);
+        src1 += src_stride[1] / sizeof(pixel);
+        src2 += src_stride[2] / sizeof(pixel);
+    }
+}
diff --git a/libavfilter/vf_colorspace.c b/libavfilter/vf_colorspace.c
new file mode 100644
index 0000000..7180489
--- /dev/null
+++ b/libavfilter/vf_colorspace.c
@@ -0,0 +1,909 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje at gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * @file
+ * Convert between colorspaces.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixfmt.h"
+
+#include "avfilter.h"
+#include "colorspacedsp.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum Colorspace {
+    CS_UNSPECIFIED,
+    CS_BT470M,
+    CS_BT470BG,
+    CS_BT601_6_525,
+    CS_BT601_6_625,
+    CS_BT709,
+    CS_SMPTE170M,
+    CS_SMPTE240M,
+    CS_BT2020,
+    CS_NB,
+};
+
+static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
+    [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
+    [CS_BT470M]      = AVCOL_TRC_GAMMA22,
+    [CS_BT470BG]     = AVCOL_TRC_GAMMA28,
+    [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
+    [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
+    [CS_BT709]       = AVCOL_TRC_BT709,
+    [CS_SMPTE170M]   = AVCOL_TRC_SMPTE170M,
+    [CS_SMPTE240M]   = AVCOL_TRC_SMPTE240M,
+    [CS_BT2020]      = AVCOL_TRC_BT2020_10,
+    [CS_NB]          = AVCOL_TRC_UNSPECIFIED,
+};
+
+static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
+    [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
+    [CS_BT470M]      = AVCOL_PRI_BT470M,
+    [CS_BT470BG]     = AVCOL_PRI_BT470BG,
+    [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
+    [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
+    [CS_BT709]       = AVCOL_PRI_BT709,
+    [CS_SMPTE170M]   = AVCOL_PRI_SMPTE170M,
+    [CS_SMPTE240M]   = AVCOL_PRI_SMPTE240M,
+    [CS_BT2020]      = AVCOL_PRI_BT2020,
+    [CS_NB]          = AVCOL_PRI_UNSPECIFIED,
+};
+
+static const enum AVColorSpace default_csp[CS_NB + 1] = {
+    [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
+    [CS_BT470M]      = AVCOL_SPC_FCC,
+    [CS_BT470BG]     = AVCOL_SPC_BT470BG,
+    [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
+    [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
+    [CS_BT709]       = AVCOL_SPC_BT709,
+    [CS_SMPTE170M]   = AVCOL_SPC_SMPTE170M,
+    [CS_SMPTE240M]   = AVCOL_SPC_SMPTE240M,
+    [CS_BT2020]      = AVCOL_SPC_BT2020_NCL,
+    [CS_NB]          = AVCOL_SPC_UNSPECIFIED,
+};
+
+struct ColorPrimaries {
+    double xw, yw, xr, yr, xg, yg, xb, yb;
+};
+
+struct TransferCharacteristics {
+    double alpha, beta, gamma, delta;
+};
+
+struct LumaCoefficients {
+    double cr, cg, cb;
+};
+
+typedef struct ColorSpaceContext {
+    const AVClass *class;
+
+    ColorSpaceDSPContext dsp;
+
+    enum Colorspace user_all;
+    enum AVColorSpace in_csp, out_csp, user_csp;
+    enum AVColorRange in_rng, out_rng, user_rng;
+    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc;
+    enum AVColorPrimaries in_prm, out_prm, user_prm;
+    enum AVPixelFormat in_format, user_format;
+    int fast_mode;
+
+    int16_t *rgb[3];
+    ptrdiff_t rgb_stride;
+    unsigned rgb_sz;
+
+    const struct ColorPrimaries *in_primaries, *out_primaries;
+    int lrgb2lrgb_passthrough;
+    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
+
+    const struct TransferCharacteristics *in_txchr, *out_txchr;
+    int rgb2rgb_passthrough;
+    int16_t *lin_lut, *delin_lut;
+
+    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
+    int yuv2yuv_passthrough, yuv2yuv_fastmode;
+    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
+    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
+    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
+    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
+    yuv2rgb_fn yuv2rgb;
+    rgb2yuv_fn rgb2yuv;
+    yuv2yuv_fn yuv2yuv;
+    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
+    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
+} ColorSpaceContext;
+
+// FIXME deal with odd width/heights (or just forbid it)
+// FIXME simd
+// FIXME add some asserts in random parts of the table generation code to ensure
+// that we never overflow, e.g. if coeffs are 14bit, they can't exceed [-2.0,2.0>
+// range, and I'm not entirely sure that's always true (e.g. yuv2yuv for bt2020
+// to/from 601, blue might go off quite a bit? If it exceeds, change the bitrange.
+// FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
+// FIXME test that the values in (de)lin_lut don't exceed their container storage
+// type size
+
+/*
+ * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
+ */
+static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB] = {
+    [AVCOL_SPC_FCC]        = { 0.30,   0.59,   0.11   },
+    [AVCOL_SPC_BT470BG]    = { 0.299,  0.587,  0.114  },
+    [AVCOL_SPC_SMPTE170M]  = { 0.299,  0.587,  0.114  },
+    [AVCOL_SPC_BT709]      = { 0.2126, 0.7152, 0.0722 },
+    [AVCOL_SPC_SMPTE240M]  = { 0.212,  0.701,  0.087  },
+    [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
+    [AVCOL_SPC_BT2020_CL]  = { 0.2627, 0.6780, 0.0593 },
+};
+
+static const struct LumaCoefficients *get_luma_coefficients(enum AVColorSpace csp)
+{ /* look up luma coefficients for a colorspace; NULL if out of range or unsupported */
+    const struct LumaCoefficients *coeffs;
+
+    if (csp >= AVCOL_SPC_NB)
+        return NULL;
+    coeffs = &luma_coefficients[csp];
+    if (!coeffs->cr) /* unlisted table slots are zero-initialized, so cr == 0 marks "no entry" */
+        return NULL;
+
+    return coeffs;
+}
+
+static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
+                               double rgb2yuv[3][3])
+{ /* build the RGB->YUV matrix from luma coefficients; rows are Y, U, V */
+    double bscale, rscale;
+
+    rgb2yuv[0][0] = coeffs->cr; /* Y = cr*R + cg*G + cb*B */
+    rgb2yuv[0][1] = coeffs->cg;
+    rgb2yuv[0][2] = coeffs->cb;
+    bscale = 0.5 / (coeffs->cb - 1.0); /* negative: U = 0.5*(B-Y)/(1-cb) */
+    rscale = 0.5 / (coeffs->cr - 1.0); /* negative: V = 0.5*(R-Y)/(1-cr) */
+    rgb2yuv[1][0] = bscale * coeffs->cr;
+    rgb2yuv[1][1] = bscale * coeffs->cg;
+    rgb2yuv[1][2] = 0.5; /* B coefficient of U reduces to exactly 0.5 */
+    rgb2yuv[2][0] = 0.5; /* R coefficient of V reduces to exactly 0.5 */
+    rgb2yuv[2][1] = rscale * coeffs->cg;
+    rgb2yuv[2][2] = rscale * coeffs->cb;
+}
+
+// FIXME I'm not actually sure whether gamma = 0.45 or gamma = 1.0 / 2.2 for
+// most of these entries
+static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
+    [AVCOL_TRC_BT709]     = { 1.099,  0.018,  0.45, 4.5 },
+    [AVCOL_TRC_GAMMA22]   = { 1.0,    0.0,    1.0 / 2.2, 0.0 },
+    [AVCOL_TRC_GAMMA28]   = { 1.0,    0.0,    1.0 / 2.8, 0.0 },
+    [AVCOL_TRC_SMPTE170M] = { 1.099,  0.018,  0.45, 4.5 },
+    [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
+    [AVCOL_TRC_BT2020_10] = { 1.099,  0.018,  0.45, 4.5 },
+    [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
+};
+
+static const struct TransferCharacteristics *
+    get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
+{ /* look up transfer characteristics; NULL if out of range or unsupported */
+    const struct TransferCharacteristics *coeffs;
+
+    if (trc >= AVCOL_TRC_NB)
+        return NULL;
+    coeffs = &transfer_characteristics[trc];
+    if (!coeffs->alpha) /* zero alpha marks an empty (unlisted) table slot */
+        return NULL;
+
+    return coeffs;
+}
+
+static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
+    [AVCOL_PRI_BT709]     = { 0.3127, 0.3290, 0.64,  0.33,  0.30,  0.60,  0.15,  0.06 },
+    [AVCOL_PRI_BT470M]    = { 0.3127, 0.3290, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+    [AVCOL_PRI_BT470BG]   = { 0.3127, 0.3290, 0.640, 0.330, 0.290, 0.600, 0.150, 0.060,},
+    [AVCOL_PRI_SMPTE170M] = { 0.3127, 0.3290, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+    [AVCOL_PRI_SMPTE240M] = { 0.3127, 0.3290, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+    [AVCOL_PRI_BT2020]    = { 0.3127, 0.3290, 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
+};
+
+static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
+{ /* look up primaries chromaticities; NULL if out of range or unsupported */
+    const struct ColorPrimaries *coeffs;
+
+    if (prm >= AVCOL_PRI_NB)
+        return NULL;
+    coeffs = &color_primaries[prm];
+    if (!coeffs->xw) /* zero white-point x marks an empty (unlisted) table slot */
+        return NULL;
+
+    return coeffs;
+}
+
+static void invert_matrix3x3(const double in[3][3], double out[3][3])
+{ /* 3x3 inverse via transposed cofactors (adjugate) divided by the determinant */
+    double m00 = in[0][0], m01 = in[0][1], m02 = in[0][2],
+           m10 = in[1][0], m11 = in[1][1], m12 = in[1][2],
+           m20 = in[2][0], m21 = in[2][1], m22 = in[2][2];
+    int i, j;
+    double det;
+
+    out[0][0] =  (m11 * m22 - m21 * m12);
+    out[0][1] = -(m01 * m22 - m21 * m02);
+    out[0][2] =  (m01 * m12 - m11 * m02);
+    out[1][0] = -(m10 * m22 - m20 * m12);
+    out[1][1] =  (m00 * m22 - m20 * m02);
+    out[1][2] = -(m00 * m12 - m10 * m02);
+    out[2][0] =  (m10 * m21 - m20 * m11);
+    out[2][1] = -(m00 * m21 - m20 * m01);
+    out[2][2] =  (m00 * m11 - m10 * m01);
+
+    det = m00 * out[0][0] + m10 * out[0][1] + m20 * out[0][2]; /* cofactor expansion along column 0 */
+    det = 1.0 / det; /* NOTE(review): no singularity check; det == 0 yields inf/nan — inputs are assumed invertible */
+
+    for (i = 0; i < 3; i++) {
+        for (j = 0; j < 3; j++)
+            out[i][j] *= det;
+    }
+}
+
+/*
+ * Build the linearize (lin_lut) and delinearize (delin_lut) gamma tables used
+ * around the linear-RGB primary conversion. Both tables have 32768 entries and
+ * share one allocation (delin_lut is the second half). Index n represents the
+ * value v = (n - 2048) / 28672, i.e. the nominal [0.0,1.0] signal occupies
+ * indices [2048,30720] and the remainder is head-/footroom. The transfer
+ * function is the piecewise power law parameterized by alpha/beta/gamma/delta
+ * from the TransferCharacteristics tables, mirrored for negative input.
+ * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
+ */
+static int fill_gamma_table(ColorSpaceContext *s)
+{
+    int n;
+    double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
+    double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
+    double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
+    double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
+    double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
+
+    s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
+    if (!s->lin_lut)
+        return AVERROR(ENOMEM);
+    s->delin_lut = &s->lin_lut[32768];
+    for (n = 0; n < 32768; n++) {
+        double v = (n - 2048.0) / 28672.0, d, l;
+
+        // delinearize: linear light -> output-encoded, using the output TRC
+        if (v <= -out_beta) {
+            d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
+        } else if (v < out_beta) {
+            d = out_delta * v;  // linear segment near zero
+        } else {
+            d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
+        }
+        s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
+
+        // linearize: input-encoded -> linear light, inverse of the input TRC
+        if (v <= -in_beta) {
+            l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
+        } else if (v < in_beta) {
+            l = v * in_idelta;  // inverse of the linear segment
+        } else {
+            l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
+        }
+        s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
+    }
+
+    return 0;
+}
+
+/*
+ * Compute the RGB -> XYZ matrix for a given set of primaries/white point,
+ * see e.g. http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
+ * The columns of (x/y, 1, z/y) per primary are scaled so that the white
+ * point maps to Y = 1: S = M^-1 * whitepoint_XYZ.
+ */
+static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs,
+                               double rgb2xyz[3][3])
+{
+    double i[3][3], sr, sg, sb, zw;
+
+    // unscaled columns: XYZ of each primary, normalized to Y = 1
+    rgb2xyz[0][0] = coeffs->xr / coeffs->yr;
+    rgb2xyz[0][1] = coeffs->xg / coeffs->yg;
+    rgb2xyz[0][2] = coeffs->xb / coeffs->yb;
+    rgb2xyz[1][0] = rgb2xyz[1][1] = rgb2xyz[1][2] = 1.0;
+    rgb2xyz[2][0] = (1.0 - coeffs->xr - coeffs->yr) / coeffs->yr;
+    rgb2xyz[2][1] = (1.0 - coeffs->xg - coeffs->yg) / coeffs->yg;
+    rgb2xyz[2][2] = (1.0 - coeffs->xb - coeffs->yb) / coeffs->yb;
+    // solve for the per-channel scales that reproduce the white point
+    invert_matrix3x3(rgb2xyz, i);
+    zw = 1.0 - coeffs->xw - coeffs->yw;
+    sr = i[0][0] * coeffs->xw + i[0][1] * coeffs->yw + i[0][2] * zw;
+    sg = i[1][0] * coeffs->xw + i[1][1] * coeffs->yw + i[1][2] * zw;
+    sb = i[2][0] * coeffs->xw + i[2][1] * coeffs->yw + i[2][2] * zw;
+    rgb2xyz[0][0] *= sr;
+    rgb2xyz[0][1] *= sg;
+    rgb2xyz[0][2] *= sb;
+    rgb2xyz[1][0] *= sr;
+    rgb2xyz[1][1] *= sg;
+    rgb2xyz[1][2] *= sb;
+    rgb2xyz[2][0] *= sr;
+    rgb2xyz[2][1] *= sg;
+    rgb2xyz[2][2] *= sb;
+}
+
+// 3x3 matrix product. NOTE the operand order: this computes
+// dst = src2 * src1 (src1 is applied first), i.e. the arguments are in
+// application order, not algebraic order -- callers rely on this.
+static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
+{
+    int m, n;
+
+    for (m = 0; m < 3; m++)
+        for (n = 0; n < 3; n++)
+            dst[m][n] = src2[m][0] * src1[0][n] +
+                        src2[m][1] * src1[1][n] +
+                        src2[m][2] * src1[2][n];
+}
+
+// Run all three planes of buf through the given LUT in place. stride is in
+// int16_t units; samples are biased by 2048 and clipped to 15 bits to form
+// the LUT index (matching the LUT domain built in fill_gamma_table()).
+static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
+                      int w, int h, const int16_t *lut)
+{
+    int plane, row, col;
+
+    for (plane = 0; plane < 3; plane++) {
+        int16_t *line = buf[plane];
+
+        for (row = 0; row < h; row++, line += stride) {
+            for (col = 0; col < w; col++)
+                line[col] = lut[av_clip_uintp2(2048 + line[col], 15)];
+        }
+    }
+}
+
+// Convert one frame: either copy through, use the combined yuv2yuv fast path,
+// or run the full yuv2rgb -> (linearize, rgb2rgb, delinearize) -> rgb2yuv
+// pipeline via the intermediate s->rgb planes.
+static void convert(ColorSpaceContext *s, AVFrame *in, AVFrame *out)
+{
+    int w = in->width, h = in->height;
+    ptrdiff_t in_linesize[3] = { in->linesize[0], in->linesize[1], in->linesize[2] };
+    ptrdiff_t out_linesize[3] = { out->linesize[0], out->linesize[1], out->linesize[2] };
+
+    // FIXME for simd, also make sure we do pictures with negative stride
+    // top-down so we don't overwrite lines with padding of data before it
+    // in the same buffer (same as swscale)
+
+    if (s->yuv2yuv_passthrough) {
+        av_frame_copy(out, in); // FIXME see comment in filter_frame()
+    } else if (s->yuv2yuv_fastmode) {
+        // FIXME possibly use a fast mode in case only the y range changes?
+        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
+        // are non-zero
+        s->yuv2yuv(out->data, out_linesize, in->data, in_linesize, w, h,
+                   s->yuv2yuv_coeffs, s->yuv_offset);
+    } else {
+        // FIXME maybe (for caching efficiency) do pipeline per-line instead of
+        // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
+        // 2 lines, for yuv420.)
+        /*
+         * General design:
+         * - yuv2rgb converts from whatever range the input was ([16-235/240] or
+         *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
+         *   of RGB in pseudo-restricted 15+sign bits. That means that the float
+         *   range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
+         *   range is used for overflow/underflow outside the representable
+         *   range of this RGB type. rgb2yuv is the exact opposite.
+         * - gamma correction is done using a LUT since that appears to work
+         *   fairly fast.
+         * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
+         *   (or rgb2yuv conversion) uses nearest-neighbour sampling to read
+         *   chroma pixels at luma resolution. If you want some more fancy
+         *   filter, you can use swscale to convert to yuv444p.
+         * - all coefficients are 14bit (so in the [-2.0,2.0] range).
+         */
+        s->yuv2rgb(s->rgb, s->rgb_stride, in->data, in_linesize, w, h,
+                   s->yuv2rgb_coeffs, s->yuv_offset[0]);
+        if (!s->rgb2rgb_passthrough) {
+            apply_lut(s->rgb, s->rgb_stride, w, h, s->lin_lut);
+            if (!s->lrgb2lrgb_passthrough)
+                s->dsp.multiply3x3(s->rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
+            apply_lut(s->rgb, s->rgb_stride, w, h, s->delin_lut);
+        }
+        s->rgb2yuv(out->data, out_linesize, s->rgb, s->rgb_stride, w, h,
+                   s->rgb2yuv_coeffs, s->yuv_offset[1]);
+    }
+}
+
+// Fill in the luma offset and luma/chroma range spans for the given color
+// range at the given bit depth. Returns 0 on success or AVERROR(EINVAL) for
+// unknown/unspecified ranges.
+static int get_range_off(int *off, int *y_rng, int *uv_rng,
+                         enum AVColorRange rng, int depth)
+{
+    int shift = depth - 8;
+
+    if (rng == AVCOL_RANGE_MPEG) {
+        // limited range: Y in [16,235], chroma in [16,240], scaled by depth
+        *off    = 16 << shift;
+        *y_rng  = 219 << shift;
+        *uv_rng = 224 << shift;
+    } else if (rng == AVCOL_RANGE_JPEG) {
+        // full range: [0, 2^depth - 1], no offset
+        *off = 0;
+        *y_rng = *uv_rng = (256 << shift) - 1;
+    } else {
+        return AVERROR(EINVAL);
+    }
+
+    return 0;
+}
+
+/*
+ * (Re)derive the conversion coefficients (lrgb2lrgb, gamma LUTs, yuv2rgb,
+ * rgb2yuv, and the combined yuv2yuv fast path) whenever the in/out frame
+ * color properties differ from the cached s->in_* / s->out_* state; cached
+ * pointers are invalidated on mismatch so unchanged parts are not recomputed
+ * on every frame. Returns 0 on success or a negative AVERROR on unsupported
+ * formats/properties.
+ * Fixed: the out_lumacoef error messages said "transfer characteristics"
+ * where they meant "colorspace" (copy-paste from the out_txchr block).
+ */
+static int create_filtergraph(AVFilterContext *ctx,
+                              const AVFrame *in, const AVFrame *out)
+{
+    ColorSpaceContext *s = ctx->priv;
+    const AVPixFmtDescriptor *in_desc  = av_pix_fmt_desc_get(in->format);
+    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
+    int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
+
+#define supported_depth(d) (d == 8 || d == 10 || d == 12)
+#define supported_subsampling(lcw, lch) \
+    ((lcw == 0 && lch == 0) || (lcw == 1 && lch == 0) || (lcw == 1 && lch == 1))
+#define supported_format(d) \
+    (d != NULL && d->nb_components == 3 && \
+     !(d->flags & AV_PIX_FMT_FLAG_RGB) && \
+     supported_depth(d->comp[0].depth) && \
+     supported_subsampling(d->log2_chroma_w, d->log2_chroma_h))
+
+    if (!supported_format(in_desc)) {
+        av_log(ctx, AV_LOG_ERROR,
+               "Unsupported input format %d (%s) or bitdepth (%d)\n",
+               in->format, av_get_pix_fmt_name(in->format),
+               in_desc ? in_desc->comp[0].depth : -1);
+        return AVERROR(EINVAL);
+    }
+    if (!supported_format(out_desc)) {
+        av_log(ctx, AV_LOG_ERROR,
+               "Unsupported output format %d (%s) or bitdepth (%d)\n",
+               out->format, av_get_pix_fmt_name(out->format),
+               out_desc ? out_desc->comp[0].depth : -1);
+        return AVERROR(EINVAL);
+    }
+
+    // invalidate cached state for any property that changed since last frame
+    if (in->color_primaries  != s->in_prm)  s->in_primaries  = NULL;
+    if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
+    if (in->color_trc        != s->in_trc)  s->in_txchr      = NULL;
+    if (out->color_trc       != s->out_trc) s->out_txchr     = NULL;
+    if (in->colorspace       != s->in_csp ||
+        in->color_range      != s->in_rng)  s->in_lumacoef   = NULL;
+    if (out->colorspace      != s->out_csp ||
+        out->color_range     != s->out_rng) s->out_lumacoef  = NULL;
+
+    if (!s->out_primaries || !s->in_primaries) {
+        s->in_prm = in->color_primaries;
+        s->in_primaries = get_color_primaries(s->in_prm);
+        if (!s->in_primaries) {
+            av_log(ctx, AV_LOG_ERROR,
+                   "Unsupported input primaries %d (%s)\n",
+                   s->in_prm, av_color_primaries_name(s->in_prm));
+            return AVERROR(EINVAL);
+        }
+        s->out_prm = out->color_primaries;
+        s->out_primaries = get_color_primaries(s->out_prm);
+        if (!s->out_primaries) {
+            if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
+                if (s->user_all == CS_UNSPECIFIED) {
+                    av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
+                } else {
+                    av_log(ctx, AV_LOG_ERROR,
+                           "Unsupported output color property %d\n", s->user_all);
+                }
+            } else {
+                av_log(ctx, AV_LOG_ERROR,
+                       "Unsupported output primaries %d (%s)\n",
+                       s->out_prm, av_color_primaries_name(s->out_prm));
+            }
+            return AVERROR(EINVAL);
+        }
+        s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
+                                           sizeof(*s->in_primaries));
+        if (!s->lrgb2lrgb_passthrough) {
+            double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
+
+            // in-RGB -> XYZ -> out-RGB, collapsed into one matrix
+            fill_rgb2xyz_table(s->out_primaries, rgb2xyz);
+            invert_matrix3x3(rgb2xyz, xyz2rgb);
+            fill_rgb2xyz_table(s->in_primaries, rgb2xyz);
+            mul3x3(rgb2rgb, rgb2xyz, xyz2rgb);
+            for (m = 0; m < 3; m++)
+                for (n = 0; n < 3; n++) {
+                    s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
+                    for (o = 1; o < 8; o++)
+                        s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
+                }
+
+            emms = 1;
+        }
+    }
+
+    if (!s->in_txchr) {
+        av_freep(&s->lin_lut);
+        s->in_trc = in->color_trc;
+        s->in_txchr = get_transfer_characteristics(s->in_trc);
+        if (!s->in_txchr) {
+            av_log(ctx, AV_LOG_ERROR,
+                   "Unsupported input transfer characteristics %d (%s)\n",
+                   s->in_trc, av_color_transfer_name(s->in_trc));
+            return AVERROR(EINVAL);
+        }
+    }
+
+    if (!s->out_txchr) {
+        av_freep(&s->lin_lut);
+        s->out_trc = out->color_trc;
+        s->out_txchr = get_transfer_characteristics(s->out_trc);
+        if (!s->out_txchr) {
+            if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
+                if (s->user_all == CS_UNSPECIFIED) {
+                    av_log(ctx, AV_LOG_ERROR,
+                           "Please specify output transfer characteristics\n");
+                } else {
+                    av_log(ctx, AV_LOG_ERROR,
+                           "Unsupported output color property %d\n", s->user_all);
+                }
+            } else {
+                av_log(ctx, AV_LOG_ERROR,
+                       "Unsupported output transfer characteristics %d (%s)\n",
+                       s->out_trc, av_color_transfer_name(s->out_trc));
+            }
+            return AVERROR(EINVAL);
+        }
+    }
+
+    s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
+                             !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
+    if (!s->rgb2rgb_passthrough && !s->lin_lut) {
+        res = fill_gamma_table(s);
+        if (res < 0)
+            return res;
+        emms = 1;
+    }
+
+    if (!s->in_lumacoef) {
+        s->in_csp = in->colorspace;
+        s->in_rng = in->color_range;
+        s->in_lumacoef = get_luma_coefficients(s->in_csp);
+        if (!s->in_lumacoef) {
+            av_log(ctx, AV_LOG_ERROR,
+                   "Unsupported input colorspace %d (%s)\n",
+                   s->in_csp, av_color_space_name(s->in_csp));
+            return AVERROR(EINVAL);
+        }
+        redo_yuv2rgb = 1;
+    }
+
+    if (!s->out_lumacoef) {
+        s->out_csp = out->colorspace;
+        s->out_rng = out->color_range;
+        s->out_lumacoef = get_luma_coefficients(s->out_csp);
+        if (!s->out_lumacoef) {
+            if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
+                if (s->user_all == CS_UNSPECIFIED) {
+                    av_log(ctx, AV_LOG_ERROR,
+                           "Please specify output colorspace\n");
+                } else {
+                    av_log(ctx, AV_LOG_ERROR,
+                           "Unsupported output color property %d\n", s->user_all);
+                }
+            } else {
+                av_log(ctx, AV_LOG_ERROR,
+                       "Unsupported output colorspace %d (%s)\n",
+                       s->out_csp, av_color_space_name(s->out_csp));
+            }
+            return AVERROR(EINVAL);
+        }
+        redo_rgb2yuv = 1;
+    }
+
+    fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
+                    in_desc->log2_chroma_w == out_desc->log2_chroma_w &&
+                    in_desc->comp[0].depth == out_desc->comp[0].depth;
+    s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
+    s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
+                             !memcmp(s->in_lumacoef, s->out_lumacoef,
+                                     sizeof(*s->in_lumacoef));
+    if (!s->yuv2yuv_passthrough) {
+        if (redo_yuv2rgb) {
+            double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
+            int off, bits, in_rng;
+
+            res = get_range_off(&off, &s->in_y_rng, &s->in_uv_rng,
+                                s->in_rng, in_desc->comp[0].depth);
+            if (res < 0) {
+                av_log(ctx, AV_LOG_ERROR,
+                       "Unsupported input color range %d (%s)\n",
+                       s->in_rng, av_color_range_name(s->in_rng));
+                return res;
+            }
+            for (n = 0; n < 8; n++)
+                s->yuv_offset[0][n] = off;
+            fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
+            invert_matrix3x3(rgb2yuv, yuv2rgb);
+            bits = 1 << (in_desc->comp[0].depth - 1);
+            for (n = 0; n < 3; n++) {
+                for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
+                    s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
+                    for (o = 1; o < 8; o++)
+                        s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
+                }
+            }
+            s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
+                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
+            emms = 1;
+        }
+
+        if (redo_rgb2yuv) {
+            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
+            int off, out_rng, bits;
+
+            res = get_range_off(&off, &s->out_y_rng, &s->out_uv_rng,
+                                s->out_rng, out_desc->comp[0].depth);
+            if (res < 0) {
+                av_log(ctx, AV_LOG_ERROR,
+                       "Unsupported output color range %d (%s)\n",
+                       s->out_rng, av_color_range_name(s->out_rng));
+                return res;
+            }
+            for (n = 0; n < 8; n++)
+                s->yuv_offset[1][n] = off;
+            fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
+            bits = 1 << (29 - out_desc->comp[0].depth);
+            for (n = 0; n < 3; n++) {
+                for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
+                    s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
+                    for (o = 1; o < 8; o++)
+                        s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
+                }
+            }
+            s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
+                                       [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
+            emms = 1;
+        }
+
+        if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
+            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
+            double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
+            double yuv2yuv[3][3];
+            int in_rng, out_rng;
+
+            // collapse yuv2rgb + rgb2yuv into a single yuv2yuv matrix
+            mul3x3(yuv2yuv, yuv2rgb, rgb2yuv);
+            for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
+                for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
+                    s->yuv2yuv_coeffs[m][n][0] =
+                        lrint(16384 * yuv2yuv[m][n] * out_rng / in_rng);
+                    for (o = 1; o < 8; o++)
+                        s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
+                }
+            }
+            s->yuv2yuv = s->dsp.yuv2yuv[(in_desc->comp[0].depth - 8) >> 1]
+                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
+        }
+    }
+
+    if (emms)
+        emms_c();
+
+    return 0;
+}
+
+static int init(AVFilterContext *ctx)
+{
+    ColorSpaceContext *priv = ctx->priv;
+
+    // populate the dsp function pointers once at filter creation
+    ff_colorspacedsp_init(&priv->dsp);
+    return 0;
+}
+
+static void uninit(AVFilterContext *ctx)
+{
+    ColorSpaceContext *s = ctx->priv;
+    int n;
+
+    // release the intermediate RGB planes and reset their cached size
+    for (n = 0; n < 3; n++)
+        av_freep(&s->rgb[n]);
+    s->rgb_sz = 0;
+
+    // delin_lut points into the same allocation, so this frees both tables
+    av_freep(&s->lin_lut);
+}
+
+/*
+ * Per-frame entry point: allocate the output frame, stamp the requested
+ * output color properties on it, (re)build the conversion state if needed,
+ * convert, and pass the result downstream.
+ * Fixed: ff_get_video_buffer() result was not NULL-checked (NULL deref on
+ * OOM), frame properties were never copied to the output, and the input
+ * frame was never freed (leaked once per frame, plus on every error path).
+ */
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+    AVFilterContext *ctx = link->dst;
+    AVFilterLink *outlink = ctx->outputs[0];
+    ColorSpaceContext *s = ctx->priv;
+    // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
+    // input one if it is writable *OR* the actual literal values of in_*
+    // and out_* are identical (not just their respective properties)
+    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+    int res;
+    ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
+    unsigned rgb_sz = rgb_stride * in->height;
+
+    if (!out) {
+        av_frame_free(&in);
+        return AVERROR(ENOMEM);
+    }
+    res = av_frame_copy_props(out, in);
+    if (res < 0) {
+        av_frame_free(&in);
+        av_frame_free(&out);
+        return res;
+    }
+
+    out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
+                           default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
+    if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
+
+        out->color_trc   = default_trc[FFMIN(s->user_all, CS_NB)];
+        // prefer the 12-bit BT.2020 curve on high-bitdepth outputs
+        if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
+            out->color_trc = AVCOL_TRC_BT2020_12;
+    } else {
+        out->color_trc   = s->user_trc;
+    }
+    out->colorspace      = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
+                           default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
+    out->color_range     = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
+                           in->color_range : s->user_rng;
+    // (re)allocate the intermediate RGB planes when the frame size changed
+    if (rgb_sz != s->rgb_sz) {
+        av_freep(&s->rgb[0]);
+        av_freep(&s->rgb[1]);
+        av_freep(&s->rgb[2]);
+        s->rgb_sz = 0;
+
+        s->rgb[0] = av_malloc(rgb_sz);
+        s->rgb[1] = av_malloc(rgb_sz);
+        s->rgb[2] = av_malloc(rgb_sz);
+        if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2]) {
+            uninit(ctx);
+            av_frame_free(&in);
+            av_frame_free(&out);
+            return AVERROR(ENOMEM);
+        }
+        s->rgb_sz = rgb_sz;
+    }
+    res = create_filtergraph(ctx, in, out);
+    if (res < 0) {
+        av_frame_free(&in);
+        av_frame_free(&out);
+        return res;
+    }
+    s->rgb_stride = rgb_stride / sizeof(int16_t);
+    convert(s, in, out);
+    av_frame_free(&in);
+
+    return ff_filter_frame(outlink, out);
+}
+
+// Negotiate pixel formats: if no explicit output format was requested, both
+// sides share the full supported list; otherwise the input gets the full
+// list and the output is pinned to the user-requested format.
+static int query_formats(AVFilterContext *ctx)
+{
+    static const enum AVPixelFormat pix_fmts[] = {
+        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
+        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+        AV_PIX_FMT_NONE
+    };
+    int res;
+    ColorSpaceContext *s = ctx->priv;
+    AVFilterFormats *formats = ff_make_format_list(pix_fmts);
+
+    if (!formats)
+        return AVERROR(ENOMEM);
+    if (s->user_format == AV_PIX_FMT_NONE)
+        return ff_set_common_formats(ctx, formats);
+    // attach the full list to the input side only
+    res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
+    if (res < 0)
+        return res;
+    // NOTE(review): on the error returns here the just-built format lists
+    // appear to be leaked -- verify against lavfi's formats ownership rules
+    formats = NULL;
+    res = ff_add_format(&formats, s->user_format);
+    if (res < 0)
+        return res;
+
+    return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+    AVFilterLink *inlink = outlink->src->inputs[0];
+
+    // geometry and timing pass through unchanged; only color properties
+    // of the frames themselves are altered by this filter
+    outlink->time_base           = inlink->time_base;
+    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+    outlink->w                   = inlink->w;
+    outlink->h                   = inlink->h;
+    return 0;
+}
+
+#define OFFSET(x) offsetof(ColorSpaceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
+
+// Fixed: the "space" and "trc" option bounds used AVCOL_PRI_* constants from
+// the wrong enum (same numeric start, but AVCOL_PRI_NB truncated the valid
+// csp range); the AV_PIX_FMT_YUV444P option was misspelled "yuv424p".
+static const AVOption colorspace_options[] = {
+    { "all",        "Set all color properties together",
+      OFFSET(user_all),   AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
+      CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
+    ENUM("bt470m",      CS_BT470M,             "all"),
+    ENUM("bt470bg",     CS_BT470BG,            "all"),
+    ENUM("bt601-6-525", CS_BT601_6_525,        "all"),
+    ENUM("bt601-6-625", CS_BT601_6_625,        "all"),
+    ENUM("bt709",       CS_BT709,              "all"),
+    ENUM("smpte170m",   CS_SMPTE170M,          "all"),
+    ENUM("smpte240m",   CS_SMPTE240M,          "all"),
+    ENUM("bt2020",      CS_BT2020,             "all"),
+
+    { "space",      "Output colorspace",
+      OFFSET(user_csp),   AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
+      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp" },
+    ENUM("bt709",       AVCOL_SPC_BT709,       "csp"),
+    ENUM("fcc",         AVCOL_SPC_FCC,         "csp"),
+    ENUM("bt470bg",     AVCOL_SPC_BT470BG,     "csp"),
+    ENUM("smpte170m",   AVCOL_SPC_SMPTE170M,   "csp"),
+    ENUM("smpte240m",   AVCOL_SPC_SMPTE240M,   "csp"),
+    ENUM("bt2020ncl",   AVCOL_SPC_BT2020_NCL,  "csp"),
+
+    { "range",      "Output color range",
+      OFFSET(user_rng),   AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
+      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
+    ENUM("mpeg",        AVCOL_RANGE_MPEG,      "rng"),
+    ENUM("jpeg",        AVCOL_RANGE_JPEG,      "rng"),
+
+    { "primaries",  "Output color primaries",
+      OFFSET(user_prm),   AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
+      AVCOL_PRI_UNSPECIFIED, AVCOL_PRI_NB - 1, FLAGS, "prm" },
+    ENUM("bt709",        AVCOL_PRI_BT709,      "prm"),
+    ENUM("bt470m",       AVCOL_PRI_BT470M,     "prm"),
+    ENUM("bt470bg",      AVCOL_PRI_BT470BG,    "prm"),
+    ENUM("smpte170m",    AVCOL_PRI_SMPTE170M,  "prm"),
+    ENUM("smpte240m",    AVCOL_PRI_SMPTE240M,  "prm"),
+    ENUM("bt2020",       AVCOL_PRI_BT2020,     "prm"),
+
+    { "trc",        "Output transfer characteristics",
+      OFFSET(user_trc),   AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
+      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
+    ENUM("bt709",        AVCOL_TRC_BT709,        "trc"),
+    ENUM("gamma22",      AVCOL_TRC_GAMMA22,      "trc"),
+    ENUM("gamma28",      AVCOL_TRC_GAMMA28,      "trc"),
+    ENUM("smpte170m",    AVCOL_TRC_SMPTE170M,    "trc"),
+    ENUM("smpte240m",    AVCOL_TRC_SMPTE240M,    "trc"),
+    ENUM("bt2020-10",    AVCOL_TRC_BT2020_10,    "trc"),
+    ENUM("bt2020-12",    AVCOL_TRC_BT2020_12,    "trc"),
+
+    { "format",   "Output pixel format",
+      OFFSET(user_format), AV_OPT_TYPE_INT,  { .i64 = AV_PIX_FMT_NONE },
+      AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
+    ENUM("yuv420p",   AV_PIX_FMT_YUV420P,   "fmt"),
+    ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
+    ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
+    ENUM("yuv422p",   AV_PIX_FMT_YUV422P,   "fmt"),
+    ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
+    ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
+    ENUM("yuv444p",   AV_PIX_FMT_YUV444P,   "fmt"),
+    ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
+    ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
+
+    { "fast",     "Ignore primary chromaticity and gamma correction",
+      OFFSET(fast_mode), AV_OPT_TYPE_BOOL,  { .i64 = 0    },
+      0, 1, FLAGS },
+    { NULL }
+};
+
+// generates the colorspace_class AVClass boilerplate for the options above
+AVFILTER_DEFINE_CLASS(colorspace);
+
+// single video input; all per-frame work happens in filter_frame()
+static const AVFilterPad inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+    },
+    { NULL }
+};
+
+// single video output; config_props() mirrors the input link's geometry
+static const AVFilterPad outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_props,
+    },
+    { NULL }
+};
+
+// filter registration entry for -vf colorspace
+AVFilter ff_vf_colorspace = {
+    .name            = "colorspace",
+    .description     = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
+    .init            = init,
+    .uninit          = uninit,
+    .query_formats   = query_formats,
+    .priv_size       = sizeof(ColorSpaceContext),
+    .priv_class      = &colorspace_class,
+    .inputs          = inputs,
+    .outputs         = outputs,
+};
-- 
2.1.2



More information about the ffmpeg-devel mailing list