FFmpeg
vf_colortemperature.c
/*
 * Copyright (c) 2021 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "filters.h"
#include "video.h"

#define R 0
#define G 1
#define B 2

typedef struct ColorTemperatureContext {
    const AVClass *class;

    float temperature;
    float mix;
    float preserve;

    float color[3];

    int step;
    int depth;
    uint8_t rgba_map[4];

    int (*do_slice)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
} ColorTemperatureContext;

static float saturate(float input)
{
    return av_clipf(input, 0.f, 1.f);
}

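/*
 * Map a black-body temperature in Kelvin to normalized RGB gains in [0, 1].
 * The piecewise log/pow fit below is in the style of the widely used
 * Tanner Helland approximation, with the coefficients rescaled to the 0..1
 * range: red saturates to 1.0 up to ~6600 K, blue saturates to 1.0 above
 * ~6600 K and to 0.0 below ~1900 K, and green is fitted on both sides.
 */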
static void kelvin2rgb(float k, float *rgb)
{
    float kelvin = k / 100.0f;

    if (kelvin <= 66.0f) {
        rgb[0] = 1.0f;
        rgb[1] = saturate(0.39008157876901960784f * logf(kelvin) - 0.63184144378862745098f);
    } else {
        const float t = fmaxf(kelvin - 60.0f, 0.0f);
        rgb[0] = saturate(1.29293618606274509804f * powf(t, -0.1332047592f));
        rgb[1] = saturate(1.12989086089529411765f * powf(t, -0.0755148492f));
    }

    if (kelvin >= 66.0f)
        rgb[2] = 1.0f;
    else if (kelvin <= 19.0f)
        rgb[2] = 0.0f;
    else
        rgb[2] = saturate(0.54320678911019607843f * logf(kelvin - 10.0f) - 1.19625408914f);
}

static float lerpf(float v0, float v1, float f)
{
    return v0 + (v1 - v0) * f;
}

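/*
 * Per-pixel kernel shared by all slice functions.  It expects r, g, b and the
 * locals nr, ng, nb, l0, l1, l to be in scope:
 *   1. scale the input by the white-point gains in color[],
 *   2. blend the scaled and original values with the "mix" factor,
 *   3. rescale by the ratio of (max + min) before and after the adjustment,
 *      which approximately restores the original HSL lightness,
 *   4. blend the rescaled and unrescaled results with the "preserve" factor.
 */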
#define PROCESS()                                                       \
    nr = r * color[0];                                                  \
    ng = g * color[1];                                                  \
    nb = b * color[2];                                                  \
                                                                        \
    nr = lerpf(r, nr, mix);                                             \
    ng = lerpf(g, ng, mix);                                             \
    nb = lerpf(b, nb, mix);                                             \
                                                                        \
    l0 = (FFMAX3(r, g, b) + FFMIN3(r, g, b)) + FLT_EPSILON;             \
    l1 = (FFMAX3(nr, ng, nb) + FFMIN3(nr, ng, nb)) + FLT_EPSILON;       \
    l = l0 / l1;                                                        \
                                                                        \
    r = nr * l;                                                         \
    g = ng * l;                                                         \
    b = nb * l;                                                         \
                                                                        \
    nr = lerpf(nr, r, preserve);                                        \
    ng = lerpf(ng, g, preserve);                                        \
    nb = lerpf(nb, b, preserve);

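/*
 * Slice workers.  temperature_slice8/16/32 handle planar GBR(A) layouts with
 * one pointer per plane; temperature_slice8p/16p handle packed RGB layouts via
 * the rgba_map offsets.  Each job processes the rows [slice_start, slice_end)
 * so a frame can be split across threads.
 */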
static int temperature_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorTemperatureContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int width = frame->width;
    const int height = frame->height;
    const float mix = s->mix;
    const float preserve = s->preserve;
    const float *color = s->color;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t glinesize = frame->linesize[0];
    const ptrdiff_t blinesize = frame->linesize[1];
    const ptrdiff_t rlinesize = frame->linesize[2];
    uint8_t *gptr = frame->data[0] + slice_start * glinesize;
    uint8_t *bptr = frame->data[1] + slice_start * blinesize;
    uint8_t *rptr = frame->data[2] + slice_start * rlinesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x];
            float b = bptr[x];
            float r = rptr[x];
            float nr, ng, nb;
            float l0, l1, l;

            PROCESS()

            gptr[x] = av_clip_uint8(ng);
            bptr[x] = av_clip_uint8(nb);
            rptr[x] = av_clip_uint8(nr);
        }

        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}

static int temperature_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorTemperatureContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int width = frame->width;
    const int height = frame->height;
    const float preserve = s->preserve;
    const float mix = s->mix;
    const float *color = s->color;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t glinesize = frame->linesize[0] / sizeof(uint16_t);
    const ptrdiff_t blinesize = frame->linesize[1] / sizeof(uint16_t);
    const ptrdiff_t rlinesize = frame->linesize[2] / sizeof(uint16_t);
    uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
    uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
    uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x];
            float b = bptr[x];
            float r = rptr[x];
            float nr, ng, nb;
            float l0, l1, l;

            PROCESS()

            gptr[x] = av_clip_uintp2_c(ng, depth);
            bptr[x] = av_clip_uintp2_c(nb, depth);
            rptr[x] = av_clip_uintp2_c(nr, depth);
        }

        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}

static int temperature_slice32(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorTemperatureContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int width = frame->width;
    const int height = frame->height;
    const float preserve = s->preserve;
    const float mix = s->mix;
    const float *color = s->color;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t glinesize = frame->linesize[0] / sizeof(float);
    const ptrdiff_t blinesize = frame->linesize[1] / sizeof(float);
    const ptrdiff_t rlinesize = frame->linesize[2] / sizeof(float);
    float *gptr = (float *)frame->data[0] + slice_start * glinesize;
    float *bptr = (float *)frame->data[1] + slice_start * blinesize;
    float *rptr = (float *)frame->data[2] + slice_start * rlinesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = gptr[x];
            float b = bptr[x];
            float r = rptr[x];
            float nr, ng, nb;
            float l0, l1, l;

            PROCESS()

            gptr[x] = ng;
            bptr[x] = nb;
            rptr[x] = nr;
        }

        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}

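/*
 * Packed-pixel variants: a single interleaved plane is walked with "step"
 * components per pixel, and the per-channel offsets come from
 * ff_fill_rgba_map() in config_input().
 */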
static int temperature_slice8p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorTemperatureContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int step = s->step;
    const int width = frame->width;
    const int height = frame->height;
    const float mix = s->mix;
    const float preserve = s->preserve;
    const float *color = s->color;
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t linesize = frame->linesize[0];
    uint8_t *ptr = frame->data[0] + slice_start * linesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = ptr[x * step + goffset];
            float b = ptr[x * step + boffset];
            float r = ptr[x * step + roffset];
            float nr, ng, nb;
            float l0, l1, l;

            PROCESS()

            ptr[x * step + goffset] = av_clip_uint8(ng);
            ptr[x * step + boffset] = av_clip_uint8(nb);
            ptr[x * step + roffset] = av_clip_uint8(nr);
        }

        ptr += linesize;
    }

    return 0;
}

static int temperature_slice16p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorTemperatureContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int step = s->step;
    const int depth = s->depth;
    const int width = frame->width;
    const int height = frame->height;
    const float preserve = s->preserve;
    const float mix = s->mix;
    const float *color = s->color;
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t linesize = frame->linesize[0] / sizeof(uint16_t);
    uint16_t *ptr = (uint16_t *)frame->data[0] + slice_start * linesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            float g = ptr[x * step + goffset];
            float b = ptr[x * step + boffset];
            float r = ptr[x * step + roffset];
            float nr, ng, nb;
            float l0, l1, l;

            PROCESS()

            ptr[x * step + goffset] = av_clip_uintp2_c(ng, depth);
            ptr[x * step + boffset] = av_clip_uintp2_c(nb, depth);
            ptr[x * step + roffset] = av_clip_uintp2_c(nr, depth);
        }

        ptr += linesize;
    }

    return 0;
}

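/*
 * Recompute the RGB gains from the current temperature on every frame, so a
 * runtime change of the option takes effect immediately, then run the selected
 * slice worker in-place on the (writable) input frame.
 */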
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    ColorTemperatureContext *s = ctx->priv;

    kelvin2rgb(s->temperature, s->color);

    ff_filter_execute(ctx, s->do_slice, frame, NULL,
                      FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(ctx->outputs[0], frame);
}

static const enum AVPixelFormat pixel_fmts[] = {
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
    AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
    AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
    AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
    AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
    AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
    AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
    AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32,
    AV_PIX_FMT_NONE
};

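/*
 * Pick the slice worker that matches the negotiated pixel format: planar
 * vs. packed layout, 8-bit vs. higher bit depths, and 32-bit float planes.
 */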
static av_cold int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ColorTemperatureContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;

    s->step = desc->nb_components;
    if (inlink->format == AV_PIX_FMT_RGB0 ||
        inlink->format == AV_PIX_FMT_0RGB ||
        inlink->format == AV_PIX_FMT_BGR0 ||
        inlink->format == AV_PIX_FMT_0BGR)
        s->step = 4;

    s->depth = desc->comp[0].depth;
    s->do_slice = s->depth <= 8 ? temperature_slice8 : temperature_slice16;
    if (!planar)
        s->do_slice = s->depth <= 8 ? temperature_slice8p : temperature_slice16p;
    if (s->depth == 32)
        s->do_slice = temperature_slice32;

    ff_fill_rgba_map(s->rgba_map, inlink->format);

    return 0;
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .flags        = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

#define OFFSET(x) offsetof(ColorTemperatureContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption colortemperature_options[] = {
    { "temperature", "set the temperature in Kelvin",  OFFSET(temperature), AV_OPT_TYPE_FLOAT, {.dbl=6500}, 1000, 40000, VF },
    { "mix",         "set the mix with filtered output", OFFSET(mix),       AV_OPT_TYPE_FLOAT, {.dbl=1},       0,     1, VF },
    { "pl",          "set the amount of preserving lightness", OFFSET(preserve), AV_OPT_TYPE_FLOAT, {.dbl=0},  0,     1, VF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(colortemperature);

const AVFilter ff_vf_colortemperature = {
    .name            = "colortemperature",
    .description     = NULL_IF_CONFIG_SMALL("Adjust color temperature of video."),
    .priv_size       = sizeof(ColorTemperatureContext),
    .priv_class      = &colortemperature_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pixel_fmts),
    .process_command = ff_filter_process_command,
};
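
/*
 * Example usage (a possible invocation, not part of this source file):
 *   ffmpeg -i input.mkv -vf colortemperature=temperature=4500:mix=1:pl=0.6 output.mkv
 * Because the options carry AV_OPT_FLAG_RUNTIME_PARAM and the filter sets
 * .process_command, "temperature", "mix" and "pl" can also be changed while
 * the graph is running, e.g. through the sendcmd filter.
 */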
Definition: vf_colortemperature.c:319