#define RED     (1 << REDS)
#define YELLOW  (1 << YELLOWS)
#define GREEN   (1 << GREENS)
#define CYAN    (1 << CYANS)
#define BLUE    (1 << BLUES)
#define MAGENTA (1 << MAGENTAS)
static void get_triplet(int64_t m[4][4], int *r, int *g, int *b)
{
    const int ir = *r, ig = *g, ib = *b;

    /* apply the 3x3 part of the color matrix in 16.16 fixed point */
    *r = (ir * m[0][0] + ig * m[1][0] + ib * m[2][0]) >> 16;
    *g = (ir * m[0][1] + ig * m[1][1] + ib * m[2][1]) >> 16;
    *b = (ir * m[0][2] + ig * m[1][2] + ib * m[2][2]) >> 16;
}
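The shift by 16 matches the 16.16 fixed-point scale of s->imatrix: the float matrix coefficients are scaled by 65536 before being stored as int64_t, so the products can be summed exactly and shifted back down. A quick standalone check of the arithmetic (illustrative, not code from this file):

    /* 0.5 stored in 16.16 fixed point is 32768; applied to a sample of 200: */
    int64_t coeff = 32768;                /* 0.5f * 65536 */
    int     out   = (200 * coeff) >> 16;  /* == 100       */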
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
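FAST_DIV255() replaces a division by 255 with an add, a multiply and a shift, rounding x/255 to the nearest integer over the 16-bit products it is used on. Two worked cases (illustrative, not from this file):

    FAST_DIV255(255 * 255);   /* (65025 + 128) * 257 >> 16 == 255 */
    FAST_DIV255(128 * 255);   /* (32640 + 128) * 257 >> 16 == 128 */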
    /* integer linear interpolation: blend v0 toward v1 by the factor f / max */
    return v0 + (v1 - v0) * (int64_t)f / max;
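The widening cast to int64_t keeps the intermediate product (v1 - v0) * f from overflowing for 16-bit samples, where both factors can approach 65535. A standalone illustration of the blend with assumed values (not from this file):

    /* f equal to half of max returns roughly the midpoint of v0 and v1 */
    int v = 10 + (250 - 10) * (int64_t)128 / 255;   /* == 130 */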
#define HUESATURATION(name, type, clip, xall)                                  \
static int do_slice_##name##_##xall(AVFilterContext *ctx,                      \
                                    void *arg,                                 \
                                    int jobnr, int nb_jobs)                    \
{                                                                              \
    HueSaturationContext *s = ctx->priv;                                       \
    AVFrame *frame = arg;                                                      \
    const int imax = (1 << name) - 1;                                          \
    const float strength = s->strength;                                        \
    const int colors = s->colors;                                              \
    const int step = s->step;                                                  \
    const int width = frame->width;                                            \
    const int process_h = frame->height;                                       \
    const int slice_start = (process_h *  jobnr   ) / nb_jobs;                 \
    const int slice_end   = (process_h * (jobnr+1)) / nb_jobs;                 \
    const int linesize = frame->linesize[0] / sizeof(type);                    \
    type *row = (type *)frame->data[0] + linesize * slice_start;               \
    const uint8_t offset_r = s->rgba_map[R];                                   \
    const uint8_t offset_g = s->rgba_map[G];                                   \
    const uint8_t offset_b = s->rgba_map[B];                                   \
    type *dst_r = row + offset_r;                                              \
    type *dst_g = row + offset_g;                                              \
    type *dst_b = row + offset_b;                                              \
                                                                               \
    for (int y = slice_start; y < slice_end; y++) {                           \
        for (int x = 0; x < width * step; x += step) {                        \
            int ir, ig, ib, ro, go, bo;                                       \
                                                                               \
            ir = ro = dst_r[x];                                               \
            ig = go = dst_g[x];                                               \
            ib = bo = dst_b[x];                                               \
                                                                               \
            if (xall) {                                                       \
                /* process every pixel with the color matrix */               \
                get_triplet(s->imatrix, &ir, &ig, &ib);                       \
            } else {                                                          \
                /* key on the selected colors, then blend by key strength */  \
                const int min = FFMIN3(ir, ig, ib);                           \
                const int max = FFMAX3(ir, ig, ib);                           \
                const int flags = (ir == max) << REDS                         \
                                | (ir == min) << CYANS                        \
                                | (ig == max) << GREENS                       \
                                | (ig == min) << MAGENTAS                     \
                                | (ib == max) << BLUES                        \
                                | (ib == min) << YELLOWS;                     \
                if (colors & flags) {                                         \
                    int f = 0;                                                \
                                                                              \
                    if (colors & RED)                                         \
                        f = FFMAX(f, ir - FFMAX(ig, ib));                     \
                    if (colors & YELLOW)                                      \
                        f = FFMAX(f, FFMIN(ir, ig) - ib);                     \
                    if (colors & GREEN)                                       \
                        f = FFMAX(f, ig - FFMAX(ir, ib));                     \
                    if (colors & CYAN)                                        \
                        f = FFMAX(f, FFMIN(ig, ib) - ir);                     \
                    if (colors & BLUE)                                        \
                        f = FFMAX(f, ib - FFMAX(ir, ig));                     \
                    if (colors & MAGENTA)                                     \
                        f = FFMAX(f, FFMIN(ir, ib) - ig);                     \
                    f = FFMIN(f * strength, imax);                            \
                    get_triplet(s->imatrix, &ir, &ig, &ib);                   \
                    ir = lerpi##name(ro, ir, f, imax);                        \
                    ig = lerpi##name(go, ig, f, imax);                        \
                    ib = lerpi##name(bo, ib, f, imax);                        \
                }                                                             \
            }                                                                 \
                                                                              \
            dst_r[x] = clip(ir);                                              \
            dst_g[x] = clip(ig);                                              \
            dst_b[x] = clip(ib);                                              \
        }                                                                     \
                                                                              \
        dst_r += linesize;                                                    \
        dst_g += linesize;                                                    \
        dst_b += linesize;                                                    \
    }                                                                         \
                                                                              \
    return 0;                                                                 \
}
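Each expansion of this macro becomes one slice worker. name selects the bit depth (and the matching lerpi8/lerpi16 helper), type the sample type, clip the clamping function, and xall whether every pixel goes through the matrix or only pixels keyed by the colors mask. The do_slice_8_0/do_slice_8_1/do_slice_16_0/do_slice_16_1 names used in config_input() below imply four instantiations roughly along these lines (the av_clip_* helpers are an assumption, not copied from this listing):

HUESATURATION(8,  uint8_t,  av_clip_uint8,  0)
HUESATURATION(8,  uint8_t,  av_clip_uint8,  1)
HUESATURATION(16, uint16_t, av_clip_uint16, 0)
HUESATURATION(16, uint16_t, av_clip_uint16, 1)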
static void identity_matrix(float matrix[4][4])
{
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
            matrix[y][x] = (y == x) ? 1.f : 0.f;
}
static void matrix_multiply(float a[4][4], float b[4][4], float c[4][4])
{
    float temp[4][4];

    /* accumulate into temp first so that c may alias a or b */
    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++) {
            temp[y][x] = b[y][0] * a[0][x]
                       + b[y][1] * a[1][x]
                       + b[y][2] * a[2][x]
                       + b[y][3] * a[3][x];
        }
    }

    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++)
            c[y][x] = temp[y][x];
    }
}
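Because the product is accumulated into the local temp and only copied into c at the end, the destination may safely alias one of the operands, which makes it convenient to keep composing transforms into a single accumulator before it is converted to the fixed-point s->imatrix. A hedged usage sketch (the call pattern is an illustration, not taken from this file; assumes <math.h> for sinf/cosf):

    float acc[4][4], rot[4][4];

    identity_matrix(acc);
    x_rotate_matrix(rot, sinf(0.1f), cosf(0.1f));
    matrix_multiply(rot, acc, acc);   /* acc = acc * rot; aliasing acc is fine */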
static void saturation_matrix(float m[4][4], float saturation,
                              float rlw, float glw, float blw)
{
    float s = 1.f - saturation;
    float a = s * rlw + saturation;
    float b = s * rlw;
    float c = s * rlw;
    float d = s * glw;
    float e = s * glw + saturation;
    float f = s * glw;
    float g = s * blw;
    float h = s * blw;
    float i = s * blw + saturation;

    m[0][0] = a;   m[0][1] = b;   m[0][2] = c;   m[0][3] = 0.f;
    m[1][0] = d;   m[1][1] = e;   m[1][2] = f;   m[1][3] = 0.f;
    m[2][0] = g;   m[2][1] = h;   m[2][2] = i;   m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
}
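With get_triplet()'s convention (the triplet is a row vector multiplied into the matrix), every output channel becomes (1 - saturation) * luma + saturation * input, where luma is the weighted sum rlw*R + glw*G + blw*B. Two easy sanity checks of the formula, as illustrative calls using Rec.601-style weights (not from this file):

    float m[4][4];
    saturation_matrix(m, 1.f, 0.299f, 0.587f, 0.114f); /* identity: image unchanged           */
    saturation_matrix(m, 0.f, 0.299f, 0.587f, 0.114f); /* all channels collapse to luma: gray */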
static void matrix2imatrix(float matrix[4][4], int64_t imatrix[4][4])
{
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
            imatrix[y][x] = lrintf(matrix[y][x] * 65536.f);  /* 16.16 fixed point */
}
static void x_rotate_matrix(float m[4][4], float rs, float rc)
{
    m[0][0] = 1.f; m[0][1] = 0.f; m[0][2] = 0.f; m[0][3] = 0.f;
    m[1][0] = 0.f; m[1][1] = rc;  m[1][2] = rs;  m[1][3] = 0.f;
    m[2][0] = 0.f; m[2][1] = -rs; m[2][2] = rc;  m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
}

static void y_rotate_matrix(float m[4][4], float rs, float rc)
{
    m[0][0] = rc;  m[0][1] = 0.f; m[0][2] = -rs; m[0][3] = 0.f;
    m[1][0] = 0.f; m[1][1] = 1.f; m[1][2] = 0.f; m[1][3] = 0.f;
    m[2][0] = rs;  m[2][1] = 0.f; m[2][2] = rc;  m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
}

static void z_rotate_matrix(float m[4][4], float rs, float rc)
{
    m[0][0] = rc;  m[0][1] = rs;  m[0][2] = 0.f; m[0][3] = 0.f;
    m[1][0] = -rs; m[1][1] = rc;  m[1][2] = 0.f; m[1][3] = 0.f;
    m[2][0] = 0.f; m[2][1] = 0.f; m[2][2] = 1.f; m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
}

static void z_shear_matrix(float m[4][4], float dx, float dy)
{
    m[0][0] = 1.f; m[0][1] = 0.f; m[0][2] = dx;  m[0][3] = 0.f;
    m[1][0] = 0.f; m[1][1] = 1.f; m[1][2] = dy;  m[1][3] = 0.f;
    m[2][0] = 0.f; m[2][1] = 0.f; m[2][2] = 1.f; m[2][3] = 0.f;
    m[3][0] = 0.f; m[3][1] = 0.f; m[3][2] = 0.f; m[3][3] = 1.f;
}
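These rotations and the z shear are the standard ingredients of hue rotation about the gray axis (the Haeberli color-matrix technique): rotate the color cube so the (1,1,1) gray diagonal lies along z, optionally shear so the luminance plane stays level, spin around z by the hue angle, then undo the outer transforms. hue_rotate_matrix() and shue_rotate_matrix() below appear to build such a chain (their xrs/yrc/zrs locals suggest exactly these steps); the following is only a schematic sketch of the composition, with the shear step omitted and the ordering simplified (assumes <math.h>):

    float acc[4][4], t[4][4];
    float rotation = 0.5f;                                 /* hue angle in radians (example) */
    float xrs = 1.f / sqrtf(2.f), xrc = xrs;               /* 45 degrees about x             */
    float yrs = -1.f / sqrtf(3.f), yrc = sqrtf(2.f / 3.f); /* tilt the gray diagonal onto z  */

    identity_matrix(acc);
    x_rotate_matrix(t, xrs, xrc);                        matrix_multiply(t, acc, acc);
    y_rotate_matrix(t, yrs, yrc);                        matrix_multiply(t, acc, acc);
    z_rotate_matrix(t, sinf(rotation), cosf(rotation));  matrix_multiply(t, acc, acc);
    y_rotate_matrix(t, -yrs, yrc);                       matrix_multiply(t, acc, acc);
    x_rotate_matrix(t, -xrs, xrc);                       matrix_multiply(t, acc, acc);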
static void transform_point(float matrix[4][4], float x, float y, float z,
                            float *tx, float *ty, float *tz)
{
    /* map a point through the matrix, row-vector convention as in get_triplet() */
    *tx = x * matrix[0][0] + y * matrix[1][0] + z * matrix[2][0] + matrix[3][0];
    *ty = x * matrix[0][1] + y * matrix[1][1] + z * matrix[2][1] + matrix[3][1];
    *tz = x * matrix[0][2] + y * matrix[1][2] + z * matrix[2][2] + matrix[3][2];
}

static void hue_rotate_matrix(float matrix[4][4], float rotation,
                              float rlw, float glw, float blw)
{
    float mag, lx, ly, lz;
    /* ... */
}

static void shue_rotate_matrix(float m[4][4], float rotation)
{
    float xrs, xrc, yrs, yrc, zrs, zrc, mag;
    /* ... */
}
static void init_matrix(HueSaturationContext *s)
{
    float i = 1.f + s->intensity;
    float saturation = 1.f + s->saturation;

    /* ... the intensity, saturation and hue adjustments are built and combined
       here; the saturation and hue builders are called with
       (s->rlw, s->glw, s->blw) as the luma weights ... */
}
static av_cold int config_input(AVFilterLink *inlink)
{
    /* ... */
    s->depth = desc->comp[0].depth;
    s->bpp   = s->depth >> 3;
    /* ... */
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->do_slice[0] = s->depth <= 8 ? do_slice_8_0 : do_slice_16_0;
    s->do_slice[1] = s->depth <= 8 ? do_slice_8_1 : do_slice_16_1;
    /* ... */
}
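config_input() selects the slice workers once per stream: the bit depth picks the 8- or 16-bit expansion of HUESATURATION, and index 0 versus 1 picks the color-keyed versus all-pixels variant. A hedged sketch of how a filter_frame() callback would then fan a frame out to the workers (the xall choice is a placeholder; ff_filter_execute, ff_filter_get_nb_threads and ff_filter_frame are the helpers referenced on this page):

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    HueSaturationContext *s = ctx->priv;
    const int xall = 1; /* placeholder: 1 = run the matrix on every pixel,
                           0 = key on the colors mask first               */

    ff_filter_execute(ctx, s->do_slice[xall], frame, NULL,
                      FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(ctx->outputs[0], frame);
}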
#define OFFSET(x) offsetof(HueSaturationContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
const AVFilter ff_vf_huesaturation = {
    .name        = "huesaturation",
    /* ... */
    .priv_class  = &huesaturation_class,
    /* ... */
};
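For reference, the finished filter is driven entirely through its AVOptions, e.g. something like `-vf huesaturation=hue=30:saturation=0.3:intensity=0.1` on the ffmpeg command line; these option names mirror the context fields used above but are an assumption rather than a quotation of the options table, so `ffmpeg -h filter=huesaturation` is the authoritative list. Since VF includes AV_OPT_FLAG_RUNTIME_PARAM, the same options can also be adjusted while the filter is running through the generic command mechanism.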