Go to the source code of this file.
|
enum | {
VAR_X,
VAR_Y,
VAR_W,
VAR_H,
VAR_SW,
VAR_SH,
VAR_T,
VAR_N,
VAR_A,
VAR_B,
VAR_TOP,
VAR_BOTTOM,
VAR_VARS_NB
} |
|
|
| FRAMESYNC_DEFINE_CLASS (blend, BlendContext, fs) |
|
static int | filter_slice (AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) |
|
static AVFrame * | blend_frame (AVFilterContext *ctx, AVFrame *top_buf, const AVFrame *bottom_buf) |
|
static int | blend_frame_for_dualinput (FFFrameSync *fs) |
|
static av_cold int | init (AVFilterContext *ctx) |
|
static av_cold void | uninit (AVFilterContext *ctx) |
|
void | ff_blend_init (FilterParams *param, int depth) |
|
static int | config_params (AVFilterContext *ctx) |
|
static int | config_output (AVFilterLink *outlink) |
|
static int | process_command (AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags) |
|
|
static const char *const | var_names [] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL } |
|
static const AVOption | blend_options [] |
|
static enum AVPixelFormat | pix_fmts [] |
|
◆ TOP
◆ BOTTOM
◆ DEPTH [1/7]
◆ DEPTH [2/7]
◆ DEPTH [3/7]
◆ DEPTH [4/7]
◆ DEPTH [5/7]
◆ DEPTH [6/7]
◆ DEPTH [7/7]
◆ OFFSET
◆ FLAGS
◆ COPY
#define COPY |
( |
|
src, |
|
|
|
depth |
|
) |
| |
Value:static void blend_copy ##
src##
_##depth(
const uint8_t *top, ptrdiff_t top_linesize, \
const uint8_t *bottom, ptrdiff_t bottom_linesize,\
uint8_t *dst, ptrdiff_t dst_linesize, \
{ \
av_image_copy_plane(dst, dst_linesize,
src,
src ## _linesize, \
}
Definition at line 156 of file vf_blend.c.
◆ BLEND_NORMAL
Value:static void blend_normal_##
name(
const uint8_t *_top, ptrdiff_t top_linesize, \
const uint8_t *_bottom, ptrdiff_t bottom_linesize,\
uint8_t *_dst, ptrdiff_t dst_linesize, \
{ \
type *dst = (
type*)_dst; \
const float opacity = param->opacity; \
\
dst_linesize /=
sizeof(
type); \
top_linesize /=
sizeof(
type); \
bottom_linesize /=
sizeof(
type);
\
for (
int j = 0; j <
width; j++) { \
dst[j] = top[j] * opacity + bottom[j] * (1.f - opacity); \
} \
dst += dst_linesize; \
top += top_linesize; \
bottom += bottom_linesize; \
} \
}
Definition at line 178 of file vf_blend.c.
◆ DEFINE_BLEND_EXPR
#define DEFINE_BLEND_EXPR |
( |
|
type, |
|
|
|
name, |
|
|
|
div |
|
) |
| |
Value:static void blend_expr_##
name(
const uint8_t *_top, ptrdiff_t top_linesize, \
const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
uint8_t *_dst, ptrdiff_t dst_linesize, \
{ \
type *dst = (
type*)_dst; \
AVExpr *e = param->e; \
int y, x; \
dst_linesize /= div; \
top_linesize /= div; \
bottom_linesize /= div;
\
values[
VAR_Y] = y + starty; \
for (x = 0; x <
width; x++) { \
} \
dst += dst_linesize; \
top += top_linesize; \
bottom += bottom_linesize; \
} \
}
Definition at line 208 of file vf_blend.c.
◆ DEFINE_INIT_BLEND_FUNC
#define DEFINE_INIT_BLEND_FUNC |
( |
|
depth, |
|
|
|
nbits |
|
) |
| |
◆ anonymous enum
Enumerator |
---|
VAR_X | |
VAR_Y | |
VAR_W | |
VAR_H | |
VAR_SW | |
VAR_SH | |
VAR_T | |
VAR_N | |
VAR_A | |
VAR_B | |
VAR_TOP | |
VAR_BOTTOM | |
VAR_VARS_NB | |
Definition at line 79 of file vf_blend.c.
◆ FRAMESYNC_DEFINE_CLASS()
◆ filter_slice()
◆ blend_frame()
◆ blend_frame_for_dualinput()
◆ init()
◆ uninit()
◆ ff_blend_init()
◆ config_params()
◆ config_output()
◆ process_command()
static int process_command |
( |
AVFilterContext * |
ctx, |
|
|
const char * |
cmd, |
|
|
const char * |
args, |
|
|
char * |
res, |
|
|
int |
res_len, |
|
|
int |
flags |
|
) |
| |
|
static |
◆ var_names
const char* const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL } |
|
static |
◆ blend_options
◆ pix_fmts
#define AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_GBRAP16
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf: default, minimum, maximum, flags — name is the option name
#define AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUV420P10
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
#define AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_GBRP14
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUV422P9
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf: type
#define AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_YUV444P10
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
#define AV_PIX_FMT_YUV422P16
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP12
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P16
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAYF32
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
#define AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GBRP16
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_PIX_FMT_GBRPF32
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV444P12
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define AV_PIX_FMT_YUVA444P10
#define i(width, name, range_min, range_max)
#define AV_PIX_FMT_GBRP12
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
#define AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUVA422P12
#define AV_PIX_FMT_GBRAPF32
these buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return values:
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
#define AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_GRAY12
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_PIX_FMT_YUV420P14