FFmpeg
f_select.c
/*
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * filter for selecting which frame passes in the filterchain
 */

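/* Example expressions (illustrative only; see doc/filters.texi for the full
 * list of selectable variables and constants):
 *   select='eq(pict_type\,I)'   pass only intra frames
 *   select='gt(scene\,0.4)'     pass frames likely to start a new scene
 *   select='not(mod(n\,10))'    pass one frame out of every ten
 */
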
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "scene_sad.h"

static const char *const var_names[] = {
    "TB",                ///< timebase

    "pts",               ///< original pts in the file of the frame
    "start_pts",         ///< first PTS in the stream, expressed in TB units
    "prev_pts",          ///< previous frame PTS
    "prev_selected_pts", ///< previous selected frame PTS

    "t",                 ///< timestamp expressed in seconds
    "start_t",           ///< first PTS in the stream, expressed in seconds
    "prev_t",            ///< previous frame time
    "prev_selected_t",   ///< previously selected time

    "pict_type",         ///< the type of picture in the movie
    "I",
    "P",
    "B",
    "S",
    "SI",
    "SP",
    "BI",
    "PICT_TYPE_I",
    "PICT_TYPE_P",
    "PICT_TYPE_B",
    "PICT_TYPE_S",
    "PICT_TYPE_SI",
    "PICT_TYPE_SP",
    "PICT_TYPE_BI",

    "interlace_type",    ///< the frame interlace type
    "PROGRESSIVE",
    "TOPFIRST",
    "BOTTOMFIRST",

    "consumed_samples_n",///< number of samples consumed by the filter (only audio)
    "samples_n",         ///< number of samples in the current frame (only audio)
    "sample_rate",       ///< sample rate (only audio)

    "n",                 ///< frame number (starting from zero)
    "selected_n",        ///< selected frame number (starting from zero)
    "prev_selected_n",   ///< number of the last selected frame

    "key",               ///< tell if the frame is a key frame
    "pos",               ///< original position in the file of the frame

    "scene",

    "concatdec_select",  ///< frame is within the interval set by the concat demuxer

    NULL
};

/* Must stay in the same order as var_names[] above: av_expr_parse() maps each
 * name to its index in that array, and var_values[] is indexed by this enum. */
enum var_name {
    VAR_TB,
    VAR_PTS, VAR_START_PTS, VAR_PREV_PTS, VAR_PREV_SELECTED_PTS,
    VAR_T, VAR_START_T, VAR_PREV_T, VAR_PREV_SELECTED_T,
    VAR_PICT_TYPE,
    VAR_I, VAR_P, VAR_B, VAR_S, VAR_SI, VAR_SP, VAR_BI,
    VAR_PICT_TYPE_I, VAR_PICT_TYPE_P, VAR_PICT_TYPE_B, VAR_PICT_TYPE_S,
    VAR_PICT_TYPE_SI, VAR_PICT_TYPE_SP, VAR_PICT_TYPE_BI,
    VAR_INTERLACE_TYPE,
    VAR_INTERLACE_TYPE_P, VAR_INTERLACE_TYPE_T, VAR_INTERLACE_TYPE_B,
    VAR_CONSUMED_SAMPLES_N, VAR_SAMPLES_N, VAR_SAMPLE_RATE,
    VAR_N, VAR_SELECTED_N, VAR_PREV_SELECTED_N,
    VAR_KEY, VAR_POS,
    VAR_SCENE,
    VAR_CONCATDEC_SELECT,
    VAR_VARS_NB
};

typedef struct SelectContext {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    int do_scene_detect;            ///< 1 if the expression requires scene detection variables, 0 otherwise
    ff_scene_sad_fn sad;            ///< Sum of the absolute difference function (scene detect only)
    double prev_mafd;               ///< previous MAFD (scene detect only)
    AVFrame *prev_picref;           ///< previous frame (scene detect only)
    double select;
    int select_out;                 ///< mark the selected output pad index
    int nb_outputs;
} SelectContext;

#define OFFSET(x) offsetof(SelectContext, x)
#define DEFINE_OPTIONS(filt_name, FLAGS)                            \
static const AVOption filt_name##_options[] = {                     \
    { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "e",    "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { "n",       "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { NULL }                                                         \
}

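/* The option table above is instantiated once per filter further down
 * (DEFINE_OPTIONS(aselect, ...) and DEFINE_OPTIONS(select, ...)), so the
 * "expr"/"e" and "outputs"/"n" options are shared by the audio and video
 * variants and only the AV_OPT_FLAG_* media flags differ. */
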
static int request_frame(AVFilterLink *outlink);

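/* Shared init: parse the selection expression and create one output pad per
 * requested output; each pad takes the media type of the filter's input
 * (audio for aselect, video for select). */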
static av_cold int init(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int i, ret;

    if ((ret = av_expr_parse(&select->expr, select->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
               select->expr_str);
        return ret;
    }
    select->do_scene_detect = !!strstr(select->expr_str, "scene");

    for (i = 0; i < select->nb_outputs; i++) {
        AVFilterPad pad = { 0 };

        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);
        pad.type = ctx->filter->inputs[0].type;
        pad.request_frame = request_frame;
        if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}

#define INTERLACE_TYPE_P 0
#define INTERLACE_TYPE_T 1
#define INTERLACE_TYPE_B 2

static int config_input(AVFilterLink *inlink)
{
    SelectContext *select = inlink->dst->priv;

    select->var_values[VAR_N]          = 0.0;
    select->var_values[VAR_SELECTED_N] = 0.0;

    select->var_values[VAR_TB] = av_q2d(inlink->time_base);

    select->var_values[VAR_PREV_PTS]          = NAN;
    select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
    select->var_values[VAR_PREV_SELECTED_T]   = NAN;
    select->var_values[VAR_PREV_T]            = NAN;
    select->var_values[VAR_START_PTS]         = NAN;
    select->var_values[VAR_START_T]           = NAN;

    select->var_values[VAR_I]  = AV_PICTURE_TYPE_I;
    select->var_values[VAR_P]  = AV_PICTURE_TYPE_P;
    select->var_values[VAR_B]  = AV_PICTURE_TYPE_B;
    select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
    select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
    select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
    select->var_values[VAR_PICT_TYPE_I]  = AV_PICTURE_TYPE_I;
    select->var_values[VAR_PICT_TYPE_P]  = AV_PICTURE_TYPE_P;
    select->var_values[VAR_PICT_TYPE_B]  = AV_PICTURE_TYPE_B;
    select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
    select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
    select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;

    select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
    select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
    select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;

    select->var_values[VAR_PICT_TYPE]          = NAN;
    select->var_values[VAR_INTERLACE_TYPE]     = NAN;
    select->var_values[VAR_SCENE]              = NAN;
    select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
    select->var_values[VAR_SAMPLES_N]          = NAN;

    select->var_values[VAR_SAMPLE_RATE] =
        inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    if (CONFIG_SELECT_FILTER && select->do_scene_detect) {
        select->sad = ff_scene_sad_get_fn(8); // 8-bit SAD function
        if (!select->sad)
            return AVERROR(EINVAL);
    }
    return 0;
}

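/* Scene change score: run the 8-bit SAD function over the packed RGB frame,
 * normalize the sum to a mean absolute frame difference (MAFD), and clip
 * min(MAFD, |MAFD - prev_MAFD|) / 100 into the [0,1] range. Values close to 1
 * indicate a likely scene change; the current frame is kept as the next
 * reference. */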
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFrame *prev_picref = select->prev_picref;

    if (prev_picref &&
        frame->height == prev_picref->height &&
        frame->width  == prev_picref->width) {
        uint64_t sad;
        double mafd, diff;

        select->sad(prev_picref->data[0], prev_picref->linesize[0], frame->data[0], frame->linesize[0], frame->width * 3, frame->height, &sad);
        emms_c();
        mafd = (double)sad / (frame->width * 3 * frame->height);
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
        av_frame_free(&prev_picref);
    }
    select->prev_picref = av_frame_clone(frame);
    return ret;
}

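/* concatdec_select: returns -1 (true) when the frame pts falls inside the
 * segment advertised by the concat demuxer via the lavf.concatdec.start_time
 * and lavf.concatdec.duration metadata, 0 when it falls outside, and NAN when
 * no such metadata is present. */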
static double get_concatdec_select(AVFrame *frame, int64_t pts)
{
    AVDictionary *metadata = frame->metadata;
    AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
    AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
    if (start_time_entry) {
        int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
        if (pts >= start_time) {
            if (duration_entry) {
                int64_t duration = strtoll(duration_entry->value, NULL, 10);
                if (pts < start_time + duration)
                    return -1;
                else
                    return 0;
            }
            return -1;
        }
        return 0;
    }
    return NAN;
}

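/* Convert between expression doubles and stream timestamps: AV_NOPTS_VALUE
 * maps to NAN and back, so missing timestamps propagate through the evaluated
 * variables instead of turning into bogus values. */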
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))

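/* Evaluate the expression for one frame and store the verdict:
 *   res == 0           -> select_out = -1, the frame is dropped
 *   res is NAN or < 0  -> select_out = 0, the frame goes to the first output
 *   res > 0            -> select_out = FFMIN(ceil(res)-1, nb_outputs-1) */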
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count_out;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
    select->var_values[VAR_KEY] = frame->key_frame;
    select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
            frame->top_field_first   ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first     ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    SelectContext *select = ctx->priv;

    select_frame(ctx, frame);
    if (select->select)
        return ff_filter_frame(ctx->outputs[select->select_out], frame);

    av_frame_free(&frame);
    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret = ff_request_frame(inlink);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int i;

    av_expr_free(select->expr);
    select->expr = NULL;

    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);

    if (select->do_scene_detect) {
        av_frame_free(&select->prev_picref);
    }
}

#if CONFIG_ASELECT_FILTER

DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(aselect);

static av_cold int aselect_init(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int ret;

    if ((ret = init(ctx)) < 0)
        return ret;

    if (select->do_scene_detect) {
        av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static const AVFilterPad avfilter_af_aselect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter ff_af_aselect = {
    .name        = "aselect",
    .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
    .init        = aselect_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SelectContext),
    .inputs      = avfilter_af_aselect_inputs,
    .priv_class  = &aselect_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_ASELECT_FILTER */

#if CONFIG_SELECT_FILTER

static int query_formats(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;

    if (!select->do_scene_detect) {
        return ff_default_query_formats(ctx);
    } else {
        int ret;
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
            AV_PIX_FMT_NONE
        };
        AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);

        if (!fmts_list)
            return AVERROR(ENOMEM);
        ret = ff_set_common_formats(ctx, fmts_list);
        if (ret < 0)
            return ret;
    }
    return 0;
}

DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(select);

static av_cold int select_init(AVFilterContext *ctx)
{
    int ret;

    if ((ret = init(ctx)) < 0)
        return ret;

    return 0;
}

static const AVFilterPad avfilter_vf_select_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter ff_vf_select = {
    .name          = "select",
    .description   = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
    .init          = select_init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(SelectContext),
    .priv_class    = &select_class,
    .inputs        = avfilter_vf_select_inputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_SELECT_FILTER */