FFmpeg
vf_drawbox.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
3  * Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Box and grid drawing filters. Also a nice template for a filter
25  * that needs to write in the input frame.
26  */
27 
#include "config_components.h"

#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/detection_bbox.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "drawutils.h"
#include "filters.h"
#include "video.h"
41 
/* Names of the variables available in the x/y/w/h/t option expressions.
 * The order must match enum var_name below ("fill" maps to VAR_MAX). */
static const char *const var_names[] = {
    "dar",
    "hsub", "vsub",
    "in_h", "ih",      ///< height of the input video
    "in_w", "iw",      ///< width  of the input video
    "sar",
    "x",
    "y",
    "h",              ///< height of the rendered box
    "w",              ///< width  of the rendered box
    "t",
    "fill",
    NULL
};
56 
/* Plane indices for YUV(A) data and channel indices for RGB data. */
enum { Y, U, V, A };
enum { R, G, B };
59 
/* Indices into var_values[]; order must match var_names[] above
 * (reconstructed — the generated listing dropped the enumerators). */
enum var_name {
    VAR_DAR,
    VAR_HSUB, VAR_VSUB,
    VAR_IN_H, VAR_IH,
    VAR_IN_W, VAR_IW,
    VAR_SAR,
    VAR_X,
    VAR_Y,
    VAR_H,
    VAR_W,
    VAR_T,
    VAR_MAX,   ///< bound to the "fill" variable name
    VARS_NB
};
74 
struct DrawBoxContext;

/* Predicate deciding whether pixel (x, y) is part of the box/grid outline. */
typedef int (*PixelBelongsToRegion)(struct DrawBoxContext *s, int x, int y);
79 typedef struct DrawBoxContext {
80  const AVClass *class;
81  int x, y, w, h;
82  int thickness;
83  char *color_str;
84  uint8_t rgba_map[4];
85  uint8_t rgba_color[4];
86  unsigned char yuv_color[4];
87  int invert_color; ///< invert luma color
88  int vsub, hsub; ///< chroma subsampling
89  char *x_expr, *y_expr; ///< expression for x and y
90  char *w_expr, *h_expr; ///< expression for width and height
91  char *t_expr; ///< expression for thickness
92  char *box_source_string; ///< string for box data source
94  int replace;
95  int step;
97 
98  void (*draw_region)(AVFrame *frame, struct DrawBoxContext *ctx, int left, int top, int right, int down,
99  PixelBelongsToRegion pixel_belongs_to_region);
101 
102 static const int NUM_EXPR_EVALS = 5;
103 
/* Point row[0..2] at the Y/U/V plane rows for scanline y,
 * applying the vertical chroma subsampling shift to U and V. */
#define ASSIGN_THREE_CHANNELS                                        \
    row[0] = frame->data[0] +  y               * frame->linesize[0]; \
    row[1] = frame->data[1] + (y >> ctx->vsub) * frame->linesize[1]; \
    row[2] = frame->data[2] + (y >> ctx->vsub) * frame->linesize[2];

/* Same, plus the (non-subsampled) alpha plane in row[3]. */
#define ASSIGN_FOUR_CHANNELS           \
    ASSIGN_THREE_CHANNELS              \
    row[3] = frame->data[3] + y * frame->linesize[3];
112 
113 static void draw_region(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down,
114  PixelBelongsToRegion pixel_belongs_to_region)
115 {
116  unsigned char *row[4];
117  int x, y;
118  if (ctx->have_alpha && ctx->replace) {
119  for (y = top; y < down; y++) {
121  if (ctx->invert_color) {
122  for (x = left; x < right; x++)
123  if (pixel_belongs_to_region(ctx, x, y))
124  row[0][x] = 0xff - row[0][x];
125  } else {
126  for (x = left; x < right; x++) {
127  if (pixel_belongs_to_region(ctx, x, y)) {
128  row[0][x ] = ctx->yuv_color[Y];
129  row[1][x >> ctx->hsub] = ctx->yuv_color[U];
130  row[2][x >> ctx->hsub] = ctx->yuv_color[V];
131  row[3][x ] = ctx->yuv_color[A];
132  }
133  }
134  }
135  }
136  } else {
137  for (y = top; y < down; y++) {
139  if (ctx->invert_color) {
140  for (x = left; x < right; x++)
141  if (pixel_belongs_to_region(ctx, x, y))
142  row[0][x] = 0xff - row[0][x];
143  } else {
144  for (x = left; x < right; x++) {
145  double alpha = (double)ctx->yuv_color[A] / 255;
146 
147  if (pixel_belongs_to_region(ctx, x, y)) {
148  row[0][x ] = (1 - alpha) * row[0][x ] + alpha * ctx->yuv_color[Y];
149  row[1][x >> ctx->hsub] = (1 - alpha) * row[1][x >> ctx->hsub] + alpha * ctx->yuv_color[U];
150  row[2][x >> ctx->hsub] = (1 - alpha) * row[2][x >> ctx->hsub] + alpha * ctx->yuv_color[V];
151  }
152  }
153  }
154  }
155  }
156 }
157 
/* Point row[0..2] at the first R/G/B byte of scanline y in packed RGB
 * data, using rgba_map to resolve the component order of the format. */
#define ASSIGN_THREE_CHANNELS_PACKED                                        \
    row[0] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[0];    \
    row[1] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[1];    \
    row[2] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[2];

/* Same, plus the alpha component in row[3]. */
#define ASSIGN_FOUR_CHANNELS_PACKED    \
    ASSIGN_THREE_CHANNELS_PACKED       \
    row[3] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[3];
166 
167 static void draw_region_rgb_packed(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down,
168  PixelBelongsToRegion pixel_belongs_to_region)
169 {
170  const int C = ctx->step;
171  uint8_t *row[4];
172 
173  if (ctx->have_alpha && ctx->replace) {
174  for (int y = top; y < down; y++) {
176  if (ctx->invert_color) {
177  for (int x = left; x < right; x++)
178  if (pixel_belongs_to_region(ctx, x, y)) {
179  row[0][x*C] = 0xff - row[0][x*C];
180  row[1][x*C] = 0xff - row[1][x*C];
181  row[2][x*C] = 0xff - row[2][x*C];
182  }
183  } else {
184  for (int x = left; x < right; x++) {
185  if (pixel_belongs_to_region(ctx, x, y)) {
186  row[0][x*C] = ctx->rgba_color[R];
187  row[1][x*C] = ctx->rgba_color[G];
188  row[2][x*C] = ctx->rgba_color[B];
189  row[3][x*C] = ctx->rgba_color[A];
190  }
191  }
192  }
193  }
194  } else {
195  for (int y = top; y < down; y++) {
197  if (ctx->invert_color) {
198  for (int x = left; x < right; x++)
199  if (pixel_belongs_to_region(ctx, x, y)) {
200  row[0][x*C] = 0xff - row[0][x*C];
201  row[1][x*C] = 0xff - row[1][x*C];
202  row[2][x*C] = 0xff - row[2][x*C];
203  }
204  } else {
205  for (int x = left; x < right; x++) {
206  float alpha = (float)ctx->rgba_color[A] / 255.f;
207 
208  if (pixel_belongs_to_region(ctx, x, y)) {
209  row[0][x*C] = (1.f - alpha) * row[0][x*C] + alpha * ctx->rgba_color[R];
210  row[1][x*C] = (1.f - alpha) * row[1][x*C] + alpha * ctx->rgba_color[G];
211  row[2][x*C] = (1.f - alpha) * row[2][x*C] + alpha * ctx->rgba_color[B];
212  }
213  }
214  }
215  }
216  }
217 }
218 
220 {
222  if (!strcmp(box_source_string, "side_data_detection_bboxes")) {
224  } else {
225  // will support side_data_regions_of_interest next
226  return AVERROR(EINVAL);
227  }
228 }
229 
231 {
232  DrawBoxContext *s = ctx->priv;
233 
234  if (s->box_source_string) {
235  s->box_source = box_source_string_parse(s->box_source_string);
236  if ((int)s->box_source < 0) {
237  av_log(ctx, AV_LOG_ERROR, "Error box source: %s\n",s->box_source_string);
238  return AVERROR(EINVAL);
239  }
240  }
241 
242  if (!strcmp(s->color_str, "invert"))
243  s->invert_color = 1;
244  else if (av_parse_color(s->rgba_color, s->color_str, -1, ctx) < 0)
245  return AVERROR(EINVAL);
246 
247  if (!s->invert_color) {
248  s->yuv_color[Y] = RGB_TO_Y_CCIR(s->rgba_color[0], s->rgba_color[1], s->rgba_color[2]);
249  s->yuv_color[U] = RGB_TO_U_CCIR(s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], 0);
250  s->yuv_color[V] = RGB_TO_V_CCIR(s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], 0);
251  s->yuv_color[A] = s->rgba_color[3];
252  }
253 
254  return 0;
255 }
256 
257 static const enum AVPixelFormat pix_fmts[] = {
269 };
270 
272 {
273  AVFilterContext *ctx = inlink->dst;
274  DrawBoxContext *s = ctx->priv;
276  double var_values[VARS_NB], res;
277  char *expr;
278  int ret;
279  int i;
280 
281  ff_fill_rgba_map(s->rgba_map, inlink->format);
282 
283  if (!(desc->flags & AV_PIX_FMT_FLAG_RGB))
284  s->draw_region = draw_region;
285  else
286  s->draw_region = draw_region_rgb_packed;
287 
288  s->step = av_get_padded_bits_per_pixel(desc) >> 3;
289  s->hsub = desc->log2_chroma_w;
290  s->vsub = desc->log2_chroma_h;
291  s->have_alpha = desc->flags & AV_PIX_FMT_FLAG_ALPHA;
292 
293  var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
294  var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
295  var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
296  var_values[VAR_DAR] = (double)inlink->w / inlink->h * var_values[VAR_SAR];
297  var_values[VAR_HSUB] = s->hsub;
298  var_values[VAR_VSUB] = s->vsub;
299  var_values[VAR_X] = NAN;
300  var_values[VAR_Y] = NAN;
301  var_values[VAR_H] = NAN;
302  var_values[VAR_W] = NAN;
303  var_values[VAR_T] = NAN;
304 
305  for (i = 0; i <= NUM_EXPR_EVALS; i++) {
306  /* evaluate expressions, fail on last iteration */
307  var_values[VAR_MAX] = inlink->w;
308  if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
309  var_names, var_values,
310  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
311  goto fail;
312  s->x = var_values[VAR_X] = res;
313 
314  var_values[VAR_MAX] = inlink->h;
315  if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
316  var_names, var_values,
317  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
318  goto fail;
319  s->y = var_values[VAR_Y] = res;
320 
321  var_values[VAR_MAX] = inlink->w - s->x;
322  if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
323  var_names, var_values,
324  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
325  goto fail;
326  s->w = var_values[VAR_W] = res;
327 
328  var_values[VAR_MAX] = inlink->h - s->y;
329  if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
330  var_names, var_values,
331  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
332  goto fail;
333  s->h = var_values[VAR_H] = res;
334 
335  var_values[VAR_MAX] = INT_MAX;
336  if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
337  var_names, var_values,
338  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
339  goto fail;
340  s->thickness = var_values[VAR_T] = res;
341  }
342 
343  /* if w or h are zero, use the input w/h */
344  s->w = (s->w > 0) ? s->w : inlink->w;
345  s->h = (s->h > 0) ? s->h : inlink->h;
346 
347  /* sanity check width and height */
348  if (s->w < 0 || s->h < 0) {
349  av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
350  return AVERROR(EINVAL);
351  }
352 
353  av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
354  s->x, s->y, s->w, s->h,
355  s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);
356 
357  return 0;
358 
359 fail:
361  "Error when evaluating the expression '%s'.\n",
362  expr);
363  return ret;
364 }
365 
367 {
368  return (y - s->y < s->thickness) || (s->y + s->h - 1 - y < s->thickness) ||
369  (x - s->x < s->thickness) || (s->x + s->w - 1 - x < s->thickness);
370 }
371 
373 {
374  DrawBoxContext *s = inlink->dst->priv;
376  const AVDetectionBBox *bbox;
377  AVFrameSideData *sd;
378  int loop = 1;
379 
380  if (s->box_source == AV_FRAME_DATA_DETECTION_BBOXES) {
382  if (sd) {
384  loop = header->nb_bboxes;
385  } else {
386  av_log(s, AV_LOG_WARNING, "No detection bboxes.\n");
387  return ff_filter_frame(inlink->dst->outputs[0], frame);
388  }
389  }
390 
391  for (int i = 0; i < loop; i++) {
392  if (header) {
393  bbox = av_get_detection_bbox(header, i);
394  s->y = bbox->y;
395  s->x = bbox->x;
396  s->h = bbox->h;
397  s->w = bbox->w;
398  }
399 
400  s->draw_region(frame, s, FFMAX(s->x, 0), FFMAX(s->y, 0), FFMIN(s->x + s->w, frame->width),
401  FFMIN(s->y + s->h, frame->height), pixel_belongs_to_box);
402  }
403 
404  return ff_filter_frame(inlink->dst->outputs[0], frame);
405 }
406 
407 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
408 {
409  AVFilterLink *inlink = ctx->inputs[0];
410  DrawBoxContext *s = ctx->priv;
411  int old_x = s->x;
412  int old_y = s->y;
413  int old_w = s->w;
414  int old_h = s->h;
415  int old_t = s->thickness;
416  int old_r = s->replace;
417  int ret;
418 
419  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
420  if (ret < 0)
421  return ret;
422 
423  ret = init(ctx);
424  if (ret < 0)
425  goto end;
427 end:
428  if (ret < 0) {
429  s->x = old_x;
430  s->y = old_y;
431  s->w = old_w;
432  s->h = old_h;
433  s->thickness = old_t;
434  s->replace = old_r;
435  }
436 
437  return ret;
438 }
439 
/* Option-table helpers: field offset into DrawBoxContext and the common
 * option flags (runtime-settable via process_command). */
#define OFFSET(x) offsetof(DrawBoxContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
442 
443 #if CONFIG_DRAWBOX_FILTER
444 
445 static const AVOption drawbox_options[] = {
446  { "x", "set horizontal position of the left box edge", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
447  { "y", "set vertical position of the top box edge", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
448  { "width", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
449  { "w", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
450  { "height", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
451  { "h", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
452  { "color", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
453  { "c", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
454  { "thickness", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, 0, 0, FLAGS },
455  { "t", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, 0, 0, FLAGS },
456  { "replace", "replace color & alpha", OFFSET(replace), AV_OPT_TYPE_BOOL, { .i64=0 }, 0, 1, FLAGS },
457  { "box_source", "use datas from bounding box in side data", OFFSET(box_source_string), AV_OPT_TYPE_STRING, { .str=NULL }, 0, 1, FLAGS },
458  { NULL }
459 };
460 
461 AVFILTER_DEFINE_CLASS(drawbox);
462 
463 static const AVFilterPad drawbox_inputs[] = {
464  {
465  .name = "default",
466  .type = AVMEDIA_TYPE_VIDEO,
468  .config_props = config_input,
469  .filter_frame = filter_frame,
470  },
471 };
472 
473 const AVFilter ff_vf_drawbox = {
474  .name = "drawbox",
475  .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
476  .priv_size = sizeof(DrawBoxContext),
477  .priv_class = &drawbox_class,
478  .init = init,
479  FILTER_INPUTS(drawbox_inputs),
482  .process_command = process_command,
484 };
485 #endif /* CONFIG_DRAWBOX_FILTER */
486 
487 #if CONFIG_DRAWGRID_FILTER
488 static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
489 {
490  // x is horizontal (width) coord,
491  // y is vertical (height) coord
492  int x_modulo;
493  int y_modulo;
494 
495  // Abstract from the offset
496  x -= drawgrid->x;
497  y -= drawgrid->y;
498 
499  x_modulo = x % drawgrid->w;
500  y_modulo = y % drawgrid->h;
501 
502  // If x or y got negative, fix values to preserve logics
503  if (x_modulo < 0)
504  x_modulo += drawgrid->w;
505  if (y_modulo < 0)
506  y_modulo += drawgrid->h;
507 
508  return x_modulo < drawgrid->thickness // Belongs to vertical line
509  || y_modulo < drawgrid->thickness; // Belongs to horizontal line
510 }
511 
512 static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
513 {
514  DrawBoxContext *drawgrid = inlink->dst->priv;
515 
516  drawgrid->draw_region(frame, drawgrid, 0, 0, frame->width, frame->height, pixel_belongs_to_grid);
517 
518  return ff_filter_frame(inlink->dst->outputs[0], frame);
519 }
520 
521 static const AVOption drawgrid_options[] = {
522  { "x", "set horizontal offset", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
523  { "y", "set vertical offset", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
524  { "width", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
525  { "w", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
526  { "height", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
527  { "h", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
528  { "color", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
529  { "c", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
530  { "thickness", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, 0, 0, FLAGS },
531  { "t", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, 0, 0, FLAGS },
532  { "replace", "replace color & alpha", OFFSET(replace), AV_OPT_TYPE_BOOL, { .i64=0 }, 0, 1, FLAGS },
533  { NULL }
534 };
535 
536 AVFILTER_DEFINE_CLASS(drawgrid);
537 
538 static const AVFilterPad drawgrid_inputs[] = {
539  {
540  .name = "default",
541  .type = AVMEDIA_TYPE_VIDEO,
543  .config_props = config_input,
544  .filter_frame = drawgrid_filter_frame,
545  },
546 };
547 
548 const AVFilter ff_vf_drawgrid = {
549  .name = "drawgrid",
550  .description = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
551  .priv_size = sizeof(DrawBoxContext),
552  .priv_class = &drawgrid_class,
553  .init = init,
554  FILTER_INPUTS(drawgrid_inputs),
558  .process_command = process_command,
559 };
560 
561 #endif /* CONFIG_DRAWGRID_FILTER */
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
DrawBoxContext::step
int step
Definition: vf_drawbox.c:95
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
var_name
var_name
Definition: noise.c:47
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: filters.h:242
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:963
ASSIGN_FOUR_CHANNELS
#define ASSIGN_FOUR_CHANNELS
Definition: vf_drawbox.c:109
DrawBoxContext::vsub
int vsub
Definition: vf_drawbox.c:88
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1061
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3170
av_parse_color
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:359
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_drawbox.c:407
V
@ V
Definition: vf_drawbox.c:57
VARS_NB
@ VARS_NB
Definition: vf_drawbox.c:72
RGB_TO_U_CCIR
#define RGB_TO_U_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:102
U
@ U
Definition: vf_drawbox.c:57
VAR_MAX
@ VAR_MAX
Definition: vf_drawbox.c:71
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
DrawBoxContext::thickness
int thickness
Definition: vf_drawbox.c:82
config_input
static int config_input(AVFilterLink *inlink)
Definition: vf_drawbox.c:271
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:262
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
pixdesc.h
AVOption
AVOption.
Definition: opt.h:429
DrawBoxContext::draw_region
void(* draw_region)(AVFrame *frame, struct DrawBoxContext *ctx, int left, int top, int right, int down, PixelBelongsToRegion pixel_belongs_to_region)
Definition: vf_drawbox.c:98
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
var_names
static const char *const var_names[]
Definition: vf_drawbox.c:42
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:205
box_source_string_parse
static enum AVFrameSideDataType box_source_string_parse(const char *box_source_string)
Definition: vf_drawbox.c:219
av_pure
#define av_pure
Definition: attributes.h:78
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
video.h
G
@ G
Definition: vf_drawbox.c:58
VAR_Y
@ VAR_Y
Definition: vf_drawbox.c:67
PixelBelongsToRegion
int(* PixelBelongsToRegion)(struct DrawBoxContext *s, int x, int y)
Definition: vf_drawbox.c:77
DrawBoxContext::box_source_string
char * box_source_string
string for box data source
Definition: vf_drawbox.c:92
fail
#define fail()
Definition: checkasm.h:193
DrawBoxContext::y
int y
Definition: vf_drawbox.c:81
VAR_HSUB
@ VAR_HSUB
Definition: vf_drawbox.c:62
loop
static int loop
Definition: ffplay.c:335
AVFrameSideDataType
AVFrameSideDataType
Definition: frame.h:49
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
FLAGS
#define FLAGS
Definition: vf_drawbox.c:441
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
colorspace.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
DrawBoxContext::h_expr
char * h_expr
expression for width and height
Definition: vf_drawbox.c:90
ff_video_default_filterpad
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
Definition: video.c:37
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_drawbox.c:230
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
float
float
Definition: af_crystalizer.c:122
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
RGB_TO_Y_CCIR
#define RGB_TO_Y_CCIR(r, g, b)
Definition: colorspace.h:98
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
filters.h
ASSIGN_THREE_CHANNELS_PACKED
#define ASSIGN_THREE_CHANNELS_PACKED
Definition: vf_drawbox.c:158
DrawBoxContext::rgba_color
uint8_t rgba_color[4]
Definition: vf_drawbox.c:85
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:49
DrawBoxContext::yuv_color
unsigned char yuv_color[4]
Definition: vf_drawbox.c:86
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:263
R
@ R
Definition: vf_drawbox.c:58
NAN
#define NAN
Definition: mathematics.h:115
DrawBoxContext::color_str
char * color_str
Definition: vf_drawbox.c:83
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
if
if(ret)
Definition: filter_design.txt:179
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
NULL
#define NULL
Definition: coverity.c:32
VAR_W
@ VAR_W
Definition: vf_drawbox.c:69
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
VAR_IN_H
@ VAR_IN_H
Definition: vf_drawbox.c:63
RGB_TO_V_CCIR
#define RGB_TO_V_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:106
parseutils.h
double
double
Definition: af_crystalizer.c:132
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:265
VAR_IW
@ VAR_IW
Definition: vf_drawbox.c:64
AVFILTER_DEFINE_CLASS
#define AVFILTER_DEFINE_CLASS(fname)
Definition: filters.h:273
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
draw_region
static void draw_region(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down, PixelBelongsToRegion pixel_belongs_to_region)
Definition: vf_drawbox.c:113
VAR_X
@ VAR_X
Definition: vf_drawbox.c:66
eval.h
AVFILTERPAD_FLAG_NEEDS_WRITABLE
#define AVFILTERPAD_FLAG_NEEDS_WRITABLE
The filter expects writable frames from its input link, duplicating data buffers if needed.
Definition: filters.h:57
pixel_belongs_to_box
static av_pure av_always_inline int pixel_belongs_to_box(DrawBoxContext *s, int x, int y)
Definition: vf_drawbox.c:366
Y
@ Y
Definition: vf_drawbox.c:57
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
DrawBoxContext::x
int x
Definition: vf_drawbox.c:81
DrawBoxContext::box_source
enum AVFrameSideDataType box_source
Definition: vf_drawbox.c:96
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
av_expr_parse_and_eval
int av_expr_parse_and_eval(double *d, const char *s, const char *const *const_names, const double *const_values, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), void *opaque, int log_offset, void *log_ctx)
Parse and evaluate an expression.
Definition: eval.c:803
DrawBoxContext::x_expr
char * x_expr
Definition: vf_drawbox.c:89
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
av_get_padded_bits_per_pixel
int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel for the pixel format described by pixdesc, including any padding ...
Definition: pixdesc.c:3135
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_vf_drawgrid
const AVFilter ff_vf_drawgrid
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:900
header
static const uint8_t header[24]
Definition: sdr2.c:68
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
A
@ A
Definition: vf_drawbox.c:57
VAR_SAR
@ VAR_SAR
Definition: vf_drawbox.c:65
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:182
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
DrawBoxContext::t_expr
char * t_expr
expression for thickness
Definition: vf_drawbox.c:91
ASSIGN_FOUR_CHANNELS_PACKED
#define ASSIGN_FOUR_CHANNELS_PACKED
Definition: vf_drawbox.c:163
VAR_VSUB
@ VAR_VSUB
Definition: vf_drawbox.c:62
ASSIGN_THREE_CHANNELS
#define ASSIGN_THREE_CHANNELS
Definition: vf_drawbox.c:104
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: vf_drawbox.c:257
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
common.h
DrawBoxContext
Definition: vf_drawbox.c:79
DrawBoxContext::w_expr
char * w_expr
Definition: vf_drawbox.c:90
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:44
DrawBoxContext::rgba_map
uint8_t rgba_map[4]
Definition: vf_drawbox.c:84
VAR_DAR
@ VAR_DAR
Definition: vf_drawbox.c:61
B
@ B
Definition: vf_drawbox.c:58
AVFilter
Filter definition.
Definition: avfilter.h:201
VAR_H
@ VAR_H
Definition: vf_drawbox.c:68
ret
ret
Definition: filter_design.txt:187
ff_vf_drawbox
const AVFilter ff_vf_drawbox
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:264
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
VAR_IN_W
@ VAR_IN_W
Definition: vf_drawbox.c:64
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
DrawBoxContext::invert_color
int invert_color
invert luma color
Definition: vf_drawbox.c:87
avfilter.h
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
draw_region_rgb_packed
static void draw_region_rgb_packed(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down, PixelBelongsToRegion pixel_belongs_to_region)
Definition: vf_drawbox.c:167
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
DrawBoxContext::have_alpha
int have_alpha
Definition: vf_drawbox.c:93
DrawBoxContext::y_expr
char * y_expr
expression for x and y
Definition: vf_drawbox.c:89
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
NUM_EXPR_EVALS
static const int NUM_EXPR_EVALS
Definition: vf_drawbox.c:102
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
VAR_IH
@ VAR_IH
Definition: vf_drawbox.c:63
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: vf_drawbox.c:372
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:79
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:262
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
DrawBoxContext::h
int h
Definition: vf_drawbox.c:81
AVDetectionBBox
Definition: detection_bbox.h:26
DrawBoxContext::replace
int replace
Definition: vf_drawbox.c:94
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
drawutils.h
DrawBoxContext::hsub
int hsub
chroma subsampling
Definition: vf_drawbox.c:88
DrawBoxContext::w
int w
Definition: vf_drawbox.c:81
OFFSET
#define OFFSET(x)
Definition: vf_drawbox.c:440
detection_bbox.h
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:194
VAR_T
@ VAR_T
Definition: vf_drawbox.c:70