FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
vf_coreimage.m
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Thilo Borgmann
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Video processing based on Apple's CoreImage API
24  */
25 
26 #import <CoreImage/CoreImage.h>
27 #import <AppKit/AppKit.h>
28 
29 #include "avfilter.h"
30 #include "filters.h"
31 #include "formats.h"
32 #include "video.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mem.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/pixdesc.h"
37 
typedef struct CoreImageContext {
    const AVClass   *class;

    int             is_video_source;     ///< filter is used as video source

    int             w, h;                ///< video size
    AVRational      sar;                 ///< sample aspect ratio
    AVRational      frame_rate;          ///< video frame rate
    AVRational      time_base;           ///< stream time base
    int64_t         duration;            ///< duration expressed in microseconds
    int64_t         pts;                 ///< increasing presentation time stamp
    AVFrame         *picref;             ///< cached reference containing the painted picture

    CFTypeRef       glctx;               ///< OpenGL context
    CGContextRef    cgctx;               ///< Bitmap context for image copy
    CFTypeRef       input_image;         ///< Input image container for passing into Core Image API
    CGColorSpaceRef color_space;         ///< Common color space for input image and cgcontext
    int             bits_per_component;  ///< Shared bpc for input-output operation

    char            *filter_string;      ///< The complete user provided filter definition
    CFTypeRef       *filters;            ///< CIFilter object for all requested filters
    int             num_filters;         ///< Amount of filters in *filters

    char            *output_rect;        ///< Rectangle to be filled with filter input
    int             list_filters;        ///< Option used to list all available filters including generators
    int             list_generators;     ///< Option used to list all available generators
} CoreImageContext;
65 
/** Propagate the configured source properties (size, SAR, rate, time base)
 *  to the output link and derive the shared bits-per-component value.
 */
static int config_output(AVFilterLink *link)
{
    FilterLink *l = ff_filter_link(link);
    CoreImageContext *ctx = link->src->priv;

    link->w                     = ctx->w;
    link->h                     = ctx->h;
    link->sample_aspect_ratio   = ctx->sar;
    l->frame_rate               = ctx->frame_rate;
    link->time_base             = ctx->time_base;

    // bpc shared by input image and the bitmap context used for readback
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    ctx->bits_per_component        = av_get_bits_per_pixel(desc) / desc->nb_components;

    return 0;
}
82 
83 /** Determine image properties from input link of filter chain.
84  */
static int config_input(AVFilterLink *link)
{
    CoreImageContext *ctx = link->dst->priv;

    // bpc shared by input image and the bitmap context used for readback
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    ctx->bits_per_component        = av_get_bits_per_pixel(desc) / desc->nb_components;

    return 0;
}
93 
94 /** Print a list of all available filters including options and respective value ranges and defaults.
95  */
static void list_filters(CoreImageContext *ctx)
{
    // querying filters and attributes
    NSArray *filter_categories = nil;

    // restrict the query to generators when only those were requested
    if (ctx->list_generators && !ctx->list_filters) {
        filter_categories = [NSArray arrayWithObjects:kCICategoryGenerator, nil];
    }

    for (NSString *filter_name in [CIFilter filterNamesInCategories:filter_categories]) {
        CIFilter *filter                           = [CIFilter filterWithName:filter_name];
        NSDictionary<NSString *, id> *filter_attribs = [filter attributes];

        av_log(ctx, AV_LOG_INFO, "Filter: %s\n", [filter_name UTF8String]);

        // print every input option; numeric options also get range and default
        for (NSString *input in [filter inputKeys]) {
            NSDictionary *input_attribs = [filter_attribs valueForKey:input];
            NSString *input_class       = [input_attribs valueForKey:kCIAttributeClass];
            if ([input_class isEqualToString:@"NSNumber"]) {
                NSNumber *value_default = [input_attribs valueForKey:kCIAttributeDefault];
                NSNumber *value_min     = [input_attribs valueForKey:kCIAttributeSliderMin];
                NSNumber *value_max     = [input_attribs valueForKey:kCIAttributeSliderMax];

                av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\t[%s %s][%s]\n",
                    [input UTF8String],
                    [input_class UTF8String],
                    [[value_min stringValue] UTF8String],
                    [[value_max stringValue] UTF8String],
                    [[value_default stringValue] UTF8String]);
            } else {
                av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\n",
                    [input UTF8String],
                    [input_class UTF8String]);
            }
        }
    }
}
133 
/** Run all configured CIFilters on the frame and render the result back into
 *  frame->data[0] via a bitmap context, then pass the frame downstream.
 *
 *  @param ctx   filter private context
 *  @param link  output link the processed frame is sent on
 *  @param frame frame whose buffer is used as both input and render target
 *  @return 0 on success, a negative AVERROR code on failure
 */
static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
{
    int i;

    // (re-)initialize input image
    const CGSize frame_size = {
        frame->width,
        frame->height
    };

    // wrap the frame buffer without copying; CoreImage must not free it
    NSData *data = [NSData dataWithBytesNoCopy:frame->data[0]
                                        length:frame->height*frame->linesize[0]
                                  freeWhenDone:NO];

    CIImage *ret = [(__bridge CIImage*)ctx->input_image initWithBitmapData:data
                                                               bytesPerRow:frame->linesize[0]
                                                                      size:frame_size
                                                                    format:kCIFormatARGB8
                                                                colorSpace:ctx->color_space]; //kCGColorSpaceGenericRGB
    if (!ret) {
        av_log(ctx, AV_LOG_ERROR, "Input image could not be initialized.\n");
        return AVERROR_EXTERNAL;
    }

    CIFilter *filter       = NULL;
    CIImage *filter_input  = (__bridge CIImage*)ctx->input_image;
    CIImage *filter_output = NULL;

    // successively apply all filters
    for (i = 0; i < ctx->num_filters; i++) {
        if (i) {
            // set filter input to previous filter output
            filter_input    = [(__bridge CIImage*)ctx->filters[i-1] valueForKey:kCIOutputImageKey];
            CGRect out_rect = [filter_input extent];
            if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
                // do not keep padded image regions after filtering
                out_rect.origin.x    = 0.0f;
                out_rect.origin.y    = 0.0f;
                out_rect.size.width  = frame->width;
                out_rect.size.height = frame->height;
            }
            filter_input = [filter_input imageByCroppingToRect:out_rect];
        }

        filter = (__bridge CIFilter*)ctx->filters[i];

        // do not set input image for the first filter if used as video source
        if (!ctx->is_video_source || i) {
            @try {
                [filter setValue:filter_input forKey:kCIInputImageKey];
            } @catch (NSException *exception) {
                // generators legitimately reject an input image; everything else is fatal
                if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
                    av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.", [exception.reason UTF8String]);
                    return AVERROR_EXTERNAL;
                } else {
                    av_log(ctx, AV_LOG_WARNING, "Selected filter does not accept an input image.\n");
                }
            }
        }
    }

    // get output of last filter
    filter_output = [filter valueForKey:kCIOutputImageKey];

    if (!filter_output) {
        av_log(ctx, AV_LOG_ERROR, "Filter output not available.\n");
        return AVERROR_EXTERNAL;
    }

    // do not keep padded image regions after filtering
    CGRect out_rect = [filter_output extent];
    if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
        av_log(ctx, AV_LOG_DEBUG, "Cropping output image.\n");
        out_rect.origin.x    = 0.0f;
        out_rect.origin.y    = 0.0f;
        out_rect.size.width  = frame->width;
        out_rect.size.height = frame->height;
    }

    CGImageRef out = [(__bridge CIContext*)ctx->glctx createCGImage:filter_output
                                                           fromRect:out_rect];

    if (!out) {
        av_log(ctx, AV_LOG_ERROR, "Cannot create valid output image.\n");
        // bail out instead of dereferencing a NULL CGImageRef below
        return AVERROR_EXTERNAL;
    }

    // create bitmap context on the fly for rendering into current frame->data[]
    if (ctx->cgctx) {
        CGContextRelease(ctx->cgctx);
        ctx->cgctx = NULL;
    }
    size_t out_width  = CGImageGetWidth(out);
    size_t out_height = CGImageGetHeight(out);

    if (out_width > frame->width || out_height > frame->height) { // this might result in segfault
        av_log(ctx, AV_LOG_WARNING, "Output image has unexpected size: %lux%lu (expected: %ix%i). This may crash...\n",
               out_width, out_height, frame->width, frame->height);
    }
    ctx->cgctx = CGBitmapContextCreate(frame->data[0],
                                       frame->width,
                                       frame->height,
                                       ctx->bits_per_component,
                                       frame->linesize[0],
                                       ctx->color_space,
                                       (uint32_t)kCGImageAlphaPremultipliedFirst); // ARGB
    if (!ctx->cgctx) {
        av_log(ctx, AV_LOG_ERROR, "CGBitmap context cannot be created.\n");
        CGImageRelease(out); // "create" rule: we own the image
        return AVERROR_EXTERNAL;
    }

    // copy ("draw") the output image into the frame data
    CGRect rect = {{0,0},{frame->width, frame->height}};
    if (ctx->output_rect) {
        @try {
            NSString *tmp_string = [NSString stringWithUTF8String:ctx->output_rect];
            NSRect tmp           = NSRectFromString(tmp_string);
            rect                 = NSRectToCGRect(tmp);
        } @catch (NSException *exception) {
            av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.", [exception.reason UTF8String]);
            CGImageRelease(out);
            return AVERROR_EXTERNAL;
        }
        if (rect.size.width == 0.0f) {
            av_log(ctx, AV_LOG_WARNING, "Width of output rect is zero.\n");
        }
        if (rect.size.height == 0.0f) {
            av_log(ctx, AV_LOG_WARNING, "Height of output rect is zero.\n");
        }
    }

    CGContextDrawImage(ctx->cgctx, rect, out);
    CGImageRelease(out); // createCGImage: follows the "create" rule; release to avoid a per-frame leak

    return ff_filter_frame(link, frame);
}
267 
268 /** Apply all valid filters successively to the input image.
269  * The final output image is copied from the GPU by "drawing" using a bitmap context.
270  */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    return apply_filter(link->dst->priv, link->dst->outputs[0], frame);
}
275 
/** Produce the next frame of the video source: clone the cached picture,
 *  stamp it with the next pts and run the generator/filter chain on it.
 *  Returns AVERROR_EOF once the configured duration has been reached.
 */
static int request_frame(AVFilterLink *link)
{
    CoreImageContext *ctx = link->src->priv;
    AVFrame *frame;

    // duration < 0 means "infinite"
    if (ctx->duration >= 0 &&
        av_rescale_q(ctx->pts, ctx->time_base, AV_TIME_BASE_Q) >= ctx->duration) {
        return AVERROR_EOF;
    }

    // lazily allocate the reusable source picture
    if (!ctx->picref) {
        ctx->picref = ff_get_video_buffer(link, ctx->w, ctx->h);
        if (!ctx->picref) {
            return AVERROR(ENOMEM);
        }
    }

    frame = av_frame_clone(ctx->picref);
    if (!frame) {
        return AVERROR(ENOMEM);
    }

    frame->pts                 = ctx->pts;
    frame->duration            = 1;
    frame->flags              |= AV_FRAME_FLAG_KEY;
    frame->flags              &= ~AV_FRAME_FLAG_INTERLACED;
    frame->pict_type           = AV_PICTURE_TYPE_I;
    frame->sample_aspect_ratio = ctx->sar;

    ctx->pts++;

    return apply_filter(ctx, link, frame);
}
309 
310 /** Set an option of the given filter to the provided key-value pair.
311  */
static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
{
    NSString *input_key = [NSString stringWithUTF8String:key];
    NSString *input_val = [NSString stringWithUTF8String:value];

    NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
    NSDictionary *input_attribs  = [filter_attribs valueForKey:input_key];

    // messaging nil below simply yields nil, so these are safe before the check
    NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
    NSString *input_type  = [input_attribs valueForKey:kCIAttributeType];

    if (!input_attribs) {
        av_log(ctx, AV_LOG_WARNING, "Skipping unknown option: \"%s\".\n",
               [input_key UTF8String]); // [[filter name] UTF8String]) not currently defined...
        return;
    }

    av_log(ctx, AV_LOG_DEBUG, "key: %s, val: %s, #attribs: %lu, class: %s, type: %s\n",
           [input_key UTF8String],
           [input_val UTF8String],
           input_attribs ? (unsigned long)[input_attribs count] : -1,
           [input_class UTF8String],
           [input_type UTF8String]);

    if ([input_class isEqualToString:@"NSNumber"]) {
        float input          = input_val.floatValue;
        NSNumber *max_value  = [input_attribs valueForKey:kCIAttributeSliderMax];
        NSNumber *min_value  = [input_attribs valueForKey:kCIAttributeSliderMin];
        NSNumber *used_value = nil;

#define CLAMP_WARNING do { \
av_log(ctx, AV_LOG_WARNING, "Value of \"%f\" for option \"%s\" is out of range [%f %f], clamping to \"%f\".\n", \
       input, \
       [input_key UTF8String], \
       min_value.floatValue, \
       max_value.floatValue, \
       used_value.floatValue); \
} while(0)
        // clamp to the advertised slider range, warning the user
        if (input > max_value.floatValue) {
            used_value = max_value;
            CLAMP_WARNING;
        } else if (input < min_value.floatValue) {
            used_value = min_value;
            CLAMP_WARNING;
        } else {
            used_value = [NSNumber numberWithFloat:input];
        }

        [filter setValue:used_value forKey:input_key];
    } else if ([input_class isEqualToString:@"CIVector"]) {
        CIVector *input = [CIVector vectorWithString:input_val];

        if (!input) {
            av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIVector description: \"%s\".\n",
                   [input_val UTF8String]);
            return;
        }

        [filter setValue:input forKey:input_key];
    } else if ([input_class isEqualToString:@"CIColor"]) {
        CIColor *input = [CIColor colorWithString:input_val];

        if (!input) {
            av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIColor description: \"%s\".\n",
                   [input_val UTF8String]);
            return;
        }

        [filter setValue:input forKey:input_key];
    } else if ([input_class isEqualToString:@"NSString"]) { // set display name as string with latin1 encoding
        [filter setValue:input_val forKey:input_key];
    } else if ([input_class isEqualToString:@"NSData"]) { // set display name as string with latin1 encoding
        NSData *input = [NSData dataWithBytes:(const void*)[input_val cStringUsingEncoding:NSISOLatin1StringEncoding]
                                       length:[input_val lengthOfBytesUsingEncoding:NSISOLatin1StringEncoding]];

        if (!input) {
            av_log(ctx, AV_LOG_WARNING, "Skipping invalid NSData description: \"%s\".\n",
                   [input_val UTF8String]);
            return;
        }

        [filter setValue:input forKey:input_key];
    } else {
        av_log(ctx, AV_LOG_WARNING, "Skipping unsupported option class: \"%s\".\n",
               [input_class UTF8String]);
        avpriv_report_missing_feature(ctx, "Handling of some option classes");
        return;
    }
}
401 
402 /** Create a filter object by a given name and set all options to defaults.
403  * Overwrite any option given by the user to the provided value in filter_options.
404  */
static CIFilter* create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
{
    // look the filter up by name; nil if CoreImage does not know it
    CIFilter *filter = [CIFilter filterWithName:[NSString stringWithUTF8String:filter_name]];

    // reset every option to its documented default before applying overrides
    [filter setDefaults];

    // apply each user-supplied key=value pair on top of the defaults
    if (filter_options) {
        const AVDictionaryEntry *entry = NULL;
        while ((entry = av_dict_iterate(filter_options, entry))) {
            set_option(ctx, filter, entry->key, entry->value);
        }
    }

    return filter;
}
423 
/** Shared initializer for both the filter and the source variant.
 *  Parses the user filter string, instantiates all CIFilter objects and
 *  creates the GPU (CIContext) and color-space resources.
 *
 *  @return 0 on success, AVERROR_EXIT for the list-* options,
 *          a negative AVERROR code on failure
 */
static av_cold int init(AVFilterContext *fctx)
{
    CoreImageContext *ctx      = fctx->priv;
    AVDictionary *filter_dict  = NULL;
    const AVDictionaryEntry *f = NULL;
    const AVDictionaryEntry *o = NULL;
    int ret;
    int i;

    if (ctx->list_filters || ctx->list_generators) {
        list_filters(ctx);
        return AVERROR_EXIT;
    }

    if (ctx->filter_string) {
        // parse filter string (filter=name@opt=val@opt2=val2#name2@opt3=val3) for filters separated by #
        av_log(ctx, AV_LOG_DEBUG, "Filter_string: %s\n", ctx->filter_string);
        ret = av_dict_parse_string(&filter_dict, ctx->filter_string, "@", "#", AV_DICT_MULTIKEY); // parse filter_name:all_filter_options
        if (ret) {
            av_dict_free(&filter_dict);
            av_log(ctx, AV_LOG_ERROR, "Parsing of filters failed.\n");
            return AVERROR(EIO);
        }
        ctx->num_filters = av_dict_count(filter_dict);
        av_log(ctx, AV_LOG_DEBUG, "Filter count: %i\n", ctx->num_filters);

        // allocate CIFilter array
        ctx->filters = av_calloc(ctx->num_filters, sizeof(CIFilter*));
        if (!ctx->filters) {
            av_log(ctx, AV_LOG_ERROR, "Could not allocate filter array.\n");
            av_dict_free(&filter_dict);
            return AVERROR(ENOMEM);
        }

        // parse filters for option key-value pairs (opt=val@opt2=val2) separated by @
        i = 0;
        while ((f = av_dict_iterate(filter_dict, f))) {
            AVDictionary *filter_options = NULL;

            if (strncmp(f->value, "default", 7)) { // not default
                ret = av_dict_parse_string(&filter_options, f->value, "=", "@", 0); // parse option_name:option_value
                if (ret) {
                    av_dict_free(&filter_options);
                    av_dict_free(&filter_dict);
                    av_log(ctx, AV_LOG_ERROR, "Parsing of filter options for \"%s\" failed.\n", f->key);
                    return AVERROR(EIO);
                }
            }

            if (av_log_get_level() >= AV_LOG_DEBUG) {
                av_log(ctx, AV_LOG_DEBUG, "Creating filter %i: \"%s\":\n", i, f->key);
                if (!filter_options) {
                    av_log(ctx, AV_LOG_DEBUG, "\tusing default options\n");
                } else {
                    while ((o = av_dict_iterate(filter_options, o))) {
                        av_log(ctx, AV_LOG_DEBUG, "\t%s: %s\n", o->key, o->value);
                    }
                }
            }

            ctx->filters[i] = CFBridgingRetain(create_filter(ctx, f->key, filter_options));
            av_dict_free(&filter_options); // consumed by create_filter; no longer needed
            if (!ctx->filters[i]) {
                av_log(ctx, AV_LOG_ERROR, "Could not create filter \"%s\".\n", f->key);
                av_dict_free(&filter_dict);
                return AVERROR(EINVAL);
            }

            i++;
        }
        av_dict_free(&filter_dict); // all entries consumed above
    } else {
        av_log(ctx, AV_LOG_ERROR, "No filters specified.\n");
        return AVERROR(EINVAL);
    }

    // create GPU context on OSX
    const NSOpenGLPixelFormatAttribute attr[] = {
        NSOpenGLPFAAccelerated,
        NSOpenGLPFANoRecovery,
        NSOpenGLPFAColorSize, 32,
        0
    };

    NSOpenGLPixelFormat *pixel_format = [[NSOpenGLPixelFormat alloc] initWithAttributes:(void *)&attr];
    ctx->color_space                  = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
    ctx->glctx                        = CFBridgingRetain([CIContext contextWithCGLContext:CGLGetCurrentContext()
                                                                              pixelFormat:[pixel_format CGLPixelFormatObj]
                                                                               colorSpace:ctx->color_space
                                                                                  options:nil]);

    if (!ctx->glctx) {
        av_log(ctx, AV_LOG_ERROR, "CIContext not created.\n");
        return AVERROR_EXTERNAL;
    }

    // Creating an empty input image as input container for the context
    ctx->input_image = CFBridgingRetain([CIImage emptyImage]);

    return 0;
}
520 
/** Initializer for the coreimagesrc video source: marks the context as a
 *  source and derives the time base from the frame rate before delegating
 *  to the common init().
 */
static av_cold int init_src(AVFilterContext *fctx)
{
    CoreImageContext *ctx = fctx->priv;

    ctx->is_video_source = 1;
    ctx->time_base       = av_inv_q(ctx->frame_rate);
    ctx->pts             = 0;

    return init(fctx);
}
531 
/** Release all CoreFoundation/CoreGraphics resources and the cached picture.
 *  Safe to call on a partially initialized context: every release is
 *  nil-guarded and the pointer is cleared afterwards.
 */
static av_cold void uninit(AVFilterContext *fctx)
{
// release a CF object only if it was created, then clear the pointer
#define SafeCFRelease(ptr) do { \
    if (ptr) {                  \
        CFRelease(ptr);         \
        ptr = NULL;             \
    }                           \
} while (0)

    CoreImageContext *ctx = fctx->priv;

    SafeCFRelease(ctx->glctx);
    SafeCFRelease(ctx->cgctx);
    SafeCFRelease(ctx->color_space);
    SafeCFRelease(ctx->input_image);

    // filters were retained via CFBridgingRetain() in init()
    if (ctx->filters) {
        for (int i = 0; i < ctx->num_filters; i++) {
            SafeCFRelease(ctx->filters[i]);
        }
        av_freep(&ctx->filters);
    }

    av_frame_free(&ctx->picref);
}
557 
static const AVFilterPad vf_coreimage_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};
566 
static const AVFilterPad vf_coreimage_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};
573 
static const AVFilterPad vsrc_coreimagesrc_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
};
582 
#define OFFSET(x) offsetof(CoreImageContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

// options shared by the coreimagesrc generator: output size, rate, duration, SAR
#define GENERATOR_OPTIONS \
    {"size",     "set video size",                OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0,         FLAGS}, \
    {"s",        "set video size",                OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0,         FLAGS}, \
    {"rate",     "set video rate",                OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"},      0, INT_MAX,   FLAGS}, \
    {"r",        "set video rate",                OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"},      0, INT_MAX,   FLAGS}, \
    {"duration", "set video duration",            OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1},       -1, INT64_MAX, FLAGS}, \
    {"d",        "set video duration",            OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1},       -1, INT64_MAX, FLAGS}, \
    {"sar",      "set video sample aspect ratio", OFFSET(sar),        AV_OPT_TYPE_RATIONAL,   {.dbl = 1},         0, INT_MAX,   FLAGS},

// options shared by both the filter and the source variant
#define FILTER_OPTIONS \
    {"list_filters",    "list available filters",                OFFSET(list_filters),    AV_OPT_TYPE_BOOL,   {.i64 = 0},    0, 1, .flags = FLAGS}, \
    {"list_generators", "list available generators",             OFFSET(list_generators), AV_OPT_TYPE_BOOL,   {.i64 = 0},    0, 1, .flags = FLAGS}, \
    {"filter",          "names and options of filters to apply", OFFSET(filter_string),   AV_OPT_TYPE_STRING, {.str = NULL},       .flags = FLAGS}, \
    {"output_rect",     "output rectangle within output image",  OFFSET(output_rect),     AV_OPT_TYPE_STRING, {.str = NULL},       .flags = FLAGS},
600 
601 
602 // definitions for coreimage video filter
static const AVOption coreimage_options[] = {
    FILTER_OPTIONS
    { NULL }
};
607 
608 AVFILTER_DEFINE_CLASS(coreimage);
609 
const FFFilter ff_vf_coreimage = {
    .p.name        = "coreimage",
    .p.description = NULL_IF_CONFIG_SMALL("Video filtering using CoreImage API."),
    .p.priv_class  = &coreimage_class,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(CoreImageContext),
    FILTER_INPUTS(vf_coreimage_inputs),
    FILTER_OUTPUTS(vf_coreimage_outputs),
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_ARGB),
};
621 
622 // definitions for coreimagesrc video source
static const AVOption coreimagesrc_options[] = {
    GENERATOR_OPTIONS
    FILTER_OPTIONS
    { NULL }
};
628 
629 AVFILTER_DEFINE_CLASS(coreimagesrc);
630 
const FFFilter ff_vsrc_coreimagesrc = {
    .p.name        = "coreimagesrc",
    .p.description = NULL_IF_CONFIG_SMALL("Video source using image generators of CoreImage API."),
    .p.priv_class  = &coreimagesrc_class,
    .p.inputs      = NULL,
    .init          = init_src,
    .uninit        = uninit,
    .priv_size     = sizeof(CoreImageContext),
    FILTER_OUTPUTS(vsrc_coreimagesrc_outputs),
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_ARGB),
};
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:116
CoreImageContext::glctx
CFTypeRef glctx
OpenGL context.
Definition: vf_coreimage.m:51
CoreImageContext::w
int w
Definition: vf_coreimage.m:43
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
CoreImageContext::duration
int64_t duration
duration expressed in microseconds
Definition: vf_coreimage.m:47
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:55
CoreImageContext::cgctx
CGContextRef cgctx
Bitmap context for image copy.
Definition: vf_coreimage.m:52
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1053
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3341
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
rect
Definition: f_ebur128.c:76
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(coreimage)
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
int64_t
long long int64_t
Definition: coverity.c:34
ff_vsrc_coreimagesrc
const FFFilter ff_vsrc_coreimagesrc
Definition: vf_coreimage.m:631
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:262
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVOption
AVOption.
Definition: opt.h:429
create_filter
static CIFilter * create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
Create a filter object by a given name and set all options to defaults.
Definition: vf_coreimage.m:405
data
const char data[16]
Definition: mxf.c:149
CoreImageContext::color_space
CGColorSpaceRef color_space
Common color space for input image and cgcontext.
Definition: vf_coreimage.m:54
vf_coreimage_outputs
static const AVFilterPad vf_coreimage_outputs[]
Definition: vf_coreimage.m:567
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
av_get_bits_per_pixel
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
Definition: pixdesc.c:3293
AVDictionary
Definition: dict.c:32
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:203
coreimage_options
static const AVOption coreimage_options[]
Definition: vf_coreimage.m:603
video.h
CoreImageContext
Definition: vf_coreimage.m:38
formats.h
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *frame)
Apply all valid filters successively to the input image.
Definition: vf_coreimage.m:271
CLAMP_WARNING
#define CLAMP_WARNING
AVFilterContext::priv
void * priv
private data for use by the filter
Definition: avfilter.h:272
vsrc_coreimagesrc_outputs
static const AVFilterPad vsrc_coreimagesrc_outputs[]
Definition: vf_coreimage.m:574
request_frame
static int request_frame(AVFilterLink *link)
Definition: vf_coreimage.m:276
init
static av_cold int init(AVFilterContext *fctx)
Definition: vf_coreimage.m:424
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
config_output
static int config_output(AVFilterLink *link)
Definition: vf_coreimage.m:66
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
FFFilter
Definition: filters.h:265
CoreImageContext::picref
AVFrame * picref
cached reference containing the painted picture
Definition: vf_coreimage.m:49
AVDictionaryEntry::key
char * key
Definition: dict.h:90
frame_size
int frame_size
Definition: mxfenc.c:2446
FILTER_OPTIONS
#define FILTER_OPTIONS
Definition: vf_coreimage.m:595
CoreImageContext::h
int h
video size
Definition: vf_coreimage.m:43
filters.h
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:485
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
CoreImageContext::time_base
AVRational time_base
stream time base
Definition: vf_coreimage.m:46
key
const char * key
Definition: hwcontext_opencl.c:189
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:263
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
if
if(ret)
Definition: filter_design.txt:179
config_input
static int config_input(AVFilterLink *link)
Determine image properties from input link of filter chain.
Definition: vf_coreimage.m:85
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:468
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
NULL
#define NULL
Definition: coverity.c:32
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
CoreImageContext::filters
CFTypeRef * filters
CIFilter object for all requested filters.
Definition: vf_coreimage.m:58
CoreImageContext::bits_per_component
int bits_per_component
Shared bpc for input-output operation.
Definition: vf_coreimage.m:55
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
options
Definition: swscale.c:43
ff_filter_link
static FilterLink * ff_filter_link(AVFilterLink *link)
Definition: filters.h:197
CoreImageContext::pts
int64_t pts
increasing presentation time stamp
Definition: vf_coreimage.m:48
f
f
Definition: af_crystalizer.c:122
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
CoreImageContext::sar
AVRational sar
sample aspect ratio
Definition: vf_coreimage.m:44
CoreImageContext::input_image
CFTypeRef input_image
Input image container for passing into Core Image API.
Definition: vf_coreimage.m:53
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:527
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:221
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
list_filters
static void list_filters(CoreImageContext *ctx)
Print a list of all available filters including options and respective value ranges and defaults.
Definition: vf_coreimage.m:96
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
coreimagesrc_options
static const AVOption coreimagesrc_options[]
Definition: vf_coreimage.m:623
internal.h
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
CoreImageContext::filter_string
char * filter_string
The complete user provided filter definition.
Definition: vf_coreimage.m:57
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:44
set_option
static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
Set an option of the given filter to the provided key-value pair.
Definition: vf_coreimage.m:312
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
CoreImageContext::frame_rate
AVRational frame_rate
video frame rate
Definition: vf_coreimage.m:45
ret
ret
Definition: filter_design.txt:187
CoreImageContext::output_rect
char * output_rect
Rectangle to be filled with filter input.
Definition: vf_coreimage.m:61
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_vf_coreimage
const FFFilter ff_vf_coreimage
Definition: vf_coreimage.m:610
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:507
CoreImageContext::num_filters
int num_filters
Amount of filters in *filters.
Definition: vf_coreimage.m:59
CoreImageContext::list_generators
int list_generators
Option used to list all available generators.
Definition: vf_coreimage.m:63
init_src
static av_cold int init_src(AVFilterContext *fctx)
Definition: vf_coreimage.m:521
avfilter.h
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:198
CoreImageContext::is_video_source
int is_video_source
filter is used as video source
Definition: vf_coreimage.m:41
apply_filter
static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
Definition: vf_coreimage.m:134
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1400
AVFilterContext
An instance of a filter.
Definition: avfilter.h:257
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
FFFilter::p
AVFilter p
The public AVFilter.
Definition: filters.h:269
uninit
static av_cold void uninit(AVFilterContext *fctx)
Definition: vf_coreimage.m:532
mem.h
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AVDictionaryEntry
Definition: dict.h:89
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
vf_coreimage_inputs
static const AVFilterPad vf_coreimage_inputs[]
Definition: vf_coreimage.m:558
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
GENERATOR_OPTIONS
#define GENERATOR_OPTIONS
Definition: vf_coreimage.m:586
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
SafeCFRelease
#define SafeCFRelease(ptr)
AVDictionaryEntry::value
char * value
Definition: dict.h:91
FILTER_SINGLE_PIXFMT
#define FILTER_SINGLE_PIXFMT(pix_fmt_)
Definition: filters.h:252
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
CoreImageContext::list_filters
int list_filters
Option used to list all available filters including generators.
Definition: vf_coreimage.m:62