FFmpeg
avfoundation.m
Go to the documentation of this file.
1 /*
2  * AVFoundation input device
3  * Copyright (c) 2014 Thilo Borgmann <thilo.borgmann@mail.de>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * AVFoundation input device
25  * @author Thilo Borgmann <thilo.borgmann@mail.de>
26  */
27 
28 #import <AVFoundation/AVFoundation.h>
29 #include <pthread.h>
30 
32 #include "libavutil/mem.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/avstring.h"
36 #include "libavformat/demux.h"
37 #include "libavformat/internal.h"
38 #include "libavutil/internal.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/time.h"
41 #include "libavutil/imgutils.h"
42 #include "avdevice.h"
43 
/* Internal time base for all timestamps produced by this device: microseconds. */
static const int avf_time_base = 1000000;

/* avf_time_base as an AVRational, for rescaling host/CMTime clocks into
 * the stream time base set via avpriv_set_pts_info(). */
static const AVRational avf_time_base_q = {
    .num = 1,
    .den = avf_time_base
};
50 
53  OSType avf_id;
54 };
55 
/* Mapping between FFmpeg pixel formats and CoreVideo pixel format types.
 * Scanned linearly; the table is terminated by the AV_PIX_FMT_NONE entry. */
static const struct AVFPixelFormatSpec avf_pixel_formats[] = {
    { AV_PIX_FMT_MONOBLACK,    kCVPixelFormatType_1Monochrome },
    { AV_PIX_FMT_RGB555BE,     kCVPixelFormatType_16BE555 },
    { AV_PIX_FMT_RGB555LE,     kCVPixelFormatType_16LE555 },
    { AV_PIX_FMT_RGB565BE,     kCVPixelFormatType_16BE565 },
    { AV_PIX_FMT_RGB565LE,     kCVPixelFormatType_16LE565 },
    { AV_PIX_FMT_RGB24,        kCVPixelFormatType_24RGB },
    { AV_PIX_FMT_BGR24,        kCVPixelFormatType_24BGR },
    { AV_PIX_FMT_0RGB,         kCVPixelFormatType_32ARGB },
    { AV_PIX_FMT_BGR0,         kCVPixelFormatType_32BGRA },
    { AV_PIX_FMT_0BGR,         kCVPixelFormatType_32ABGR },
    { AV_PIX_FMT_RGB0,         kCVPixelFormatType_32RGBA },
    { AV_PIX_FMT_BGR48BE,      kCVPixelFormatType_48RGB },
    { AV_PIX_FMT_UYVY422,      kCVPixelFormatType_422YpCbCr8 },
    { AV_PIX_FMT_YUVA444P,     kCVPixelFormatType_4444YpCbCrA8R },
    { AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
    { AV_PIX_FMT_YUV444P,      kCVPixelFormatType_444YpCbCr8 },
    { AV_PIX_FMT_YUV422P16,    kCVPixelFormatType_422YpCbCr16 },
    { AV_PIX_FMT_YUV422P10,    kCVPixelFormatType_422YpCbCr10 },
    { AV_PIX_FMT_YUV444P10,    kCVPixelFormatType_444YpCbCr10 },
    { AV_PIX_FMT_YUV420P,      kCVPixelFormatType_420YpCbCr8Planar },
    { AV_PIX_FMT_NV12,         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
    { AV_PIX_FMT_YUYV422,      kCVPixelFormatType_422YpCbCr8_yuvs },
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
    /* single-component 8-bit only exists on OS X >= 10.8 */
    { AV_PIX_FMT_GRAY8,        kCVPixelFormatType_OneComponent8 },
#endif
    { AV_PIX_FMT_NONE, 0 }
};
84 
85 typedef struct
86 {
87  AVClass* class;
88 
94 
96  int width, height;
97 
104 
110 
111  char *url;
114 
116 
120  int audio_be;
124 
127 
128  enum AVPixelFormat pixel_format;
129 
130  AVCaptureSession *capture_session;
131  AVCaptureVideoDataOutput *video_output;
132  AVCaptureAudioDataOutput *audio_output;
133  CMSampleBufferRef current_frame;
134  CMSampleBufferRef current_audio_frame;
135 
136  AVCaptureDevice *observed_device;
137 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
138  AVCaptureDeviceTransportControlsPlaybackMode observed_mode;
139 #endif
141 } AVFContext;
142 
144 {
145  pthread_mutex_lock(&ctx->frame_lock);
146 }
147 
149 {
150  pthread_mutex_unlock(&ctx->frame_lock);
151 }
152 
/** FrameReceiver class - delegate for AVCaptureSession
 */
155 @interface AVFFrameReceiver : NSObject
156 {
158 }
159 
160 - (id)initWithContext:(AVFContext*)context;
161 
162 - (void) captureOutput:(AVCaptureOutput *)captureOutput
163  didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
164  fromConnection:(AVCaptureConnection *)connection;
165 
166 @end
167 
168 @implementation AVFFrameReceiver
169 
// Designated initializer: stores a non-owning pointer to the demuxer context
// and, on macOS, registers for KVO on the observed device's
// transportControlsPlaybackMode so playback stops can be detected.
- (id)initWithContext:(AVFContext*)context
{
    self = [super init];
    if (!self)
        return nil;

    _context = context;

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    // Only observe when a transport-controlled device has been configured.
    if (_context->observed_device) {
        [_context->observed_device addObserver: self
                                    forKeyPath: NSStringFromSelector(@selector(transportControlsPlaybackMode))
                                       options: NSKeyValueObservingOptionNew
                                       context: _context];
    }
#endif

    return self;
}
190 
// NOTE: this file is built with manual reference counting (MRC), hence the
// explicit [super dealloc]. The KVO registration made in initWithContext:
// must be balanced here before the receiver goes away.
- (void)dealloc {
    // stop observing if a device is set for it
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    if (_context->observed_device) {
        NSString *keyPath = NSStringFromSelector(@selector(transportControlsPlaybackMode));
        [_context->observed_device removeObserver: self forKeyPath: keyPath];
    }
#endif
    [super dealloc];
}
201 
// KVO callback for the observed device's transportControlsPlaybackMode.
// Our own registrations are identified by the context pointer; anything else
// is forwarded to super as the KVO contract requires.
- (void)observeValueForKeyPath:(NSString *)keyPath
                      ofObject:(id)object
                        change:(NSDictionary *)change
                       context:(void *)context {
    if (context == _context) {
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
        AVCaptureDeviceTransportControlsPlaybackMode mode =
            [change[NSKeyValueChangeNewKey] integerValue];

        // Record mode transitions; when the device stops playing, raise the
        // quit flag (presumably polled by the read loop — field is declared
        // elsewhere in AVFContext, not visible here).
        if (mode != _context->observed_mode) {
            if (mode == AVCaptureDeviceTransportControlsNotPlayingMode) {
                _context->observed_quit = 1;
            }
            _context->observed_mode = mode;
        }
#endif
    } else {
        [super observeValueForKeyPath: keyPath
                             ofObject: object
                               change: change
                              context: context];
    }
}
225 
226 - (void) captureOutput:(AVCaptureOutput *)captureOutput
227  didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
228  fromConnection:(AVCaptureConnection *)connection
229 {
231 
232  if (_context->current_frame != nil) {
233  CFRelease(_context->current_frame);
234  }
235 
236  _context->current_frame = (CMSampleBufferRef)CFRetain(videoFrame);
237 
239 
241 }
242 
243 @end
244 
/** AudioReceiver class - delegate for AVCaptureSession
 */
247 @interface AVFAudioReceiver : NSObject
248 {
250 }
251 
252 - (id)initWithContext:(AVFContext*)context;
253 
254 - (void) captureOutput:(AVCaptureOutput *)captureOutput
255  didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
256  fromConnection:(AVCaptureConnection *)connection;
257 
258 @end
259 
260 @implementation AVFAudioReceiver
261 
// Stores a non-owning pointer to the demuxer context for later use by the
// sample buffer callback.
- (id)initWithContext:(AVFContext*)context
{
    self = [super init];
    if (self)
        _context = context;
    return self;
}
269 
270 - (void) captureOutput:(AVCaptureOutput *)captureOutput
271  didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
272  fromConnection:(AVCaptureConnection *)connection
273 {
275 
276  if (_context->current_audio_frame != nil) {
277  CFRelease(_context->current_audio_frame);
278  }
279 
280  _context->current_audio_frame = (CMSampleBufferRef)CFRetain(audioFrame);
281 
283 
285 }
286 
287 @end
288 
290 {
291  [ctx->capture_session stopRunning];
292 
293  [ctx->capture_session release];
294  [ctx->video_output release];
295  [ctx->audio_output release];
296  [ctx->avf_delegate release];
297  [ctx->avf_audio_delegate release];
298 
299  ctx->capture_session = NULL;
300  ctx->video_output = NULL;
301  ctx->audio_output = NULL;
302  ctx->avf_delegate = NULL;
303  ctx->avf_audio_delegate = NULL;
304 
305  av_freep(&ctx->url);
306  av_freep(&ctx->audio_buffer);
307 
308  pthread_mutex_destroy(&ctx->frame_lock);
309 
310  if (ctx->current_frame) {
311  CFRelease(ctx->current_frame);
312  }
313 }
314 
316 {
317  AVFContext *ctx = (AVFContext*)s->priv_data;
318  char *save;
319 
320  ctx->url = av_strdup(s->url);
321 
322  if (!ctx->url)
323  return AVERROR(ENOMEM);
324  if (ctx->url[0] != ':') {
325  ctx->video_filename = av_strtok(ctx->url, ":", &save);
326  ctx->audio_filename = av_strtok(NULL, ":", &save);
327  } else {
328  ctx->audio_filename = av_strtok(ctx->url, ":", &save);
329  }
330  return 0;
331 }
332 
/**
 * Configure the video device.
 *
 * Configure the video device using a run-time approach to access properties
 * since formats, activeFormat are available since iOS >= 7.0 or OSX >= 10.7
 * and activeVideoMaxFrameDuration is available since iOS >= 7.0 and OSX >= 10.9.
 *
 * The NSUndefinedKeyException must be handled by the caller of this function.
 *
 */
static int configure_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;

    double framerate = av_q2d(ctx->framerate);
    NSObject *range = nil;
    NSObject *format = nil;
    NSObject *selected_range = nil;
    NSObject *selected_format = nil;

    // try to configure format by formats list
    // might raise an exception if no format list is given
    // (then fallback to default, no configuration)
    @try {
        // Pick the first device format whose dimensions match the requested
        // width/height (or any format when no size was requested), then look
        // for a frame-rate range whose maximum matches the requested rate.
        for (format in [video_device valueForKey:@"formats"]) {
            CMFormatDescriptionRef formatDescription;
            CMVideoDimensions dimensions;

            // Accessed via KVC/performSelector since the properties only
            // exist on newer OS versions (see the comment above).
            formatDescription = (CMFormatDescriptionRef) [format performSelector:@selector(formatDescription)];
            dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);

            if ((ctx->width == 0 && ctx->height == 0) ||
                (dimensions.width == ctx->width && dimensions.height == ctx->height)) {

                selected_format = format;

                for (range in [format valueForKey:@"videoSupportedFrameRateRanges"]) {
                    double max_framerate;

                    [[range valueForKey:@"maxFrameRate"] getValue:&max_framerate];
                    // Compare with a small tolerance: the device reports
                    // frame rates as floating-point values.
                    if (fabs (framerate - max_framerate) < 0.01) {
                        selected_range = range;
                        break;
                    }
                }
            }
        }

        if (!selected_format) {
            av_log(s, AV_LOG_ERROR, "Selected video size (%dx%d) is not supported by the device.\n",
                ctx->width, ctx->height);
            goto unsupported_format;
        }

        if (!selected_range) {
            av_log(s, AV_LOG_ERROR, "Selected framerate (%f) is not supported by the device.\n",
                framerate);
            // Muxed devices may not expose matching frame-rate ranges;
            // continue with the device default instead of failing.
            if (ctx->video_is_muxed) {
                av_log(s, AV_LOG_ERROR, "Falling back to default.\n");
            } else {
                goto unsupported_format;
            }
        }

        // Applying the format requires an exclusive configuration lock.
        if ([video_device lockForConfiguration:NULL] == YES) {
            if (selected_format) {
                [video_device setValue:selected_format forKey:@"activeFormat"];
            }
            if (selected_range) {
                // Pin both min and max frame duration to the range's minimum
                // to get a fixed capture rate.
                NSValue *min_frame_duration = [selected_range valueForKey:@"minFrameDuration"];
                [video_device setValue:min_frame_duration forKey:@"activeVideoMinFrameDuration"];
                [video_device setValue:min_frame_duration forKey:@"activeVideoMaxFrameDuration"];
            }
        } else {
            av_log(s, AV_LOG_ERROR, "Could not lock device for configuration.\n");
            return AVERROR(EINVAL);
        }
    } @catch(NSException *e) {
        // Raised when the OS predates the KVC keys used above; keep the
        // device's default configuration in that case.
        av_log(ctx, AV_LOG_WARNING, "Configuration of video device failed, falling back to default.\n");
    }

    return 0;

unsupported_format:

    // List every supported size/frame-rate combination to help the user
    // pick valid options, then fail.
    av_log(s, AV_LOG_ERROR, "Supported modes:\n");
    for (format in [video_device valueForKey:@"formats"]) {
        CMFormatDescriptionRef formatDescription;
        CMVideoDimensions dimensions;

        formatDescription = (CMFormatDescriptionRef) [format performSelector:@selector(formatDescription)];
        dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);

        for (range in [format valueForKey:@"videoSupportedFrameRateRanges"]) {
            double min_framerate;
            double max_framerate;

            [[range valueForKey:@"minFrameRate"] getValue:&min_framerate];
            [[range valueForKey:@"maxFrameRate"] getValue:&max_framerate];
            av_log(s, AV_LOG_ERROR, " %dx%d@[%f %f]fps\n",
                dimensions.width, dimensions.height,
                min_framerate, max_framerate);
        }
    }
    return AVERROR(EINVAL);
}
439 
/**
 * Add the selected video device (capture device or screen input) to the
 * capture session and attach an AVCaptureVideoDataOutput for it.
 *
 * Chooses a CoreVideo pixel format matching ctx->pixel_format; when the
 * device does not support it, falls back (with a warning) to the first
 * format from avf_pixel_formats[] the device does support.
 *
 * @return 0 on success, 1 or a negative AVERROR code on failure.
 */
static int add_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    int ret;
    NSError *error = nil;
    AVCaptureInput* capture_input = nil;
    struct AVFPixelFormatSpec pxl_fmt_spec;
    NSNumber *pixel_format;
    NSDictionary *capture_dict;
    dispatch_queue_t queue;

    // Indices below num_video_devices are real devices needing a device
    // input wrapper; above that, video_device is already a (screen) input.
    if (ctx->video_device_index < ctx->num_video_devices) {
        capture_input = (AVCaptureInput*) [[[AVCaptureDeviceInput alloc] initWithDevice:video_device error:&error] autorelease];
    } else {
        capture_input = (AVCaptureInput*) video_device;
    }

    if (!capture_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[error localizedDescription] UTF8String]);
        return 1;
    }

    if ([ctx->capture_session canAddInput:capture_input]) {
        [ctx->capture_session addInput:capture_input];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video input to capture session\n");
        return 1;
    }

    // Attaching output
    ctx->video_output = [[AVCaptureVideoDataOutput alloc] init];

    if (!ctx->video_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV video output\n");
        return 1;
    }

    // Configure device framerate and video size.
    // configure_video_device() uses KVC keys not present on older OS
    // versions; NSUndefinedKeyException means "unsupported, use defaults".
    @try {
        if ((ret = configure_video_device(s, video_device)) < 0) {
            return ret;
        }
    } @catch (NSException *exception) {
        if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
            av_log (s, AV_LOG_ERROR, "An error occurred: %s", [exception.reason UTF8String]);
            return AVERROR_EXTERNAL;
        }
    }

    // select pixel format
    pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

    for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
        if (ctx->pixel_format == avf_pixel_formats[i].ff_id) {
            pxl_fmt_spec = avf_pixel_formats[i];
            break;
        }
    }

    // check if selected pixel format is supported by AVFoundation
    if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
        // Print the *requested* format here: pxl_fmt_spec.ff_id is
        // AV_PIX_FMT_NONE at this point and av_get_pix_fmt_name() would
        // return NULL for it (undefined behavior with %s).
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by AVFoundation.\n",
               av_get_pix_fmt_name(ctx->pixel_format));
        return 1;
    }

    // check if the pixel format is available for this device
    if ([[ctx->video_output availableVideoCVPixelFormatTypes] indexOfObject:[NSNumber numberWithInt:pxl_fmt_spec.avf_id]] == NSNotFound) {
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by the input device.\n",
               av_get_pix_fmt_name(pxl_fmt_spec.ff_id));

        pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

        av_log(s, AV_LOG_ERROR, "Supported pixel formats:\n");
        for (NSNumber *pxl_fmt in [ctx->video_output availableVideoCVPixelFormatTypes]) {
            struct AVFPixelFormatSpec pxl_fmt_dummy;
            pxl_fmt_dummy.ff_id = AV_PIX_FMT_NONE;
            for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
                if ([pxl_fmt intValue] == avf_pixel_formats[i].avf_id) {
                    pxl_fmt_dummy = avf_pixel_formats[i];
                    break;
                }
            }

            if (pxl_fmt_dummy.ff_id != AV_PIX_FMT_NONE) {
                av_log(s, AV_LOG_ERROR, " %s\n", av_get_pix_fmt_name(pxl_fmt_dummy.ff_id));

                // select first supported pixel format instead of user selected (or default) pixel format
                if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
                    pxl_fmt_spec = pxl_fmt_dummy;
                }
            }
        }

        // fail if there is no appropriate pixel format or print a warning about overriding the pixel format
        if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
            return 1;
        } else {
            av_log(s, AV_LOG_WARNING, "Overriding selected pixel format to use %s instead.\n",
                   av_get_pix_fmt_name(pxl_fmt_spec.ff_id));
        }
    }

    // set videoSettings to an empty dict for receiving raw data of muxed devices
    if (ctx->capture_raw_data) {
        ctx->pixel_format = pxl_fmt_spec.ff_id;
        ctx->video_output.videoSettings = @{ };
    } else {
        ctx->pixel_format = pxl_fmt_spec.ff_id;
        pixel_format = [NSNumber numberWithUnsignedInt:pxl_fmt_spec.avf_id];
        capture_dict = [NSDictionary dictionaryWithObject:pixel_format
                                                   forKey:(id)kCVPixelBufferPixelFormatTypeKey];

        [ctx->video_output setVideoSettings:capture_dict];
    }
    [ctx->video_output setAlwaysDiscardsLateVideoFrames:ctx->drop_late_frames];

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    // check for transport control support and set observer device if supported
    if (!ctx->video_is_screen) {
        int trans_ctrl = [video_device transportControlsSupported];
        AVCaptureDeviceTransportControlsPlaybackMode trans_mode = [video_device transportControlsPlaybackMode];

        if (trans_ctrl) {
            ctx->observed_mode = trans_mode;
            ctx->observed_device = video_device;
        }
    }
#endif

    ctx->avf_delegate = [[AVFFrameReceiver alloc] initWithContext:ctx];

    // Sample buffers are delivered to the delegate on a private serial queue.
    queue = dispatch_queue_create("avf_queue", NULL);
    [ctx->video_output setSampleBufferDelegate:ctx->avf_delegate queue:queue];
    dispatch_release(queue);

    if ([ctx->capture_session canAddOutput:ctx->video_output]) {
        [ctx->capture_session addOutput:ctx->video_output];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video output to capture session\n");
        return 1;
    }

    return 0;
}
586 
/**
 * Add the given audio capture device to the capture session and attach an
 * AVCaptureAudioDataOutput delivering to AVFAudioReceiver.
 *
 * @return 0 on success, 1 on failure.
 */
static int add_audio_device(AVFormatContext *s, AVCaptureDevice *audio_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    NSError *err = nil;
    dispatch_queue_t audio_queue;
    AVCaptureDeviceInput *dev_input =
        [[[AVCaptureDeviceInput alloc] initWithDevice:audio_device error:&err] autorelease];

    if (!dev_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[err localizedDescription] UTF8String]);
        return 1;
    }

    if (![ctx->capture_session canAddInput:dev_input]) {
        av_log(s, AV_LOG_ERROR, "can't add audio input to capture session\n");
        return 1;
    }
    [ctx->capture_session addInput:dev_input];

    // Attach the raw-audio output.
    ctx->audio_output = [[AVCaptureAudioDataOutput alloc] init];
    if (!ctx->audio_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV audio output\n");
        return 1;
    }

    // Sample buffers are delivered to the delegate on a private serial queue.
    ctx->avf_audio_delegate = [[AVFAudioReceiver alloc] initWithContext:ctx];

    audio_queue = dispatch_queue_create("avf_audio_queue", NULL);
    [ctx->audio_output setSampleBufferDelegate:ctx->avf_audio_delegate queue:audio_queue];
    dispatch_release(audio_queue);

    if (![ctx->capture_session canAddOutput:ctx->audio_output]) {
        av_log(s, AV_LOG_ERROR, "adding audio output to capture session failed\n");
        return 1;
    }
    [ctx->capture_session addOutput:ctx->audio_output];

    return 0;
}
630 
632 {
633  AVFContext *ctx = (AVFContext*)s->priv_data;
634  CVImageBufferRef image_buffer;
635  CMBlockBufferRef block_buffer;
636  CGSize image_buffer_size;
637  AVStream* stream = avformat_new_stream(s, NULL);
638 
639  if (!stream) {
640  return 1;
641  }
642 
643  // Take stream info from the first frame.
644  while (ctx->frames_captured < 1) {
645  CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
646  }
647 
648  lock_frames(ctx);
649 
650  ctx->video_stream_index = stream->index;
651 
652  avpriv_set_pts_info(stream, 64, 1, avf_time_base);
653 
654  image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
655  block_buffer = CMSampleBufferGetDataBuffer(ctx->current_frame);
656 
657  if (image_buffer) {
658  image_buffer_size = CVImageBufferGetEncodedSize(image_buffer);
659 
660  stream->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
661  stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
662  stream->codecpar->width = (int)image_buffer_size.width;
663  stream->codecpar->height = (int)image_buffer_size.height;
664  stream->codecpar->format = ctx->pixel_format;
665  } else {
666  stream->codecpar->codec_id = AV_CODEC_ID_DVVIDEO;
667  stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
668  stream->codecpar->format = ctx->pixel_format;
669  }
670 
671  CFRelease(ctx->current_frame);
672  ctx->current_frame = nil;
673 
675 
676  return 0;
677 }
678 
680 {
681  AVFContext *ctx = (AVFContext*)s->priv_data;
682  CMFormatDescriptionRef format_desc;
683  AVStream* stream = avformat_new_stream(s, NULL);
684 
685  if (!stream) {
686  return 1;
687  }
688 
689  // Take stream info from the first frame.
690  while (ctx->audio_frames_captured < 1) {
691  CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
692  }
693 
694  lock_frames(ctx);
695 
696  ctx->audio_stream_index = stream->index;
697 
698  avpriv_set_pts_info(stream, 64, 1, avf_time_base);
699 
700  format_desc = CMSampleBufferGetFormatDescription(ctx->current_audio_frame);
701  const AudioStreamBasicDescription *basic_desc = CMAudioFormatDescriptionGetStreamBasicDescription(format_desc);
702 
703  if (!basic_desc) {
705  av_log(s, AV_LOG_ERROR, "audio format not available\n");
706  return 1;
707  }
708 
709  stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
710  stream->codecpar->sample_rate = basic_desc->mSampleRate;
711  av_channel_layout_default(&stream->codecpar->ch_layout, basic_desc->mChannelsPerFrame);
712 
713  ctx->audio_channels = basic_desc->mChannelsPerFrame;
714  ctx->audio_bits_per_sample = basic_desc->mBitsPerChannel;
715  ctx->audio_float = basic_desc->mFormatFlags & kAudioFormatFlagIsFloat;
716  ctx->audio_be = basic_desc->mFormatFlags & kAudioFormatFlagIsBigEndian;
717  ctx->audio_signed_integer = basic_desc->mFormatFlags & kAudioFormatFlagIsSignedInteger;
718  ctx->audio_packed = basic_desc->mFormatFlags & kAudioFormatFlagIsPacked;
719  ctx->audio_non_interleaved = basic_desc->mFormatFlags & kAudioFormatFlagIsNonInterleaved;
720 
721  if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
722  ctx->audio_float &&
723  ctx->audio_bits_per_sample == 32 &&
724  ctx->audio_packed) {
725  stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
726  } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
727  ctx->audio_signed_integer &&
728  ctx->audio_bits_per_sample == 16 &&
729  ctx->audio_packed) {
730  stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
731  } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
732  ctx->audio_signed_integer &&
733  ctx->audio_bits_per_sample == 24 &&
734  ctx->audio_packed) {
735  stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
736  } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
737  ctx->audio_signed_integer &&
738  ctx->audio_bits_per_sample == 32 &&
739  ctx->audio_packed) {
740  stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
741  } else {
743  av_log(s, AV_LOG_ERROR, "audio format is not supported\n");
744  return 1;
745  }
746 
747  if (ctx->audio_non_interleaved) {
748  CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
749  ctx->audio_buffer_size = CMBlockBufferGetDataLength(block_buffer);
750  ctx->audio_buffer = av_malloc(ctx->audio_buffer_size);
751  if (!ctx->audio_buffer) {
753  av_log(s, AV_LOG_ERROR, "error allocating audio buffer\n");
754  return 1;
755  }
756  }
757 
758  CFRelease(ctx->current_audio_frame);
759  ctx->current_audio_frame = nil;
760 
762 
763  return 0;
764 }
765 
/**
 * Enumerate capture devices for the given media type.
 *
 * On iOS >= 10 / macOS >= 10.15 this builds a device-type list from the
 * deployment target and uses AVCaptureDeviceDiscoverySession; older targets
 * fall back to the legacy devicesWithMediaType: API.
 *
 * Returns nil for media types other than video, audio, and muxed.
 */
static NSArray* getDevicesWithMediaType(AVMediaType mediaType) {
#if ((TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000) || (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101500))
    NSMutableArray *deviceTypes = nil;
    if (mediaType == AVMediaTypeVideo) {
        // Wide-angle camera is available everywhere; the remaining camera
        // types are gated on the OS version that introduced them.
        deviceTypes = [NSMutableArray arrayWithArray:@[AVCaptureDeviceTypeBuiltInWideAngleCamera]];
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInDualCamera];
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInTelephotoCamera];
        #endif
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 110100)
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInTrueDepthCamera];
        #endif
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 130000)
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInTripleCamera];
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInDualWideCamera];
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInUltraWideCamera];
        #endif
        #if (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED >= 130000)
            [deviceTypes addObject: AVCaptureDeviceTypeDeskViewCamera];
        #endif
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 150400)
            [deviceTypes addObject: AVCaptureDeviceTypeBuiltInLiDARDepthCamera];
        #endif
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 170000 || (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED >= 140000))
            // iOS 17 / macOS 14 replaced ExternalUnknown with the typed
            // Continuity/External device types.
            [deviceTypes addObject: AVCaptureDeviceTypeContinuityCamera];
            [deviceTypes addObject: AVCaptureDeviceTypeExternal];
        #elif (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000)
            [deviceTypes addObject: AVCaptureDeviceTypeExternalUnknown];
        #endif
    } else if (mediaType == AVMediaTypeAudio) {
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 170000 || (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED >= 140000))
            deviceTypes = [NSMutableArray arrayWithArray:@[AVCaptureDeviceTypeMicrophone]];
        #else
            deviceTypes = [NSMutableArray arrayWithArray:@[AVCaptureDeviceTypeBuiltInMicrophone]];
        #endif
    } else if (mediaType == AVMediaTypeMuxed) {
        #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 170000 || (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED >= 140000))
            deviceTypes = [NSMutableArray arrayWithArray:@[AVCaptureDeviceTypeExternal]];
        #elif (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000)
            deviceTypes = [NSMutableArray arrayWithArray:@[AVCaptureDeviceTypeExternalUnknown]];
        #else
            // No muxed device type exists on this platform.
            return nil;
        #endif
    } else {
        return nil;
    }

    AVCaptureDeviceDiscoverySession *captureDeviceDiscoverySession =
        [AVCaptureDeviceDiscoverySession
        discoverySessionWithDeviceTypes:deviceTypes
                              mediaType:mediaType
                               position:AVCaptureDevicePositionUnspecified];
    return [captureDeviceDiscoverySession devices];
#else
    // Legacy path for older deployment targets.
    return [AVCaptureDevice devicesWithMediaType:mediaType];
#endif
}
823 
825 {
826  int ret = 0;
827  NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
828  uint32_t num_screens = 0;
829  AVFContext *ctx = (AVFContext*)s->priv_data;
830  AVCaptureDevice *video_device = nil;
831  AVCaptureDevice *audio_device = nil;
832  // Find capture device
833  NSArray *devices = getDevicesWithMediaType(AVMediaTypeVideo);
834  NSArray *devices_muxed = getDevicesWithMediaType(AVMediaTypeMuxed);
835 
836  ctx->num_video_devices = [devices count] + [devices_muxed count];
837 
838  pthread_mutex_init(&ctx->frame_lock, NULL);
839 
840 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
841  CGGetActiveDisplayList(0, NULL, &num_screens);
842 #endif
843 
844  // List devices if requested
845  if (ctx->list_devices) {
846  int index = 0;
847  av_log(ctx, AV_LOG_INFO, "AVFoundation video devices:\n");
848  for (AVCaptureDevice *device in devices) {
849  const char *name = [[device localizedName] UTF8String];
850  index = [devices indexOfObject:device];
851  av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
852  }
853  for (AVCaptureDevice *device in devices_muxed) {
854  const char *name = [[device localizedName] UTF8String];
855  index = [devices count] + [devices_muxed indexOfObject:device];
856  av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
857  }
858 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
859  if (num_screens > 0) {
860  CGDirectDisplayID screens[num_screens];
861  CGGetActiveDisplayList(num_screens, screens, &num_screens);
862  for (int i = 0; i < num_screens; i++) {
863  av_log(ctx, AV_LOG_INFO, "[%d] Capture screen %d\n", ctx->num_video_devices + i, i);
864  }
865  }
866 #endif
867 
868  av_log(ctx, AV_LOG_INFO, "AVFoundation audio devices:\n");
869  devices = getDevicesWithMediaType(AVMediaTypeAudio);
870  for (AVCaptureDevice *device in devices) {
871  const char *name = [[device localizedName] UTF8String];
872  int index = [devices indexOfObject:device];
873  av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
874  }
875  goto fail;
876  }
877 
878  // parse input filename for video and audio device
880  if (ret)
881  goto fail;
882 
883  // check for device index given in filename
884  if (ctx->video_device_index == -1 && ctx->video_filename) {
885  sscanf(ctx->video_filename, "%d", &ctx->video_device_index);
886  }
887  if (ctx->audio_device_index == -1 && ctx->audio_filename) {
888  sscanf(ctx->audio_filename, "%d", &ctx->audio_device_index);
889  }
890 
891  if (ctx->video_device_index >= 0) {
892  if (ctx->video_device_index < ctx->num_video_devices) {
893  if (ctx->video_device_index < [devices count]) {
894  video_device = [devices objectAtIndex:ctx->video_device_index];
895  } else {
896  video_device = [devices_muxed objectAtIndex:(ctx->video_device_index - [devices count])];
897  ctx->video_is_muxed = 1;
898  }
899  } else if (ctx->video_device_index < ctx->num_video_devices + num_screens) {
900 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
901  CGDirectDisplayID screens[num_screens];
902  CGGetActiveDisplayList(num_screens, screens, &num_screens);
903  AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[ctx->video_device_index - ctx->num_video_devices]] autorelease];
904 
905  if (ctx->framerate.num > 0) {
906  capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
907  }
908 
909 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
910  if (ctx->capture_cursor) {
911  capture_screen_input.capturesCursor = YES;
912  } else {
913  capture_screen_input.capturesCursor = NO;
914  }
915 #endif
916 
917  if (ctx->capture_mouse_clicks) {
918  capture_screen_input.capturesMouseClicks = YES;
919  } else {
920  capture_screen_input.capturesMouseClicks = NO;
921  }
922 
923  video_device = (AVCaptureDevice*) capture_screen_input;
924  ctx->video_is_screen = 1;
925 #endif
926  } else {
927  av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
928  goto fail;
929  }
930  } else if (ctx->video_filename &&
931  strncmp(ctx->video_filename, "none", 4)) {
932  if (!strncmp(ctx->video_filename, "default", 7)) {
933  video_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
934  } else {
935  // looking for video inputs
936  for (AVCaptureDevice *device in devices) {
937  if (!strncmp(ctx->video_filename, [[device localizedName] UTF8String], strlen(ctx->video_filename))) {
938  video_device = device;
939  break;
940  }
941  }
942  // looking for muxed inputs
943  for (AVCaptureDevice *device in devices_muxed) {
944  if (!strncmp(ctx->video_filename, [[device localizedName] UTF8String], strlen(ctx->video_filename))) {
945  video_device = device;
946  ctx->video_is_muxed = 1;
947  break;
948  }
949  }
950 
951 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
952  // looking for screen inputs
953  if (!video_device) {
954  int idx;
955  if(sscanf(ctx->video_filename, "Capture screen %d", &idx) && idx < num_screens) {
956  CGDirectDisplayID screens[num_screens];
957  CGGetActiveDisplayList(num_screens, screens, &num_screens);
958  AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[idx]] autorelease];
959  video_device = (AVCaptureDevice*) capture_screen_input;
960  ctx->video_device_index = ctx->num_video_devices + idx;
961  ctx->video_is_screen = 1;
962 
963  if (ctx->framerate.num > 0) {
964  capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
965  }
966 
967 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
968  if (ctx->capture_cursor) {
969  capture_screen_input.capturesCursor = YES;
970  } else {
971  capture_screen_input.capturesCursor = NO;
972  }
973 #endif
974 
975  if (ctx->capture_mouse_clicks) {
976  capture_screen_input.capturesMouseClicks = YES;
977  } else {
978  capture_screen_input.capturesMouseClicks = NO;
979  }
980  }
981  }
982 #endif
983  }
984 
985  if (!video_device) {
986  av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
987  goto fail;
988  }
989  }
990 
991  // get audio device
992  if (ctx->audio_device_index >= 0) {
993  NSArray *devices = getDevicesWithMediaType(AVMediaTypeAudio);
994 
995  if (ctx->audio_device_index >= [devices count]) {
996  av_log(ctx, AV_LOG_ERROR, "Invalid audio device index\n");
997  goto fail;
998  }
999 
1000  audio_device = [devices objectAtIndex:ctx->audio_device_index];
1001  } else if (ctx->audio_filename &&
1002  strncmp(ctx->audio_filename, "none", 4)) {
1003  if (!strncmp(ctx->audio_filename, "default", 7)) {
1004  audio_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
1005  } else {
1006  NSArray *devices = getDevicesWithMediaType(AVMediaTypeAudio);
1007 
1008  for (AVCaptureDevice *device in devices) {
1009  if (!strncmp(ctx->audio_filename, [[device localizedName] UTF8String], strlen(ctx->audio_filename))) {
1010  audio_device = device;
1011  break;
1012  }
1013  }
1014  }
1015 
1016  if (!audio_device) {
1017  av_log(ctx, AV_LOG_ERROR, "Audio device not found\n");
1018  goto fail;
1019  }
1020  }
1021 
1022  // Video nor Audio capture device not found, looking for AVMediaTypeVideo/Audio
1023  if (!video_device && !audio_device) {
1024  av_log(s, AV_LOG_ERROR, "No AV capture device found\n");
1025  goto fail;
1026  }
1027 
1028  if (video_device) {
1029  if (ctx->video_device_index < ctx->num_video_devices) {
1030  av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device localizedName] UTF8String]);
1031  } else {
1032  av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device description] UTF8String]);
1033  }
1034  }
1035  if (audio_device) {
1036  av_log(s, AV_LOG_DEBUG, "audio device '%s' opened\n", [[audio_device localizedName] UTF8String]);
1037  }
1038 
1039  // Initialize capture session
1040  ctx->capture_session = [[AVCaptureSession alloc] init];
1041 
1042  if (video_device && add_video_device(s, video_device)) {
1043  goto fail;
1044  }
1045  if (audio_device && add_audio_device(s, audio_device)) {
1046  }
1047 
1048  [ctx->capture_session startRunning];
1049 
1050  /* Unlock device configuration only after the session is started so it
1051  * does not reset the capture formats */
1052  if (!ctx->video_is_screen) {
1053  [video_device unlockForConfiguration];
1054  }
1055 
1056  if (video_device && get_video_config(s)) {
1057  goto fail;
1058  }
1059 
1060  // set audio stream
1061  if (audio_device && get_audio_config(s)) {
1062  goto fail;
1063  }
1064 
1065  [pool release];
1066  return 0;
1067 
1068 fail:
1069  [pool release];
1071  if (ret)
1072  return ret;
1073  return AVERROR(EIO);
1074 }
1075 
/**
 * Copy the contents of a CVPixelBuffer into an already-allocated AVPacket.
 *
 * Locks the pixel buffer's base address, gathers per-plane (or single-plane)
 * data pointers and line sizes, and copies the image into pkt->data with
 * av_image_copy_to_buffer() using the context's negotiated pixel format.
 *
 * @param s            demuxer context (priv_data is an AVFContext)
 * @param image_buffer locked/unlocked by this function; not retained
 * @param pkt          destination packet; pkt->data/pkt->size must already
 *                     be sized for the frame (see avf_read_packet)
 * @return 0 or the (negative) av_image_copy_to_buffer error code,
 *         AVERROR_EXTERNAL if the base address could not be locked
 */
static int copy_cvpixelbuffer(AVFormatContext *s,
                              CVPixelBufferRef image_buffer,
                              AVPacket *pkt)
{
    AVFContext *ctx = s->priv_data;
    int src_linesize[4];
    const uint8_t *src_data[4];
    int width  = CVPixelBufferGetWidth(image_buffer);
    int height = CVPixelBufferGetHeight(image_buffer);
    int status;

    memset(src_linesize, 0, sizeof(src_linesize));
    memset(src_data, 0, sizeof(src_data));

    /* The base address must be locked before CVPixelBufferGetBaseAddress*
     * may be called; unlock again before returning. */
    status = CVPixelBufferLockBaseAddress(image_buffer, 0);
    if (status != kCVReturnSuccess) {
        av_log(s, AV_LOG_ERROR, "Could not lock base address: %d (%dx%d)\n", status, width, height);
        return AVERROR_EXTERNAL;
    }

    if (CVPixelBufferIsPlanar(image_buffer)) {
        size_t plane_count = CVPixelBufferGetPlaneCount(image_buffer);
        int i;
        for (i = 0; i < plane_count; i++) {
            src_linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(image_buffer, i);
            src_data[i]     = CVPixelBufferGetBaseAddressOfPlane(image_buffer, i);
        }
    } else {
        src_linesize[0] = CVPixelBufferGetBytesPerRow(image_buffer);
        src_data[0]     = CVPixelBufferGetBaseAddress(image_buffer);
    }

    /* Tight copy (align = 1) so pkt->size matches CVPixelBufferGetDataSize. */
    status = av_image_copy_to_buffer(pkt->data, pkt->size,
                                     src_data, src_linesize,
                                     ctx->pixel_format, width, height, 1);

    CVPixelBufferUnlockBaseAddress(image_buffer, 0);

    return status;
}
1118 
/**
 * Read one captured video or audio frame and return it as an AVPacket.
 *
 * Blocks (by polling under the frame lock) until the capture delegates have
 * stored a frame in ctx->current_frame / ctx->current_audio_frame. Video
 * frames are copied out of a CVPixelBuffer (raw capture uses the sample's
 * CMBlockBuffer instead); non-interleaved audio is interleaved into the
 * packet. Returns AVERROR(EAGAIN) while no frame is available and
 * AVERROR_EOF once the capture session reported it quit.
 */
static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;

    do {
        CVImageBufferRef image_buffer;
        CMBlockBufferRef block_buffer;
        lock_frames(ctx);

        if (ctx->current_frame != nil) {
            int status;
            int length = 0;

            image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
            block_buffer = CMSampleBufferGetDataBuffer(ctx->current_frame);

            /* Normal capture yields an image buffer; raw/muxed capture
             * yields a block buffer. Neither being present is an error. */
            if (image_buffer != nil) {
                length = (int)CVPixelBufferGetDataSize(image_buffer);
            } else if (block_buffer != nil) {
                length = (int)CMBlockBufferGetDataLength(block_buffer);
            } else {
                unlock_frames(ctx);
                return AVERROR(EINVAL);
            }

            if (av_new_packet(pkt, length) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            CMItemCount count;
            CMSampleTimingInfo timing_info;

            /* Rescale the CoreMedia presentation timestamp into the
             * microsecond time base advertised for the stream. */
            if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_frame, 1, &timing_info, &count) == noErr) {
                AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
                pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
            }

            pkt->stream_index = ctx->video_stream_index;
            pkt->flags       |= AV_PKT_FLAG_KEY;

            if (image_buffer) {
                status = copy_cvpixelbuffer(s, image_buffer, pkt);
            } else {
                status = 0;
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
                if (ret != kCMBlockBufferNoErr) {
                    status = AVERROR(EIO);
                }
            }
            CFRelease(ctx->current_frame);
            ctx->current_frame = nil;

            if (status < 0) {
                unlock_frames(ctx);
                return status;
            }
        } else if (ctx->current_audio_frame != nil) {
            CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
            int block_buffer_size = CMBlockBufferGetDataLength(block_buffer);

            if (!block_buffer || !block_buffer_size) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            /* ctx->audio_buffer was sized at header time; refuse frames
             * that would overflow the interleaving scratch buffer. */
            if (ctx->audio_non_interleaved && block_buffer_size > ctx->audio_buffer_size) {
                unlock_frames(ctx);
                return AVERROR_BUFFER_TOO_SMALL;
            }

            if (av_new_packet(pkt, block_buffer_size) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            CMItemCount count;
            CMSampleTimingInfo timing_info;

            if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_audio_frame, 1, &timing_info, &count) == noErr) {
                AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
                pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
            }

            pkt->stream_index = ctx->audio_stream_index;
            pkt->flags       |= AV_PKT_FLAG_KEY;

            if (ctx->audio_non_interleaved) {
                int sample, c, shift, num_samples;

                /* Copy planar samples into the scratch buffer first ... */
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, ctx->audio_buffer);
                if (ret != kCMBlockBufferNoErr) {
                    unlock_frames(ctx);
                    return AVERROR(EIO);
                }

                num_samples = pkt->size / (ctx->audio_channels * (ctx->audio_bits_per_sample >> 3));

                /* ... then interleave them into the output packet, shifting
                 * samples up to the container width (16 or 32 bit). */
                #define INTERLEAVE_OUTPUT(bps) \
                { \
                    int##bps##_t **src; \
                    int##bps##_t *dest; \
                    src = av_malloc(ctx->audio_channels * sizeof(int##bps##_t*)); \
                    if (!src) { \
                        unlock_frames(ctx); \
                        return AVERROR(EIO); \
                    } \
                    \
                    for (c = 0; c < ctx->audio_channels; c++) { \
                        src[c] = ((int##bps##_t*)ctx->audio_buffer) + c * num_samples; \
                    } \
                    dest  = (int##bps##_t*)pkt->data; \
                    shift = bps - ctx->audio_bits_per_sample; \
                    for (sample = 0; sample < num_samples; sample++) \
                        for (c = 0; c < ctx->audio_channels; c++) \
                            *dest++ = src[c][sample] << shift; \
                    av_freep(&src); \
                }

                if (ctx->audio_bits_per_sample <= 16) {
                    INTERLEAVE_OUTPUT(16)
                } else {
                    INTERLEAVE_OUTPUT(32)
                }
            } else {
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
                if (ret != kCMBlockBufferNoErr) {
                    unlock_frames(ctx);
                    return AVERROR(EIO);
                }
            }

            CFRelease(ctx->current_audio_frame);
            ctx->current_audio_frame = nil;
        } else {
            /* No frame pending: report EOF after an observed session quit,
             * otherwise ask the caller to try again. */
            pkt->data = NULL;
            unlock_frames(ctx);
            if (ctx->observed_quit) {
                return AVERROR_EOF;
            } else {
                return AVERROR(EAGAIN);
            }
        }

        unlock_frames(ctx);
    } while (!pkt->data);

    return 0;
}
1269 
/**
 * Close the AVFoundation input device.
 *
 * Tears down the capture session, delegates and buffers via
 * destroy_context(). Always succeeds.
 */
static int avf_close(AVFormatContext *s)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;
    destroy_context(ctx);
    return 0;
}
1276 
/* AVOptions exposed by the avfoundation input device; all are
 * user-settable demuxing parameters. */
static const AVOption options[] = {
    /* device selection */
    { "list_devices", "list available devices", offsetof(AVFContext, list_devices), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(AVFContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "audio_device_index", "select audio device by index for devices with same name (starts at 0)", offsetof(AVFContext, audio_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    /* capture format */
    { "pixel_format", "set pixel format", offsetof(AVFContext, pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_YUV420P}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM},
    { "framerate", "set frame rate", offsetof(AVFContext, framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "video_size", "set video size", offsetof(AVFContext, width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
    /* screen-capture behaviour */
    { "capture_cursor", "capture the screen cursor", offsetof(AVFContext, capture_cursor), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { "capture_mouse_clicks", "capture the screen mouse clicks", offsetof(AVFContext, capture_mouse_clicks), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    /* raw capture / latency */
    { "capture_raw_data", "capture the raw data from device connection", offsetof(AVFContext, capture_raw_data), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { "drop_late_frames", "drop frames that are available later than expected", offsetof(AVFContext, drop_late_frames), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },

    { NULL },
};
1291 
/* AVClass describing the avfoundation input device for logging
 * and AVOption handling. */
static const AVClass avf_class = {
    .class_name = "AVFoundation indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
1299 
/* Demuxer definition registering the AVFoundation capture device.
 * AVFMT_NOFILE: input is a device, not a file opened via avio. */
const FFInputFormat ff_avfoundation_demuxer = {
    .p.name         = "avfoundation",
    .p.long_name    = NULL_IF_CONFIG_SMALL("AVFoundation input device"),
    .p.flags        = AVFMT_NOFILE,
    .p.priv_class   = &avf_class,
    .priv_data_size = sizeof(AVFContext),
    .read_header    = avf_read_header,
    .read_packet    = avf_read_packet,
    .read_close     = avf_close,
};
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
AV_CODEC_ID_PCM_S16LE
@ AV_CODEC_ID_PCM_S16LE
Definition: codec_id.h:328
pthread_mutex_t
_fmutex pthread_mutex_t
Definition: os2threads.h:53
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_CODEC_ID_PCM_F32BE
@ AV_CODEC_ID_PCM_F32BE
Definition: codec_id.h:348
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
AVFContext::audio_buffer_size
int audio_buffer_size
Definition: avfoundation.m:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVFContext::audio_float
int audio_float
Definition: avfoundation.m:119
AVFContext::observed_quit
int observed_quit
Definition: avfoundation.m:140
unlock_frames
static void unlock_frames(AVFContext *ctx)
Definition: avfoundation.m:148
avformat_new_stream
AVStream * avformat_new_stream(AVFormatContext *s, const struct AVCodec *c)
Add a new stream to a media file.
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
Underlying C type is AVRational.
Definition: opt.h:315
pthread_mutex_init
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:104
AV_CODEC_ID_RAWVIDEO
@ AV_CODEC_ID_RAWVIDEO
Definition: codec_id.h:65
AVFContext::current_audio_frame
CMSampleBufferRef current_audio_frame
Definition: avfoundation.m:134
pixdesc.h
AVFContext::audio_frames_captured
int audio_frames_captured
Definition: avfoundation.m:90
AVPacket::data
uint8_t * data
Definition: packet.h:533
AVOption
AVOption.
Definition: opt.h:429
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:588
parse_device_name
static int parse_device_name(AVFormatContext *s)
Definition: avfoundation.m:315
AV_PIX_FMT_RGB555BE
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:114
AVFContext::audio_channels
int audio_channels
Definition: avfoundation.m:117
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
AVFContext::video_filename
char * video_filename
Definition: avfoundation.m:112
AVFPixelFormatSpec::avf_id
OSType avf_id
Definition: avfoundation.m:53
AVFContext::audio_be
int audio_be
Definition: avfoundation.m:120
AVFContext::capture_cursor
int capture_cursor
Definition: avfoundation.m:98
avpriv_set_pts_info
void avpriv_set_pts_info(AVStream *st, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: avformat.c:855
AV_CODEC_ID_PCM_S16BE
@ AV_CODEC_ID_PCM_S16BE
Definition: codec_id.h:329
fail
#define fail()
Definition: checkasm.h:188
avf_close
static int avf_close(AVFormatContext *s)
Definition: avfoundation.m:1270
avf_time_base
static const int avf_time_base
Definition: avfoundation.m:44
read_close
static av_cold int read_close(AVFormatContext *ctx)
Definition: libcdio.c:143
AVFContext::current_frame
CMSampleBufferRef current_frame
Definition: avfoundation.m:133
AVFPixelFormatSpec::ff_id
enum AVPixelFormat ff_id
Definition: avfoundation.m:52
AVFContext::observed_device
AVCaptureDevice * observed_device
Definition: avfoundation.m:136
AVERROR_BUFFER_TOO_SMALL
#define AVERROR_BUFFER_TOO_SMALL
Buffer too small.
Definition: error.h:53
AVRational::num
int num
Numerator.
Definition: rational.h:59
AVFContext::framerate
AVRational framerate
Definition: avfoundation.m:95
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:481
description
Tag description
Definition: snow.txt:206
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
avf_time_base_q
static const AVRational avf_time_base_q
Definition: avfoundation.m:46
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:490
read_packet
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
Definition: avio_read_callback.c:42
AVFContext::num_video_devices
int num_video_devices
Definition: avfoundation.m:115
INTERLEAVE_OUTPUT
#define INTERLEAVE_OUTPUT(bps)
width
#define width
getDevicesWithMediaType
static NSArray * getDevicesWithMediaType(AVMediaType mediaType)
Definition: avfoundation.m:766
s
#define s(width, name)
Definition: cbs_vp9.c:198
av_new_packet
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: packet.c:98
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:553
AVFAudioReceiver::_context
AVFContext * _context
Definition: avfoundation.m:249
options
static const AVOption options[]
Definition: avfoundation.m:1277
add_audio_device
static int add_audio_device(AVFormatContext *s, AVCaptureDevice *audio_device)
Definition: avfoundation.m:587
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
AVFContext::capture_mouse_clicks
int capture_mouse_clicks
Definition: avfoundation.m:99
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:49
AVFContext::frame_lock
pthread_mutex_t frame_lock
Definition: avfoundation.m:91
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVFContext::capture_raw_data
int capture_raw_data
Definition: avfoundation.m:100
AVFContext::list_devices
int list_devices
Definition: avfoundation.m:105
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVFPixelFormatSpec
Definition: avfoundation.m:51
get_video_config
static int get_video_config(AVFormatContext *s)
Definition: avfoundation.m:631
if
if(ret)
Definition: filter_design.txt:179
AVFContext::audio_packed
int audio_packed
Definition: avfoundation.m:122
AVFFrameReceiver::_context
AVFContext * _context
Definition: avfoundation.m:157
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AVFormatContext
Format I/O context.
Definition: avformat.h:1260
internal.h
AVFContext::video_output
AVCaptureVideoDataOutput * video_output
Definition: avfoundation.m:131
framerate
float framerate
Definition: av1_levels.c:29
AVFContext::audio_signed_integer
int audio_signed_integer
Definition: avfoundation.m:121
AV_PIX_FMT_RGB565LE
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:113
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
read_header
static int read_header(FFV1Context *f)
Definition: ffv1dec.c:535
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
AVFContext::drop_late_frames
int drop_late_frames
Definition: avfoundation.m:101
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
add_video_device
static int add_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
Definition: avfoundation.m:440
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFFrameReceiver
FrameReciever class - delegate for AVCaptureSession.
Definition: avfoundation.m:155
ff_avfoundation_demuxer
const FFInputFormat ff_avfoundation_demuxer
Definition: avfoundation.m:1300
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
Underlying C type is two consecutive integers.
Definition: opt.h:303
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
parseutils.h
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:265
time.h
AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
@ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
Definition: log.h:41
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
avf_read_packet
static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
Definition: avfoundation.m:1119
AVFContext::width
int width
Definition: avfoundation.m:96
configure_video_device
static int configure_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
Configure the video device.
Definition: avfoundation.m:343
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:82
AVFContext::audio_buffer
int32_t * audio_buffer
Definition: avfoundation.m:125
AVFContext::video_stream_index
int video_stream_index
Definition: avfoundation.m:107
AV_CODEC_ID_PCM_S24LE
@ AV_CODEC_ID_PCM_S24LE
Definition: codec_id.h:340
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:366
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AVMediaType
AVMediaType
Definition: avutil.h:199
AVPacket::size
int size
Definition: packet.h:534
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
destroy_context
static void destroy_context(AVFContext *ctx)
Definition: avfoundation.m:289
shift
static int shift(int a, int b)
Definition: bonk.c:261
AVFContext::url
char * url
Definition: avfoundation.m:111
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1376
sample
#define sample
Definition: flacdsp_template.c:44
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:468
AVFContext::audio_non_interleaved
int audio_non_interleaved
Definition: avfoundation.m:123
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2464
avdevice.h
FFInputFormat::p
AVInputFormat p
The public AVInputFormat.
Definition: demux.h:41
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:532
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:539
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
pthread_mutex_destroy
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:112
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:834
lock_frames
static void lock_frames(AVFContext *ctx)
Definition: avfoundation.m:143
AVFContext::audio_stream_index
int audio_stream_index
Definition: avfoundation.m:109
copy_cvpixelbuffer
static int copy_cvpixelbuffer(AVFormatContext *s, CVPixelBufferRef image_buffer, AVPacket *pkt)
Definition: avfoundation.m:1076
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:115
AVFContext::audio_bits_per_sample
int audio_bits_per_sample
Definition: avfoundation.m:118
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:526
avf_read_header
static int avf_read_header(AVFormatContext *s)
Definition: avfoundation.m:824
internal.h
AV_CODEC_ID_DVVIDEO
@ AV_CODEC_ID_DVVIDEO
Definition: codec_id.h:76
AV_CODEC_ID_PCM_S32BE
@ AV_CODEC_ID_PCM_S32BE
Definition: codec_id.h:337
demux.h
AVFContext::frames_captured
int frames_captured
Definition: avfoundation.m:89
AVFContext::video_is_muxed
int video_is_muxed
Definition: avfoundation.m:102
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:264
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
AVFContext::audio_device_index
int audio_device_index
Definition: avfoundation.m:108
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
avf_pixel_formats
static const struct AVFPixelFormatSpec avf_pixel_formats[]
Definition: avfoundation.m:56
AVFContext::audio_output
AVCaptureAudioDataOutput * audio_output
Definition: avfoundation.m:132
id
enum AVCodecID id
Definition: dts2pts.c:365
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
AVFContext::avf_audio_delegate
id avf_audio_delegate
Definition: avfoundation.m:93
channel_layout.h
AVFContext::video_is_screen
int video_is_screen
Definition: avfoundation.m:103
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AV_OPT_TYPE_PIXEL_FMT
@ AV_OPT_TYPE_PIXEL_FMT
Underlying C type is enum AVPixelFormat.
Definition: opt.h:307
AVPacket::stream_index
int stream_index
Definition: packet.h:535
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFContext::audio_filename
char * audio_filename
Definition: avfoundation.m:113
AV_PIX_FMT_RGB565BE
@ AV_PIX_FMT_RGB565BE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:112
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_CODEC_ID_PCM_S32LE
@ AV_CODEC_ID_PCM_S32LE
Definition: codec_id.h:336
mem.h
get_audio_config
static int get_audio_config(AVFormatContext *s)
Definition: avfoundation.m:679
AVFContext
Definition: avfoundation.m:85
timing_info
static int FUNC() timing_info(CodedBitstreamContext *ctx, RWContext *rw, AV1RawTimingInfo *current)
Definition: cbs_av1_syntax_template.c:158
av_image_copy_to_buffer
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
Definition: imgutils.c:501
AVPacket
This structure stores compressed data.
Definition: packet.h:510
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
FFInputFormat
Definition: demux.h:37
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
AVFContext::video_device_index
int video_device_index
Definition: avfoundation.m:106
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:262
AV_CODEC_ID_PCM_F32LE
@ AV_CODEC_ID_PCM_F32LE
Definition: codec_id.h:349
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFAudioReceiver
AudioReciever class - delegate for AVCaptureSession.
Definition: avfoundation.m:247
avstring.h
AVFContext::avf_delegate
id avf_delegate
Definition: avfoundation.m:92
AV_PIX_FMT_YUVA444P16LE
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Definition: pixfmt.h:192
avf_class
static const AVClass avf_class
Definition: avfoundation.m:1292
int
int
Definition: ffmpeg_filter.c:424
AVFContext::capture_session
AVCaptureSession * capture_session
Definition: avfoundation.m:130
AV_CODEC_ID_PCM_S24BE
@ AV_CODEC_ID_PCM_S24BE
Definition: codec_id.h:341
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
AV_PIX_FMT_BGR48BE
@ AV_PIX_FMT_BGR48BE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:145
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:78