videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
26 #include "libavutil/hwcontext_videotoolbox.h"
27 #include "libavutil/mem.h"
28 #include "vt_internal.h"
29 #include "libavutil/avutil.h"
30 #include "libavutil/hwcontext.h"
31 #include "libavutil/pixdesc.h"
32 #include "bytestream.h"
33 #include "decode.h"
34 #include "internal.h"
35 #include "h264dec.h"
36 #include "hevc/hevcdec.h"
37 #include "hwaccel_internal.h"
38 #include "mpegvideo.h"
39 #include "proresdec.h"
40 #include <Availability.h>
41 #include <AvailabilityMacros.h>
42 #include <TargetConditionals.h>
43 
44 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
45 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
46 #endif
47 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
48 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
49 #endif
50 
51 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
52 enum { kCMVideoCodecType_HEVC = 'hvc1' };
53 #endif
54 
55 #if !HAVE_KCMVIDEOCODECTYPE_VP9
56 enum { kCMVideoCodecType_VP9 = 'vp09' };
57 #endif
58 
59 #if !HAVE_KCMVIDEOCODECTYPE_AV1
60 enum { kCMVideoCodecType_AV1 = 'av01' };
61 #endif
62 
63 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
64 
65 typedef struct VTHWFrame {
66  CVPixelBufferRef pixbuf;
67  AVBufferRef *hw_frames_ctx;
68 } VTHWFrame;
69 
70 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
71 {
72  VTHWFrame *ref = (VTHWFrame *)data;
73  av_buffer_unref(&ref->hw_frames_ctx);
74  CVPixelBufferRelease(ref->pixbuf);
75 
76  av_free(data);
77 }
78 
79 int ff_videotoolbox_buffer_copy(VTContext *vtctx,
80  const uint8_t *buffer,
81  uint32_t size)
82 {
83  void *tmp;
84 
85  tmp = av_fast_realloc(vtctx->bitstream,
86  &vtctx->allocated_size,
87  size);
88 
89  if (!tmp)
90  return AVERROR(ENOMEM);
91 
92  vtctx->bitstream = tmp;
93  memcpy(vtctx->bitstream, buffer, size);
94  vtctx->bitstream_size = size;
95 
96  return 0;
97 }
98 
99 int ff_videotoolbox_buffer_append(VTContext *vtctx,
100  const uint8_t *buffer,
101  uint32_t size)
102 {
103  void *tmp;
104 
105  tmp = av_fast_realloc(vtctx->bitstream,
106  &vtctx->allocated_size,
107  vtctx->bitstream_size + size);
108 
109  if (!tmp)
110  return AVERROR(ENOMEM);
111 
112  vtctx->bitstream = tmp;
113  memcpy(vtctx->bitstream + vtctx->bitstream_size, buffer, size);
114  vtctx->bitstream_size += size;
115 
116  return 0;
117 }
118 
119 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
120 {
121  int ret;
122  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
123 
124  if (!ref->pixbuf) {
125  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
126  av_frame_unref(frame);
127  return AVERROR_EXTERNAL;
128  }
129 
130  frame->crop_right = 0;
131  frame->crop_left = 0;
132  frame->crop_top = 0;
133  frame->crop_bottom = 0;
134 
135  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
136  return ret;
137 
138  frame->data[3] = (uint8_t*)ref->pixbuf;
139 
140  if (ref->hw_frames_ctx) {
141  av_buffer_unref(&frame->hw_frames_ctx);
142  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
143  if (!frame->hw_frames_ctx)
144  return AVERROR(ENOMEM);
145  }
146 
147  return 0;
148 }
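/*
 * Note (editorial): for AV_PIX_FMT_VIDEOTOOLBOX output, frame->data[3] carries
 * the CVPixelBufferRef, as set above. The VTHWFrame attached to frame->buf[0]
 * keeps that pixel buffer (and, in the hw_frames_ctx code path, a reference to
 * the frames context) alive until the frame is unreferenced.
 */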
149 
150 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
151 {
152  size_t size = sizeof(VTHWFrame);
153  uint8_t *data = NULL;
154  AVBufferRef *buf = NULL;
155  int ret = ff_attach_decode_data(frame);
156  FrameDecodeData *fdd;
157  if (ret < 0)
158  return ret;
159 
160  data = av_mallocz(size);
161  if (!data)
162  return AVERROR(ENOMEM);
163  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
164  if (!buf) {
165  av_freep(&data);
166  return AVERROR(ENOMEM);
167  }
168  frame->buf[0] = buf;
169 
170  fdd = frame->private_ref;
171  fdd->post_process = videotoolbox_postproc_frame;
172 
173  frame->width = avctx->width;
174  frame->height = avctx->height;
175  frame->format = avctx->pix_fmt;
176 
177  return 0;
178 }
179 
180 #define AV_W8(p, v) *(p) = (v)
181 
182 static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
183 {
184  int i;
185  int size = src_size;
186  uint8_t* p = dst;
187 
188  for (i = 0; i < src_size; i++) {
189  if (i + 2 < src_size &&
190  src[i] == 0x00 &&
191  src[i + 1] == 0x00 &&
192  src[i + 2] <= 0x03) {
193  if (dst) {
194  *p++ = src[i++];
195  *p++ = src[i];
196  *p++ = 0x03;
197  } else {
198  i++;
199  }
200  size++;
201  } else if (dst)
202  *p++ = src[i];
203  }
204 
205  if (dst)
206  av_assert0((p - dst) == size);
207 
208  return size;
209 }
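/*
 * Note (editorial): escape_ps() is used in two passes, as in the avcC/hvcC
 * writers below: first with dst == NULL to compute the size of a parameter set
 * after emulation-prevention bytes are inserted, then with a real buffer, e.g.:
 *
 *     int sps_size = escape_ps(NULL, sps_data, sps_data_size); // size only
 *     ...
 *     p += escape_ps(p, sps_data, sps_data_size);              // write and advance
 */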
210 
211 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
212 {
213  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
214  H264Context *h = avctx->priv_data;
215  CFDataRef data = NULL;
216  uint8_t *p;
217  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
218  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
219  int vt_extradata_size;
220  uint8_t *vt_extradata;
221 
222  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
223  vt_extradata = av_malloc(vt_extradata_size);
224 
225  if (!vt_extradata)
226  return NULL;
227 
228  p = vt_extradata;
229 
230  AV_W8(p + 0, 1); /* version */
231  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
232  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
233  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
234  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
235  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
236  AV_WB16(p + 6, sps_size);
237  p += 8;
238  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
239  AV_W8(p + 0, 1); /* number of pps */
240  AV_WB16(p + 1, pps_size);
241  p += 3;
242  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
243 
244  av_assert0(p - vt_extradata == vt_extradata_size);
245 
246  // save sps header (profile/level) used to create decoder session,
247  // so we can detect changes and recreate it.
248  if (vtctx)
249  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
250 
251  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
252  av_free(vt_extradata);
253  return data;
254 }
255 
256 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
257 {
258  HEVCContext *h = avctx->priv_data;
259  int i, num_vps = 0, num_sps = 0, num_pps = 0;
260  const HEVCPPS *pps = h->pps;
261  const HEVCSPS *sps = pps->sps;
262  const HEVCVPS *vps = sps->vps;
263  PTLCommon ptlc = vps->ptl.general_ptl;
264  VUI vui = sps->vui;
265  uint8_t parallelismType;
266  CFDataRef data = NULL;
267  uint8_t *p;
268  int vt_extradata_size = 23 + 3 + 3 + 3;
269  uint8_t *vt_extradata;
270 
271 #define COUNT_SIZE_PS(T, t) \
272  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
273  if (h->ps.t##ps_list[i]) { \
274  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
275  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
276  num_##t##ps++; \
277  } \
278  }
279 
280  COUNT_SIZE_PS(V, v)
281  COUNT_SIZE_PS(S, s)
282  COUNT_SIZE_PS(P, p)
283 
284  vt_extradata = av_malloc(vt_extradata_size);
285  if (!vt_extradata)
286  return NULL;
287  p = vt_extradata;
288 
289  /* unsigned int(8) configurationVersion = 1; */
290  AV_W8(p + 0, 1);
291 
292  /*
293  * unsigned int(2) general_profile_space;
294  * unsigned int(1) general_tier_flag;
295  * unsigned int(5) general_profile_idc;
296  */
297  AV_W8(p + 1, ptlc.profile_space << 6 |
298  ptlc.tier_flag << 5 |
299  ptlc.profile_idc);
300 
301  /* unsigned int(32) general_profile_compatibility_flags; */
302  for (i = 0; i < 4; i++) {
303  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
304  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
305  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
306  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
307  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
308  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
309  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
310  ptlc.profile_compatibility_flag[i * 8 + 7]);
311  }
312 
313  /* unsigned int(48) general_constraint_indicator_flags; */
314  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
315  ptlc.interlaced_source_flag << 6 |
316  ptlc.non_packed_constraint_flag << 5 |
317  ptlc.frame_only_constraint_flag << 4);
318  AV_W8(p + 7, 0);
319  AV_WN32(p + 8, 0);
320 
321  /* unsigned int(8) general_level_idc; */
322  AV_W8(p + 12, ptlc.level_idc);
323 
324  /*
325  * bit(4) reserved = ‘1111’b;
326  * unsigned int(12) min_spatial_segmentation_idc;
327  */
328  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
329  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
330 
331  /*
332  * bit(6) reserved = ‘111111’b;
333  * unsigned int(2) parallelismType;
334  */
335  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
336  parallelismType = 0;
337  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
338  parallelismType = 0;
339  else if (pps->entropy_coding_sync_enabled_flag)
340  parallelismType = 3;
341  else if (pps->tiles_enabled_flag)
342  parallelismType = 2;
343  else
344  parallelismType = 1;
345  AV_W8(p + 15, 0xfc | parallelismType);
346 
347  /*
348  * bit(6) reserved = ‘111111’b;
349  * unsigned int(2) chromaFormat;
350  */
351  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
352 
353  /*
354  * bit(5) reserved = ‘11111’b;
355  * unsigned int(3) bitDepthLumaMinus8;
356  */
357  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
358 
359  /*
360  * bit(5) reserved = ‘11111’b;
361  * unsigned int(3) bitDepthChromaMinus8;
362  */
363  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
364 
365  /* bit(16) avgFrameRate; */
366  AV_WB16(p + 19, 0);
367 
368  /*
369  * bit(2) constantFrameRate;
370  * bit(3) numTemporalLayers;
371  * bit(1) temporalIdNested;
372  * unsigned int(2) lengthSizeMinusOne;
373  */
374  AV_W8(p + 21, 0 << 6 |
375  sps->max_sub_layers << 3 |
376  sps->temporal_id_nesting << 2 |
377  3);
378 
379  /* unsigned int(8) numOfArrays; */
380  AV_W8(p + 22, 3);
381 
382  p += 23;
383 
384 #define APPEND_PS(T, t) \
385  /* \
386  * bit(1) array_completeness; \
387  * unsigned int(1) reserved = 0; \
388  * unsigned int(6) NAL_unit_type; \
389  */ \
390  AV_W8(p, 1 << 7 | \
391  HEVC_NAL_##T##PS & 0x3f); \
392  /* unsigned int(16) numNalus; */ \
393  AV_WB16(p + 1, num_##t##ps); \
394  p += 3; \
395  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
396  if (h->ps.t##ps_list[i]) { \
397  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
398  int size = escape_ps(p + 2, lps->data, lps->data_size); \
399  /* unsigned int(16) nalUnitLength; */ \
400  AV_WB16(p, size); \
401  /* bit(8*nalUnitLength) nalUnit; */ \
402  p += 2 + size; \
403  } \
404  }
405 
406  APPEND_PS(V, v)
407  APPEND_PS(S, s)
408  APPEND_PS(P, p)
409 
410  av_assert0(p - vt_extradata == vt_extradata_size);
411 
412  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
413  av_free(vt_extradata);
414  return data;
415 }
416 
417 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
418  const AVBufferRef *buffer_ref,
419  const uint8_t *buffer,
420  uint32_t size)
421 {
422  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
423  H264Context *h = avctx->priv_data;
424 
425  if (h->is_avc == 1) {
426  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
427  }
428 
429  return 0;
430 }
431 
432 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
433  int type,
434  const uint8_t *buffer,
435  uint32_t size)
436 {
437  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
438  H264Context *h = avctx->priv_data;
439 
440  // save sps header (profile/level) used to create decoder session
441  if (!vtctx->sps[0])
442  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
443 
444  if (type == H264_NAL_SPS) {
445  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
446  vtctx->reconfig_needed = true;
447  memcpy(vtctx->sps, buffer + 1, 3);
448  }
449  }
450 
451  // pass-through SPS/PPS changes to the decoder
452  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
453 }
454 
455 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
456  const uint8_t *buffer,
457  uint32_t size)
458 {
459  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
460  void *tmp;
461 
462  tmp = av_fast_realloc(vtctx->bitstream,
463  &vtctx->allocated_size,
464  vtctx->bitstream_size+size+4);
465  if (!tmp)
466  return AVERROR(ENOMEM);
467 
468  vtctx->bitstream = tmp;
469 
470  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
471  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
472 
473  vtctx->bitstream_size += size + 4;
474 
475  return 0;
476 }
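/*
 * Note (editorial): videotoolbox_common_decode_slice() converts Annex B NAL
 * units, as passed in by the slice callbacks, into the 4-byte length-prefixed
 * layout VideoToolbox expects when the format description carries avcC/hvcC
 * extradata. For example, a 5-byte NAL payload AA BB CC DD EE is appended to
 * the bitstream as:
 *
 *     00 00 00 05 AA BB CC DD EE
 */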
477 
478 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
479  const uint8_t *buffer,
480  uint32_t size)
481 {
482  H264Context *h = avctx->priv_data;
483 
484  if (h->is_avc == 1)
485  return 0;
486 
487  return videotoolbox_common_decode_slice(avctx, buffer, size);
488 }
489 
490 #if CONFIG_VIDEOTOOLBOX
491 // Return the AVVideotoolboxContext that matters currently. Where it comes from
492 // depends on the API used.
493 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
494 {
495  // Somewhat tricky because the user can call av_videotoolbox_default_free()
496  // at any time, even when the codec is closed.
497  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
498  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
499  if (vtctx->vt_ctx)
500  return vtctx->vt_ctx;
501  }
502  return avctx->hwaccel_context;
503 }
504 
505 static void videotoolbox_stop(AVCodecContext *avctx)
506 {
507  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
508  if (!videotoolbox)
509  return;
510 
511  if (videotoolbox->cm_fmt_desc) {
512  CFRelease(videotoolbox->cm_fmt_desc);
513  videotoolbox->cm_fmt_desc = NULL;
514  }
515 
516  if (videotoolbox->session) {
517  VTDecompressionSessionInvalidate(videotoolbox->session);
518  CFRelease(videotoolbox->session);
519  videotoolbox->session = NULL;
520  }
521 }
522 
523 int ff_videotoolbox_uninit(AVCodecContext *avctx)
524 {
525  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
526  if (!vtctx)
527  return 0;
528 
529  av_freep(&vtctx->bitstream);
530  if (vtctx->frame)
531  CVPixelBufferRelease(vtctx->frame);
532 
533  if (vtctx->vt_ctx)
534  videotoolbox_stop(avctx);
535 
536  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
537  av_freep(&vtctx->vt_ctx);
538 
539  return 0;
540 }
541 
542 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
543 {
544  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
545  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
546  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
547  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
548  int width = CVPixelBufferGetWidth(pixbuf);
549  int height = CVPixelBufferGetHeight(pixbuf);
550  AVHWFramesContext *cached_frames;
551  VTHWFrame *ref;
552  int ret;
553 
554  if (!frame->buf[0] || frame->data[3]) {
555  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
556  av_frame_unref(frame);
557  return AVERROR_EXTERNAL;
558  }
559 
560  ref = (VTHWFrame *)frame->buf[0]->data;
561 
562  if (ref->pixbuf)
563  CVPixelBufferRelease(ref->pixbuf);
564  ref->pixbuf = vtctx->frame;
565  vtctx->frame = NULL;
566 
567  // Old API code path.
568  if (!vtctx->cached_hw_frames_ctx)
569  return 0;
570 
571  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
572 
573  if (cached_frames->sw_format != sw_format ||
574  cached_frames->width != width ||
575  cached_frames->height != height) {
576  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
577  AVHWFramesContext *hw_frames;
578  AVVTFramesContext *hw_ctx;
579  if (!hw_frames_ctx)
580  return AVERROR(ENOMEM);
581 
582  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
583  hw_frames->format = cached_frames->format;
584  hw_frames->sw_format = sw_format;
585  hw_frames->width = width;
586  hw_frames->height = height;
587  hw_ctx = hw_frames->hwctx;
588  hw_ctx->color_range = avctx->color_range;
589 
590  ret = av_hwframe_ctx_init(hw_frames_ctx);
591  if (ret < 0) {
592  av_buffer_unref(&hw_frames_ctx);
593  return ret;
594  }
595 
596  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
597  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
598  }
599 
600  av_buffer_unref(&ref->hw_frames_ctx);
601  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
602  if (!ref->hw_frames_ctx)
603  return AVERROR(ENOMEM);
604 
605  return 0;
606 }
607 
608 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
609 {
610  int i;
611  uint8_t b;
612 
613  for (i = 3; i >= 0; i--) {
614  b = (length >> (i * 7)) & 0x7F;
615  if (i != 0)
616  b |= 0x80;
617 
618  bytestream2_put_byteu(pb, b);
619  }
620 }
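/*
 * Note (editorial): the length is written as four 7-bit groups with a
 * continuation bit set on every byte except the last (the MPEG-4 "expandable"
 * size encoding). For example, length 300 (0x12C) is emitted as the bytes
 * 0x80 0x80 0x82 0x2C.
 */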
621 
622 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
623 {
624  CFDataRef data;
625  uint8_t *rw_extradata;
626  PutByteContext pb;
627  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
628  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
629  int config_size = 13 + 5 + avctx->extradata_size;
630  int s;
631 
632  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
633  return NULL;
634 
635  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
636  bytestream2_put_byteu(&pb, 0); // version
637  bytestream2_put_ne24(&pb, 0); // flags
638 
639  // elementary stream descriptor
640  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
641  videotoolbox_write_mp4_descr_length(&pb, full_size);
642  bytestream2_put_ne16(&pb, 0); // esid
643  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
644 
645  // decoder configuration descriptor
646  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
647  videotoolbox_write_mp4_descr_length(&pb, config_size);
648  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
649  bytestream2_put_byteu(&pb, 0x11); // stream type
650  bytestream2_put_ne24(&pb, 0); // buffer size
651  bytestream2_put_ne32(&pb, 0); // max bitrate
652  bytestream2_put_ne32(&pb, 0); // avg bitrate
653 
654  // decoder specific descriptor
655  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
656  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
657 
658  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
659 
660  // SLConfigDescriptor
661  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
662  bytestream2_put_byteu(&pb, 0x01); // length
663  bytestream2_put_byteu(&pb, 0x02); //
664 
665  s = bytestream2_size_p(&pb);
666 
667  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
668 
669  av_freep(&rw_extradata);
670  return data;
671 }
672 
673 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
674  void *buffer,
675  int size)
676 {
677  OSStatus status;
678  CMBlockBufferRef block_buf;
679  CMSampleBufferRef sample_buf;
680 
681  block_buf = NULL;
682  sample_buf = NULL;
683 
684  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
685  buffer, // memoryBlock
686  size, // blockLength
687  kCFAllocatorNull, // blockAllocator
688  NULL, // customBlockSource
689  0, // offsetToData
690  size, // dataLength
691  0, // flags
692  &block_buf);
693 
694  if (!status) {
695  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
696  block_buf, // dataBuffer
697  TRUE, // dataReady
698  0, // makeDataReadyCallback
699  0, // makeDataReadyRefcon
700  fmt_desc, // formatDescription
701  1, // numSamples
702  0, // numSampleTimingEntries
703  NULL, // sampleTimingArray
704  0, // numSampleSizeEntries
705  NULL, // sampleSizeArray
706  &sample_buf);
707  }
708 
709  if (block_buf)
710  CFRelease(block_buf);
711 
712  return sample_buf;
713 }
714 
715 static void videotoolbox_decoder_callback(void *opaque,
716  void *sourceFrameRefCon,
717  OSStatus status,
718  VTDecodeInfoFlags flags,
719  CVImageBufferRef image_buffer,
720  CMTime pts,
721  CMTime duration)
722 {
723  VTContext *vtctx = opaque;
724 
725  if (vtctx->frame) {
726  CVPixelBufferRelease(vtctx->frame);
727  vtctx->frame = NULL;
728  }
729 
730  if (!image_buffer) {
731  // kVTVideoDecoderReferenceMissingErr, defined since the macOS 12 SDKs
732  if (status != -17694)
733  vtctx->reconfig_needed = true;
734 
736  "vt decoder cb: output image buffer is null: %i, reconfig %d\n",
737  status, vtctx->reconfig_needed);
738  return;
739  }
740 
741  vtctx->frame = CVPixelBufferRetain(image_buffer);
742 }
743 
744 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
745 {
746  OSStatus status;
747  CMSampleBufferRef sample_buf;
748  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
749  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
750 
751  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
752  vtctx->bitstream,
753  vtctx->bitstream_size);
754 
755  if (!sample_buf)
756  return -1;
757 
758  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
759  sample_buf,
760  0, // decodeFlags
761  NULL, // sourceFrameRefCon
762  0); // infoFlagsOut
763  if (status == noErr)
764  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
765 
766  CFRelease(sample_buf);
767 
768  return status;
769 }
770 
771 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
772  CFDictionaryRef decoder_spec,
773  int width,
774  int height)
775 {
776  CMFormatDescriptionRef cm_fmt_desc;
777  OSStatus status;
778 
779  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
780  codec_type,
781  width,
782  height,
783  decoder_spec, // Dictionary of extension
784  &cm_fmt_desc);
785 
786  if (status)
787  return NULL;
788 
789  return cm_fmt_desc;
790 }
791 
792 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
793  int height,
794  OSType pix_fmt)
795 {
796  CFMutableDictionaryRef buffer_attributes;
797  CFMutableDictionaryRef io_surface_properties;
798  CFNumberRef cv_pix_fmt;
799  CFNumberRef w;
800  CFNumberRef h;
801 
802  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
803  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
804  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
805 
806  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
807  4,
808  &kCFTypeDictionaryKeyCallBacks,
809  &kCFTypeDictionaryValueCallBacks);
810  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
811  0,
812  &kCFTypeDictionaryKeyCallBacks,
813  &kCFTypeDictionaryValueCallBacks);
814 
815  if (pix_fmt)
816  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
817  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
818  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
819  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
820 #if TARGET_OS_IPHONE
821  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
822 #else
823  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
824 #endif
825 
826  CFRelease(io_surface_properties);
827  CFRelease(cv_pix_fmt);
828  CFRelease(w);
829  CFRelease(h);
830 
831  return buffer_attributes;
832 }
833 
834 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
835  AVCodecContext *avctx)
836 {
837  CFMutableDictionaryRef avc_info;
838  CFDataRef data = NULL;
839 
840  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
841  0,
842  &kCFTypeDictionaryKeyCallBacks,
843  &kCFTypeDictionaryValueCallBacks);
844 
845  CFDictionarySetValue(config_info,
846  codec_type == kCMVideoCodecType_HEVC ?
847  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
848  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
849  kCFBooleanTrue);
850 
851  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
852  1,
853  &kCFTypeDictionaryKeyCallBacks,
854  &kCFTypeDictionaryValueCallBacks);
855 
856  switch (codec_type) {
857  case kCMVideoCodecType_MPEG4Video :
858  if (avctx->extradata_size)
859  data = videotoolbox_esds_extradata_create(avctx);
860  if (data)
861  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
862  break;
863  case kCMVideoCodecType_H264 :
864  data = ff_videotoolbox_avcc_extradata_create(avctx);
865  if (data)
866  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
867  break;
868  case kCMVideoCodecType_HEVC :
869  data = ff_videotoolbox_hvcc_extradata_create(avctx);
870  if (data)
871  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
872  break;
873 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
874  case kCMVideoCodecType_VP9 :
875  data = ff_videotoolbox_vpcc_extradata_create(avctx);
876  if (data)
877  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
878  break;
879 #endif
880 #if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
881  case kCMVideoCodecType_AV1 :
882  data = ff_videotoolbox_av1c_extradata_create(avctx);
883  if (data)
884  CFDictionarySetValue(avc_info, CFSTR("av1C"), data);
885  break;
886 #endif
887  default:
888  break;
889  }
890 
891  CFDictionarySetValue(config_info,
892  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
893  avc_info);
894 
895  if (data)
896  CFRelease(data);
897 
898  CFRelease(avc_info);
899  return config_info;
900 }
901 
902 static int videotoolbox_start(AVCodecContext *avctx)
903 {
904  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
905  OSStatus status;
906  VTDecompressionOutputCallbackRecord decoder_cb;
907  CFDictionaryRef decoder_spec;
908  CFDictionaryRef buf_attr;
909 
910  if (!videotoolbox) {
911  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
912  return -1;
913  }
914 
915  switch( avctx->codec_id ) {
916  case AV_CODEC_ID_H263 :
917  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
918  break;
919  case AV_CODEC_ID_H264 :
920  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
921  break;
922  case AV_CODEC_ID_HEVC :
923  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
924  break;
925  case AV_CODEC_ID_MPEG1VIDEO :
926  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
927  break;
928  case AV_CODEC_ID_MPEG2VIDEO :
929  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
930  break;
931  case AV_CODEC_ID_MPEG4 :
932  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
933  break;
934  case AV_CODEC_ID_PRORES :
935  switch (avctx->codec_tag) {
936  default:
937  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
938  // fall-through
939  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
940  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
941  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
942  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
943  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
944  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
945  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
946  break;
947  }
948  break;
949  case AV_CODEC_ID_VP9 :
950  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
951  break;
952  case AV_CODEC_ID_AV1 :
953  videotoolbox->cm_codec_type = kCMVideoCodecType_AV1;
954  break;
955  default :
956  break;
957  }
958 
959 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
960  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
961  if (__builtin_available(macOS 10.9, *)) {
962  VTRegisterProfessionalVideoWorkflowVideoDecoders();
963  }
964  }
965 #endif
966 
967 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
968  if (__builtin_available(macOS 11.0, *)) {
969  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
970  }
971 #endif
972 
973  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
974 
975  if (!decoder_spec) {
976  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
977  return -1;
978  }
979 
980  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
981  decoder_spec,
982  avctx->width,
983  avctx->height);
984  if (!videotoolbox->cm_fmt_desc) {
985  if (decoder_spec)
986  CFRelease(decoder_spec);
987 
988  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
989  return -1;
990  }
991 
992  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
993  avctx->height,
994  videotoolbox->cv_pix_fmt_type);
995 
996  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
997  decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data;
998 
999  status = VTDecompressionSessionCreate(NULL, // allocator
1000  videotoolbox->cm_fmt_desc, // videoFormatDescription
1001  decoder_spec, // videoDecoderSpecification
1002  buf_attr, // destinationImageBufferAttributes
1003  &decoder_cb, // outputCallback
1004  &videotoolbox->session); // decompressionSessionOut
1005 
1006  if (decoder_spec)
1007  CFRelease(decoder_spec);
1008  if (buf_attr)
1009  CFRelease(buf_attr);
1010 
1011  switch (status) {
1012  case kVTVideoDecoderNotAvailableNowErr:
1013  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
1014  return AVERROR(ENOSYS);
1015  case kVTVideoDecoderUnsupportedDataFormatErr:
1016  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
1017  return AVERROR(ENOSYS);
1018  case kVTCouldNotFindVideoDecoderErr:
1019  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
1020  return AVERROR(ENOSYS);
1021  case kVTVideoDecoderMalfunctionErr:
1022  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
1023  return AVERROR(EINVAL);
1024  case kVTVideoDecoderBadDataErr:
1025  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
1026  return AVERROR_INVALIDDATA;
1027  case 0:
1028  return 0;
1029  default:
1030  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
1031  return AVERROR_UNKNOWN;
1032  }
1033 }
1034 
1035 static const char *videotoolbox_error_string(OSStatus status)
1036 {
1037  switch (status) {
1038  case kVTVideoDecoderBadDataErr:
1039  return "bad data";
1040  case kVTVideoDecoderMalfunctionErr:
1041  return "decoder malfunction";
1042  case kVTInvalidSessionErr:
1043  return "invalid session";
1044  }
1045  return "unknown";
1046 }
1047 
1048 int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
1049 {
1050  OSStatus status;
1051  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1052  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1053 
1054  if (vtctx->reconfig_needed == true) {
1055  vtctx->reconfig_needed = false;
1056  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1057  videotoolbox_stop(avctx);
1058  if (videotoolbox_start(avctx) != 0) {
1059  return AVERROR_EXTERNAL;
1060  }
1061  }
1062 
1063  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1064  return AVERROR_INVALIDDATA;
1065 
1066  status = videotoolbox_session_decode_frame(avctx);
1067  if (status != noErr) {
1068  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1069  vtctx->reconfig_needed = true;
1070  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1071  return AVERROR_UNKNOWN;
1072  }
1073 
1074  if (!vtctx->frame)
1075  return AVERROR_UNKNOWN;
1076 
1077  return videotoolbox_buffer_create(avctx, frame);
1078 }
1079 
1080 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1081 {
1082  H264Context *h = avctx->priv_data;
1083  AVFrame *frame = h->cur_pic_ptr->f;
1084  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1085  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1086  vtctx->bitstream_size = 0;
1087  return ret;
1088 }
1089 
1090 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1091  const AVBufferRef *buffer_ref,
1092  const uint8_t *buffer,
1093  uint32_t size)
1094 {
1095  HEVCContext *h = avctx->priv_data;
1096  AVFrame *frame = h->cur_frame->f;
1097 
1098  frame->crop_right = 0;
1099  frame->crop_left = 0;
1100  frame->crop_top = 0;
1101  frame->crop_bottom = 0;
1102 
1103  return 0;
1104 }
1105 
1106 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1107  const uint8_t *buffer,
1108  uint32_t size)
1109 {
1110  return videotoolbox_common_decode_slice(avctx, buffer, size);
1111 }
1112 
1113 
1114 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1115  int type,
1116  const uint8_t *buffer,
1117  uint32_t size)
1118 {
1119  return videotoolbox_common_decode_slice(avctx, buffer, size);
1120 }
1121 
1122 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1123 {
1124  HEVCContext *h = avctx->priv_data;
1125  AVFrame *frame = h->cur_frame->f;
1126  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1127  int ret;
1128 
1129  ret = ff_videotoolbox_common_end_frame(avctx, frame);
1130  vtctx->bitstream_size = 0;
1131  return ret;
1132 }
1133 
1134 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1135  const AVBufferRef *buffer_ref,
1136  const uint8_t *buffer,
1137  uint32_t size)
1138 {
1139  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1140 
1141  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1142 }
1143 
1144 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1145  const uint8_t *buffer,
1146  uint32_t size)
1147 {
1148  return 0;
1149 }
1150 
1151 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1152 {
1153  MpegEncContext *s = avctx->priv_data;
1154  AVFrame *frame = s->cur_pic.ptr->f;
1155 
1156  return ff_videotoolbox_common_end_frame(avctx, frame);
1157 }
1158 
1159 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1160  const AVBufferRef *buffer_ref,
1161  const uint8_t *buffer,
1162  uint32_t size)
1163 {
1164  return 0;
1165 }
1166 
1167 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1168  const uint8_t *buffer,
1169  uint32_t size)
1170 {
1171  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1172 
1173  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1174 }
1175 
1176 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1177 {
1178  ProresContext *ctx = avctx->priv_data;
1179  AVFrame *frame = ctx->frame;
1180 
1181  return ff_videotoolbox_common_end_frame(avctx, frame);
1182 }
1183 
1184 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1185  int depth;
1186  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
1187  if (!descriptor)
1188  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1189 
1190  depth = descriptor->comp[0].depth;
1191 
1192  if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
1193  return (depth > 8) ? AV_PIX_FMT_AYUV64 : AV_PIX_FMT_AYUV;
1194 
1195 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
1196  if (depth > 10)
1197  return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
1198 #endif
1199 
1200 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
1201  if (descriptor->log2_chroma_w == 0) {
1202 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
1203  if (depth <= 8)
1204  return AV_PIX_FMT_NV24;
1205 #endif
1206  return AV_PIX_FMT_P410;
1207  }
1208 #endif
1209 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
1210  if (descriptor->log2_chroma_h == 0) {
1211 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
1212  if (depth <= 8)
1213  return AV_PIX_FMT_NV16;
1214 #endif
1215  return AV_PIX_FMT_P210;
1216  }
1217 #endif
1218 #if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
1219  if (depth > 8) {
1220  return AV_PIX_FMT_P010;
1221  }
1222 #endif
1223 
1224  return AV_PIX_FMT_NV12;
1225 }
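/*
 * Note (editorial): subject to the HAVE_* checks above, this effectively
 * selects AYUV/AYUV64 for alpha content, NV24/P410/P416 for 4:4:4,
 * NV16/P210/P216 for 4:2:2, P010 for >8-bit 4:2:0, and NV12 otherwise.
 */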
1226 
1227 static AVVideotoolboxContext *videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1228  bool full_range)
1229 {
1230  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1231 
1232  if (ret) {
1233  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1234  if (cv_pix_fmt_type == 0) {
1235  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1236  }
1237  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1238  }
1239 
1240  return ret;
1241 }
1242 
1243 int ff_videotoolbox_common_init(AVCodecContext *avctx)
1244 {
1245  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1246  AVHWFramesContext *hw_frames;
1247  AVVTFramesContext *hw_ctx;
1248  int err;
1249  bool full_range;
1250 
1251  vtctx->logctx = avctx;
1252 
1253  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx &&
1254  avctx->hwaccel_context)
1255  return videotoolbox_start(avctx);
1256 
1257  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1258  av_log(avctx, AV_LOG_ERROR,
1259  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1260  return AVERROR(EINVAL);
1261  }
1262 
1263  vtctx->vt_ctx = videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1264  if (!vtctx->vt_ctx) {
1265  err = AVERROR(ENOMEM);
1266  goto fail;
1267  }
1268 
1269  if (avctx->hw_frames_ctx) {
1270  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1271  } else {
1272  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1273  if (!avctx->hw_frames_ctx) {
1274  err = AVERROR(ENOMEM);
1275  goto fail;
1276  }
1277 
1278  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1279  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1280  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1281  hw_frames->width = avctx->width;
1282  hw_frames->height = avctx->height;
1283  hw_ctx = hw_frames->hwctx;
1284  hw_ctx->color_range = avctx->color_range;
1285 
1286  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1287  if (err < 0) {
1288  av_buffer_unref(&avctx->hw_frames_ctx);
1289  goto fail;
1290  }
1291  }
1292 
1293  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1294  if (!vtctx->cached_hw_frames_ctx) {
1295  err = AVERROR(ENOMEM);
1296  goto fail;
1297  }
1298 
1299  full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1300  vtctx->vt_ctx->cv_pix_fmt_type =
1301  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1302  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1303  const AVPixFmtDescriptor *attempted_format =
1304  av_pix_fmt_desc_get(hw_frames->sw_format);
1305  av_log(avctx, AV_LOG_ERROR,
1306  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1307  "a VideoToolbox format!\n",
1308  attempted_format ? attempted_format->name : "<unknown>",
1309  av_color_range_name(avctx->color_range));
1310  err = AVERROR(EINVAL);
1311  goto fail;
1312  }
1313 
1314  err = videotoolbox_start(avctx);
1315  if (err < 0)
1316  goto fail;
1317 
1318  return 0;
1319 
1320 fail:
1321  ff_videotoolbox_uninit(avctx);
1322  return err;
1323 }
1324 
1325 int ff_videotoolbox_frame_params(AVCodecContext *avctx,
1326  AVBufferRef *hw_frames_ctx)
1327 {
1328  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1329 
1330  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1331  frames_ctx->width = avctx->coded_width;
1332  frames_ctx->height = avctx->coded_height;
1333  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1334 
1335  return 0;
1336 }
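/*
 * Illustrative sketch (editorial, not part of this file): with the hwaccel
 * infrastructure above, an application typically enables VideoToolbox by
 * creating a device context and returning AV_PIX_FMT_VIDEOTOOLBOX from its
 * get_format callback, roughly:
 *
 *     static enum AVPixelFormat get_vt_format(AVCodecContext *ctx,
 *                                             const enum AVPixelFormat *fmts)
 *     {
 *         for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
 *             if (*p == AV_PIX_FMT_VIDEOTOOLBOX)
 *                 return *p;            // pick the hwaccel format
 *         return AV_PIX_FMT_NONE;       // hwaccel format not offered
 *     }
 *     ...
 *     av_hwdevice_ctx_create(&avctx->hw_device_ctx,
 *                            AV_HWDEVICE_TYPE_VIDEOTOOLBOX, NULL, NULL, 0);
 *     avctx->get_format = get_vt_format;
 */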
1337 
1339  .p.name = "h263_videotoolbox",
1340  .p.type = AVMEDIA_TYPE_VIDEO,
1341  .p.id = AV_CODEC_ID_H263,
1342  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1343  .alloc_frame = ff_videotoolbox_alloc_frame,
1344  .start_frame = videotoolbox_mpeg_start_frame,
1345  .decode_slice = videotoolbox_mpeg_decode_slice,
1346  .end_frame = videotoolbox_mpeg_end_frame,
1347  .frame_params = ff_videotoolbox_frame_params,
1348  .init = ff_videotoolbox_common_init,
1349  .uninit = ff_videotoolbox_uninit,
1350  .priv_data_size = sizeof(VTContext),
1351 };
1352 
1354  .p.name = "hevc_videotoolbox",
1355  .p.type = AVMEDIA_TYPE_VIDEO,
1356  .p.id = AV_CODEC_ID_HEVC,
1357  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1358  .alloc_frame = ff_videotoolbox_alloc_frame,
1359  .start_frame = videotoolbox_hevc_start_frame,
1360  .decode_slice = videotoolbox_hevc_decode_slice,
1361  .decode_params = videotoolbox_hevc_decode_params,
1362  .end_frame = videotoolbox_hevc_end_frame,
1363  .frame_params = ff_videotoolbox_frame_params,
1364  .init = ff_videotoolbox_common_init,
1365  .uninit = ff_videotoolbox_uninit,
1366  .priv_data_size = sizeof(VTContext),
1367 };
1368 
1370  .p.name = "h264_videotoolbox",
1371  .p.type = AVMEDIA_TYPE_VIDEO,
1372  .p.id = AV_CODEC_ID_H264,
1373  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1374  .alloc_frame = ff_videotoolbox_alloc_frame,
1375  .start_frame = ff_videotoolbox_h264_start_frame,
1376  .decode_slice = ff_videotoolbox_h264_decode_slice,
1377  .decode_params = videotoolbox_h264_decode_params,
1378  .end_frame = videotoolbox_h264_end_frame,
1379  .frame_params = ff_videotoolbox_frame_params,
1380  .init = ff_videotoolbox_common_init,
1381  .uninit = ff_videotoolbox_uninit,
1382  .priv_data_size = sizeof(VTContext),
1383 };
1384 
1386  .p.name = "mpeg1_videotoolbox",
1387  .p.type = AVMEDIA_TYPE_VIDEO,
1388  .p.id = AV_CODEC_ID_MPEG1VIDEO,
1389  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1390  .alloc_frame = ff_videotoolbox_alloc_frame,
1391  .start_frame = videotoolbox_mpeg_start_frame,
1392  .decode_slice = videotoolbox_mpeg_decode_slice,
1393  .end_frame = videotoolbox_mpeg_end_frame,
1394  .frame_params = ff_videotoolbox_frame_params,
1395  .init = ff_videotoolbox_common_init,
1396  .uninit = ff_videotoolbox_uninit,
1397  .priv_data_size = sizeof(VTContext),
1398 };
1399 
1401  .p.name = "mpeg2_videotoolbox",
1402  .p.type = AVMEDIA_TYPE_VIDEO,
1403  .p.id = AV_CODEC_ID_MPEG2VIDEO,
1404  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1405  .alloc_frame = ff_videotoolbox_alloc_frame,
1406  .start_frame = videotoolbox_mpeg_start_frame,
1407  .decode_slice = videotoolbox_mpeg_decode_slice,
1408  .end_frame = videotoolbox_mpeg_end_frame,
1409  .frame_params = ff_videotoolbox_frame_params,
1410  .init = ff_videotoolbox_common_init,
1411  .uninit = ff_videotoolbox_uninit,
1412  .priv_data_size = sizeof(VTContext),
1413 };
1414 
1416  .p.name = "mpeg4_videotoolbox",
1417  .p.type = AVMEDIA_TYPE_VIDEO,
1418  .p.id = AV_CODEC_ID_MPEG4,
1419  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1420  .alloc_frame = ff_videotoolbox_alloc_frame,
1421  .start_frame = videotoolbox_mpeg_start_frame,
1422  .decode_slice = videotoolbox_mpeg_decode_slice,
1423  .end_frame = videotoolbox_mpeg_end_frame,
1424  .frame_params = ff_videotoolbox_frame_params,
1425  .init = ff_videotoolbox_common_init,
1426  .uninit = ff_videotoolbox_uninit,
1427  .priv_data_size = sizeof(VTContext),
1428 };
1429 
1431  .p.name = "prores_videotoolbox",
1432  .p.type = AVMEDIA_TYPE_VIDEO,
1433  .p.id = AV_CODEC_ID_PRORES,
1434  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1435  .alloc_frame = ff_videotoolbox_alloc_frame,
1436  .start_frame = videotoolbox_prores_start_frame,
1437  .decode_slice = videotoolbox_prores_decode_slice,
1438  .end_frame = videotoolbox_prores_end_frame,
1439  .frame_params = ff_videotoolbox_frame_params,
1440  .init = ff_videotoolbox_common_init,
1441  .uninit = ff_videotoolbox_uninit,
1442  .priv_data_size = sizeof(VTContext),
1443 };
1444 
1445 #endif /* CONFIG_VIDEOTOOLBOX */
videotoolbox_buffer_release
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
Definition: videotoolbox.c:70
flags
const SwsFlags flags[]
Definition: swscale.c:61
AVVideotoolboxContext::cm_codec_type
int cm_codec_type
CoreMedia codec type that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:78
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1432
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
APPEND_PS
#define APPEND_PS(T, t)
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_videotoolbox_common_end_frame
int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3341
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_videotoolbox_uninit
int ff_videotoolbox_uninit(AVCodecContext *avctx)
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:198
ff_videotoolbox_buffer_append
int ff_videotoolbox_buffer_append(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:99
FFHWAccel::p
AVHWAccel p
The public AVHWAccel.
Definition: hwaccel_internal.h:38
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
FrameDecodeData
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:33
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:326
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:750
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:252
internal.h
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVPixFmtDescriptor::name
const char * name
Definition: pixdesc.h:70
b
#define b
Definition: input.c:42
av_vt_pixbuf_set_attachments
int av_vt_pixbuf_set_attachments(void *log_ctx, CVPixelBufferRef pixbuf, const AVFrame *src)
Definition: hwcontext_videotoolbox.c:667
data
const char data[16]
Definition: mxf.c:149
ProresContext
Definition: proresdec.h:43
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
bytestream2_size_p
static av_always_inline int bytestream2_size_p(PutByteContext *p)
Definition: bytestream.h:207
AV_W8
#define AV_W8(p, v)
Definition: videotoolbox.c:180
PTLCommon::profile_space
uint8_t profile_space
Definition: ps.h:128
COUNT_SIZE_PS
#define COUNT_SIZE_PS(T, t)
mpegvideo.h
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_mpeg2_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg2_videotoolbox_hwaccel
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:218
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVVideotoolboxContext
This struct holds all the information that needs to be passed between the caller and libavcodec for i...
Definition: videotoolbox.h:57
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
PTLCommon::profile_compatibility_flag
uint8_t profile_compatibility_flag[32]
Definition: ps.h:131
escape_ps
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
Definition: videotoolbox.c:182
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
PTLCommon::progressive_source_flag
uint8_t progressive_source_flag
Definition: ps.h:132
ff_hevc_videotoolbox_hwaccel
const struct FFHWAccel ff_hevc_videotoolbox_hwaccel
ff_videotoolbox_h264_start_frame
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, const AVBufferRef *buffer_ref, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:413
FFHWAccel
Definition: hwaccel_internal.h:34
PTLCommon::interlaced_source_flag
uint8_t interlaced_source_flag
Definition: ps.h:133
ff_videotoolbox_avcc_extradata_create
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:211
fail
#define fail()
Definition: checkasm.h:193
ff_h263_videotoolbox_hwaccel
const struct FFHWAccel ff_h263_videotoolbox_hwaccel
proresdec.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:644
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:610
VTContext::allocated_size
int allocated_size
Definition: vt_internal.h:33
ff_videotoolbox_common_init
int ff_videotoolbox_common_init(AVCodecContext *avctx)
PTLCommon::frame_only_constraint_flag
uint8_t frame_only_constraint_flag
Definition: ps.h:135
videotoolbox.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
VTContext::bitstream
uint8_t * bitstream
Definition: vt_internal.h:27
kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:48
AVHWFramesContext::height
int height
Definition: hwcontext.h:218
duration
int64_t duration
Definition: movenc.c:65
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:518
bytestream2_put_ne24
#define bytestream2_put_ne24
Definition: bytestream.h:128
full_range
bool full_range
Definition: hwcontext_videotoolbox.c:46
av_fast_realloc
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:497
vt_internal.h
PTLCommon
Definition: ps.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:198
VTHWFrame
Definition: videotoolbox.c:65
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
ff_mpeg1_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg1_videotoolbox_hwaccel
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
ff_videotoolbox_vpcc_extradata_create
CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_vp9.c:65
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:49
decode.h
PTLCommon::non_packed_constraint_flag
uint8_t non_packed_constraint_flag
Definition: ps.h:134
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
PTLCommon::profile_idc
uint8_t profile_idc
Definition: ps.h:130
AVVTFramesContext
Definition: hwcontext_videotoolbox.h:45
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
PTLCommon::tier_flag
uint8_t tier_flag
Definition: ps.h:129
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
if
if(ret)
Definition: filter_design.txt:179
VTContext::bitstream_size
int bitstream_size
Definition: vt_internal.h:30
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3657
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:211
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:672
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:284
hwaccel_internal.h
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
AVVTFramesContext::color_range
enum AVColorRange color_range
Definition: hwcontext_videotoolbox.h:46
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:127
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
V
#define V
Definition: avdct.c:31
AV_PIX_FMT_P410
#define AV_PIX_FMT_P410
Definition: pixfmt.h:600
AVVideotoolboxContext::session
VTDecompressionSessionRef session
Videotoolbox decompression session object.
Definition: videotoolbox.h:61
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
kCMVideoCodecType_HEVC
@ kCMVideoCodecType_HEVC
Definition: videotoolbox.c:52
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
ff_videotoolbox_frame_params
int ff_videotoolbox_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
ff_videotoolbox_h264_decode_slice
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:474
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
videotoolbox_common_decode_slice
static int videotoolbox_common_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:451
VTHWFrame::pixbuf
CVPixelBufferRef pixbuf
Definition: videotoolbox.c:66
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
PutByteContext
Definition: bytestream.h:37
hwcontext_videotoolbox.h
ff_prores_videotoolbox_hwaccel
const struct FFHWAccel ff_prores_videotoolbox_hwaccel
ff_videotoolbox_hvcc_extradata_create
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:256
hevcdec.h
height
#define height
Definition: dsp.h:85
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:372
FrameDecodeData::post_process
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:44
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:130
P
#define P
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_bswap32
#define av_bswap32
Definition: bswap.h:47
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
VUI
Definition: ps.h:98
AV_PIX_FMT_AYUV64
#define AV_PIX_FMT_AYUV64
Definition: pixfmt.h:584
ff_videotoolbox_av1c_extradata_create
CFDataRef ff_videotoolbox_av1c_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_av1.c:31
AVVideotoolboxContext::cm_fmt_desc
CMVideoFormatDescriptionRef cm_fmt_desc
CoreMedia Format Description that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:73
AV_PIX_FMT_NV16
@ AV_PIX_FMT_NV16
interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:198
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AV_PIX_FMT_P216
#define AV_PIX_FMT_P216
Definition: pixfmt.h:603
AV_PIX_FMT_P210
#define AV_PIX_FMT_P210
Definition: pixfmt.h:599
VTContext
Definition: vt_internal.h:25
AV_PIX_FMT_AYUV
@ AV_PIX_FMT_AYUV
packed AYUV 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), AYUVAYUV...
Definition: pixfmt.h:442
AVHWAccel::name
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:1948
kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:45
kCMVideoCodecType_VP9
@ kCMVideoCodecType_VP9
Definition: videotoolbox.c:56
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
h264dec.h
H264Context
H264Context.
Definition: h264dec.h:340
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:517
kCMVideoCodecType_AV1
@ kCMVideoCodecType_AV1
Definition: videotoolbox.c:60
AV_PIX_FMT_NV24
@ AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
Definition: pixfmt.h:371
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:228
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
VTContext::frame
CVImageBufferRef frame
Definition: vt_internal.h:36
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
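A minimal hedged sketch of the usual allocate-and-check pattern; the helper name alloc_ctx is hypothetical. The matching av_freep(&ctx) later frees the block and resets the pointer to NULL.
#include <libavutil/error.h>
#include <libavutil/mem.h>

static int alloc_ctx(void **out, size_t size)
{
    void *ctx = av_mallocz(size);        /* all bytes zeroed */
    if (!ctx)
        return AVERROR(ENOMEM);
    *out = ctx;
    return 0;
}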
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder.
Definition: avcodec.h:1478
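Hypothetical caller-side setup (not part of this file): create a Videotoolbox device context and hand the decoder its own reference before avcodec_open2(). The avctx variable is assumed to be an opened-but-not-yet-configured AVCodecContext.
#include <libavutil/hwcontext.h>

AVBufferRef *dev = NULL;
if (av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
                           NULL, NULL, 0) >= 0) {
    avctx->hw_device_ctx = av_buffer_ref(dev);  /* decoder keeps its own reference */
    av_buffer_unref(&dev);                      /* drop our local reference */
}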
bytestream2_put_ne32
#define bytestream2_put_ne32
Definition: bytestream.h:129
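A hedged sketch of writing through a PutByteContext (bytestream.h is internal to the FFmpeg tree, so this only compiles inside it):
#include "bytestream.h"

uint8_t buf[8];
PutByteContext pb;
bytestream2_init_writer(&pb, buf, sizeof(buf));
bytestream2_put_ne32(&pb, 0xCAFEBABE);   /* native-endian 32-bit write, bounds-checked */
bytestream2_put_ne16(&pb, 0x1234);       /* native-endian 16-bit write */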
AVCodecContext::height
int height
Definition: avcodec.h:595
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:634
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1456
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
Definition: hwcontext.h:116
bytestream2_put_ne16
#define bytestream2_put_ne16
Definition: bytestream.h:127
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
Definition: pixfmt.h:96
frame
Buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more, it must simply process the frame or queue it, and the task of requesting more frames is left to the filter's request_frame method or to the application.
Definition: filter_design.txt:264
ff_videotoolbox_alloc_frame
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: videotoolbox.c:150
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:151
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
Definition: videotoolbox.c:63
av_map_videotoolbox_format_to_pixfmt
enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat.
Definition: hwcontext_videotoolbox.c:144
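A sketch of assumed usage (pixbuf stands for some CVPixelBufferRef already obtained from the decoder): translate the CoreVideo pixel format into an AVPixelFormat; the common bi-planar 4:2:0 video-range case maps to NV12.
#include <CoreVideo/CoreVideo.h>
#include <libavutil/hwcontext_videotoolbox.h>

OSType cv_fmt = CVPixelBufferGetPixelFormatType(pixbuf);
enum AVPixelFormat sw_fmt = av_map_videotoolbox_format_to_pixfmt(cv_fmt);
/* sw_fmt == AV_PIX_FMT_NONE means the CoreVideo format has no AVPixelFormat mapping */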
AVCodecContext
main external API structure.
Definition: avcodec.h:431
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
VTContext::vt_ctx
struct AVVideotoolboxContext * vt_ctx
Definition: vt_internal.h:43
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
Definition: filter_design.txt:49
ff_mpeg4_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg4_videotoolbox_hwaccel
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
HEVCContext
Definition: hevcdec.h:492
PTLCommon::level_idc
uint8_t level_idc
Definition: ps.h:147
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
pps
uint64_t pps
Definition: dovi_rpuenc.c:35
videotoolbox_postproc_frame
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
Definition: videotoolbox.c:119
VTContext::logctx
void * logctx
Definition: vt_internal.h:49
VTHWFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: videotoolbox.c:67
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:585
VUI::min_spatial_segmentation_idc
int min_spatial_segmentation_idc
Definition: ps.h:120
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:610
VTContext::cached_hw_frames_ctx
struct AVBufferRef * cached_hw_frames_ctx
Definition: vt_internal.h:39
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
avutil.h
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
HEVCVPS
Definition: ps.h:171
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1558
HEVCSPS
Definition: ps.h:255
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
HEVCPPS
Definition: ps.h:374
ff_videotoolbox_buffer_copy
int ff_videotoolbox_buffer_copy(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:79
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_PIX_FMT_P416
#define AV_PIX_FMT_P416
Definition: pixfmt.h:604
ff_h264_videotoolbox_hwaccel
const struct FFHWAccel ff_h264_videotoolbox_hwaccel
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVVideotoolboxContext::cv_pix_fmt_type
OSType cv_pix_fmt_type
CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
Definition: videotoolbox.h:68
av_map_videotoolbox_format_from_pixfmt2
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range)
Same as the av_map_videotoolbox_format_from_pixfmt function, but can map and return full range pixel formats.
Definition: hwcontext_videotoolbox.c:178
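A one-line hedged example of the reverse mapping; a return value of 0 means no CoreVideo equivalent exists.
#include <stdbool.h>
#include <libavutil/hwcontext_videotoolbox.h>

uint32_t cv_fmt = av_map_videotoolbox_format_from_pixfmt2(AV_PIX_FMT_NV12, true);  /* full-range NV12 */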
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:595
bytestream.h
hwcontext.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
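A hedged example of building a fourcc and comparing it against codec_tag (avctx is an assumed AVCodecContext pointer):
#include <libavutil/macros.h>

if (avctx->codec_tag == MKTAG('h', 'v', 'c', '1'))   /* 0x31637668 on all hosts */
    ;                                                /* hvc1-flavoured HEVC stream */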
h
h
Definition: vp9dsp_template.c:2070
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:641
videotoolbox_h264_decode_params
static int videotoolbox_h264_decode_params(AVCodecContext *avctx, int type, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:428
width
#define width
Definition: dsp.h:85
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
VTContext::reconfig_needed
bool reconfig_needed
Definition: vt_internal.h:47
VTContext::sps
uint8_t sps[3]
Definition: vt_internal.h:46
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
src
#define src
Definition: vp8dsp.c:248
AV_CODEC_ID_PRORES
@ AV_CODEC_ID_PRORES
Definition: codec_id.h:200