FFmpeg 4.4
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "videotoolbox.h"
25 #include "libavutil/hwcontext_videotoolbox.h"
26 #include "vt_internal.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/pixdesc.h"
30 #include "bytestream.h"
31 #include "decode.h"
32 #include "h264dec.h"
33 #include "hevcdec.h"
34 #include "mpegvideo.h"
35 #include <TargetConditionals.h>
36 
37 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
38 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
39 #endif
40 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
41 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
42 #endif
43 
44 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
45 enum { kCMVideoCodecType_HEVC = 'hvc1' };
46 #endif
47 
48 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
49 
50 typedef struct VTHWFrame {
51  CVPixelBufferRef pixbuf;
52  AVBufferRef *hw_frames_ctx;
53 } VTHWFrame;
54 
55 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
56 {
57  VTHWFrame *ref = (VTHWFrame *)data;
58  av_buffer_unref(&ref->hw_frames_ctx);
59  CVPixelBufferRelease(ref->pixbuf);
60 
61  av_free(data);
62 }
63 
64 static int videotoolbox_buffer_copy(VTContext *vtctx,
65  const uint8_t *buffer,
66  uint32_t size)
67 {
68  void *tmp;
69 
70  tmp = av_fast_realloc(vtctx->bitstream,
71  &vtctx->allocated_size,
72  size);
73 
74  if (!tmp)
75  return AVERROR(ENOMEM);
76 
77  vtctx->bitstream = tmp;
78  memcpy(vtctx->bitstream, buffer, size);
79  vtctx->bitstream_size = size;
80 
81  return 0;
82 }
83 
84 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
85 {
86  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
87 
88  if (!ref->pixbuf) {
89  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
90  av_frame_unref(frame);
91  return AVERROR_EXTERNAL;
92  }
93 
94  frame->crop_right = 0;
95  frame->crop_left = 0;
96  frame->crop_top = 0;
97  frame->crop_bottom = 0;
98 
99  frame->data[3] = (uint8_t*)ref->pixbuf;
100 
101  if (ref->hw_frames_ctx) {
102  av_buffer_unref(&frame->hw_frames_ctx);
103  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
104  if (!frame->hw_frames_ctx)
105  return AVERROR(ENOMEM);
106  }
107 
108  return 0;
109 }
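/*
 * Editor's note: an illustrative sketch, not part of videotoolbox.c. After the
 * post-processing above has run, frame->data[3] carries a retained
 * CVPixelBufferRef. A caller that wants the raw NV12/P010 planes (instead of
 * going through av_hwframe_transfer_data()) can map the buffer with plain
 * CoreVideo calls; dump_plane_info() is a hypothetical helper written only for
 * this note.
 *
 *     #include <stdio.h>
 *     #include <CoreVideo/CoreVideo.h>
 *
 *     static void dump_plane_info(CVPixelBufferRef pixbuf)
 *     {
 *         if (CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess)
 *             return;
 *         size_t planes = CVPixelBufferGetPlaneCount(pixbuf);
 *         for (size_t i = 0; i < planes; i++) {
 *             printf("plane %zu: base %p, stride %zu, rows %zu\n", i,
 *                    CVPixelBufferGetBaseAddressOfPlane(pixbuf, i),
 *                    CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i),
 *                    CVPixelBufferGetHeightOfPlane(pixbuf, i));
 *         }
 *         CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
 *     }
 */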
110 
111 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
112 {
113  size_t size = sizeof(VTHWFrame);
114  uint8_t *data = NULL;
115  AVBufferRef *buf = NULL;
116  int ret = ff_attach_decode_data(frame);
117  FrameDecodeData *fdd;
118  if (ret < 0)
119  return ret;
120 
121  data = av_mallocz(size);
122  if (!data)
123  return AVERROR(ENOMEM);
124  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
125  if (!buf) {
126  av_freep(&data);
127  return AVERROR(ENOMEM);
128  }
129  frame->buf[0] = buf;
130 
131  fdd = (FrameDecodeData*)frame->private_ref->data;
132  fdd->post_process = videotoolbox_postproc_frame;
133 
134  frame->width = avctx->width;
135  frame->height = avctx->height;
136  frame->format = avctx->pix_fmt;
137 
138  return 0;
139 }
140 
141 #define AV_W8(p, v) *(p) = (v)
142 
143 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
144 {
145  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
146  H264Context *h = avctx->priv_data;
147  CFDataRef data = NULL;
148  uint8_t *p;
149  int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
150  uint8_t *vt_extradata = av_malloc(vt_extradata_size);
151  if (!vt_extradata)
152  return NULL;
153 
154  p = vt_extradata;
155 
156  AV_W8(p + 0, 1); /* version */
157  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
158  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
159  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
160  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
161  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
162  AV_WB16(p + 6, h->ps.sps->data_size);
163  memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
164  p += 8 + h->ps.sps->data_size;
165  AV_W8(p + 0, 1); /* number of pps */
166  AV_WB16(p + 1, h->ps.pps->data_size);
167  memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
168 
169  p += 3 + h->ps.pps->data_size;
170  av_assert0(p - vt_extradata == vt_extradata_size);
171 
172  // save sps header (profile/level) used to create decoder session,
173  // so we can detect changes and recreate it.
174  if (vtctx)
175  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
176 
177  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
178  av_free(vt_extradata);
179  return data;
180 }
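/*
 * Editor's note: an illustrative sketch, not part of videotoolbox.c. The blob
 * built above follows the ISO/IEC 14496-15 AVCDecoderConfigurationRecord
 * ("avcC") layout, so it can be sanity-checked by reading the same fields
 * back; parse_avcc() is a hypothetical helper written only for this note.
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     static void parse_avcc(const uint8_t *p, size_t size)
 *     {
 *         if (size < 8 || p[0] != 1)               // configurationVersion must be 1
 *             return;
 *         int nal_length_size = (p[4] & 0x03) + 1; // 0xff above -> 4-byte NAL lengths
 *         int num_sps         =  p[5] & 0x1f;      // 0xe1 above -> exactly one SPS
 *         int sps_size        = (p[6] << 8) | p[7];// big-endian, matches AV_WB16 above
 *         printf("nal_length_size=%d num_sps=%d first_sps=%d bytes\n",
 *                nal_length_size, num_sps, sps_size);
 *     }
 */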
181 
182 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
183 {
184  HEVCContext *h = avctx->priv_data;
185  int i, num_vps = 0, num_sps = 0, num_pps = 0;
186  const HEVCVPS *vps = h->ps.vps;
187  const HEVCSPS *sps = h->ps.sps;
188  const HEVCPPS *pps = h->ps.pps;
189  PTLCommon ptlc = vps->ptl.general_ptl;
190  VUI vui = sps->vui;
191  uint8_t parallelismType;
192  CFDataRef data = NULL;
193  uint8_t *p;
194  int vt_extradata_size = 23 + 3 + 3 + 3;
195  uint8_t *vt_extradata;
196 
197 #define COUNT_SIZE_PS(T, t) \
198  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
199  if (h->ps.t##ps_list[i]) { \
200  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
201  vt_extradata_size += 2 + lps->data_size; \
202  num_##t##ps++; \
203  } \
204  }
205 
206  COUNT_SIZE_PS(V, v)
207  COUNT_SIZE_PS(S, s)
208  COUNT_SIZE_PS(P, p)
209 
210  vt_extradata = av_malloc(vt_extradata_size);
211  if (!vt_extradata)
212  return NULL;
213  p = vt_extradata;
214 
215  /* unsigned int(8) configurationVersion = 1; */
216  AV_W8(p + 0, 1);
217 
218  /*
219  * unsigned int(2) general_profile_space;
220  * unsigned int(1) general_tier_flag;
221  * unsigned int(5) general_profile_idc;
222  */
223  AV_W8(p + 1, ptlc.profile_space << 6 |
224  ptlc.tier_flag << 5 |
225  ptlc.profile_idc);
226 
227  /* unsigned int(32) general_profile_compatibility_flags; */
228  memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
229 
230  /* unsigned int(48) general_constraint_indicator_flags; */
231  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
232  ptlc.interlaced_source_flag << 6 |
233  ptlc.non_packed_constraint_flag << 5 |
234  ptlc.frame_only_constraint_flag << 4);
235  AV_W8(p + 7, 0);
236  AV_WN32(p + 8, 0);
237 
238  /* unsigned int(8) general_level_idc; */
239  AV_W8(p + 12, ptlc.level_idc);
240 
241  /*
242  * bit(4) reserved = ‘1111’b;
243  * unsigned int(12) min_spatial_segmentation_idc;
244  */
245  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
246  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
247 
248  /*
249  * bit(6) reserved = ‘111111’b;
250  * unsigned int(2) parallelismType;
251  */
252  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
253  parallelismType = 0;
254  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
255  parallelismType = 0;
256  else if (pps->entropy_coding_sync_enabled_flag)
257  parallelismType = 3;
258  else if (pps->tiles_enabled_flag)
259  parallelismType = 2;
260  else
261  parallelismType = 1;
262  AV_W8(p + 15, 0xfc | parallelismType);
263 
264  /*
265  * bit(6) reserved = ‘111111’b;
266  * unsigned int(2) chromaFormat;
267  */
268  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
269 
270  /*
271  * bit(5) reserved = ‘11111’b;
272  * unsigned int(3) bitDepthLumaMinus8;
273  */
274  AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
275 
276  /*
277  * bit(5) reserved = ‘11111’b;
278  * unsigned int(3) bitDepthChromaMinus8;
279  */
280  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
281 
282  /* bit(16) avgFrameRate; */
283  AV_WB16(p + 19, 0);
284 
285  /*
286  * bit(2) constantFrameRate;
287  * bit(3) numTemporalLayers;
288  * bit(1) temporalIdNested;
289  * unsigned int(2) lengthSizeMinusOne;
290  */
291  AV_W8(p + 21, 0 << 6 |
292  sps->max_sub_layers << 3 |
293  sps->temporal_id_nesting_flag << 2 |
294  3);
295 
296  /* unsigned int(8) numOfArrays; */
297  AV_W8(p + 22, 3);
298 
299  p += 23;
300 
301 #define APPEND_PS(T, t) \
302  /* \
303  * bit(1) array_completeness; \
304  * unsigned int(1) reserved = 0; \
305  * unsigned int(6) NAL_unit_type; \
306  */ \
307  AV_W8(p, 1 << 7 | \
308  HEVC_NAL_##T##PS & 0x3f); \
309  /* unsigned int(16) numNalus; */ \
310  AV_WB16(p + 1, num_##t##ps); \
311  p += 3; \
312  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
313  if (h->ps.t##ps_list[i]) { \
314  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
315  /* unsigned int(16) nalUnitLength; */ \
316  AV_WB16(p, lps->data_size); \
317  /* bit(8*nalUnitLength) nalUnit; */ \
318  memcpy(p + 2, lps->data, lps->data_size); \
319  p += 2 + lps->data_size; \
320  } \
321  }
322 
323  APPEND_PS(V, v)
324  APPEND_PS(S, s)
325  APPEND_PS(P, p)
326 
327  av_assert0(p - vt_extradata == vt_extradata_size);
328 
329  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
330  av_free(vt_extradata);
331  return data;
332 }
333 
334 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
335  const uint8_t *buffer,
336  uint32_t size)
337 {
338  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
339  H264Context *h = avctx->priv_data;
340 
341  if (h->is_avc == 1) {
342  return videotoolbox_buffer_copy(vtctx, buffer, size);
343  }
344 
345  return 0;
346 }
347 
348 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
349  int type,
350  const uint8_t *buffer,
351  uint32_t size)
352 {
353  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
354  H264Context *h = avctx->priv_data;
355 
356  // save sps header (profile/level) used to create decoder session
357  if (!vtctx->sps[0])
358  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
359 
360  if (type == H264_NAL_SPS) {
361  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
362  vtctx->reconfig_needed = true;
363  memcpy(vtctx->sps, buffer + 1, 3);
364  }
365  }
366 
367  // pass-through SPS/PPS changes to the decoder
368  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
369 }
370 
371 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
372  const uint8_t *buffer,
373  uint32_t size)
374 {
375  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
376  void *tmp;
377 
378  tmp = av_fast_realloc(vtctx->bitstream,
379  &vtctx->allocated_size,
380  vtctx->bitstream_size+size+4);
381  if (!tmp)
382  return AVERROR(ENOMEM);
383 
384  vtctx->bitstream = tmp;
385 
386  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
387  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
388 
389  vtctx->bitstream_size += size + 4;
390 
391  return 0;
392 }
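/*
 * Editor's note: an illustrative sketch, not part of videotoolbox.c. The
 * function above converts Annex B NAL units into the 4-byte big-endian
 * length-prefixed form VideoToolbox expects; append_nal() is a hypothetical
 * stand-alone equivalent of the AV_WB32 + memcpy pair, assuming dst has
 * already been grown to hold the extra 4 + nal_size bytes.
 *
 *     #include <stdint.h>
 *     #include <string.h>
 *
 *     static size_t append_nal(uint8_t *dst, size_t offset,
 *                              const uint8_t *nal, uint32_t nal_size)
 *     {
 *         dst[offset + 0] = nal_size >> 24;   // big-endian length prefix
 *         dst[offset + 1] = nal_size >> 16;
 *         dst[offset + 2] = nal_size >>  8;
 *         dst[offset + 3] = nal_size;
 *         memcpy(dst + offset + 4, nal, nal_size);
 *         return offset + 4 + nal_size;       // new end of the bitstream buffer
 *     }
 */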
393 
394 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
395  const uint8_t *buffer,
396  uint32_t size)
397 {
398  H264Context *h = avctx->priv_data;
399 
400  if (h->is_avc == 1)
401  return 0;
402 
403  return videotoolbox_common_decode_slice(avctx, buffer, size);
404 }
405 
406 int ff_videotoolbox_uninit(AVCodecContext *avctx)
407 {
408  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
409  if (vtctx) {
410  av_freep(&vtctx->bitstream);
411  if (vtctx->frame)
412  CVPixelBufferRelease(vtctx->frame);
413  }
414 
415  return 0;
416 }
417 
418 #if CONFIG_VIDEOTOOLBOX
419 // Return the AVVideotoolboxContext that matters currently. Where it comes from
420 // depends on the API used.
421 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
422 {
423  // Somewhat tricky because the user can call av_videotoolbox_default_free()
424  // at any time, even when the codec is closed.
425  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
426  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
427  if (vtctx->vt_ctx)
428  return vtctx->vt_ctx;
429  }
430  return avctx->hwaccel_context;
431 }
432 
433 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
434 {
435  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
436  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
437  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
438  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
439  int width = CVPixelBufferGetWidth(pixbuf);
440  int height = CVPixelBufferGetHeight(pixbuf);
441  AVHWFramesContext *cached_frames;
442  VTHWFrame *ref;
443  int ret;
444 
445  if (!frame->buf[0] || frame->data[3]) {
446  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
447  av_frame_unref(frame);
448  return AVERROR_EXTERNAL;
449  }
450 
451  ref = (VTHWFrame *)frame->buf[0]->data;
452 
453  if (ref->pixbuf)
454  CVPixelBufferRelease(ref->pixbuf);
455  ref->pixbuf = vtctx->frame;
456  vtctx->frame = NULL;
457 
458  // Old API code path.
459  if (!vtctx->cached_hw_frames_ctx)
460  return 0;
461 
462  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
463 
464  if (cached_frames->sw_format != sw_format ||
465  cached_frames->width != width ||
466  cached_frames->height != height) {
467  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
468  AVHWFramesContext *hw_frames;
469  if (!hw_frames_ctx)
470  return AVERROR(ENOMEM);
471 
472  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
473  hw_frames->format = cached_frames->format;
474  hw_frames->sw_format = sw_format;
475  hw_frames->width = width;
476  hw_frames->height = height;
477 
478  ret = av_hwframe_ctx_init(hw_frames_ctx);
479  if (ret < 0) {
480  av_buffer_unref(&hw_frames_ctx);
481  return ret;
482  }
483 
484  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
485  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
486  }
487 
488  av_buffer_unref(&ref->hw_frames_ctx);
489  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
490  if (!ref->hw_frames_ctx)
491  return AVERROR(ENOMEM);
492 
493  return 0;
494 }
495 
496 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
497 {
498  int i;
499  uint8_t b;
500 
501  for (i = 3; i >= 0; i--) {
502  b = (length >> (i * 7)) & 0x7F;
503  if (i != 0)
504  b |= 0x80;
505 
506  bytestream2_put_byteu(pb, b);
507  }
508 }
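/*
 * Editor's note: an illustrative sketch, not part of videotoolbox.c. The
 * helper above always emits four bytes, seven payload bits per byte, with the
 * 0x80 continuation bit on every byte except the last; e.g. a length of 300
 * (0x12C) is written as 0x80 0x80 0x82 0x2C. write_descr_length() below is a
 * hypothetical FFmpeg-free rewrite for checking that encoding.
 *
 *     #include <stdint.h>
 *
 *     static void write_descr_length(uint8_t out[4], int length)
 *     {
 *         for (int i = 3; i >= 0; i--) {
 *             uint8_t b = (length >> (i * 7)) & 0x7F;
 *             if (i != 0)
 *                 b |= 0x80;                  // more bytes follow
 *             out[3 - i] = b;
 *         }
 *     }
 */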
509 
510 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
511 {
512  CFDataRef data;
513  uint8_t *rw_extradata;
514  PutByteContext pb;
515  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
516  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
517  int config_size = 13 + 5 + avctx->extradata_size;
518  int s;
519 
520  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
521  return NULL;
522 
523  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
524  bytestream2_put_byteu(&pb, 0); // version
525  bytestream2_put_ne24(&pb, 0); // flags
526 
527  // elementary stream descriptor
528  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
529  videotoolbox_write_mp4_descr_length(&pb, full_size);
530  bytestream2_put_ne16(&pb, 0); // esid
531  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
532 
533  // decoder configuration descriptor
534  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
535  videotoolbox_write_mp4_descr_length(&pb, config_size);
536  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
537  bytestream2_put_byteu(&pb, 0x11); // stream type
538  bytestream2_put_ne24(&pb, 0); // buffer size
539  bytestream2_put_ne32(&pb, 0); // max bitrate
540  bytestream2_put_ne32(&pb, 0); // avg bitrate
541 
542  // decoder specific descriptor
543  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
544  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
545 
546  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
547 
548  // SLConfigDescriptor
549  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
550  bytestream2_put_byteu(&pb, 0x01); // length
551  bytestream2_put_byteu(&pb, 0x02); //
552 
553  s = bytestream2_size_p(&pb);
554 
555  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
556 
557  av_freep(&rw_extradata);
558  return data;
559 }
560 
561 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
562  void *buffer,
563  int size)
564 {
565  OSStatus status;
566  CMBlockBufferRef block_buf;
567  CMSampleBufferRef sample_buf;
568 
569  block_buf = NULL;
570  sample_buf = NULL;
571 
572  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
573  buffer, // memoryBlock
574  size, // blockLength
575  kCFAllocatorNull, // blockAllocator
576  NULL, // customBlockSource
577  0, // offsetToData
578  size, // dataLength
579  0, // flags
580  &block_buf);
581 
582  if (!status) {
583  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
584  block_buf, // dataBuffer
585  TRUE, // dataReady
586  0, // makeDataReadyCallback
587  0, // makeDataReadyRefcon
588  fmt_desc, // formatDescription
589  1, // numSamples
590  0, // numSampleTimingEntries
591  NULL, // sampleTimingArray
592  0, // numSampleSizeEntries
593  NULL, // sampleSizeArray
594  &sample_buf);
595  }
596 
597  if (block_buf)
598  CFRelease(block_buf);
599 
600  return sample_buf;
601 }
602 
603 static void videotoolbox_decoder_callback(void *opaque,
604  void *sourceFrameRefCon,
605  OSStatus status,
606  VTDecodeInfoFlags flags,
607  CVImageBufferRef image_buffer,
608  CMTime pts,
609  CMTime duration)
610 {
611  AVCodecContext *avctx = opaque;
612  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
613 
614  if (vtctx->frame) {
615  CVPixelBufferRelease(vtctx->frame);
616  vtctx->frame = NULL;
617  }
618 
619  if (!image_buffer) {
620  av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
621  return;
622  }
623 
624  vtctx->frame = CVPixelBufferRetain(image_buffer);
625 }
626 
627 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
628 {
629  OSStatus status;
630  CMSampleBufferRef sample_buf;
631  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
632  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
633 
634  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
635  vtctx->bitstream,
636  vtctx->bitstream_size);
637 
638  if (!sample_buf)
639  return -1;
640 
641  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
642  sample_buf,
643  0, // decodeFlags
644  NULL, // sourceFrameRefCon
645  0); // infoFlagsOut
646  if (status == noErr)
647  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
648 
649  CFRelease(sample_buf);
650 
651  return status;
652 }
653 
654 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
655  CFDictionaryRef decoder_spec,
656  int width,
657  int height)
658 {
659  CMFormatDescriptionRef cm_fmt_desc;
660  OSStatus status;
661 
662  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
663  codec_type,
664  width,
665  height,
666  decoder_spec, // Dictionary of extension
667  &cm_fmt_desc);
668 
669  if (status)
670  return NULL;
671 
672  return cm_fmt_desc;
673 }
674 
675 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
676  int height,
677  OSType pix_fmt)
678 {
679  CFMutableDictionaryRef buffer_attributes;
680  CFMutableDictionaryRef io_surface_properties;
681  CFNumberRef cv_pix_fmt;
682  CFNumberRef w;
683  CFNumberRef h;
684 
685  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
686  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
687  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
688 
689  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
690  4,
691  &kCFTypeDictionaryKeyCallBacks,
692  &kCFTypeDictionaryValueCallBacks);
693  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
694  0,
695  &kCFTypeDictionaryKeyCallBacks,
696  &kCFTypeDictionaryValueCallBacks);
697 
698  if (pix_fmt)
699  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
700  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
701  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
702  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
703 #if TARGET_OS_IPHONE
704  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
705 #else
706  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
707 #endif
708 
709  CFRelease(io_surface_properties);
710  CFRelease(cv_pix_fmt);
711  CFRelease(w);
712  CFRelease(h);
713 
714  return buffer_attributes;
715 }
716 
717 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
718  AVCodecContext *avctx)
719 {
720  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
721  0,
722  &kCFTypeDictionaryKeyCallBacks,
723  &kCFTypeDictionaryValueCallBacks);
724 
725  CFDictionarySetValue(config_info,
726  codec_type == kCMVideoCodecType_HEVC ?
727  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
728  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
729  kCFBooleanTrue);
730 
731  CFMutableDictionaryRef avc_info;
732  CFDataRef data = NULL;
733 
734  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
735  1,
736  &kCFTypeDictionaryKeyCallBacks,
737  &kCFTypeDictionaryValueCallBacks);
738 
739  switch (codec_type) {
740  case kCMVideoCodecType_MPEG4Video :
741  if (avctx->extradata_size)
742  data = videotoolbox_esds_extradata_create(avctx);
743  if (data)
744  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
745  break;
746  case kCMVideoCodecType_H264 :
747  data = ff_videotoolbox_avcc_extradata_create(avctx);
748  if (data)
749  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
750  break;
751  case kCMVideoCodecType_HEVC :
752  data = ff_videotoolbox_hvcc_extradata_create(avctx);
753  if (data)
754  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
755  break;
756  default:
757  break;
758  }
759 
760  CFDictionarySetValue(config_info,
761  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
762  avc_info);
763 
764  if (data)
765  CFRelease(data);
766 
767  CFRelease(avc_info);
768  return config_info;
769 }
770 
771 static int videotoolbox_start(AVCodecContext *avctx)
772 {
773  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
774  OSStatus status;
775  VTDecompressionOutputCallbackRecord decoder_cb;
776  CFDictionaryRef decoder_spec;
777  CFDictionaryRef buf_attr;
778 
779  if (!videotoolbox) {
780  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
781  return -1;
782  }
783 
784  switch( avctx->codec_id ) {
785  case AV_CODEC_ID_H263 :
786  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
787  break;
788  case AV_CODEC_ID_H264 :
789  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
790  break;
791  case AV_CODEC_ID_HEVC :
792  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
793  break;
794  case AV_CODEC_ID_MPEG1VIDEO :
795  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
796  break;
797  case AV_CODEC_ID_MPEG2VIDEO :
798  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
799  break;
800  case AV_CODEC_ID_MPEG4 :
801  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
802  break;
803  default :
804  break;
805  }
806 
807  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
808 
809  if (!decoder_spec) {
810  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
811  return -1;
812  }
813 
814  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
815  decoder_spec,
816  avctx->width,
817  avctx->height);
818  if (!videotoolbox->cm_fmt_desc) {
819  if (decoder_spec)
820  CFRelease(decoder_spec);
821 
822  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
823  return -1;
824  }
825 
826  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
827  avctx->height,
828  videotoolbox->cv_pix_fmt_type);
829 
830  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
831  decoder_cb.decompressionOutputRefCon = avctx;
832 
833  status = VTDecompressionSessionCreate(NULL, // allocator
834  videotoolbox->cm_fmt_desc, // videoFormatDescription
835  decoder_spec, // videoDecoderSpecification
836  buf_attr, // destinationImageBufferAttributes
837  &decoder_cb, // outputCallback
838  &videotoolbox->session); // decompressionSessionOut
839 
840  if (decoder_spec)
841  CFRelease(decoder_spec);
842  if (buf_attr)
843  CFRelease(buf_attr);
844 
845  switch (status) {
846  case kVTVideoDecoderNotAvailableNowErr:
847  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
848  return AVERROR(ENOSYS);
849  case kVTVideoDecoderUnsupportedDataFormatErr:
850  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
851  return AVERROR(ENOSYS);
852  case kVTCouldNotFindVideoDecoderErr:
853  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
854  return AVERROR(ENOSYS);
855  case kVTVideoDecoderMalfunctionErr:
856  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
857  return AVERROR(EINVAL);
858  case kVTVideoDecoderBadDataErr:
859  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
860  return AVERROR_INVALIDDATA;
861  case 0:
862  return 0;
863  default:
864  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
865  return AVERROR_UNKNOWN;
866  }
867 }
868 
869 static void videotoolbox_stop(AVCodecContext *avctx)
870 {
871  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
872  if (!videotoolbox)
873  return;
874 
875  if (videotoolbox->cm_fmt_desc) {
876  CFRelease(videotoolbox->cm_fmt_desc);
877  videotoolbox->cm_fmt_desc = NULL;
878  }
879 
880  if (videotoolbox->session) {
881  VTDecompressionSessionInvalidate(videotoolbox->session);
882  CFRelease(videotoolbox->session);
883  videotoolbox->session = NULL;
884  }
885 }
886 
887 static const char *videotoolbox_error_string(OSStatus status)
888 {
889  switch (status) {
890  case kVTVideoDecoderBadDataErr:
891  return "bad data";
892  case kVTVideoDecoderMalfunctionErr:
893  return "decoder malfunction";
894  case kVTInvalidSessionErr:
895  return "invalid session";
896  }
897  return "unknown";
898 }
899 
900 static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
901 {
902  OSStatus status;
903  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
904  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
905 
906  if (vtctx->reconfig_needed == true) {
907  vtctx->reconfig_needed = false;
908  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
909  videotoolbox_stop(avctx);
910  if (videotoolbox_start(avctx) != 0) {
911  return AVERROR_EXTERNAL;
912  }
913  }
914 
915  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
916  return AVERROR_INVALIDDATA;
917 
918  status = videotoolbox_session_decode_frame(avctx);
919  if (status != noErr) {
920  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
921  vtctx->reconfig_needed = true;
922  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
923  return AVERROR_UNKNOWN;
924  }
925 
926  if (!vtctx->frame) {
927  vtctx->reconfig_needed = true;
928  return AVERROR_UNKNOWN;
929  }
930 
931  return videotoolbox_buffer_create(avctx, frame);
932 }
933 
934 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
935 {
936  H264Context *h = avctx->priv_data;
937  AVFrame *frame = h->cur_pic_ptr->f;
938  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
939  int ret = videotoolbox_common_end_frame(avctx, frame);
940  vtctx->bitstream_size = 0;
941  return ret;
942 }
943 
944 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
945  const uint8_t *buffer,
946  uint32_t size)
947 {
948  return 0;
949 }
950 
951 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
952  const uint8_t *buffer,
953  uint32_t size)
954 {
955  return videotoolbox_common_decode_slice(avctx, buffer, size);
956 }
957 
958 
959 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
960  int type,
961  const uint8_t *buffer,
962  uint32_t size)
963 {
964  return videotoolbox_common_decode_slice(avctx, buffer, size);
965 }
966 
967 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
968 {
969  HEVCContext *h = avctx->priv_data;
970  AVFrame *frame = h->ref->frame;
971  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
972 
973  h->output_frame->crop_right = 0;
974  h->output_frame->crop_left = 0;
975  h->output_frame->crop_top = 0;
976  h->output_frame->crop_bottom = 0;
977 
978  int ret = videotoolbox_common_end_frame(avctx, frame);
979  vtctx->bitstream_size = 0;
980  return ret;
981 }
982 
983 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
984  const uint8_t *buffer,
985  uint32_t size)
986 {
987  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
988 
989  return videotoolbox_buffer_copy(vtctx, buffer, size);
990 }
991 
992 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
993  const uint8_t *buffer,
994  uint32_t size)
995 {
996  return 0;
997 }
998 
999 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1000 {
1001  MpegEncContext *s = avctx->priv_data;
1002  AVFrame *frame = s->current_picture_ptr->f;
1003 
1004  return videotoolbox_common_end_frame(avctx, frame);
1005 }
1006 
1007 static int videotoolbox_uninit(AVCodecContext *avctx)
1008 {
1009  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1010  if (!vtctx)
1011  return 0;
1012 
1013  ff_videotoolbox_uninit(avctx);
1014 
1015  if (vtctx->vt_ctx)
1016  videotoolbox_stop(avctx);
1017 
1018  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
1019  av_freep(&vtctx->vt_ctx);
1020 
1021  return 0;
1022 }
1023 
1024 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1025  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->pix_fmt);
1026  if (!descriptor)
1027  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1028 
1029  int depth = descriptor->comp[0].depth;
1030  if (depth > 8) {
1031  return AV_PIX_FMT_P010;
1032  }
1033 
1034  return AV_PIX_FMT_NV12;
1035 }
1036 
1037 static int videotoolbox_common_init(AVCodecContext *avctx)
1038 {
1039  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1040  AVHWFramesContext *hw_frames;
1041  int err;
1042 
1043  // Old API - do nothing.
1044  if (avctx->hwaccel_context)
1045  return 0;
1046 
1047  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1048  av_log(avctx, AV_LOG_ERROR,
1049  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1050  return AVERROR(EINVAL);
1051  }
1052 
1053  vtctx->vt_ctx = av_videotoolbox_alloc_context();
1054  if (!vtctx->vt_ctx) {
1055  err = AVERROR(ENOMEM);
1056  goto fail;
1057  }
1058 
1059  if (avctx->hw_frames_ctx) {
1060  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1061  } else {
1062  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1063  if (!avctx->hw_frames_ctx) {
1064  err = AVERROR(ENOMEM);
1065  goto fail;
1066  }
1067 
1068  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1069  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1070  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1071  hw_frames->width = avctx->width;
1072  hw_frames->height = avctx->height;
1073 
1074  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1075  if (err < 0) {
1076  av_buffer_unref(&avctx->hw_frames_ctx);
1077  goto fail;
1078  }
1079  }
1080 
1081  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1082  if (!vtctx->cached_hw_frames_ctx) {
1083  err = AVERROR(ENOMEM);
1084  goto fail;
1085  }
1086 
1087  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1088  vtctx->vt_ctx->cv_pix_fmt_type =
1089  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1090  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1091  const AVPixFmtDescriptor *attempted_format =
1092  av_pix_fmt_desc_get(hw_frames->sw_format);
1093  av_log(avctx, AV_LOG_ERROR,
1094  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1095  "a VideoToolbox format!\n",
1096  attempted_format ? attempted_format->name : "<unknown>",
1097  av_color_range_name(avctx->color_range));
1098  err = AVERROR(EINVAL);
1099  goto fail;
1100  }
1101 
1102  err = videotoolbox_start(avctx);
1103  if (err < 0)
1104  goto fail;
1105 
1106  return 0;
1107 
1108 fail:
1109  videotoolbox_uninit(avctx);
1110  return err;
1111 }
1112 
1113 static int videotoolbox_frame_params(AVCodecContext *avctx,
1114  AVBufferRef *hw_frames_ctx)
1115 {
1116  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1117 
1118  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1119  frames_ctx->width = avctx->coded_width;
1120  frames_ctx->height = avctx->coded_height;
1121  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1122 
1123  return 0;
1124 }
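/*
 * Editor's note: an illustrative sketch, not part of videotoolbox.c. It shows
 * one way an application can opt into the hwaccels registered below through
 * the modern hw_device_ctx API; attach_videotoolbox() and get_vt_format() are
 * hypothetical names invented for this note, while the libavcodec/libavutil
 * calls themselves are real.
 *
 *     #include <libavcodec/avcodec.h>
 *     #include <libavutil/hwcontext.h>
 *
 *     static enum AVPixelFormat get_vt_format(AVCodecContext *ctx,
 *                                             const enum AVPixelFormat *fmts)
 *     {
 *         for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
 *             if (*p == AV_PIX_FMT_VIDEOTOOLBOX)
 *                 return *p;                  // take the hwaccel format when offered
 *         return fmts[0];                     // otherwise fall back to software decode
 *     }
 *
 *     static int attach_videotoolbox(AVCodecContext *ctx)
 *     {
 *         AVBufferRef *device = NULL;
 *         int ret = av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
 *                                          NULL, NULL, 0);
 *         if (ret < 0)
 *             return ret;
 *         ctx->hw_device_ctx = device;        // the codec context now owns this ref
 *         ctx->get_format    = get_vt_format;
 *         return 0;
 *     }
 */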
1125 
1126 const AVHWAccel ff_h263_videotoolbox_hwaccel = {
1127  .name = "h263_videotoolbox",
1128  .type = AVMEDIA_TYPE_VIDEO,
1129  .id = AV_CODEC_ID_H263,
1130  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1131  .alloc_frame = ff_videotoolbox_alloc_frame,
1132  .start_frame = videotoolbox_mpeg_start_frame,
1133  .decode_slice = videotoolbox_mpeg_decode_slice,
1134  .end_frame = videotoolbox_mpeg_end_frame,
1135  .frame_params = videotoolbox_frame_params,
1136  .init = videotoolbox_common_init,
1137  .uninit = videotoolbox_uninit,
1138  .priv_data_size = sizeof(VTContext),
1139 };
1140 
1141 const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
1142  .name = "hevc_videotoolbox",
1143  .type = AVMEDIA_TYPE_VIDEO,
1144  .id = AV_CODEC_ID_HEVC,
1145  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1146  .alloc_frame = ff_videotoolbox_alloc_frame,
1147  .start_frame = videotoolbox_hevc_start_frame,
1148  .decode_slice = videotoolbox_hevc_decode_slice,
1149  .decode_params = videotoolbox_hevc_decode_params,
1150  .end_frame = videotoolbox_hevc_end_frame,
1151  .frame_params = videotoolbox_frame_params,
1152  .init = videotoolbox_common_init,
1153  .uninit = videotoolbox_uninit,
1154  .priv_data_size = sizeof(VTContext),
1155 };
1156 
1157 const AVHWAccel ff_h264_videotoolbox_hwaccel = {
1158  .name = "h264_videotoolbox",
1159  .type = AVMEDIA_TYPE_VIDEO,
1160  .id = AV_CODEC_ID_H264,
1161  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1162  .alloc_frame = ff_videotoolbox_alloc_frame,
1163  .start_frame = ff_videotoolbox_h264_start_frame,
1164  .decode_slice = ff_videotoolbox_h264_decode_slice,
1165  .decode_params = videotoolbox_h264_decode_params,
1166  .end_frame = videotoolbox_h264_end_frame,
1167  .frame_params = videotoolbox_frame_params,
1168  .init = videotoolbox_common_init,
1169  .uninit = videotoolbox_uninit,
1170  .priv_data_size = sizeof(VTContext),
1171 };
1172 
1173 const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1174  .name = "mpeg1_videotoolbox",
1175  .type = AVMEDIA_TYPE_VIDEO,
1176  .id = AV_CODEC_ID_MPEG1VIDEO,
1177  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1178  .alloc_frame = ff_videotoolbox_alloc_frame,
1179  .start_frame = videotoolbox_mpeg_start_frame,
1180  .decode_slice = videotoolbox_mpeg_decode_slice,
1181  .end_frame = videotoolbox_mpeg_end_frame,
1182  .frame_params = videotoolbox_frame_params,
1183  .init = videotoolbox_common_init,
1184  .uninit = videotoolbox_uninit,
1185  .priv_data_size = sizeof(VTContext),
1186 };
1187 
1188 const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1189  .name = "mpeg2_videotoolbox",
1190  .type = AVMEDIA_TYPE_VIDEO,
1191  .id = AV_CODEC_ID_MPEG2VIDEO,
1192  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1193  .alloc_frame = ff_videotoolbox_alloc_frame,
1194  .start_frame = videotoolbox_mpeg_start_frame,
1195  .decode_slice = videotoolbox_mpeg_decode_slice,
1196  .end_frame = videotoolbox_mpeg_end_frame,
1197  .frame_params = videotoolbox_frame_params,
1198  .init = videotoolbox_common_init,
1199  .uninit = videotoolbox_uninit,
1200  .priv_data_size = sizeof(VTContext),
1201 };
1202 
1203 const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1204  .name = "mpeg4_videotoolbox",
1205  .type = AVMEDIA_TYPE_VIDEO,
1206  .id = AV_CODEC_ID_MPEG4,
1207  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1208  .alloc_frame = ff_videotoolbox_alloc_frame,
1209  .start_frame = videotoolbox_mpeg_start_frame,
1210  .decode_slice = videotoolbox_mpeg_decode_slice,
1211  .end_frame = videotoolbox_mpeg_end_frame,
1212  .frame_params = videotoolbox_frame_params,
1213  .init = videotoolbox_common_init,
1214  .uninit = videotoolbox_uninit,
1215  .priv_data_size = sizeof(VTContext),
1216 };
1217 
1218 static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1219  bool full_range)
1220 {
1221  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1222 
1223  if (ret) {
1224  ret->output_callback = videotoolbox_decoder_callback;
1225 
1226  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1227  if (cv_pix_fmt_type == 0) {
1228  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1229  }
1230  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1231  }
1232 
1233  return ret;
1234 }
1235 
1236 AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
1237 {
1238  return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1239 }
1240 
1241 int av_videotoolbox_default_init(AVCodecContext *avctx)
1242 {
1243  return av_videotoolbox_default_init2(avctx, NULL);
1244 }
1245 
1246 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
1247 {
1248  enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
1249  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1250  avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
1251  if (!avctx->hwaccel_context)
1252  return AVERROR(ENOMEM);
1253  return videotoolbox_start(avctx);
1254 }
1255 
1256 void av_videotoolbox_default_free(AVCodecContext *avctx)
1257 {
1258 
1259  videotoolbox_stop(avctx);
1260  av_freep(&avctx->hwaccel_context);
1261 }
1262 #endif /* CONFIG_VIDEOTOOLBOX */
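/*
 * Editor's note: an illustrative sketch, not part of videotoolbox.c. The
 * legacy convenience API defined above is typically wired up from a
 * get_format callback once the decoder offers AV_PIX_FMT_VIDEOTOOLBOX, and
 * torn down with av_videotoolbox_default_free() before the codec context is
 * freed. The callback name is hypothetical and error handling is trimmed;
 * exact ordering requirements may differ between FFmpeg releases, so treat
 * this as a sketch rather than a reference.
 *
 *     #include <libavcodec/avcodec.h>
 *     #include <libavcodec/videotoolbox.h>
 *
 *     static enum AVPixelFormat get_vt_format_legacy(AVCodecContext *ctx,
 *                                                    const enum AVPixelFormat *fmts)
 *     {
 *         for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
 *             if (*p == AV_PIX_FMT_VIDEOTOOLBOX) {
 *                 if (!ctx->hwaccel_context &&
 *                     av_videotoolbox_default_init(ctx) < 0)
 *                     break;                  // session creation failed, use software
 *                 return *p;
 *             }
 *         }
 *         return fmts[0];
 *     }
 *
 *     static void teardown_videotoolbox(AVCodecContext *ctx)
 *     {
 *         av_videotoolbox_default_free(ctx);  // frees ctx->hwaccel_context
 *     }
 */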