#if !NVDECAPI_CHECK_VERSION(9, 0)
#define cudaVideoSurfaceFormat_YUV444 2
#define cudaVideoSurfaceFormat_YUV444_16Bit 3
#endif

#if NVDECAPI_CHECK_VERSION(11, 0)
#define CUVID_HAS_AV1_SUPPORT
#endif
#define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, ctx->cudl, x)
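/* cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT *format):
 * parser callback run whenever a new video sequence is encountered.
 * It updates the AVCodecContext from the bitstream parameters and
 * (re)creates the CUVID decoder when the configuration has changed. */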
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;

    int old_width = avctx->width;
    int old_height = avctx->height;

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;
    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;
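    // Apply the user-supplied crop on top of the display area signalled in the bitstream.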
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;
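    // width and height need to be set before ff_get_format() is called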
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }
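    // CUVID requires even target dimensions.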
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;
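    /* Pick the capability record matching the stream's bit depth (and 4:4:4
     * chroma, if used), then negotiate the output pixel format via ff_get_format(). */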
    chroma_444 = format->chroma_format == cudaVideoChromaFormat_444;

    switch (format->bit_depth_luma_minus8) {

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);

    if (surface_fmt < 0) {
    avctx->pix_fmt = surface_fmt;

    if (avctx->hw_frames_ctx) {

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));
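    // Progressive sequences always use weave; otherwise the user-selected mode applies.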
    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    ctx->progressive_sequence = format->progressive_sequence;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    avctx->bit_rate = format->bitrate;
    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }
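    /* If a decoder already exists and the relevant stream parameters are unchanged, keep using it. */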
    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;
    if (ctx->cudecoder) {
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }
    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;
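    // Map the negotiated software pixel format to a CUVID output surface format.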
    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;
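    // Bob/adaptive deinterlacing outputs one frame per field, doubling the frame rate unless the second field is dropped.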
    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;
    if (!hwframe_ctx->pool) {
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;
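/* cuvid_handle_picture_decode(void *opaque, CUVIDPICPARAMS *picparams):
 * parser callback that records whether the picture is a keyframe and
 * submits it to the hardware decoder. */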
    ctx->key_frame[picparams->CurrPicIdx] = picparams->intra_pic_flag;

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDecodePicture(ctx->cudecoder, picparams));
    if (ctx->internal_error < 0)
        return 0;
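/* cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINFO *dispinfo):
 * parser callback that queues a decoded surface for output; when bob or
 * adaptive deinterlacing is active the frame is queued once per field
 * unless drop_second_field is set. */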
    ctx->internal_error = 0;

    parsed_frame.dispinfo.progressive_frame = ctx->progressive_sequence;

    if (ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave) {
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
    } else {
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        if (!ctx->drop_second_field) {
            parsed_frame.second_field = 1;
            av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        }
    }
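/* cuvid_is_buffer_full(AVCodecContext *avctx): returns nonzero when the
 * output FIFO plus the parser's display delay would exceed nb_surfaces. */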
    int delay = ctx->cuparseinfo.ulMaxDisplayDelay;
    if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        delay *= 2;
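/* cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt): wraps the
 * packet in a CUVIDSOURCEDATAPACKET and feeds it to the CUVID parser inside the
 * CUDA context; an empty packet signals end of stream and puts the decoder into
 * flushing mode. */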
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET cupkt;
    int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;

    if (is_flush && avpkt && avpkt->size)
        return AVERROR_EOF;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt && avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            cupkt.timestamp = avpkt->pts;
        }
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
        ctx->decoder_flushing = 1;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &cupkt));

    if (ctx->internal_error) {
        ret = ctx->internal_error;
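/* cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame): receive_frame
 * entry point; feeds pending packets to the parser, pops the next parsed
 * frame from the FIFO, maps the decoded CUVID surface and copies it into
 * the output frame. */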
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUdeviceptr mapped_frame = 0;
    int ret = 0, eret = 0;

    if (ctx->decoder_flushing) {

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));

        CUVIDPROCPARAMS params;
        unsigned int pitch = 0;

        memset(&params, 0, sizeof(params));
        params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
        params.top_field_first = parsed_frame.dispinfo.top_field_first;

        ret = CHECK_CU(ctx->cvdl->cuvidMapVideoFrame(ctx->cudecoder, parsed_frame.dispinfo.picture_index,
                                                     &mapped_frame, &pitch, &params));
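        // Copy the mapped surface into the destination frame one plane at a time.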
            CUDA_MEMCPY2D cpy = {
                .srcMemoryType = CU_MEMORYTYPE_DEVICE,
                .dstMemoryType = CU_MEMORYTYPE_DEVICE,
                .srcDevice = mapped_frame,
                /* per-plane destination pointer, pitches and copy size are filled in here */
            };

            ret = CHECK_CU(ctx->cudl->cuMemcpy2DAsync(&cpy, device_hwctx->stream));
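        // prev_pts tracks the previous output timestamp so a duration can be derived for the outgoing frame.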
        if (ctx->prev_pts == INT64_MIN) {

    } else if (ctx->decoder_flushing) {

        eret = CHECK_CU(ctx->cvdl->cuvidUnmapVideoFrame(ctx->cudecoder, mapped_frame));
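/* cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt):
 * legacy decode() wrapper; it rejects the deinterlacing modes, submits the
 * packet unless the decoder is flushing and then tries to output one frame. */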
    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave) {

    if (!ctx->decoder_flushing) {

    } else if (ret < 0) {
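/* cuvid_decode_end(AVCodecContext *avctx): destroys parser and decoder inside
 * the CUDA context, frees the frame queue and unloads the CUVID functions. */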
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;

    ctx->cudl->cuCtxPushCurrent(cuda_ctx);

    ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);

    ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);

    cuvid_free_functions(&ctx->cvdl);
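/* cuvid_test_capabilities(): queries cuvidGetDecoderCaps for 8, 10 and 12 bit
 * 4:2:0 decoding and checks the probed stream dimensions against the limits
 * reported by the driver. */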
static int cuvid_test_capabilities(AVCodecContext *avctx,
                                   const CUVIDPARSERPARAMS *cuparseinfo,
                                   int probed_width, int probed_height, int bit_depth)
{
    CUVIDDECODECAPS *caps;
    int res8 = 0, res10 = 0, res12 = 0;

    if (!ctx->cvdl->cuvidGetDecoderCaps) {
        av_log(avctx, AV_LOG_WARNING, "Used Nvidia driver is too old to perform a capability check.\n");
#if defined(_WIN32) || defined(__CYGWIN__)
               ". Continuing blind.\n");
        ctx->caps8.bIsSupported = ctx->caps10.bIsSupported = 1;
        ctx->caps12.bIsSupported = 0;
    ctx->caps8.eCodecType = ctx->caps10.eCodecType = ctx->caps12.eCodecType
        = cuparseinfo->CodecType;
    ctx->caps8.eChromaFormat = ctx->caps10.eChromaFormat = ctx->caps12.eChromaFormat
        = cudaVideoChromaFormat_420;

    ctx->caps8.nBitDepthMinus8 = 0;
    ctx->caps10.nBitDepthMinus8 = 2;
    ctx->caps12.nBitDepthMinus8 = 4;
    av_log(avctx, AV_LOG_VERBOSE, "8 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps8.bIsSupported, ctx->caps8.nMinWidth, ctx->caps8.nMaxWidth, ctx->caps8.nMinHeight, ctx->caps8.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "10 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps10.bIsSupported, ctx->caps10.nMinWidth, ctx->caps10.nMaxWidth, ctx->caps10.nMinHeight, ctx->caps10.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "12 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps12.bIsSupported, ctx->caps12.nMinWidth, ctx->caps12.nMaxWidth, ctx->caps12.nMinHeight, ctx->caps12.nMaxHeight);
    if (!ctx->caps8.bIsSupported) {

    if (!caps->bIsSupported) {

    if (probed_width > caps->nMaxWidth || probed_width < caps->nMinWidth) {
               probed_width, caps->nMinWidth, caps->nMaxWidth);

    if (probed_height > caps->nMaxHeight || probed_height < caps->nMinHeight) {
               probed_height, caps->nMinHeight, caps->nMaxHeight);

    if ((probed_width * probed_height) / 256 > caps->nMaxMBCount) {
               (int)(probed_width * probed_height) / 256, caps->nMaxMBCount);
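/* cuvid_decode_init(AVCodecContext *avctx): parses the crop/resize options,
 * loads the CUVID functions, sets up the CUDA device and frames contexts,
 * selects the CUVID codec type, stores the extradata as the parser's sequence
 * header and finally creates the video parser. */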
    CUVIDSOURCEDATAPACKET seq_pkt;
    CUcontext cuda_ctx = NULL;

    int probed_bit_depth = 8;

        probed_bit_depth = probe_desc->comp[0].depth;
    if (ctx->resize_expr && sscanf(ctx->resize_expr, "%dx%d",
                                   &ctx->resize.width, &ctx->resize.height) != 2) {

    if (ctx->crop_expr && sscanf(ctx->crop_expr, "%dx%dx%dx%d",
                                 &ctx->crop.top, &ctx->crop.bottom,
                                 &ctx->crop.left, &ctx->crop.right) != 4) {
    ret = cuvid_load_functions(&ctx->cvdl, avctx);

    if (!ctx->frame_queue) {

    if (!ctx->hwdevice) {

    if (!ctx->hwdevice) {

    device_hwctx = device_ctx->hwctx;

    memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));
    memset(&seq_pkt, 0, sizeof(seq_pkt));
#if CONFIG_H264_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_H264;
#endif
#if CONFIG_HEVC_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_HEVC;
#endif
#if CONFIG_MJPEG_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_JPEG;
#endif
#if CONFIG_MPEG1_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG1;
#endif
#if CONFIG_MPEG2_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG2;
#endif
#if CONFIG_MPEG4_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
#endif
#if CONFIG_VP8_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP8;
#endif
#if CONFIG_VP9_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP9;
#endif
#if CONFIG_VC1_CUVID_DECODER
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VC1;
#endif
#if CONFIG_AV1_CUVID_DECODER && defined(CUVID_HAS_AV1_SUPPORT)
        ctx->cuparseinfo.CodecType = cudaVideoCodec_AV1;
#endif
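    /* The codec extradata is stored in the parser's CUVIDEOFORMATEX so it can
     * be fed to the parser as a sequence header right after creation. */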
    ctx->cuparse_ext = av_mallocz(sizeof(*ctx->cuparse_ext)
            + FFMAX(extradata_size - (int)sizeof(ctx->cuparse_ext->raw_seqhdr_data), 0));
    if (!ctx->cuparse_ext) {

    if (extradata_size > 0)
        memcpy(ctx->cuparse_ext->raw_seqhdr_data, extradata, extradata_size);
    ctx->cuparse_ext->format.seqhdr_data_length = extradata_size;

    ctx->cuparseinfo.pExtVideoInfo = ctx->cuparse_ext;
    if (!ctx->key_frame) {

    ctx->cuparseinfo.ulMaxNumDecodeSurfaces = ctx->nb_surfaces;
    ctx->cuparseinfo.pUserData = avctx;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));

    seq_pkt.payload = ctx->cuparse_ext->raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext->format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));

    ctx->prev_pts = INT64_MIN;
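/* cuvid_flush(AVCodecContext *avctx): tears down and recreates the frame
 * queue, decoder and parser, re-feeds the stored sequence header and clears
 * the flushing state. */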
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt = { 0 };

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));

    if (!ctx->frame_queue) {

    if (ctx->cudecoder) {
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
        ctx->cudecoder = NULL;
    }

    if (ctx->cuparser) {
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
        ctx->cuparser = NULL;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));

    seq_pkt.payload = ctx->cuparse_ext->raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext->format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));

    ctx->prev_pts = INT64_MIN;
    ctx->decoder_flushing = 0;
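/* AVOptions shared by all cuvid wrapper decoders. */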
#define OFFSET(x) offsetof(CuvidContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
    { "deint", "Set deinterlacing mode", OFFSET(deint_mode), AV_OPT_TYPE_INT, { .i64 = cudaVideoDeinterlaceMode_Weave }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
    { "weave", "Weave deinterlacing (do nothing)", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave }, 0, 0, VD, "deint" },
    { "bob", "Bob deinterlacing", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob }, 0, 0, VD, "deint" },
    { "adaptive", "Adaptive deinterlacing", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
    { "surfaces", "Maximum surfaces to be used for decoding", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, VD },
    { "drop_second_field", "Drop second field when deinterlacing", OFFSET(drop_second_field), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
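/* DEFINE_CUVID_CODEC(x, X, bsf_name) stamps out the AVClass and AVCodec
 * definitions for one cuvid wrapper decoder. */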
#define DEFINE_CUVID_CODEC(x, X, bsf_name) \
    static const AVClass x##_cuvid_class = { \
        .class_name = #x "_cuvid", \
        .item_name = av_default_item_name, \
        .option = options, \
        .version = LIBAVUTIL_VERSION_INT, \
    }; \
    AVCodec ff_##x##_cuvid_decoder = { \
        .name = #x "_cuvid", \
        .long_name = NULL_IF_CONFIG_SMALL("Nvidia CUVID " #X " decoder"), \
        .type = AVMEDIA_TYPE_VIDEO, \
        .id = AV_CODEC_ID_##X, \
        .priv_data_size = sizeof(CuvidContext), \
        .priv_class = &x##_cuvid_class, \
        .init = cuvid_decode_init, \
        .close = cuvid_decode_end, \
        .decode = cuvid_decode_frame, \
        .receive_frame = cuvid_output_frame, \
        .flush = cuvid_flush, \
        .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
        .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
                                                  AV_PIX_FMT_NONE }, \
        .hw_configs = cuvid_hw_configs, \
        .wrapper_name = "cuvid", \
    };
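/* Instantiate a wrapper decoder for each codec enabled at configure time. */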
#if CONFIG_AV1_CUVID_DECODER && defined(CUVID_HAS_AV1_SUPPORT)
#if CONFIG_HEVC_CUVID_DECODER
#if CONFIG_H264_CUVID_DECODER
#if CONFIG_MJPEG_CUVID_DECODER
#if CONFIG_MPEG1_CUVID_DECODER
#if CONFIG_MPEG2_CUVID_DECODER
#if CONFIG_MPEG4_CUVID_DECODER
#if CONFIG_VP8_CUVID_DECODER
#if CONFIG_VP9_CUVID_DECODER
#if CONFIG_VC1_CUVID_DECODER