51 0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
52 0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
53 2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
54 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
58 0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
59 1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
60 2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
61 0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
62 2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
63 2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
64 2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
65 3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
66 3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
67 4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
68 4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
69 5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
70 5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
71 7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
72 6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
73 7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
77 0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
78 2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
79 3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
80 5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
81 0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
82 1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
83 3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
84 5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
85 0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
86 1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
87 3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
88 5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
89 1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
90 1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
91 3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
92 6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
97 0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
98 4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
99 3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
100 2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
101 1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
102 3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
103 2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
104 3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
105 0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
106 2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
107 1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
108 4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
109 0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
110 1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
111 0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
112 5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
121 if (
h->DPB[
i].f->buf[0] && !
h->DPB[
i].reference &&
122 (remove_current || &
h->DPB[
i] !=
h->cur_pic_ptr)) {
139 h->mb_width * 16 * 3 *
sizeof(
uint8_t) * 2);
141 h->mb_width * 16 * 3 *
sizeof(
uint8_t) * 2);
162 const int big_mb_num =
h->mb_stride * (
h->mb_height + 1) + 1;
163 const int mb_array_size =
h->mb_stride *
h->mb_height;
164 const int b4_stride =
h->mb_width * 4 + 1;
165 const int b4_array_size = b4_stride *
h->mb_height * 4;
175 if (!
h->qscale_table_pool || !
h->mb_type_pool || !
h->motion_val_pool ||
176 !
h->ref_index_pool) {
199 if (
h->avctx->hwaccel) {
210 int h_chroma_shift, v_chroma_shift;
212 &h_chroma_shift, &v_chroma_shift);
222 if (!
h->qscale_table_pool) {
258 return (ret < 0) ? ret :
AVERROR(ENOMEM);
266 if (!
h->DPB[
i].f->buf[0])
/* True iff pointer a lies inside the byte span [b, b + size).  Both operands
 * are cast to void* so pointers of different types can be compared.
 * NOTE(review): (b) + (size) advances b by `size` *elements* of b's type, not
 * bytes — callers must pass either a byte-typed base or an element count;
 * confirm at the call sites. */
273 #define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))
/* If pic points into old_ctx's DPB array, translate it to the pointer of the
 * corresponding slot in new_ctx's DPB; otherwise yield NULL.  Used when
 * copying decoder state between thread contexts so picture pointers keep
 * referring to the destination context's own DPB storage. */
275 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
276 (((pic) && (pic) >= (old_ctx)->DPB && \
277 (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
278 &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
286 for (
i = 0;
i < count;
i++) {
300 int inited =
h->context_initialized, err = 0;
307 if (inited && !h1->ps.sps)
311 (
h->width != h1->width ||
312 h->height != h1->height ||
313 h->mb_width != h1->mb_width ||
314 h->mb_height != h1->mb_height ||
316 h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
317 h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
318 h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
323 memcpy(
h->block_offset, h1->block_offset,
sizeof(
h->block_offset));
342 if (h1->ps.pps_ref) {
343 h->ps.pps = (
const PPS*)
h->ps.pps_ref->data;
344 h->ps.sps =
h->ps.pps->sps;
347 if (need_reinit || !inited) {
348 h->width = h1->width;
349 h->height = h1->height;
350 h->mb_height = h1->mb_height;
351 h->mb_width = h1->mb_width;
352 h->mb_num = h1->mb_num;
353 h->mb_stride = h1->mb_stride;
354 h->b_stride = h1->b_stride;
355 h->x264_build = h1->x264_build;
357 if (
h->context_initialized || h1->context_initialized) {
365 memcpy(
h->block_offset, h1->block_offset,
sizeof(
h->block_offset));
368 h->avctx->coded_height = h1->avctx->coded_height;
369 h->avctx->coded_width = h1->avctx->coded_width;
370 h->avctx->width = h1->avctx->width;
371 h->avctx->height = h1->avctx->height;
372 h->width_from_caller = h1->width_from_caller;
373 h->height_from_caller = h1->height_from_caller;
374 h->coded_picture_number = h1->coded_picture_number;
375 h->first_field = h1->first_field;
376 h->picture_structure = h1->picture_structure;
377 h->mb_aff_frame = h1->mb_aff_frame;
378 h->droppable = h1->droppable;
382 if (h1->DPB[
i].f->buf[0] &&
389 if (h1->cur_pic.f->buf[0]) {
395 h->enable_er = h1->enable_er;
396 h->workaround_bugs = h1->workaround_bugs;
397 h->droppable = h1->droppable;
400 h->is_avc = h1->is_avc;
401 h->nal_length_size = h1->nal_length_size;
403 memcpy(&
h->poc, &h1->poc,
sizeof(
h->poc));
405 memcpy(
h->short_ref, h1->short_ref,
sizeof(
h->short_ref));
406 memcpy(
h->long_ref, h1->long_ref,
sizeof(
h->long_ref));
407 memcpy(
h->delayed_pic, h1->delayed_pic,
sizeof(
h->delayed_pic));
408 memcpy(
h->last_pocs, h1->last_pocs,
sizeof(
h->last_pocs));
410 h->next_output_pic = h1->next_output_pic;
411 h->next_outputed_poc = h1->next_outputed_poc;
413 memcpy(
h->mmco, h1->mmco,
sizeof(
h->mmco));
414 h->nb_mmco = h1->nb_mmco;
415 h->mmco_reset = h1->mmco_reset;
416 h->explicit_ref_marking = h1->explicit_ref_marking;
417 h->long_ref_count = h1->long_ref_count;
418 h->short_ref_count = h1->short_ref_count;
425 h->frame_recovered = h1->frame_recovered;
431 for (
i = 0;
i <
h->sei.unregistered.nb_buf_ref;
i++)
433 h->sei.unregistered.nb_buf_ref = 0;
435 if (h1->sei.unregistered.nb_buf_ref) {
437 h1->sei.unregistered.nb_buf_ref,
438 sizeof(*
h->sei.unregistered.buf_ref));
442 for (
i = 0;
i < h1->sei.unregistered.nb_buf_ref;
i++) {
443 h->sei.unregistered.buf_ref[
i] =
av_buffer_ref(h1->sei.unregistered.buf_ref[
i]);
444 if (!
h->sei.unregistered.buf_ref[
i])
446 h->sei.unregistered.nb_buf_ref++;
449 h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;
456 h->poc.prev_poc_msb =
h->poc.poc_msb;
457 h->poc.prev_poc_lsb =
h->poc.poc_lsb;
459 h->poc.prev_frame_num_offset =
h->poc.frame_num_offset;
460 h->poc.prev_frame_num =
h->poc.frame_num;
462 h->recovery_frame = h1->recovery_frame;
471 const int pixel_shift =
h->pixel_shift;
479 h->cur_pic_ptr =
NULL;
488 pic->
reference =
h->droppable ? 0 :
h->picture_structure;
513 h->cur_pic_ptr = pic;
522 for (
i = 0;
i <
h->nb_slice_ctx;
i++) {
523 h->slice_ctx[
i].linesize =
h->cur_pic_ptr->f->linesize[0];
524 h->slice_ctx[
i].uvlinesize =
h->cur_pic_ptr->f->linesize[1];
533 for (
i = 0;
i < 16;
i++) {
537 for (
i = 0;
i < 16;
i++) {
538 h->block_offset[16 +
i] =
540 h->block_offset[48 + 16 +
i] =
548 h->cur_pic_ptr->reference = 0;
550 h->cur_pic_ptr->field_poc[0] =
h->cur_pic_ptr->field_poc[1] = INT_MAX;
552 h->next_output_pic =
NULL;
554 h->postpone_filter = 0;
556 h->mb_aff_frame =
h->ps.sps->mb_aff && (
h->picture_structure ==
PICT_FRAME);
558 if (
h->sei.unregistered.x264_build >= 0)
559 h->x264_build =
h->sei.unregistered.x264_build;
561 assert(
h->cur_pic_ptr->long_ref == 0);
569 int linesize,
int uvlinesize,
574 const int pixel_shift =
h->pixel_shift;
579 src_cb -= uvlinesize;
580 src_cr -= uvlinesize;
586 AV_COPY128(top_border, src_y + 15 * linesize);
588 AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
592 AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
593 AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
594 AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
595 AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
597 AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
598 AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
600 }
else if (chroma422) {
602 AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
603 AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
605 AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
606 AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
610 AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
611 AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
613 AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
614 AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
628 AV_COPY128(top_border, src_y + 16 * linesize);
630 AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
635 AV_COPY128(top_border + 32, src_cb + 16 * linesize);
636 AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
637 AV_COPY128(top_border + 64, src_cr + 16 * linesize);
638 AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
640 AV_COPY128(top_border + 16, src_cb + 16 * linesize);
641 AV_COPY128(top_border + 32, src_cr + 16 * linesize);
643 }
else if (chroma422) {
645 AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
646 AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
648 AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
649 AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
653 AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
654 AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
656 AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
657 AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
670 int ref0, ref1,
i, cur_poc, ref_start, ref_count0, ref_count1;
672 for (
i = 0;
i < 2;
i++) {
679 cur_poc =
h->cur_pic_ptr->poc;
681 cur_poc =
h->cur_pic_ptr->field_poc[
h->picture_structure - 1];
693 cur_poc =
h->cur_pic_ptr->field_poc[field];
704 for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
706 for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
713 int tx = (16384 + (
FFABS(
td) >> 1)) /
td;
714 int dist_scale_factor = (
tb * tx + 32) >> 8;
715 if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
716 w = 64 - dist_scale_factor;
735 for (
i = 0;
i < 16;
i++) {
/* Transpose a 4x4 scan position packed as (col + row*4) in a nibble:
 * swap the 2-bit row and column fields.
 * Fix: the whole replacement list is now parenthesized (CERT PRE02-C) so the
 * macro is safe in any expression context; the original bare
 * `((x) >> 2) | (((x) << 2) & 0xF)` only worked because every use site was a
 * direct assignment. */
736 #define TRANSPOSE(x) (((x) >> 2) | (((x) << 2) & 0xF))
741 for (
i = 0;
i < 64;
i++) {
/* Transpose an 8x8 scan position packed as (col + row*8) in 6 bits:
 * swap the 3-bit row and column fields.
 * Fix: whole replacement list parenthesized (CERT PRE02-C) so the macro is
 * safe when embedded in larger expressions, not only on the right-hand side
 * of an assignment as in the original. */
742 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
749 if (
h->ps.sps->transform_bypass) {
753 memcpy(
h->field_scan_q0 ,
field_scan ,
sizeof(
h->field_scan_q0 ));
754 memcpy(
h->field_scan8x8_q0 ,
field_scan8x8 ,
sizeof(
h->field_scan8x8_q0 ));
757 memcpy(
h->zigzag_scan_q0 ,
h->zigzag_scan ,
sizeof(
h->zigzag_scan_q0 ));
758 memcpy(
h->zigzag_scan8x8_q0 ,
h->zigzag_scan8x8 ,
sizeof(
h->zigzag_scan8x8_q0 ));
759 memcpy(
h->zigzag_scan8x8_cavlc_q0 ,
h->zigzag_scan8x8_cavlc ,
sizeof(
h->zigzag_scan8x8_cavlc_q0));
760 memcpy(
h->field_scan_q0 ,
h->field_scan ,
sizeof(
h->field_scan_q0 ));
761 memcpy(
h->field_scan8x8_q0 ,
h->field_scan8x8 ,
sizeof(
h->field_scan8x8_q0 ));
762 memcpy(
h->field_scan8x8_cavlc_q0 ,
h->field_scan8x8_cavlc ,
sizeof(
h->field_scan8x8_cavlc_q0 ));
/* Upper bound on the number of hardware-accelerated pixel formats that can be
 * offered for H.264: one slot per enabled hwaccel, with D3D11VA counted twice
 * (it contributes two pixel formats).  Each CONFIG_* symbol is 0 or 1, so the
 * sum sizes the pix_fmts candidate array at compile time. */
768 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
769 (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
770 CONFIG_H264_NVDEC_HWACCEL + \
771 CONFIG_H264_VAAPI_HWACCEL + \
772 CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
773 CONFIG_H264_VDPAU_HWACCEL)
778 switch (
h->ps.sps->bit_depth_luma) {
824 #if CONFIG_H264_VDPAU_HWACCEL
827 #if CONFIG_H264_NVDEC_HWACCEL
843 #if CONFIG_H264_DXVA2_HWACCEL
846 #if CONFIG_H264_D3D11VA_HWACCEL
850 #if CONFIG_H264_VAAPI_HWACCEL
853 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
856 if (
h->avctx->codec->pix_fmts)
857 choices =
h->avctx->codec->pix_fmts;
866 "Unsupported bit depth %d\n",
h->ps.sps->bit_depth_luma);
873 if (choices[
i] ==
h->avctx->pix_fmt && !force_callback)
882 int cr =
sps->crop_right;
883 int cl =
sps->crop_left;
884 int ct =
sps->crop_top;
885 int cb =
sps->crop_bottom;
892 if (
h->width_from_caller > 0 &&
h->height_from_caller > 0 &&
893 !
sps->crop_top && !
sps->crop_left &&
896 h->width_from_caller <=
width &&
897 h->height_from_caller <=
height) {
898 width =
h->width_from_caller;
899 height =
h->height_from_caller;
905 h->width_from_caller = 0;
906 h->height_from_caller = 0;
909 h->avctx->coded_width =
h->width;
910 h->avctx->coded_height =
h->height;
931 &
h->chroma_x_shift, &
h->chroma_y_shift);
933 if (
sps->timing_info_present_flag) {
934 int64_t den =
sps->time_scale;
935 if (
h->x264_build < 44U)
937 av_reduce(&
h->avctx->framerate.den, &
h->avctx->framerate.num,
938 sps->num_units_in_tick *
h->avctx->ticks_per_frame, den, 1 << 30);
944 h->prev_interlaced_frame = 1;
953 if (
sps->bit_depth_luma < 8 ||
sps->bit_depth_luma > 14 ||
954 sps->bit_depth_luma == 11 ||
sps->bit_depth_luma == 13
957 sps->bit_depth_luma);
962 h->cur_bit_depth_luma =
963 h->avctx->bits_per_raw_sample =
sps->bit_depth_luma;
964 h->cur_chroma_format_idc =
sps->chroma_format_idc;
965 h->pixel_shift =
sps->bit_depth_luma > 8;
966 h->chroma_format_idc =
sps->chroma_format_idc;
967 h->bit_depth_luma =
sps->bit_depth_luma;
970 sps->chroma_format_idc);
974 sps->chroma_format_idc);
984 for (
i = 0;
i <
h->nb_slice_ctx;
i++) {
989 sl->
mvd_table[0] =
h->mvd_table[0] +
i * 8 * 2 *
h->mb_stride;
990 sl->
mvd_table[1] =
h->mvd_table[1] +
i * 8 * 2 *
h->mb_stride;
999 h->context_initialized = 1;
1004 h->context_initialized = 0;
1022 int needs_reinit = 0, must_reinit, ret;
1030 h->ps.pps = (
const PPS*)
h->ps.pps_ref->data;
1033 if (
h->ps.sps !=
h->ps.pps->sps) {
1034 h->ps.sps = (
const SPS*)
h->ps.pps->sps;
1036 if (
h->mb_width !=
h->ps.sps->mb_width ||
1037 h->mb_height !=
h->ps.sps->mb_height ||
1038 h->cur_bit_depth_luma !=
h->ps.sps->bit_depth_luma ||
1039 h->cur_chroma_format_idc !=
h->ps.sps->chroma_format_idc
1043 if (
h->bit_depth_luma !=
h->ps.sps->bit_depth_luma ||
1044 h->chroma_format_idc !=
h->ps.sps->chroma_format_idc)
1049 must_reinit = (
h->context_initialized &&
1050 ( 16*
sps->mb_width !=
h->avctx->coded_width
1051 || 16*
sps->mb_height !=
h->avctx->coded_height
1052 ||
h->cur_bit_depth_luma !=
sps->bit_depth_luma
1053 ||
h->cur_chroma_format_idc !=
sps->chroma_format_idc
1054 ||
h->mb_width !=
sps->mb_width
1055 ||
h->mb_height !=
sps->mb_height
1061 if (first_slice &&
av_cmp_q(
sps->sar,
h->avctx->sample_aspect_ratio))
1064 if (!
h->setup_finished) {
1066 h->avctx->level =
sps->level_idc;
1067 h->avctx->refs =
sps->ref_frame_count;
1069 h->mb_width =
sps->mb_width;
1070 h->mb_height =
sps->mb_height;
1071 h->mb_num =
h->mb_width *
h->mb_height;
1072 h->mb_stride =
h->mb_width + 1;
1074 h->b_stride =
h->mb_width * 4;
1076 h->chroma_y_shift =
sps->chroma_format_idc <= 1;
1078 h->width = 16 *
h->mb_width;
1079 h->height = 16 *
h->mb_height;
1083 if (
sps->video_signal_type_present_flag) {
1086 if (
sps->colour_description_present_flag) {
1087 if (
h->avctx->colorspace !=
sps->colorspace)
1089 h->avctx->color_primaries =
sps->color_primaries;
1090 h->avctx->color_trc =
sps->color_trc;
1091 h->avctx->colorspace =
sps->colorspace;
1095 if (
h->sei.alternative_transfer.present &&
1098 h->avctx->color_trc =
h->sei.alternative_transfer.preferred_transfer_characteristics;
1101 h->avctx->chroma_sample_location =
sps->chroma_location;
1103 if (!
h->context_initialized || must_reinit || needs_reinit) {
1104 int flush_changes =
h->context_initialized;
1105 h->context_initialized = 0;
1106 if (sl !=
h->slice_ctx) {
1108 "changing width %d -> %d / height %d -> %d on "
1110 h->width,
h->avctx->coded_width,
1111 h->height,
h->avctx->coded_height,
1112 h->current_slice + 1);
1123 h->avctx->pix_fmt = ret;
1130 "h264_slice_header_init() failed\n");
1144 out->interlaced_frame = 0;
1145 out->repeat_pict = 0;
1150 if (
h->sei.picture_timing.present) {
1157 h->sei.picture_timing.present = 0;
1161 if (
sps->pic_struct_present_flag &&
h->sei.picture_timing.present) {
1163 switch (
pt->pic_struct) {
1168 out->interlaced_frame = 1;
1173 out->interlaced_frame = 1;
1176 out->interlaced_frame =
h->prev_interlaced_frame;
1183 out->repeat_pict = 1;
1186 out->repeat_pict = 2;
1189 out->repeat_pict = 4;
1193 if ((
pt->ct_type & 3) &&
1195 out->interlaced_frame = (
pt->ct_type & (1 << 1)) != 0;
1200 h->prev_interlaced_frame =
out->interlaced_frame;
1206 if (
sps->pic_struct_present_flag &&
h->sei.picture_timing.present) {
1211 out->top_field_first = 1;
1213 out->top_field_first = 0;
1214 }
else if (
out->interlaced_frame) {
1217 out->top_field_first = 1;
1220 out->top_field_first = 0;
1224 if (
h->sei.frame_packing.present &&
1225 h->sei.frame_packing.arrangement_type <= 6 &&
1226 h->sei.frame_packing.content_interpretation_type > 0 &&
1227 h->sei.frame_packing.content_interpretation_type < 3) {
1231 switch (
fp->arrangement_type) {
1242 if (
fp->quincunx_sampling_flag)
1258 if (
fp->content_interpretation_type == 2)
1262 if (
fp->current_frame_is_frame0_flag)
1270 if (
h->sei.display_orientation.present &&
1271 (
h->sei.display_orientation.anticlockwise_rotation ||
1272 h->sei.display_orientation.hflip ||
1273 h->sei.display_orientation.vflip)) {
1286 if (
h->sei.afd.present) {
1291 *sd->
data =
h->sei.afd.active_format_description;
1292 h->sei.afd.present = 0;
1296 if (
h->sei.a53_caption.buf_ref) {
1307 for (
int i = 0;
i <
h->sei.unregistered.nb_buf_ref;
i++) {
1319 h->sei.unregistered.nb_buf_ref = 0;
1321 if (
h->sei.picture_timing.timecode_cnt > 0) {
1327 sizeof(uint32_t)*4);
1331 tc_sd = (uint32_t*)tcside->
data;
1332 tc_sd[0] =
h->sei.picture_timing.timecode_cnt;
1334 for (
int i = 0;
i < tc_sd[0];
i++) {
1335 int drop =
h->sei.picture_timing.timecode[
i].dropframe;
1336 int hh =
h->sei.picture_timing.timecode[
i].hours;
1337 int mm =
h->sei.picture_timing.timecode[
i].minutes;
1338 int ss =
h->sei.picture_timing.timecode[
i].seconds;
1339 int ff =
h->sei.picture_timing.timecode[
i].frame;
1345 h->sei.picture_timing.timecode_cnt = 0;
1356 int i, pics, out_of_order, out_idx;
1361 if (
sps->bitstream_restriction_flag ||
1363 h->avctx->has_b_frames =
FFMAX(
h->avctx->has_b_frames,
sps->num_reorder_frames);
1366 for (
i = 0; 1;
i++) {
1369 h->last_pocs[
i-1] = cur->
poc;
1372 h->last_pocs[
i-1]=
h->last_pocs[
i];
1378 out_of_order =
FFMAX(out_of_order, 1);
1382 h->last_pocs[
i] = INT_MIN;
1383 h->last_pocs[0] = cur->
poc;
1385 }
else if(
h->avctx->has_b_frames < out_of_order && !
sps->bitstream_restriction_flag){
1387 av_log(
h->avctx, loglevel,
"Increasing reorder buffer to %d\n", out_of_order);
1388 h->avctx->has_b_frames = out_of_order;
1392 while (
h->delayed_pic[pics])
1397 h->delayed_pic[pics++] = cur;
1401 out =
h->delayed_pic[0];
1403 for (
i = 1;
h->delayed_pic[
i] &&
1404 !
h->delayed_pic[
i]->f->key_frame &&
1405 !
h->delayed_pic[
i]->mmco_reset;
1407 if (
h->delayed_pic[
i]->poc <
out->poc) {
1408 out =
h->delayed_pic[
i];
1411 if (
h->avctx->has_b_frames == 0 &&
1412 (
h->delayed_pic[0]->f->key_frame ||
h->delayed_pic[0]->mmco_reset))
1413 h->next_outputed_poc = INT_MIN;
1414 out_of_order =
out->poc <
h->next_outputed_poc;
1416 if (out_of_order || pics >
h->avctx->has_b_frames) {
1418 for (
i = out_idx;
h->delayed_pic[
i];
i++)
1419 h->delayed_pic[
i] =
h->delayed_pic[
i + 1];
1421 if (!out_of_order && pics >
h->avctx->has_b_frames) {
1422 h->next_output_pic =
out;
1423 if (out_idx == 0 &&
h->delayed_pic[0] && (
h->delayed_pic[0]->f->key_frame ||
h->delayed_pic[0]->mmco_reset)) {
1424 h->next_outputed_poc = INT_MIN;
1426 h->next_outputed_poc =
out->poc;
1428 if (
out->recovered) {
1435 if (!
out->recovered) {
1438 h->next_output_pic =
NULL;
1455 const H2645NAL *nal,
int first_slice)
1460 int last_pic_structure, last_pic_droppable, ret;
1468 if (
sps &&
sps->bitstream_restriction_flag &&
1469 h->avctx->has_b_frames <
sps->num_reorder_frames) {
1470 h->avctx->has_b_frames =
sps->num_reorder_frames;
1473 last_pic_droppable =
h->droppable;
1474 last_pic_structure =
h->picture_structure;
1475 h->droppable = (nal->
ref_idc == 0);
1486 if (
h->poc.frame_num !=
h->poc.prev_frame_num) {
1487 int unwrap_prev_frame_num =
h->poc.prev_frame_num;
1488 int max_frame_num = 1 <<
sps->log2_max_frame_num;
1490 if (unwrap_prev_frame_num >
h->poc.frame_num)
1491 unwrap_prev_frame_num -= max_frame_num;
1493 if ((
h->poc.frame_num - unwrap_prev_frame_num) >
sps->ref_frame_count) {
1494 unwrap_prev_frame_num = (
h->poc.frame_num -
sps->ref_frame_count) - 1;
1495 if (unwrap_prev_frame_num < 0)
1496 unwrap_prev_frame_num += max_frame_num;
1498 h->poc.prev_frame_num = unwrap_prev_frame_num;
1507 if (
h->first_field) {
1514 if (
h->cur_pic_ptr->tf.owner[last_field] ==
h->avctx) {
1519 if (!
FIELD_PICTURE(
h) ||
h->picture_structure == last_pic_structure) {
1527 if (
h->cur_pic_ptr->frame_num !=
h->poc.frame_num) {
1543 "Invalid field mode combination %d/%d\n",
1544 last_pic_structure,
h->picture_structure);
1545 h->picture_structure = last_pic_structure;
1546 h->droppable = last_pic_droppable;
1548 }
else if (last_pic_droppable !=
h->droppable) {
1550 "Found reference and non-reference fields in the same frame, which");
1551 h->picture_structure = last_pic_structure;
1552 h->droppable = last_pic_droppable;
1559 while (
h->poc.frame_num !=
h->poc.prev_frame_num && !
h->first_field &&
1560 h->poc.frame_num != (
h->poc.prev_frame_num + 1) % (1 <<
sps->log2_max_frame_num)) {
1563 h->poc.frame_num,
h->poc.prev_frame_num);
1564 if (!
sps->gaps_in_frame_num_allowed_flag)
1566 h->last_pocs[
i] = INT_MIN;
1573 h->poc.prev_frame_num++;
1574 h->poc.prev_frame_num %= 1 <<
sps->log2_max_frame_num;
1575 h->cur_pic_ptr->frame_num =
h->poc.prev_frame_num;
1576 h->cur_pic_ptr->invalid_gap = !
sps->gaps_in_frame_num_allowed_flag;
1580 h->explicit_ref_marking = 0;
1591 if (
h->short_ref_count) {
1593 1<<(
h->ps.sps->bit_depth_luma-1),
1594 1<<(
h->ps.sps->bit_depth_chroma-1),
1595 1<<(
h->ps.sps->bit_depth_chroma-1),
1600 h->short_ref[0]->f->width == prev->
f->
width &&
1601 h->short_ref[0]->f->height == prev->
f->
height &&
1602 h->short_ref[0]->f->format == prev->
f->
format) {
1607 h->short_ref[0]->tf.f =
h->short_ref[0]->f;
1611 h->short_ref[0]->poc = prev->
poc + 2U;
1613 if (
h->short_ref[0]->field_picture)
1615 }
else if (!
h->frame_recovered && !
h->avctx->hwaccel)
1617 h->short_ref[0]->frame_num =
h->poc.prev_frame_num;
1624 if (
h->first_field) {
1630 if (!
FIELD_PICTURE(
h) ||
h->picture_structure == last_pic_structure) {
1633 h->missing_fields ++;
1634 h->cur_pic_ptr =
NULL;
1637 h->missing_fields = 0;
1638 if (
h->cur_pic_ptr->frame_num !=
h->poc.frame_num) {
1645 h->cur_pic_ptr =
NULL;
1651 h->cur_pic_ptr =
NULL;
1670 h->cur_pic_ptr->tf.owner[field] =
h->avctx;
1676 memset(
h->slice_table +
i*
h->mb_stride, -1, (
h->mb_stride - (
i+1==
h->mb_height)) *
sizeof(*
h->slice_table));
1678 memset(
h->slice_table, -1,
1679 (
h->mb_height *
h->mb_stride - 1) *
sizeof(*
h->slice_table));
1683 h->ps.sps, &
h->poc,
h->picture_structure, nal->
ref_idc);
1693 if (
h->sei.recovery_point.recovery_frame_cnt >= 0) {
1694 const int sei_recovery_frame_cnt =
h->sei.recovery_point.recovery_frame_cnt;
1697 h->valid_recovery_point = 1;
1699 if (
h->recovery_frame < 0
1700 ||
av_mod_uintp2(
h->recovery_frame -
h->poc.frame_num,
h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1701 h->recovery_frame =
av_mod_uintp2(
h->poc.frame_num + sei_recovery_frame_cnt,
h->ps.sps->log2_max_frame_num);
1703 if (!
h->valid_recovery_point)
1704 h->recovery_frame =
h->poc.frame_num;
1711 (
h->recovery_frame ==
h->poc.frame_num && nal->
ref_idc)) {
1712 h->recovery_frame = -1;
1713 h->cur_pic_ptr->recovered = 1;
1720 h->cur_pic_ptr->recovered |=
h->frame_recovered;
1747 unsigned int slice_type,
tmp,
i;
1748 int field_pic_flag, bottom_field_flag;
1749 int first_slice = sl ==
h->slice_ctx && !
h->current_slice;
1750 int picture_structure;
1758 if (slice_type > 9) {
1760 "slice type %d too large at %d\n",
1764 if (slice_type > 4) {
1785 if (!
h->ps.pps_list[sl->
pps_id]) {
1787 "non-existing PPS %u referenced\n",
1805 if (
sps->frame_mbs_only_flag) {
1809 av_log(
h->avctx,
AV_LOG_ERROR,
"This stream was generated by a broken encoder, invalid 8x8 inference\n");
1813 if (field_pic_flag) {
1834 if (
sps->poc_type == 0) {
1837 if (
pps->pic_order_present == 1 && picture_structure ==
PICT_FRAME)
1841 if (
sps->poc_type == 1 && !
sps->delta_pic_order_always_zero_flag) {
1844 if (
pps->pic_order_present == 1 && picture_structure ==
PICT_FRAME)
1849 if (
pps->redundant_pic_cnt_present)
1857 picture_structure,
h->avctx);
1870 for (
i = 0;
i < 2;
i++) {
1875 (
pps->weighted_bipred_idc == 1 &&
1879 picture_structure,
h->avctx);
1902 if (
tmp > 51 + 6 * (
sps->bit_depth_luma - 8)) {
1919 if (
pps->deblocking_filter_parameters_present) {
1923 "deblocking_filter_idc %u out of range\n",
tmp);
1933 if (slice_alpha_c0_offset_div2 > 6 ||
1934 slice_alpha_c0_offset_div2 < -6 ||
1935 slice_beta_offset_div2 > 6 ||
1936 slice_beta_offset_div2 < -6) {
1938 "deblocking filter parameters %d %d out of range\n",
1939 slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1979 if (
h->ps.pps->weighted_bipred_idc == 2 &&
1990 if (!
h->setup_finished)
2010 h->postpone_filter = 1;
2016 h->ps.pps->chroma_qp_index_offset[0],
2017 h->ps.pps->chroma_qp_index_offset[1]) +
2018 6 * (
h->ps.sps->bit_depth_luma - 8);
2031 for (j = 0; j < 2; j++) {
2034 for (
i = 0;
i < 16;
i++) {
2036 if (j < sl->list_count && i < sl->ref_count[j] &&
2040 for (k = 0; k <
h->short_ref_count; k++)
2041 if (
h->short_ref[k]->f->buf[0]->buffer == buf) {
2045 for (k = 0; k <
h->long_ref_count; k++)
2046 if (
h->long_ref[k] &&
h->long_ref[k]->f->buf[0]->buffer == buf) {
2047 id_list[
i] =
h->short_ref_count + k;
2055 for (
i = 0;
i < 16;
i++)
2058 ref2frm[18 + 1] = -1;
2059 for (
i = 16;
i < 48;
i++)
2060 ref2frm[
i + 4] = 4 * id_list[(
i - 16) >> 1] +
2066 "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2074 h->cur_pic_ptr->field_poc[0],
2075 h->cur_pic_ptr->field_poc[1],
2091 int first_slice = sl ==
h->slice_ctx && !
h->current_slice;
2107 if (
h->setup_finished) {
2114 if (
h->current_slice) {
2117 if (
h->nb_slice_ctx_queued) {
2124 memcpy(&tmp_ctx,
h->slice_ctx,
sizeof(tmp_ctx));
2125 memcpy(
h->slice_ctx, sl,
sizeof(tmp_ctx));
2126 memcpy(sl, &tmp_ctx,
sizeof(tmp_ctx));
2139 h->cur_pic_ptr =
NULL;
2146 if (!
h->first_field) {
2147 if (
h->cur_pic_ptr && !
h->droppable) {
2151 h->cur_pic_ptr =
NULL;
2155 if (!
h->current_slice)
2158 if (
h->current_slice == 0 && !
h->first_field) {
2172 if (
h->ps.pps->sps_id !=
pps->sps_id ||
2173 h->ps.pps->transform_8x8_mode !=
pps->transform_8x8_mode
2178 if (
h->ps.sps !=
pps->sps) {
2180 "SPS changed in the middle of the frame\n");
2185 if (
h->current_slice == 0) {
2191 h->droppable != (nal->
ref_idc == 0)) {
2193 "Changing field mode (%d -> %d) between slices is not allowed\n",
2196 }
else if (!
h->cur_pic_ptr) {
2198 "unset cur_pic_ptr on slice %d\n",
2199 h->current_slice + 1);
2208 h->nb_slice_ctx_queued++;
2233 int mb_type,
int top_xy,
2237 int mb_xy,
int list)
2239 int b_stride =
h->b_stride;
2244 const int b_xy =
h->mb2b_xy[top_xy] + 3 * b_stride;
2245 const int b8_xy = 4 * top_xy + 2;
2246 const int *ref2frm = &
h->ref2frm[
h->slice_table[top_xy] & (
MAX_SLICES - 1)][list][(
MB_MBAFF(sl) ? 20 : 2)];
2247 AV_COPY128(mv_dst - 1 * 8,
h->cur_pic.motion_val[list][b_xy + 0]);
2248 ref_cache[0 - 1 * 8] =
2249 ref_cache[1 - 1 * 8] = ref2frm[
h->cur_pic.ref_index[list][b8_xy + 0]];
2250 ref_cache[2 - 1 * 8] =
2251 ref_cache[3 - 1 * 8] = ref2frm[
h->cur_pic.ref_index[list][b8_xy + 1]];
2259 const int b_xy =
h->mb2b_xy[left_xy[
LTOP]] + 3;
2260 const int b8_xy = 4 * left_xy[
LTOP] + 1;
2262 AV_COPY32(mv_dst - 1 + 0,
h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2263 AV_COPY32(mv_dst - 1 + 8,
h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2264 AV_COPY32(mv_dst - 1 + 16,
h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2265 AV_COPY32(mv_dst - 1 + 24,
h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2267 ref_cache[-1 + 8] = ref2frm[
h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2268 ref_cache[-1 + 16] =
2269 ref_cache[-1 + 24] = ref2frm[
h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2277 ref_cache[-1 + 16] =
2293 int8_t *
ref = &
h->cur_pic.ref_index[list][4 * mb_xy];
2295 uint32_t ref01 = (
pack16to32(ref2frm[
ref[0]], ref2frm[
ref[1]]) & 0x00FF00FF) * 0x0101;
2296 uint32_t ref23 = (
pack16to32(ref2frm[
ref[2]], ref2frm[
ref[3]]) & 0x00FF00FF) * 0x0101;
2297 AV_WN32A(&ref_cache[0 * 8], ref01);
2298 AV_WN32A(&ref_cache[1 * 8], ref01);
2299 AV_WN32A(&ref_cache[2 * 8], ref23);
2300 AV_WN32A(&ref_cache[3 * 8], ref23);
2304 int16_t(*mv_src)[2] = &
h->cur_pic.motion_val[list][4 * sl->
mb_x + 4 * sl->
mb_y * b_stride];
2305 AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2306 AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2307 AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2308 AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2317 const int mb_xy = sl->
mb_xy;
2323 top_xy = mb_xy - (
h->mb_stride <<
MB_FIELD(sl));
2325 left_xy[
LBOT] = left_xy[
LTOP] = mb_xy - 1;
2327 const int left_mb_field_flag =
IS_INTERLACED(
h->cur_pic.mb_type[mb_xy - 1]);
2330 if (left_mb_field_flag != curr_mb_field_flag)
2331 left_xy[
LTOP] -=
h->mb_stride;
2333 if (curr_mb_field_flag)
2334 top_xy +=
h->mb_stride &
2335 (((
h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2336 if (left_mb_field_flag != curr_mb_field_flag)
2337 left_xy[
LBOT] +=
h->mb_stride;
2349 int qp =
h->cur_pic.qscale_table[mb_xy];
2350 if (qp <= qp_thresh &&
2351 (left_xy[
LTOP] < 0 ||
2352 ((qp +
h->cur_pic.qscale_table[left_xy[
LTOP]] + 1) >> 1) <= qp_thresh) &&
2354 ((qp +
h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2357 if ((left_xy[
LTOP] < 0 ||
2358 ((qp +
h->cur_pic.qscale_table[left_xy[
LBOT]] + 1) >> 1) <= qp_thresh) &&
2359 (top_xy <
h->mb_stride ||
2360 ((qp +
h->cur_pic.qscale_table[top_xy -
h->mb_stride] + 1) >> 1) <= qp_thresh))
2365 top_type =
h->cur_pic.mb_type[top_xy];
2366 left_type[
LTOP] =
h->cur_pic.mb_type[left_xy[
LTOP]];
2367 left_type[
LBOT] =
h->cur_pic.mb_type[left_xy[
LBOT]];
2372 left_type[
LTOP] = left_type[
LBOT] = 0;
2374 if (
h->slice_table[top_xy] == 0xFFFF)
2376 if (
h->slice_table[left_xy[
LBOT]] == 0xFFFF)
2377 left_type[
LTOP] = left_type[
LBOT] = 0;
2387 top_type, left_type, mb_xy, 0);
2390 top_type, left_type, mb_xy, 1);
2392 nnz =
h->non_zero_count[mb_xy];
2394 AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2395 AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2396 AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2397 AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2398 sl->
cbp =
h->cbp_table[mb_xy];
2401 nnz =
h->non_zero_count[top_xy];
2402 AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2405 if (left_type[
LTOP]) {
2406 nnz =
h->non_zero_count[left_xy[
LTOP]];
2407 nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2408 nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2409 nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2410 nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2415 if (!
CABAC(
h) &&
h->ps.pps->transform_8x8_mode) {
2417 nnz_cache[4 + 8 * 0] =
2418 nnz_cache[5 + 8 * 0] = (
h->cbp_table[top_xy] & 0x4000) >> 12;
2419 nnz_cache[6 + 8 * 0] =
2420 nnz_cache[7 + 8 * 0] = (
h->cbp_table[top_xy] & 0x8000) >> 12;
2423 nnz_cache[3 + 8 * 1] =
2424 nnz_cache[3 + 8 * 2] = (
h->cbp_table[left_xy[
LTOP]] & 0x2000) >> 12;
2427 nnz_cache[3 + 8 * 3] =
2428 nnz_cache[3 + 8 * 4] = (
h->cbp_table[left_xy[
LBOT]] & 0x8000) >> 12;
2432 nnz_cache[
scan8[0]] =
2433 nnz_cache[
scan8[1]] =
2434 nnz_cache[
scan8[2]] =
2435 nnz_cache[
scan8[3]] = (sl->
cbp & 0x1000) >> 12;
2437 nnz_cache[
scan8[0 + 4]] =
2438 nnz_cache[
scan8[1 + 4]] =
2439 nnz_cache[
scan8[2 + 4]] =
2440 nnz_cache[
scan8[3 + 4]] = (sl->
cbp & 0x2000) >> 12;
2442 nnz_cache[
scan8[0 + 8]] =
2443 nnz_cache[
scan8[1 + 8]] =
2444 nnz_cache[
scan8[2 + 8]] =
2445 nnz_cache[
scan8[3 + 8]] = (sl->
cbp & 0x4000) >> 12;
2447 nnz_cache[
scan8[0 + 12]] =
2448 nnz_cache[
scan8[1 + 12]] =
2449 nnz_cache[
scan8[2 + 12]] =
2450 nnz_cache[
scan8[3 + 12]] = (sl->
cbp & 0x8000) >> 12;
2459 uint8_t *dest_y, *dest_cb, *dest_cr;
2460 int linesize, uvlinesize, mb_x, mb_y;
2463 const int pixel_shift =
h->pixel_shift;
2464 const int block_h = 16 >>
h->chroma_y_shift;
2466 if (
h->postpone_filter)
2470 for (mb_x = start_x; mb_x < end_x; mb_x++)
2471 for (mb_y = end_mb_y -
FRAME_MBAFF(
h); mb_y <= end_mb_y; mb_y++) {
2473 mb_xy = sl->
mb_xy = mb_x + mb_y *
h->mb_stride;
2474 mb_type =
h->cur_pic.mb_type[mb_xy];
2482 dest_y =
h->cur_pic.f->data[0] +
2483 ((mb_x << pixel_shift) + mb_y * sl->
linesize) * 16;
2484 dest_cb =
h->cur_pic.f->data[1] +
2485 (mb_x << pixel_shift) * (8 <<
CHROMA444(
h)) +
2487 dest_cr =
h->cur_pic.f->data[2] +
2488 (mb_x << pixel_shift) * (8 <<
CHROMA444(
h)) +
2513 linesize, uvlinesize);
2516 dest_cr, linesize, uvlinesize);
2529 const int mb_xy = sl->
mb_x + sl->
mb_y *
h->mb_stride;
2530 int mb_type = (
h->slice_table[mb_xy - 1] == sl->
slice_num) ?
2531 h->cur_pic.mb_type[mb_xy - 1] :
2532 (
h->slice_table[mb_xy -
h->mb_stride] == sl->
slice_num) ?
2533 h->cur_pic.mb_type[mb_xy -
h->mb_stride] : 0;
2548 if ((top +
height) >= pic_height)
2549 height += deblock_border;
2550 top -= deblock_border;
2553 if (top >= pic_height || (top +
height) < 0)
2572 int startx,
int starty,
2573 int endx,
int endy,
int status)
2589 int lf_x_start = sl->
mb_x;
2593 sl->
linesize =
h->cur_pic_ptr->f->linesize[0];
2604 if (
h->postpone_filter)
2610 if (!(
h->avctx->active_thread_type &
FF_THREAD_SLICE) &&
h->picture_structure ==
PICT_FRAME &&
h->slice_ctx[0].er.error_status_table) {
2613 int prev_status =
h->slice_ctx[0].er.error_status_table[
h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
2616 h->slice_ctx[0].er.error_occurred = 1;
2620 if (
h->ps.pps->cabac) {
2664 if (sl->
mb_x >= lf_x_start)
2680 if (++sl->
mb_x >=
h->mb_width) {
2682 sl->
mb_x = lf_x_start = 0;
2692 if (eos || sl->
mb_y >=
h->mb_height) {
2693 ff_tlog(
h->avctx,
"slice end %d %d\n",
2697 if (sl->
mb_x > lf_x_start)
2731 "error while decoding MB %d %d\n", sl->
mb_x, sl->
mb_y);
2737 if (++sl->
mb_x >=
h->mb_width) {
2739 sl->
mb_x = lf_x_start = 0;
2747 if (sl->
mb_y >=
h->mb_height) {
2748 ff_tlog(
h->avctx,
"slice end %d %d\n",
2767 ff_tlog(
h->avctx,
"slice end %d %d\n",
2773 if (sl->
mb_x > lf_x_start)
2801 int context_count =
h->nb_slice_ctx_queued;
2805 h->slice_ctx[0].next_slice_idx = INT_MAX;
2807 if (
h->avctx->hwaccel || context_count < 1)
2810 av_assert0(context_count &&
h->slice_ctx[context_count - 1].mb_y <
h->mb_height);
2812 if (context_count == 1) {
2814 h->slice_ctx[0].next_slice_idx =
h->mb_width *
h->mb_height;
2815 h->postpone_filter = 0;
2818 h->mb_y =
h->slice_ctx[0].mb_y;
2823 for (
i = 0;
i < context_count;
i++) {
2824 int next_slice_idx =
h->mb_width *
h->mb_height;
2827 sl = &
h->slice_ctx[
i];
2833 slice_idx = sl->
mb_y *
h->mb_width + sl->
mb_x;
2834 for (j = 0; j < context_count; j++) {
2836 int slice_idx2 = sl2->
mb_y *
h->mb_width + sl2->
mb_x;
2838 if (
i == j || slice_idx2 < slice_idx)
2840 next_slice_idx =
FFMIN(next_slice_idx, slice_idx2);
2846 NULL, context_count,
sizeof(
h->slice_ctx[0]));
2849 sl = &
h->slice_ctx[context_count - 1];
2852 for (
i = 1;
i < context_count;
i++)
2853 h->slice_ctx[0].er.error_count +=
h->slice_ctx[
i].er.error_count;
2856 if (
h->postpone_filter) {
2857 h->postpone_filter = 0;
2859 for (
i = 0;
i < context_count;
i++) {
2862 sl = &
h->slice_ctx[
i];
2864 x_end = (sl->
mb_y >=
h->mb_height) ?
h->mb_width : sl->
mb_x;
2869 j == y_end - 1 ? x_end :
h->mb_width);
2876 h->nb_slice_ctx_queued = 0;
simple assert() macros that are a bit more flexible than ISO C assert().
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Libavcodec external API header.
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
#define FF_DEBUG_PICT_INFO
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Context Adaptive Binary Arithmetic Coder inline functions.
static int av_unused get_cabac_terminate(CABACContext *c)
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
#define ss(width, name, subs,...)
#define AV_CEIL_RSHIFT(a, b)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define CONFIG_ERROR_RESILIENCE
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_er_frame_start(ERContext *s)
#define VP_START
< current MB is the first after a resync marker
static void fill_rectangle(int x, int y, int w, int h)
static int get_bits_left(GetBitContext *gb)
static unsigned int get_bits1(GetBitContext *s)
static int get_bits_count(const GetBitContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static const uint8_t * align_get_bits(GetBitContext *s)
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
@ AVDISCARD_ALL
discard all
@ AVDISCARD_NONKEY
discard all frames except keyframes
@ AVDISCARD_BIDIR
discard all bidirectional frames
@ AVDISCARD_NONINTRA
discard all non intra frames
@ AVDISCARD_NONREF
discard all non reference
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
AVBufferRef * av_buffer_allocz(buffer_size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
int av_buffer_replace(AVBufferRef **pdst, AVBufferRef *src)
Ensure dst refers to the same data as src.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVBufferPool * av_buffer_pool_init(buffer_size_t size, AVBufferRef *(*alloc)(buffer_size_t size))
Allocate and initialize a buffer pool.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_VERBOSE
Detailed information.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
@ AV_PICTURE_TYPE_I
Intra.
@ AV_PICTURE_TYPE_SP
Switching Predicted.
@ AV_PICTURE_TYPE_P
Predicted.
@ AV_PICTURE_TYPE_SI
Switching Intra.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
@ AV_STEREO3D_COLUMNS
Views are packed per column.
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
@ AV_STEREO3D_CHECKERBOARD
Views are packed in a checkerboard-like structure per pixel.
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
H.264 common definitions.
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
H.264 / AVC / MPEG-4 part10 motion vector prediction.
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
H.264 parameter set handling.
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
@ H264_SEI_FPA_TYPE_INTERLEAVE_ROW
@ H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
@ H264_SEI_FPA_TYPE_TOP_BOTTOM
@ H264_SEI_FPA_TYPE_CHECKERBOARD
@ H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
@ H264_SEI_FPA_TYPE_SIDE_BY_SIDE
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
static int h264_frame_start(H264Context *h)
static void init_dimensions(H264Context *h)
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
static void init_scan_tables(H264Context *h)
initialize scan tables
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
static const uint8_t field_scan[16+1]
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
#define IN_RANGE(a, b, size)
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
static const uint8_t field_scan8x8[64+1]
static int h264_export_frame_props(H264Context *h)
static int alloc_picture(H264Context *h, H264Picture *pic)
static const uint8_t field_scan8x8_cavlc[64+1]
static void release_unused_pictures(H264Context *h, int remove_current)
static int h264_slice_header_init(H264Context *h)
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
static int decode_slice(struct AVCodecContext *avctx, void *arg)
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
static int init_table_pools(H264Context *h)
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
static int h264_select_output_frame(H264Context *h)
static const uint8_t zigzag_scan8x8_cavlc[64+1]
static int find_unused_picture(H264Context *h)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
const uint8_t ff_h264_golomb_to_pict_type[5]
H.264 / AVC / MPEG-4 part10 codec.
#define FIELD_OR_MBAFF_PICTURE(h)
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
static const uint8_t scan8[16 *3+3]
#define H264_MAX_PICTURE_COUNT
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
#define MAX_DELAYED_PIC_COUNT
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
void ff_h264_free_tables(H264Context *h)
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
void ff_h264_flush_change(H264Context *h)
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
void ff_color_frame(AVFrame *frame, const int color[4])
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
int ff_thread_can_start_frame(AVCodecContext *avctx)
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
common internal API header
#define PTRDIFF_SPECIFIER
static enum AVPixelFormat pix_fmts[]
const uint8_t ff_zigzag_direct[64]
const uint8_t ff_zigzag_scan[16+1]
#define USES_LIST(a, list)
#define PICT_BOTTOM_FIELD
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
#define AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV422P9
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P14
AVPixelFormat
Pixel format.
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
#define AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_YUV444P10
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
FF_ENABLE_DEPRECATION_WARNINGS int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
useful rectangle filling function
#define FF_ARRAY_ELEMS(a)
uint8_t * data
The data buffer.
A reference counted buffer type.
main external API structure.
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Structure to hold side data for an AVFrame.
This structure describes decoded (raw) audio or video data.
int coded_picture_number
picture number in bitstream order
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int key_frame
1 -> keyframe, 0-> not
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
enum AVPictureType pict_type
Picture type of the frame.
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
enum AVStereo3DType type
How views are packed within the video.
int flags
Additional information about the frame packing.
enum AVStereo3DView view
Determines which views are packed.
const uint8_t * bytestream_end
const uint8_t * bytestream
int ref_idc
H.264 only, nal_ref_idc.
H264Picture DPB[H264_MAX_PICTURE_COUNT]
H264SliceContext * slice_ctx
int recovered
picture at IDR or recovery point + recovery count
AVBufferRef * motion_val_buf[2]
int16_t(*[2] motion_val)[2]
int sei_recovery_frame_cnt
int field_picture
whether or not picture was encoded in separate fields
int frame_num
frame_num (raw frame_num from slice header)
int long_ref
1->long term reference 0->short term reference
void * hwaccel_picture_private
hardware accelerator private data
AVBufferRef * mb_type_buf
AVBufferRef * ref_index_buf[2]
int mmco_reset
MMCO_RESET set this 1.
int field_poc[2]
top/bottom POC
AVBufferRef * hwaccel_priv_buf
AVBufferRef * qscale_table_buf
int chroma_log2_weight_denom
int luma_log2_weight_denom
int implicit_weight[48][48][2]
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
int anticlockwise_rotation
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
int mb_field_decoding_flag
int8_t ref_cache[2][5 *8]
unsigned int first_mb_addr
int bipred_scratchpad_allocated
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
uint8_t * edge_emu_buffer
struct H264Context * h264
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
int top_borders_allocated[2]
uint8_t * bipred_scratchpad
int qp_thresh
QP threshold to skip loopfilter.
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
uint8_t(*[2] top_borders)[(16 *3) *2]
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
int direct_spatial_mv_pred
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
uint8_t(*[2] mvd_table)[2]
int8_t * intra4x4_pred_mode
int edge_emu_buffer_allocated
int slice_alpha_c0_offset
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
MMCO mmco[MAX_MMCO_COUNT]
#define avpriv_request_sample(...)
static int ref[MAX_W *MAX_W]
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
#define AV_TIMECODE_STR_SIZE
static double cr(void *priv, double x, double y)
static double cb(void *priv, double x, double y)