FFmpeg  4.4
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
176 
/* Reset ist->sub2video.frame to a blank canvas whose size comes from the
 * decoder dimensions, falling back to the stream's configured sub2video
 * w/h when the decoder has no dimensions yet.
 * Returns 0 on success, or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): this extract is missing doxygen line 177 (the function
 * signature) and lines 182/185 (presumably an av_frame_unref() and the
 * pixel-format assignment) -- confirm against the full source. */
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
 /* Clear the first plane so the canvas starts blank. */
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
/* Send the current sub2video canvas frame, stamped with the given pts, to
 * every filter input attached to this stream; the pts is also remembered
 * in sub2video.last_pts so heartbeats are not re-sent for older times.
 * NOTE(review): doxygen lines 232-234 (the av_buffersrc_add_frame() call
 * that sets 'ret') are missing from this extract. */
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
 /* The canvas must have been allocated by sub2video_get_blank_frame(). */
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
/* Re-render the sub2video canvas from an AVSubtitle and push it to the
 * attached filters. When sub is NULL a blank canvas is pushed instead:
 * at initialization time it is stamped with heartbeat_pts, otherwise with
 * the previous subpicture's end time (this is how an on-screen subtitle
 * gets cleared). Updates sub2video.end_pts and clears the initialize flag.
 * NOTE(review): doxygen line 268 (the av_log() call whose message continues
 * with "Impossible to get a blank canvas.") is missing from this extract. */
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
 /* No canvas means this stream is not being converted to video. */
249  if (!frame)
250  return;
251  if (sub) {
 /* Display times are in ms relative to sub->pts (AV_TIME_BASE units);
  * convert both endpoints into the input stream's time base. */
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
 /* Blit every subtitle rectangle onto the blank canvas. */
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276  sub2video_push_ref(ist, pts);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlayed subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init() if they were changed. tcsetattr() is on the POSIX
 * async-signal-safe list, so this may be called from a signal handler. */
329 static void term_exit_sigsafe(void)
330 {
331 #if HAVE_TERMIOS_H
332  if(restore_tty)
333  tcsetattr (0, TCSANOW, &oldtty);
334 #endif
335 }
336 
/* Public terminal teardown: silence the logger, then restore the tty.
 * NOTE(review): doxygen line 340 (presumably the term_exit_sigsafe() call)
 * is missing from this extract. */
337 void term_exit(void)
338 {
 /* Flush/quiet the log subsystem before touching the terminal. */
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
/* Signal handler for SIGINT/SIGTERM/SIGQUIT/SIGXCPU: record which signal
 * arrived so the main loop can shut down gracefully; after more than three
 * signals, write a message straight to stderr and hard-exit with 123.
 * NOTE(review): doxygen line 351 (the "sigterm_handler(int sig)" part of
 * the signature) and lines 355-356 (presumably the received_nb_signals
 * increment and terminal restore) are missing from this extract. */
350 static void
352 {
353  int ret;
354  received_sigterm = sig;
357  if(received_nb_signals > 3) {
 /* Only async-signal-safe calls here: raw write(2), not av_log(). */
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
/* Install terminal/signal handling: put the controlling tty into raw-ish
 * mode (so single keypresses reach read_key()), remember the old settings
 * for term_exit(), and route SIGINT/SIGTERM/SIGQUIT/SIGXCPU through
 * sigterm_handler(). SIGPIPE is ignored; on Windows a console control
 * handler is installed instead.
 * NOTE(review): doxygen line 422 is missing from this extract -- it is
 * presumably the condition guarding the termios setup (the brace closed
 * at line 440); confirm against the full source. */
408 void term_init(void)
409 {
410 #if defined __linux__
 /* Use sigaction() on Linux for precise control over handler behavior. */
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
 /* Save current settings so term_exit_sigsafe() can restore them. */
425  oldtty = tty;
426  restore_tty = 1;
427 
 /* Disable input translation, output post-processing stays on, turn
  * off echo/canonical mode, force 8-bit characters, and make read()
  * return after a single byte with no timeout. */
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  read(0, &ch, 1);
496  return ch;
497  }else{
498  return -1;
499  }
500  }
501 # endif
502  if(kbhit())
503  return(getch());
504 #endif
505  return -1;
506 }
507 
/* AVIOInterruptCallback hook: non-zero tells lavf to abort blocking I/O.
 * NOTE(review): doxygen line 510 (the return expression) is missing from
 * this extract -- it presumably tests the received-signal state; confirm
 * against the full source. */
508 static int decode_interrupt_cb(void *ctx)
509 {
511 }
512 
514 
/* Global teardown, registered to run at program exit: report benchmark
 * stats, free all filtergraphs with their queued frames/subtitles, close
 * output files and free per-output-stream state, stop input threads, free
 * per-input-stream state, close the vstats file, and log how we exited.
 * NOTE(review): numerous doxygen lines are dropped from this extract
 * (fifo read calls, frame/subtitle unrefs, several frees and loop-variable
 * declarations, e.g. 526, 533, 535, 541-545, 565-570, 574, 580-586,
 * 616-629, 634-646, 651-665) -- do not treat this listing as the complete
 * cleanup path. */
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
 /* Free every filtergraph: inputs (with queued frames and sub2video
  * subtitle queues), outputs, and the graph description. */
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
534  sizeof(frame), NULL);
536  }
537  av_fifo_freep(&ifilter->frame_queue);
538  if (ist->sub2video.sub_queue) {
539  while (av_fifo_size(ist->sub2video.sub_queue)) {
540  AVSubtitle sub;
542  &sub, sizeof(sub), NULL);
544  }
546  }
547  av_buffer_unref(&ifilter->hw_frames_ctx);
548  av_freep(&ifilter->name);
549  av_freep(&fg->inputs[j]);
550  }
551  av_freep(&fg->inputs);
552  for (j = 0; j < fg->nb_outputs; j++) {
553  OutputFilter *ofilter = fg->outputs[j];
554 
555  avfilter_inout_free(&ofilter->out_tmp);
556  av_freep(&ofilter->name);
557  av_freep(&ofilter->formats);
558  av_freep(&ofilter->channel_layouts);
559  av_freep(&ofilter->sample_rates);
560  av_freep(&fg->outputs[j]);
561  }
562  av_freep(&fg->outputs);
563  av_freep(&fg->graph_desc);
564 
566  }
568 
570 
571  /* close files */
572  for (i = 0; i < nb_output_files; i++) {
573  OutputFile *of = output_files[i];
575  if (!of)
576  continue;
577  s = of->ctx;
 /* Only close the AVIOContext for muxers that actually opened one. */
578  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
579  avio_closep(&s->pb);
581  av_dict_free(&of->opts);
582 
584  }
 /* Free per-output-stream state: bsf, cached frames/packets, option
  * dictionaries, encoder context, and any packets still queued for the
  * muxer. */
585  for (i = 0; i < nb_output_streams; i++) {
587 
588  if (!ost)
589  continue;
590 
591  av_bsf_free(&ost->bsf_ctx);
592 
593  av_frame_free(&ost->filtered_frame);
594  av_frame_free(&ost->last_frame);
595  av_packet_free(&ost->pkt);
596  av_dict_free(&ost->encoder_opts);
597 
598  av_freep(&ost->forced_keyframes);
599  av_expr_free(ost->forced_keyframes_pexpr);
600  av_freep(&ost->avfilter);
601  av_freep(&ost->logfile_prefix);
602 
603  av_freep(&ost->audio_channels_map);
604  ost->audio_channels_mapped = 0;
605 
606  av_dict_free(&ost->sws_dict);
607  av_dict_free(&ost->swr_opts);
608 
609  avcodec_free_context(&ost->enc_ctx);
610  avcodec_parameters_free(&ost->ref_par);
611 
612  if (ost->muxing_queue) {
613  while (av_fifo_size(ost->muxing_queue)) {
614  AVPacket *pkt;
615  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
617  }
618  av_fifo_freep(&ost->muxing_queue);
619  }
620 
622  }
623 #if HAVE_THREADS
624  free_input_threads();
625 #endif
626  for (i = 0; i < nb_input_files; i++) {
630  }
627  /* (input-file close calls elided in this extract) */
631  for (i = 0; i < nb_input_streams; i++) {
632  InputStream *ist = input_streams[i];
633 
636  av_packet_free(&ist->pkt);
637  av_dict_free(&ist->decoder_opts);
640  av_freep(&ist->filters);
641  av_freep(&ist->hwaccel_device);
642  av_freep(&ist->dts_buffer);
643 
645 
647  }
648 
649  if (vstats_file) {
 /* fclose() flushes; a failure here can mean lost stats data. */
650  if (fclose(vstats_file))
652  "Error closing vstats file, loss of information possible: %s\n",
653  av_err2str(AVERROR(errno)));
654  }
656 
661 
662  uninit_opts();
663 
665 
666  if (received_sigterm) {
667  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
668  (int) received_sigterm);
669  } else if (ret && atomic_load(&transcode_init_done)) {
670  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
671  }
672  term_exit();
 /* Lets CtrlHandler() on Windows know the cleanup has finished. */
673  ffmpeg_exited = 1;
674 }
675 
/* Iterate over every entry of dictionary 'b'.
 * NOTE(review): doxygen line 676 (the signature -- in FFmpeg this is
 * remove_avoptions(AVDictionary **a, AVDictionary *b)) and line 681 (the
 * loop body, presumably removing each matching key from *a) are missing
 * from this extract -- confirm against the full source. */
677 {
678  AVDictionaryEntry *t = NULL;
679 
 /* "" with AV_DICT_IGNORE_SUFFIX matches every key in the dictionary. */
680  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
682  }
683 }
684 
/* Abort with a fatal error if the dictionary 'm' still contains any entry,
 * i.e. an option the user supplied was not consumed by any component.
 * NOTE(review): doxygen line 685 (the signature, presumably
 * "static void assert_avoptions(AVDictionary *m)") and line 687 (the
 * AVDictionaryEntry declaration) are missing from this extract. */
686 {
688  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
689  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
690  exit_program(1);
691  }
692 }
693 
/* Terminate the program when an experimental codec was requested without
 * the required opt-in; the caller is expected to have logged the details
 * (the 'c' and 'encoder' parameters are unused in this visible body). */
694 static void abort_codec_experimental(const AVCodec *c, int encoder)
695 {
696  exit_program(1);
697 }
698 
/* When -benchmark_all is active, log the user/sys/real time consumed since
 * the previous call, labelled with the printf-style message built from
 * 'fmt'; always refreshes the global current_time snapshot.
 * NOTE(review): doxygen line 702 (the BenchmarkTimeStamps 't' snapshot,
 * presumably via get_benchmark_time_stamps()) and lines 710/712-713 (the
 * av_log call start and the user/sys time differences) are missing from
 * this extract. */
699 static void update_benchmark(const char *fmt, ...)
700 {
701  if (do_benchmark_all) {
703  va_list va;
704  char buf[1024];
705 
706  if (fmt) {
707  va_start(va, fmt);
708  vsnprintf(buf, sizeof(buf), fmt, va);
709  va_end(va);
711  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
714  t.real_usec - current_time.real_usec, buf);
715  }
716  current_time = t;
717  }
718 }
719 
/* Mark every output stream finished: the stream matching 'ost' gets the
 * 'this_stream' flags, all others get the 'others' flags.
 * NOTE(review): doxygen line 720 (the signature -- in FFmpeg this is
 * close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
 * OSTFinished others)) is missing from this extract. */
721 {
722  int i;
723  for (i = 0; i < nb_output_streams; i++) {
724  OutputStream *ost2 = output_streams[i];
725  ost2->finished |= ost == ost2 ? this_stream : others;
726  }
727 }
728 
/* Deliver one packet to the muxer of output file 'of' for stream 'ost':
 * enforce -frames limits, buffer packets while the muxer header is not yet
 * written (growing the muxing fifo within its configured bounds), extract
 * encoder quality/error side data for video, rescale timestamps into the
 * stream time base, sanitize DTS/PTS (guess on dts>pts, clamp
 * non-monotonous DTS), and finally hand the packet to the muxer.
 * 'unqueue' is non-zero when the packet is being replayed from the muxing
 * queue so it is not counted against -frames a second time.
 * NOTE(review): several doxygen lines are dropped from this extract (745,
 * 763, 772, 784-785, 788, 790, 824, 854, 860-861, 866, 870, 872), including
 * the side-data lookup, the packet reference into the queue, the
 * write-while-muxing check, and the av_interleaved_write_frame() call
 * itself -- confirm against the full source. */
729 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
730 {
731  AVFormatContext *s = of->ctx;
732  AVStream *st = ost->st;
733  int ret;
734 
735  /*
736  * Audio encoders may split the packets -- #frames in != #packets out.
737  * But there is no reordering, so we can limit the number of output packets
738  * by simply dropping them here.
739  * Counting encoded video frames needs to be done separately because of
740  * reordering, see do_video_out().
741  * Do not count the packet when unqueued because it has been counted when queued.
742  */
743  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
744  if (ost->frame_number >= ost->max_frames) {
746  return;
747  }
748  ost->frame_number++;
749  }
750 
751  if (!of->header_written) {
752  AVPacket *tmp_pkt;
753  /* the muxer is not initialized yet, buffer the packet */
754  if (!av_fifo_space(ost->muxing_queue)) {
 /* Grow the fifo: double it, but once past the data-size threshold
  * cap growth at max_muxing_queue_size. */
755  unsigned int are_we_over_size =
756  (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
757  int new_size = are_we_over_size ?
758  FFMIN(2 * av_fifo_size(ost->muxing_queue),
759  ost->max_muxing_queue_size) :
760  2 * av_fifo_size(ost->muxing_queue);
761 
762  if (new_size <= av_fifo_size(ost->muxing_queue)) {
764  "Too many packets buffered for output stream %d:%d.\n",
765  ost->file_index, ost->st->index);
766  exit_program(1);
767  }
768  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
769  if (ret < 0)
770  exit_program(1);
771  }
773  if (ret < 0)
774  exit_program(1);
 /* Move (not copy) the packet into a heap packet owned by the queue. */
775  tmp_pkt = av_packet_alloc();
776  if (!tmp_pkt)
777  exit_program(1);
778  av_packet_move_ref(tmp_pkt, pkt);
779  ost->muxing_queue_data_size += tmp_pkt->size;
780  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
781  return;
782  }
783 
786  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
787 
789  int i;
791  NULL);
 /* Encoder QP/error stats ride along as packet side data (sd). */
792  ost->quality = sd ? AV_RL32(sd) : -1;
793  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
794 
795  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
796  if (sd && i < sd[5])
797  ost->error[i] = AV_RL64(sd + 8 + 8*i);
798  else
799  ost->error[i] = -1;
800  }
801 
802  if (ost->frame_rate.num && ost->is_cfr) {
803  if (pkt->duration > 0)
804  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
805  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
806  ost->mux_timebase);
807  }
808  }
809 
810  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
811 
812  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
813  if (pkt->dts != AV_NOPTS_VALUE &&
814  pkt->pts != AV_NOPTS_VALUE &&
815  pkt->dts > pkt->pts) {
816  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
817  pkt->dts, pkt->pts,
818  ost->file_index, ost->st->index);
 /* Replace both by the median of {pts, dts, last_mux_dts+1}:
  * sum minus min minus max leaves the middle value. */
819  pkt->pts =
820  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
821  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
822  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
823  }
825  pkt->dts != AV_NOPTS_VALUE &&
826  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
827  ost->last_mux_dts != AV_NOPTS_VALUE) {
 /* Non-strict muxers tolerate equal DTS; strict ones need +1. */
828  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
829  if (pkt->dts < max) {
830  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
831  if (exit_on_error)
832  loglevel = AV_LOG_ERROR;
833  av_log(s, loglevel, "Non-monotonous DTS in output stream "
834  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
835  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
836  if (exit_on_error) {
837  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
838  exit_program(1);
839  }
840  av_log(s, loglevel, "changing to %"PRId64". This may result "
841  "in incorrect timestamps in the output file.\n",
842  max);
843  if (pkt->pts >= pkt->dts)
844  pkt->pts = FFMAX(pkt->pts, max);
845  pkt->dts = max;
846  }
847  }
848  }
849  ost->last_mux_dts = pkt->dts;
850 
851  ost->data_size += pkt->size;
852  ost->packets_written++;
853 
855 
856  if (debug_ts) {
857  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
858  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
859  av_get_media_type_string(ost->enc_ctx->codec_type),
862  pkt->size
863  );
864  }
865 
867  if (ret < 0) {
868  print_error("av_interleaved_write_frame()", ret);
869  main_return_code = 1;
871  }
873 }
874 
/* Mark this output stream's encoder finished; with -shortest, also clamp
 * the output file's recording time to this stream's current end so the
 * other streams stop at the same point.
 * NOTE(review): doxygen line 875 (the signature, presumably
 * "static void close_output_stream(OutputStream *ost)") is missing from
 * this extract. */
876 {
877  OutputFile *of = output_files[ost->file_index];
878 
879  ost->finished |= ENCODER_FINISHED;
880  if (of->shortest) {
881  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
882  of->recording_time = FFMIN(of->recording_time, end);
883  }
884 }
885 
886 /*
887  * Send a single packet to the output, applying any bitstream filters
888  * associated with the output stream. This may result in any number
889  * of packets actually being written, depending on what bitstream
890  * filters are applied. The supplied packet is consumed and will be
891  * blank (as if newly-allocated) when this function returns.
892  *
893  * If eof is set, instead indicate EOF to all bitstream filters and
894  * therefore flush any delayed packets to the output. A blank packet
895  * must be supplied in this case.
896  */
/* NOTE(review): doxygen line 897 (the first line of the signature,
 * "static void output_packet(OutputFile *of, AVPacket *pkt,") is missing
 * from this extract. */
898  OutputStream *ost, int eof)
899 {
900  int ret = 0;
901 
902  /* apply the output bitstream filters */
903  if (ost->bsf_ctx) {
 /* NULL input signals EOF/flush to the bsf. */
904  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
905  if (ret < 0)
906  goto finish;
 /* Drain every packet the bsf produces; EAGAIN just means "feed
  * more input" and is not an error. */
907  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
908  write_packet(of, pkt, ost, 0);
909  if (ret == AVERROR(EAGAIN))
910  ret = 0;
911  } else if (!eof)
912  write_packet(of, pkt, ost, 0);
913 
914 finish:
915  if (ret < 0 && ret != AVERROR_EOF) {
916  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
917  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
918  if(exit_on_error)
919  exit_program(1);
920  }
921 }
922 
/* Returns 1 while the stream is still within the output file's -t
 * recording window, 0 once the window has been exceeded.
 * NOTE(review): doxygen line 923 (the signature, presumably
 * "static int check_recording_time(OutputStream *ost)") and line 930
 * (presumably the close_output_stream() call taken when the limit is hit)
 * are missing from this extract. */
924 {
925  OutputFile *of = output_files[ost->file_index];
926 
927  if (of->recording_time != INT64_MAX &&
928  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
929  AV_TIME_BASE_Q) >= 0) {
931  return 0;
932  }
933  return 1;
934 }
935 
/* Rescale a filtered frame's pts from the filter sink time base into the
 * encoder time base, subtracting the output file start time; returns the
 * same timestamp as a double with extra fractional precision (used by the
 * fps/vsync logic). Returns AV_NOPTS_VALUE-as-double and leaves the frame
 * untouched when any needed context is missing.
 * NOTE(review): doxygen line 936 (the first signature line, presumably
 * "static double adjust_frame_pts_to_encoder_tb(OutputFile *of,
 * OutputStream *ost,") and lines 949/956/963 (the filter_tb initialization
 * from the buffersink and the start_time subtraction terms) are missing
 * from this extract. */
937  AVFrame *frame)
938 {
939  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
940  AVCodecContext *enc = ost->enc_ctx;
941  if (!frame || frame->pts == AV_NOPTS_VALUE ||
942  !enc || !ost->filter || !ost->filter->graph->graph)
943  goto early_exit;
944 
945  {
946  AVFilterContext *filter = ost->filter->filter;
947 
948  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
950  AVRational tb = enc->time_base;
 /* Widen the denominator (up to 16 extra bits) so the double keeps
  * sub-tick precision after the rescale. */
951  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
952 
953  tb.den <<= extra_bits;
954  float_pts =
955  av_rescale_q(frame->pts, filter_tb, tb) -
957  float_pts /= 1 << extra_bits;
958  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
959  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
960 
961  frame->pts =
962  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
964  }
965 
966 early_exit:
967 
968  if (debug_ts) {
969  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
970  frame ? av_ts2str(frame->pts) : "NULL",
971  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
972  float_pts,
973  enc ? enc->time_base.num : -1,
974  enc ? enc->time_base.den : -1);
975  }
976 
977  return float_pts;
978 }
979 
981  char *error, int error_len);
982 
/* Idempotent wrapper around init_output_stream(): returns 0 immediately if
 * the stream is already initialized, otherwise initializes it, logging any
 * failure and exiting the program when 'fatal' is non-zero.
 * Returns 0 on success or the negative error from init_output_stream().
 * NOTE(review): doxygen line 983 (the first signature line, presumably
 * "static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,")
 * is missing from this extract. */
984  unsigned int fatal)
985 {
986  int ret = AVERROR_BUG;
987  char error[1024] = {0};
988 
989  if (ost->initialized)
990  return 0;
991 
992  ret = init_output_stream(ost, frame, error, sizeof(error));
993  if (ret < 0) {
994  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
995  ost->file_index, ost->index, error);
996 
997  if (fatal)
998  exit_program(1);
999  }
1000 
1001  return ret;
1002 }
1003 
/* Encode one audio frame and mux every packet the encoder produces:
 * maintain sync_opts from the frame pts and sample count, feed the frame
 * with avcodec_send_frame(), then drain avcodec_receive_packet() until
 * EAGAIN, rescaling packet timestamps into the mux time base. Any encoder
 * error is fatal.
 * NOTE(review): doxygen line 1004 (the first signature line, presumably
 * "static void do_audio_out(OutputFile *of, OutputStream *ost,") and lines
 * 1011/1022/1026/1035/1049-1050 (including the stream-init call and parts
 * of the debug_ts log arguments) are missing from this extract. */
1005  AVFrame *frame)
1006 {
1007  AVCodecContext *enc = ost->enc_ctx;
1008  AVPacket *pkt = ost->pkt;
1009  int ret;
1010 
1012 
1013  if (!check_recording_time(ost))
1014  return;
1015 
 /* Without a usable pts (or with forced resampling sync), stamp the
  * frame at the running sample counter. */
1016  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1017  frame->pts = ost->sync_opts;
1018  ost->sync_opts = frame->pts + frame->nb_samples;
1019  ost->samples_encoded += frame->nb_samples;
1020  ost->frames_encoded++;
1021 
1023  if (debug_ts) {
1024  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1025  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1027  enc->time_base.num, enc->time_base.den);
1028  }
1029 
1030  ret = avcodec_send_frame(enc, frame);
1031  if (ret < 0)
1032  goto error;
1033 
 /* Drain all packets available for this input frame. */
1034  while (1) {
1036  ret = avcodec_receive_packet(enc, pkt);
1037  if (ret == AVERROR(EAGAIN))
1038  break;
1039  if (ret < 0)
1040  goto error;
1041 
1042  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1043 
1044  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1045 
1046  if (debug_ts) {
1047  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1048  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1051  }
1052 
1053  output_packet(of, pkt, ost, 0);
1054  }
1055 
1056  return;
1057 error:
1058  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1059  exit_program(1);
1060 }
1061 
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (one packet to draw, one to clear, the second with num_rects
 * forced to 0); all other codecs once. Timestamps are shifted by the
 * output file start time so -ss/-t behave correctly, and display times are
 * normalized so start_display_time is always 0.
 * NOTE(review): doxygen line 1127 (presumably resetting/preparing 'pkt'
 * before it is filled below) is missing from this extract. */
1062 static void do_subtitle_out(OutputFile *of,
1063  OutputStream *ost,
1064  AVSubtitle *sub)
1065 {
1066  int subtitle_out_max_size = 1024 * 1024;
1067  int subtitle_out_size, nb, i;
1068  AVCodecContext *enc;
1069  AVPacket *pkt = ost->pkt;
1070  int64_t pts;
1071 
1072  if (sub->pts == AV_NOPTS_VALUE) {
1073  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1074  if (exit_on_error)
1075  exit_program(1);
1076  return;
1077  }
1078 
1079  enc = ost->enc_ctx;
1080 
 /* Lazily allocate the shared 1 MiB encode buffer. */
1081  if (!subtitle_out) {
1082  subtitle_out = av_malloc(subtitle_out_max_size);
1083  if (!subtitle_out) {
1084  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1085  exit_program(1);
1086  }
1087  }
1088 
1089  /* Note: DVB subtitle need one packet to draw them and one other
1090  packet to clear them */
1091  /* XXX: signal it in the codec context ? */
1092  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1093  nb = 2;
1094  else
1095  nb = 1;
1096 
1097  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1098  pts = sub->pts;
1099  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1100  pts -= output_files[ost->file_index]->start_time;
1101  for (i = 0; i < nb; i++) {
1102  unsigned save_num_rects = sub->num_rects;
1103 
1104  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1105  if (!check_recording_time(ost))
1106  return;
1107 
1108  sub->pts = pts;
1109  // start_display_time is required to be 0
1110  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1111  sub->end_display_time -= sub->start_display_time;
1112  sub->start_display_time = 0;
 /* Second DVB pass encodes an empty subtitle to clear the screen. */
1113  if (i == 1)
1114  sub->num_rects = 0;
1115 
1116  ost->frames_encoded++;
1117 
1118  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1119  subtitle_out_max_size, sub);
1120  if (i == 1)
1121  sub->num_rects = save_num_rects;
1122  if (subtitle_out_size < 0) {
1123  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1124  exit_program(1);
1125  }
1126 
1128  pkt->data = subtitle_out;
1129  pkt->size = subtitle_out_size;
1130  pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1131  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1132  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1133  /* XXX: the pts correction is handled here. Maybe handling
1134  it in the codec would be better */
1135  if (i == 0)
1136  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1137  else
1138  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139  }
1140  pkt->dts = pkt->pts;
1141  output_packet(of, pkt, ost, 0);
1142  }
1143 }
1144 
/*
 * Encode one filtered video frame (next_picture) for output stream 'ost',
 * duplicating or dropping frames as required by the selected video sync
 * method, then send the resulting packets to the muxer.  A NULL
 * next_picture means "end of stream": emit any pending duplicated frames.
 *
 * NOTE(review): this listing is a doxygen text dump; several original
 * source lines (the hyperlinked ones, e.g. some av_log()/update_benchmark()
 * calls) were dropped by the extraction and are missing below.
 */
1145 static void do_video_out(OutputFile *of,
1146  OutputStream *ost,
1147  AVFrame *next_picture)
1148 {
1149  int ret, format_video_sync;
1150  AVPacket *pkt = ost->pkt;
1151  AVCodecContext *enc = ost->enc_ctx;
1152  AVRational frame_rate;
1153  int nb_frames, nb0_frames, i;
1154  double delta, delta0;
1155  double duration = 0;
1156  double sync_ipts = AV_NOPTS_VALUE;
1157  int frame_size = 0;
1158  InputStream *ist = NULL;
1159  AVFilterContext *filter = ost->filter->filter;
1160 
/* Make sure the encoder is initialized before we compute timestamps in
 * its time base. */
1161  init_output_stream_wrapper(ost, next_picture, 1);
1162  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1163 
1164  if (ost->source_index >= 0)
1165  ist = input_streams[ost->source_index];
1166 
/* Derive the nominal duration of one output frame, in encoder time-base
 * units, from the filter graph's frame rate (if known). */
1167  frame_rate = av_buffersink_get_frame_rate(filter);
1168  if (frame_rate.num > 0 && frame_rate.den > 0)
1169  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1170 
1171  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1172  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1173 
/* With no filters configured at all, trust the input frame's own
 * pkt_duration when it is usable. */
1174  if (!ost->filters_script &&
1175  !ost->filters &&
1176  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1177  next_picture &&
1178  ist &&
1179  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1180  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1181  }
1182 
1183  if (!next_picture) {
1184  //end, flushing
1185  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1186  ost->last_nb0_frames[1],
1187  ost->last_nb0_frames[2]);
1188  } else {
1189  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1190  delta = delta0 + duration;
1191 
1192  /* by default, we output a single frame */
1193  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1194  nb_frames = 1;
1195 
/* Resolve VSYNC_AUTO to a concrete sync method based on the muxer's
 * capabilities (and special-case AVI, which needs VFR). */
1196  format_video_sync = video_sync_method;
1197  if (format_video_sync == VSYNC_AUTO) {
1198  if(!strcmp(of->ctx->oformat->name, "avi")) {
1199  format_video_sync = VSYNC_VFR;
1200  } else
1201  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1202  if ( ist
1203  && format_video_sync == VSYNC_CFR
1204  && input_files[ist->file_index]->ctx->nb_streams == 1
1205  && input_files[ist->file_index]->input_ts_offset == 0) {
1206  format_video_sync = VSYNC_VSCFR;
1207  }
1208  if (format_video_sync == VSYNC_CFR && copy_ts) {
1209  format_video_sync = VSYNC_VSCFR;
1210  }
1211  }
1212  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1213 
/* A frame that lands slightly in the past but still overlaps the current
 * output slot is clipped onto the slot rather than dropped. */
1214  if (delta0 < 0 &&
1215  delta > 0 &&
1216  format_video_sync != VSYNC_PASSTHROUGH &&
1217  format_video_sync != VSYNC_DROP) {
1218  if (delta0 < -0.6) {
1219  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1220  } else
1221  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1222  sync_ipts = ost->sync_opts;
1223  duration += delta0;
1224  delta0 = 0;
1225  }
1226 
1227  switch (format_video_sync) {
1228  case VSYNC_VSCFR:
1229  if (ost->frame_number == 0 && delta0 >= 0.5) {
1230  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1231  delta = duration;
1232  delta0 = 0;
1233  ost->sync_opts = llrint(sync_ipts);
1234  }
/* fallthrough: VSCFR behaves like CFR after the initial-frame handling */
1235  case VSYNC_CFR:
1236  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1237  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1238  nb_frames = 0;
1239  } else if (delta < -1.1)
1240  nb_frames = 0;
1241  else if (delta > 1.1) {
1242  nb_frames = lrintf(delta);
1243  if (delta0 > 1.1)
1244  nb0_frames = llrintf(delta0 - 0.6);
1245  }
1246  break;
1247  case VSYNC_VFR:
1248  if (delta <= -0.6)
1249  nb_frames = 0;
1250  else if (delta > 0.6)
1251  ost->sync_opts = llrint(sync_ipts);
1252  break;
1253  case VSYNC_DROP:
1254  case VSYNC_PASSTHROUGH:
1255  ost->sync_opts = llrint(sync_ipts);
1256  break;
1257  default:
1258  av_assert0(0);
1259  }
1260  }
1261 
/* Clamp against -frames and record the dup history used when flushing. */
1262  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1263  nb0_frames = FFMIN(nb0_frames, nb_frames);
1264 
1265  memmove(ost->last_nb0_frames + 1,
1266  ost->last_nb0_frames,
1267  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1268  ost->last_nb0_frames[0] = nb0_frames;
1269 
1270  if (nb0_frames == 0 && ost->last_dropped) {
1271  nb_frames_drop++;
/* NOTE(review): source line 1272 (the av_log(NULL, AV_LOG_VERBOSE, ...)
 * call that this string belongs to) was dropped by the extraction. */
1273  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1274  ost->frame_number, ost->st->index, ost->last_frame->pts);
1275  }
1276  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1277  if (nb_frames > dts_error_threshold * 30) {
1278  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1279  nb_frames_drop++;
1280  return;
1281  }
1282  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1283  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1284  if (nb_frames_dup > dup_warning) {
1285  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1286  dup_warning *= 10;
1287  }
1288  }
1289  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1290 
1291  /* duplicates frame if needed */
1292  for (i = 0; i < nb_frames; i++) {
1293  AVFrame *in_picture;
1294  int forced_keyframe = 0;
1295  double pts_time;
1296 
/* The first nb0_frames iterations re-encode the previous frame. */
1297  if (i < nb0_frames && ost->last_frame) {
1298  in_picture = ost->last_frame;
1299  } else
1300  in_picture = next_picture;
1301 
1302  if (!in_picture)
1303  return;
1304 
1305  in_picture->pts = ost->sync_opts;
1306 
1307  if (!check_recording_time(ost))
1308  return;
1309 
1310  in_picture->quality = enc->global_quality;
1311  in_picture->pict_type = 0;
1312 
1313  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1314  in_picture->pts != AV_NOPTS_VALUE)
1315  ost->forced_kf_ref_pts = in_picture->pts;
1316 
/* Decide whether this frame must be forced to a keyframe: explicit
 * -force_key_frames times, an expression, or "source" keyframes. */
1317  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1318  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1319  if (ost->forced_kf_index < ost->forced_kf_count &&
1320  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1321  ost->forced_kf_index++;
1322  forced_keyframe = 1;
1323  } else if (ost->forced_keyframes_pexpr) {
1324  double res;
1325  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1326  res = av_expr_eval(ost->forced_keyframes_pexpr,
1327  ost->forced_keyframes_expr_const_values, NULL);
1328  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1329  ost->forced_keyframes_expr_const_values[FKF_N],
1330  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1331  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1332  ost->forced_keyframes_expr_const_values[FKF_T],
1333  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1334  res);
1335  if (res) {
1336  forced_keyframe = 1;
1337  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1338  ost->forced_keyframes_expr_const_values[FKF_N];
1339  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1340  ost->forced_keyframes_expr_const_values[FKF_T];
1341  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1342  }
1343 
1344  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1345  } else if ( ost->forced_keyframes
1346  && !strncmp(ost->forced_keyframes, "source", 6)
1347  && in_picture->key_frame==1
1348  && !i) {
1349  forced_keyframe = 1;
1350  }
1351 
1352  if (forced_keyframe) {
1353  in_picture->pict_type = AV_PICTURE_TYPE_I;
1354  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1355  }
1356 
/* NOTE(review): source line 1357 was dropped by the extraction here. */
1358  if (debug_ts) {
1359  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1360  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1361  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1362  enc->time_base.num, enc->time_base.den);
1363  }
1364 
1365  ost->frames_encoded++;
1366 
1367  ret = avcodec_send_frame(enc, in_picture);
1368  if (ret < 0)
1369  goto error;
1370  // Make sure Closed Captions will not be duplicated
/* NOTE(review): source lines 1371-1372 (presumably the
 * av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC) call this
 * comment refers to) were dropped by the extraction. */
1372 
/* Drain every packet the encoder has ready for this frame. */
1373  while (1) {
1375  ret = avcodec_receive_packet(enc, pkt);
1376  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1377  if (ret == AVERROR(EAGAIN))
1378  break;
1379  if (ret < 0)
1380  goto error;
1381 
1382  if (debug_ts) {
1383  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1384  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
/* NOTE(review): source lines 1385-1386 (the av_ts2str()/av_ts2timestr()
 * arguments of this av_log call) were dropped by the extraction. */
1387  }
1388 
/* NOTE(review): source line 1389 (the condition guarding this pts
 * assignment) was dropped by the extraction. */
1390  pkt->pts = ost->sync_opts;
1391 
1392  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1393 
1394  if (debug_ts) {
1395  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1396  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1397  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
1398  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
1399  }
1400 
1401  frame_size = pkt->size;
1402  output_packet(of, pkt, ost, 0);
1403 
1404  /* if two pass, output log */
1405  if (ost->logfile && enc->stats_out) {
1406  fprintf(ost->logfile, "%s", enc->stats_out);
1407  }
1408  }
1409  ost->sync_opts++;
1410  /*
1411  * For video, number of frames in == number of packets out.
1412  * But there may be reordering, so we can't throw away frames on encoder
1413  * flush, we need to limit them here, before they go into encoder.
1414  */
1415  ost->frame_number++;
1416 
1417  if (vstats_filename && frame_size)
/* NOTE(review): source line 1418 (presumably the do_video_stats(ost,
 * frame_size) call) was dropped by the extraction. */
1419  }
1420 
/* Keep a reference to the last encoded picture so it can be duplicated
 * by later calls (nb0_frames) or flushed at EOF. */
1421  if (!ost->last_frame)
1422  ost->last_frame = av_frame_alloc();
1423  av_frame_unref(ost->last_frame);
1424  if (next_picture && ost->last_frame)
1425  av_frame_ref(ost->last_frame, next_picture);
1426  else
1427  av_frame_free(&ost->last_frame);
1428 
1429  return;
1430 error:
1431  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1432  exit_program(1);
1433 }
1434 
1435 static double psnr(double d)
1436 {
1437  return -10.0 * log10(d);
1438 }
1439 
/*
 * Append one line of per-frame video statistics (frame number, quality,
 * PSNR, sizes, bitrates, picture type) to the file named by
 * -vstats_file / -vstats.
 * NOTE(review): the function signature on source line 1440 — presumably
 * "static void do_video_stats(OutputStream *ost, int frame_size)" given
 * the uses of 'ost' and 'frame_size' below — was dropped by the
 * doxygen extraction.
 */
1441 {
1442  AVCodecContext *enc;
1443  int frame_number;
1444  double ti1, bitrate, avg_bitrate;
1445 
1446  /* this is executed just the first time do_video_stats is called */
1447  if (!vstats_file) {
1448  vstats_file = fopen(vstats_filename, "w");
1449  if (!vstats_file) {
1450  perror("fopen");
1451  exit_program(1);
1452  }
1453  }
1454 
1455  enc = ost->enc_ctx;
1456  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1457  frame_number = ost->st->nb_frames;
/* vstats_version 2 additionally records the output file/stream index. */
1458  if (vstats_version <= 1) {
1459  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1460  ost->quality / (float)FF_QP2LAMBDA);
1461  } else {
1462  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1463  ost->quality / (float)FF_QP2LAMBDA);
1464  }
1465 
1466  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1467  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1468 
1469  fprintf(vstats_file,"f_size= %6d ", frame_size);
1470  /* compute pts value */
1471  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Avoid a division by ~0 for the very first frames. */
1472  if (ti1 < 0.01)
1473  ti1 = 0.01;
1474 
1475  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1476  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1477  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1478  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1479  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1480  }
1481 }
1482 
/*
 * Mark an output stream as completely finished (both encoder and muxer
 * side); with -shortest, finishing one stream finishes every stream of
 * the same output file.
 * NOTE(review): the signature on source line 1483 — presumably
 * "static void finish_output_stream(OutputStream *ost)" — and the loop
 * body on line 1492 (which marks the sibling streams finished) were
 * dropped by the doxygen extraction.
 */
1484 {
1485  OutputFile *of = output_files[ost->file_index];
1486  int i;
1487 
1488  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1489 
1490  if (of->shortest) {
1491  for (i = 0; i < of->ctx->nb_streams; i++)
1493  }
1494 }
1495 
1496 /**
1497  * Get and encode new output from any of the filtergraphs, without causing
1498  * activity.
1499  *
1500  * @return 0 for success, <0 for severe errors
1501  */
/* NOTE(review): doxygen extraction dropped several hyperlinked source
 * lines inside this function (1509, 1511, 1528-1529, 1541, 1544, 1547,
 * 1567) — mostly declarations and av_log/flag arguments. */
1502 static int reap_filters(int flush)
1503 {
1504  AVFrame *filtered_frame = NULL;
1505  int i;
1506 
1507  /* Reap all buffers present in the buffer sinks */
1508  for (i = 0; i < nb_output_streams; i++) {
/* NOTE(review): line 1509 (presumably "OutputStream *ost = ...") and
 * line 1511 (presumably "AVFilterContext *filter;") were dropped. */
1510  OutputFile *of = output_files[ost->file_index];
1512  AVCodecContext *enc = ost->enc_ctx;
1513  int ret = 0;
1514 
1515  if (!ost->filter || !ost->filter->graph->graph)
1516  continue;
1517  filter = ost->filter->filter;
1518 
1519  /*
1520  * Unlike video, with audio the audio frame size matters.
1521  * Currently we are fully reliant on the lavfi filter chain to
1522  * do the buffering deed for us, and thus the frame size parameter
1523  * needs to be set accordingly. Where does one get the required
1524  * frame size? From the initialized AVCodecContext of an audio
1525  * encoder. Thus, if we have gotten to an audio stream, initialize
1526  * the encoder earlier than receiving the first AVFrame.
1527  */
/* NOTE(review): lines 1528-1529 (the audio-type check and the
 * init_output_stream_wrapper call the comment above describes) were
 * dropped by the extraction. */
1530 
/* Lazily allocate the reusable packet and frame for this stream. */
1531  if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
1532  return AVERROR(ENOMEM);
1533  }
1534  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1535  return AVERROR(ENOMEM);
1536  }
1537  filtered_frame = ost->filtered_frame;
1538 
/* Pull every frame currently buffered in this stream's sink. */
1539  while (1) {
1540  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
/* NOTE(review): line 1541 (the flags argument of this call) was
 * dropped by the extraction. */
1542  if (ret < 0) {
1543  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1545  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1546  } else if (flush && ret == AVERROR_EOF) {
/* On EOF during a flush, push a NULL frame so video sync emits any
 * pending duplicated frames. */
1548  do_video_out(of, ost, NULL);
1549  }
1550  break;
1551  }
1552  if (ost->finished) {
1553  av_frame_unref(filtered_frame);
1554  continue;
1555  }
1556 
1557  switch (av_buffersink_get_type(filter)) {
1558  case AVMEDIA_TYPE_VIDEO:
1559  if (!ost->frame_aspect_ratio.num)
1560  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1561 
1562  do_video_out(of, ost, filtered_frame);
1563  break;
1564  case AVMEDIA_TYPE_AUDIO:
1565  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1566  enc->channels != filtered_frame->channels) {
1568  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1569  break;
1570  }
1571  do_audio_out(of, ost, filtered_frame);
1572  break;
1573  default:
1574  // TODO support subtitle filters
1575  av_assert0(0);
1576  }
1577 
1578  av_frame_unref(filtered_frame);
1579  }
1580  }
1581 
1582  return 0;
1583 }
1584 
/*
 * Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and (at verbose level) per-stream demux/decode and encode/mux counters.
 * 'total_size' is the total number of bytes written to the first output
 * file, or <0 if unknown.
 * NOTE(review): doxygen extraction dropped hyperlinked lines 1595, 1605
 * and 1667 inside this function.
 */
1585 static void print_final_stats(int64_t total_size)
1586 {
1587  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1588  uint64_t subtitle_size = 0;
1589  uint64_t data_size = 0;
1590  float percent = -1.0;
1591  int i, j;
1592  int pass1_used = 1;
1593 
/* Accumulate output byte counts per media type. */
1594  for (i = 0; i < nb_output_streams; i++) {
/* NOTE(review): line 1595 (presumably "OutputStream *ost =
 * output_streams[i];") was dropped by the extraction. */
1596  switch (ost->enc_ctx->codec_type) {
1597  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1598  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1599  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1600  default: other_size += ost->data_size; break;
1601  }
1602  extra_size += ost->enc_ctx->extradata_size;
1603  data_size += ost->data_size;
1604  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
/* NOTE(review): line 1605 (the second half of this condition) was
 * dropped by the extraction. */
1606  pass1_used = 0;
1607  }
1608 
/* Muxing overhead = container bytes beyond the raw stream payload. */
1609  if (data_size && total_size>0 && total_size >= data_size)
1610  percent = 100.0 * (total_size - data_size) / data_size;
1611 
1612  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1613  video_size / 1024.0,
1614  audio_size / 1024.0,
1615  subtitle_size / 1024.0,
1616  other_size / 1024.0,
1617  extra_size / 1024.0);
1618  if (percent >= 0.0)
1619  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1620  else
1621  av_log(NULL, AV_LOG_INFO, "unknown");
1622  av_log(NULL, AV_LOG_INFO, "\n");
1623 
1624  /* print verbose per-stream stats */
1625  for (i = 0; i < nb_input_files; i++) {
1626  InputFile *f = input_files[i];
1627  uint64_t total_packets = 0, total_size = 0;
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1630  i, f->ctx->url);
1631 
1632  for (j = 0; j < f->nb_streams; j++) {
1633  InputStream *ist = input_streams[f->ist_index + j];
1634  enum AVMediaType type = ist->dec_ctx->codec_type;
1635 
1636  total_size += ist->data_size;
1637  total_packets += ist->nb_packets;
1638 
1639  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1640  i, j, media_type_string(type));
1641  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1642  ist->nb_packets, ist->data_size);
1643 
1644  if (ist->decoding_needed) {
1645  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1646  ist->frames_decoded);
1647  if (type == AVMEDIA_TYPE_AUDIO)
1648  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1649  av_log(NULL, AV_LOG_VERBOSE, "; ");
1650  }
1651 
1652  av_log(NULL, AV_LOG_VERBOSE, "\n");
1653  }
1654 
1655  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1656  total_packets, total_size);
1657  }
1658 
1659  for (i = 0; i < nb_output_files; i++) {
1660  OutputFile *of = output_files[i];
1661  uint64_t total_packets = 0, total_size = 0;
1662 
1663  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1664  i, of->ctx->url);
1665 
1666  for (j = 0; j < of->ctx->nb_streams; j++) {
/* NOTE(review): line 1667 (presumably "OutputStream *ost =
 * output_streams[of->ost_index + j];") was dropped by the extraction. */
1668  enum AVMediaType type = ost->enc_ctx->codec_type;
1669 
1670  total_size += ost->data_size;
1671  total_packets += ost->packets_written;
1672 
1673  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1674  i, j, media_type_string(type));
1675  if (ost->encoding_needed) {
1676  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1677  ost->frames_encoded);
1678  if (type == AVMEDIA_TYPE_AUDIO)
1679  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1680  av_log(NULL, AV_LOG_VERBOSE, "; ");
1681  }
1682 
1683  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1684  ost->packets_written, ost->data_size);
1685 
1686  av_log(NULL, AV_LOG_VERBOSE, "\n");
1687  }
1688 
1689  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1690  total_packets, total_size);
1691  }
1692  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1693  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1694  if (pass1_used) {
1695  av_log(NULL, AV_LOG_WARNING, "\n");
1696  } else {
1697  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1698  }
1699  }
1700 }
1701 
/*
 * Periodically (and once at the end) print the familiar one-line progress
 * report ("frame= ... fps= ... size= ... time= ... bitrate= ... speed=")
 * to stderr, and, when -progress is active, emit the machine-readable
 * key=value form to progress_avio.
 * NOTE(review): doxygen extraction dropped several hyperlinked lines
 * inside this function (1744, 1812-1813, 1817-1819, 1868, 1897, 1901).
 */
1702 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1703 {
1704  AVBPrint buf, buf_script;
1705  OutputStream *ost;
1706  AVFormatContext *oc;
1707  int64_t total_size;
1708  AVCodecContext *enc;
1709  int frame_number, vid, i;
1710  double bitrate;
1711  double speed;
1712  int64_t pts = INT64_MIN + 1;
1713  static int64_t last_time = -1;
1714  static int first_report = 1;
1715  static int qp_histogram[52];
1716  int hours, mins, secs, us;
1717  const char *hours_sign;
1718  int ret;
1719  float t;
1720 
1721  if (!print_stats && !is_last_report && !progress_avio)
1722  return;
1723 
/* Rate-limit intermediate reports to one per stats_period. */
1724  if (!is_last_report) {
1725  if (last_time == -1) {
1726  last_time = cur_time;
1727  }
1728  if (((cur_time - last_time) < stats_period && !first_report) ||
1729  (first_report && nb_output_dumped < nb_output_files))
1730  return;
1731  last_time = cur_time;
1732  }
1733 
1734  t = (cur_time-timer_start) / 1000000.0;
1735 
1736 
1737  oc = output_files[0]->ctx;
1738 
1739  total_size = avio_size(oc->pb);
1740  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1741  total_size = avio_tell(oc->pb);
1742 
1743  vid = 0;
/* NOTE(review): line 1744 (presumably the av_bprint_init of 'buf') was
 * dropped by the extraction. */
1745  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1746  for (i = 0; i < nb_output_streams; i++) {
1747  float q = -1;
1748  ost = output_streams[i];
1749  enc = ost->enc_ctx;
1750  if (!ost->stream_copy)
1751  q = ost->quality / (float) FF_QP2LAMBDA;
1752 
/* Only the first video stream gets the full frame/fps display; later
 * video streams just append their quantizer. */
1753  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1754  av_bprintf(&buf, "q=%2.1f ", q);
1755  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1756  ost->file_index, ost->index, q);
1757  }
1758  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1759  float fps;
1760 
1761  frame_number = ost->frame_number;
1762  fps = t > 1 ? frame_number / t : 0;
1763  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1764  frame_number, fps < 9.95, fps, q);
1765  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1766  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1767  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1768  ost->file_index, ost->index, q);
1769  if (is_last_report)
1770  av_bprintf(&buf, "L");
1771  if (qp_hist) {
1772  int j;
1773  int qp = lrintf(q);
1774  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1775  qp_histogram[qp]++;
1776  for (j = 0; j < 32; j++)
1777  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1778  }
1779 
1780  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1781  int j;
1782  double error, error_sum = 0;
1783  double scale, scale_sum = 0;
1784  double p;
1785  char type[3] = { 'Y','U','V' };
1786  av_bprintf(&buf, "PSNR=");
1787  for (j = 0; j < 3; j++) {
1788  if (is_last_report) {
1789  error = enc->error[j];
1790  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1791  } else {
1792  error = ost->error[j];
1793  scale = enc->width * enc->height * 255.0 * 255.0;
1794  }
/* Chroma planes are subsampled: a quarter of the luma area. */
1795  if (j)
1796  scale /= 4;
1797  error_sum += error;
1798  scale_sum += scale;
1799  p = psnr(error / scale);
1800  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1801  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1802  ost->file_index, ost->index, type[j] | 32, p);
1803  }
1804  p = psnr(error_sum / scale_sum);
1805  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1806  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1807  ost->file_index, ost->index, p);
1808  }
1809  vid = 1;
1810  }
1811  /* compute min output value */
/* NOTE(review): lines 1812-1813 (the FFMAX/av_rescale_q expression that
 * updates 'pts', whose tail arguments follow) were dropped by the
 * extraction. */
1814  ost->st->time_base, AV_TIME_BASE_Q));
1815  if (copy_ts) {
1816  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
/* NOTE(review): lines 1817-1819 (the copy_ts_first_pts bookkeeping)
 * were dropped by the extraction. */
1820  }
1821  }
1822 
1823  if (is_last_report)
1824  nb_frames_drop += ost->last_dropped;
1825  }
1826 
/* Split the minimum output timestamp into sign, h:m:s and microseconds. */
1827  secs = FFABS(pts) / AV_TIME_BASE;
1828  us = FFABS(pts) % AV_TIME_BASE;
1829  mins = secs / 60;
1830  secs %= 60;
1831  hours = mins / 60;
1832  mins %= 60;
1833  hours_sign = (pts < 0) ? "-" : "";
1834 
1835  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1836  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1837 
1838  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1839  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1840  if (pts == AV_NOPTS_VALUE) {
1841  av_bprintf(&buf, "N/A ");
1842  } else {
1843  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1844  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1845  }
1846 
1847  if (bitrate < 0) {
1848  av_bprintf(&buf, "bitrate=N/A");
1849  av_bprintf(&buf_script, "bitrate=N/A\n");
1850  }else{
1851  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1852  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1853  }
1854 
1855  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1856  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1857  if (pts == AV_NOPTS_VALUE) {
1858  av_bprintf(&buf_script, "out_time_us=N/A\n");
1859  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1860  av_bprintf(&buf_script, "out_time=N/A\n");
1861  } else {
1862  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1863  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1864  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1865  hours_sign, hours, mins, secs, us);
1866  }
1867 
/* NOTE(review): line 1868 (the condition guarding the dup/drop line)
 * was dropped by the extraction. */
1869  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1870  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1871  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1872 
1873  if (speed < 0) {
1874  av_bprintf(&buf, " speed=N/A");
1875  av_bprintf(&buf_script, "speed=N/A\n");
1876  } else {
1877  av_bprintf(&buf, " speed=%4.3gx", speed);
1878  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1879  }
1880 
1881  if (print_stats || is_last_report) {
1882  const char end = is_last_report ? '\n' : '\r';
1883  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1884  fprintf(stderr, "%s %c", buf.str, end);
1885  } else
1886  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1887 
1888  fflush(stderr);
1889  }
1890  av_bprint_finalize(&buf, NULL);
1891 
1892  if (progress_avio) {
1893  av_bprintf(&buf_script, "progress=%s\n",
1894  is_last_report ? "end" : "continue");
1895  avio_write(progress_avio, buf_script.str,
1896  FFMIN(buf_script.len, buf_script.size - 1));
/* NOTE(review): line 1897 (presumably avio_flush(progress_avio)) was
 * dropped by the extraction. */
1898  av_bprint_finalize(&buf_script, NULL);
1899  if (is_last_report) {
1900  if ((ret = avio_closep(&progress_avio)) < 0)
/* NOTE(review): line 1901 (the av_log call these arguments belong to)
 * was dropped by the extraction. */
1902  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1903  }
1904  }
1905 
1906  first_report = 0;
1907 
1908  if (is_last_report)
1909  print_final_stats(total_size);
1910 }
1911 
/*
 * Populate an InputFilter's media parameters straight from the stream's
 * AVCodecParameters, for the case where no frame was ever decoded.
 * NOTE(review): the signature on source line 1912 — presumably
 * "static void ifilter_parameters_from_codecpar(InputFilter *ifilter,
 * AVCodecParameters *par)" given the uses below — was dropped by the
 * doxygen extraction.
 */
1913 {
1914  // We never got any input. Set a fake format, which will
1915  // come from libavformat.
1916  ifilter->format = par->format;
1917  ifilter->sample_rate = par->sample_rate;
1918  ifilter->channels = par->channels;
1919  ifilter->channel_layout = par->channel_layout;
1920  ifilter->width = par->width;
1921  ifilter->height = par->height;
1922  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1923 }
1924 
/*
 * At end of input: drain every encoder by sending a NULL frame and
 * muxing all remaining packets.  Streams whose encoder was never
 * initialized get their filtergraph configured first so they can still
 * be finished cleanly.
 * NOTE(review): doxygen extraction dropped hyperlinked lines 1930, 1942,
 * 1954, 1963, 1966, 1969-1970, 1988-1990 and 2016 inside this function.
 */
1925 static void flush_encoders(void)
1926 {
1927  int i, ret;
1928 
1929  for (i = 0; i < nb_output_streams; i++) {
/* NOTE(review): line 1930 (presumably "OutputStream *ost =
 * output_streams[i];") was dropped by the extraction. */
1931  AVCodecContext *enc = ost->enc_ctx;
1932  OutputFile *of = output_files[ost->file_index];
1933 
1934  if (!ost->encoding_needed)
1935  continue;
1936 
1937  // Try to enable encoding with no input frames.
1938  // Maybe we should just let encoding fail instead.
1939  if (!ost->initialized) {
1940  FilterGraph *fg = ost->filter->graph;
1941 
/* NOTE(review): line 1942 (the av_log call these arguments belong to)
 * was dropped by the extraction. */
1943  "Finishing stream %d:%d without any data written to it.\n",
1944  ost->file_index, ost->st->index);
1945 
1946  if (ost->filter && !fg->graph) {
1947  int x;
/* Give unconfigured inputs fallback parameters from codecpar so the
 * graph can be configured at all. */
1948  for (x = 0; x < fg->nb_inputs; x++) {
1949  InputFilter *ifilter = fg->inputs[x];
1950  if (ifilter->format < 0)
1951  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1952  }
1953 
/* NOTE(review): line 1954 (the ifilter_has_all_input_formats check
 * this 'continue' belongs to) was dropped by the extraction. */
1955  continue;
1956 
1957  ret = configure_filtergraph(fg);
1958  if (ret < 0) {
1959  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1960  exit_program(1);
1961  }
1962 
/* NOTE(review): line 1963 (presumably finish_output_stream(ost)) was
 * dropped by the extraction. */
1964  }
1965 
/* NOTE(review): line 1966 (presumably the init_output_stream_wrapper
 * call) was dropped by the extraction. */
1967  }
1968 
/* NOTE(review): lines 1969-1970 (the audio/video codec-type guard this
 * 'continue' belongs to) were dropped by the extraction. */
1970  continue;
1971 
/* Drain loop: keep sending NULL frames and receiving packets until the
 * encoder returns AVERROR_EOF. */
1972  for (;;) {
1973  const char *desc = NULL;
1974  AVPacket *pkt = ost->pkt;
1975  int pkt_size;
1976 
1977  switch (enc->codec_type) {
1978  case AVMEDIA_TYPE_AUDIO:
1979  desc = "audio";
1980  break;
1981  case AVMEDIA_TYPE_VIDEO:
1982  desc = "video";
1983  break;
1984  default:
1985  av_assert0(0);
1986  }
1987 
/* NOTE(review): lines 1988-1990 (presumably update_benchmark(NULL) and
 * the packet unref before receiving) were dropped by the extraction. */
1989 
1991  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1992  ret = avcodec_send_frame(enc, NULL);
1993  if (ret < 0) {
1994  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1995  desc,
1996  av_err2str(ret));
1997  exit_program(1);
1998  }
1999  }
2000 
2001  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2002  if (ret < 0 && ret != AVERROR_EOF) {
2003  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2004  desc,
2005  av_err2str(ret));
2006  exit_program(1);
2007  }
2008  if (ost->logfile && enc->stats_out) {
2009  fprintf(ost->logfile, "%s", enc->stats_out);
2010  }
2011  if (ret == AVERROR_EOF) {
2012  output_packet(of, pkt, ost, 1);
2013  break;
2014  }
2015  if (ost->finished & MUXER_FINISHED) {
/* NOTE(review): line 2016 (presumably av_packet_unref(pkt)) was
 * dropped by the extraction. */
2017  continue;
2018  }
2019  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
2020  pkt_size = pkt->size;
2021  output_packet(of, pkt, ost, 0);
2022  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2023  do_video_stats(ost, pkt_size);
2024  }
2025  }
2026  }
2027 }
2028 
2029 /*
2030  * Check whether a packet from ist should be written into ost at this time
2031  */
2033 {
2034  OutputFile *of = output_files[ost->file_index];
2035  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2036 
2037  if (ost->source_index != ist_index)
2038  return 0;
2039 
2040  if (ost->finished)
2041  return 0;
2042 
2043  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2044  return 0;
2045 
2046  return 1;
2047 }
2048 
2050 {
2051  OutputFile *of = output_files[ost->file_index];
2052  InputFile *f = input_files [ist->file_index];
2053  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2054  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2055  AVPacket *opkt = ost->pkt;
2056 
2057  av_packet_unref(opkt);
2058  // EOF: flush output bitstream filters.
2059  if (!pkt) {
2060  output_packet(of, opkt, ost, 1);
2061  return;
2062  }
2063 
2064  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2065  !ost->copy_initial_nonkeyframes)
2066  return;
2067 
2068  if (!ost->frame_number && !ost->copy_prior_start) {
2069  int64_t comp_start = start_time;
2070  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2071  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2072  if (pkt->pts == AV_NOPTS_VALUE ?
2073  ist->pts < comp_start :
2074  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2075  return;
2076  }
2077 
2078  if (of->recording_time != INT64_MAX &&
2079  ist->pts >= of->recording_time + start_time) {
2081  return;
2082  }
2083 
2084  if (f->recording_time != INT64_MAX) {
2085  start_time = f->ctx->start_time;
2086  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2087  start_time += f->start_time;
2088  if (ist->pts >= f->recording_time + start_time) {
2090  return;
2091  }
2092  }
2093 
2094  /* force the input stream PTS */
2095  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2096  ost->sync_opts++;
2097 
2098  if (av_packet_ref(opkt, pkt) < 0)
2099  exit_program(1);
2100 
2101  if (pkt->pts != AV_NOPTS_VALUE)
2102  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2103 
2104  if (pkt->dts == AV_NOPTS_VALUE) {
2105  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2106  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2108  if(!duration)
2109  duration = ist->dec_ctx->frame_size;
2110  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2111  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2112  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2113  /* dts will be set immediately afterwards to what pts is now */
2114  opkt->pts = opkt->dts - ost_tb_start_time;
2115  } else
2116  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2117  opkt->dts -= ost_tb_start_time;
2118 
2119  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2120 
2121  output_packet(of, opkt, ost, 0);
2122 }
2123 
2125 {
2126  AVCodecContext *dec = ist->dec_ctx;
2127 
2128  if (!dec->channel_layout) {
2129  char layout_name[256];
2130 
2131  if (dec->channels > ist->guess_layout_max)
2132  return 0;
2133  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2134  if (!dec->channel_layout)
2135  return 0;
2136  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2137  dec->channels, dec->channel_layout);
2138  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2139  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2140  }
2141  return 1;
2142 }
2143 
/*
 * Bookkeeping after a decode attempt: count successes/failures, and
 * abort (with -xerror) on decode errors or corrupt decoded frames.
 * NOTE(review): lines 2153-2154 — the condition testing the decoded
 * frame's corrupt/decode-error flags and the matching av_log call whose
 * arguments appear on line 2155 — were dropped by the doxygen
 * extraction.
 */
2144 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2145 {
2146  if (*got_output || ret<0)
2147  decode_error_stat[ret<0] ++;
2148 
2149  if (ret < 0 && exit_on_error)
2150  exit_program(1);
2151 
2152  if (*got_output && ist) {
2155  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2156  if (exit_on_error)
2157  exit_program(1);
2158  }
2159  }
2160 }
2161 
2162 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the filtergraph has a known
 * format, 0 otherwise.
 * NOTE(review): the signature on source line 2163 — presumably
 * "static int ifilter_has_all_input_formats(FilterGraph *fg)" — was
 * dropped by the doxygen extraction. */
2164 {
2165  int i;
2166  for (i = 0; i < fg->nb_inputs; i++) {
2167  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2168  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2169  return 0;
2170  }
2171  return 1;
2172 }
2173 
2175 {
2176  FilterGraph *fg = ifilter->graph;
2177  int need_reinit, ret, i;
2178 
2179  /* determine if the parameters for this input changed */
2180  need_reinit = ifilter->format != frame->format;
2181 
2182  switch (ifilter->ist->st->codecpar->codec_type) {
2183  case AVMEDIA_TYPE_AUDIO:
2184  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2185  ifilter->channels != frame->channels ||
2186  ifilter->channel_layout != frame->channel_layout;
2187  break;
2188  case AVMEDIA_TYPE_VIDEO:
2189  need_reinit |= ifilter->width != frame->width ||
2190  ifilter->height != frame->height;
2191  break;
2192  }
2193 
2194  if (!ifilter->ist->reinit_filters && fg->graph)
2195  need_reinit = 0;
2196 
2197  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2198  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2199  need_reinit = 1;
2200 
2201  if (need_reinit) {
2203  if (ret < 0)
2204  return ret;
2205  }
2206 
2207  /* (re)init the graph if possible, otherwise buffer the frame and return */
2208  if (need_reinit || !fg->graph) {
2209  for (i = 0; i < fg->nb_inputs; i++) {
2210  if (!ifilter_has_all_input_formats(fg)) {
2212  if (!tmp)
2213  return AVERROR(ENOMEM);
2215 
2216  if (!av_fifo_space(ifilter->frame_queue)) {
2217  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2218  if (ret < 0) {
2219  av_frame_free(&tmp);
2220  return ret;
2221  }
2222  }
2223  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2224  return 0;
2225  }
2226  }
2227 
2228  ret = reap_filters(1);
2229  if (ret < 0 && ret != AVERROR_EOF) {
2230  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2231  return ret;
2232  }
2233 
2234  ret = configure_filtergraph(fg);
2235  if (ret < 0) {
2236  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2237  return ret;
2238  }
2239  }
2240 
2242  if (ret < 0) {
2243  if (ret != AVERROR_EOF)
2244  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2245  return ret;
2246  }
2247 
2248  return 0;
2249 }
2250 
/* Signal EOF on one filtergraph input.
 * If the graph is configured, close the buffersrc at `pts`; otherwise fall
 * back to parameters from the codecpar and fail if the format is still
 * unknown for an audio/video input. */
2251 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2252 {
2253  int ret;
2254 
2255  ifilter->eof = 1;
2256 
2257  if (ifilter->filter) {
 /* NOTE(review): line 2258 missing — the buffersrc close/flush call that
  * produces the `ret` checked below. */
2259  if (ret < 0)
2260  return ret;
2261  } else {
2262  // the filtergraph was never configured
2263  if (ifilter->format < 0)
2264  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2265  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2266  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2267  return AVERROR_INVALIDDATA;
2268  }
2269  }
2270 
2271  return 0;
2272 }
2273 
2274 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2275 // There is the following difference: if you got a frame, you must call
2276 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2277 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2278 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2279 {
2280  int ret;
2281 
2282  *got_frame = 0;
2283 
2284  if (pkt) {
2285  ret = avcodec_send_packet(avctx, pkt);
2286  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2287  // decoded frames with avcodec_receive_frame() until done.
2288  if (ret < 0 && ret != AVERROR_EOF)
2289  return ret;
2290  }
2291 
2292  ret = avcodec_receive_frame(avctx, frame);
2293  if (ret < 0 && ret != AVERROR(EAGAIN))
2294  return ret;
2295  if (ret >= 0)
2296  *got_frame = 1;
2297 
2298  return 0;
2299 }
2300 
/* Distribute one decoded frame to every filtergraph input attached to the
 * stream. All but the last filter receive a reference copy (ist->filter_frame);
 * the last one gets the decoded frame itself, avoiding one extra ref.
 * NOTE(review): the signature line (2301: InputStream *ist,
 * AVFrame *decoded_frame) was dropped by the extraction. */
2302 {
2303  int i, ret;
2304  AVFrame *f;
2305 
2306  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2307  for (i = 0; i < ist->nb_filters; i++) {
2308  if (i < ist->nb_filters - 1) {
2309  f = ist->filter_frame;
 /* NOTE(review): line 2310 missing — refs decoded_frame into `f`,
  * producing the `ret` checked below. */
2311  if (ret < 0)
2312  break;
2313  } else
2314  f = decoded_frame;
2315  ret = ifilter_send_frame(ist->filters[i], f);
2316  if (ret == AVERROR_EOF)
2317  ret = 0; /* ignore */
2318  if (ret < 0) {
 /* NOTE(review): line 2319 missing — opening of the fatal av_log() call. */
2320  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2321  break;
2322  }
2323  }
2324  return ret;
2325 }
2326 
/* Decode one audio packet, derive/propagate timestamps, and hand the frame
 * to the filter network.
 * NOTE(review): the first signature line (2327: static int decode_audio(
 * InputStream *ist, AVPacket *pkt, int *got_output,) was dropped by the
 * extraction; several interior lines are also missing, flagged below. */
2328  int *decode_failed)
2329 {
 /* NOTE(review): lines 2330/2339/2341 missing — AVFrame *decoded_frame
  * declaration/assignment from ist->decoded_frame. */
2331  AVCodecContext *avctx = ist->dec_ctx;
2332  int ret, err = 0;
2333  AVRational decoded_frame_tb;
2334 
2335  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2336  return AVERROR(ENOMEM);
2337  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2338  return AVERROR(ENOMEM);
2340 
2342  ret = decode(avctx, decoded_frame, got_output, pkt);
2343  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2344  if (ret < 0)
2345  *decode_failed = 1;
2346 
2347  if (ret >= 0 && avctx->sample_rate <= 0) {
2348  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
 /* NOTE(review): line 2349 missing — sets ret to a decoder-error code. */
2350  }
2351 
2352  if (ret != AVERROR_EOF)
 /* NOTE(review): line 2353 missing — check_decode_result() call. */
2354 
2355  if (!*got_output || ret < 0)
2356  return ret;
2357 
 /* NOTE(review): line 2358 missing — samples_decoded accounting. */
2359  ist->frames_decoded++;
2360 
2361  /* increment next_dts to use for the case where the input stream does not
2362  have timestamps or there are multiple frames in the packet */
2363  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2364  avctx->sample_rate;
2365  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2366  avctx->sample_rate;
2367 
 /* pick a timebase for the frame pts: stream tb if the decoder or packet
  * provided a pts, otherwise fall back to the input dts in AV_TIME_BASE_Q */
2368  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2369  decoded_frame_tb = ist->st->time_base;
2370  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2371  decoded_frame->pts = pkt->pts;
2372  decoded_frame_tb = ist->st->time_base;
2373  }else {
2374  decoded_frame->pts = ist->dts;
2375  decoded_frame_tb = AV_TIME_BASE_Q;
2376  }
 /* NOTE(review): line 2377 missing — likely the check guarding rescale. */
2378  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2379  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2380  (AVRational){1, avctx->sample_rate});
 /* NOTE(review): lines 2381-2385 missing — send_frame_to_filters() call and
  * frame unref cleanup producing `err`. */
2383 
2386  return err < 0 ? err : ret;
2387 }
2388 
/* Decode one video packet, reconstruct a best-effort timestamp (falling back
 * to buffered dts values while draining at EOF), retrieve hwaccel data when
 * needed, and forward the frame to the filter network.
 * NOTE(review): several interior lines were dropped by the extraction and
 * are flagged inline below. */
2389 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2390  int *decode_failed)
2391 {
 /* NOTE(review): line 2392 missing — AVFrame *decoded_frame declaration. */
2393  int i, ret = 0, err = 0;
2394  int64_t best_effort_timestamp;
2395  int64_t dts = AV_NOPTS_VALUE;
2396 
2397  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2398  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2399  // skip the packet.
2400  if (!eof && pkt && pkt->size == 0)
2401  return 0;
2402 
2403  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2404  return AVERROR(ENOMEM);
2405  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2406  return AVERROR(ENOMEM);
 /* NOTE(review): line 2407 missing — decoded_frame assignment. */
2408  if (ist->dts != AV_NOPTS_VALUE)
2409  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2410  if (pkt) {
2411  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2412  }
2413 
2414  // The old code used to set dts on the drain packet, which does not work
2415  // with the new API anymore.
2416  if (eof) {
2417  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2418  if (!new)
2419  return AVERROR(ENOMEM);
2420  ist->dts_buffer = new;
2421  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2422  }
2423 
 /* NOTE(review): lines 2424-2425 missing — update_benchmark(NULL) and the
  * decode() call producing `ret`. */
2426  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2427  if (ret < 0)
2428  *decode_failed = 1;
2429 
2430  // The following line may be required in some cases where there is no parser
2431  // or the parser does not has_b_frames correctly
2432  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2433  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2434  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2435  } else
 /* NOTE(review): line 2436 missing — opening of the warning av_log() call. */
2437  "video_delay is larger in decoder than demuxer %d > %d.\n"
2438  "If you want to help, upload a sample "
2439  "of this file to https://streams.videolan.org/upload/ "
2440  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2441  ist->dec_ctx->has_b_frames,
2442  ist->st->codecpar->video_delay);
2443  }
2444 
2445  if (ret != AVERROR_EOF)
 /* NOTE(review): line 2446 missing — check_decode_result() call. */
2447 
2448  if (*got_output && ret >= 0) {
2449  if (ist->dec_ctx->width != decoded_frame->width ||
2450  ist->dec_ctx->height != decoded_frame->height ||
2451  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2452  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
 /* NOTE(review): lines 2453-2455 missing — the frame-side arguments. */
2456  ist->dec_ctx->width,
2457  ist->dec_ctx->height,
2458  ist->dec_ctx->pix_fmt);
2459  }
2460  }
2461 
2462  if (!*got_output || ret < 0)
2463  return ret;
2464 
2465  if(ist->top_field_first>=0)
 /* NOTE(review): line 2466 missing — propagates top_field_first to frame. */
2467 
2468  ist->frames_decoded++;
2469 
 /* NOTE(review): line 2470 missing — the hwaccel_retrieve_data guard. */
2471  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2472  if (err < 0)
2473  goto fail;
2474  }
 /* NOTE(review): line 2475 missing — stores hwaccel_retrieved_pix_fmt. */
2476 
2477  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2478  *duration_pts = decoded_frame->pkt_duration;
2479 
 /* with a forced input framerate, timestamps come from a CFR counter */
2480  if (ist->framerate.num)
2481  best_effort_timestamp = ist->cfr_next_pts++;
2482 
 /* while draining at EOF, fall back to buffered packet dts values */
2483  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2484  best_effort_timestamp = ist->dts_buffer[0];
2485 
2486  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2487  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2488  ist->nb_dts_buffer--;
2489  }
2490 
2491  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2492  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2493 
2494  if (ts != AV_NOPTS_VALUE)
2495  ist->next_pts = ist->pts = ts;
2496  }
2497 
2498  if (debug_ts) {
2499  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2500  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2501  ist->st->index, av_ts2str(decoded_frame->pts),
 /* NOTE(review): lines 2502/2505 missing — remaining av_log() arguments. */
2503  best_effort_timestamp,
2504  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2506  ist->st->time_base.num, ist->st->time_base.den);
2507  }
2508 
2509  if (ist->st->sample_aspect_ratio.num)
 /* NOTE(review): lines 2510-2512 missing — SAR propagation and the
  * send_frame_to_filters() call producing `err`. */
2511 
2513 
2514 fail:
 /* NOTE(review): lines 2515-2516 missing — frame unref cleanup. */
2517  return err < 0 ? err : ret;
2518 }
2519 
/* Decode one subtitle packet, optionally fix its duration against the
 * previous subtitle (-fix_sub_duration), feed sub2video and queue or emit
 * the subtitle to all matching output streams.
 * NOTE(review): the first signature line (2520: static int
 * transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,)
 * was dropped by the extraction. */
2521  int *decode_failed)
2522 {
 /* NOTE(review): line 2523 missing — AVSubtitle subtitle declaration. */
2524  int free_sub = 1;
2525  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2526  &subtitle, got_output, pkt);
2527 
 /* NOTE(review): line 2528 missing — check_decode_result() call. */
2529 
2530  if (ret < 0 || !*got_output) {
2531  *decode_failed = 1;
2532  if (!pkt->size)
2533  sub2video_flush(ist);
2534  return ret;
2535  }
2536 
2537  if (ist->fix_sub_duration) {
2538  int end = 1;
2539  if (ist->prev_sub.got_output) {
2540  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2541  1000, AV_TIME_BASE);
2542  if (end < ist->prev_sub.subtitle.end_display_time) {
2543  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2544  "Subtitle duration reduced from %"PRId32" to %d%s\n",
 /* NOTE(review): line 2545 missing — previous end_display_time argument. */
2546  end <= 0 ? ", dropping it" : "");
2547  ist->prev_sub.subtitle.end_display_time = end;
2548  }
2549  }
 /* delay output by one subtitle so the next one can trim this one's end */
2550  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2551  FFSWAP(int, ret, ist->prev_sub.ret);
 /* NOTE(review): line 2552 missing — FFSWAP of the AVSubtitle structs. */
2553  if (end <= 0)
2554  goto out;
2555  }
2556 
2557  if (!*got_output)
2558  return ret;
2559 
2560  if (ist->sub2video.frame) {
2561  sub2video_update(ist, INT64_MIN, &subtitle);
2562  } else if (ist->nb_filters) {
2563  if (!ist->sub2video.sub_queue)
2564  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2565  if (!ist->sub2video.sub_queue)
2566  exit_program(1);
2567  if (!av_fifo_space(ist->sub2video.sub_queue)) {
 /* NOTE(review): line 2568 missing — fifo grow call producing `ret`. */
2569  if (ret < 0)
2570  exit_program(1);
2571  }
 /* NOTE(review): line 2572 missing — writes subtitle into the queue. */
2573  free_sub = 0;
2574  }
2575 
2576  if (!subtitle.num_rects)
2577  goto out;
2578 
2579  ist->frames_decoded++;
2580 
2581  for (i = 0; i < nb_output_streams; i++) {
 /* NOTE(review): line 2582 missing — OutputStream *ost declaration. */
2583 
2584  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2585  exit_program(1);
2586  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2587  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2588  continue;
2589 
2590  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2591  }
2592 
2593 out:
2594  if (free_sub)
 /* NOTE(review): line 2595 missing — avsubtitle_free() call. */
2596  return ret;
2597 }
2598 
/* Send EOF to every filtergraph input attached to the stream, using the
 * stream's last pts converted back to stream time base.
 * NOTE(review): the signature line (2599: static int send_filter_eof(
 * InputStream *ist)) was dropped by the extraction. */
2600 {
2601  int i, ret;
2602  /* TODO keep pts also in stream time base to avoid converting back */
2603  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
 /* NOTE(review): line 2604 missing — the rounding-flags argument. */
2605 
2606  for (i = 0; i < ist->nb_filters; i++) {
2607  ret = ifilter_send_eof(ist->filters[i], pts);
2608  if (ret < 0)
2609  return ret;
2610  }
2611  return 0;
2612 }
2613 
2614 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet driver: maintains the input stream's dts/pts clocks,
 * runs the decode loop (audio/video/subtitle) when decoding is needed,
 * sends filter EOF after draining, advances timestamps for stream copy,
 * and finally hands the packet to every matching streamcopy output.
 * Returns 0 when the stream reached EOF, non-zero otherwise.
 * NOTE(review): several interior lines were dropped by the extraction and
 * are flagged inline below. */
2615 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2616 {
2617  int ret = 0, i;
2618  int repeating = 0;
2619  int eof_reached = 0;
2620 
2621  AVPacket *avpkt;
2622 
2623  if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
2624  return AVERROR(ENOMEM);
2625  avpkt = ist->pkt;
2626 
 /* initialize the stream clock from the first timestamp we see */
2627  if (!ist->saw_first_ts) {
2628  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2629  ist->pts = 0;
2630  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2631  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2632  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2633  }
2634  ist->saw_first_ts = 1;
2635  }
2636 
2637  if (ist->next_dts == AV_NOPTS_VALUE)
2638  ist->next_dts = ist->dts;
2639  if (ist->next_pts == AV_NOPTS_VALUE)
2640  ist->next_pts = ist->pts;
2641 
2642  if (pkt) {
2643  av_packet_unref(avpkt);
2644  ret = av_packet_ref(avpkt, pkt);
2645  if (ret < 0)
2646  return ret;
2647  }
2648 
2649  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2650  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2651  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2652  ist->next_pts = ist->pts = ist->dts;
2653  }
2654 
2655  // while we have more to decode or while the decoder did output something on EOF
2656  while (ist->decoding_needed) {
2657  int64_t duration_dts = 0;
2658  int64_t duration_pts = 0;
2659  int got_output = 0;
2660  int decode_failed = 0;
2661 
2662  ist->pts = ist->next_pts;
2663  ist->dts = ist->next_dts;
2664 
2665  switch (ist->dec_ctx->codec_type) {
2666  case AVMEDIA_TYPE_AUDIO:
2667  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2668  &decode_failed);
2669  av_packet_unref(avpkt);
2670  break;
2671  case AVMEDIA_TYPE_VIDEO:
2672  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2673  &decode_failed);
2674  if (!repeating || !pkt || got_output) {
2675  if (pkt && pkt->duration) {
2676  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2677  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 /* NOTE(review): line 2678 missing — `ticks` derived from the parser's
  * repeat_pict or ticks_per_frame. */
2679  duration_dts = ((int64_t)AV_TIME_BASE *
2680  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): line 2681 missing — the framerate.num * ticks_per_frame
  * divisor. */
2682  }
2683 
2684  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2685  ist->next_dts += duration_dts;
2686  }else
2687  ist->next_dts = AV_NOPTS_VALUE;
2688  }
2689 
2690  if (got_output) {
2691  if (duration_pts > 0) {
2692  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2693  } else {
2694  ist->next_pts += duration_dts;
2695  }
2696  }
2697  av_packet_unref(avpkt);
2698  break;
2699  case AVMEDIA_TYPE_SUBTITLE:
2700  if (repeating)
2701  break;
2702  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2703  if (!pkt && ret >= 0)
2704  ret = AVERROR_EOF;
2705  av_packet_unref(avpkt);
2706  break;
2707  default:
2708  return -1;
2709  }
2710 
2711  if (ret == AVERROR_EOF) {
2712  eof_reached = 1;
2713  break;
2714  }
2715 
2716  if (ret < 0) {
2717  if (decode_failed) {
2718  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2719  ist->file_index, ist->st->index, av_err2str(ret));
2720  } else {
2721  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2722  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2723  }
2724  if (!decode_failed || exit_on_error)
2725  exit_program(1);
2726  break;
2727  }
2728 
2729  if (got_output)
2730  ist->got_output = 1;
2731 
2732  if (!got_output)
2733  break;
2734 
2735  // During draining, we might get multiple output frames in this loop.
2736  // ffmpeg.c does not drain the filter chain on configuration changes,
2737  // which means if we send multiple frames at once to the filters, and
2738  // one of those frames changes configuration, the buffered frames will
2739  // be lost. This can upset certain FATE tests.
2740  // Decode only 1 frame per call on EOF to appease these FATE tests.
2741  // The ideal solution would be to rewrite decoding to use the new
2742  // decoding API in a better way.
2743  if (!pkt)
2744  break;
2745 
2746  repeating = 1;
2747  }
2748 
2749  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2750  /* except when looping we need to flush but not to send an EOF */
2751  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2752  int ret = send_filter_eof(ist);
2753  if (ret < 0) {
2754  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2755  exit_program(1);
2756  }
2757  }
2758 
2759  /* handle stream copy */
2760  if (!ist->decoding_needed && pkt) {
2761  ist->dts = ist->next_dts;
2762  switch (ist->dec_ctx->codec_type) {
2763  case AVMEDIA_TYPE_AUDIO:
2764  av_assert1(pkt->duration >= 0);
2765  if (ist->dec_ctx->sample_rate) {
2766  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2767  ist->dec_ctx->sample_rate;
2768  } else {
 /* NOTE(review): line 2769 missing — advances next_dts by the packet
  * duration rescaled to AV_TIME_BASE_Q. */
2770  }
2771  break;
2772  case AVMEDIA_TYPE_VIDEO:
2773  if (ist->framerate.num) {
2774  // TODO: Remove work-around for c99-to-c89 issue 7
2775  AVRational time_base_q = AV_TIME_BASE_Q;
2776  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2777  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2778  } else if (pkt->duration) {
 /* NOTE(review): line 2779 missing — advances next_dts by the packet
  * duration rescaled to AV_TIME_BASE_Q. */
2780  } else if(ist->dec_ctx->framerate.num != 0) {
2781  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2782  ist->next_dts += ((int64_t)AV_TIME_BASE *
2783  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): line 2784 missing — the framerate.num * ticks_per_frame
  * divisor. */
2785  }
2786  break;
2787  }
2788  ist->pts = ist->dts;
2789  ist->next_pts = ist->next_dts;
2790  }
2791  for (i = 0; i < nb_output_streams; i++) {
 /* NOTE(review): line 2792 missing — OutputStream *ost declaration. */
2793 
2794  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2795  exit_program(1);
2796  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2797  continue;
2798 
2799  do_streamcopy(ist, ost, pkt);
2800  }
2801 
2802  return !eof_reached;
2803 }
2804 
/* Build an SDP description covering all RTP output files and either print it
 * to stdout or write it to -sdp_file. Only runs once every output file has
 * had its header written. */
2805 static void print_sdp(void)
2806 {
2807  char sdp[16384];
2808  int i;
2809  int j;
2810  AVIOContext *sdp_pb;
2811  AVFormatContext **avc;
2812 
 /* wait until all muxers wrote their headers */
2813  for (i = 0; i < nb_output_files; i++) {
2814  if (!output_files[i]->header_written)
2815  return;
2816  }
2817 
2818  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2819  if (!avc)
2820  exit_program(1);
 /* collect only the RTP muxer contexts */
2821  for (i = 0, j = 0; i < nb_output_files; i++) {
2822  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2823  avc[j] = output_files[i]->ctx;
2824  j++;
2825  }
2826  }
2827 
2828  if (!j)
2829  goto fail;
2830 
2831  av_sdp_create(avc, j, sdp, sizeof(sdp));
2832 
2833  if (!sdp_filename) {
2834  printf("SDP:\n%s\n", sdp);
2835  fflush(stdout);
2836  } else {
2837  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2838  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2839  } else {
2840  avio_print(sdp_pb, sdp);
2841  avio_closep(&sdp_pb);
 /* NOTE(review): line 2842 missing — frees sdp_filename. */
2843  }
2844  }
2845 
2846 fail:
2847  av_freep(&avc);
2848 }
2849 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first one for which a requested/auto hwaccel can be
 * initialized; fall back to the first software format otherwise.
 * NOTE(review): the signature line (2850: static enum AVPixelFormat
 * get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)) was
 * dropped by the extraction. */
2851 {
2852  InputStream *ist = s->opaque;
2853  const enum AVPixelFormat *p;
2854  int ret;
2855 
2856  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 /* NOTE(review): line 2857 missing — pixel format descriptor lookup. */
2858  const AVCodecHWConfig *config = NULL;
2859  int i;
2860 
 /* first non-hwaccel format terminates the search (software fallback) */
2861  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2862  break;
2863 
2864  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2865  ist->hwaccel_id == HWACCEL_AUTO) {
2866  for (i = 0;; i++) {
2867  config = avcodec_get_hw_config(s->codec, i);
2868  if (!config)
2869  break;
2870  if (!(config->methods &
 /* NOTE(review): line 2871 missing — the HW_DEVICE_CTX method mask. */
2872  continue;
2873  if (config->pix_fmt == *p)
2874  break;
2875  }
2876  }
2877  if (config) {
2878  if (config->device_type != ist->hwaccel_device_type) {
2879  // Different hwaccel offered, ignore.
2880  continue;
2881  }
2882 
 /* NOTE(review): line 2883 missing — hwaccel device init call. */
2884  if (ret < 0) {
2885  if (ist->hwaccel_id == HWACCEL_GENERIC) {
 /* NOTE(review): line 2886 missing — opening of the fatal av_log(). */
2887  "%s hwaccel requested for input stream #%d:%d, "
2888  "but cannot be initialized.\n",
 /* NOTE(review): line 2889 missing — device type name argument. */
2890  ist->file_index, ist->st->index);
2891  return AV_PIX_FMT_NONE;
2892  }
2893  continue;
2894  }
2895  } else {
2896  const HWAccel *hwaccel = NULL;
2897  int i;
2898  for (i = 0; hwaccels[i].name; i++) {
2899  if (hwaccels[i].pix_fmt == *p) {
2900  hwaccel = &hwaccels[i];
2901  break;
2902  }
2903  }
2904  if (!hwaccel) {
2905  // No hwaccel supporting this pixfmt.
2906  continue;
2907  }
2908  if (hwaccel->id != ist->hwaccel_id) {
2909  // Does not match requested hwaccel.
2910  continue;
2911  }
2912 
2913  ret = hwaccel->init(s);
2914  if (ret < 0) {
 /* NOTE(review): line 2915 missing — opening of the fatal av_log(). */
2916  "%s hwaccel requested for input stream #%d:%d, "
2917  "but cannot be initialized.\n", hwaccel->name,
2918  ist->file_index, ist->st->index);
2919  return AV_PIX_FMT_NONE;
2920  }
2921  }
2922 
2923  if (ist->hw_frames_ctx) {
2924  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2925  if (!s->hw_frames_ctx)
2926  return AV_PIX_FMT_NONE;
2927  }
2928 
2929  ist->hwaccel_pix_fmt = *p;
2930  break;
2931  }
2932 
2933  return *p;
2934 }
2935 
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hwaccel frames, otherwise fall through to the default.
 * NOTE(review): the signature line (2936: static int get_buffer(
 * AVCodecContext *s, AVFrame *frame, int flags)) and the fallback return
 * (2943: avcodec_default_get_buffer2) were dropped by the extraction. */
2937 {
2938  InputStream *ist = s->opaque;
2939 
2940  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2941  return ist->hwaccel_get_buffer(s, frame, flags);
2942 
2944 }
2945 
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, set decoder options (threads,
 * subtitle handling, pkt_timebase) and call avcodec_open2(). On failure a
 * human-readable message is written into `error`. */
2946 static int init_input_stream(int ist_index, char *error, int error_len)
2947 {
2948  int ret;
2949  InputStream *ist = input_streams[ist_index];
2950 
2951  if (ist->decoding_needed) {
2952  const AVCodec *codec = ist->dec;
2953  if (!codec) {
2954  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2955  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2956  return AVERROR(EINVAL);
2957  }
2958 
2959  ist->dec_ctx->opaque = ist;
2960  ist->dec_ctx->get_format = get_format;
2961  ist->dec_ctx->get_buffer2 = get_buffer;
2962 #if LIBAVCODEC_VERSION_MAJOR < 60
2963  ist->dec_ctx->thread_safe_callbacks = 1;
2964 #endif
2965 
2966  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2967  (ist->decoding_needed & DECODING_FOR_OST)) {
2968  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 /* NOTE(review): line 2969 missing — the DECODING_FOR_FILTER guard for
  * the warning below. */
2970  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2971  }
2972 
2973  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2974 
2975  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2976  * audio, and video decoders such as cuvid or mediacodec */
2977  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2978 
2979  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2980  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2981  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
 /* NOTE(review): line 2982 missing — the AV_DISPOSITION_ATTACHED_PIC guard. */
2983  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2984 
 /* NOTE(review): line 2985 missing — hw device setup call producing `ret`. */
2986  if (ret < 0) {
2987  snprintf(error, error_len, "Device setup failed for "
2988  "decoder on input stream #%d:%d : %s",
2989  ist->file_index, ist->st->index, av_err2str(ret));
2990  return ret;
2991  }
2992 
2993  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2994  if (ret == AVERROR_EXPERIMENTAL)
2995  abort_codec_experimental(codec, 0);
2996 
2997  snprintf(error, error_len,
2998  "Error while opening decoder for input stream "
2999  "#%d:%d : %s",
3000  ist->file_index, ist->st->index, av_err2str(ret));
3001  return ret;
3002  }
 /* NOTE(review): line 3003 missing — assert_avoptions() on leftover opts. */
3004  }
3005 
3006  ist->next_pts = AV_NOPTS_VALUE;
3007  ist->next_dts = AV_NOPTS_VALUE;
3008 
3009  return 0;
3010 }
3011 
/* Return the input stream feeding this output stream, or NULL when the
 * output has no direct source (e.g. filter-only or attachment streams).
 * NOTE(review): the signature line (3012: static InputStream
 * *get_input_stream(OutputStream *ost)) was dropped by the extraction. */
3013 {
3014  if (ost->source_index >= 0)
3015  return input_streams[ost->source_index];
3016  return NULL;
3017 }
3018 
/* qsort() comparator for int64_t values: returns a negative, zero or
 * positive result for a < b, a == b, a > b respectively, without risking
 * overflow from a plain subtraction. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
3023 
3024 /* open the muxer when all the streams are initialized */
/* Once every output stream of the file is initialized: write the muxer
 * header, dump the format, optionally print the SDP, and flush any packets
 * that were queued while the muxer was not yet open.
 * NOTE(review): the signature line (3025: static int check_init_output_file(
 * OutputFile *of, int file_index)) was dropped by the extraction. */
3026 {
3027  int ret, i;
3028 
3029  for (i = 0; i < of->ctx->nb_streams; i++) {
 /* NOTE(review): line 3030 missing — OutputStream *ost lookup. */
3031  if (!ost->initialized)
3032  return 0;
3033  }
3034 
3035  of->ctx->interrupt_callback = int_cb;
3036 
3037  ret = avformat_write_header(of->ctx, &of->opts);
3038  if (ret < 0) {
 /* NOTE(review): line 3039 missing — opening of the error av_log(). */
3040  "Could not write header for output file #%d "
3041  "(incorrect codec parameters ?): %s\n",
 /* NOTE(review): line 3042 missing — file_index / av_err2str arguments. */
3043  return ret;
3044  }
3045  //assert_avoptions(of->opts);
3046  of->header_written = 1;
3047 
3048  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3049  nb_output_dumped++;
3050 
3051  if (sdp_filename || want_sdp)
3052  print_sdp();
3053 
3054  /* flush the muxing queues */
3055  for (i = 0; i < of->ctx->nb_streams; i++) {
 /* NOTE(review): line 3056 missing — OutputStream *ost lookup. */
3057 
3058  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3059  if (!av_fifo_size(ost->muxing_queue))
3060  ost->mux_timebase = ost->st->time_base;
3061 
3062  while (av_fifo_size(ost->muxing_queue)) {
3063  AVPacket *pkt;
3064  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3065  ost->muxing_queue_data_size -= pkt->size;
3066  write_packet(of, pkt, ost, 1);
3067  av_packet_free(&pkt);
3068  }
3069  }
3070 
3071  return 0;
3072 }
3073 
/* Initialize the output stream's bitstream filter chain (if any): copy the
 * stream's codec parameters in, init the BSF, then copy the (possibly
 * modified) parameters and time base back to the stream.
 * NOTE(review): the signature line (3074: static int init_output_bsfs(
 * OutputStream *ost)) was dropped by the extraction. */
3075 {
3076  AVBSFContext *ctx = ost->bsf_ctx;
3077  int ret;
3078 
3079  if (!ctx)
3080  return 0;
3081 
3082  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3083  if (ret < 0)
3084  return ret;
3085 
3086  ctx->time_base_in = ost->st->time_base;
3087 
3088  ret = av_bsf_init(ctx);
3089  if (ret < 0) {
3090  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3091  ctx->filter->name);
3092  return ret;
3093  }
3094 
 /* the BSF may have rewritten extradata/params — propagate them back */
3095  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3096  if (ret < 0)
3097  return ret;
3098  ost->st->time_base = ctx->time_base_out;
3099 
3100  return 0;
3101 }
3102 
/* Initialize an output stream in stream-copy mode: derive codec parameters
 * from the input stream (via a reference enc_ctx round-trip so encoder
 * options are applied), pick a codec tag valid for the container, and copy
 * frame rate, SAR, disposition, side data and duration hints.
 * NOTE(review): the signature line (3103: static int
 * init_output_stream_streamcopy(OutputStream *ost)) was dropped by the
 * extraction. */
3104 {
3105  OutputFile *of = output_files[ost->file_index];
 /* NOTE(review): line 3106 missing — InputStream *ist = get_input_stream(ost). */
3107  AVCodecParameters *par_dst = ost->st->codecpar;
3108  AVCodecParameters *par_src = ost->ref_par;
3109  AVRational sar;
3110  int i, ret;
3111  uint32_t codec_tag = par_dst->codec_tag;
3112 
3113  av_assert0(ist && !ost->filter);
3114 
3115  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3116  if (ret >= 0)
3117  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3118  if (ret < 0) {
 /* NOTE(review): line 3119 missing — opening of the fatal av_log(). */
3120  "Error setting up codec context options.\n");
3121  return ret;
3122  }
3123 
3124  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3125  if (ret < 0) {
 /* NOTE(review): line 3126 missing — opening of the fatal av_log(). */
3127  "Error getting reference codec parameters.\n");
3128  return ret;
3129  }
3130 
 /* keep the source tag only if the target container can represent it */
3131  if (!codec_tag) {
3132  unsigned int codec_tag_tmp;
3133  if (!of->ctx->oformat->codec_tag ||
3134  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3135  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3136  codec_tag = par_src->codec_tag;
3137  }
3138 
3139  ret = avcodec_parameters_copy(par_dst, par_src);
3140  if (ret < 0)
3141  return ret;
3142 
3143  par_dst->codec_tag = codec_tag;
3144 
3145  if (!ost->frame_rate.num)
3146  ost->frame_rate = ist->framerate;
3147  ost->st->avg_frame_rate = ost->frame_rate;
3148 
 /* NOTE(review): line 3149 missing — avformat_transfer_internal_stream_
  * timing_info() call producing `ret`. */
3150  if (ret < 0)
3151  return ret;
3152 
3153  // copy timebase while removing common factors
3154  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
 /* NOTE(review): line 3155 missing — derives time_base from frame rate. */
3156 
3157  // copy estimated duration as a hint to the muxer
3158  if (ost->st->duration <= 0 && ist->st->duration > 0)
3159  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3160 
3161  // copy disposition
3162  ost->st->disposition = ist->st->disposition;
3163 
3164  if (ist->st->nb_side_data) {
3165  for (i = 0; i < ist->st->nb_side_data; i++) {
3166  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3167  uint8_t *dst_data;
3168 
3169  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3170  if (!dst_data)
3171  return AVERROR(ENOMEM);
3172  memcpy(dst_data, sd_src->data, sd_src->size);
3173  }
3174  }
3175 
3176  if (ost->rotate_overridden) {
 /* NOTE(review): line 3177 missing — allocates DISPLAYMATRIX side data. */
3178  sizeof(int32_t) * 9);
3179  if (sd)
3180  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3181  }
3182 
3183  switch (par_dst->codec_type) {
3184  case AVMEDIA_TYPE_AUDIO:
3185  if (audio_volume != 256) {
3186  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3187  exit_program(1);
3188  }
3189  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3190  par_dst->block_align= 0;
3191  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3192  par_dst->block_align= 0;
3193  break;
3194  case AVMEDIA_TYPE_VIDEO:
3195  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3196  sar =
3197  av_mul_q(ost->frame_aspect_ratio,
3198  (AVRational){ par_dst->height, par_dst->width });
3199  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3200  "with stream copy may produce invalid files\n");
3201  }
3202  else if (ist->st->sample_aspect_ratio.num)
3203  sar = ist->st->sample_aspect_ratio;
3204  else
3205  sar = par_src->sample_aspect_ratio;
3206  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3207  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3208  ost->st->r_frame_rate = ist->st->r_frame_rate;
3209  break;
3210  }
3211 
3212  ost->mux_timebase = ist->st->time_base;
3213 
3214  return 0;
3215 }
3216 
/* Set the "encoder" metadata tag on the output stream to the libavcodec
 * banner plus encoder name, unless the user already set one or bitexact
 * mode is requested (then only "Lavc <name>" without version info).
 * NOTE(review): the signature line (3217: static void set_encoder_id(
 * OutputFile *of, OutputStream *ost)) was dropped by the extraction. */
3218 {
3219  AVDictionaryEntry *e;
3220 
3221  uint8_t *encoder_string;
3222  int encoder_string_len;
3223  int format_flags = 0;
3224  int codec_flags = ost->enc_ctx->flags;
3225 
3226  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3227  return;
3228 
 /* resolve the effective fflags/flags to detect bitexact mode */
3229  e = av_dict_get(of->opts, "fflags", NULL, 0);
3230  if (e) {
3231  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3232  if (!o)
3233  return;
3234  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3235  }
3236  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3237  if (e) {
3238  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3239  if (!o)
3240  return;
3241  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3242  }
3243 
3244  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3245  encoder_string = av_mallocz(encoder_string_len);
3246  if (!encoder_string)
3247  exit_program(1);
3248 
3249  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3250  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3251  else
3252  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3253  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3254  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): line 3255 missing — AV_DICT_DONT_STRDUP_VAL flag argument
  * (the dict takes ownership of encoder_string). */
3256 }
3257 
/* Parse the -force_key_frames expression: a comma-separated list of
 * timestamps, where "chapters[+offset]" expands to all chapter start times
 * of the output file. Results are rescaled to the encoder time base,
 * sorted, and stored in ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): the first signature line (3258: static void
 * parse_forced_key_frames(char *kf, OutputStream *ost,) was dropped by the
 * extraction. */
3259  AVCodecContext *avctx)
3260 {
3261  char *p;
3262  int n = 1, i, size, index = 0;
3263  int64_t t, *pts;
3264 
 /* count entries = commas + 1 */
3265  for (p = kf; *p; p++)
3266  if (*p == ',')
3267  n++;
3268  size = n;
3269  pts = av_malloc_array(size, sizeof(*pts));
3270  if (!pts) {
3271  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3272  exit_program(1);
3273  }
3274 
3275  p = kf;
3276  for (i = 0; i < n; i++) {
3277  char *next = strchr(p, ',');
3278 
3279  if (next)
3280  *next++ = 0;
3281 
3282  if (!memcmp(p, "chapters", 8)) {
3283 
3284  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3285  int j;
3286 
 /* one entry per chapter replaces the single "chapters" token */
3287  if (avf->nb_chapters > INT_MAX - size ||
3288  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3289  sizeof(*pts)))) {
 /* NOTE(review): line 3290 missing — opening of the fatal av_log(). */
3291  "Could not allocate forced key frames array.\n");
3292  exit_program(1);
3293  }
3294  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3295  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3296 
3297  for (j = 0; j < avf->nb_chapters; j++) {
3298  AVChapter *c = avf->chapters[j];
3299  av_assert1(index < size);
3300  pts[index++] = av_rescale_q(c->start, c->time_base,
3301  avctx->time_base) + t;
3302  }
3303 
3304  } else {
3305 
3306  t = parse_time_or_die("force_key_frames", p, 1);
3307  av_assert1(index < size);
3308  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3309 
3310  }
3311 
3312  p = next;
3313  }
3314 
3315  av_assert0(index == size);
3316  qsort(pts, size, sizeof(*pts), compare_int64);
3317  ost->forced_kf_count = size;
3318  ost->forced_kf_pts = pts;
3319 }
3320 
/*
 * Choose the encoder time base for ost->enc_ctx:
 *   - enc_timebase.num > 0: user supplied an explicit time base, use it;
 *   - enc_timebase.num < 0: copy the input stream's time base when an input
 *     stream exists, otherwise warn and fall through to the default;
 *   - otherwise: use default_time_base supplied by the caller.
 * NOTE(review): rendered listing — line 3323 (presumably the declaration of
 * `ist` via get_input_stream(ost)) is missing from this view; verify against
 * the original source.
 */
3321 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3322 {
3324  AVCodecContext *enc_ctx = ost->enc_ctx;
3325  AVFormatContext *oc;
3326 
3327  if (ost->enc_timebase.num > 0) {
3328  enc_ctx->time_base = ost->enc_timebase;
3329  return;
3330  }
3331 
3332  if (ost->enc_timebase.num < 0) {
3333  if (ist) {
3334  enc_ctx->time_base = ist->st->time_base;
3335  return;
3336  }
3337 
3338  oc = output_files[ost->file_index]->ctx;
3339  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3340  }
3341 
3342  enc_ctx->time_base = default_time_base;
3343 }
3344 
/*
 * init_output_stream_encode(): fill the encoder context (dimensions, formats,
 * rates, time base, field order, forced keyframes) from the configured filter
 * graph sink and, when available, the corresponding input stream/decoder.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): rendered listing — several source lines were dropped by the
 * extraction (e.g. 3345 signature, 3347/3349 declarations of dec_ctx/ist,
 * 3365, 3374-3375, 3387, 3415, 3421, 3430, 3444, 3449, 3452, 3463, 3473,
 * 3479/3481, 3489, 3491); code below is left exactly as rendered.
 */
3346 {
3348  AVCodecContext *enc_ctx = ost->enc_ctx;
3350  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3351  int j, ret;
3352 
3353  set_encoder_id(output_files[ost->file_index], ost);
3354 
3355  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3356  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3357  // which have to be filtered out to prevent leaking them to output files.
3358  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3359 
3360  if (ist) {
3361  ost->st->disposition = ist->st->disposition;
3362 
3363  dec_ctx = ist->dec_ctx;
3364 
3366  } else {
/* No mapped input: if this is the only stream of its type, presumably mark
 * it default — TODO confirm, the disposition assignment line is missing. */
3367  for (j = 0; j < oc->nb_streams; j++) {
3368  AVStream *st = oc->streams[j];
3369  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3370  break;
3371  }
3372  if (j == oc->nb_streams)
3373  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3376  }
3377 
/* Pick the output frame rate: CLI option, filter sink, input framerate,
 * input r_frame_rate, finally a 25 fps fallback (with a warning). */
3378  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3379  if (!ost->frame_rate.num)
3380  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3381  if (ist && !ost->frame_rate.num)
3382  ost->frame_rate = ist->framerate;
3383  if (ist && !ost->frame_rate.num)
3384  ost->frame_rate = ist->st->r_frame_rate;
3385  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3386  ost->frame_rate = (AVRational){25, 1};
3388  "No information "
3389  "about the input framerate is available. Falling "
3390  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3391  "if you want a different framerate.\n",
3392  ost->file_index, ost->index);
3393  }
3394 
3395  if (ost->max_frame_rate.num &&
3396  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3397  !ost->frame_rate.den))
3398  ost->frame_rate = ost->max_frame_rate;
3399 
3400  if (ost->enc->supported_framerates && !ost->force_fps) {
3401  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3402  ost->frame_rate = ost->enc->supported_framerates[idx];
3403  }
3404  // reduce frame rate for mpeg4 to be within the spec limits
3405  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3406  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3407  ost->frame_rate.num, ost->frame_rate.den, 65535);
3408  }
3409  }
3410 
3411  switch (enc_ctx->codec_type) {
3412  case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come straight from the buffersink of the filter graph. */
3413  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3414  if (dec_ctx)
3416  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3417  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3418  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3419  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3420 
3422  break;
3423 
3424  case AVMEDIA_TYPE_VIDEO:
3425  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3426 
3427  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3428  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3429  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3431  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3432  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3433  }
3434 
3435  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3436  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3437  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3438  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3439  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3440  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3441 
3442  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3443  if (dec_ctx)
3445  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3446 
3447  if (frame) {
3448  enc_ctx->color_range = frame->color_range;
3450  enc_ctx->color_trc = frame->color_trc;
3451  enc_ctx->colorspace = frame->colorspace;
3453  }
3454 
3455  enc_ctx->framerate = ost->frame_rate;
3456 
3457  ost->st->avg_frame_rate = ost->frame_rate;
3458 
3459  if (!dec_ctx ||
3460  enc_ctx->width != dec_ctx->width ||
3461  enc_ctx->height != dec_ctx->height ||
3462  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3464  }
3465 
/* -top option: 0 = bottom field first, 1 = top field first, -1 = auto. */
3466  if (ost->top_field_first == 0) {
3467  enc_ctx->field_order = AV_FIELD_BB;
3468  } else if (ost->top_field_first == 1) {
3469  enc_ctx->field_order = AV_FIELD_TT;
3470  }
3471 
3472  if (frame) {
3474  ost->top_field_first >= 0)
3475  frame->top_field_first = !!ost->top_field_first;
3476 
3477  if (frame->interlaced_frame) {
3478  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3480  else
3482  } else
3483  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3484  }
3485 
3486  if (ost->forced_keyframes) {
3487  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3488  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3490  if (ret < 0) {
3492  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3493  return ret;
3494  }
3495  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3496  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3497  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3498  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3499 
3500  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3501  // parse it only for static kf timings
3502  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3503  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3504  }
3505  }
3506  break;
3507  case AVMEDIA_TYPE_SUBTITLE:
3508  enc_ctx->time_base = AV_TIME_BASE_Q;
3509  if (!enc_ctx->width) {
3510  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3511  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3512  }
3513  break;
3514  case AVMEDIA_TYPE_DATA:
3515  break;
3516  default:
3517  abort();
3518  break;
3519  }
3520 
3521  ost->mux_timebase = enc_ctx->time_base;
3522 
3523  return 0;
3524 }
3525 
/*
 * init_output_stream(): fully initialize one output stream — open the
 * encoder (or init stream-copy), propagate subtitle headers and side data,
 * apply the user -disposition flags, set up bitstream filters, and finally
 * try to write the output file header via check_init_output_file().
 * On failure an explanatory message is written into error/error_len and a
 * negative AVERROR code is returned.
 * NOTE(review): rendered listing — several lines were dropped by the
 * extraction (e.g. 3526 signature, 3536, 3558, 3603, 3605, 3654); code
 * below is left exactly as rendered.
 */
3527  char *error, int error_len)
3528 {
3529  int ret = 0;
3530 
3531  if (ost->encoding_needed) {
3532  const AVCodec *codec = ost->enc;
3533  AVCodecContext *dec = NULL;
3534  InputStream *ist;
3535 
3537  if (ret < 0)
3538  return ret;
3539 
3540  if ((ist = get_input_stream(ost)))
3541  dec = ist->dec_ctx;
3542  if (dec && dec->subtitle_header) {
3543  /* ASS code assumes this buffer is null terminated so add extra byte. */
3544  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3545  if (!ost->enc_ctx->subtitle_header)
3546  return AVERROR(ENOMEM);
3547  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3548  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3549  }
3550  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3551  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when the encoder has no own defaults and
 * the user gave neither -b nor -ab. */
3552  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3553  !codec->defaults &&
3554  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3555  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3556  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3557 
3559  if (ret < 0) {
3560  snprintf(error, error_len, "Device setup failed for "
3561  "encoder on output stream #%d:%d : %s",
3562  ost->file_index, ost->index, av_err2str(ret));
3563  return ret;
3564  }
3565 
/* Subtitle transcoding is only supported within the same class
 * (text->text or bitmap->bitmap). */
3566  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3567  int input_props = 0, output_props = 0;
3568  AVCodecDescriptor const *input_descriptor =
3569  avcodec_descriptor_get(dec->codec_id);
3570  AVCodecDescriptor const *output_descriptor =
3571  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3572  if (input_descriptor)
3573  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3574  if (output_descriptor)
3575  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3576  if (input_props && output_props && input_props != output_props) {
3577  snprintf(error, error_len,
3578  "Subtitle encoding currently only possible from text to text "
3579  "or bitmap to bitmap");
3580  return AVERROR_INVALIDDATA;
3581  }
3582  }
3583 
3584  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3585  if (ret == AVERROR_EXPERIMENTAL)
3586  abort_codec_experimental(codec, 1);
3587  snprintf(error, error_len,
3588  "Error while opening encoder for output stream #%d:%d - "
3589  "maybe incorrect parameters such as bit_rate, rate, width or height",
3590  ost->file_index, ost->index);
3591  return ret;
3592  }
3593  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3594  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3595  av_buffersink_set_frame_size(ost->filter->filter,
3596  ost->enc_ctx->frame_size);
3597  assert_avoptions(ost->encoder_opts);
3598  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3599  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3600  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3601  " It takes bits/s as argument, not kbits/s\n");
3602 
3604  if (ret < 0) {
3606  "Error initializing the output stream codec context.\n");
3607  exit_program(1);
3608  }
3609 
/* Copy encoder-produced global side data onto the output stream. */
3610  if (ost->enc_ctx->nb_coded_side_data) {
3611  int i;
3612 
3613  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3614  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3615  uint8_t *dst_data;
3616 
3617  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3618  if (!dst_data)
3619  return AVERROR(ENOMEM);
3620  memcpy(dst_data, sd_src->data, sd_src->size);
3621  }
3622  }
3623 
3624  /*
3625  * Add global input side data. For now this is naive, and copies it
3626  * from the input stream's global side data. All side data should
3627  * really be funneled over AVFrame and libavfilter, then added back to
3628  * packet side data, and then potentially using the first packet for
3629  * global side data.
3630  */
3631  if (ist) {
3632  int i;
3633  for (i = 0; i < ist->st->nb_side_data; i++) {
3634  AVPacketSideData *sd = &ist->st->side_data[i];
3635  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3636  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3637  if (!dst)
3638  return AVERROR(ENOMEM);
3639  memcpy(dst, sd->data, sd->size);
3640  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3641  av_display_rotation_set((uint32_t *)dst, 0);
3642  }
3643  }
3644  }
3645 
3646  // copy timebase while removing common factors
3647  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3648  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3649 
3650  // copy estimated duration as a hint to the muxer
3651  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3652  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3653  } else if (ost->stream_copy) {
3655  if (ret < 0)
3656  return ret;
3657  }
3658 
3659  // parse user provided disposition, and update stream values
3660  if (ost->disposition) {
3661  static const AVOption opts[] = {
3662  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3663  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3664  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3665  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3666  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3667  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3668  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3669  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3670  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3671  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3672  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3673  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3674  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3675  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3676  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3677  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3678  { NULL },
3679  };
3680  static const AVClass class = {
3681  .class_name = "",
3682  .item_name = av_default_item_name,
3683  .option = opts,
3684  .version = LIBAVUTIL_VERSION_INT,
3685  };
3686  const AVClass *pclass = &class;
3687 
3688  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3689  if (ret < 0)
3690  return ret;
3691  }
3692 
3693  /* initialize bitstream filters for the output stream
3694  * needs to be done here, because the codec id for streamcopy is not
3695  * known until now */
3696  ret = init_output_bsfs(ost);
3697  if (ret < 0)
3698  return ret;
3699 
3700  ost->initialized = 1;
3701 
3702  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3703  if (ret < 0)
3704  return ret;
3705 
3706  return ret;
3707 }
3708 
/*
 * Warn once per stream index when a packet arrives for a stream that
 * appeared after the initial probe of the input file; nb_streams_warn
 * tracks the highest index already reported.
 * NOTE(review): rendered listing — line 3718 (presumably the media-type
 * string argument of the av_log call) is missing from this view.
 */
3709 static void report_new_stream(int input_index, AVPacket *pkt)
3710 {
3711  InputFile *file = input_files[input_index];
3712  AVStream *st = file->ctx->streams[pkt->stream_index];
3713 
3714  if (pkt->stream_index < file->nb_streams_warn)
3715  return;
3716  av_log(file->ctx, AV_LOG_WARNING,
3717  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3719  input_index, pkt->stream_index,
3720  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3721  file->nb_streams_warn = pkt->stream_index + 1;
3722 }
3723 
/*
 * transcode_init(): one-time setup before the main transcode loop —
 * resolve source indices for complex filtergraph outputs, arm framerate
 * emulation, open all input decoders, initialize stream-copy and
 * subtitle/data output streams, discard unused programs, write headers
 * for stream-less outputs, and dump the stream mapping to the log.
 * Returns 0 on success or a negative AVERROR (after logging `error`).
 * NOTE(review): rendered listing — a few lines were dropped by the
 * extraction (e.g. 3749/3787 InputFile declarations, 3777, 3780, 3900);
 * code below is left exactly as rendered.
 */
3724 static int transcode_init(void)
3725 {
3726  int ret = 0, i, j, k;
3727  AVFormatContext *oc;
3728  OutputStream *ost;
3729  InputStream *ist;
3730  char error[1024] = {0};
3731 
/* Attach a best-effort source_index to outputs fed by single-input
 * complex filtergraphs, so the mapping printout below can name a source. */
3732  for (i = 0; i < nb_filtergraphs; i++) {
3733  FilterGraph *fg = filtergraphs[i];
3734  for (j = 0; j < fg->nb_outputs; j++) {
3735  OutputFilter *ofilter = fg->outputs[j];
3736  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3737  continue;
3738  if (fg->nb_inputs != 1)
3739  continue;
3740  for (k = nb_input_streams-1; k >= 0 ; k--)
3741  if (fg->inputs[0]->ist == input_streams[k])
3742  break;
3743  ofilter->ost->source_index = k;
3744  }
3745  }
3746 
3747  /* init framerate emulation */
3748  for (i = 0; i < nb_input_files; i++) {
3750  if (ifile->rate_emu)
3751  for (j = 0; j < ifile->nb_streams; j++)
3752  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3753  }
3754 
3755  /* init input streams */
3756  for (i = 0; i < nb_input_streams; i++)
3757  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3758  for (i = 0; i < nb_output_streams; i++) {
3759  ost = output_streams[i];
3760  avcodec_close(ost->enc_ctx);
3761  }
3762  goto dump_format;
3763  }
3764 
3765  /*
3766  * initialize stream copy and subtitle/data streams.
3767  * Encoded AVFrame based streams will get initialized as follows:
3768  * - when the first AVFrame is received in do_video_out
3769  * - just before the first AVFrame is received in either transcode_step
3770  * or reap_filters due to us requiring the filter chain buffer sink
3771  * to be configured with the correct audio frame size, which is only
3772  * known after the encoder is initialized.
3773  */
3774  for (i = 0; i < nb_output_streams; i++) {
3775  if (!output_streams[i]->stream_copy &&
3776  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3778  continue;
3779 
3781  if (ret < 0)
3782  goto dump_format;
3783  }
3784 
3785  /* discard unused programs */
3786  for (i = 0; i < nb_input_files; i++) {
3788  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3789  AVProgram *p = ifile->ctx->programs[j];
3790  int discard = AVDISCARD_ALL;
3791 
3792  for (k = 0; k < p->nb_stream_indexes; k++)
3793  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3794  discard = AVDISCARD_DEFAULT;
3795  break;
3796  }
3797  p->discard = discard;
3798  }
3799  }
3800 
3801  /* write headers for files with no streams */
3802  for (i = 0; i < nb_output_files; i++) {
3803  oc = output_files[i]->ctx;
3804  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3806  if (ret < 0)
3807  goto dump_format;
3808  }
3809  }
3810 
3811  dump_format:
3812  /* dump the stream mapping */
3813  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3814  for (i = 0; i < nb_input_streams; i++) {
3815  ist = input_streams[i];
3816 
3817  for (j = 0; j < ist->nb_filters; j++) {
3818  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3819  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3820  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3821  ist->filters[j]->name);
3822  if (nb_filtergraphs > 1)
3823  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3824  av_log(NULL, AV_LOG_INFO, "\n");
3825  }
3826  }
3827  }
3828 
3829  for (i = 0; i < nb_output_streams; i++) {
3830  ost = output_streams[i];
3831 
3832  if (ost->attachment_filename) {
3833  /* an attached file */
3834  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3835  ost->attachment_filename, ost->file_index, ost->index);
3836  continue;
3837  }
3838 
3839  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3840  /* output from a complex graph */
3841  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3842  if (nb_filtergraphs > 1)
3843  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3844 
3845  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3846  ost->index, ost->enc ? ost->enc->name : "?");
3847  continue;
3848  }
3849 
3850  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3851  input_streams[ost->source_index]->file_index,
3852  input_streams[ost->source_index]->st->index,
3853  ost->file_index,
3854  ost->index);
3855  if (ost->sync_ist != input_streams[ost->source_index])
3856  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3857  ost->sync_ist->file_index,
3858  ost->sync_ist->st->index);
3859  if (ost->stream_copy)
3860  av_log(NULL, AV_LOG_INFO, " (copy)");
3861  else {
3862  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3863  const AVCodec *out_codec = ost->enc;
3864  const char *decoder_name = "?";
3865  const char *in_codec_name = "?";
3866  const char *encoder_name = "?";
3867  const char *out_codec_name = "?";
3868  const AVCodecDescriptor *desc;
3869 
3870  if (in_codec) {
3871  decoder_name = in_codec->name;
3872  desc = avcodec_descriptor_get(in_codec->id);
3873  if (desc)
3874  in_codec_name = desc->name;
3875  if (!strcmp(decoder_name, in_codec_name))
3876  decoder_name = "native";
3877  }
3878 
3879  if (out_codec) {
3880  encoder_name = out_codec->name;
3881  desc = avcodec_descriptor_get(out_codec->id);
3882  if (desc)
3883  out_codec_name = desc->name;
3884  if (!strcmp(encoder_name, out_codec_name))
3885  encoder_name = "native";
3886  }
3887 
3888  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3889  in_codec_name, decoder_name,
3890  out_codec_name, encoder_name);
3891  }
3892  av_log(NULL, AV_LOG_INFO, "\n");
3893  }
3894 
3895  if (ret) {
3896  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3897  return ret;
3898  }
3899 
3901 
3902  return 0;
3903 }
3904 
3905 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/*
 * NOTE(review): rendered listing — line 3911 (the OutputStream declaration)
 * and line 3921 (the body of the inner loop, presumably closing sibling
 * streams of a finished file) are missing from this view.
 */
3906 static int need_output(void)
3907 {
3908  int i;
3909 
3910  for (i = 0; i < nb_output_streams; i++) {
3912  OutputFile *of = output_files[ost->file_index];
3913  AVFormatContext *os = output_files[ost->file_index]->ctx;
3914 
/* Skip streams that are done, or whose file hit -fs (limit_filesize). */
3915  if (ost->finished ||
3916  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3917  continue;
3918  if (ost->frame_number >= ost->max_frames) {
3919  int j;
3920  for (j = 0; j < of->ctx->nb_streams; j++)
3922  continue;
3923  }
3924 
3925  return 1;
3926  }
3927 
3928  return 0;
3929 }
3930 
3931 /**
3932  * Select the output stream to process.
3933  *
3934  * @return selected output stream, or NULL if none available
3935  */
/*
 * Picks the not-yet-initialized stream first, otherwise the unfinished
 * stream with the smallest muxed DTS (rescaled to AV_TIME_BASE_Q), so
 * outputs advance roughly in lockstep.
 * NOTE(review): rendered listing — the signature line (3936), the
 * OutputStream declaration (3943) and an av_log prefix line (3948) are
 * missing from this view.
 */
3937 {
3938  int i;
3939  int64_t opts_min = INT64_MAX;
3940  OutputStream *ost_min = NULL;
3941 
3942  for (i = 0; i < nb_output_streams; i++) {
3944  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3945  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3946  AV_TIME_BASE_Q);
3947  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3949  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3950  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3951 
3952  if (!ost->initialized && !ost->inputs_done)
3953  return ost;
3954 
3955  if (!ost->finished && opts < opts_min) {
3956  opts_min = opts;
3957  ost_min = ost->unavailable ? NULL : ost;
3958  }
3959  }
3960  return ost_min;
3961 }
3962 
/* Turn terminal echo on stdin on or off; a no-op when termios is absent. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term_attr;

    if (tcgetattr(0, &term_attr) != 0)
        return; /* stdin is not a tty, or the query failed */

    if (on)
        term_attr.c_lflag |= ECHO;
    else
        term_attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &term_attr);
#endif
}
3974 
/*
 * Poll for interactive keyboard commands (at most every 100ms, never when
 * running as a daemon): q quits, +/- change log level, s toggles the QP
 * histogram, h cycles packet/hex dumping, c/C send filter commands, d/D set
 * decoder/encoder debug flags, ? prints help. Returns AVERROR_EXIT to stop
 * the transcode loop, 0 otherwise.
 * NOTE(review): rendered listing — lines 3999, 4036 and 4066 were dropped
 * by the extraction; code below is left exactly as rendered.
 */
3975 static int check_keyboard_interaction(int64_t cur_time)
3976 {
3977  int i, ret, key;
3978  static int64_t last_time;
3979  if (received_nb_signals)
3980  return AVERROR_EXIT;
3981  /* read_key() returns 0 on EOF */
3982  if(cur_time - last_time >= 100000 && !run_as_daemon){
3983  key = read_key();
3984  last_time = cur_time;
3985  }else
3986  key = -1;
3987  if (key == 'q')
3988  return AVERROR_EXIT;
3989  if (key == '+') av_log_set_level(av_log_get_level()+10);
3990  if (key == '-') av_log_set_level(av_log_get_level()-10);
3991  if (key == 's') qp_hist ^= 1;
3992  if (key == 'h'){
3993  if (do_hex_dump){
3994  do_hex_dump = do_pkt_dump = 0;
3995  } else if(do_pkt_dump){
3996  do_hex_dump = 1;
3997  } else
3998  do_pkt_dump = 1;
4000  }
/* 'c' sends a command to the first matching filter, 'C' to all of them. */
4001  if (key == 'c' || key == 'C'){
4002  char buf[4096], target[64], command[256], arg[256] = {0};
4003  double time;
4004  int k, n = 0;
4005  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4006  i = 0;
4007  set_tty_echo(1);
4008  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4009  if (k > 0)
4010  buf[i++] = k;
4011  buf[i] = 0;
4012  set_tty_echo(0);
4013  fprintf(stderr, "\n");
4014  if (k > 0 &&
4015  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4016  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4017  target, time, command, arg);
4018  for (i = 0; i < nb_filtergraphs; i++) {
4019  FilterGraph *fg = filtergraphs[i];
4020  if (fg->graph) {
4021  if (time < 0) {
4022  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4023  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4024  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4025  } else if (key == 'c') {
4026  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4027  ret = AVERROR_PATCHWELCOME;
4028  } else {
4029  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4030  if (ret < 0)
4031  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4032  }
4033  }
4034  }
4035  } else {
4037  "Parse error, at least 3 arguments were expected, "
4038  "only %d given in string '%s'\n", n, buf);
4039  }
4040  }
/* 'D' shifts the debug bitmask of the first decoder; 'd' prompts for one. */
4041  if (key == 'd' || key == 'D'){
4042  int debug=0;
4043  if(key == 'D') {
4044  debug = input_streams[0]->dec_ctx->debug << 1;
4045  if(!debug) debug = 1;
4046  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4047  debug += debug;
4048  }else{
4049  char buf[32];
4050  int k = 0;
4051  i = 0;
4052  set_tty_echo(1);
4053  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4054  if (k > 0)
4055  buf[i++] = k;
4056  buf[i] = 0;
4057  set_tty_echo(0);
4058  fprintf(stderr, "\n");
4059  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4060  fprintf(stderr,"error parsing debug value\n");
4061  }
4062  for(i=0;i<nb_input_streams;i++) {
4063  input_streams[i]->dec_ctx->debug = debug;
4064  }
4065  for(i=0;i<nb_output_streams;i++) {
4067  ost->enc_ctx->debug = debug;
4068  }
4069  if(debug) av_log_set_level(AV_LOG_DEBUG);
4070  fprintf(stderr,"debug=%d\n", debug);
4071  }
4072  if (key == '?'){
4073  fprintf(stderr, "key function\n"
4074  "? show this help\n"
4075  "+ increase verbosity\n"
4076  "- decrease verbosity\n"
4077  "c Send command to first matching filter supporting it\n"
4078  "C Send/Queue command to all matching filters\n"
4079  "D cycle through available debug modes\n"
4080  "h dump packets/hex press to cycle through the 3 states\n"
4081  "q quit\n"
4082  "s Show QP histogram\n"
4083  );
4084  }
4085  return 0;
4086 }
4087 
4088 #if HAVE_THREADS
/*
 * Demuxer thread body: read packets from the input file and push them onto
 * the file's thread message queue. On a full non-blocking queue it retries
 * once in blocking mode and warns that -thread_queue_size may be too small.
 * Errors (including EOF) are propagated to the receiver side of the queue.
 * NOTE(review): rendered listing — line 4109 (presumably av_packet_unref(pkt)
 * in the allocation-failure path) is missing from this view.
 */
4089 static void *input_thread(void *arg)
4090 {
4091  InputFile *f = arg;
4092  AVPacket *pkt = f->pkt, *queue_pkt;
4093  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4094  int ret = 0;
4095 
4096  while (1) {
4097  ret = av_read_frame(f->ctx, pkt);
4098 
4099  if (ret == AVERROR(EAGAIN)) {
4100  av_usleep(10000);
4101  continue;
4102  }
4103  if (ret < 0) {
4104  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4105  break;
4106  }
4107  queue_pkt = av_packet_alloc();
4108  if (!queue_pkt) {
4110  av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
4111  break;
4112  }
4113  av_packet_move_ref(queue_pkt, pkt);
4114  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4115  if (flags && ret == AVERROR(EAGAIN)) {
/* Queue full in non-blocking mode: retry once, blocking, and warn. */
4116  flags = 0;
4117  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4118  av_log(f->ctx, AV_LOG_WARNING,
4119  "Thread message queue blocking; consider raising the "
4120  "thread_queue_size option (current value: %d)\n",
4121  f->thread_queue_size);
4122  }
4123  if (ret < 0) {
4124  if (ret != AVERROR_EOF)
4125  av_log(f->ctx, AV_LOG_ERROR,
4126  "Unable to send packet to main thread: %s\n",
4127  av_err2str(ret));
4128  av_packet_free(&queue_pkt);
4129  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4130  break;
4131  }
4132  }
4133 
4134  return NULL;
4135 }
4136 
/*
 * Shut down and join the reader thread of input file i: drain and free all
 * queued packets, join the thread, then free the message queue itself.
 * Safe to call when no thread/queue was ever created.
 * NOTE(review): rendered listing — line 4144 (presumably
 * av_thread_message_queue_set_err_send(..., AVERROR_EOF) to unblock the
 * sender) is missing from this view.
 */
4137 static void free_input_thread(int i)
4138 {
4139  InputFile *f = input_files[i];
4140  AVPacket *pkt;
4141 
4142  if (!f || !f->in_thread_queue)
4143  return;
4145  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4146  av_packet_free(&pkt);
4147 
4148  pthread_join(f->thread, NULL);
4149  f->joined = 1;
4150  av_thread_message_queue_free(&f->in_thread_queue);
4151 }
4152 
4153 static void free_input_threads(void)
4154 {
4155  int i;
4156 
4157  for (i = 0; i < nb_input_files; i++)
4158  free_input_thread(i);
4159 }
4160 
4161 static int init_input_thread(int i)
4162 {
4163  int ret;
4164  InputFile *f = input_files[i];
4165 
4166  if (f->thread_queue_size < 0)
4167  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4168  if (!f->thread_queue_size)
4169  return 0;
4170 
4171  if (f->ctx->pb ? !f->ctx->pb->seekable :
4172  strcmp(f->ctx->iformat->name, "lavfi"))
4173  f->non_blocking = 1;
4174  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4175  f->thread_queue_size, sizeof(f->pkt));
4176  if (ret < 0)
4177  return ret;
4178 
4179  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4180  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4181  av_thread_message_queue_free(&f->in_thread_queue);
4182  return AVERROR(ret);
4183  }
4184 
4185  return 0;
4186 }
4187 
4188 static int init_input_threads(void)
4189 {
4190  int i, ret;
4191 
4192  for (i = 0; i < nb_input_files; i++) {
4193  ret = init_input_thread(i);
4194  if (ret < 0)
4195  return ret;
4196  }
4197  return 0;
4198 }
4199 
/*
 * Fetch the next packet from the input file's reader-thread queue,
 * non-blocking when the file was flagged non_blocking.
 * NOTE(review): rendered listing — line 4204 (the second branch of the
 * ternary flags argument) is missing from this view.
 */
4200 static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
4201 {
4202  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4203  f->non_blocking ?
4205 }
4206 #endif
4207 
/*
 * Read the next packet of input file f, honoring -re (rate emulation):
 * returns AVERROR(EAGAIN) while any stream of the file is ahead of
 * wall-clock time. Uses the threaded queue when one is configured,
 * otherwise reads directly from the demuxer.
 * NOTE(review): rendered listing — the signature line (4208) is missing
 * from this view.
 */
4209 {
4210  if (f->rate_emu) {
4211  int i;
4212  for (i = 0; i < f->nb_streams; i++) {
4213  InputStream *ist = input_streams[f->ist_index + i];
4214  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4215  int64_t now = av_gettime_relative() - ist->start;
4216  if (pts > now)
4217  return AVERROR(EAGAIN);
4218  }
4219  }
4220 
4221 #if HAVE_THREADS
4222  if (f->thread_queue_size)
4223  return get_input_packet_mt(f, pkt);
4224 #endif
4225  *pkt = f->pkt;
4226  return av_read_frame(f->ctx, *pkt);
4227 }
4228 
4229 static int got_eagain(void)
4230 {
4231  int i;
4232  for (i = 0; i < nb_output_streams; i++)
4233  if (output_streams[i]->unavailable)
4234  return 1;
4235  return 0;
4236 }
4237 
4238 static void reset_eagain(void)
4239 {
4240  int i;
4241  for (i = 0; i < nb_input_files; i++)
4242  input_files[i]->eagain = 0;
4243  for (i = 0; i < nb_output_streams; i++)
4244  output_streams[i]->unavailable = 0;
4245 }
4246 
4247 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4248 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4249  AVRational time_base)
4250 {
4251  int ret;
4252 
4253  if (!*duration) {
4254  *duration = tmp;
4255  return tmp_time_base;
4256  }
4257 
4258  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4259  if (ret < 0) {
4260  *duration = tmp;
4261  return tmp_time_base;
4262  }
4263 
4264  return time_base;
4265 }
4266 
4268 {
4269  InputStream *ist;
4270  AVCodecContext *avctx;
4271  int i, ret, has_audio = 0;
4272  int64_t duration = 0;
4273 
4274  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4275  if (ret < 0)
4276  return ret;
4277 
4278  for (i = 0; i < ifile->nb_streams; i++) {
4279  ist = input_streams[ifile->ist_index + i];
4280  avctx = ist->dec_ctx;
4281 
4282  /* duration is the length of the last frame in a stream
4283  * when audio stream is present we don't care about
4284  * last video frame length because it's not defined exactly */
4285  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4286  has_audio = 1;
4287  }
4288 
4289  for (i = 0; i < ifile->nb_streams; i++) {
4290  ist = input_streams[ifile->ist_index + i];
4291  avctx = ist->dec_ctx;
4292 
4293  if (has_audio) {
4294  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4295  AVRational sample_rate = {1, avctx->sample_rate};
4296 
4298  } else {
4299  continue;
4300  }
4301  } else {
4302  if (ist->framerate.num) {
4303  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4304  } else if (ist->st->avg_frame_rate.num) {
4306  } else {
4307  duration = 1;
4308  }
4309  }
4310  if (!ifile->duration)
4311  ifile->time_base = ist->st->time_base;
4312  /* the total duration of the stream, max_pts - min_pts is
4313  * the duration of the stream without the last frame */
4314  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4315  duration += ist->max_pts - ist->min_pts;
4316  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4317  ifile->time_base);
4318  }
4319 
4320  if (ifile->loop > 0)
4321  ifile->loop--;
4322 
4323  return ret;
4324 }
4325 
4326 /*
4327  * Return
4328  * - 0 -- one packet was read and processed
4329  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4330  * this function should be called again
4331  * - AVERROR_EOF -- this function should not be called again
4332  */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    InputStream *ist;
    AVPacket *pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    /* with -copyts, discontinuity correction is off by default; it is only
     * re-enabled below when a timestamp wrap is detected */
    int disable_discontinuity_correction = copy_ts;

    /* NOTE(review): extraction gap — the declaration of 'is' (presumably the
     * input AVFormatContext) is missing from this copy; confirm upstream. */
    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* input exhausted but -stream_loop is active: flush the decoders, seek
     * back to the start and retry the read */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                /* no_eof=1: drain but keep the stream open for the next loop */
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        /* the demux thread must be stopped across the seek */
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    /* genuine EOF (or read error): flush decoders and finish streams */
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                /* NOTE(review): extraction gap — the declaration of 'ost'
                 * (presumably output_streams[j]) is missing from this copy. */

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                /* NOTE(review): extraction gap — the statement finishing the
                 * matched output stream is missing from this copy. */
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        /* NOTE(review): extraction gap — the opening of the packet-dump call
         * is missing from this copy; only its final argument remains. */
        is->streams[pkt->stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt->stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt->stream_index];

    /* per-stream statistics used by the final report */
    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
        /* NOTE(review): extraction gap — the av_log() opening is missing from
         * this copy; only its format arguments remain. */
        "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
        /* NOTE(review): extraction gap — the argument list of this av_log()
         * call is missing from this copy. */
    }

    /* correct the start time for streams with wrapping pts (e.g. MPEG-TS):
     * timestamps beyond half the wrap period are shifted back one period */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            /* display matrix is handled separately by the rotation code */
            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* packet-level side data of the same type takes precedence */
            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the accumulated input-file timestamp offset */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    /* apply per-stream -itsscale */
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ist->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ist->ts_scale;

    /* NOTE(review): extraction gap — the assignment of pkt_dts (presumably
     * the packet dts rescaled to AV_TIME_BASE_Q) is missing from this copy. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        /* NOTE(review): extraction gap — part of this condition is missing. */
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        /* inter-stream discontinuity: compare against the file-wide last ts */
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            /* NOTE(review): extraction gap — lines are missing inside this
             * branch (condition tail, av_log opening, dts/pts adjustments). */
            ifile->ts_offset -= delta;
            "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
            delta, ifile->ts_offset);
            if (pkt->pts != AV_NOPTS_VALUE)
        }
    }

    /* when looping, shift timestamps past the previous iterations and track
     * this stream's pts range for the next seek_to_start() */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts += duration;
        ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;

    /* NOTE(review): extraction gap — the recomputation of pkt_dts after the
     * duration shift is missing from this copy. */

    /* with -copyts, still re-enable discontinuity correction when the dts
     * looks like a genuine timestamp wrap */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
        /* NOTE(review): extraction gap — the rounding argument and the close
         * of this call are missing from this copy. */
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        /* NOTE(review): extraction gap — part of this condition is missing. */
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        /* per-stream discontinuity: compare against the predicted next dts */
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                /* NOTE(review): extraction gap — part of this condition is
                 * missing from this copy. */
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                /* NOTE(review): extraction gap — the av_log() opening is
                 * missing; only its arguments remain. */
                "timestamp discontinuity for stream #%d:%d "
                "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                ist->file_index, ist->st->index, ist->st->id,
                /* NOTE(review): extraction gap — one argument line missing. */
                delta, ifile->ts_offset);
                /* NOTE(review): extraction gap — the dts adjustment preceding
                 * this pts check is missing from this copy. */
                if (pkt->pts != AV_NOPTS_VALUE)
            }
        } else {
            /* non-DISCONT formats: drop wildly wrong timestamps instead of
             * offsetting the whole file */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                /* NOTE(review): extraction gap — part of this condition is
                 * missing from this copy. */
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    /* NOTE(review): extraction gap — part of this condition
                     * is missing from this copy. */
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    /* remember the last dts for the inter-stream discontinuity check above */
    if (pkt->dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
        /* NOTE(review): extraction gap — the argument list of this av_log()
         * call is missing from this copy. */
    }

    /* keep subtitle-to-video rendering in step with the main stream */
    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
#if HAVE_THREADS
    if (ifile->thread_queue_size)
        av_packet_free(&pkt);
    else
#endif
    /* NOTE(review): extraction gap — the non-threaded packet release
     * statement is missing from this copy. */

    return 0;
}
4618 
4619 /**
4620  * Perform a step of transcoding for the specified filter graph.
4621  *
4622  * @param[in] graph filter graph to consider
4623  * @param[out] best_ist input stream where a frame would allow to continue
4624  * @return 0 for success, <0 for error
4625  */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    /* ask the graph to produce output from its oldest pending input */
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters(0);

    /* graph fully drained: flush the sinks and close its output streams */
    if (ret == AVERROR_EOF) {
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* graph is starved: pick the input whose buffersrc has failed the most
     * requests — it is the one most urgently needing a frame */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            /* NOTE(review): extraction gap — the second half of this
             * condition is missing from this copy. */
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* no usable input: mark every output of this graph unavailable so the
     * scheduler does not pick it again immediately */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
4666 
4667 /**
4668  * Run a single step of transcoding.
4669  *
4670  * @return 0 for success, <0 for error
4671  */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream *ist = NULL;
    int ret;

    /* pick the output stream that is furthest behind */
    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            reset_eagain();
            /* back off briefly so a blocked input can make progress */
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* lazily (re)configure the filter graph once all inputs know their format */
    if (ost->filter && !ost->filter->graph->graph) {
        if (ifilter_has_all_input_formats(ost->filter->graph)) {
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /*
         * Similar case to the early audio initialization in reap_filters.
         * Audio is special in ffmpeg.c currently as we depend on lavfi's
         * audio frame buffering/creation to get the output audio frame size
         * in samples correct. The audio frame size for the filter chain is
         * configured during the output stream initialization.
         *
         * Apparently avfilter_graph_request_oldest (called in
         * transcode_from_filter just down the line) peeks. Peeking already
         * puts one frame "ready to be given out", which means that any
         * update in filter buffer sink configuration afterwards will not
         * help us. And yes, even if it would be utilized,
         * av_buffersink_get_samples is affected, as it internally utilizes
         * the same early exit for peeked frames.
         *
         * In other words, if avfilter_graph_request_oldest would not make
         * further filter chain configuration or usage of
         * av_buffersink_get_samples useless (by just causing the return
         * of the peeked AVFrame as-is), we could get rid of this additional
         * early encoder initialization.
         */
        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
        /* NOTE(review): extraction gap — the early output-stream init call
         * guarded by this condition is missing from this copy. */

        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* graph not configured yet: feed any input that has produced nothing
         * and whose file still has data */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* streamcopy: read straight from the mapped input stream */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4758 
4759 /*
4760  * The following code is the main loop of the file converter
4761  */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* main loop: one transcode_step() per iteration until interrupted,
     * out of needed output, or inputs are exhausted */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            /* NOTE(review): extraction gap — the av_log() opening is missing
             * from this copy; only its arguments remain. */
            "Nothing was written into output file %d (%s), because "
            "at least one of its streams received no packets.\n",
            i, os->url);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
        if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
            exit_program(1);
        }
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* NOTE(review): extraction gap — a statement is missing here in this
     * copy (possibly hardware-device teardown — confirm upstream). */

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        /* NOTE(review): extraction gap — the av_log() opening
                         * is missing from this copy. */
                        "Error closing logfile, loss of information possible: %s\n",
                        av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                /* NOTE(review): extraction gap — one free statement is
                 * missing here in this copy. */
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
4905 
4907 {
4908  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4909 #if HAVE_GETRUSAGE
4910  struct rusage rusage;
4911 
4912  getrusage(RUSAGE_SELF, &rusage);
4913  time_stamps.user_usec =
4914  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4915  time_stamps.sys_usec =
4916  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4917 #elif HAVE_GETPROCESSTIMES
4918  HANDLE proc;
4919  FILETIME c, e, k, u;
4920  proc = GetCurrentProcess();
4921  GetProcessTimes(proc, &c, &e, &k, &u);
4922  time_stamps.user_usec =
4923  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4924  time_stamps.sys_usec =
4925  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4926 #else
4927  time_stamps.user_usec = time_stamps.sys_usec = 0;
4928 #endif
4929  return time_stamps;
4930 }
4931 
/* Return this process's peak memory usage in bytes, or 0 when no suitable
 * platform API is available. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;
    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes */
    return 1024 * (int64_t)usage.ru_maxrss;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();
    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4949 
/* No-op log callback: installed to silence all libav* logging (used with
 * the "-d" daemon mode handled in main()). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4953 
int main(int argc, char **argv)
{
    int i, ret;
    /* NOTE(review): extraction gap — the declaration of 'ti' (the benchmark
     * timestamps used below) is missing from this copy. */

    init_dynload();

    /* NOTE(review): extraction gap — a statement is missing here in this
     * copy (presumably the exit-cleanup registration — confirm upstream). */

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    /* NOTE(review): extraction gap — a log-setup statement is missing here. */
    parse_loglevel(argc, argv, options);

    /* undocumented first argument "-d": run as daemon */
    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        /* NOTE(review): extraction gap — a statement is missing here in this
         * copy (presumably installing log_callback_null — confirm). */
        argc--;
        argv++;
    }

#if CONFIG_AVDEVICE
    /* NOTE(review): extraction gap — the avdevice registration call is
     * missing from this copy. */
#endif
    /* NOTE(review): extraction gap — a statement is missing here in this
     * copy (presumably network initialization — confirm upstream). */

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

    /* SDP printing is only wanted when every output muxer is "rtp" */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    /* NOTE(review): extraction gap — the initial benchmark timestamp capture
     * (filling 'ti') is missing here in this copy. */
    if (transcode() < 0)
        exit_program(1);
    if (do_benchmark) {
        int64_t utime, stime, rtime;
        /* NOTE(review): extraction gap — a statement refreshing current_time
         * is missing here in this copy. */
        utime = current_time.user_usec - ti.user_usec;
        stime = current_time.sys_usec - ti.sys_usec;
        rtime = current_time.real_usec - ti.real_usec;
        /* NOTE(review): extraction gap — the av_log() opening is missing;
         * only its arguments remain. */
        "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
        utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
    /* NOTE(review): extraction gap — the av_log() argument list and the
     * error-rate check guarding exit_program(69) are missing from this copy. */
    exit_program(69);

    /* NOTE(review): extraction gap — a statement is missing here in this
     * copy (presumably final cleanup — confirm upstream). */
    return main_return_code;
}
static void flush(AVCodecContext *avctx)
#define ECHO(name, type, min, max)
Definition: af_aecho.c:188
uint8_t
int32_t
simple assert() macros that are a bit more flexible than ISO C assert().
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1629
Main libavdevice API header.
Main libavfilter public API header.
Main libavformat public API header.
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:822
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:145
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:467
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1380
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:831
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:821
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:465
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:823
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:853
#define AV_DISPOSITION_DUB
Definition: avformat.h:819
#define AV_DISPOSITION_METADATA
Definition: avformat.h:855
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:458
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:856
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:472
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:833
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:830
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:832
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:841
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:462
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:854
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:137
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:820
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:818
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:342
#define avio_print(s,...)
Write strings (const char *) to the context.
Definition: avio.h:594
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:675
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:225
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:245
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1163
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1192
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t *size)
Definition: avpacket.c:368
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:343
#define AV_RL64
Definition: intreadwrite.h:173
#define AV_RL32
Definition: intreadwrite.h:146
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
#define AV_BPRINT_SIZE_AUTOMATIC
memory buffer sink API for audio and video
Memory buffer source API.
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
#define flags(name, subs,...)
Definition: cbs_av1.c:561
#define is(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:286
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:278
#define s(width, name)
Definition: cbs_vp9.c:257
#define f(width, name)
Definition: cbs_vp9.c:255
static av_always_inline void filter(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhddsp.c:27
audio channel layout utility functions
#define fail()
Definition: checkasm.h:133
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1084
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:502
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1183
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:162
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:128
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:87
#define media_type_string
Definition: cmdutils.h:617
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: codec_par.c:72
void avcodec_parameters_free(AVCodecParameters **ppar)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: codec_par.c:61
@ AV_FIELD_TT
Definition: codec_par.h:39
@ AV_FIELD_BB
Definition: codec_par.h:40
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
@ AV_FIELD_BT
Definition: codec_par.h:42
@ AV_FIELD_TB
Definition: codec_par.h:41
#define FFMAX3(a, b, c)
Definition: common.h:104
#define FFSWAP(type, a, b)
Definition: common.h:108
#define FFMIN(a, b)
Definition: common.h:105
#define av_clip
Definition: common.h:122
#define FFMAX(a, b)
Definition: common.h:103
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:101
#define FFSIGN(a)
Definition: common.h:73
#define FFMIN3(a, b, c)
Definition: common.h:106
#define NULL
Definition: coverity.c:32
#define max(a, b)
Definition: cuda_runtime.h:33
static enum AVPixelFormat pix_fmt
static AVFrame * frame
Public dictionary API.
Display matrix.
static float sub(float src0, float src1)
#define atomic_store(object, desired)
Definition: stdatomic.h:85
intptr_t atomic_int
Definition: stdatomic.h:55
#define atomic_load(object)
Definition: stdatomic.h:93
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
static int nb_frames_drop
Definition: ffmpeg.c:137
AVIOContext * progress_avio
Definition: ffmpeg.c:144
static int nb_frames_dup
Definition: ffmpeg.c:135
static int transcode(void)
Definition: ffmpeg.c:4762
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2520
static void set_tty_echo(int on)
Definition: ffmpeg.c:3963
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2327
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3975
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:1004
int nb_output_streams
Definition: ffmpeg.c:154
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4906
static int need_output(void)
Definition: ffmpeg.c:3906
void term_exit(void)
Definition: ffmpeg.c:337
static volatile int received_sigterm
Definition: ffmpeg.c:343
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2278
int nb_filtergraphs
Definition: ffmpeg.c:159
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2615
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:1062
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2946
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:223
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2124
static void print_sdp(void)
Definition: ffmpeg.c:2805
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1502
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:923
InputFile ** input_files
Definition: ffmpeg.c:150
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
Definition: ffmpeg.c:1145
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2850
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1585
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4626
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:513
int main(int argc, char **argv)
Definition: ffmpeg.c:4954
static double psnr(double d)
Definition: ffmpeg.c:1435
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3074
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:3345
static int main_return_code
Definition: ffmpeg.c:347
static uint8_t * subtitle_out
Definition: ffmpeg.c:146
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2301
int nb_input_streams
Definition: ffmpeg.c:149
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4932
static void reset_eagain(void)
Definition: ffmpeg.c:4238
int nb_input_files
Definition: ffmpeg.c:151
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1483
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3258
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3936
FilterGraph ** filtergraphs
Definition: ffmpeg.c:158
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:3019
static unsigned dup_warning
Definition: ffmpeg.c:136
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
Definition: ffmpeg.c:983
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2389
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2174
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2144
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:281
static int process_input(int file_index)
Definition: ffmpeg.c:4333
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4950
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4267
static volatile int ffmpeg_exited
Definition: ffmpeg.c:346
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:2032
int nb_output_files
Definition: ffmpeg.c:156
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:729
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3217
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
static int read_key(void)
Definition: ffmpeg.c:457
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:3025
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:875
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2936
#define SIGNAL(sig, func)
Definition: ffmpeg.c:404
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:241
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
Definition: ffmpeg.c:3526
static int got_eagain(void)
Definition: ffmpeg.c:4229
static int want_sdp
Definition: ffmpeg.c:141
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2599
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:508
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:329
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:313
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:192
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:676
static int transcode_init(void)
Definition: ffmpeg.c:3724
static volatile int received_nb_signals
Definition: ffmpeg.c:344
InputStream ** input_streams
Definition: ffmpeg.c:148
static int64_t copy_ts_first_pts
Definition: ffmpeg.c:348
static FILE * vstats_file
Definition: ffmpeg.c:112
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3709
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:143
static void abort_codec_experimental(const AVCodec *c, int encoder)
Definition: ffmpeg.c:694
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:699
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4248
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:515
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:936
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4672
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2163
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:720
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1702
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3103
void term_init(void)
Definition: ffmpeg.c:408
static unsigned nb_output_dumped
Definition: ffmpeg.c:139
OutputStream ** output_streams
Definition: ffmpeg.c:153
static int get_input_packet(InputFile *f, AVPacket **pkt)
Definition: ffmpeg.c:4208
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1440
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:2049
OutputFile ** output_files
Definition: ffmpeg.c:155
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:177
static void flush_encoders(void)
Definition: ffmpeg.c:1925
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:897
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:3012
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:685
static int run_as_daemon
Definition: ffmpeg.c:134
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3321
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1912
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2251
static void sigterm_handler(int sig)
Definition: ffmpeg.c:351
static atomic_int transcode_init_done
Definition: ffmpeg.c:345
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:419
int debug_ts
Definition: ffmpeg_opt.c:166
@ HWACCEL_GENERIC
Definition: ffmpeg.h:61
@ HWACCEL_AUTO
Definition: ffmpeg.h:60
float max_error_rate
Definition: ffmpeg_opt.c:173
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
int audio_volume
Definition: ffmpeg_opt.c:154
#define VSYNC_DROP
Definition: ffmpeg.h:54
char * sdp_filename
Definition: ffmpeg_opt.c:148
int print_stats
Definition: ffmpeg_opt.c:169
int stdin_interaction
Definition: ffmpeg_opt.c:171
int do_benchmark
Definition: ffmpeg_opt.c:159
int do_hex_dump
Definition: ffmpeg_opt.c:161
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:172
float dts_error_threshold
Definition: ffmpeg_opt.c:152
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:516
OSTFinished
Definition: ffmpeg.h:447
@ ENCODER_FINISHED
Definition: ffmpeg.h:448
@ MUXER_FINISHED
Definition: ffmpeg.h:449
#define VSYNC_CFR
Definition: ffmpeg.h:51
int abort_on_flags
Definition: ffmpeg_opt.c:168
float frame_drop_threshold
Definition: ffmpeg_opt.c:157
void show_usage(void)
Definition: ffmpeg_opt.c:3300
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:307
int qp_hist
Definition: ffmpeg_opt.c:170
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:303
#define VSYNC_AUTO
Definition: ffmpeg.h:49
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:274
int vstats_version
Definition: ffmpeg_opt.c:176
char * vstats_filename
Definition: ffmpeg_opt.c:147
int audio_sync_method
Definition: ffmpeg_opt.c:155
int copy_tb
Definition: ffmpeg_opt.c:165
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:436
@ FKF_T
Definition: ffmpeg.h:438
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:437
@ FKF_N_FORCED
Definition: ffmpeg.h:435
@ FKF_N
Definition: ffmpeg.h:434
int ffmpeg_parse_options(int argc, char **argv)
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:442
#define DECODING_FOR_OST
Definition: ffmpeg.h:306
int video_sync_method
Definition: ffmpeg_opt.c:156
float dts_delta_threshold
Definition: ffmpeg_opt.c:151
int64_t stats_period
Definition: ffmpeg_opt.c:178
int copy_ts
Definition: ffmpeg_opt.c:163
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
int filtergraph_is_simple(FilterGraph *fg)
int do_benchmark_all
Definition: ffmpeg_opt.c:160
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
int configure_filtergraph(FilterGraph *fg)
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:136
#define VSYNC_VFR
Definition: ffmpeg.h:52
int exit_on_error
Definition: ffmpeg_opt.c:167
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:443
int do_pkt_dump
Definition: ffmpeg_opt.c:162
const OptionDef options[]
sample_rate
static int loop
Definition: ffplay.c:341
static int64_t start_time
Definition: ffplay.c:332
a very simple circular buffer FIFO implementation
static AVCodecContext * dec_ctx
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1661
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1656
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:224
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:90
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:454
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:148
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:129
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:77
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:873
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:227
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:551
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:201
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3501
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: codec_par.c:147
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:116
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
av_cold int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: avcodec.c:570
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:188
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:411
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2670
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:524
@ AV_CODEC_ID_H264
Definition: codec_id.h:76
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:491
@ AV_CODEC_ID_AC3
Definition: codec_id.h:427
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:425
@ AV_CODEC_ID_VP9
Definition: codec_id.h:217
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1686
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:643
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1025
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:580
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:395
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:364
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:146
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:826
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:491
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:411
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:410
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:696
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:690
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:641
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:737
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:132
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:66
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, size_t size)
Allocate new information from stream.
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5064
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5052
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4432
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2510
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1739
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4477
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:506
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1259
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1274
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5846
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:846
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:115
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:640
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5774
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int av_buffersink_get_format(const AVFilterContext *ctx)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int av_buffersink_get_h(const AVFilterContext *ctx)
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
int av_buffersink_get_w(const AVFilterContext *ctx)
int av_buffersink_get_channels(const AVFilterContext *ctx)
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:198
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:140
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:96
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:166
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:233
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:265
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:701
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:74
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
#define AVERROR(e)
Definition: error.h:43
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:543
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:812
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
#define AV_LOG_QUIET
Print no output.
Definition: log.h:176
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
void av_log_set_level(int level)
Set the log level.
Definition: log.c:440
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:384
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
void av_log_set_flags(int arg)
Definition: log.c:445
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:198
AVMediaType
Definition: avutil.h:199
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:93
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
int index
Definition: gxfenc.c:89
for(j=16;j >0;--j)
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
cl_device_type type
const char * key
misc image utilities
int i
Definition: input.c:407
#define av_log2
Definition: intmath.h:83
#define extra_bits(eb)
Definition: intrax8.c:125
const char * arg
Definition: jacosubdec.c:66
#define LIBAVCODEC_IDENT
Definition: version.h:42
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
common internal API header
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
Replacements for frequently missing libm functions.
#define llrint(x)
Definition: libm.h:394
#define llrintf(x)
Definition: libm.h:399
#define lrintf(x)
Definition: libm_mips.h:70
const char * desc
Definition: libsvtav1.c:79
uint8_t w
Definition: llviddspenc.c:39
#define NAN
Definition: mathematics.h:64
#define mid_pred
Definition: mathops.h:97
int frame_size
Definition: mxfenc.c:2206
AVOptions.
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
miscellaneous OS support macros and functions.
misc parsing utilities
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
#define tb
Definition: regdef.h:68
#define FF_ARRAY_ELEMS(a)
#define vsnprintf
Definition: snprintf.h:36
#define snprintf
Definition: snprintf.h:34
The bitstream filter state.
Definition: bsf.h:49
uint8_t * data
The data buffer.
Definition: buffer.h:92
Describe the class of an AVClass context structure.
Definition: log.h:67
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
main external API structure.
Definition: avcodec.h:536
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
int width
picture width / height.
Definition: avcodec.h:709
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1557
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1204
int debug
debug
Definition: avcodec.h:1623
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:602
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1171
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2085
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:788
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1150
enum AVMediaType codec_type
Definition: avcodec.h:544
AVRational framerate
Definition: avcodec.h:2071
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:915
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:668
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1193
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:826
const struct AVCodec * codec
Definition: avcodec.h:545
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1747
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1164
int sample_rate
samples per second
Definition: avcodec.h:1196
attribute_deprecated int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:1812
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:578
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1157
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:659
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
int channels
number of audio channels
Definition: avcodec.h:1197
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1178
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1699
enum AVCodecID codec_id
Definition: avcodec.h:546
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1247
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1216
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1351
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:464
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:457
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:452
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
uint64_t channel_layout
Audio only.
Definition: codec_par.h:162
int channels
Audio only.
Definition: codec_par.h:166
int width
Video only.
Definition: codec_par.h:126
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
int block_align
Audio only.
Definition: codec_par.h:177
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
int video_delay
Video only.
Definition: codec_par.h:155
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:64
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
int sample_rate
Audio only.
Definition: codec_par.h:170
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:3395
AVCodec.
Definition: codec.h:197
enum AVCodecID id
Definition: codec.h:211
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: codec.h:266
enum AVMediaType type
Definition: codec.h:210
const char * name
Name of the codec implementation.
Definition: codec.h:204
int capabilities
Codec capabilities.
Definition: codec.h:216
int depth
Number of bits in the component.
Definition: pixdesc.h:58
char * key
Definition: dict.h:82
char * value
Definition: dict.h:83
An instance of a filter.
Definition: avfilter.h:341
Format I/O context.
Definition: avformat.h:1232
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1288
AVIOContext * pb
I/O context.
Definition: avformat.h:1274
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1512
char * url
input or output URL.
Definition: avformat.h:1328
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1463
ff_const59 struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1251
AVChapter ** chapters
Definition: avformat.h:1464
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1300
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:384
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:582
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
enum AVChromaLocation chroma_location
Definition: frame.h:575
int width
Definition: frame.h:376
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
Definition: frame.h:657
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:396
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:597
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:613
int height
Definition: frame.h:376
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:555
int channels
number of audio channels, only used for audio.
Definition: frame.h:624
enum AVColorPrimaries color_primaries
Definition: frame.h:564
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:495
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:406
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:470
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:441
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:465
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:562
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:573
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:566
int sample_rate
Sample rate of the audio data.
Definition: frame.h:490
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:391
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:401
Bytestream IO Context.
Definition: avio.h:161
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
AVOption.
Definition: opt.h:248
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:510
const char * name
Definition: avformat.h:491
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:516
uint8_t * data
Definition: packet.h:307
enum AVPacketSideDataType type
Definition: packet.h:313
size_t size
Definition: packet.h:311
This structure stores compressed data.
Definition: packet.h:346
int stream_index
Definition: packet.h:371
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:375
int size
Definition: packet.h:370
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:387
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:362
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:368
uint8_t * data
Definition: packet.h:369
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:389
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1150
unsigned int nb_stream_indexes
Definition: avformat.h:1155
unsigned int * stream_index
Definition: avformat.h:1154
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1153
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int num
Numerator.
Definition: rational.h:59
int den
Denominator.
Definition: rational.h:60
Stream structure.
Definition: avformat.h:873
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:975
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1038
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1065
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:935
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:924
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:928
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:922
AVDictionary * metadata
Definition: avformat.h:937
int id
Format-specific stream ID.
Definition: avformat.h:880
int index
stream index in AVFormatContext
Definition: avformat.h:874
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1055
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:912
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:946
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:902
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:979
int64_t cur_dts
Definition: avformat.h:1066
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1015
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:926
uint32_t end_display_time
Definition: avcodec.h:2725
unsigned num_rects
Definition: avcodec.h:2726
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2728
int64_t sys_usec
Definition: ffmpeg.c:126
int64_t real_usec
Definition: ffmpeg.c:124
int64_t user_usec
Definition: ffmpeg.c:125
int index
Definition: ffmpeg.h:288
int nb_outputs
Definition: ffmpeg.h:297
OutputFilter ** outputs
Definition: ffmpeg.h:296
int nb_inputs
Definition: ffmpeg.h:295
const char * graph_desc
Definition: ffmpeg.h:289
AVFilterGraph * graph
Definition: ffmpeg.h:291
InputFilter ** inputs
Definition: ffmpeg.h:294
Definition: ffmpeg.h:66
enum HWAccelID id
Definition: ffmpeg.h:69
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:68
const char * name
Definition: ffmpeg.h:67
int eagain
Definition: ffmpeg.h:403
int64_t ts_offset
Definition: ffmpeg.h:411
AVFormatContext * ctx
Definition: ffmpeg.h:401
int64_t input_ts_offset
Definition: ffmpeg.h:409
int eof_reached
Definition: ffmpeg.h:402
int nb_streams_warn
Definition: ffmpeg.h:418
int ist_index
Definition: ffmpeg.h:404
int nb_streams
Definition: ffmpeg.h:416
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:259
uint8_t * name
Definition: ffmpeg.h:244
int sample_rate
Definition: ffmpeg.h:255
int height
Definition: ffmpeg.h:252
struct InputStream * ist
Definition: ffmpeg.h:242
AVFilterContext * filter
Definition: ffmpeg.h:241
enum AVMediaType type
Definition: ffmpeg.h:245
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:247
uint64_t channel_layout
Definition: ffmpeg.h:257
struct FilterGraph * graph
Definition: ffmpeg.h:243
AVRational sample_aspect_ratio
Definition: ffmpeg.h:253
int eof
Definition: ffmpeg.h:261
int channels
Definition: ffmpeg.h:256
int format
Definition: ffmpeg.h:250
int width
Definition: ffmpeg.h:252
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:358
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:355
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:381
int saw_first_ts
Definition: ffmpeg.h:337
double ts_scale
Definition: ffmpeg.h:336
AVFrame * decoded_frame
Definition: ffmpeg.h:311
int64_t * dts_buffer
Definition: ffmpeg.h:394
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:319
int fix_sub_duration
Definition: ffmpeg.h:345
int64_t cfr_next_pts
Definition: ffmpeg.h:332
int got_output
Definition: ffmpeg.h:347
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:378
int nb_dts_buffer
Definition: ffmpeg.h:395
AVCodecContext * dec_ctx
Definition: ffmpeg.h:309
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:322
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:383
struct InputStream::@2 prev_sub
int64_t start
Definition: ffmpeg.h:315
int ret
Definition: ffmpeg.h:348
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:371
uint64_t data_size
Definition: ffmpeg.h:387
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:379
int nb_filters
Definition: ffmpeg.h:366
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:325
int64_t next_dts
Definition: ffmpeg.h:318
int reinit_filters
Definition: ffmpeg.h:368
int wrap_correction_done
Definition: ffmpeg.h:323
int64_t max_pts
Definition: ffmpeg.h:328
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:382
int guess_layout_max
Definition: ffmpeg.h:341
AVPacket * pkt
Definition: ffmpeg.h:313
AVFrame * filter_frame
Definition: ffmpeg.h:312
uint64_t samples_decoded
Definition: ffmpeg.h:392
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:321
int top_field_first
Definition: ffmpeg.h:340
int autorotate
Definition: ffmpeg.h:343
int discard
Definition: ffmpeg.h:303
uint64_t frames_decoded
Definition: ffmpeg.h:391
int decoding_needed
Definition: ffmpeg.h:305
struct InputStream::sub2video sub2video
AVStream * st
Definition: ffmpeg.h:302
int file_index
Definition: ffmpeg.h:301
InputFilter ** filters
Definition: ffmpeg.h:365
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:380
uint64_t nb_packets
Definition: ffmpeg.h:389
AVSubtitle subtitle
Definition: ffmpeg.h:349
char * hwaccel_device
Definition: ffmpeg.h:373
AVDictionary * decoder_opts
Definition: ffmpeg.h:338
int64_t min_pts
Definition: ffmpeg.h:327
const AVCodec * dec
Definition: ffmpeg.h:310
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:372
int64_t nb_samples
Definition: ffmpeg.h:334
AVRational framerate
Definition: ffmpeg.h:339
uint64_t limit_filesize
Definition: ffmpeg.h:581
AVFormatContext * ctx
Definition: ffmpeg.h:576
int header_written
Definition: ffmpeg.h:585
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:580
AVDictionary * opts
Definition: ffmpeg.h:577
int ost_index
Definition: ffmpeg.h:578
int shortest
Definition: ffmpeg.h:583
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:579
AVFilterInOut * out_tmp
Definition: ffmpeg.h:271
struct OutputStream * ost
Definition: ffmpeg.h:266
uint64_t * channel_layouts
Definition: ffmpeg.h:283
uint8_t * name
Definition: ffmpeg.h:268
int * sample_rates
Definition: ffmpeg.h:284
int * formats
Definition: ffmpeg.h:282
int source_index
Definition: ffmpeg.h:455
OSTFinished finished
Definition: ffmpeg.h:524
int unavailable
Definition: ffmpeg.h:525
AVCodecContext * enc_ctx
Definition: ffmpeg.h:474
uint8_t level
Definition: svq3.c:206
libswresample public header
#define av_malloc_array(a, b)
#define ff_dlog(a,...)
#define av_realloc_f(p, o, n)
#define av_freep(p)
#define av_malloc(s)
#define av_log(a,...)
static void error(const char *err)
static uint8_t tmp[11]
Definition: aes_ctr.c:27
#define src
Definition: vp8dsp.c:255
int64_t bitrate
Definition: h264_levels.c:131
FILE * out
Definition: movenc.c:54
int64_t duration
Definition: movenc.c:64
AVPacket * pkt
Definition: movenc.c:59
AVFormatContext * ctx
Definition: movenc.c:48
static void finish(void)
Definition: movenc.c:342
AVDictionary * opts
Definition: movenc.c:50
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
timestamp utils, mostly useful for debugging/logging purposes
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
static int64_t pts
int size
static AVStream * ost
const char * b
Definition: vf_curves.c:118
const char * r
Definition: vf_curves.c:116
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:873
float delta
static double c[64]