FFmpeg 4.4
framesync.c
/*
 * Copyright (c) 2013 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "framesync.h"
#include "internal.h"

#define OFFSET(member) offsetof(FFFrameSync, member)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

static const char *framesync_name(void *ptr)
{
    return "framesync";
}

static const AVOption framesync_options[] = {
    { "eof_action", "Action to take when encountering EOF from secondary input ",
        OFFSET(opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
        EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
        { "repeat", "Repeat the previous frame.",   0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
        { "endall", "End both streams.",            0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
        { "pass",   "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS },   .flags = FLAGS, "eof_action" },
    { "shortest", "force termination when the shortest input terminates", OFFSET(opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "repeatlast", "extend last frame of secondary streams beyond EOF", OFFSET(opt_repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    { NULL }
};
static const AVClass framesync_class = {
    .version                   = LIBAVUTIL_VERSION_INT,
    .class_name                = "framesync",
    .item_name                 = framesync_name,
    .category                  = AV_CLASS_CATEGORY_FILTER,
    .option                    = framesync_options,
    .parent_log_context_offset = OFFSET(parent),
};

const AVClass *ff_framesync_child_class_iterate(void **iter)
{
    const AVClass *c = *iter ? NULL : &framesync_class;
    *iter = (void *)(uintptr_t)c;
    return c;
}
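ff_framesync_child_class_iterate() is the hook that lets generic option handling find framesync_options underneath any filter that embeds an FFFrameSync, so eof_action, shortest and repeatlast become settable on the filter itself. Filters normally do not write this plumbing by hand; framesync.h provides a helper macro for it. A minimal sketch, assuming a hypothetical BlendContext that keeps its FFFrameSync in a member named fs:

/* Hedged sketch, not part of this file: the FRAMESYNC_DEFINE_CLASS helper
 * from framesync.h generates the AVClass glue for a filter context.
 * "blend", BlendContext and the fs member are illustrative names. */
typedef struct BlendContext {
    const AVClass *class;
    FFFrameSync fs;          /* framesync state embedded in the filter */
    /* ... filter-specific fields ... */
} BlendContext;

FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs);
/* The generated blend_class is then used as the filter's priv_class. */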
enum {
    STATE_BOF,
    STATE_RUN,
    STATE_EOF,
};

static int consume_from_fifos(FFFrameSync *fs);

const AVClass *ff_framesync_get_class(void)
{
    return &framesync_class;
}

void ff_framesync_preinit(FFFrameSync *fs)
{
    if (fs->class)
        return;
    fs->class = &framesync_class;
    av_opt_set_defaults(fs);
}

int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
{
    /* For filters with several outputs, we will not be able to assume which
       output is relevant for ff_outlink_frame_wanted() and
       ff_outlink_set_status(). To be designed when needed. */
    av_assert0(parent->nb_outputs == 1);

    ff_framesync_preinit(fs);
    fs->parent = parent;
    fs->nb_in  = nb_in;

    fs->in = av_calloc(nb_in, sizeof(*fs->in));
    if (!fs->in)
        return AVERROR(ENOMEM);
    return 0;
}
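A typical caller drives ff_framesync_init() from its link configuration callback, fills in the per-input parameters, and finishes with ff_framesync_configure(). A minimal sketch under assumed names (MixContext, process_frame and the config_output placement are illustrative, not from this file):

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;              /* hypothetical filter context */
    unsigned i;
    int ret;

    if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
        return ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        FFFrameSyncIn *in = &s->fs.in[i];
        in->time_base = ctx->inputs[i]->time_base;
        in->sync      = 1;              /* every input participates in sync  */
        in->before    = EXT_STOP;       /* no output before all inputs start */
        in->after     = EXT_INFINITY;   /* keep extending frames after EOF   */
    }
    s->fs.on_event = process_frame;     /* hypothetical event callback */

    return ff_framesync_configure(&s->fs);
}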
static void framesync_eof(FFFrameSync *fs)
{
    fs->eof = 1;
    fs->frame_ready = 0;
    ff_outlink_set_status(fs->parent->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE);
}

static void framesync_sync_level_update(FFFrameSync *fs)
{
    unsigned i, level = 0;

    for (i = 0; i < fs->nb_in; i++)
        if (fs->in[i].state != STATE_EOF)
            level = FFMAX(level, fs->in[i].sync);
    av_assert0(level <= fs->sync_level);
    if (level < fs->sync_level)
        av_log(fs, AV_LOG_VERBOSE, "Sync level %u\n", level);
    if (level)
        fs->sync_level = level;
    else
        framesync_eof(fs);
}

int ff_framesync_configure(FFFrameSync *fs)
{
    unsigned i;

    if (!fs->opt_repeatlast || fs->opt_eof_action == EOF_ACTION_PASS) {
        fs->opt_repeatlast = 0;
        fs->opt_eof_action = EOF_ACTION_PASS;
    }
    if (fs->opt_shortest || fs->opt_eof_action == EOF_ACTION_ENDALL) {
        fs->opt_shortest = 1;
        fs->opt_eof_action = EOF_ACTION_ENDALL;
    }
    if (!fs->opt_repeatlast) {
        for (i = 1; i < fs->nb_in; i++) {
            fs->in[i].after = EXT_NULL;
            fs->in[i].sync  = 0;
        }
    }
    if (fs->opt_shortest) {
        for (i = 0; i < fs->nb_in; i++)
            fs->in[i].after = EXT_STOP;
    }

    if (!fs->time_base.num) {
        for (i = 0; i < fs->nb_in; i++) {
            if (fs->in[i].sync) {
                if (fs->time_base.num) {
                    fs->time_base = av_gcd_q(fs->time_base, fs->in[i].time_base,
                                             AV_TIME_BASE / 4, AV_TIME_BASE_Q);
                } else {
                    fs->time_base = fs->in[i].time_base;
                }
            }
        }
        if (!fs->time_base.num) {
            av_log(fs, AV_LOG_ERROR, "Impossible to set time base\n");
            return AVERROR(EINVAL);
        }
        av_log(fs, AV_LOG_VERBOSE, "Selected %d/%d time base\n",
               fs->time_base.num, fs->time_base.den);
    }

    for (i = 0; i < fs->nb_in; i++)
        fs->in[i].pts = fs->in[i].pts_next = AV_NOPTS_VALUE;
    fs->sync_level = UINT_MAX;
    framesync_sync_level_update(fs);

    return 0;
}
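To make the time-base merge above concrete, here is an illustrative computation (assumed values, not from the source): two sync'ed inputs with 1/25 and 1/30 time bases end up on a common 1/150 time base.

/* Illustration only: merging two typical video time bases. */
AVRational a  = { 1, 25 };
AVRational b  = { 1, 30 };
AVRational tb = av_gcd_q(a, b, AV_TIME_BASE / 4, AV_TIME_BASE_Q);
/* tb == 1/150: 1/25 is 6/150 and 1/30 is 5/150, so timestamps from either
 * input rescale to the merged time base without rounding. */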
static int framesync_advance(FFFrameSync *fs)
{
    unsigned i;
    int64_t pts;
    int ret;

    while (!(fs->frame_ready || fs->eof)) {
        ret = consume_from_fifos(fs);
        if (ret <= 0)
            return ret;

        pts = INT64_MAX;
        for (i = 0; i < fs->nb_in; i++)
            if (fs->in[i].have_next && fs->in[i].pts_next < pts)
                pts = fs->in[i].pts_next;
        if (pts == INT64_MAX) {
            framesync_eof(fs);
            break;
        }
        for (i = 0; i < fs->nb_in; i++) {
            if (fs->in[i].pts_next == pts ||
                (fs->in[i].before == EXT_INFINITY &&
                 fs->in[i].state == STATE_BOF)) {
                av_frame_free(&fs->in[i].frame);
                fs->in[i].frame = fs->in[i].frame_next;
                fs->in[i].pts = fs->in[i].pts_next;
                fs->in[i].frame_next = NULL;
                fs->in[i].pts_next = AV_NOPTS_VALUE;
                fs->in[i].have_next = 0;
                fs->in[i].state = fs->in[i].frame ? STATE_RUN : STATE_EOF;
                if (fs->in[i].sync == fs->sync_level && fs->in[i].frame)
                    fs->frame_ready = 1;
                if (fs->in[i].state == STATE_EOF &&
                    fs->in[i].after == EXT_STOP)
                    framesync_eof(fs);
            }
        }
        if (fs->frame_ready)
            for (i = 0; i < fs->nb_in; i++)
                if ((fs->in[i].state == STATE_BOF &&
                     fs->in[i].before == EXT_STOP))
                    fs->frame_ready = 0;
        fs->pts = pts;
    }
    return 0;
}
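The loop above always steps to the smallest queued timestamp across all inputs, and an output event is triggered only when an input whose sync value matches the current sync level receives a new frame. A hedged walkthrough for the usual dualinput configuration (main input sync 2, secondary input sync 1, both already on the common time base):

/*
 * Illustrative trace, not from the source; sync_level == 2, so only the
 * main input triggers output events.
 *
 *   main pts:      0   10   20   30
 *   secondary pts: 0      15      30
 *
 *   step to  0 -> both advance,      event at pts 0  (main 0,  secondary 0)
 *   step to 10 -> main advances,     event at pts 10 (main 10, secondary 0)
 *   step to 15 -> secondary advances, no event (its sync is below sync_level)
 *   step to 20 -> main advances,     event at pts 20 (main 20, secondary 15)
 *   step to 30 -> both advance,      event at pts 30 (main 30, secondary 30)
 */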
220 
221 static int64_t framesync_pts_extrapolate(FFFrameSync *fs, unsigned in,
222  int64_t pts)
223 {
224  /* Possible enhancement: use the link's frame rate */
225  return pts + 1;
226 }
227 
229 {
230  int64_t pts;
231 
232  av_assert0(!fs->in[in].have_next);
233  av_assert0(frame);
234  pts = av_rescale_q(frame->pts, fs->in[in].time_base, fs->time_base);
235  frame->pts = pts;
236  fs->in[in].frame_next = frame;
237  fs->in[in].pts_next = pts;
238  fs->in[in].have_next = 1;
239 }
240 
241 static void framesync_inject_status(FFFrameSync *fs, unsigned in, int status, int64_t pts)
242 {
243  av_assert0(!fs->in[in].have_next);
244  pts = fs->in[in].state != STATE_RUN || fs->in[in].after == EXT_INFINITY
245  ? INT64_MAX : framesync_pts_extrapolate(fs, in, fs->in[in].pts);
246  fs->in[in].sync = 0;
248  fs->in[in].frame_next = NULL;
249  fs->in[in].pts_next = pts;
250  fs->in[in].have_next = 1;
251 }
252 
253 int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
254  unsigned get)
255 {
256  AVFrame *frame;
257  unsigned need_copy = 0, i;
258  int64_t pts_next;
259  int ret;
260 
261  if (!fs->in[in].frame) {
262  *rframe = NULL;
263  return 0;
264  }
265  frame = fs->in[in].frame;
266  if (get) {
267  /* Find out if we need to copy the frame: is there another sync
268  stream, and do we know if its current frame will outlast this one? */
269  pts_next = fs->in[in].have_next ? fs->in[in].pts_next : INT64_MAX;
270  for (i = 0; i < fs->nb_in && !need_copy; i++)
271  if (i != in && fs->in[i].sync &&
272  (!fs->in[i].have_next || fs->in[i].pts_next < pts_next))
273  need_copy = 1;
274  if (need_copy) {
275  if (!(frame = av_frame_clone(frame)))
276  return AVERROR(ENOMEM);
277  if ((ret = av_frame_make_writable(frame)) < 0) {
279  return ret;
280  }
281  } else {
282  fs->in[in].frame = NULL;
283  }
284  fs->frame_ready = 0;
285  }
286  *rframe = frame;
287  return 0;
288 }
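The get parameter decides ownership: with get=1 the caller takes the frame for output (cloned and made writable first if another sync stream may still need it), with get=0 the frame is only borrowed and framesync keeps it for later events. A two-line sketch of the usual pattern inside an on_event callback (error checks omitted, illustrative only):

AVFrame *out, *ref;
ff_framesync_get_frame(fs, 0, &out, 1);   /* take the main frame for output  */
ff_framesync_get_frame(fs, 1, &ref, 0);   /* borrow the secondary, read-only */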
void ff_framesync_uninit(FFFrameSync *fs)
{
    unsigned i;

    for (i = 0; i < fs->nb_in; i++) {
        av_frame_free(&fs->in[i].frame);
        av_frame_free(&fs->in[i].frame_next);
    }

    av_freep(&fs->in);
}

static int consume_from_fifos(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *frame = NULL;
    int64_t pts;
    unsigned i, nb_active, nb_miss;
    int ret, status;

    nb_active = nb_miss = 0;
    for (i = 0; i < fs->nb_in; i++) {
        if (fs->in[i].have_next || fs->in[i].state == STATE_EOF)
            continue;
        nb_active++;
        ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
        if (ret < 0)
            return ret;
        if (ret) {
            av_assert0(frame);
            framesync_inject_frame(fs, i, frame);
        } else {
            ret = ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts);
            if (ret > 0) {
                framesync_inject_status(fs, i, status, pts);
            } else if (!ret) {
                nb_miss++;
            }
        }
    }
    if (nb_miss) {
        if (nb_miss == nb_active && !ff_outlink_frame_wanted(ctx->outputs[0]))
            return FFERROR_NOT_READY;
        for (i = 0; i < fs->nb_in; i++)
            if (!fs->in[i].have_next && fs->in[i].state != STATE_EOF)
                ff_inlink_request_frame(ctx->inputs[i]);
        return 0;
    }
    return 1;
}

int ff_framesync_activate(FFFrameSync *fs)
{
    int ret;

    ret = framesync_advance(fs);
    if (ret < 0)
        return ret;
    if (fs->eof || !fs->frame_ready)
        return 0;
    ret = fs->on_event(fs);
    if (ret < 0)
        return ret;
    fs->frame_ready = 0;

    return 0;
}
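On the filter side, the activate callback usually just delegates to ff_framesync_activate(), and uninit releases the framesync state. A minimal sketch, reusing the hypothetical MixContext from the earlier example:

static int activate(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;              /* hypothetical filter context */
    return ff_framesync_activate(&s->fs);
}

static void uninit(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    ff_framesync_uninit(&s->fs);
}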
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
{
    int ret;

    ret = ff_framesync_init(fs, parent, 2);
    if (ret < 0)
        return ret;
    fs->in[0].time_base = parent->inputs[0]->time_base;
    fs->in[1].time_base = parent->inputs[1]->time_base;
    fs->in[0].sync   = 2;
    fs->in[0].before = EXT_STOP;
    fs->in[0].after  = EXT_INFINITY;
    fs->in[1].sync   = 1;
    fs->in[1].before = EXT_NULL;
    fs->in[1].after  = EXT_INFINITY;
    return 0;
}

int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *mainpic = NULL, *secondpic = NULL;
    int ret;

    if ((ret = ff_framesync_get_frame(fs, 0, &mainpic, 1)) < 0 ||
        (ret = ff_framesync_get_frame(fs, 1, &secondpic, 0)) < 0) {
        av_frame_free(&mainpic);
        return ret;
    }
    av_assert0(mainpic);
    mainpic->pts = av_rescale_q(fs->pts, fs->time_base, ctx->outputs[0]->time_base);
    if (ctx->is_disabled)
        secondpic = NULL;
    *f0 = mainpic;
    *f1 = secondpic;
    return 0;
}

int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
{
    int ret;

    ret = ff_framesync_dualinput_get(fs, f0, f1);
    if (ret < 0)
        return ret;
    ret = ff_inlink_make_frame_writable(fs->parent->inputs[0], f0);
    if (ret < 0) {
        av_frame_free(f0);
        *f1 = NULL;
        return ret;
    }
    return 0;
}
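Putting the dualinput helpers together: a hedged sketch of the on_event callback of a two-input filter built on this file. OverlayLikeContext and blend_frames() are illustrative names, not FFmpeg APIs; ff_framesync_dualinput_get_writable() already rescales the main frame's pts to the output time base.

static int do_blend(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    OverlayLikeContext *s = ctx->priv;       /* hypothetical filter context */
    AVFrame *main_frame, *second_frame;
    int ret;

    ret = ff_framesync_dualinput_get_writable(fs, &main_frame, &second_frame);
    if (ret < 0)
        return ret;
    if (second_frame)   /* may be NULL, e.g. filter disabled or secondary not started */
        blend_frames(s, main_frame, second_frame);   /* hypothetical helper */
    return ff_filter_frame(ctx->outputs[0], main_frame);
}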