FFmpeg  4.4
avf_aphasemeter.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * audio to video multimedia aphasemeter filter
24  */
25 
26 #include "libavutil/avassert.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/timestamp.h"
32 #include "avfilter.h"
33 #include "formats.h"
34 #include "audio.h"
35 #include "video.h"
36 #include "internal.h"
37 #include "float.h"
38 
39 typedef struct AudioPhaseMeterContext {
40  const AVClass *class;
42  int do_video;
44  int w, h;
46  int contrast[4];
50  int is_mono;
54  float tolerance;
55  float angle;
56  float phase;
58  int64_t duration;
59  int64_t frame_end;
60  int64_t mono_idx[2];
61  int64_t out_phase_idx[2];
63 
/* Upper bound for the "duration" option: 24 hours in AV_TIME_BASE (microsecond) units. */
#define MAX_DURATION (24*60*60*1000000LL)
#define OFFSET(x) offsetof(AudioPhaseMeterContext, x)
/* Parenthesized so the whole expansion and the argument bind correctly. */
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
/* Length of a [start, end] index pair; (index) is parenthesized so pointer
 * expressions such as get_duration(p + 0) subscript correctly. */
#define get_duration(index) ((index)[1] - (index)[0])
68 
/* User-visible options; OFFSET() fields live in AudioPhaseMeterContext.
 * Each long name is followed by its single-letter alias where one exists. */
static const AVOption aphasemeter_options[] = {
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="800x400"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="800x400"}, 0, 0, FLAGS },
    { "rc", "set red contrast",   OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=2}, 0, 255, FLAGS },
    { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=7}, 0, 255, FLAGS },
    { "bc", "set blue contrast",  OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=1}, 0, 255, FLAGS },
    { "mpc", "set median phase color", OFFSET(mpc_str), AV_OPT_TYPE_STRING, {.str = "none"}, 0, 0, FLAGS },
    { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "phasing", "set mono and out-of-phase detection output", OFFSET(do_phasing_detection), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "tolerance", "set phase tolerance for mono detection", OFFSET(tolerance), AV_OPT_TYPE_FLOAT, {.dbl = 0.}, 0, 1, FLAGS },
    { "t",         "set phase tolerance for mono detection", OFFSET(tolerance), AV_OPT_TYPE_FLOAT, {.dbl = 0.}, 0, 1, FLAGS },
    { "angle", "set angle threshold for out-of-phase detection", OFFSET(angle), AV_OPT_TYPE_FLOAT, {.dbl = 170.}, 90, 180, FLAGS },
    { "a",     "set angle threshold for out-of-phase detection", OFFSET(angle), AV_OPT_TYPE_FLOAT, {.dbl = 170.}, 90, 180, FLAGS },
    { "duration", "set minimum mono or out-of-phase duration in seconds", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=2000000}, 0, MAX_DURATION, FLAGS },
    { "d",        "set minimum mono or out-of-phase duration in seconds", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=2000000}, 0, MAX_DURATION, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aphasemeter);
90 
92 {
93  AudioPhaseMeterContext *s = ctx->priv;
96  AVFilterLink *inlink = ctx->inputs[0];
97  AVFilterLink *outlink = ctx->outputs[0];
99  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
100  int ret;
101 
103  if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0 ||
104  (ret = ff_formats_ref (formats, &outlink->incfg.formats )) < 0 ||
106  (ret = ff_channel_layouts_ref (layout , &inlink->outcfg.channel_layouts)) < 0 ||
107  (ret = ff_channel_layouts_ref (layout , &outlink->incfg.channel_layouts)) < 0)
108  return ret;
109 
111  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
112  (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
113  return ret;
114 
115  if (s->do_video) {
116  AVFilterLink *outlink = ctx->outputs[1];
117 
119  if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
120  return ret;
121  }
122 
123  return 0;
124 }
125 
126 static int config_input(AVFilterLink *inlink)
127 {
128  AVFilterContext *ctx = inlink->dst;
129  AudioPhaseMeterContext *s = ctx->priv;
130  int nb_samples;
131  s->duration = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
132 
133  if (s->do_video) {
134  nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num));
135  inlink->partial_buf_size =
136  inlink->min_samples =
137  inlink->max_samples = nb_samples;
138  }
139 
140  return 0;
141 }
142 
143 static int config_video_output(AVFilterLink *outlink)
144 {
145  AVFilterContext *ctx = outlink->src;
146  AudioPhaseMeterContext *s = ctx->priv;
147 
148  outlink->w = s->w;
149  outlink->h = s->h;
150  outlink->sample_aspect_ratio = (AVRational){1,1};
151  outlink->frame_rate = s->frame_rate;
152 
153  if (!strcmp(s->mpc_str, "none"))
154  s->draw_median_phase = 0;
155  else if (av_parse_color(s->mpc, s->mpc_str, -1, ctx) >= 0)
156  s->draw_median_phase = 1;
157  else
158  return AVERROR(EINVAL);
159 
160  return 0;
161 }
162 
/* Map a phase value in [-1, 1] to a column index in [0, w-1] (truncating). */
static inline int get_x(float phase, int w)
{
    const double normalized = (phase + 1.) / 2.;
    return normalized * (w - 1);
}
167 
168 static inline void add_metadata(AVFrame *insamples, const char *key, char *value)
169 {
170  char buf[128];
171 
172  snprintf(buf, sizeof(buf), "lavfi.aphasemeter.%s", key);
173  av_dict_set(&insamples->metadata, buf, value, 0);
174 }
175 
/*
 * State machine tracking mono (fully in-phase) stretches.  Called once per
 * audio frame with the frame's mono measurement, and once from uninit() with
 * insamples == NULL and mono_measurement == 0 to flush a pending stretch.
 * The three ifs are deliberately not exclusive: entering mono and reporting
 * "mono_start" may happen within the same call.
 */
static inline void update_mono_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int mono_measurement)
{
    int64_t mono_duration;

    /* Transition into mono: remember where the stretch started.
     * insamples is non-NULL here: the NULL flush call passes
     * mono_measurement == 0, which skips this branch. */
    if (!s->is_mono && mono_measurement) {
        s->is_mono = 1;
        s->start_mono_presence = 1;
        s->mono_idx[0] = insamples->pts;
    }
    /* Inside a mono stretch whose start has not been reported yet: report
     * "mono_start" once the stretch has lasted at least s->duration. */
    if (s->is_mono && mono_measurement && s->start_mono_presence) {
        s->mono_idx[1] = s->frame_end;
        mono_duration = get_duration(s->mono_idx);
        if (mono_duration >= s->duration) {
            add_metadata(insamples, "mono_start", av_ts2timestr(s->mono_idx[0], &s->time_base));
            av_log(s, AV_LOG_INFO, "mono_start: %s\n", av_ts2timestr(s->mono_idx[0], &s->time_base));
            s->start_mono_presence = 0;
        }
    }
    /* Mono stretch ended (or EOF flush): report end and duration if it was
     * long enough, then reset.  Metadata is only attached when a frame is
     * available; the log line is emitted either way. */
    if (s->is_mono && !mono_measurement) {
        s->mono_idx[1] = insamples ? insamples->pts : s->frame_end;
        mono_duration = get_duration(s->mono_idx);
        if (mono_duration >= s->duration) {
            if (insamples) {
                add_metadata(insamples, "mono_end", av_ts2timestr(s->mono_idx[1], &s->time_base));
                add_metadata(insamples, "mono_duration", av_ts2timestr(mono_duration, &s->time_base));
            }
            av_log(s, AV_LOG_INFO, "mono_end: %s | mono_duration: %s\n", av_ts2timestr(s->mono_idx[1], &s->time_base), av_ts2timestr(mono_duration, &s->time_base));
        }
        s->is_mono = 0;
    }
}
206 
/*
 * State machine tracking out-of-phase stretches; mirrors
 * update_mono_detection() exactly, with the out_phase_* state and keys.
 * Also called from uninit() with insamples == NULL / measurement == 0
 * to flush a pending stretch at EOF.
 */
static inline void update_out_phase_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int out_phase_measurement)
{
    int64_t out_phase_duration;

    /* Transition into out-of-phase: record the start timestamp.
     * insamples is non-NULL here (the NULL flush passes measurement == 0). */
    if (!s->is_out_phase && out_phase_measurement) {
        s->is_out_phase = 1;
        s->start_out_phase_presence = 1;
        s->out_phase_idx[0] = insamples->pts;
    }
    /* Report "out_phase_start" once the stretch reaches s->duration. */
    if (s->is_out_phase && out_phase_measurement && s->start_out_phase_presence) {
        s->out_phase_idx[1] = s->frame_end;
        out_phase_duration = get_duration(s->out_phase_idx);
        if (out_phase_duration >= s->duration) {
            add_metadata(insamples, "out_phase_start", av_ts2timestr(s->out_phase_idx[0], &s->time_base));
            av_log(s, AV_LOG_INFO, "out_phase_start: %s\n", av_ts2timestr(s->out_phase_idx[0], &s->time_base));
            s->start_out_phase_presence = 0;
        }
    }
    /* Stretch ended (or EOF flush): report end/duration if long enough. */
    if (s->is_out_phase && !out_phase_measurement) {
        s->out_phase_idx[1] = insamples ? insamples->pts : s->frame_end;
        out_phase_duration = get_duration(s->out_phase_idx);
        if (out_phase_duration >= s->duration) {
            if (insamples) {
                add_metadata(insamples, "out_phase_end", av_ts2timestr(s->out_phase_idx[1], &s->time_base));
                add_metadata(insamples, "out_phase_duration", av_ts2timestr(out_phase_duration, &s->time_base));
            }
            av_log(s, AV_LOG_INFO, "out_phase_end: %s | out_phase_duration: %s\n", av_ts2timestr(s->out_phase_idx[1], &s->time_base), av_ts2timestr(out_phase_duration, &s->time_base));
        }
        s->is_out_phase = 0;
    }
}
237 
238 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
239 {
240  AVFilterContext *ctx = inlink->dst;
241  AudioPhaseMeterContext *s = ctx->priv;
242  AVFilterLink *outlink = s->do_video ? ctx->outputs[1] : NULL;
243  AVFilterLink *aoutlink = ctx->outputs[0];
244  AVDictionary **metadata;
245  const int rc = s->contrast[0];
246  const int gc = s->contrast[1];
247  const int bc = s->contrast[2];
248  float fphase = 0;
249  AVFrame *out;
250  uint8_t *dst;
251  int i;
252  int mono_measurement;
253  int out_phase_measurement;
254  float tolerance = 1.0f - s->tolerance;
255  float angle = cosf(s->angle/180.0f*M_PI);
256 
257  if (s->do_video && (!s->out || s->out->width != outlink->w ||
258  s->out->height != outlink->h)) {
259  av_frame_free(&s->out);
260  s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
261  if (!s->out) {
262  av_frame_free(&in);
263  return AVERROR(ENOMEM);
264  }
265 
266  out = s->out;
267  for (i = 0; i < outlink->h; i++)
268  memset(out->data[0] + i * out->linesize[0], 0, outlink->w * 4);
269  } else if (s->do_video) {
270  out = s->out;
271  for (i = outlink->h - 1; i >= 10; i--)
272  memmove(out->data[0] + (i ) * out->linesize[0],
273  out->data[0] + (i-1) * out->linesize[0],
274  outlink->w * 4);
275  for (i = 0; i < outlink->w; i++)
276  AV_WL32(out->data[0] + i * 4, 0);
277  }
278 
279  for (i = 0; i < in->nb_samples; i++) {
280  const float *src = (float *)in->data[0] + i * 2;
281  const float f = src[0] * src[1] / (src[0]*src[0] + src[1] * src[1]) * 2;
282  const float phase = isnan(f) ? 1 : f;
283  const int x = get_x(phase, s->w);
284 
285  if (s->do_video) {
286  dst = out->data[0] + x * 4;
287  dst[0] = FFMIN(255, dst[0] + rc);
288  dst[1] = FFMIN(255, dst[1] + gc);
289  dst[2] = FFMIN(255, dst[2] + bc);
290  dst[3] = 255;
291  }
292  fphase += phase;
293  }
294  fphase /= in->nb_samples;
295  s->phase = fphase;
296 
297  if (s->do_video) {
298  if (s->draw_median_phase) {
299  dst = out->data[0] + get_x(fphase, s->w) * 4;
300  AV_WL32(dst, AV_RL32(s->mpc));
301  }
302 
303  for (i = 1; i < 10 && i < outlink->h; i++)
304  memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4);
305  }
306 
307  metadata = &in->metadata;
308  if (metadata) {
309  uint8_t value[128];
310 
311  snprintf(value, sizeof(value), "%f", fphase);
312  add_metadata(in, "phase", value);
313  }
314 
315  if (s->do_phasing_detection) {
316  s->time_base = inlink->time_base;
317  s->frame_end = in->pts + av_rescale_q(in->nb_samples,
318  (AVRational){ 1, in->sample_rate }, inlink->time_base);
319 
320  mono_measurement = (tolerance - fphase) < FLT_EPSILON;
321  out_phase_measurement = (angle - fphase) > FLT_EPSILON;
322 
323  update_mono_detection(s, in, mono_measurement);
324  update_out_phase_detection(s, in, out_phase_measurement);
325  }
326 
327  if (s->do_video) {
328  AVFrame *clone;
329 
330  s->out->pts = in->pts;
331  clone = av_frame_clone(s->out);
332  if (!clone)
333  return AVERROR(ENOMEM);
334  ff_filter_frame(outlink, clone);
335  }
336  return ff_filter_frame(aoutlink, in);
337 }
338 
340 {
341  AudioPhaseMeterContext *s = ctx->priv;
342 
343  if (s->do_phasing_detection) {
346  }
347  av_frame_free(&s->out);
348 }
349 
351 {
352  AudioPhaseMeterContext *s = ctx->priv;
353  AVFilterPad pad;
354  int ret;
355 
356  pad = (AVFilterPad){
357  .name = "out0",
358  .type = AVMEDIA_TYPE_AUDIO,
359  };
360  ret = ff_insert_outpad(ctx, 0, &pad);
361  if (ret < 0)
362  return ret;
363 
364  if (s->do_video) {
365  pad = (AVFilterPad){
366  .name = "out1",
367  .type = AVMEDIA_TYPE_VIDEO,
368  .config_props = config_video_output,
369  };
370  ret = ff_insert_outpad(ctx, 1, &pad);
371  if (ret < 0)
372  return ret;
373  }
374 
375  return 0;
376 }
377 
/* Single audio input; fixed frame size is set up in config_input(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
387 
389  .name = "aphasemeter",
390  .description = NULL_IF_CONFIG_SMALL("Convert input audio to phase meter video output."),
391  .init = init,
392  .uninit = uninit,
393  .query_formats = query_formats,
394  .priv_size = sizeof(AudioPhaseMeterContext),
395  .inputs = inputs,
396  .outputs = NULL,
397  .priv_class = &aphasemeter_class,
399 };
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:925
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define av_cold
Definition: attributes.h:88
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
uint8_t
simple assert() macros that are a bit more flexible than ISO C assert().
static void update_mono_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int mono_measurement)
AVFILTER_DEFINE_CLASS(aphasemeter)
static void add_metadata(AVFrame *insamples, const char *key, char *value)
AVFilter ff_avf_aphasemeter
static int get_x(float phase, int w)
static int query_formats(AVFilterContext *ctx)
#define get_duration(index)
static int config_input(AVFilterLink *inlink)
#define FLAGS
static const AVFilterPad inputs[]
#define MAX_DURATION
static void update_out_phase_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int out_phase_measurement)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static av_cold int init(AVFilterContext *ctx)
static av_cold void uninit(AVFilterContext *ctx)
static const AVOption aphasemeter_options[]
#define OFFSET(x)
static int config_video_output(AVFilterLink *outlink)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1094
Main libavfilter public API header.
#define AV_RL32
Definition: intreadwrite.h:146
#define flags(name, subs,...)
Definition: cbs_av1.c:561
#define s(width, name)
Definition: cbs_vp9.c:257
#define f(width, name)
Definition: cbs_vp9.c:255
uint64_t layout
audio channel layout utility functions
#define FFMIN(a, b)
Definition: common.h:105
#define FFMAX(a, b)
Definition: common.h:103
#define NULL
Definition: coverity.c:32
double value
Definition: eval.c:98
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:338
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add ref as a new reference to formats.
Definition: formats.c:466
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:461
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:421
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:235
@ AV_OPT_TYPE_DURATION
Definition: opt.h:239
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:238
@ AV_OPT_TYPE_INT
Definition: opt.h:225
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
#define AV_CH_LAYOUT_STEREO
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AVERROR(e)
Definition: error.h:43
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
const char * key
int i
Definition: input.c:407
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:248
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
#define isnan(x)
Definition: libm.h:340
#define cosf(x)
Definition: libm.h:78
uint8_t w
Definition: llviddspenc.c:39
#define M_PI
Definition: mathematics.h:52
AVOptions.
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:354
misc parsing utilities
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
formats
Definition: signature.h:48
#define snprintf
Definition: snprintf.h:34
Describe the class of an AVClass context structure.
Definition: log.h:67
A list of supported channel layouts.
Definition: formats.h:86
An instance of a filter.
Definition: avfilter.h:341
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:445
AVFilterChannelLayouts * channel_layouts
Lists of supported channel layouts, only for audio.
Definition: avfilter.h:455
AVFilterFormats * samplerates
Lists of supported sample rates, only for audio.
Definition: avfilter.h:450
A list of supported formats for one end of a filter link.
Definition: formats.h:65
A filter pad used for either input or output.
Definition: internal.h:54
const char * name
Pad name.
Definition: internal.h:60
Filter definition.
Definition: avfilter.h:145
const char * name
Filter name.
Definition: avfilter.h:149
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
AVDictionary * metadata
metadata.
Definition: frame.h:604
AVOption.
Definition: opt.h:248
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define av_log(a,...)
#define src
Definition: vp8dsp.c:255
FILE * out
Definition: movenc.c:54
int64_t duration
Definition: movenc.c:64
AVFormatContext * ctx
Definition: movenc.c:48
timestamp utils, mostly useful for debugging/logging purposes
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
if(ret< 0)
Definition: vf_mcdeint.c:282
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99