af_afade.c
1 /*
2  * Copyright (c) 2013-2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * fade audio filter
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/opt.h"
31 #include "audio.h"
32 #include "avfilter.h"
33 #include "filters.h"
34 
35 typedef struct AudioFadeContext {
36  const AVClass *class;
37  int nb_inputs;
38  int type;
39  int curve, curve2;
40  int64_t nb_samples;
41  int64_t start_sample;
42  int64_t duration;
43  int64_t start_time;
44  double silence;
45  double unity;
46  int overlap;
47  int64_t pts;
48  int xfade_idx;
49 
50  void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
51  int nb_samples, int channels, int direction,
52  int64_t start, int64_t range, int curve,
53  double silence, double unity);
54  void (*scale_samples)(uint8_t **dst, uint8_t * const *src,
55  int nb_samples, int channels, double unity);
56  void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
57  uint8_t * const *cf1,
58  int nb_samples, int channels,
59  int curve0, int curve1);
60 } AudioFadeContext;
61 
62 enum CurveType { NONE = -1, TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, SINC, ISINC, QUAT, QUATR, QSIN2, HSIN2, NB_CURVES };
63 
64 #define OFFSET(x) offsetof(AudioFadeContext, x)
65 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
66 #define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
67 
68  static const enum AVSampleFormat sample_fmts[] = {
69  AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
70  AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
71  AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
72  AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
73  AV_SAMPLE_FMT_NONE
74 };
75 
76 static double fade_gain(int curve, int64_t index, int64_t range, double silence, double unity)
77 {
78 #define CUBE(a) ((a)*(a)*(a))
79  double gain;
80 
81  gain = av_clipd(1.0 * index / range, 0, 1.0);
82 
83  switch (curve) {
84  case QSIN:
85  gain = sin(gain * M_PI / 2.0);
86  break;
87  case IQSIN:
88  /* 0.6... = 2 / M_PI */
89  gain = 0.6366197723675814 * asin(gain);
90  break;
91  case ESIN:
92  gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
93  break;
94  case HSIN:
95  gain = (1.0 - cos(gain * M_PI)) / 2.0;
96  break;
97  case IHSIN:
98  /* 0.3... = 1 / M_PI */
99  gain = 0.3183098861837907 * acos(1 - 2 * gain);
100  break;
101  case EXP:
102  /* -11.5... = 5*ln(0.1) */
103  gain = exp(-11.512925464970227 * (1 - gain));
104  break;
105  case LOG:
106  gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
107  break;
108  case PAR:
109  gain = 1 - sqrt(1 - gain);
110  break;
111  case IPAR:
112  gain = (1 - (1 - gain) * (1 - gain));
113  break;
114  case QUA:
115  gain *= gain;
116  break;
117  case CUB:
118  gain = CUBE(gain);
119  break;
120  case SQU:
121  gain = sqrt(gain);
122  break;
123  case CBR:
124  gain = cbrt(gain);
125  break;
126  case DESE:
127  gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
128  break;
129  case DESI:
130  gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
131  break;
132  case LOSI: {
133  const double a = 1. / (1. - 0.787) - 1;
134  double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
135  double B = 1. / (1.0 + exp(a));
136  double C = 1. / (1.0 + exp(0-a));
137  gain = (A - B) / (C - B);
138  }
139  break;
140  case SINC:
141  gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
142  break;
143  case ISINC:
144  gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
145  break;
146  case QUAT:
147  gain = gain * gain * gain * gain;
148  break;
149  case QUATR:
150  gain = pow(gain, 0.25);
151  break;
152  case QSIN2:
153  gain = sin(gain * M_PI / 2.0) * sin(gain * M_PI / 2.0);
154  break;
155  case HSIN2:
156  gain = pow((1.0 - cos(gain * M_PI)) / 2.0, 2.0);
157  break;
158  case NONE:
159  gain = 1.0;
160  break;
161  }
162 
163  return silence + (unity - silence) * gain;
164 }
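/*
 * Illustrative walk-through of fade_gain() (values chosen arbitrarily): with
 * the default TRI curve the raw gain is simply index / range clipped to
 * [0, 1], so halfway through a fade (index == range / 2) the curve yields
 * 0.5.  The return value remaps that onto [silence, unity]: with
 * silence = 0.0 and unity = 1.0 it stays 0.5, with silence = 0.2 it becomes
 * 0.2 + (1.0 - 0.2) * 0.5 = 0.6.
 */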
165 
166 #define FADE_PLANAR(name, type) \
167 static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
168  int nb_samples, int channels, int dir, \
169  int64_t start, int64_t range,int curve,\
170  double silence, double unity) \
171 { \
172  int i, c; \
173  \
174  for (i = 0; i < nb_samples; i++) { \
175  double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
176  for (c = 0; c < channels; c++) { \
177  type *d = (type *)dst[c]; \
178  const type *s = (type *)src[c]; \
179  \
180  d[i] = s[i] * gain; \
181  } \
182  } \
183 }
184 
185 #define FADE(name, type) \
186 static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
187  int nb_samples, int channels, int dir, \
188  int64_t start, int64_t range, int curve, \
189  double silence, double unity) \
190 { \
191  type *d = (type *)dst[0]; \
192  const type *s = (type *)src[0]; \
193  int i, c, k = 0; \
194  \
195  for (i = 0; i < nb_samples; i++) { \
196  double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
197  for (c = 0; c < channels; c++, k++) \
198  d[k] = s[k] * gain; \
199  } \
200 }
201 
202 FADE_PLANAR(dbl, double)
203 FADE_PLANAR(flt, float)
204 FADE_PLANAR(s16, int16_t)
205 FADE_PLANAR(s32, int32_t)
206 
207 FADE(dbl, double)
208 FADE(flt, float)
209 FADE(s16, int16_t)
210 FADE(s32, int32_t)
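/*
 * The *_PLANAR helpers above address one plane per channel (src[c][i]),
 * while the packed helpers walk a single interleaved plane with the running
 * index k.  Both recompute fade_gain() once per sample; `dir` is +1 for a
 * fade-in (the gain rises from `start`) and -1 for a fade-out (the gain
 * falls towards the silence level).
 */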
211 
212 #define SCALE_PLANAR(name, type) \
213 static void scale_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
214  int nb_samples, int channels, \
215  double gain) \
216 { \
217  int i, c; \
218  \
219  for (i = 0; i < nb_samples; i++) { \
220  for (c = 0; c < channels; c++) { \
221  type *d = (type *)dst[c]; \
222  const type *s = (type *)src[c]; \
223  \
224  d[i] = s[i] * gain; \
225  } \
226  } \
227 }
228 
229 #define SCALE(name, type) \
230 static void scale_samples_## name (uint8_t **dst, uint8_t * const *src, \
231  int nb_samples, int channels, double gain)\
232 { \
233  type *d = (type *)dst[0]; \
234  const type *s = (type *)src[0]; \
235  int i, c, k = 0; \
236  \
237  for (i = 0; i < nb_samples; i++) { \
238  for (c = 0; c < channels; c++, k++) \
239  d[k] = s[k] * gain; \
240  } \
241 }
242 
243 SCALE_PLANAR(dbl, double)
244 SCALE_PLANAR(flt, float)
245 SCALE_PLANAR(s16, int16_t)
246 SCALE_PLANAR(s32, int32_t)
247 
248 SCALE(dbl, double)
249 SCALE(flt, float)
250 SCALE(s16, int16_t)
251 SCALE(s32, int32_t)
252 
253 static int config_output(AVFilterLink *outlink)
254 {
255  AVFilterContext *ctx = outlink->src;
256  AudioFadeContext *s = ctx->priv;
257 
258  switch (outlink->format) {
259  case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl;
260  s->scale_samples = scale_samples_dbl;
261  break;
262  case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp;
263  s->scale_samples = scale_samples_dblp;
264  break;
265  case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt;
266  s->scale_samples = scale_samples_flt;
267  break;
268  case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp;
269  s->scale_samples = scale_samples_fltp;
270  break;
271  case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16;
272  s->scale_samples = scale_samples_s16;
273  break;
274  case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p;
275  s->scale_samples = scale_samples_s16p;
276  break;
277  case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32;
278  s->scale_samples = scale_samples_s32;
279  break;
280  case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p;
281  s->scale_samples = scale_samples_s32p;
282  break;
283  }
284 
285  if (s->duration)
286  s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
287  s->duration = 0;
288  if (s->start_time)
289  s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
290  s->start_time = 0;
291 
292  return 0;
293 }
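/*
 * The av_rescale() calls above convert AV_TIME_BASE (microsecond) durations
 * into sample counts at the output rate.  As a worked example, a 2.5 second
 * fade at 44100 Hz gives av_rescale(2500000, 44100, 1000000) == 110250
 * samples.
 */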
294 
295 #if CONFIG_AFADE_FILTER
296 
297 static const AVOption afade_options[] = {
298  { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, .unit = "type" },
299  { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, .unit = "type" },
300  { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, TFLAGS, .unit = "type" },
301  { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, TFLAGS, .unit = "type" },
302  { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
303  { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
304  { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
305  { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
306  { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
307  { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
308  { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
309  { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
310  { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, .unit = "curve" },
311  { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, .unit = "curve" },
312  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, TFLAGS, .unit = "curve" },
313  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, TFLAGS, .unit = "curve" },
314  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, TFLAGS, .unit = "curve" },
315  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, TFLAGS, .unit = "curve" },
316  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, TFLAGS, .unit = "curve" },
317  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, TFLAGS, .unit = "curve" },
318  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, TFLAGS, .unit = "curve" },
319  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, TFLAGS, .unit = "curve" },
320  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, TFLAGS, .unit = "curve" },
321  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, TFLAGS, .unit = "curve" },
322  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, TFLAGS, .unit = "curve" },
323  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, TFLAGS, .unit = "curve" },
324  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, TFLAGS, .unit = "curve" },
325  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, TFLAGS, .unit = "curve" },
326  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, TFLAGS, .unit = "curve" },
327  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, TFLAGS, .unit = "curve" },
328  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, TFLAGS, .unit = "curve" },
329  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, TFLAGS, .unit = "curve" },
330  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, TFLAGS, .unit = "curve" },
331  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, TFLAGS, .unit = "curve" },
332  { "quat", "quartic", 0, AV_OPT_TYPE_CONST, {.i64 = QUAT }, 0, 0, TFLAGS, .unit = "curve" },
333  { "quatr", "quartic root", 0, AV_OPT_TYPE_CONST, {.i64 = QUATR}, 0, 0, TFLAGS, .unit = "curve" },
334  { "qsin2", "squared quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN2}, 0, 0, TFLAGS, .unit = "curve" },
335  { "hsin2", "squared half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN2}, 0, 0, TFLAGS, .unit = "curve" },
336  { "silence", "set the silence gain", OFFSET(silence), AV_OPT_TYPE_DOUBLE, {.dbl = 0 }, 0, 1, TFLAGS },
337  { "unity", "set the unity gain", OFFSET(unity), AV_OPT_TYPE_DOUBLE, {.dbl = 1 }, 0, 1, TFLAGS },
338  { NULL }
339 };
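/*
 * Rough usage sketch for the options above (timings are arbitrary examples):
 *   ffmpeg -i in.wav -af "afade=t=in:ss=0:d=5" out.wav
 * fades the input in over its first 5 seconds, while
 *   ffmpeg -i in.wav -af "afade=t=out:st=25:d=5:curve=qsin" out.wav
 * applies a quarter-sine fade-out starting at 25 seconds.
 */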
340 
341 AVFILTER_DEFINE_CLASS(afade);
342 
343 static av_cold int init(AVFilterContext *ctx)
344 {
345  AudioFadeContext *s = ctx->priv;
346 
347  if (INT64_MAX - s->nb_samples < s->start_sample)
348  return AVERROR(EINVAL);
349 
350  return 0;
351 }
352 
353 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
354 {
355  AudioFadeContext *s = inlink->dst->priv;
356  AVFilterLink *outlink = inlink->dst->outputs[0];
357  int nb_samples = buf->nb_samples;
358  AVFrame *out_buf;
359  int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
360 
361  if (s->unity == 1.0 &&
362  ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
363  ( s->type && (cur_sample + nb_samples < s->start_sample))))
364  return ff_filter_frame(outlink, buf);
365 
366  if (av_frame_is_writable(buf)) {
367  out_buf = buf;
368  } else {
369  out_buf = ff_get_audio_buffer(outlink, nb_samples);
370  if (!out_buf)
371  return AVERROR(ENOMEM);
372  av_frame_copy_props(out_buf, buf);
373  }
374 
375  if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
376  ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
377  if (s->silence == 0.) {
378  av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
379  out_buf->ch_layout.nb_channels, out_buf->format);
380  } else {
381  s->scale_samples(out_buf->extended_data, buf->extended_data,
382  nb_samples, buf->ch_layout.nb_channels,
383  s->silence);
384  }
385  } else if (( s->type && (cur_sample + nb_samples < s->start_sample)) ||
386  (!s->type && (s->start_sample + s->nb_samples < cur_sample))) {
387  s->scale_samples(out_buf->extended_data, buf->extended_data,
388  nb_samples, buf->ch_layout.nb_channels,
389  s->unity);
390  } else {
391  int64_t start;
392 
393  if (!s->type)
394  start = cur_sample - s->start_sample;
395  else
396  start = s->start_sample + s->nb_samples - cur_sample;
397 
398  s->fade_samples(out_buf->extended_data, buf->extended_data,
399  nb_samples, buf->ch_layout.nb_channels,
400  s->type ? -1 : 1, start,
401  s->nb_samples, s->curve, s->silence, s->unity);
402  }
403 
404  if (buf != out_buf)
405  av_frame_free(&buf);
406 
407  return ff_filter_frame(outlink, out_buf);
408 }
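/*
 * Worked example of the region logic above, for a fade-in (type == 0) with
 * start_sample = 44100 and nb_samples = 44100: a short frame starting at
 * sample 22050 ends before the fade window and is scaled by the silence
 * gain, a frame starting past sample 88200 is scaled by the unity gain (or
 * passed through untouched when unity == 1.0), and a frame starting at
 * sample 66150 is faded with start = 66150 - 44100 = 22050 and direction +1.
 */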
409 
410 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
411  char *res, int res_len, int flags)
412 {
413  int ret;
414 
415  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
416  if (ret < 0)
417  return ret;
418 
419  return config_output(ctx->outputs[0]);
420 }
421 
422 static const AVFilterPad avfilter_af_afade_inputs[] = {
423  {
424  .name = "default",
425  .type = AVMEDIA_TYPE_AUDIO,
426  .filter_frame = filter_frame,
427  },
428 };
429 
430 static const AVFilterPad avfilter_af_afade_outputs[] = {
431  {
432  .name = "default",
433  .type = AVMEDIA_TYPE_AUDIO,
434  .config_props = config_output,
435  },
436 };
437 
438 const FFFilter ff_af_afade = {
439  .p.name = "afade",
440  .p.description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
441  .p.priv_class = &afade_class,
442  .p.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
443  .priv_size = sizeof(AudioFadeContext),
444  .init = init,
445  FILTER_INPUTS(avfilter_af_afade_inputs),
446  FILTER_OUTPUTS(avfilter_af_afade_outputs),
447  FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
448  .process_command = process_command,
449 };
450 
451 #endif /* CONFIG_AFADE_FILTER */
452 
453 #if CONFIG_ACROSSFADE_FILTER
454 
455 static const AVOption acrossfade_options[] = {
456  { "inputs", "set number of input files to cross fade", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT32_MAX, FLAGS },
457  { "n", "set number of input files to cross fade", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT32_MAX, FLAGS },
458  { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
459  { "ns", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
460  { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
461  { "d", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
462  { "overlap", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
463  { "o", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
464  { "curve1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
465  { "c1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
466  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, FLAGS, .unit = "curve" },
467  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, .unit = "curve" },
468  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, .unit = "curve" },
469  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, .unit = "curve" },
470  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, .unit = "curve" },
471  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, .unit = "curve" },
472  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, .unit = "curve" },
473  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, .unit = "curve" },
474  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, .unit = "curve" },
475  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, .unit = "curve" },
476  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, .unit = "curve" },
477  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, .unit = "curve" },
478  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, .unit = "curve" },
479  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, .unit = "curve" },
480  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, .unit = "curve" },
481  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, .unit = "curve" },
482  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, .unit = "curve" },
483  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, FLAGS, .unit = "curve" },
484  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, FLAGS, .unit = "curve" },
485  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, FLAGS, .unit = "curve" },
486  { "quat", "quartic", 0, AV_OPT_TYPE_CONST, {.i64 = QUAT }, 0, 0, FLAGS, .unit = "curve" },
487  { "quatr", "quartic root", 0, AV_OPT_TYPE_CONST, {.i64 = QUATR}, 0, 0, FLAGS, .unit = "curve" },
488  { "qsin2", "squared quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN2}, 0, 0, FLAGS, .unit = "curve" },
489  { "hsin2", "squared half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN2}, 0, 0, FLAGS, .unit = "curve" },
490  { "curve2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
491  { "c2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
492  { NULL }
493 };
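/*
 * Rough usage sketch for the options above (duration chosen arbitrarily):
 *   ffmpeg -i a.wav -i b.wav -filter_complex "acrossfade=d=5:c1=qsin:c2=qsin" out.wav
 * cross-fades the last 5 seconds of the first input into the first 5 seconds
 * of the second; with o=0 the fade-out and fade-in play back to back instead
 * of overlapping.
 */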
494 
495 AVFILTER_DEFINE_CLASS(acrossfade);
496 
497 #define CROSSFADE_PLANAR(name, type) \
498 static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
499  uint8_t * const *cf1, \
500  int nb_samples, int channels, \
501  int curve0, int curve1) \
502 { \
503  int i, c; \
504  \
505  for (i = 0; i < nb_samples; i++) { \
506  double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples,0.,1.);\
507  double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
508  for (c = 0; c < channels; c++) { \
509  type *d = (type *)dst[c]; \
510  const type *s0 = (type *)cf0[c]; \
511  const type *s1 = (type *)cf1[c]; \
512  \
513  d[i] = s0[i] * gain0 + s1[i] * gain1; \
514  } \
515  } \
516 }
517 
518 #define CROSSFADE(name, type) \
519 static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
520  uint8_t * const *cf1, \
521  int nb_samples, int channels, \
522  int curve0, int curve1) \
523 { \
524  type *d = (type *)dst[0]; \
525  const type *s0 = (type *)cf0[0]; \
526  const type *s1 = (type *)cf1[0]; \
527  int i, c, k = 0; \
528  \
529  for (i = 0; i < nb_samples; i++) { \
530  double gain0 = fade_gain(curve0, nb_samples - 1-i,nb_samples,0.,1.);\
531  double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
532  for (c = 0; c < channels; c++, k++) \
533  d[k] = s0[k] * gain0 + s1[k] * gain1; \
534  } \
535 }
536 
537 CROSSFADE_PLANAR(dbl, double)
538 CROSSFADE_PLANAR(flt, float)
539 CROSSFADE_PLANAR(s16, int16_t)
540 CROSSFADE_PLANAR(s32, int32_t)
541 
542 CROSSFADE(dbl, double)
543 CROSSFADE(flt, float)
544 CROSSFADE(s16, int16_t)
545 CROSSFADE(s32, int32_t)
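/*
 * In the helpers above gain0 evaluates the fade-out curve backwards
 * (index nb_samples - 1 - i) while gain1 evaluates the fade-in curve
 * forwards, both over the full overlap with silence 0.0 and unity 1.0.
 * With matching tri curves the two gains sum to (nb_samples - 1) / nb_samples,
 * so the combined level stays essentially constant across the cross-fade.
 */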
546 
547 static int pass_frame(AVFilterLink *inlink, AVFilterLink *outlink, int64_t *pts)
548 {
549  AVFrame *in;
550  int ret = ff_inlink_consume_frame(inlink, &in);
551  if (ret < 0)
552  return ret;
553  av_assert1(ret);
554  in->pts = *pts;
555  *pts += av_rescale_q(in->nb_samples,
556  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
557  return ff_filter_frame(outlink, in);
558 }
559 
560 static int pass_samples(AVFilterLink *inlink, AVFilterLink *outlink, unsigned nb_samples, int64_t *pts)
561 {
562  AVFrame *in;
563  int ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
564  if (ret < 0)
565  return ret;
566  av_assert1(ret);
567  in->pts = *pts;
568  *pts += av_rescale_q(in->nb_samples,
569  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
570  return ff_filter_frame(outlink, in);
571 }
572 
573 static int pass_crossfade(AVFilterContext *ctx, const int idx0, const int idx1)
574 {
575  AudioFadeContext *s = ctx->priv;
576  AVFilterLink *outlink = ctx->outputs[0];
577  AVFrame *out, *cf[2] = { NULL };
578  int ret;
579 
580  AVFilterLink *in0 = ctx->inputs[idx0];
581  AVFilterLink *in1 = ctx->inputs[idx1];
582  int queued_samples0 = ff_inlink_queued_samples(in0);
583  int queued_samples1 = ff_inlink_queued_samples(in1);
584 
585  /* Limit to the relevant region */
586  av_assert1(queued_samples0 <= s->nb_samples);
587  if (ff_outlink_get_status(in1) && idx1 < s->nb_inputs - 1)
588  queued_samples1 /= 2; /* reserve second half for next fade-out */
589  queued_samples1 = FFMIN(queued_samples1, s->nb_samples);
590 
591  if (s->overlap) {
592  int nb_samples = FFMIN(queued_samples0, queued_samples1);
593  if (nb_samples < s->nb_samples) {
594  av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
595  "is shorter than crossfade duration (%"PRId64" samples), "
596  "crossfade will be shorter by %"PRId64" samples.\n",
597  queued_samples0 <= queued_samples1 ? idx0 : idx1,
598  nb_samples, s->nb_samples, s->nb_samples - nb_samples);
599 
600  if (queued_samples0 > nb_samples) {
601  ret = pass_samples(in0, outlink, queued_samples0 - nb_samples, &s->pts);
602  if (ret < 0)
603  return ret;
604  }
605 
606  if (!nb_samples)
607  return 0; /* either input was completely empty */
608  }
609 
610  av_assert1(nb_samples > 0);
611  out = ff_get_audio_buffer(outlink, nb_samples);
612  if (!out)
613  return AVERROR(ENOMEM);
614 
615  ret = ff_inlink_consume_samples(in0, nb_samples, nb_samples, &cf[0]);
616  if (ret < 0) {
617  av_frame_free(&out);
618  return ret;
619  }
620 
621  ret = ff_inlink_consume_samples(in1, nb_samples, nb_samples, &cf[1]);
622  if (ret < 0) {
623  av_frame_free(&cf[0]);
624  av_frame_free(&out);
625  return ret;
626  }
627 
628  s->crossfade_samples(out->extended_data, cf[0]->extended_data,
629  cf[1]->extended_data, nb_samples,
630  out->ch_layout.nb_channels, s->curve, s->curve2);
631  out->pts = s->pts;
632  s->pts += av_rescale_q(nb_samples,
633  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
634  av_frame_free(&cf[0]);
635  av_frame_free(&cf[1]);
636  return ff_filter_frame(outlink, out);
637  } else {
638  if (queued_samples0 < s->nb_samples) {
639  av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
640  "is shorter than crossfade duration (%"PRId64" samples), "
641  "fade-out will be shorter by %"PRId64" samples.\n",
642  idx0, queued_samples0, s->nb_samples,
643  s->nb_samples - queued_samples0);
644  if (!queued_samples0)
645  goto fade_in;
646  }
647 
648  out = ff_get_audio_buffer(outlink, queued_samples0);
649  if (!out)
650  return AVERROR(ENOMEM);
651 
652  ret = ff_inlink_consume_samples(in0, queued_samples0, queued_samples0, &cf[0]);
653  if (ret < 0) {
654  av_frame_free(&out);
655  return ret;
656  }
657 
658  s->fade_samples(out->extended_data, cf[0]->extended_data, cf[0]->nb_samples,
659  outlink->ch_layout.nb_channels, -1, cf[0]->nb_samples - 1, cf[0]->nb_samples, s->curve, 0., 1.);
660  out->pts = s->pts;
661  s->pts += av_rescale_q(cf[0]->nb_samples,
662  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
663  av_frame_free(&cf[0]);
664  ret = ff_filter_frame(outlink, out);
665  if (ret < 0)
666  return ret;
667 
668  fade_in:
669  if (queued_samples1 < s->nb_samples) {
670  av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
671  "is shorter than crossfade duration (%"PRId64" samples), "
672  "fade-in will be shorter by %"PRId64" samples.\n",
673  idx1, ff_inlink_queued_samples(in1), s->nb_samples,
674  s->nb_samples - queued_samples1);
675  if (!queued_samples1)
676  return 0;
677  }
678 
679  out = ff_get_audio_buffer(outlink, queued_samples1);
680  if (!out)
681  return AVERROR(ENOMEM);
682 
683  ret = ff_inlink_consume_samples(in1, queued_samples1, queued_samples1, &cf[1]);
684  if (ret < 0) {
685  av_frame_free(&out);
686  return ret;
687  }
688 
689  s->fade_samples(out->extended_data, cf[1]->extended_data, cf[1]->nb_samples,
690  outlink->ch_layout.nb_channels, 1, 0, cf[1]->nb_samples, s->curve2, 0., 1.);
691  out->pts = s->pts;
692  s->pts += av_rescale_q(cf[1]->nb_samples,
693  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
694  av_frame_free(&cf[1]);
695  return ff_filter_frame(outlink, out);
696  }
697 }
698 
699 static int activate(AVFilterContext *ctx)
700 {
701  AudioFadeContext *s = ctx->priv;
702  const int idx0 = s->xfade_idx;
703  const int idx1 = s->xfade_idx + 1;
704  AVFilterLink *outlink = ctx->outputs[0];
705  AVFilterLink *in0 = ctx->inputs[idx0];
706 
707  FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
708 
709  if (idx0 == s->nb_inputs - 1) {
710  /* Last active input, read until EOF */
711  if (ff_inlink_queued_frames(in0))
712  return pass_frame(in0, outlink, &s->pts);
713  FF_FILTER_FORWARD_STATUS(in0, outlink);
714  FF_FILTER_FORWARD_WANTED(outlink, in0);
715  return FFERROR_NOT_READY;
716  }
717 
718  AVFilterLink *in1 = ctx->inputs[idx1];
719  int queued_samples0 = ff_inlink_queued_samples(in0);
720  if (queued_samples0 > s->nb_samples) {
721  AVFrame *frame = ff_inlink_peek_frame(in0, 0);
722  if (queued_samples0 - s->nb_samples >= frame->nb_samples)
723  return pass_frame(in0, outlink, &s->pts);
724  }
725 
726  /* Continue reading until EOF */
727  if (ff_outlink_get_status(in0)) {
728  if (queued_samples0 > s->nb_samples)
729  return pass_samples(in0, outlink, queued_samples0 - s->nb_samples, &s->pts);
730  } else {
731  FF_FILTER_FORWARD_WANTED(outlink, in0);
732  return FFERROR_NOT_READY;
733  }
734 
735  /* At this point, in0 has reached EOF with no more samples remaining
736  * except those that we want to crossfade */
737  av_assert0(queued_samples0 <= s->nb_samples);
738  int queued_samples1 = ff_inlink_queued_samples(in1);
739 
740  /* If this clip is sandwiched between two other clips, buffer at least
741  * twice the total crossfade duration to ensure that we won't reach EOF
742  * during the second fade (in which case we would shorten the fade) */
743  int needed_samples = s->nb_samples;
744  if (idx1 < s->nb_inputs - 1)
745  needed_samples *= 2;
746 
747  if (queued_samples1 >= needed_samples || ff_outlink_get_status(in1)) {
748  /* The first filter may EOF before delivering any samples, in which
749  * case it's possible for pass_crossfade() to be a no-op. Just ensure
750  * the activate() function runs again after incrementing the index to
751  * ensure we correctly move on to the next input in that case. */
752  s->xfade_idx++;
753  ff_filter_set_ready(ctx, 100);
754  return pass_crossfade(ctx, idx0, idx1);
755  } else {
756  FF_FILTER_FORWARD_WANTED(outlink, in1);
757  return FFERROR_NOT_READY;
758  }
759 }
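/*
 * Sketch of the activate() flow for the default two-input case: frames from
 * the first input are passed straight through while more than nb_samples
 * remain queued; once that input signals EOF, anything beyond the reserved
 * nb_samples is flushed, and the filter waits for the second input to queue
 * nb_samples (twice that when yet another cross-fade follows).  Then
 * xfade_idx advances and pass_crossfade() mixes the two regions, after which
 * the next input becomes the pass-through source.
 */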
760 
761 static av_cold int acrossfade_init(AVFilterContext *ctx)
762 {
763  AudioFadeContext *s = ctx->priv;
764  int ret;
765 
766  for (int i = 0; i < s->nb_inputs; i++) {
767  AVFilterPad pad = {
768  .name = av_asprintf("crossfade%d", i),
769  .type = AVMEDIA_TYPE_AUDIO,
770  };
771  if (!pad.name)
772  return AVERROR(ENOMEM);
773 
774  ret = ff_append_inpad_free_name(ctx, &pad);
775  if (ret < 0)
776  return ret;
777  }
778 
779  return 0;
780 }
781 
782 static int acrossfade_config_output(AVFilterLink *outlink)
783 {
784  AVFilterContext *ctx = outlink->src;
785  AudioFadeContext *s = ctx->priv;
786 
787  outlink->time_base = ctx->inputs[0]->time_base;
788 
789  switch (outlink->format) {
790  case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
791  case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
792  case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
793  case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
794  case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
795  case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
796  case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
797  case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
798  }
799 
800  config_output(outlink);
801 
802  return 0;
803 }
804 
805 static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
806  {
807  .name = "default",
808  .type = AVMEDIA_TYPE_AUDIO,
809  .config_props = acrossfade_config_output,
810  },
811 };
812 
813 const FFFilter ff_af_acrossfade = {
814  .p.name = "acrossfade",
815  .p.description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
816  .p.priv_class = &acrossfade_class,
817  .p.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
818  .priv_size = sizeof(AudioFadeContext),
819  .init = acrossfade_init,
820  .activate = activate,
821  FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
822  FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
823 };
824 
825 #endif /* CONFIG_ACROSSFADE_FILTER */