FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/af_afade.c
Date: 2025-08-19 23:55:23
            Exec   Total   Coverage
Lines:       135     296      45.6%
Functions:    11      34      32.4%
Branches:     73     252      29.0%

Line Branch Exec Source   (Branch column: [taken/total: per-branch hit counts; ✗ = branch not taken, ✗×n = n consecutive branches not taken])
1 /*
2 * Copyright (c) 2013-2015 Paul B Mahol
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * fade audio filter
24 */
25
26 #include "config_components.h"
27
28 #include "libavutil/avassert.h"
29 #include "libavutil/opt.h"
30 #include "audio.h"
31 #include "avfilter.h"
32 #include "filters.h"
33
34 typedef struct AudioFadeContext {
35 const AVClass *class;
36 int type;
37 int curve, curve2;
38 int64_t nb_samples;
39 int64_t start_sample;
40 int64_t duration;
41 int64_t start_time;
42 double silence;
43 double unity;
44 int overlap;
45 int64_t pts;
46 int xfade_status;
47
48 void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
49 int nb_samples, int channels, int direction,
50 int64_t start, int64_t range, int curve,
51 double silence, double unity);
52 void (*scale_samples)(uint8_t **dst, uint8_t * const *src,
53 int nb_samples, int channels, double unity);
54 void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
55 uint8_t * const *cf1,
56 int nb_samples, int channels,
57 int curve0, int curve1);
58 } AudioFadeContext;
59
60 enum CurveType { NONE = -1, TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, SINC, ISINC, QUAT, QUATR, QSIN2, HSIN2, NB_CURVES };
61
62 #define OFFSET(x) offsetof(AudioFadeContext, x)
63 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
64 #define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
65
66 static const enum AVSampleFormat sample_fmts[] = {
67 AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
68 AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
69 AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
70 AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
71 AV_SAMPLE_FMT_NONE
72 };
73
74 737552 static double fade_gain(int curve, int64_t index, int64_t range, double silence, double unity)
75 {
76 #define CUBE(a) ((a)*(a)*(a))
77 double gain;
78
79 737552 gain = av_clipd(1.0 * index / range, 0, 1.0);
80
81 [6/24: ✓90112 ✓90112 ✓90112 ✓90112 ✗ ✓178312 ✓198792 ✗×17] 737552 switch (curve) {
82 90112 case QSIN:
83 90112 gain = sin(gain * M_PI / 2.0);
84 90112 break;
85 90112 case IQSIN:
86 /* 0.6... = 2 / M_PI */
87 90112 gain = 0.6366197723675814 * asin(gain);
88 90112 break;
89 90112 case ESIN:
90 90112 gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
91 90112 break;
92 90112 case HSIN:
93 90112 gain = (1.0 - cos(gain * M_PI)) / 2.0;
94 90112 break;
95 case IHSIN:
96 /* 0.3... = 1 / M_PI */
97 gain = 0.3183098861837907 * acos(1 - 2 * gain);
98 break;
99 178312 case EXP:
100 /* -11.5... = 5*ln(0.1) */
101 178312 gain = exp(-11.512925464970227 * (1 - gain));
102 178312 break;
103 198792 case LOG:
104 198792 gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
105 198792 break;
106 case PAR:
107 gain = 1 - sqrt(1 - gain);
108 break;
109 case IPAR:
110 gain = (1 - (1 - gain) * (1 - gain));
111 break;
112 case QUA:
113 gain *= gain;
114 break;
115 case CUB:
116 gain = CUBE(gain);
117 break;
118 case SQU:
119 gain = sqrt(gain);
120 break;
121 case CBR:
122 gain = cbrt(gain);
123 break;
124 case DESE:
125 gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
126 break;
127 case DESI:
128 gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
129 break;
130 case LOSI: {
131 const double a = 1. / (1. - 0.787) - 1;
132 double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
133 double B = 1. / (1.0 + exp(a));
134 double C = 1. / (1.0 + exp(0-a));
135 gain = (A - B) / (C - B);
136 }
137 break;
138 case SINC:
139 gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
140 break;
141 case ISINC:
142 gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
143 break;
144 case QUAT:
145 gain = gain * gain * gain * gain;
146 break;
147 case QUATR:
148 gain = pow(gain, 0.25);
149 break;
150 case QSIN2:
151 gain = sin(gain * M_PI / 2.0) * sin(gain * M_PI / 2.0);
152 break;
153 case HSIN2:
154 gain = pow((1.0 - cos(gain * M_PI)) / 2.0, 2.0);
155 break;
156 case NONE:
157 gain = 1.0;
158 break;
159 }
160
161 737552 return silence + (unity - silence) * gain;
162 }
163
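A minimal standalone sketch (hypothetical demo, not part of af_afade.c) of what fade_gain() does for the two most common curves: clamp index/range into [0,1], shape it (here TRI, i.e. linear, and QSIN), then rescale between silence and unity:

    #include <math.h>     /* sin(); link with -lm */
    #include <stdint.h>
    #include <stdio.h>

    /* Demo helper mirroring fade_gain() for curve == TRI and curve == QSIN. */
    static double demo_gain(int use_qsin, int64_t index, int64_t range,
                            double silence, double unity)
    {
        double g = (double)index / (double)range;
        g = g < 0.0 ? 0.0 : (g > 1.0 ? 1.0 : g);  /* av_clipd(1.0 * index / range, 0, 1.0) */
        if (use_qsin)
            g = sin(g * M_PI / 2.0);              /* QSIN: quarter of sine wave */
        return silence + (unity - silence) * g;   /* rescale between silence and unity */
    }

    int main(void)
    {
        for (int64_t i = 0; i <= 4; i++)
            printf("index %lld/4: tri=%.3f qsin=%.3f\n", (long long)i,
                   demo_gain(0, i, 4, 0.0, 1.0), demo_gain(1, i, 4, 0.0, 1.0));
        return 0;
    }

At the halfway point (index 2 of 4) TRI gives 0.500 while QSIN gives sin(π/4) ≈ 0.707.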
164 #define FADE_PLANAR(name, type) \
165 static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
166 int nb_samples, int channels, int dir, \
167 int64_t start, int64_t range,int curve,\
168 double silence, double unity) \
169 { \
170 int i, c; \
171 \
172 for (i = 0; i < nb_samples; i++) { \
173 double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
174 for (c = 0; c < channels; c++) { \
175 type *d = (type *)dst[c]; \
176 const type *s = (type *)src[c]; \
177 \
178 d[i] = s[i] * gain; \
179 } \
180 } \
181 }
182
183 #define FADE(name, type) \
184 static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
185 int nb_samples, int channels, int dir, \
186 int64_t start, int64_t range, int curve, \
187 double silence, double unity) \
188 { \
189 type *d = (type *)dst[0]; \
190 const type *s = (type *)src[0]; \
191 int i, c, k = 0; \
192 \
193 for (i = 0; i < nb_samples; i++) { \
194 double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
195 for (c = 0; c < channels; c++, k++) \
196 d[k] = s[k] * gain; \
197 } \
198 }
199
200 FADE_PLANAR(dbl, double)
201 FADE_PLANAR(flt, float)
202 FADE_PLANAR(s16, int16_t)
203 FADE_PLANAR(s32, int32_t)
204
205 FADE(dbl, double)
206 FADE(flt, float)
207 [4/4: ✓1122304 ✓561152 ✓561152 ✓137] 1683593 FADE(s16, int16_t)
208 FADE(s32, int32_t)
209
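For reference, the FADE_PLANAR(flt, float) instantiation above expands to essentially the following function (rendered with tidied whitespace and loop-scoped indices; behaviour is identical):

    static void fade_samples_fltp(uint8_t **dst, uint8_t * const *src,
                                  int nb_samples, int channels, int dir,
                                  int64_t start, int64_t range, int curve,
                                  double silence, double unity)
    {
        for (int i = 0; i < nb_samples; i++) {
            /* one gain per output position; dir == -1 walks the curve backwards */
            double gain = fade_gain(curve, start + i * dir, range, silence, unity);
            for (int c = 0; c < channels; c++) {
                float *d = (float *)dst[c];
                const float *s = (const float *)src[c];
                d[i] = s[i] * gain;               /* planar: one buffer per channel */
            }
        }
    }

The packed FADE() variant differs only in indexing a single interleaved buffer with a running offset k = i * channels + c.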
210 #define SCALE_PLANAR(name, type) \
211 static void scale_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
212 int nb_samples, int channels, \
213 double gain) \
214 { \
215 int i, c; \
216 \
217 for (i = 0; i < nb_samples; i++) { \
218 for (c = 0; c < channels; c++) { \
219 type *d = (type *)dst[c]; \
220 const type *s = (type *)src[c]; \
221 \
222 d[i] = s[i] * gain; \
223 } \
224 } \
225 }
226
227 #define SCALE(name, type) \
228 static void scale_samples_## name (uint8_t **dst, uint8_t * const *src, \
229 int nb_samples, int channels, double gain)\
230 { \
231 type *d = (type *)dst[0]; \
232 const type *s = (type *)src[0]; \
233 int i, c, k = 0; \
234 \
235 for (i = 0; i < nb_samples; i++) { \
236 for (c = 0; c < channels; c++, k++) \
237 d[k] = s[k] * gain; \
238 } \
239 }
240
241 SCALE_PLANAR(dbl, double)
242 SCALE_PLANAR(flt, float)
243 SCALE_PLANAR(s16, int16_t)
244 SCALE_PLANAR(s32, int32_t)
245
246 SCALE(dbl, double)
247 SCALE(flt, float)
248 SCALE(s16, int16_t)
249 SCALE(s32, int32_t)
250
251 7 static int config_output(AVFilterLink *outlink)
252 {
253 7 AVFilterContext *ctx = outlink->src;
254 7 AudioFadeContext *s = ctx->priv;
255
256 [1/9: ✗ ✗ ✗ ✗ ✓7 ✗ ✗ ✗ ✗] 7 switch (outlink->format) {
257 case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl;
258 s->scale_samples = scale_samples_dbl;
259 break;
260 case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp;
261 s->scale_samples = scale_samples_dblp;
262 break;
263 case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt;
264 s->scale_samples = scale_samples_flt;
265 break;
266 case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp;
267 s->scale_samples = scale_samples_fltp;
268 break;
269 7 case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16;
270 7 s->scale_samples = scale_samples_s16;
271 7 break;
272 case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p;
273 s->scale_samples = scale_samples_s16p;
274 break;
275 case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32;
276 s->scale_samples = scale_samples_s32;
277 break;
278 case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p;
279 s->scale_samples = scale_samples_s32p;
280 break;
281 }
282
283 [1/2: ✓7 ✗] 7 if (s->duration)
284 7 s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
285 7 s->duration = 0;
286 [1/2: ✗ ✓7] 7 if (s->start_time)
287 s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
288 7 s->start_time = 0;
289
290 7 return 0;
291 }
292
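For orientation: duration and start_time are given in AV_TIME_BASE (microsecond) units, so, as a worked example (not a value from this run), a 2.5 s fade on a 48000 Hz output becomes nb_samples = av_rescale(2500000, 48000, 1000000) = 120000 samples; start_time is converted to start_sample the same way.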
293 #if CONFIG_AFADE_FILTER
294
295 static const AVOption afade_options[] = {
296 { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, .unit = "type" },
297 { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, .unit = "type" },
298 { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, TFLAGS, .unit = "type" },
299 { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, TFLAGS, .unit = "type" },
300 { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
301 { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
302 { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
303 { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
304 { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
305 { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
306 { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
307 { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
308 { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, .unit = "curve" },
309 { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, .unit = "curve" },
310 { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, TFLAGS, .unit = "curve" },
311 { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, TFLAGS, .unit = "curve" },
312 { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, TFLAGS, .unit = "curve" },
313 { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, TFLAGS, .unit = "curve" },
314 { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, TFLAGS, .unit = "curve" },
315 { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, TFLAGS, .unit = "curve" },
316 { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, TFLAGS, .unit = "curve" },
317 { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, TFLAGS, .unit = "curve" },
318 { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, TFLAGS, .unit = "curve" },
319 { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, TFLAGS, .unit = "curve" },
320 { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, TFLAGS, .unit = "curve" },
321 { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, TFLAGS, .unit = "curve" },
322 { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, TFLAGS, .unit = "curve" },
323 { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, TFLAGS, .unit = "curve" },
324 { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, TFLAGS, .unit = "curve" },
325 { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, TFLAGS, .unit = "curve" },
326 { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, TFLAGS, .unit = "curve" },
327 { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, TFLAGS, .unit = "curve" },
328 { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, TFLAGS, .unit = "curve" },
329 { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, TFLAGS, .unit = "curve" },
330 { "quat", "quartic", 0, AV_OPT_TYPE_CONST, {.i64 = QUAT }, 0, 0, TFLAGS, .unit = "curve" },
331 { "quatr", "quartic root", 0, AV_OPT_TYPE_CONST, {.i64 = QUATR}, 0, 0, TFLAGS, .unit = "curve" },
332 { "qsin2", "squared quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN2}, 0, 0, TFLAGS, .unit = "curve" },
333 { "hsin2", "squared half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN2}, 0, 0, TFLAGS, .unit = "curve" },
334 { "silence", "set the silence gain", OFFSET(silence), AV_OPT_TYPE_DOUBLE, {.dbl = 0 }, 0, 1, TFLAGS },
335 { "unity", "set the unity gain", OFFSET(unity), AV_OPT_TYPE_DOUBLE, {.dbl = 1 }, 0, 1, TFLAGS },
336 { NULL }
337 };
338
339 AVFILTER_DEFINE_CLASS(afade);
340
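Taken together, these options correspond to filter strings such as afade=t=in:ss=0:d=5:curve=qsin, i.e. a 5-second quarter-sine fade-in starting at the first sample (an illustrative example, not one taken from this coverage run).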
341 12 static av_cold int init(AVFilterContext *ctx)
342 {
343 12 AudioFadeContext *s = ctx->priv;
344
345 [1/2: ✗ ✓12] 12 if (INT64_MAX - s->nb_samples < s->start_sample)
346 return AVERROR(EINVAL);
347
348 12 return 0;
349 }
350
351 390 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
352 {
353 390 AudioFadeContext *s = inlink->dst->priv;
354 390 AVFilterLink *outlink = inlink->dst->outputs[0];
355 390 int nb_samples = buf->nb_samples;
356 AVFrame *out_buf;
357 390 int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
358
359 [1/2: ✓390 ✗] 390 if (s->unity == 1.0 &&
360 [3/4: ✓390 ✗ ✓137 ✓253] 390 ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
361 [1/4: ✗ ✓137 ✗ ✗] 137 ( s->type && (cur_sample + nb_samples < s->start_sample))))
362 253 return ff_filter_frame(outlink, buf);
363
364 [1/2: ✓137 ✗] 137 if (av_frame_is_writable(buf)) {
365 137 out_buf = buf;
366 } else {
367 out_buf = ff_get_audio_buffer(outlink, nb_samples);
368 if (!out_buf)
369 return AVERROR(ENOMEM);
370 av_frame_copy_props(out_buf, buf);
371 }
372
373 [2/4: ✓137 ✗ ✓137 ✗] 137 if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
374 [1/4: ✗ ✓137 ✗ ✗] 137 ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
375 if (s->silence == 0.) {
376 av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
377 out_buf->ch_layout.nb_channels, out_buf->format);
378 } else {
379 s->scale_samples(out_buf->extended_data, buf->extended_data,
380 nb_samples, buf->ch_layout.nb_channels,
381 s->silence);
382 }
383 [1/4: ✗ ✓137 ✗ ✗] 137 } else if (( s->type && (cur_sample + nb_samples < s->start_sample)) ||
384 [2/4: ✓137 ✗ ✗ ✓137] 137 (!s->type && (s->start_sample + s->nb_samples < cur_sample))) {
385 s->scale_samples(out_buf->extended_data, buf->extended_data,
386 nb_samples, buf->ch_layout.nb_channels,
387 s->unity);
388 } else {
389 int64_t start;
390
391 [1/2: ✓137 ✗] 137 if (!s->type)
392 137 start = cur_sample - s->start_sample;
393 else
394 start = s->start_sample + s->nb_samples - cur_sample;
395
396 137 s->fade_samples(out_buf->extended_data, buf->extended_data,
397 137 nb_samples, buf->ch_layout.nb_channels,
398 [1/2: ✗ ✓137] 137 s->type ? -1 : 1, start,
399 s->nb_samples, s->curve, s->silence, s->unity);
400 }
401
402 [1/2: ✗ ✓137] 137 if (buf != out_buf)
403 av_frame_free(&buf);
404
405 137 return ff_filter_frame(outlink, out_buf);
406 }
407
408 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
409 char *res, int res_len, int flags)
410 {
411 int ret;
412
413 ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
414 if (ret < 0)
415 return ret;
416
417 return config_output(ctx->outputs[0]);
418 }
419
420 static const AVFilterPad avfilter_af_afade_inputs[] = {
421 {
422 .name = "default",
423 .type = AVMEDIA_TYPE_AUDIO,
424 .filter_frame = filter_frame,
425 },
426 };
427
428 static const AVFilterPad avfilter_af_afade_outputs[] = {
429 {
430 .name = "default",
431 .type = AVMEDIA_TYPE_AUDIO,
432 .config_props = config_output,
433 },
434 };
435
436 const FFFilter ff_af_afade = {
437 .p.name = "afade",
438 .p.description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
439 .p.priv_class = &afade_class,
440 .p.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
441 .priv_size = sizeof(AudioFadeContext),
442 .init = init,
443 FILTER_INPUTS(avfilter_af_afade_inputs),
444 FILTER_OUTPUTS(avfilter_af_afade_outputs),
445 FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
446 .process_command = process_command,
447 };
448
449 #endif /* CONFIG_AFADE_FILTER */
450
451 #if CONFIG_ACROSSFADE_FILTER
452
453 static const AVOption acrossfade_options[] = {
454 { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
455 { "ns", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
456 { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
457 { "d", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
458 { "overlap", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
459 { "o", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
460 { "curve1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
461 { "c1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
462 { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, FLAGS, .unit = "curve" },
463 { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, .unit = "curve" },
464 { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, .unit = "curve" },
465 { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, .unit = "curve" },
466 { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, .unit = "curve" },
467 { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, .unit = "curve" },
468 { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, .unit = "curve" },
469 { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, .unit = "curve" },
470 { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, .unit = "curve" },
471 { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, .unit = "curve" },
472 { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, .unit = "curve" },
473 { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, .unit = "curve" },
474 { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, .unit = "curve" },
475 { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, .unit = "curve" },
476 { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, .unit = "curve" },
477 { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, .unit = "curve" },
478 { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, .unit = "curve" },
479 { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, FLAGS, .unit = "curve" },
480 { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, FLAGS, .unit = "curve" },
481 { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, FLAGS, .unit = "curve" },
482 { "quat", "quartic", 0, AV_OPT_TYPE_CONST, {.i64 = QUAT }, 0, 0, FLAGS, .unit = "curve" },
483 { "quatr", "quartic root", 0, AV_OPT_TYPE_CONST, {.i64 = QUATR}, 0, 0, FLAGS, .unit = "curve" },
484 { "qsin2", "squared quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN2}, 0, 0, FLAGS, .unit = "curve" },
485 { "hsin2", "squared half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN2}, 0, 0, FLAGS, .unit = "curve" },
486 { "curve2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
487 { "c2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, .unit = "curve" },
488 { NULL }
489 };
490
491 AVFILTER_DEFINE_CLASS(acrossfade);
492
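These options map onto filter strings such as acrossfade=d=10:c1=exp:c2=exp, fading the tail of the first input into the head of the second over 10 seconds with exponential curves on both sides (illustrative example only).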
493 #define CROSSFADE_PLANAR(name, type) \
494 static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
495 uint8_t * const *cf1, \
496 int nb_samples, int channels, \
497 int curve0, int curve1) \
498 { \
499 int i, c; \
500 \
501 for (i = 0; i < nb_samples; i++) { \
502 double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples,0.,1.);\
503 double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
504 for (c = 0; c < channels; c++) { \
505 type *d = (type *)dst[c]; \
506 const type *s0 = (type *)cf0[c]; \
507 const type *s1 = (type *)cf1[c]; \
508 \
509 d[i] = s0[i] * gain0 + s1[i] * gain1; \
510 } \
511 } \
512 }
513
514 #define CROSSFADE(name, type) \
515 static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
516 uint8_t * const *cf1, \
517 int nb_samples, int channels, \
518 int curve0, int curve1) \
519 { \
520 type *d = (type *)dst[0]; \
521 const type *s0 = (type *)cf0[0]; \
522 const type *s1 = (type *)cf1[0]; \
523 int i, c, k = 0; \
524 \
525 for (i = 0; i < nb_samples; i++) { \
526 double gain0 = fade_gain(curve0, nb_samples - 1-i,nb_samples,0.,1.);\
527 double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
528 for (c = 0; c < channels; c++, k++) \
529 d[k] = s0[k] * gain0 + s1[k] * gain1; \
530 } \
531 }
532
533 CROSSFADE_PLANAR(dbl, double)
534 CROSSFADE_PLANAR(flt, float)
535 CROSSFADE_PLANAR(s16, int16_t)
536 CROSSFADE_PLANAR(s32, int32_t)
537
538 CROSSFADE(dbl, double)
539 CROSSFADE(flt, float)
540 [4/4: ✓176400 ✓88200 ✓88200 ✓1] 264601 CROSSFADE(s16, int16_t)
541 CROSSFADE(s32, int32_t)
542
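In both variants each output sample is s0[i] * gain0 + s1[i] * gain1, with gain0 evaluated on the reversed index nb_samples - 1 - i; with the default TRI curves both gains are approximately 0.5 at the midpoint, so the two inputs are effectively averaged there.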
543 125 static int pass_frame(AVFilterLink *inlink, AVFilterLink *outlink, int64_t *pts)
544 {
545 AVFrame *in;
546 125 int ret = ff_inlink_consume_frame(inlink, &in);
547 [1/2: ✗ ✓125] 125 if (ret < 0)
548 return ret;
549 av_assert1(ret);
550 125 in->pts = *pts;
551 125 *pts += av_rescale_q(in->nb_samples,
552 125 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
553 125 return ff_filter_frame(outlink, in);
554 }
555
556 1 static int pass_samples(AVFilterLink *inlink, AVFilterLink *outlink, unsigned nb_samples, int64_t *pts)
557 {
558 AVFrame *in;
559 1 int ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
560 [1/2: ✗ ✓1] 1 if (ret < 0)
561 return ret;
562 av_assert1(ret);
563 1 in->pts = *pts;
564 1 *pts += av_rescale_q(in->nb_samples,
565 1 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
566 1 return ff_filter_frame(outlink, in);
567 }
568
569 1 static int pass_crossfade(AVFilterContext *ctx)
570 {
571 1 AudioFadeContext *s = ctx->priv;
572 1 AVFilterLink *outlink = ctx->outputs[0];
573 1 AVFrame *out, *cf[2] = { NULL };
574 int ret;
575
576 [1/2: ✓1 ✗] 1 if (s->overlap) {
577 1 out = ff_get_audio_buffer(outlink, s->nb_samples);
578 [1/2: ✗ ✓1] 1 if (!out)
579 return AVERROR(ENOMEM);
580
581 1 ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
582 [1/2: ✗ ✓1] 1 if (ret < 0) {
583 av_frame_free(&out);
584 return ret;
585 }
586
587 1 ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
588 [1/2: ✗ ✓1] 1 if (ret < 0) {
589 av_frame_free(&out);
590 return ret;
591 }
592
593 1 s->crossfade_samples(out->extended_data, cf[0]->extended_data,
594 1 cf[1]->extended_data,
595 1 s->nb_samples, out->ch_layout.nb_channels,
596 s->curve, s->curve2);
597 1 out->pts = s->pts;
598 1 s->pts += av_rescale_q(s->nb_samples,
599 1 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
600 1 av_frame_free(&cf[0]);
601 1 av_frame_free(&cf[1]);
602 1 return ff_filter_frame(outlink, out);
603 } else {
604 out = ff_get_audio_buffer(outlink, s->nb_samples);
605 if (!out)
606 return AVERROR(ENOMEM);
607
608 ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
609 if (ret < 0) {
610 av_frame_free(&out);
611 return ret;
612 }
613
614 s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
615 outlink->ch_layout.nb_channels, -1, s->nb_samples - 1, s->nb_samples, s->curve, 0., 1.);
616 out->pts = s->pts;
617 s->pts += av_rescale_q(s->nb_samples,
618 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
619 av_frame_free(&cf[0]);
620 ret = ff_filter_frame(outlink, out);
621 if (ret < 0)
622 return ret;
623
624 out = ff_get_audio_buffer(outlink, s->nb_samples);
625 if (!out)
626 return AVERROR(ENOMEM);
627
628 ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
629 if (ret < 0) {
630 av_frame_free(&out);
631 return ret;
632 }
633
634 s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
635 outlink->ch_layout.nb_channels, 1, 0, s->nb_samples, s->curve2, 0., 1.);
636 out->pts = s->pts;
637 s->pts += av_rescale_q(s->nb_samples,
638 (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
639 av_frame_free(&cf[1]);
640 return ff_filter_frame(outlink, out);
641 }
642 }
643
644 323 static int activate(AVFilterContext *ctx)
645 {
646 323 AudioFadeContext *s = ctx->priv;
647 323 AVFilterLink *outlink = ctx->outputs[0];
648
649 [4/4: ✓1 ✓322 ✓2 ✓1] 325 FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
650
651 // Read first input until EOF
652 [2/2: ✓146 ✓176] 322 if (s->xfade_status == 0) {
653 146 int queued_samples = ff_inlink_queued_samples(ctx->inputs[0]);
654 [2/2: ✓89 ✓57] 146 if (queued_samples > s->nb_samples) {
655 89 AVFrame *frame = ff_inlink_peek_frame(ctx->inputs[0], 0);
656 [2/2: ✓43 ✓46] 89 if (queued_samples - s->nb_samples >= frame->nb_samples)
657 43 return pass_frame(ctx->inputs[0], outlink, &s->pts);
658 }
659 [2/2: ✓2 ✓101] 103 if (ff_outlink_get_status(ctx->inputs[0])) {
660 [2/2: ✓1 ✓1] 2 if (queued_samples > s->nb_samples)
661 1 return pass_samples(ctx->inputs[0], outlink, queued_samples - s->nb_samples, &s->pts);
662 1 s->xfade_status = 1;
663 } else {
664 [2/2: ✓100 ✓1] 101 FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[0]);
665 }
666 }
667 // Read second input until enough data is ready or EOF
668 [2/2: ✓12 ✓166] 178 if (s->xfade_status == 1) {
669 [3/4: ✓11 ✓1 ✗ ✓11] 12 if (ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples || ff_outlink_get_status(ctx->inputs[1])) {
670 1 s->xfade_status = 2;
671 } else {
672 [1/2: ✓11 ✗] 11 FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[1]);
673 }
674 }
675 // Do crossfade
676 [2/2: ✓1 ✓166] 167 if (s->xfade_status == 2) {
677 1 s->xfade_status = 3;
678 // TODO: Do some partial crossfade if not all inputs have enough duration?
679 [1/2: ✓1 ✗] 1 if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->nb_samples &&
680 [1/2: ✓1 ✗] 1 ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples)
681 1 return pass_crossfade(ctx);
682 }
683 // Read second input until EOF
684 [2/2: ✓165 ✓1] 166 if (s->xfade_status == 3) {
685 [2/2: ✓82 ✓83] 165 if (ff_inlink_queued_frames(ctx->inputs[1]))
686 82 return pass_frame(ctx->inputs[1], outlink, &s->pts);
687 [2/2: ✓1 ✓82] 83 FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);
688 [1/2: ✓82 ✗] 82 FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[1]);
689 }
690
691 1 return FFERROR_NOT_READY;
692 }
693
694 1 static int acrossfade_config_output(AVFilterLink *outlink)
695 {
696 1 AVFilterContext *ctx = outlink->src;
697 1 AudioFadeContext *s = ctx->priv;
698
699 1 outlink->time_base = ctx->inputs[0]->time_base;
700
701 [1/9: ✗ ✗ ✗ ✗ ✓1 ✗ ✗ ✗ ✗] 1 switch (outlink->format) {
702 case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
703 case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
704 case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
705 case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
706 1 case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
707 case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
708 case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
709 case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
710 }
711
712 1 config_output(outlink);
713
714 1 return 0;
715 }
716
717 static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
718 {
719 .name = "crossfade0",
720 .type = AVMEDIA_TYPE_AUDIO,
721 },
722 {
723 .name = "crossfade1",
724 .type = AVMEDIA_TYPE_AUDIO,
725 },
726 };
727
728 static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
729 {
730 .name = "default",
731 .type = AVMEDIA_TYPE_AUDIO,
732 .config_props = acrossfade_config_output,
733 },
734 };
735
736 const FFFilter ff_af_acrossfade = {
737 .p.name = "acrossfade",
738 .p.description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
739 .p.priv_class = &acrossfade_class,
740 .priv_size = sizeof(AudioFadeContext),
741 .activate = activate,
742 FILTER_INPUTS(avfilter_af_acrossfade_inputs),
743 FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
744 FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
745 };
746
747 #endif /* CONFIG_ACROSSFADE_FILTER */
748