FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/af_adynamicequalizer.c
Date: 2022-12-05 03:11:11
             Exec   Total   Coverage
Lines:          0     130       0.0%
Functions:      0       6       0.0%
Branches:       0      54       0.0%

Line Branch Exec Source
1 /*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <float.h>
20
21 #include "libavutil/opt.h"
22 #include "avfilter.h"
23 #include "audio.h"
24 #include "formats.h"
25
26 typedef struct AudioDynamicEqualizerContext {
27 const AVClass *class;
28
29 double threshold;
30 double dfrequency;
31 double dqfactor;
32 double tfrequency;
33 double tqfactor;
34 double ratio;
35 double range;
36 double makeup;
37 double attack;
38 double release;
39 double attack_coef;
40 double release_coef;
41 int mode;
42 int direction;
43 int type;
44
45 AVFrame *state;
46 } AudioDynamicEqualizerContext;
47
48 static int config_input(AVFilterLink *inlink)
49 {
50 AVFilterContext *ctx = inlink->dst;
51 AudioDynamicEqualizerContext *s = ctx->priv;
52
53 s->state = ff_get_audio_buffer(inlink, 8);
54 if (!s->state)
55 return AVERROR(ENOMEM);
56
57 for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
58 double *state = (double *)s->state->extended_data[ch];
59
60 state[4] = 1.;
61 }
62
63 return 0;
64 }
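/* Layout of the per-channel state buffer allocated above (eight doubles per
 * channel, five of them used by filter_channels() below): state[0..1] are the
 * integrator states of the detection SVF, state[2..3] those of the target SVF,
 * and state[4] is the smoothed gain of the previous sample, initialised here
 * to 1.0 (unity). */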
65
66 static double get_svf(double in, double *m, double *a, double *b)
67 {
68 const double v0 = in;
69 const double v3 = v0 - b[1];
70 const double v1 = a[0] * b[0] + a[1] * v3;
71 const double v2 = b[1] + a[1] * b[0] + a[2] * v3;
72
73 b[0] = 2. * v1 - b[0];
74 b[1] = 2. * v2 - b[1];
75
76 return m[0] * v0 + m[1] * v1 + m[2] * v2;
77 }
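The following annotation sketches what get_svf computes; the Simper/Cytomic attribution and the 48 kHz worked value are assumptions for illustration, not something stated in the source.

/* get_svf() advances a two-pole state-variable filter by one sample using
 * trapezoidal integration, in the arrangement known from Andrew Simper's
 * (Cytomic) technical papers: a[] holds 1/(1 + g*(g + k)), g*a[0] and g*a[1]
 * with g = tan(M_PI * frequency / sample_rate) and k = 1/Q, b[] holds the two
 * integrator states, and m[] mixes the input and the two integrator outputs
 * into the output. With m = {0, k, 0}, as in the detection path below, this
 * gives a band-pass with unity gain at the centre frequency; the bell and
 * shelf responses of the target path are obtained in filter_channels() by
 * scaling g or k with the current gain and choosing different m[]. As a
 * worked value: at a 48 kHz sample rate and the default 1 kHz detection
 * frequency, dg = tan(M_PI * 1000 / 48000) ~= 0.0655. */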
78
79 typedef struct ThreadData {
80 AVFrame *in, *out;
81 } ThreadData;
82
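/* filter_channels() processes channels [start, end) of one slice-threading
 * job. For every sample it:
 *   1. feeds the input through the detection band-pass SVF (dm/da
 *      coefficients, state[0..1]) and rectifies the result;
 *   2. if the detector is below (downward) or above (upward) the threshold,
 *      derives a raw gain av_clipd(1. + makeup + difference * ratio, 1.,
 *      range), taking its reciprocal in cut mode; otherwise the gain is 1;
 *   3. smooths that gain against state[4] with the attack coefficient when it
 *      falls and the release coefficient when it rises;
 *   4. whenever the smoothed gain changes, rebuilds the target SVF
 *      coefficients for a bell, low shelf or high shelf at
 *      tfrequency/tqfactor;
 *   5. runs the sample through the target SVF (state[2..3]); listen mode
 *      outputs the detection filter instead, and a disabled filter copies the
 *      input unchanged. */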
83 static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
84 {
85 AudioDynamicEqualizerContext *s = ctx->priv;
86 ThreadData *td = arg;
87 AVFrame *in = td->in;
88 AVFrame *out = td->out;
89 const double sample_rate = in->sample_rate;
90 const double makeup = s->makeup;
91 const double ratio = s->ratio;
92 const double range = s->range;
93 const double dfrequency = fmin(s->dfrequency, sample_rate * 0.5);
94 const double tfrequency = fmin(s->tfrequency, sample_rate * 0.5);
95 const double threshold = s->threshold;
96 const double release = s->release_coef;
97 const double irelease = 1. - release;
98 const double attack = s->attack_coef;
99 const double iattack = 1. - attack;
100 const double dqfactor = s->dqfactor;
101 const double tqfactor = s->tqfactor;
102 const double fg = tan(M_PI * tfrequency / sample_rate);
103 const double dg = tan(M_PI * dfrequency / sample_rate);
104 const int start = (in->ch_layout.nb_channels * jobnr) / nb_jobs;
105 const int end = (in->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
106 const int direction = s->direction;
107 const int mode = s->mode;
108 const int type = s->type;
109 double da[3], dm[3];
110
111 {
112 double k = 1. / dqfactor;
113
114 da[0] = 1. / (1. + dg * (dg + k));
115 da[1] = dg * da[0];
116 da[2] = dg * da[1];
117
118 dm[0] = 0.;
119 dm[1] = k;
120 dm[2] = 0.;
121 }
122
123 for (int ch = start; ch < end; ch++) {
124 const double *src = (const double *)in->extended_data[ch];
125 double *dst = (double *)out->extended_data[ch];
126 double *state = (double *)s->state->extended_data[ch];
127
128 for (int n = 0; n < out->nb_samples; n++) {
129 double detect, gain, v, listen;
130 double fa[3], fm[3];
131 double k, g;
132
133 detect = listen = get_svf(src[n], dm, da, state);
134 detect = fabs(detect);
135
136 if (direction == 0 && mode == 0 && detect < threshold)
137 detect = 1. / av_clipd(1. + makeup + (threshold - detect) * ratio, 1., range);
138 else if (direction == 0 && mode == 1 && detect < threshold)
139 detect = av_clipd(1. + makeup + (threshold - detect) * ratio, 1., range);
140 else if (direction == 1 && mode == 0 && detect > threshold)
141 detect = 1. / av_clipd(1. + makeup + (detect - threshold) * ratio, 1., range);
142 else if (direction == 1 && mode == 1 && detect > threshold)
143 detect = av_clipd(1. + makeup + (detect - threshold) * ratio, 1., range);
144 else
145 detect = 1.;
146
147 if (detect < state[4]) {
148 detect = iattack * detect + attack * state[4];
149 } else {
150 detect = irelease * detect + release * state[4];
151 }
152
153 if (state[4] != detect || n == 0) {
154 state[4] = gain = detect;
155
156 switch (type) {
157 case 0:
158 k = 1. / (tqfactor * gain);
159
160 fa[0] = 1. / (1. + fg * (fg + k));
161 fa[1] = fg * fa[0];
162 fa[2] = fg * fa[1];
163
164 fm[0] = 1.;
165 fm[1] = k * (gain * gain - 1.);
166 fm[2] = 0.;
167 break;
168 case 1:
169 k = 1. / tqfactor;
170 g = fg / sqrt(gain);
171
172 fa[0] = 1. / (1. + g * (g + k));
173 fa[1] = g * fa[0];
174 fa[2] = g * fa[1];
175
176 fm[0] = 1.;
177 fm[1] = k * (gain - 1.);
178 fm[2] = gain * gain - 1.;
179 break;
180 case 2:
181 k = 1. / tqfactor;
182 g = fg / sqrt(gain);
183
184 fa[0] = 1. / (1. + g * (g + k));
185 fa[1] = g * fa[0];
186 fa[2] = g * fa[1];
187
188 fm[0] = gain * gain;
189 fm[1] = k * (1. - gain) * gain;
190 fm[2] = 1. - gain * gain;
191 break;
192 }
193 }
194
195 v = get_svf(src[n], fm, fa, &state[2]);
196 v = mode == -1 ? listen : v;
197 dst[n] = ctx->is_disabled ? src[n] : v;
198 }
199 }
200
201 return 0;
202 }
203
204 static double get_coef(double x, double sr)
205 {
206 return exp(-1000. / (x * sr));
207 }
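Reading get_coef together with the option table below (attack/release defaults of 20 and 200 over a 1..2000 range), x is a duration in milliseconds and the result is a one-pole smoothing coefficient, exp(-1000 / (ms * sample_rate)). A worked example under that reading, with an assumed 48 kHz sample rate: the default attack of 20 ms gives attack_coef = exp(-1000 / (20 * 48000)) ≈ 0.99896, i.e. the gain smoother in filter_channels() has a time constant equal to the configured attack or release time.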
208
209 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
210 {
211 AVFilterContext *ctx = inlink->dst;
212 AVFilterLink *outlink = ctx->outputs[0];
213 AudioDynamicEqualizerContext *s = ctx->priv;
214 ThreadData td;
215 AVFrame *out;
216
217 if (av_frame_is_writable(in)) {
218 out = in;
219 } else {
220 out = ff_get_audio_buffer(outlink, in->nb_samples);
221 if (!out) {
222 av_frame_free(&in);
223 return AVERROR(ENOMEM);
224 }
225 av_frame_copy_props(out, in);
226 }
227
228 s->attack_coef = get_coef(s->attack, in->sample_rate);
229 s->release_coef = get_coef(s->release, in->sample_rate);
230
231 td.in = in;
232 td.out = out;
233 ff_filter_execute(ctx, filter_channels, &td, NULL,
234 FFMIN(outlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
235
236 if (out != in)
237 av_frame_free(&in);
238 return ff_filter_frame(outlink, out);
239 }
240
241 static av_cold void uninit(AVFilterContext *ctx)
242 {
243 AudioDynamicEqualizerContext *s = ctx->priv;
244
245 av_frame_free(&s->state);
246 }
247
248 #define OFFSET(x) offsetof(AudioDynamicEqualizerContext, x)
249 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
250
251 static const AVOption adynamicequalizer_options[] = {
252 { "threshold", "set detection threshold", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 100, FLAGS },
253 { "dfrequency", "set detection frequency", OFFSET(dfrequency), AV_OPT_TYPE_DOUBLE, {.dbl=1000}, 2, 1000000, FLAGS },
254 { "dqfactor", "set detection Q factor", OFFSET(dqfactor), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.001, 1000, FLAGS },
255 { "tfrequency", "set target frequency", OFFSET(tfrequency), AV_OPT_TYPE_DOUBLE, {.dbl=1000}, 2, 1000000, FLAGS },
256 { "tqfactor", "set target Q factor", OFFSET(tqfactor), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.001, 1000, FLAGS },
257 { "attack", "set attack duration", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 1, 2000, FLAGS },
258 { "release", "set release duration", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=200}, 1, 2000, FLAGS },
259 { "ratio", "set ratio factor", OFFSET(ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 30, FLAGS },
260 { "makeup", "set makeup gain", OFFSET(makeup), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 100, FLAGS },
261 { "range", "set max gain", OFFSET(range), AV_OPT_TYPE_DOUBLE, {.dbl=50}, 1, 200, FLAGS },
262 { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, -1, 1, FLAGS, "mode" },
263 { "listen", 0, 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "mode" },
264 { "cut", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
265 { "boost", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
266 { "tftype", "set target filter type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "type" },
267 { "bell", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
268 { "lowshelf", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
269 { "highshelf",0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "type" },
270 { "direction", "set direction", OFFSET(direction), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "direction" },
271 { "downward", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "direction" },
272 { "upward", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "direction" },
273 { NULL }
274 };
275
276 AVFILTER_DEFINE_CLASS(adynamicequalizer);
277
278 static const AVFilterPad inputs[] = {
279 {
280 .name = "default",
281 .type = AVMEDIA_TYPE_AUDIO,
282 .filter_frame = filter_frame,
283 .config_props = config_input,
284 },
285 };
286
287 static const AVFilterPad outputs[] = {
288 {
289 .name = "default",
290 .type = AVMEDIA_TYPE_AUDIO,
291 },
292 };
293
294 const AVFilter ff_af_adynamicequalizer = {
295 .name = "adynamicequalizer",
296 .description = NULL_IF_CONFIG_SMALL("Apply Dynamic Equalization of input audio."),
297 .priv_size = sizeof(AudioDynamicEqualizerContext),
298 .priv_class = &adynamicequalizer_class,
299 .uninit = uninit,
300 FILTER_INPUTS(inputs),
301 FILTER_OUTPUTS(outputs),
302 FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
303 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
304 AVFILTER_FLAG_SLICE_THREADS,
305 .process_command = ff_filter_process_command,
306 };
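For context, an illustrative invocation of the filter; the parameter values are arbitrary picks from the option ranges above, and it assumes an FFmpeg build that includes this filter:

    ffmpeg -i in.wav -af "adynamicequalizer=threshold=0.1:dfrequency=1000:dqfactor=2:tfrequency=1000:tqfactor=2:ratio=2:range=20:attack=20:release=200:mode=cut:tftype=bell:direction=downward" out.wav

With these settings the downward/cut branch of filter_channels() applies: whenever the rectified detection band around 1 kHz drops below the threshold, the 1 kHz bell is attenuated, by at most a factor of range (here 20).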
307