/*
 * Copyright (c) 1999 Chris Bagwell
 * Copyright (c) 1999 Nick Bailey
 * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
 * Copyright (c) 2013 Paul B Mahol
 * Copyright (c) 2014 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio compand filter
 */

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct ChanParam {
    double attack;
    double decay;
    double volume;
} ChanParam;

typedef struct CompandSegment {
    double x, y;
    double a, b;
} CompandSegment;

typedef struct CompandContext {
    const AVClass *class;
    int nb_segments;
    char *attacks, *decays, *points;
    CompandSegment *segments;
    ChanParam *channels;
    double in_min_lin;
    double out_min_lin;
    double curve_dB;
    double gain_dB;
    double initial_volume;
    double delay;
    AVFrame *delay_frame;
    int delay_samples;
    int delay_count;
    int delay_index;
    int64_t pts;

    int (*compand)(AVFilterContext *ctx, AVFrame *frame);
} CompandContext;

#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption compand_options[] = {
    { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
    { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
    { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
    { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
    { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
    { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
    { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(compand);

static av_cold int init(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;
    s->pts = AV_NOPTS_VALUE;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;

    av_freep(&s->channels);
    av_freep(&s->segments);
    av_frame_free(&s->delay_frame);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static void count_items(char *item_str, int *nb_items)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == ' ' || *p == '|')
            (*nb_items)++;
    }
}

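/* Track the per-channel volume estimate: rising input is smoothed with the
 * attack coefficient, falling input with the decay coefficient. */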
static void update_volume(ChanParam *cp, double in)
{
    double delta = in - cp->volume;

    if (delta > 0.0)
        cp->volume += delta * cp->attack;
    else
        cp->volume += delta * cp->decay;
}

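/* Find the transfer-function segment that contains the current volume
 * estimate (in the natural-log domain), evaluate its quadratic and return
 * the result as a linear gain factor. */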
static double get_volume(CompandContext *s, double in_lin)
{
    CompandSegment *cs;
    double in_log, out_log;
    int i;

    if (in_lin < s->in_min_lin)
        return s->out_min_lin;

    in_log = log(in_lin);

    for (i = 1; i < s->nb_segments; i++)
        if (in_log <= s->segments[i].x)
            break;
    cs = &s->segments[i - 1];
    in_log -= cs->x;
    out_log = cs->y + in_log * (cs->a * in_log + cs->b);

    return exp(out_log);
}

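/* Zero-delay variant: the gain derived from the running volume estimate is
 * applied to the same sample that updated it. */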
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;
    int err;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        err = av_frame_copy_props(out_frame, frame);
        if (err < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return err;
        }
    }

    for (chan = 0; chan < channels; chan++) {
        const double *src = (double *)frame->extended_data[chan];
        double *dst = (double *)out_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            update_volume(cp, fabs(src[i]));

            dst[i] = src[i] * get_volume(s, cp->volume);
        }
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}

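/* Cheap wrap-around for the circular delay-buffer index; it only ever
 * overshoots the buffer size by at most one step. */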
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

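/* Delayed variant: input samples are staged in a circular per-channel delay
 * buffer, and the gain derived from the newest sample is applied to the
 * oldest one, giving the volume detector a look-ahead of 'delay' seconds. */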
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    int chan, i, av_uninit(dindex), oindex, av_uninit(count);
    AVFrame *out_frame = NULL;
    int err;

    if (s->pts == AV_NOPTS_VALUE) {
        s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
    }

    av_assert1(channels > 0); /* would corrupt delay_count and delay_index */

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        const double *src = (double *)frame->extended_data[chan];
        double *dbuf = (double *)delay_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];
        double *dst;

        count  = s->delay_count;
        dindex = s->delay_index;
        for (i = 0, oindex = 0; i < nb_samples; i++) {
            const double in = src[i];
            update_volume(cp, fabs(in));

            if (count >= s->delay_samples) {
                if (!out_frame) {
                    out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
                    if (!out_frame) {
                        av_frame_free(&frame);
                        return AVERROR(ENOMEM);
                    }
                    err = av_frame_copy_props(out_frame, frame);
                    if (err < 0) {
                        av_frame_free(&out_frame);
                        av_frame_free(&frame);
                        return err;
                    }
                    out_frame->pts = s->pts;
                    s->pts += av_rescale_q(nb_samples - i,
                        (AVRational){ 1, inlink->sample_rate },
                        inlink->time_base);
                }

                dst = (double *)out_frame->extended_data[chan];
                dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
            } else {
                count++;
            }

            dbuf[dindex] = in;
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }

    s->delay_count = count;
    s->delay_index = dindex;

    av_frame_free(&frame);

    if (out_frame) {
        err = ff_filter_frame(ctx->outputs[0], out_frame);
        return err;
    }

    return 0;
}

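/* Flush whatever is still queued in the delay buffer once the input has
 * reached EOF. */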
static int compand_drain(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int channels = outlink->channels;
    AVFrame *frame = NULL;
    int chan, i, dindex;

    /* 2048 is to limit output frame size during drain */
    frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts;
    s->pts += av_rescale_q(frame->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);

    av_assert0(channels > 0);
    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        double *dbuf = (double *)delay_frame->extended_data[chan];
        double *dst = (double *)frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        dindex = s->delay_index;
        for (i = 0; i < frame->nb_samples; i++) {
            dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count -= frame->nb_samples;
    s->delay_index = dindex;

    return ff_filter_frame(outlink, frame);
}

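/* Parse the attacks/decays/points options and precompute the transfer
 * function as a list of segments (straight pieces joined by quadratic
 * "soft knee" corners), converted from dB to the natural-log domain used
 * by get_volume(). */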
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int sample_rate = outlink->sample_rate;
    double radius = s->curve_dB * M_LN10 / 20.0;
    char *p, *saveptr = NULL;
    const int channels = outlink->channels;
    int nb_attacks, nb_decays, nb_points;
    int new_nb_items, num;
    int i;
    int err;

    count_items(s->attacks, &nb_attacks);
    count_items(s->decays, &nb_decays);
    count_items(s->points, &nb_points);

    if (channels <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
        return AVERROR(EINVAL);
    }

    if (nb_attacks > channels || nb_decays > channels) {
        av_log(ctx, AV_LOG_WARNING,
                "Number of attacks/decays bigger than number of channels. Ignoring rest of entries.\n");
        nb_attacks = FFMIN(nb_attacks, channels);
        nb_decays  = FFMIN(nb_decays, channels);
    }

    uninit(ctx);

    s->channels = av_mallocz_array(channels, sizeof(*s->channels));
    s->nb_segments = (nb_points + 4) * 2;
    s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));

    if (!s->channels || !s->segments) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    p = s->attacks;
    for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
        if (s->channels[i].attack < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_attacks = new_nb_items;

    p = s->decays;
    for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
        if (s->channels[i].decay < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_decays = new_nb_items;

    if (nb_attacks != nb_decays) {
        av_log(ctx, AV_LOG_ERROR,
                "Number of attacks %d differs from number of decays %d.\n",
                nb_attacks, nb_decays);
        uninit(ctx);
        return AVERROR(EINVAL);
    }

    for (i = nb_decays; i < channels; i++) {
        s->channels[i].attack = s->channels[nb_decays - 1].attack;
        s->channels[i].decay  = s->channels[nb_decays - 1].decay;
    }

#define S(x) s->segments[2 * ((x) + 1)]
    p = s->points;
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        p = NULL;
        if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
            av_log(ctx, AV_LOG_ERROR,
                    "Invalid and/or missing input/output value.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                    "Transfer function input values must be increasing.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments */
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        if (fabs(g1 - g2))
            continue;
        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    for (i = 0; i < s->nb_segments; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

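    /* Round off each corner of the transfer function: pull the two straight
     * pieces meeting at a knee point back by up to the soft-knee radius and
     * fit a quadratic segment across the gap. */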
#define L(x) s->segments[i - (x)]
    for (i = 4; i < s->nb_segments; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        in1  = cx - L(3).x;
        out1 = cy - L(3).y;
        in2  = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    L(3).x = 0;
    L(3).y = L(2).y;

    s->in_min_lin  = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);

    for (i = 0; i < channels; i++) {
        ChanParam *cp = &s->channels[i];

        if (cp->attack > 1.0 / sample_rate)
            cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
        else
            cp->attack = 1.0;
        if (cp->decay > 1.0 / sample_rate)
            cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
        else
            cp->decay = 1.0;
        cp->volume = ff_exp10(s->initial_volume / 20);
    }

    s->delay_samples = s->delay * sample_rate;
    if (s->delay_samples <= 0) {
        s->compand = compand_nodelay;
        return 0;
    }

    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    s->delay_frame->format         = outlink->format;
    s->delay_frame->nb_samples     = s->delay_samples;
    s->delay_frame->channel_layout = outlink->channel_layout;

    err = av_frame_get_buffer(s->delay_frame, 0);
    if (err)
        return err;

    s->compand = compand_delay;
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CompandContext *s = ctx->priv;

    return s->compand(ctx, frame);
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    int ret = 0;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
        ret = compand_drain(outlink);

    return ret;
}

static const AVFilterPad compand_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad compand_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .config_props  = config_output,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_compand = {
    .name           = "compand",
    .description    = NULL_IF_CONFIG_SMALL(
            "Compress or expand audio dynamic range."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(CompandContext),
    .priv_class     = &compand_class,
    .init           = init,
    .uninit         = uninit,
    .inputs         = compand_inputs,
    .outputs        = compand_outputs,
};