FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/af_afir.c
Date: 2024-07-26 21:54:09
             Exec   Total   Coverage
Lines:          0     381       0.0%
Functions:      0      13       0.0%
Branches:       0     248       0.0%

Line   Source
1 /*
2 * Copyright (c) 2017 Paul B Mahol
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * An arbitrary audio FIR filter
24 */
25
26 #include <float.h>
27
28 #include "libavutil/avassert.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/tx.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/channel_layout.h"
34 #include "libavutil/float_dsp.h"
35 #include "libavutil/frame.h"
36 #include "libavutil/log.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/rational.h"
39
40 #include "audio.h"
41 #include "avfilter.h"
42 #include "filters.h"
43 #include "formats.h"
44 #include "internal.h"
45 #include "af_afirdsp.h"
46
47 #define MAX_IR_STREAMS 32
48
49 typedef struct AudioFIRSegment {
50 int nb_partitions;
51 int part_size;
52 int block_size;
53 int fft_length;
54 int coeff_size;
55 int input_size;
56 int input_offset;
57
58 int *output_offset;
59 int *part_index;
60
61 AVFrame *sumin;
62 AVFrame *sumout;
63 AVFrame *blockout;
64 AVFrame *tempin;
65 AVFrame *tempout;
66 AVFrame *buffer;
67 AVFrame *coeff;
68 AVFrame *input;
69 AVFrame *output;
70
71 AVTXContext **ctx, **tx, **itx;
72 av_tx_fn ctx_fn, tx_fn, itx_fn;
73 } AudioFIRSegment;
74
75 typedef struct AudioFIRContext {
76 const AVClass *class;
77
78 float wet_gain;
79 float dry_gain;
80 float length;
81 int gtype;
82 float ir_norm;
83 float ir_link;
84 float ir_gain;
85 int ir_format;
86 int ir_load;
87 float max_ir_len;
88 int response;
89 int w, h;
90 AVRational frame_rate;
91 int ir_channel;
92 int minp;
93 int maxp;
94 int nb_irs;
95 int prev_selir;
96 int selir;
97 int precision;
98 int format;
99
100 int eof_coeffs[MAX_IR_STREAMS];
101 int have_coeffs[MAX_IR_STREAMS];
102 int nb_taps[MAX_IR_STREAMS];
103 int nb_segments[MAX_IR_STREAMS];
104 int max_offset[MAX_IR_STREAMS];
105 int nb_channels;
106 int one2many;
107 int prev_is_disabled;
108 int *loading;
109 double *ch_gain;
110
111 AudioFIRSegment seg[MAX_IR_STREAMS][1024];
112
113 AVFrame *in;
114 AVFrame *xfade[2];
115 AVFrame *fadein[2];
116 AVFrame *ir[MAX_IR_STREAMS];
117 AVFrame *norm_ir[MAX_IR_STREAMS];
118 int min_part_size;
119 int max_part_size;
120 int64_t pts;
121
122 AudioFIRDSPContext afirdsp;
123 AVFloatDSPContext *fdsp;
124 } AudioFIRContext;
125
126 #define DEPTH 32
127 #include "afir_template.c"
128
129 #undef DEPTH
130 #define DEPTH 64
131 #include "afir_template.c"
132
133 static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
134 {
135 AudioFIRContext *s = ctx->priv;
136 const int min_part_size = s->min_part_size;
137 const int prev_selir = s->prev_selir;
138 const int selir = s->selir;
139
140 for (int offset = 0; offset < out->nb_samples; offset += min_part_size) {
141 switch (s->format) {
142 case AV_SAMPLE_FMT_FLTP:
143 fir_quantums_float(ctx, s, out, min_part_size, ch, offset, prev_selir, selir);
144 break;
145 case AV_SAMPLE_FMT_DBLP:
146 fir_quantums_double(ctx, s, out, min_part_size, ch, offset, prev_selir, selir);
147 break;
148 }
149
150 if (selir != prev_selir && s->loading[ch] != 0)
151 s->loading[ch] += min_part_size;
152 }
153
154 return 0;
155 }
156
157 static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
158 {
159 AVFrame *out = arg;
160 const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
161 const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
162
163 for (int ch = start; ch < end; ch++)
164 fir_channel(ctx, out, ch);
165
166 return 0;
167 }
168
169 static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
170 {
171 AVFilterContext *ctx = outlink->src;
172 AVFrame *out;
173
174 out = ff_get_audio_buffer(outlink, in->nb_samples);
175 if (!out) {
176 av_frame_free(&in);
177 return AVERROR(ENOMEM);
178 }
179 av_frame_copy_props(out, in);
180 out->pts = s->pts = in->pts;
181
182 s->in = in;
183 ff_filter_execute(ctx, fir_channels, out, NULL,
184 FFMIN(outlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
185 s->prev_is_disabled = ctx->is_disabled;
186
187 av_frame_free(&in);
188 s->in = NULL;
189
190 return ff_filter_frame(outlink, out);
191 }
192
193 static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int selir,
194 int offset, int nb_partitions, int part_size, int index)
195 {
196 AudioFIRContext *s = ctx->priv;
197 const size_t cpu_align = av_cpu_max_align();
198 union { double d; float f; } cscale, scale, iscale;
199 enum AVTXType tx_type;
200 int ret;
201
202 seg->tx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->tx));
203 seg->ctx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->ctx));
204 seg->itx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->itx));
205 if (!seg->tx || !seg->ctx || !seg->itx)
206 return AVERROR(ENOMEM);
207
208 seg->fft_length = (part_size + 1) * 2;
209 seg->part_size = part_size;
210 seg->coeff_size = FFALIGN(seg->part_size + 1, cpu_align);
211 seg->block_size = FFMAX(seg->coeff_size * 2, FFALIGN(seg->fft_length, cpu_align));
212 seg->nb_partitions = nb_partitions;
213 seg->input_size = offset + s->min_part_size;
214 seg->input_offset = offset;
215
216 seg->part_index = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->part_index));
217 seg->output_offset = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->output_offset));
218 if (!seg->part_index || !seg->output_offset)
219 return AVERROR(ENOMEM);
220
221 switch (s->format) {
222 case AV_SAMPLE_FMT_FLTP:
223 cscale.f = 1.f;
224 scale.f = 1.f / sqrtf(2.f * part_size);
225 iscale.f = 1.f / sqrtf(2.f * part_size);
226 tx_type = AV_TX_FLOAT_RDFT;
227 break;
228 case AV_SAMPLE_FMT_DBLP:
229 cscale.d = 1.0;
230 scale.d = 1.0 / sqrt(2.0 * part_size);
231 iscale.d = 1.0 / sqrt(2.0 * part_size);
232 tx_type = AV_TX_DOUBLE_RDFT;
233 break;
234 default:
235 av_assert1(0);
236 }
237
238 for (int ch = 0; ch < ctx->inputs[0]->ch_layout.nb_channels && part_size >= 1; ch++) {
239 ret = av_tx_init(&seg->ctx[ch], &seg->ctx_fn, tx_type,
240 0, 2 * part_size, &cscale, 0);
241 if (ret < 0)
242 return ret;
243
244 ret = av_tx_init(&seg->tx[ch], &seg->tx_fn, tx_type,
245 0, 2 * part_size, &scale, 0);
246 if (ret < 0)
247 return ret;
248 ret = av_tx_init(&seg->itx[ch], &seg->itx_fn, tx_type,
249 1, 2 * part_size, &iscale, 0);
250 if (ret < 0)
251 return ret;
252 }
253
254 seg->sumin = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
255 seg->sumout = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
256 seg->blockout = ff_get_audio_buffer(ctx->inputs[0], seg->block_size * seg->nb_partitions);
257 seg->tempin = ff_get_audio_buffer(ctx->inputs[0], seg->block_size);
258 seg->tempout = ff_get_audio_buffer(ctx->inputs[0], seg->block_size);
259 seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
260 seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
261 seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size * 5);
262 if (!seg->buffer || !seg->sumin || !seg->sumout || !seg->blockout ||
263 !seg->input || !seg->output || !seg->tempin || !seg->tempout)
264 return AVERROR(ENOMEM);
265
266 return 0;
267 }
268
269 static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
270 {
271 AudioFIRContext *s = ctx->priv;
272
273 if (seg->ctx) {
274 for (int ch = 0; ch < s->nb_channels; ch++)
275 av_tx_uninit(&seg->ctx[ch]);
276 }
277 av_freep(&seg->ctx);
278
279 if (seg->tx) {
280 for (int ch = 0; ch < s->nb_channels; ch++)
281 av_tx_uninit(&seg->tx[ch]);
282 }
283 av_freep(&seg->tx);
284
285 if (seg->itx) {
286 for (int ch = 0; ch < s->nb_channels; ch++)
287 av_tx_uninit(&seg->itx[ch]);
288 }
289 av_freep(&seg->itx);
290
291 av_freep(&seg->output_offset);
292 av_freep(&seg->part_index);
293
294 av_frame_free(&seg->tempin);
295 av_frame_free(&seg->tempout);
296 av_frame_free(&seg->blockout);
297 av_frame_free(&seg->sumin);
298 av_frame_free(&seg->sumout);
299 av_frame_free(&seg->buffer);
300 av_frame_free(&seg->input);
301 av_frame_free(&seg->output);
302 seg->input_size = 0;
303
304 for (int i = 0; i < MAX_IR_STREAMS; i++)
305 av_frame_free(&seg->coeff);
306 }
307
308 static int convert_coeffs(AVFilterContext *ctx, int selir)
309 {
310 AudioFIRContext *s = ctx->priv;
311 int ret, nb_taps, cur_nb_taps;
312
313 if (!s->nb_taps[selir]) {
314 int part_size, max_part_size;
315 int left, offset = 0;
316
317 s->nb_taps[selir] = ff_inlink_queued_samples(ctx->inputs[1 + selir]);
318 if (s->nb_taps[selir] <= 0)
319 return AVERROR(EINVAL);
320
321 if (s->minp > s->maxp)
322 s->maxp = s->minp;
323
324 if (s->nb_segments[selir])
325 goto skip;
326
327 left = s->nb_taps[selir];
328 part_size = 1 << av_log2(s->minp);
329 max_part_size = 1 << av_log2(s->maxp);
330
331 for (int i = 0; left > 0; i++) {
332 int step = (part_size == max_part_size) ? INT_MAX : 1 + (i == 0);
333 int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
334
335 s->nb_segments[selir] = i + 1;
336 ret = init_segment(ctx, &s->seg[selir][i], selir, offset, nb_partitions, part_size, i);
337 if (ret < 0)
338 return ret;
339 offset += nb_partitions * part_size;
340 s->max_offset[selir] = offset;
341 left -= nb_partitions * part_size;
342 part_size *= 2;
343 part_size = FFMIN(part_size, max_part_size);
344 }
345 }
346
347 skip:
348 if (!s->ir[selir]) {
349 ret = ff_inlink_consume_samples(ctx->inputs[1 + selir], s->nb_taps[selir], s->nb_taps[selir], &s->ir[selir]);
350 if (ret < 0)
351 return ret;
352 if (ret == 0)
353 return AVERROR_BUG;
354 }
355
356 cur_nb_taps = s->ir[selir]->nb_samples;
357 nb_taps = cur_nb_taps;
358
359 if (!s->norm_ir[selir] || s->norm_ir[selir]->nb_samples < nb_taps) {
360 av_frame_free(&s->norm_ir[selir]);
361 s->norm_ir[selir] = ff_get_audio_buffer(ctx->inputs[0], FFALIGN(nb_taps, 8));
362 if (!s->norm_ir[selir])
363 return AVERROR(ENOMEM);
364 }
365
366 av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps);
367 av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments[selir]);
368
369 switch (s->format) {
370 case AV_SAMPLE_FMT_FLTP:
371 for (int ch = 0; ch < s->nb_channels; ch++) {
372 const float *tsrc = (const float *)s->ir[selir]->extended_data[!s->one2many * ch];
373
374 s->ch_gain[ch] = ir_gain_float(ctx, s, nb_taps, tsrc);
375 }
376
377 if (s->ir_link) {
378 float gain = +INFINITY;
379
380 for (int ch = 0; ch < s->nb_channels; ch++)
381 gain = fminf(gain, s->ch_gain[ch]);
382
383 for (int ch = 0; ch < s->nb_channels; ch++)
384 s->ch_gain[ch] = gain;
385 }
386
387 for (int ch = 0; ch < s->nb_channels; ch++) {
388 const float *tsrc = (const float *)s->ir[selir]->extended_data[!s->one2many * ch];
389 float *time = (float *)s->norm_ir[selir]->extended_data[ch];
390
391 memcpy(time, tsrc, sizeof(*time) * nb_taps);
392 for (int i = FFMAX(1, s->length * nb_taps); i < nb_taps; i++)
393 time[i] = 0;
394
395 ir_scale_float(ctx, s, nb_taps, ch, time, s->ch_gain[ch]);
396
397 for (int n = 0; n < s->nb_segments[selir]; n++) {
398 AudioFIRSegment *seg = &s->seg[selir][n];
399
400 if (!seg->coeff)
401 seg->coeff = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->coeff_size * 2);
402 if (!seg->coeff)
403 return AVERROR(ENOMEM);
404
405 for (int i = 0; i < seg->nb_partitions; i++)
406 convert_channel_float(ctx, s, ch, seg, i, selir);
407 }
408 }
409 break;
410 case AV_SAMPLE_FMT_DBLP:
411 for (int ch = 0; ch < s->nb_channels; ch++) {
412 const double *tsrc = (const double *)s->ir[selir]->extended_data[!s->one2many * ch];
413
414 s->ch_gain[ch] = ir_gain_double(ctx, s, nb_taps, tsrc);
415 }
416
417 if (s->ir_link) {
418 double gain = +INFINITY;
419
420 for (int ch = 0; ch < s->nb_channels; ch++)
421 gain = fmin(gain, s->ch_gain[ch]);
422
423 for (int ch = 0; ch < s->nb_channels; ch++)
424 s->ch_gain[ch] = gain;
425 }
426
427 for (int ch = 0; ch < s->nb_channels; ch++) {
428 const double *tsrc = (const double *)s->ir[selir]->extended_data[!s->one2many * ch];
429 double *time = (double *)s->norm_ir[selir]->extended_data[ch];
430
431 memcpy(time, tsrc, sizeof(*time) * nb_taps);
432 for (int i = FFMAX(1, s->length * nb_taps); i < nb_taps; i++)
433 time[i] = 0;
434
435 ir_scale_double(ctx, s, nb_taps, ch, time, s->ch_gain[ch]);
436
437 for (int n = 0; n < s->nb_segments[selir]; n++) {
438 AudioFIRSegment *seg = &s->seg[selir][n];
439
440 if (!seg->coeff)
441 seg->coeff = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->coeff_size * 2);
442 if (!seg->coeff)
443 return AVERROR(ENOMEM);
444
445 for (int i = 0; i < seg->nb_partitions; i++)
446 convert_channel_double(ctx, s, ch, seg, i, selir);
447 }
448 }
449 break;
450 }
451
452 s->have_coeffs[selir] = 1;
453
454 return 0;
455 }
456
457 static int check_ir(AVFilterLink *link, int selir)
458 {
459 AVFilterContext *ctx = link->dst;
460 AudioFIRContext *s = ctx->priv;
461 int nb_taps, max_nb_taps;
462
463 nb_taps = ff_inlink_queued_samples(link);
464 max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
465 if (nb_taps > max_nb_taps) {
466 av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
467 return AVERROR(EINVAL);
468 }
469
470 if (ff_inlink_check_available_samples(link, nb_taps + 1) == 1)
471 s->eof_coeffs[selir] = 1;
472
473 return 0;
474 }
475
476 static int activate(AVFilterContext *ctx)
477 {
478 AudioFIRContext *s = ctx->priv;
479 AVFilterLink *outlink = ctx->outputs[0];
480 int ret, status, available, wanted;
481 AVFrame *in = NULL;
482 int64_t pts;
483
484 FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
485
486 for (int i = 0; i < s->nb_irs; i++) {
487 const int selir = i;
488
489 if (s->ir_load && selir != s->selir)
490 continue;
491
492 if (!s->eof_coeffs[selir]) {
493 ret = check_ir(ctx->inputs[1 + selir], selir);
494 if (ret < 0)
495 return ret;
496
497 if (!s->eof_coeffs[selir]) {
498 if (ff_outlink_frame_wanted(ctx->outputs[0]))
499 ff_inlink_request_frame(ctx->inputs[1 + selir]);
500 return 0;
501 }
502 }
503
504 if (!s->have_coeffs[selir] && s->eof_coeffs[selir]) {
505 ret = convert_coeffs(ctx, selir);
506 if (ret < 0)
507 return ret;
508 }
509 }
510
511 available = ff_inlink_queued_samples(ctx->inputs[0]);
512 wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
513 ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
514 if (ret > 0)
515 ret = fir_frame(s, in, outlink);
516
517 if (s->selir != s->prev_selir && s->loading[0] == 0)
518 s->prev_selir = s->selir;
519
520 if (ret < 0)
521 return ret;
522
523 if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
524 ff_filter_set_ready(ctx, 10);
525 return 0;
526 }
527
528 if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
529 if (status == AVERROR_EOF) {
530 ff_outlink_set_status(ctx->outputs[0], status, pts);
531 return 0;
532 }
533 }
534
535 if (ff_outlink_frame_wanted(ctx->outputs[0])) {
536 ff_inlink_request_frame(ctx->inputs[0]);
537 return 0;
538 }
539
540 return FFERROR_NOT_READY;
541 }
542
543 static int query_formats(AVFilterContext *ctx)
544 {
545 AudioFIRContext *s = ctx->priv;
546 static const enum AVSampleFormat sample_fmts[3][3] = {
547 { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
548 { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE },
549 { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
550 };
551 int ret;
552
553 if (s->ir_format) {
554 ret = ff_set_common_all_channel_counts(ctx);
555 if (ret < 0)
556 return ret;
557 } else {
558 AVFilterChannelLayouts *mono = NULL;
559 AVFilterChannelLayouts *layouts = ff_all_channel_counts();
560
561 if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts)) < 0)
562 return ret;
563 if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
564 return ret;
565
566 ret = ff_add_channel_layout(&mono, &(AVChannelLayout)AV_CHANNEL_LAYOUT_MONO);
567 if (ret)
568 return ret;
569 for (int i = 1; i < ctx->nb_inputs; i++) {
570 if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
571 return ret;
572 }
573 }
574
575 if ((ret = ff_set_common_formats_from_list(ctx, sample_fmts[s->precision])) < 0)
576 return ret;
577
578 return ff_set_common_all_samplerates(ctx);
579 }
580
581 static int config_output(AVFilterLink *outlink)
582 {
583 AVFilterContext *ctx = outlink->src;
584 AudioFIRContext *s = ctx->priv;
585 int ret;
586
587 s->one2many = ctx->inputs[1 + s->selir]->ch_layout.nb_channels == 1;
588 outlink->sample_rate = ctx->inputs[0]->sample_rate;
589 outlink->time_base = ctx->inputs[0]->time_base;
590 if ((ret = av_channel_layout_copy(&outlink->ch_layout, &ctx->inputs[0]->ch_layout)) < 0)
591 return ret;
592 outlink->ch_layout.nb_channels = ctx->inputs[0]->ch_layout.nb_channels;
593
594 s->format = outlink->format;
595 s->nb_channels = outlink->ch_layout.nb_channels;
596 s->ch_gain = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*s->ch_gain));
597 s->loading = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*s->loading));
598 if (!s->loading || !s->ch_gain)
599 return AVERROR(ENOMEM);
600
601 s->fadein[0] = ff_get_audio_buffer(outlink, s->min_part_size);
602 s->fadein[1] = ff_get_audio_buffer(outlink, s->min_part_size);
603 if (!s->fadein[0] || !s->fadein[1])
604 return AVERROR(ENOMEM);
605
606 s->xfade[0] = ff_get_audio_buffer(outlink, s->min_part_size);
607 s->xfade[1] = ff_get_audio_buffer(outlink, s->min_part_size);
608 if (!s->xfade[0] || !s->xfade[1])
609 return AVERROR(ENOMEM);
610
611 switch (s->format) {
612 case AV_SAMPLE_FMT_FLTP:
613 for (int ch = 0; ch < s->nb_channels; ch++) {
614 float *dst0 = (float *)s->xfade[0]->extended_data[ch];
615 float *dst1 = (float *)s->xfade[1]->extended_data[ch];
616
617 for (int n = 0; n < s->min_part_size; n++) {
618 dst0[n] = (n + 1.f) / s->min_part_size;
619 dst1[n] = 1.f - dst0[n];
620 }
621 }
622 break;
623 case AV_SAMPLE_FMT_DBLP:
624 for (int ch = 0; ch < s->nb_channels; ch++) {
625 double *dst0 = (double *)s->xfade[0]->extended_data[ch];
626 double *dst1 = (double *)s->xfade[1]->extended_data[ch];
627
628 for (int n = 0; n < s->min_part_size; n++) {
629 dst0[n] = (n + 1.0) / s->min_part_size;
630 dst1[n] = 1.0 - dst0[n];
631 }
632 }
633 break;
634 }
635
636 return 0;
637 }
638
639 static av_cold void uninit(AVFilterContext *ctx)
640 {
641 AudioFIRContext *s = ctx->priv;
642
643 av_freep(&s->fdsp);
644 av_freep(&s->ch_gain);
645 av_freep(&s->loading);
646
647 for (int i = 0; i < s->nb_irs; i++) {
648 for (int j = 0; j < s->nb_segments[i]; j++)
649 uninit_segment(ctx, &s->seg[i][j]);
650
651 av_frame_free(&s->ir[i]);
652 av_frame_free(&s->norm_ir[i]);
653 }
654
655 av_frame_free(&s->fadein[0]);
656 av_frame_free(&s->fadein[1]);
657
658 av_frame_free(&s->xfade[0]);
659 av_frame_free(&s->xfade[1]);
660 }
661
662 static av_cold int init(AVFilterContext *ctx)
663 {
664 AudioFIRContext *s = ctx->priv;
665 AVFilterPad pad;
666 int ret;
667
668 s->prev_selir = FFMIN(s->nb_irs - 1, s->selir);
669
670 pad = (AVFilterPad) {
671 .name = "main",
672 .type = AVMEDIA_TYPE_AUDIO,
673 };
674
675 ret = ff_append_inpad(ctx, &pad);
676 if (ret < 0)
677 return ret;
678
679 for (int n = 0; n < s->nb_irs; n++) {
680 pad = (AVFilterPad) {
681 .name = av_asprintf("ir%d", n),
682 .type = AVMEDIA_TYPE_AUDIO,
683 };
684
685 if (!pad.name)
686 return AVERROR(ENOMEM);
687
688 ret = ff_append_inpad_free_name(ctx, &pad);
689 if (ret < 0)
690 return ret;
691 }
692
693 s->fdsp = avpriv_float_dsp_alloc(0);
694 if (!s->fdsp)
695 return AVERROR(ENOMEM);
696
697 ff_afir_init(&s->afirdsp);
698
699 s->min_part_size = 1 << av_log2(s->minp);
700 s->max_part_size = 1 << av_log2(s->maxp);
701
702 return 0;
703 }
704
705 static int process_command(AVFilterContext *ctx,
706 const char *cmd,
707 const char *arg,
708 char *res,
709 int res_len,
710 int flags)
711 {
712 AudioFIRContext *s = ctx->priv;
713 int prev_selir, ret;
714
715 prev_selir = s->selir;
716 ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
717 if (ret < 0)
718 return ret;
719
720 s->selir = FFMIN(s->nb_irs - 1, s->selir);
721 if (s->selir != prev_selir) {
722 s->prev_selir = prev_selir;
723
724 for (int ch = 0; ch < s->nb_channels; ch++)
725 s->loading[ch] = 1;
726 }
727
728 return 0;
729 }
730
731 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
732 #define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
733 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
734 #define OFFSET(x) offsetof(AudioFIRContext, x)
735
736 static const AVOption afir_options[] = {
737 { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AFR },
738 { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AFR },
739 { "length", "set IR length", OFFSET(length), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
740 { "gtype", "set IR auto gain type",OFFSET(gtype), AV_OPT_TYPE_INT, {.i64=0}, -1, 4, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
741 { "none", "without auto gain", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
742 { "peak", "peak gain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
743 { "dc", "DC gain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
744 { "gn", "gain to noise", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
745 { "ac", "AC gain", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
746 { "rms", "RMS gain", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
747 { "irnorm", "set IR norm", OFFSET(ir_norm), AV_OPT_TYPE_FLOAT, {.dbl=1}, -1, 2, AF },
748 { "irlink", "set IR link", OFFSET(ir_link), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
749 { "irgain", "set IR gain", OFFSET(ir_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
750 { "irfmt", "set IR format", OFFSET(ir_format), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, .unit = "irfmt" },
751 { "mono", "single channel", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "irfmt" },
752 { "input", "same as input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "irfmt" },
753 { "maxir", "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
754 { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF|AV_OPT_FLAG_DEPRECATED },
755 { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF|AV_OPT_FLAG_DEPRECATED },
756 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF|AV_OPT_FLAG_DEPRECATED },
757 { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF|AV_OPT_FLAG_DEPRECATED },
758 { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 1, 65536, AF },
759 { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 8, 65536, AF },
760 { "nbirs", "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT, {.i64=1}, 1, 32, AF },
761 { "ir", "select IR", OFFSET(selir), AV_OPT_TYPE_INT, {.i64=0}, 0, 31, AFR },
762 { "precision", "set processing precision", OFFSET(precision), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, AF, .unit = "precision" },
763 { "auto", "set auto processing precision", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "precision" },
764 { "float", "set single-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "precision" },
765 { "double","set double-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "precision" },
766 { "irload", "set IR loading type", OFFSET(ir_load), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AF, .unit = "irload" },
767 { "init", "load all IRs on init", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "irload" },
768 { "access", "load IR on access", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "irload" },
769 { NULL }
770 };
771
772 AVFILTER_DEFINE_CLASS(afir);
773
774 static const AVFilterPad outputs[] = {
775 {
776 .name = "default",
777 .type = AVMEDIA_TYPE_AUDIO,
778 .config_props = config_output,
779 },
780 };
781
782 const AVFilter ff_af_afir = {
783 .name = "afir",
784 .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
785 .priv_size = sizeof(AudioFIRContext),
786 .priv_class = &afir_class,
787 FILTER_QUERY_FUNC(query_formats),
788 FILTER_OUTPUTS(outputs),
789 .init = init,
790 .activate = activate,
791 .uninit = uninit,
792 .process_command = process_command,
793 .flags = AVFILTER_FLAG_DYNAMIC_INPUTS |
794 AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
795 AVFILTER_FLAG_SLICE_THREADS,
796 };
797
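
Usage sketch: every counter above is zero, so none of this file's code paths have been exercised. Assuming a test signal and a separate impulse-response file (the file names below are placeholders), a minimal invocation along these lines would drive init(), activate(), convert_coeffs() and fir_frame():

    ffmpeg -i input.wav -i ir.wav -lavfi afir=dry=1:wet=1:irfmt=input output.wav

The second input feeds the "ir0" pad appended in init(); dry, wet and irfmt are options declared in afir_options. A single run like this is only a starting point and does not by itself reach every branch (for example, the AV_SAMPLE_FMT_DBLP paths are taken only when double precision is negotiated or forced with precision=double).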