FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/af_afir.c
Date: 2024-04-25 15:36:26
             Exec   Total   Coverage
Lines:          0     380       0.0%
Functions:      0      13       0.0%
Branches:       0     248       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2017 Paul B Mahol
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * An arbitrary audio FIR filter
24 */
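/* In broad strokes (as can be read from convert_coeffs() and fir_channel()
 * below): the impulse response is split into partitions whose size grows from
 * the "minp" up to the "maxp" option, each partition is pre-transformed once,
 * and the input is then filtered in quantums of the smallest partition size by
 * the per-precision workers generated from afir_template.c. */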
25
26 #include <float.h>
27
28 #include "libavutil/cpu.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/tx.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/channel_layout.h"
33 #include "libavutil/float_dsp.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/log.h"
36 #include "libavutil/opt.h"
37
38 #include "audio.h"
39 #include "avfilter.h"
40 #include "filters.h"
41 #include "formats.h"
42 #include "internal.h"
43 #include "af_afir.h"
44 #include "af_afirdsp.h"
45
46 #define DEPTH 32
47 #include "afir_template.c"
48
49 #undef DEPTH
50 #define DEPTH 64
51 #include "afir_template.c"
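/* afir_template.c is included twice so the same template expands to the
 * single-precision (fir_quantums_float, ir_gain_float, convert_channel_float,
 * ...) and double-precision (..._double) variants dispatched on s->format. */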
52
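/* Filter one channel of the output frame in quantums of min_part_size
 * samples; while an IR switch is pending (selir != prev_selir) the
 * per-channel loading counter advances so the transition to the newly
 * selected IR can complete. */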
53 static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
54 {
55 AudioFIRContext *s = ctx->priv;
56 const int min_part_size = s->min_part_size;
57 const int prev_selir = s->prev_selir;
58 const int selir = s->selir;
59
60 for (int offset = 0; offset < out->nb_samples; offset += min_part_size) {
61 switch (s->format) {
62 case AV_SAMPLE_FMT_FLTP:
63 fir_quantums_float(ctx, s, out, min_part_size, ch, offset, prev_selir, selir);
64 break;
65 case AV_SAMPLE_FMT_DBLP:
66 fir_quantums_double(ctx, s, out, min_part_size, ch, offset, prev_selir, selir);
67 break;
68 }
69
70 if (selir != prev_selir && s->loading[ch] != 0)
71 s->loading[ch] += min_part_size;
72 }
73
74 return 0;
75 }
76
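/* Slice-threaded worker: job jobnr filters its share of the channels. */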
77 static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
78 {
79 AVFrame *out = arg;
80 const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
81 const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
82
83 for (int ch = start; ch < end; ch++)
84 fir_channel(ctx, out, ch);
85
86 return 0;
87 }
88
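/* Filter one input frame: allocate a matching output frame, run fir_channels
 * across the worker threads, free the consumed input and send the result
 * downstream. */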
89 static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
90 {
91 AVFilterContext *ctx = outlink->src;
92 AVFrame *out;
93
94 out = ff_get_audio_buffer(outlink, in->nb_samples);
95 if (!out) {
96 av_frame_free(&in);
97 return AVERROR(ENOMEM);
98 }
99 av_frame_copy_props(out, in);
100 out->pts = s->pts = in->pts;
101
102 s->in = in;
103 ff_filter_execute(ctx, fir_channels, out, NULL,
104 FFMIN(outlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
105 s->prev_is_disabled = ctx->is_disabled;
106
107 av_frame_free(&in);
108 s->in = NULL;
109
110 return ff_filter_frame(outlink, out);
111 }
112
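/* Allocate one convolution segment: per-channel forward/inverse real FFT
 * contexts sized for 2 * part_size samples (the coefficient transform uses
 * unit scaling, the signal forward/inverse transforms 1 / sqrt(2 * part_size)),
 * plus the aligned scratch frames used by the partitioned convolution. */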
113 static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int selir,
114 int offset, int nb_partitions, int part_size, int index)
115 {
116 AudioFIRContext *s = ctx->priv;
117 const size_t cpu_align = av_cpu_max_align();
118 union { double d; float f; } cscale, scale, iscale;
119 enum AVTXType tx_type;
120 int ret;
121
122 seg->tx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->tx));
123 seg->ctx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->ctx));
124 seg->itx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->itx));
125 if (!seg->tx || !seg->ctx || !seg->itx)
126 return AVERROR(ENOMEM);
127
128 seg->fft_length = (part_size + 1) * 2;
129 seg->part_size = part_size;
130 seg->coeff_size = FFALIGN(seg->part_size + 1, cpu_align);
131 seg->block_size = FFMAX(seg->coeff_size * 2, FFALIGN(seg->fft_length, cpu_align));
132 seg->nb_partitions = nb_partitions;
133 seg->input_size = offset + s->min_part_size;
134 seg->input_offset = offset;
135
136 seg->part_index = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->part_index));
137 seg->output_offset = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->output_offset));
138 if (!seg->part_index || !seg->output_offset)
139 return AVERROR(ENOMEM);
140
141 switch (s->format) {
142 case AV_SAMPLE_FMT_FLTP:
143 cscale.f = 1.f;
144 scale.f = 1.f / sqrtf(2.f * part_size);
145 iscale.f = 1.f / sqrtf(2.f * part_size);
146 tx_type = AV_TX_FLOAT_RDFT;
147 break;
148 case AV_SAMPLE_FMT_DBLP:
149 cscale.d = 1.0;
150 scale.d = 1.0 / sqrt(2.0 * part_size);
151 iscale.d = 1.0 / sqrt(2.0 * part_size);
152 tx_type = AV_TX_DOUBLE_RDFT;
153 break;
154 }
155
156 for (int ch = 0; ch < ctx->inputs[0]->ch_layout.nb_channels && part_size >= 1; ch++) {
157 ret = av_tx_init(&seg->ctx[ch], &seg->ctx_fn, tx_type,
158 0, 2 * part_size, &cscale, 0);
159 if (ret < 0)
160 return ret;
161
162 ret = av_tx_init(&seg->tx[ch], &seg->tx_fn, tx_type,
163 0, 2 * part_size, &scale, 0);
164 if (ret < 0)
165 return ret;
166 ret = av_tx_init(&seg->itx[ch], &seg->itx_fn, tx_type,
167 1, 2 * part_size, &iscale, 0);
168 if (ret < 0)
169 return ret;
170 }
171
172 seg->sumin = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
173 seg->sumout = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
174 seg->blockout = ff_get_audio_buffer(ctx->inputs[0], seg->block_size * seg->nb_partitions);
175 seg->tempin = ff_get_audio_buffer(ctx->inputs[0], seg->block_size);
176 seg->tempout = ff_get_audio_buffer(ctx->inputs[0], seg->block_size);
177 seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
178 seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
179 seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size * 5);
180 if (!seg->buffer || !seg->sumin || !seg->sumout || !seg->blockout ||
181 !seg->input || !seg->output || !seg->tempin || !seg->tempout)
182 return AVERROR(ENOMEM);
183
184 return 0;
185 }
186
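/* Release everything allocated by init_segment(): transform contexts,
 * per-channel index/offset arrays and the scratch frames. */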
187 static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
188 {
189 AudioFIRContext *s = ctx->priv;
190
191 if (seg->ctx) {
192 for (int ch = 0; ch < s->nb_channels; ch++)
193 av_tx_uninit(&seg->ctx[ch]);
194 }
195 av_freep(&seg->ctx);
196
197 if (seg->tx) {
198 for (int ch = 0; ch < s->nb_channels; ch++)
199 av_tx_uninit(&seg->tx[ch]);
200 }
201 av_freep(&seg->tx);
202
203 if (seg->itx) {
204 for (int ch = 0; ch < s->nb_channels; ch++)
205 av_tx_uninit(&seg->itx[ch]);
206 }
207 av_freep(&seg->itx);
208
209 av_freep(&seg->output_offset);
210 av_freep(&seg->part_index);
211
212 av_frame_free(&seg->tempin);
213 av_frame_free(&seg->tempout);
214 av_frame_free(&seg->blockout);
215 av_frame_free(&seg->sumin);
216 av_frame_free(&seg->sumout);
217 av_frame_free(&seg->buffer);
218 av_frame_free(&seg->input);
219 av_frame_free(&seg->output);
220 seg->input_size = 0;
221
222 for (int i = 0; i < MAX_IR_STREAMS; i++)
223 av_frame_free(&seg->coeff);
224 }
225
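/* Load and preprocess the IR selected by 'selir': on first call create the
 * segments (partition sizes doubling from min_part_size up to max_part_size),
 * consume the queued IR samples, apply the normalization/gain options (with
 * irlink applying the minimum per-channel gain to all channels), and
 * pre-transform every partition of every channel into seg->coeff. */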
226 static int convert_coeffs(AVFilterContext *ctx, int selir)
227 {
228 AudioFIRContext *s = ctx->priv;
229 int ret, nb_taps, cur_nb_taps;
230
231 if (!s->nb_taps[selir]) {
232 int part_size, max_part_size;
233 int left, offset = 0;
234
235 s->nb_taps[selir] = ff_inlink_queued_samples(ctx->inputs[1 + selir]);
236 if (s->nb_taps[selir] <= 0)
237 return AVERROR(EINVAL);
238
239 if (s->minp > s->maxp)
240 s->maxp = s->minp;
241
242 if (s->nb_segments[selir])
243 goto skip;
244
245 left = s->nb_taps[selir];
246 part_size = 1 << av_log2(s->minp);
247 max_part_size = 1 << av_log2(s->maxp);
248
249 for (int i = 0; left > 0; i++) {
250 int step = (part_size == max_part_size) ? INT_MAX : 1 + (i == 0);
251 int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
252
253 s->nb_segments[selir] = i + 1;
254 ret = init_segment(ctx, &s->seg[selir][i], selir, offset, nb_partitions, part_size, i);
255 if (ret < 0)
256 return ret;
257 offset += nb_partitions * part_size;
258 s->max_offset[selir] = offset;
259 left -= nb_partitions * part_size;
260 part_size *= 2;
261 part_size = FFMIN(part_size, max_part_size);
262 }
263 }
264
265 skip:
266 if (!s->ir[selir]) {
267 ret = ff_inlink_consume_samples(ctx->inputs[1 + selir], s->nb_taps[selir], s->nb_taps[selir], &s->ir[selir]);
268 if (ret < 0)
269 return ret;
270 if (ret == 0)
271 return AVERROR_BUG;
272 }
273
274 cur_nb_taps = s->ir[selir]->nb_samples;
275 nb_taps = cur_nb_taps;
276
277 if (!s->norm_ir[selir] || s->norm_ir[selir]->nb_samples < nb_taps) {
278 av_frame_free(&s->norm_ir[selir]);
279 s->norm_ir[selir] = ff_get_audio_buffer(ctx->inputs[0], FFALIGN(nb_taps, 8));
280 if (!s->norm_ir[selir])
281 return AVERROR(ENOMEM);
282 }
283
284 av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps);
285 av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments[selir]);
286
287 switch (s->format) {
288 case AV_SAMPLE_FMT_FLTP:
289 for (int ch = 0; ch < s->nb_channels; ch++) {
290 const float *tsrc = (const float *)s->ir[selir]->extended_data[!s->one2many * ch];
291
292 s->ch_gain[ch] = ir_gain_float(ctx, s, nb_taps, tsrc);
293 }
294
295 if (s->ir_link) {
296 float gain = +INFINITY;
297
298 for (int ch = 0; ch < s->nb_channels; ch++)
299 gain = fminf(gain, s->ch_gain[ch]);
300
301 for (int ch = 0; ch < s->nb_channels; ch++)
302 s->ch_gain[ch] = gain;
303 }
304
305 for (int ch = 0; ch < s->nb_channels; ch++) {
306 const float *tsrc = (const float *)s->ir[selir]->extended_data[!s->one2many * ch];
307 float *time = (float *)s->norm_ir[selir]->extended_data[ch];
308
309 memcpy(time, tsrc, sizeof(*time) * nb_taps);
310 for (int i = FFMAX(1, s->length * nb_taps); i < nb_taps; i++)
311 time[i] = 0;
312
313 ir_scale_float(ctx, s, nb_taps, ch, time, s->ch_gain[ch]);
314
315 for (int n = 0; n < s->nb_segments[selir]; n++) {
316 AudioFIRSegment *seg = &s->seg[selir][n];
317
318 if (!seg->coeff)
319 seg->coeff = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->coeff_size * 2);
320 if (!seg->coeff)
321 return AVERROR(ENOMEM);
322
323 for (int i = 0; i < seg->nb_partitions; i++)
324 convert_channel_float(ctx, s, ch, seg, i, selir);
325 }
326 }
327 break;
328 case AV_SAMPLE_FMT_DBLP:
329 for (int ch = 0; ch < s->nb_channels; ch++) {
330 const double *tsrc = (const double *)s->ir[selir]->extended_data[!s->one2many * ch];
331
332 s->ch_gain[ch] = ir_gain_double(ctx, s, nb_taps, tsrc);
333 }
334
335 if (s->ir_link) {
336 double gain = +INFINITY;
337
338 for (int ch = 0; ch < s->nb_channels; ch++)
339 gain = fmin(gain, s->ch_gain[ch]);
340
341 for (int ch = 0; ch < s->nb_channels; ch++)
342 s->ch_gain[ch] = gain;
343 }
344
345 for (int ch = 0; ch < s->nb_channels; ch++) {
346 const double *tsrc = (const double *)s->ir[selir]->extended_data[!s->one2many * ch];
347 double *time = (double *)s->norm_ir[selir]->extended_data[ch];
348
349 memcpy(time, tsrc, sizeof(*time) * nb_taps);
350 for (int i = FFMAX(1, s->length * nb_taps); i < nb_taps; i++)
351 time[i] = 0;
352
353 ir_scale_double(ctx, s, nb_taps, ch, time, s->ch_gain[ch]);
354
355 for (int n = 0; n < s->nb_segments[selir]; n++) {
356 AudioFIRSegment *seg = &s->seg[selir][n];
357
358 if (!seg->coeff)
359 seg->coeff = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->coeff_size * 2);
360 if (!seg->coeff)
361 return AVERROR(ENOMEM);
362
363 for (int i = 0; i < seg->nb_partitions; i++)
364 convert_channel_double(ctx, s, ch, seg, i, selir);
365 }
366 }
367 break;
368 }
369
370 s->have_coeffs[selir] = 1;
371
372 return 0;
373 }
374
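/* Reject IRs longer than maxir seconds and mark the coefficients of this IR
 * input as complete once no further samples can arrive on its link. */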
375 static int check_ir(AVFilterLink *link, int selir)
376 {
377 AVFilterContext *ctx = link->dst;
378 AudioFIRContext *s = ctx->priv;
379 int nb_taps, max_nb_taps;
380
381 nb_taps = ff_inlink_queued_samples(link);
382 max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
383 if (nb_taps > max_nb_taps) {
384 av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
385 return AVERROR(EINVAL);
386 }
387
388 if (ff_inlink_check_available_samples(link, nb_taps + 1) == 1)
389 s->eof_coeffs[selir] = 1;
390
391 return 0;
392 }
393
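/* Main scheduling callback: make sure the required IRs are loaded and
 * converted (every IR with irload=init, only the selected one with
 * irload=access), then consume input in multiples of min_part_size, filter
 * it, and finally handle EOF propagation and frame requests. */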
394 static int activate(AVFilterContext *ctx)
395 {
396 AudioFIRContext *s = ctx->priv;
397 AVFilterLink *outlink = ctx->outputs[0];
398 int ret, status, available, wanted;
399 AVFrame *in = NULL;
400 int64_t pts;
401
402 FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
403
404 for (int i = 0; i < s->nb_irs; i++) {
405 const int selir = i;
406
407 if (s->ir_load && selir != s->selir)
408 continue;
409
410 if (!s->eof_coeffs[selir]) {
411 ret = check_ir(ctx->inputs[1 + selir], selir);
412 if (ret < 0)
413 return ret;
414
415 if (!s->eof_coeffs[selir]) {
416 if (ff_outlink_frame_wanted(ctx->outputs[0]))
417 ff_inlink_request_frame(ctx->inputs[1 + selir]);
418 return 0;
419 }
420 }
421
422 if (!s->have_coeffs[selir] && s->eof_coeffs[selir]) {
423 ret = convert_coeffs(ctx, selir);
424 if (ret < 0)
425 return ret;
426 }
427 }
428
429 available = ff_inlink_queued_samples(ctx->inputs[0]);
430 wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
431 ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
432 if (ret > 0)
433 ret = fir_frame(s, in, outlink);
434
435 if (s->selir != s->prev_selir && s->loading[0] == 0)
436 s->prev_selir = s->selir;
437
438 if (ret < 0)
439 return ret;
440
441 if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
442 ff_filter_set_ready(ctx, 10);
443 return 0;
444 }
445
446 if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
447 if (status == AVERROR_EOF) {
448 ff_outlink_set_status(ctx->outputs[0], status, pts);
449 return 0;
450 }
451 }
452
453 if (ff_outlink_frame_wanted(ctx->outputs[0])) {
454 ff_inlink_request_frame(ctx->inputs[0]);
455 return 0;
456 }
457
458 return FFERROR_NOT_READY;
459 }
460
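/* Format negotiation: the sample format list follows the 'precision' option
 * (auto allows both fltp and dblp); with irfmt=mono the IR inputs are
 * restricted to one channel while the main input keeps its own layout. */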
461 static int query_formats(AVFilterContext *ctx)
462 {
463 AudioFIRContext *s = ctx->priv;
464 static const enum AVSampleFormat sample_fmts[3][3] = {
465 { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
466 { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE },
467 { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
468 };
469 int ret;
470
471 if (s->ir_format) {
472 ret = ff_set_common_all_channel_counts(ctx);
473 if (ret < 0)
474 return ret;
475 } else {
476 AVFilterChannelLayouts *mono = NULL;
477 AVFilterChannelLayouts *layouts = ff_all_channel_counts();
478
479 if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts)) < 0)
480 return ret;
481 if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
482 return ret;
483
484 ret = ff_add_channel_layout(&mono, &(AVChannelLayout)AV_CHANNEL_LAYOUT_MONO);
485 if (ret)
486 return ret;
487 for (int i = 1; i < ctx->nb_inputs; i++) {
488 if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
489 return ret;
490 }
491 }
492
493 if ((ret = ff_set_common_formats_from_list(ctx, sample_fmts[s->precision])) < 0)
494 return ret;
495
496 return ff_set_common_all_samplerates(ctx);
497 }
498
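/* Configure the output from the main input and allocate the per-channel gain
 * and loading arrays plus the fade-in buffers and the linear crossfade ramps
 * (min_part_size samples) used when switching between IRs. */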
499 static int config_output(AVFilterLink *outlink)
500 {
501 AVFilterContext *ctx = outlink->src;
502 AudioFIRContext *s = ctx->priv;
503 int ret;
504
505 s->one2many = ctx->inputs[1 + s->selir]->ch_layout.nb_channels == 1;
506 outlink->sample_rate = ctx->inputs[0]->sample_rate;
507 outlink->time_base = ctx->inputs[0]->time_base;
508 if ((ret = av_channel_layout_copy(&outlink->ch_layout, &ctx->inputs[0]->ch_layout)) < 0)
509 return ret;
510 outlink->ch_layout.nb_channels = ctx->inputs[0]->ch_layout.nb_channels;
511
512 s->format = outlink->format;
513 s->nb_channels = outlink->ch_layout.nb_channels;
514 s->ch_gain = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*s->ch_gain));
515 s->loading = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*s->loading));
516 if (!s->loading || !s->ch_gain)
517 return AVERROR(ENOMEM);
518
519 s->fadein[0] = ff_get_audio_buffer(outlink, s->min_part_size);
520 s->fadein[1] = ff_get_audio_buffer(outlink, s->min_part_size);
521 if (!s->fadein[0] || !s->fadein[1])
522 return AVERROR(ENOMEM);
523
524 s->xfade[0] = ff_get_audio_buffer(outlink, s->min_part_size);
525 s->xfade[1] = ff_get_audio_buffer(outlink, s->min_part_size);
526 if (!s->xfade[0] || !s->xfade[1])
527 return AVERROR(ENOMEM);
528
529 switch (s->format) {
530 case AV_SAMPLE_FMT_FLTP:
531 for (int ch = 0; ch < s->nb_channels; ch++) {
532 float *dst0 = (float *)s->xfade[0]->extended_data[ch];
533 float *dst1 = (float *)s->xfade[1]->extended_data[ch];
534
535 for (int n = 0; n < s->min_part_size; n++) {
536 dst0[n] = (n + 1.f) / s->min_part_size;
537 dst1[n] = 1.f - dst0[n];
538 }
539 }
540 break;
541 case AV_SAMPLE_FMT_DBLP:
542 for (int ch = 0; ch < s->nb_channels; ch++) {
543 double *dst0 = (double *)s->xfade[0]->extended_data[ch];
544 double *dst1 = (double *)s->xfade[1]->extended_data[ch];
545
546 for (int n = 0; n < s->min_part_size; n++) {
547 dst0[n] = (n + 1.0) / s->min_part_size;
548 dst1[n] = 1.0 - dst0[n];
549 }
550 }
551 break;
552 }
553
554 return 0;
555 }
556
557 static av_cold void uninit(AVFilterContext *ctx)
558 {
559 AudioFIRContext *s = ctx->priv;
560
561 av_freep(&s->fdsp);
562 av_freep(&s->ch_gain);
563 av_freep(&s->loading);
564
565 for (int i = 0; i < s->nb_irs; i++) {
566 for (int j = 0; j < s->nb_segments[i]; j++)
567 uninit_segment(ctx, &s->seg[i][j]);
568
569 av_frame_free(&s->ir[i]);
570 av_frame_free(&s->norm_ir[i]);
571 }
572
573 av_frame_free(&s->fadein[0]);
574 av_frame_free(&s->fadein[1]);
575
576 av_frame_free(&s->xfade[0]);
577 av_frame_free(&s->xfade[1]);
578 }
579
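/* Create the input pads (one "main" pad plus nbirs "irN" pads), allocate the
 * float DSP helper, initialize the FIR DSP functions and round the minp/maxp
 * options down to powers of two. */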
580 static av_cold int init(AVFilterContext *ctx)
581 {
582 AudioFIRContext *s = ctx->priv;
583 AVFilterPad pad;
584 int ret;
585
586 s->prev_selir = FFMIN(s->nb_irs - 1, s->selir);
587
588 pad = (AVFilterPad) {
589 .name = "main",
590 .type = AVMEDIA_TYPE_AUDIO,
591 };
592
593 ret = ff_append_inpad(ctx, &pad);
594 if (ret < 0)
595 return ret;
596
597 for (int n = 0; n < s->nb_irs; n++) {
598 pad = (AVFilterPad) {
599 .name = av_asprintf("ir%d", n),
600 .type = AVMEDIA_TYPE_AUDIO,
601 };
602
603 if (!pad.name)
604 return AVERROR(ENOMEM);
605
606 ret = ff_append_inpad_free_name(ctx, &pad);
607 if (ret < 0)
608 return ret;
609 }
610
611 s->fdsp = avpriv_float_dsp_alloc(0);
612 if (!s->fdsp)
613 return AVERROR(ENOMEM);
614
615 ff_afir_init(&s->afirdsp);
616
617 s->min_part_size = 1 << av_log2(s->minp);
618 s->max_part_size = 1 << av_log2(s->maxp);
619
620 return 0;
621 }
622
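/* Handle runtime option changes: when the 'ir' selection changes, remember
 * the previous IR and flag every channel as loading so the switch to the new
 * response is smoothed. */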
623 static int process_command(AVFilterContext *ctx,
624 const char *cmd,
625 const char *arg,
626 char *res,
627 int res_len,
628 int flags)
629 {
630 AudioFIRContext *s = ctx->priv;
631 int prev_selir, ret;
632
633 prev_selir = s->selir;
634 ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
635 if (ret < 0)
636 return ret;
637
638 s->selir = FFMIN(s->nb_irs - 1, s->selir);
639 if (s->selir != prev_selir) {
640 s->prev_selir = prev_selir;
641
642 for (int ch = 0; ch < s->nb_channels; ch++)
643 s->loading[ch] = 1;
644 }
645
646 return 0;
647 }
648
649 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
650 #define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
651 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
652 #define OFFSET(x) offsetof(AudioFIRContext, x)
653
654 static const AVOption afir_options[] = {
655 { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AFR },
656 { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AFR },
657 { "length", "set IR length", OFFSET(length), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
658 { "gtype", "set IR auto gain type",OFFSET(gtype), AV_OPT_TYPE_INT, {.i64=0}, -1, 4, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
659 { "none", "without auto gain", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
660 { "peak", "peak gain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
661 { "dc", "DC gain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
662 { "gn", "gain to noise", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
663 { "ac", "AC gain", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
664 { "rms", "RMS gain", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
665 { "irnorm", "set IR norm", OFFSET(ir_norm), AV_OPT_TYPE_FLOAT, {.dbl=1}, -1, 2, AF },
666 { "irlink", "set IR link", OFFSET(ir_link), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
667 { "irgain", "set IR gain", OFFSET(ir_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
668 { "irfmt", "set IR format", OFFSET(ir_format), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, .unit = "irfmt" },
669 { "mono", "single channel", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "irfmt" },
670 { "input", "same as input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "irfmt" },
671 { "maxir", "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
672 { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF|AV_OPT_FLAG_DEPRECATED },
673 { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF|AV_OPT_FLAG_DEPRECATED },
674 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF|AV_OPT_FLAG_DEPRECATED },
675 { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF|AV_OPT_FLAG_DEPRECATED },
676 { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 1, 65536, AF },
677 { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 8, 65536, AF },
678 { "nbirs", "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT, {.i64=1}, 1, 32, AF },
679 { "ir", "select IR", OFFSET(selir), AV_OPT_TYPE_INT, {.i64=0}, 0, 31, AFR },
680 { "precision", "set processing precision", OFFSET(precision), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, AF, .unit = "precision" },
681 { "auto", "set auto processing precision", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "precision" },
682 { "float", "set single-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "precision" },
683 { "double","set double-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "precision" },
684 { "irload", "set IR loading type", OFFSET(ir_load), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AF, .unit = "irload" },
685 { "init", "load all IRs on init", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "irload" },
686 { "access", "load IR on access", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "irload" },
687 { NULL }
688 };
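/* Illustrative usage (not part of this file; file names are placeholders):
 * convolve input.wav with the impulse response in ir.wav, forcing single
 * precision processing:
 *
 *   ffmpeg -i input.wav -i ir.wav -lavfi afir=precision=float output.wav
 *
 * The first audio stream feeds the "main" pad and the second the "ir0" pad. */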
689
690 AVFILTER_DEFINE_CLASS(afir);
691
692 static const AVFilterPad outputs[] = {
693 {
694 .name = "default",
695 .type = AVMEDIA_TYPE_AUDIO,
696 .config_props = config_output,
697 },
698 };
699
700 const AVFilter ff_af_afir = {
701 .name = "afir",
702 .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
703 .priv_size = sizeof(AudioFIRContext),
704 .priv_class = &afir_class,
705 FILTER_QUERY_FUNC(query_formats),
706 FILTER_OUTPUTS(outputs),
707 .init = init,
708 .activate = activate,
709 .uninit = uninit,
710 .process_command = process_command,
711 .flags = AVFILTER_FLAG_DYNAMIC_INPUTS |
712 AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
713 AVFILTER_FLAG_SLICE_THREADS,
714 };
715