FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/af_headphone.c
Date: 2024-07-26 21:54:09
            Exec   Total   Coverage
Lines:         0     421       0.0%
Functions:     0      13       0.0%
Branches:      0     210       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (C) 2017 Paul B Mahol
3 * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <math.h>
22
23 #include "libavutil/avstring.h"
24 #include "libavutil/channel_layout.h"
25 #include "libavutil/float_dsp.h"
26 #include "libavutil/intmath.h"
27 #include "libavutil/mem.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/tx.h"
30
31 #include "avfilter.h"
32 #include "filters.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "audio.h"
36
37 #define TIME_DOMAIN 0
38 #define FREQUENCY_DOMAIN 1
39
40 #define HRIR_STEREO 0
41 #define HRIR_MULTI 1
42
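/* Two processing modes: TIME_DOMAIN does direct convolution per output ear,
 * FREQUENCY_DOMAIN multiplies precomputed HRTF spectra with the block FFT
 * (overlap-add). Two HRIR layouts: HRIR_STEREO expects one stereo HRIR
 * stream per mapped channel, HRIR_MULTI a single multichannel HRIR stream
 * whose channels are taken as left/right pairs. */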
43 typedef struct HeadphoneContext {
44 const AVClass *class;
45
46 char *map;
47 int type;
48
49 int lfe_channel;
50
51 int have_hrirs;
52 int eof_hrirs;
53
54 int ir_len;
55 int air_len;
56
57 int nb_hrir_inputs;
58
59 int nb_irs;
60
61 float gain;
62 float lfe_gain, gain_lfe;
63
64 float *ringbuffer[2];
65 int write[2];
66
67 int buffer_length;
68 int n_fft;
69 int size;
70 int hrir_fmt;
71
72 float *data_ir[2];
73 float *temp_src[2];
74 AVComplexFloat *out_fft[2];
75 AVComplexFloat *in_fft[2];
76 AVComplexFloat *temp_afft[2];
77
78 AVTXContext *fft[2], *ifft[2];
79 av_tx_fn tx_fn[2], itx_fn[2];
80 AVComplexFloat *data_hrtf[2];
81
82 float (*scalarproduct_float)(const float *v1, const float *v2, int len);
83 struct hrir_inputs {
84 int ir_len;
85 int eof;
86 } hrir_in[64];
87 AVChannelLayout map_channel_layout;
88 enum AVChannel mapping[64];
89 uint8_t hrir_map[64];
90 } HeadphoneContext;
91
92 static int parse_channel_name(const char *arg, enum AVChannel *rchannel)
93 {
94 int channel = av_channel_from_string(arg);
95
96 if (channel < 0 || channel >= 64)
97 return AVERROR(EINVAL);
98 *rchannel = channel;
99 return 0;
100 }
101
102 static void parse_map(AVFilterContext *ctx)
103 {
104 HeadphoneContext *s = ctx->priv;
105 char *arg, *tokenizer, *p;
106 uint64_t used_channels = 0;
107
108 p = s->map;
109 while ((arg = av_strtok(p, "|", &tokenizer))) {
110 enum AVChannel out_channel;
111
112 p = NULL;
113 if (parse_channel_name(arg, &out_channel)) {
114 av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", arg);
115 continue;
116 }
117 if (used_channels & (1ULL << out_channel)) {
118 av_log(ctx, AV_LOG_WARNING, "Ignoring duplicate channel '%s'.\n", arg);
119 continue;
120 }
121 used_channels |= (1ULL << out_channel);
122 s->mapping[s->nb_irs] = out_channel;
123 s->nb_irs++;
124 }
125 av_channel_layout_from_mask(&s->map_channel_layout, used_channels);
126
127 if (s->hrir_fmt == HRIR_MULTI)
128 s->nb_hrir_inputs = 1;
129 else
130 s->nb_hrir_inputs = s->nb_irs;
131 }
132
133 typedef struct ThreadData {
134 AVFrame *in, *out;
135 int *write;
136 float **ir;
137 int *n_clippings;
138 float **ringbuffer;
139 float **temp_src;
140 AVComplexFloat **out_fft;
141 AVComplexFloat **in_fft;
142 AVComplexFloat **temp_afft;
143 } ThreadData;
144
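/* Time-domain path: each job handles one ear (jobnr 0 = left, 1 = right).
 * Input samples are pushed into a per-channel ring buffer, and every output
 * sample sums, over all input channels, the dot product of the last ir_len
 * samples with that channel's impulse response (stored time-reversed in
 * td->ir); the LFE channel is mixed straight through with gain_lfe instead
 * of being convolved. */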
145 static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
146 {
147 HeadphoneContext *s = ctx->priv;
148 ThreadData *td = arg;
149 AVFrame *in = td->in, *out = td->out;
150 int offset = jobnr;
151 int *write = &td->write[jobnr];
152 const float *const ir = td->ir[jobnr];
153 int *n_clippings = &td->n_clippings[jobnr];
154 float *ringbuffer = td->ringbuffer[jobnr];
155 float *temp_src = td->temp_src[jobnr];
156 const int ir_len = s->ir_len;
157 const int air_len = s->air_len;
158 const float *src = (const float *)in->data[0];
159 float *dst = (float *)out->data[0];
160 const int in_channels = in->ch_layout.nb_channels;
161 const int buffer_length = s->buffer_length;
162 const uint32_t modulo = (uint32_t)buffer_length - 1;
163 float *buffer[64];
164 int wr = *write;
165 int read;
166 int i, l;
167
168 dst += offset;
169 for (l = 0; l < in_channels; l++) {
170 buffer[l] = ringbuffer + l * buffer_length;
171 }
172
173 for (i = 0; i < in->nb_samples; i++) {
174 const float *cur_ir = ir;
175
176 *dst = 0;
177 for (l = 0; l < in_channels; l++) {
178 *(buffer[l] + wr) = src[l];
179 }
180
181 for (l = 0; l < in_channels; cur_ir += air_len, l++) {
182 const float *const bptr = buffer[l];
183
184 if (l == s->lfe_channel) {
185 *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
186 continue;
187 }
188
189 read = (wr - (ir_len - 1)) & modulo;
190
191 if (read + ir_len < buffer_length) {
192 memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
193 } else {
194 int len = FFMIN(air_len - (read % ir_len), buffer_length - read);
195
196 memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
197 memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
198 }
199
200 dst[0] += s->scalarproduct_float(cur_ir, temp_src, FFALIGN(ir_len, 32));
201 }
202
203 if (fabsf(dst[0]) > 1)
204 n_clippings[0]++;
205
206 dst += 2;
207 src += in_channels;
208 wr = (wr + 1) & modulo;
209 }
210
211 *write = wr;
212
213 return 0;
214 }
215
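/* Frequency-domain path (overlap-add): the current block is zero-padded to
 * n_fft, transformed once per input channel, multiplied by that channel's
 * precomputed HRTF spectrum and accumulated, then inverse transformed. The
 * first nb_samples of the result go to the output; the remaining ir_len - 1
 * tail samples are added into the ring buffer so the next block picks them
 * up. The LFE channel again bypasses convolution and is mixed with gain_lfe. */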
216 static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
217 {
218 HeadphoneContext *s = ctx->priv;
219 ThreadData *td = arg;
220 AVFrame *in = td->in, *out = td->out;
221 int offset = jobnr;
222 int *write = &td->write[jobnr];
223 AVComplexFloat *hrtf = s->data_hrtf[jobnr];
224 int *n_clippings = &td->n_clippings[jobnr];
225 float *ringbuffer = td->ringbuffer[jobnr];
226 const int ir_len = s->ir_len;
227 const float *src = (const float *)in->data[0];
228 float *dst = (float *)out->data[0];
229 const int in_channels = in->ch_layout.nb_channels;
230 const int buffer_length = s->buffer_length;
231 const uint32_t modulo = (uint32_t)buffer_length - 1;
232 AVComplexFloat *fft_out = s->out_fft[jobnr];
233 AVComplexFloat *fft_in = s->in_fft[jobnr];
234 AVComplexFloat *fft_acc = s->temp_afft[jobnr];
235 AVTXContext *ifft = s->ifft[jobnr];
236 AVTXContext *fft = s->fft[jobnr];
237 av_tx_fn tx_fn = s->tx_fn[jobnr];
238 av_tx_fn itx_fn = s->itx_fn[jobnr];
239 const int n_fft = s->n_fft;
240 const float fft_scale = 1.0f / s->n_fft;
241 AVComplexFloat *hrtf_offset;
242 int wr = *write;
243 int n_read;
244 int i, j;
245
246 dst += offset;
247
248 n_read = FFMIN(ir_len, in->nb_samples);
249 for (j = 0; j < n_read; j++) {
250 dst[2 * j] = ringbuffer[wr];
251 ringbuffer[wr] = 0.0;
252 wr = (wr + 1) & modulo;
253 }
254
255 for (j = n_read; j < in->nb_samples; j++) {
256 dst[2 * j] = 0;
257 }
258
259 memset(fft_acc, 0, sizeof(AVComplexFloat) * n_fft);
260
261 for (i = 0; i < in_channels; i++) {
262 if (i == s->lfe_channel) {
263 for (j = 0; j < in->nb_samples; j++) {
264 dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
265 }
266 continue;
267 }
268
269 offset = i * n_fft;
270 hrtf_offset = hrtf + s->hrir_map[i] * n_fft;
271
272 memset(fft_in, 0, sizeof(AVComplexFloat) * n_fft);
273
274 for (j = 0; j < in->nb_samples; j++) {
275 fft_in[j].re = src[j * in_channels + i];
276 }
277
278 tx_fn(fft, fft_out, fft_in, sizeof(*fft_in));
279
280 for (j = 0; j < n_fft; j++) {
281 const AVComplexFloat *hcomplex = hrtf_offset + j;
282 const float re = fft_out[j].re;
283 const float im = fft_out[j].im;
284
285 fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
286 fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
287 }
288 }
289
290 itx_fn(ifft, fft_out, fft_acc, sizeof(*fft_acc));
291
292 for (j = 0; j < in->nb_samples; j++) {
293 dst[2 * j] += fft_out[j].re * fft_scale;
294 if (fabsf(dst[2 * j]) > 1)
295 n_clippings[0]++;
296 }
297
298 for (j = 0; j < ir_len - 1; j++) {
299 int write_pos = (wr + j) & modulo;
300
301 *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
302 }
303
304 *write = wr;
305
306 return 0;
307 }
308
309 static int check_ir(AVFilterLink *inlink, int input_number)
310 {
311 AVFilterContext *ctx = inlink->dst;
312 HeadphoneContext *s = ctx->priv;
313 int ir_len, max_ir_len;
314
315 ir_len = ff_inlink_queued_samples(inlink);
316 max_ir_len = 65536;
317 if (ir_len > max_ir_len) {
318 av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
319 return AVERROR(EINVAL);
320 }
321 s->hrir_in[input_number].ir_len = ir_len;
322 s->ir_len = FFMAX(ir_len, s->ir_len);
323
324 if (ff_inlink_check_available_samples(inlink, ir_len + 1) == 1) {
325 s->hrir_in[input_number].eof = 1;
326 return 1;
327 }
328
329 if (!s->hrir_in[input_number].eof) {
330 ff_inlink_request_frame(inlink);
331 return 0;
332 }
333
334 return 0;
335 }
336
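/* Filters one input frame: runs the left- and right-ear jobs in parallel via
 * ff_filter_execute() and warns when the mixed output clips. */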
337 static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
338 {
339 AVFilterContext *ctx = outlink->src;
340 int n_clippings[2] = { 0 };
341 ThreadData td;
342 AVFrame *out;
343
344 out = ff_get_audio_buffer(outlink, in->nb_samples);
345 if (!out) {
346 av_frame_free(&in);
347 return AVERROR(ENOMEM);
348 }
349 out->pts = in->pts;
350
351 td.in = in; td.out = out; td.write = s->write;
352 td.ir = s->data_ir; td.n_clippings = n_clippings;
353 td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
354 td.out_fft = s->out_fft;
355 td.in_fft = s->in_fft;
356 td.temp_afft = s->temp_afft;
357
358 if (s->type == TIME_DOMAIN) {
359 ff_filter_execute(ctx, headphone_convolute, &td, NULL, 2);
360 } else {
361 ff_filter_execute(ctx, headphone_fast_convolute, &td, NULL, 2);
362 }
363
364 if (n_clippings[0] + n_clippings[1] > 0) {
365 av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
366 n_clippings[0] + n_clippings[1], out->nb_samples * 2);
367 }
368
369 av_frame_free(&in);
370 return ff_filter_frame(outlink, out);
371 }
372
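/* Called once every HRIR input has hit EOF: allocates the work buffers (and
 * FFT contexts in frequency-domain mode), then consumes the queued HRIR
 * samples, applies the linear gain, and stores each impulse response either
 * time-reversed (time domain) or as a precomputed n_fft-point spectrum
 * (frequency domain), indexed through hrir_map by the channel mapping. */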
373 static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
374 {
375 struct HeadphoneContext *s = ctx->priv;
376 const int ir_len = s->ir_len;
377 int nb_input_channels = ctx->inputs[0]->ch_layout.nb_channels;
378 const int nb_hrir_channels = s->nb_hrir_inputs == 1 ? ctx->inputs[1]->ch_layout.nb_channels : s->nb_hrir_inputs * 2;
379 float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
380 AVFrame *frame;
381 int ret = 0;
382 int n_fft;
383 int i, j, k;
384
385 s->air_len = 1 << (32 - ff_clz(ir_len));
386 if (s->type == TIME_DOMAIN) {
387 s->air_len = FFALIGN(s->air_len, 32);
388 }
389 s->buffer_length = 1 << (32 - ff_clz(s->air_len));
390 s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));
391
392 if (s->type == FREQUENCY_DOMAIN) {
393 float scale = 1.f;
394
395 ret = av_tx_init(&s->fft[0], &s->tx_fn[0], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
396 if (ret < 0)
397 goto fail;
398 ret = av_tx_init(&s->fft[1], &s->tx_fn[1], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
399 if (ret < 0)
400 goto fail;
401 ret = av_tx_init(&s->ifft[0], &s->itx_fn[0], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
402 if (ret < 0)
403 goto fail;
404 ret = av_tx_init(&s->ifft[1], &s->itx_fn[1], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
405 if (ret < 0)
406 goto fail;
407
408 if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
409 av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
410 ret = AVERROR(ENOMEM);
411 goto fail;
412 }
413 }
414
415 if (s->type == TIME_DOMAIN) {
416 s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
417 s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
418 } else {
419 s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
420 s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
421 s->out_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
422 s->out_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
423 s->in_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
424 s->in_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
425 s->temp_afft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
426 s->temp_afft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
427 if (!s->in_fft[0] || !s->in_fft[1] ||
428 !s->out_fft[0] || !s->out_fft[1] ||
429 !s->temp_afft[0] || !s->temp_afft[1]) {
430 ret = AVERROR(ENOMEM);
431 goto fail;
432 }
433 }
434
435 if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
436 ret = AVERROR(ENOMEM);
437 goto fail;
438 }
439
440 if (s->type == TIME_DOMAIN) {
441 s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
442 s->temp_src[1] = av_calloc(s->air_len, sizeof(float));
443
444 s->data_ir[0] = av_calloc(nb_hrir_channels * s->air_len, sizeof(*s->data_ir[0]));
445 s->data_ir[1] = av_calloc(nb_hrir_channels * s->air_len, sizeof(*s->data_ir[1]));
446 if (!s->data_ir[0] || !s->data_ir[1] || !s->temp_src[0] || !s->temp_src[1]) {
447 ret = AVERROR(ENOMEM);
448 goto fail;
449 }
450 } else {
451 s->data_hrtf[0] = av_calloc(n_fft, sizeof(*s->data_hrtf[0]) * nb_hrir_channels);
452 s->data_hrtf[1] = av_calloc(n_fft, sizeof(*s->data_hrtf[1]) * nb_hrir_channels);
453 if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
454 ret = AVERROR(ENOMEM);
455 goto fail;
456 }
457 }
458
459 for (i = 0; i < s->nb_hrir_inputs; av_frame_free(&frame), i++) {
460 int len = s->hrir_in[i].ir_len;
461 float *ptr;
462
463 ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &frame);
464 if (ret < 0)
465 goto fail;
466 ptr = (float *)frame->extended_data[0];
467
468 if (s->hrir_fmt == HRIR_STEREO) {
469 int idx = av_channel_layout_index_from_channel(&s->map_channel_layout,
470 s->mapping[i]);
471 if (idx < 0)
472 continue;
473
474 s->hrir_map[i] = idx;
475 if (s->type == TIME_DOMAIN) {
476 float *data_ir_l = s->data_ir[0] + idx * s->air_len;
477 float *data_ir_r = s->data_ir[1] + idx * s->air_len;
478
479 for (j = 0; j < len; j++) {
480 data_ir_l[j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
481 data_ir_r[j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
482 }
483 } else {
484 AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
485 AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
486 AVComplexFloat *fft_in_l = s->in_fft[0];
487 AVComplexFloat *fft_in_r = s->in_fft[1];
488
489 for (j = 0; j < len; j++) {
490 fft_in_l[j].re = ptr[j * 2 ] * gain_lin;
491 fft_in_r[j].re = ptr[j * 2 + 1] * gain_lin;
492 }
493
494 s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
495 s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(*fft_in_r));
496 }
497 } else {
498 int I, N = ctx->inputs[1]->ch_layout.nb_channels;
499
500 for (k = 0; k < N / 2; k++) {
501 int idx = av_channel_layout_index_from_channel(&inlink->ch_layout,
502 s->mapping[k]);
503 if (idx < 0)
504 continue;
505
506 s->hrir_map[k] = idx;
507 I = k * 2;
508 if (s->type == TIME_DOMAIN) {
509 float *data_ir_l = s->data_ir[0] + idx * s->air_len;
510 float *data_ir_r = s->data_ir[1] + idx * s->air_len;
511
512 for (j = 0; j < len; j++) {
513 data_ir_l[j] = ptr[len * N - j * N - N + I ] * gain_lin;
514 data_ir_r[j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
515 }
516 } else {
517 AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
518 AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
519 AVComplexFloat *fft_in_l = s->in_fft[0];
520 AVComplexFloat *fft_in_r = s->in_fft[1];
521
522 for (j = 0; j < len; j++) {
523 fft_in_l[j].re = ptr[j * N + I ] * gain_lin;
524 fft_in_r[j].re = ptr[j * N + I + 1] * gain_lin;
525 }
526
527 s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
528 s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(*fft_in_r));
529 }
530 }
531 }
532 }
533
534 s->have_hrirs = 1;
535
536 fail:
537 return ret;
538 }
539
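/* Drains every HRIR input until it signals EOF, then builds the filter
 * coefficients once via convert_coeffs(); after that, fixed blocks of
 * s->size samples are consumed from the main input and filtered. */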
540 static int activate(AVFilterContext *ctx)
541 {
542 HeadphoneContext *s = ctx->priv;
543 AVFilterLink *inlink = ctx->inputs[0];
544 AVFilterLink *outlink = ctx->outputs[0];
545 AVFrame *in = NULL;
546 int i, ret;
547
548 FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
549 if (!s->eof_hrirs) {
550 int eof = 1;
551 for (i = 0; i < s->nb_hrir_inputs; i++) {
552 AVFilterLink *input = ctx->inputs[i + 1];
553
554 if (s->hrir_in[i].eof)
555 continue;
556
557 if ((ret = check_ir(input, i)) <= 0)
558 return ret;
559
560 if (s->hrir_in[i].eof) {
561 if (!ff_inlink_queued_samples(input)) {
562 av_log(ctx, AV_LOG_ERROR, "No samples provided for "
563 "HRIR stream %d.\n", i);
564 return AVERROR_INVALIDDATA;
565 }
566 } else {
567 eof = 0;
568 }
569 }
570 if (!eof) {
571 ff_filter_set_ready(ctx, 100);
572 return 0;
573 }
574 s->eof_hrirs = 1;
575
576 ret = convert_coeffs(ctx, inlink);
577 if (ret < 0)
578 return ret;
579 } else if (!s->have_hrirs)
580 return AVERROR_EOF;
581
582 if ((ret = ff_inlink_consume_samples(inlink, s->size, s->size, &in)) > 0) {
583 ret = headphone_frame(s, in, outlink);
584 if (ret < 0)
585 return ret;
586 }
587
588 if (ret < 0)
589 return ret;
590
591 FF_FILTER_FORWARD_STATUS(inlink, outlink);
592 if (ff_outlink_frame_wanted(outlink))
593 ff_inlink_request_frame(inlink);
594
595 return 0;
596 }
597
598 static int query_formats(AVFilterContext *ctx)
599 {
600 struct HeadphoneContext *s = ctx->priv;
601 AVFilterFormats *formats = NULL;
602 AVFilterChannelLayouts *layouts = NULL;
603 AVFilterChannelLayouts *stereo_layout = NULL;
604 AVFilterChannelLayouts *hrir_layouts = NULL;
605 int ret, i;
606
607 ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
608 if (ret)
609 return ret;
610 ret = ff_set_common_formats(ctx, formats);
611 if (ret)
612 return ret;
613
614 layouts = ff_all_channel_layouts();
615 if (!layouts)
616 return AVERROR(ENOMEM);
617
618 ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts);
619 if (ret)
620 return ret;
621
622 ret = ff_add_channel_layout(&stereo_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
623 if (ret)
624 return ret;
625 ret = ff_channel_layouts_ref(stereo_layout, &ctx->outputs[0]->incfg.channel_layouts);
626 if (ret)
627 return ret;
628
629 if (s->hrir_fmt == HRIR_MULTI) {
630 hrir_layouts = ff_all_channel_counts();
631 if (!hrir_layouts)
632 return AVERROR(ENOMEM);
633 ret = ff_channel_layouts_ref(hrir_layouts, &ctx->inputs[1]->outcfg.channel_layouts);
634 if (ret)
635 return ret;
636 } else {
637 for (i = 1; i <= s->nb_hrir_inputs; i++) {
638 ret = ff_channel_layouts_ref(stereo_layout, &ctx->inputs[i]->outcfg.channel_layouts);
639 if (ret)
640 return ret;
641 }
642 }
643
644 return ff_set_common_all_samplerates(ctx);
645 }
646
647 static int config_input(AVFilterLink *inlink)
648 {
649 AVFilterContext *ctx = inlink->dst;
650 HeadphoneContext *s = ctx->priv;
651
652 if (s->nb_irs < inlink->ch_layout.nb_channels) {
653 av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->ch_layout.nb_channels);
654 return AVERROR(EINVAL);
655 }
656
657 s->lfe_channel = av_channel_layout_index_from_channel(&inlink->ch_layout,
658 AV_CHAN_LOW_FREQUENCY);
659 return 0;
660 }
661
662 static av_cold int init(AVFilterContext *ctx)
663 {
664 HeadphoneContext *s = ctx->priv;
665 int i, ret;
666
667 AVFilterPad pad = {
668 .name = "in0",
669 .type = AVMEDIA_TYPE_AUDIO,
670 .config_props = config_input,
671 };
672 if ((ret = ff_append_inpad(ctx, &pad)) < 0)
673 return ret;
674
675 if (!s->map) {
676 av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
677 return AVERROR(EINVAL);
678 }
679
680 parse_map(ctx);
681
682 for (i = 0; i < s->nb_hrir_inputs; i++) {
683 char *name = av_asprintf("hrir%d", i);
684 AVFilterPad pad = {
685 .name = name,
686 .type = AVMEDIA_TYPE_AUDIO,
687 };
688 if (!name)
689 return AVERROR(ENOMEM);
690 if ((ret = ff_append_inpad_free_name(ctx, &pad)) < 0)
691 return ret;
692 }
693
694 if (s->type == TIME_DOMAIN) {
695 AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(0);
696 if (!fdsp)
697 return AVERROR(ENOMEM);
698 s->scalarproduct_float = fdsp->scalarproduct_float;
699 av_free(fdsp);
700 }
701
702 return 0;
703 }
704
705 static int config_output(AVFilterLink *outlink)
706 {
707 AVFilterContext *ctx = outlink->src;
708 HeadphoneContext *s = ctx->priv;
709 AVFilterLink *inlink = ctx->inputs[0];
710
711 if (s->hrir_fmt == HRIR_MULTI) {
712 AVFilterLink *hrir_link = ctx->inputs[1];
713
714 if (hrir_link->ch_layout.nb_channels < inlink->ch_layout.nb_channels * 2) {
715 av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->ch_layout.nb_channels * 2);
716 return AVERROR(EINVAL);
717 }
718 }
719
720 s->gain_lfe = expf((s->gain - 3 * inlink->ch_layout.nb_channels + s->lfe_gain) / 20 * M_LN10);
721
722 return 0;
723 }
724
725 static av_cold void uninit(AVFilterContext *ctx)
726 {
727 HeadphoneContext *s = ctx->priv;
728
729 av_tx_uninit(&s->ifft[0]);
730 av_tx_uninit(&s->ifft[1]);
731 av_tx_uninit(&s->fft[0]);
732 av_tx_uninit(&s->fft[1]);
733 av_freep(&s->data_ir[0]);
734 av_freep(&s->data_ir[1]);
735 av_freep(&s->ringbuffer[0]);
736 av_freep(&s->ringbuffer[1]);
737 av_freep(&s->temp_src[0]);
738 av_freep(&s->temp_src[1]);
739 av_freep(&s->out_fft[0]);
740 av_freep(&s->out_fft[1]);
741 av_freep(&s->in_fft[0]);
742 av_freep(&s->in_fft[1]);
743 av_freep(&s->temp_afft[0]);
744 av_freep(&s->temp_afft[1]);
745 av_freep(&s->data_hrtf[0]);
746 av_freep(&s->data_hrtf[1]);
747 }
748
749 #define OFFSET(x) offsetof(HeadphoneContext, x)
750 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
751
752 static const AVOption headphone_options[] = {
753 { "map", "set channels convolution mappings", OFFSET(map), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
754 { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
755 { "lfe", "set lfe gain in dB", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
756 { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, .unit = "type" },
757 { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, .unit = "type" },
758 { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, .unit = "type" },
759 { "size", "set frame size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
760 { "hrir", "set hrir format", OFFSET(hrir_fmt), AV_OPT_TYPE_INT, {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, .unit = "hrir" },
761 { "stereo", "hrir files have exactly 2 channels", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, .unit = "hrir" },
762 { "multich", "single multichannel hrir file", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_MULTI}, 0, 0, .flags = FLAGS, .unit = "hrir" },
763 { NULL }
764 };
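/* Illustrative filtergraph for the default hrir=stereo layout, where one
 * stereo HRIR stream follows the main input for each mapped channel (the
 * [fl]..[sr] labels are assumed to be produced earlier in the graph):
 *   [0:a][fl][fr][fc][lfe][bl][br][sl][sr]headphone=FL|FR|FC|LFE|BL|BR|SL|SR
 * With hrir=multich a single multichannel HRIR stream is supplied instead. */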
765
766 AVFILTER_DEFINE_CLASS(headphone);
767
768 static const AVFilterPad outputs[] = {
769 {
770 .name = "default",
771 .type = AVMEDIA_TYPE_AUDIO,
772 .config_props = config_output,
773 },
774 };
775
776 const AVFilter ff_af_headphone = {
777 .name = "headphone",
778 .description = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
779 .priv_size = sizeof(HeadphoneContext),
780 .priv_class = &headphone_class,
781 .init = init,
782 .uninit = uninit,
783 .activate = activate,
784 .inputs = NULL,
785 FILTER_OUTPUTS(outputs),
786 FILTER_QUERY_FUNC(query_formats),
787 .flags = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_DYNAMIC_INPUTS,
788 };
789