Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2011 Mina Nagy Zaki | ||
3 | * Copyright (c) 2000 Edward Beingessner And Sundry Contributors. | ||
4 | * This source code is freely redistributable and may be used for any purpose. | ||
5 | * This copyright notice must be maintained. Edward Beingessner And Sundry | ||
6 | * Contributors are not responsible for the consequences of using this | ||
7 | * software. | ||
8 | * | ||
9 | * This file is part of FFmpeg. | ||
10 | * | ||
11 | * FFmpeg is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU Lesser General Public | ||
13 | * License as published by the Free Software Foundation; either | ||
14 | * version 2.1 of the License, or (at your option) any later version. | ||
15 | * | ||
16 | * FFmpeg is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * Lesser General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU Lesser General Public | ||
22 | * License along with FFmpeg; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | /** | ||
27 | * @file | ||
28 | * Stereo Widening Effect. Adds audio cues to move stereo image in | ||
29 | * front of the listener. Adapted from the libsox earwax effect. | ||
30 | */ | ||
31 | |||
32 | #include "libavutil/channel_layout.h" | ||
33 | #include "avfilter.h" | ||
34 | #include "audio.h" | ||
35 | #include "filters.h" | ||
36 | #include "formats.h" | ||
37 | |||
38 | #define NUMTAPS 32 | ||
39 | |||
40 | static const int8_t filt[NUMTAPS * 2] = { | ||
41 | /* 30° 330° */ | ||
42 | 4, -6, /* 32 tap stereo FIR filter. */ | ||
43 | 4, -11, /* One side filters as if the */ | ||
44 | -1, -5, /* signal was from 30 degrees */ | ||
45 | 3, 3, /* from the ear, the other as */ | ||
46 | -2, 5, /* if 330 degrees. */ | ||
47 | -5, 0, | ||
48 | 9, 1, | ||
49 | 6, 3, /* Input */ | ||
50 | -4, -1, /* Left Right */ | ||
51 | -5, -3, /* __________ __________ */ | ||
52 | -2, -5, /* | | | | */ | ||
53 | -7, 1, /* .---| Hh,0(f) | | Hh,0(f) |---. */ | ||
54 | 6, -7, /* / |__________| |__________| \ */ | ||
55 | 30, -29, /* / \ / \ */ | ||
56 | 12, -3, /* / X \ */ | ||
57 | -11, 4, /* / / \ \ */ | ||
58 | -3, 7, /* ____V_____ __________V V__________ _____V____ */ | ||
59 | -20, 23, /* | | | | | | | | */ | ||
60 | 2, 0, /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */ | ||
61 | 1, -6, /* |__________| |__________| |__________| |__________| */ | ||
62 | -14, -5, /* \ ___ / \ ___ / */ | ||
63 | 15, -18, /* \ / \ / _____ \ / \ / */ | ||
64 | 6, 7, /* `->| + |<--' / \ `-->| + |<-' */ | ||
65 | 15, -10, /* \___/ _/ \_ \___/ */ | ||
66 | -14, 22, /* \ / \ / \ / */ | ||
67 | -7, -2, /* `--->| | | |<---' */ | ||
68 | -4, 9, /* \_/ \_/ */ | ||
69 | 6, -12, /* */ | ||
70 | 6, -6, /* Headphones */ | ||
71 | 0, -11, | ||
72 | 0, -5, | ||
73 | 4, 0}; | ||
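The table interleaves the two ear responses: even indices form the 30° column, odd indices the 330° column, and the `>> 7` in scalarproduct() below gives the coefficients an implicit gain of 1/128. As a rough illustration of how one per-ear output sample is formed (the helper name `fir_sample` is hypothetical; the real code splits the columns in config_input() and runs the dot product in scalarproduct()):

```c
/* Hypothetical illustration only: one output sample for one ear, taken
 * straight from 32 consecutive input samples and one column of filt[].
 * ear == 0 selects the 30-degree column, ear == 1 the 330-degree column. */
static int16_t fir_sample(const int16_t *in, int ear)
{
    int32_t sum = 0;

    for (int j = 0; j < NUMTAPS; j++)
        sum += in[j] * filt[2 * j + ear];

    /* same scaling and saturation as scalarproduct() below */
    return av_clip_int16(sum >> 7);
}
```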
74 | |||
75 | typedef struct EarwaxContext { | ||
76 | int16_t filter[2][NUMTAPS]; | ||
77 | int16_t taps[4][NUMTAPS * 2]; | ||
78 | |||
79 | AVFrame *frame[2]; | ||
80 | } EarwaxContext; | ||
81 | |||
82 | 1 | static int query_formats(AVFilterContext *ctx) | |
83 | { | ||
84 | static const int sample_rates[] = { 44100, -1 }; | ||
85 | int ret; | ||
86 | |||
87 | 1 | AVFilterFormats *formats = NULL; | |
88 | 1 | AVFilterChannelLayouts *layout = NULL; | |
89 | |||
90 | 2/4 ✓✗✓✗ | 2 | if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_S16P )) < 0 ||
91 | 1/2 ✓✗ | 2 | (ret = ff_set_common_formats (ctx , formats )) < 0 ||
92 | 1/2 ✓✗ | 2 | (ret = ff_add_channel_layout (&layout , &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO)) < 0 ||
93 | 1/2 ✗✓ | 2 | (ret = ff_set_common_channel_layouts (ctx , layout )) < 0 ||
94 | 1 | (ret = ff_set_common_samplerates_from_list(ctx, sample_rates)) < 0) | |
95 | ✗ | return ret; | |
96 | |||
97 | 1 | return 0; | |
98 | } | ||
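The filter therefore negotiates exactly one format: planar signed 16-bit stereo at 44100 Hz. With the ffmpeg command line, an invocation such as `ffmpeg -i in.flac -af earwax out.wav` should still work on other inputs, since libavfilter normally inserts the required resampling and format conversion automatically; only the constraints declared above come from this filter itself.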
99 | |||
100 | //FIXME: replace with DSPContext.scalarproduct_int16 | ||
101 | 160 | static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, | |
102 | const int16_t *filt, int16_t *out) | ||
103 | { | ||
104 | int32_t sample; | ||
105 | int16_t j; | ||
106 | |||
107 | 2/2 ✓✓ | 327840 | while (in < endin) {
108 | 327680 | sample = 0; | |
109 | 2/2 ✓✓ | 10813440 | for (j = 0; j < NUMTAPS; j++)
110 | 10485760 | sample += in[j] * filt[j]; | |
111 | 327680 | *out = av_clip_int16(sample >> 7); | |
112 | 327680 | out++; | |
113 | 327680 | in++; | |
114 | } | ||
115 | |||
116 | 160 | return out; | |
117 | } | ||
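The loop is a sliding dot product: out[i] = clip((sum over j of in[i + j] * filt[j]) >> 7), so the caller must keep NUMTAPS - 1 samples readable past endin and gets exactly endin - in output samples back; convolve() below arranges this with its taps buffer. A minimal caller sketch (the block size and function name are assumptions for illustration):

```c
#define BLOCK 256  /* hypothetical block size */

/* Hypothetical caller: filter BLOCK samples with one deinterleaved
 * coefficient set.  The input must hold BLOCK + NUMTAPS - 1 valid samples
 * because out[i] reads in[i] .. in[i + NUMTAPS - 1]. */
static void filter_block(const int16_t coef[NUMTAPS],
                         const int16_t in[BLOCK + NUMTAPS - 1],
                         int16_t out[BLOCK])
{
    scalarproduct(in, in + BLOCK, coef, out);
}
```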
118 | |||
119 | 1 | static int config_input(AVFilterLink *inlink) | |
120 | { | ||
121 | 1 | EarwaxContext *s = inlink->dst->priv; | |
122 | |||
123 | 2/2 ✓✓ | 33 | for (int i = 0; i < NUMTAPS; i++) {
124 | 32 | s->filter[0][i] = filt[i * 2]; | |
125 | 32 | s->filter[1][i] = filt[i * 2 + 1]; | |
126 | } | ||
127 | |||
128 | 1 | return 0; | |
129 | } | ||
130 | |||
131 | 80 | static void convolve(AVFilterContext *ctx, AVFrame *in, | |
132 | int input_ch, int output_ch, | ||
133 | int filter_ch, int tap_ch) | ||
134 | { | ||
135 | 80 | EarwaxContext *s = ctx->priv; | |
136 | int16_t *taps, *endin, *dst, *src; | ||
137 | int len; | ||
138 | |||
139 | 80 | taps = s->taps[tap_ch]; | |
140 | 80 | dst = (int16_t *)s->frame[input_ch]->data[output_ch]; | |
141 | 80 | src = (int16_t *)in->data[input_ch]; | |
142 | |||
143 | 80 | len = FFMIN(NUMTAPS, in->nb_samples); | |
144 | // copy part of new input and process with saved input | ||
145 | 80 | memcpy(taps+NUMTAPS, src, len * sizeof(*taps)); | |
146 | 80 | dst = scalarproduct(taps, taps + len, s->filter[filter_ch], dst); | |
147 | |||
148 | // process current input | ||
149 | 1/2 ✓✗ | 80 | if (in->nb_samples >= NUMTAPS) {
150 | 80 | endin = src + in->nb_samples - NUMTAPS; | |
151 | 80 | scalarproduct(src, endin, s->filter[filter_ch], dst); | |
152 | |||
153 | // save part of input for next round | ||
154 | 80 | memcpy(taps, endin, NUMTAPS * sizeof(*taps)); | |
155 | } else { | ||
156 | ✗ | memmove(taps, taps + in->nb_samples, NUMTAPS * sizeof(*taps)); | |
157 | } | ||
158 | 80 | } | |
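Spelled out for a frame of S samples with S >= NUMTAPS, the buffering above is a small overlap-save scheme (descriptive only, nothing beyond what the code does):

```c
/*
 * taps[tap_ch] (2 * NUMTAPS entries) when convolve() runs:
 *
 *   [ last NUMTAPS samples of the previous frame | first NUMTAPS new samples ]
 *                                                  ^ filled by the first memcpy
 *
 * pass 1: scalarproduct(taps, taps + NUMTAPS, ...)   -> NUMTAPS outputs
 *         (covers the frame boundary using the saved history)
 * pass 2: scalarproduct(src, src + S - NUMTAPS, ...) -> S - NUMTAPS outputs
 * save:   memcpy(taps, src + S - NUMTAPS, ...)       -> tail kept for the next frame
 *
 * so each frame yields exactly S output samples; for short frames
 * (S < NUMTAPS) only pass 1 runs and the history is shifted instead.
 */
```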
159 | |||
160 | 40 | static void mix(AVFilterContext *ctx, AVFrame *out, | |
161 | int output_ch, int f0, int f1, int i0, int i1) | ||
162 | { | ||
163 | 40 | EarwaxContext *s = ctx->priv; | |
164 | 40 | const int16_t *srcl = (const int16_t *)s->frame[f0]->data[i0]; | |
165 | 40 | const int16_t *srcr = (const int16_t *)s->frame[f1]->data[i1]; | |
166 | 40 | int16_t *dst = (int16_t *)out->data[output_ch]; | |
167 | |||
168 | 2/2 ✓✓ | 163880 | for (int n = 0; n < out->nb_samples; n++)
169 | 163840 | dst[n] = av_clip_int16(srcl[n] + srcr[n]); | |
170 | 40 | } | |
171 | |||
172 | 20 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |
173 | { | ||
174 | 20 | AVFilterContext *ctx = inlink->dst; | |
175 | 20 | EarwaxContext *s = ctx->priv; | |
176 | 20 | AVFilterLink *outlink = ctx->outputs[0]; | |
177 | 20 | AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples); | |
178 | |||
179 | 2/2 ✓✓ | 60 | for (int ch = 0; ch < 2; ch++) {
180 | 3/4 ✓✓✗✓ | 40 | if (!s->frame[ch] || s->frame[ch]->nb_samples < in->nb_samples) {
181 | 2 | av_frame_free(&s->frame[ch]); | |
182 | 2 | s->frame[ch] = ff_get_audio_buffer(outlink, in->nb_samples); | |
183 | 1/2 ✗✓ | 2 | if (!s->frame[ch]) {
184 | ✗ | av_frame_free(&in); | |
185 | ✗ | av_frame_free(&out); | |
186 | ✗ | return AVERROR(ENOMEM); | |
187 | } | ||
188 | } | ||
189 | } | ||
190 | |||
191 | 1/2 ✗✓ | 20 | if (!out) {
192 | ✗ | av_frame_free(&in); | |
193 | ✗ | return AVERROR(ENOMEM); | |
194 | } | ||
195 | 20 | av_frame_copy_props(out, in); | |
196 | |||
197 | 20 | convolve(ctx, in, 0, 0, 0, 0); | |
198 | 20 | convolve(ctx, in, 0, 1, 1, 1); | |
199 | 20 | convolve(ctx, in, 1, 0, 0, 2); | |
200 | 20 | convolve(ctx, in, 1, 1, 1, 3); | |
201 | |||
202 | 20 | mix(ctx, out, 0, 0, 1, 1, 0); | |
203 | 20 | mix(ctx, out, 1, 0, 1, 0, 1); | |
204 | |||
205 | 20 | av_frame_free(&in); | |
206 | 20 | return ff_filter_frame(outlink, out); | |
207 | } | ||
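Reading the indices off the four convolve() and two mix() calls (filter[0] being the 30° column of filt[], filter[1] the 330° column), the routing is:

```c
/*
 * frame[0]->data[0] = FIR(left,  filter[0])   frame[0]->data[1] = FIR(left,  filter[1])
 * frame[1]->data[0] = FIR(right, filter[0])   frame[1]->data[1] = FIR(right, filter[1])
 *
 * out->data[0] (left)  = FIR(left, filter[1]) + FIR(right, filter[0])
 * out->data[1] (right) = FIR(left, filter[0]) + FIR(right, filter[1])
 *
 * with each sum saturated to int16 by mix().
 */
```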
208 | |||
209 | 2 | static av_cold void uninit(AVFilterContext *ctx) | |
210 | { | ||
211 | 2 | EarwaxContext *s = ctx->priv; | |
212 | |||
213 | 2 | av_frame_free(&s->frame[0]); | |
214 | 2 | av_frame_free(&s->frame[1]); | |
215 | 2 | } | |
216 | |||
217 | static const AVFilterPad earwax_inputs[] = { | ||
218 | { | ||
219 | .name = "default", | ||
220 | .type = AVMEDIA_TYPE_AUDIO, | ||
221 | .filter_frame = filter_frame, | ||
222 | .config_props = config_input, | ||
223 | }, | ||
224 | }; | ||
225 | |||
226 | const AVFilter ff_af_earwax = { | ||
227 | .name = "earwax", | ||
228 | .description = NULL_IF_CONFIG_SMALL("Widen the stereo image."), | ||
229 | .priv_size = sizeof(EarwaxContext), | ||
230 | .uninit = uninit, | ||
231 | FILTER_INPUTS(earwax_inputs), | ||
232 | FILTER_OUTPUTS(ff_audio_default_filterpad), | ||
233 | FILTER_QUERY_FUNC(query_formats), | ||
234 | }; | ||
235 |