GCC Code Coverage Report

Directory: ../../../ffmpeg/
File:      src/libavfilter/af_earwax.c
Date:      2021-01-26 01:16:58

                 Exec    Total    Coverage
Lines:             70       77      90.9 %
Branches:          21       30      70.0 %

Line  Branch       Exec  Source
   1                       /*
   2                        * Copyright (c) 2011 Mina Nagy Zaki
   3                        * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
   4                        * This source code is freely redistributable and may be used for any purpose.
   5                        * This copyright notice must be maintained.  Edward Beingessner And Sundry
   6                        * Contributors are not responsible for the consequences of using this
   7                        * software.
   8                        *
   9                        * This file is part of FFmpeg.
  10                        *
  11                        * FFmpeg is free software; you can redistribute it and/or
  12                        * modify it under the terms of the GNU Lesser General Public
  13                        * License as published by the Free Software Foundation; either
  14                        * version 2.1 of the License, or (at your option) any later version.
  15                        *
  16                        * FFmpeg is distributed in the hope that it will be useful,
  17                        * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18                        * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19                        * Lesser General Public License for more details.
  20                        *
  21                        * You should have received a copy of the GNU Lesser General Public
  22                        * License along with FFmpeg; if not, write to the Free Software
  23                        * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  24                        */
  25
  26                       /**
  27                        * @file
  28                        * Stereo Widening Effect. Adds audio cues to move stereo image in
  29                        * front of the listener. Adapted from the libsox earwax effect.
  30                        */
  31
  32                       #include "libavutil/channel_layout.h"
  33                       #include "avfilter.h"
  34                       #include "audio.h"
  35                       #include "formats.h"
  36
  37                       #define NUMTAPS 32
  38
  39                       static const int8_t filt[NUMTAPS * 2] = {
  40                       /* 30°  330° */
  41                           4,   -6,     /* 32 tap stereo FIR filter. */
  42                           4,  -11,     /* One side filters as if the */
  43                          -1,   -5,     /* signal was from 30 degrees */
  44                           3,    3,     /* from the ear, the other as */
  45                          -2,    5,     /* if 330 degrees. */
  46                          -5,    0,
  47                           9,    1,
  48                           6,    3,     /*                         Input                         */
  49                          -4,   -1,     /*                   Left         Right                  */
  50                          -5,   -3,     /*                __________   __________                */
  51                          -2,   -5,     /*               |          | |          |               */
  52                          -7,    1,     /*           .---|  Hh,0(f) | |  Hh,0(f) |---.           */
  53                           6,   -7,     /*          /    |__________| |__________|    \          */
  54                          30,  -29,     /*         /                \ /                \         */
  55                          12,   -3,     /*        /                  X                  \        */
  56                         -11,    4,     /*       /                  / \                  \       */
  57                          -3,    7,     /*  ____V_____   __________V   V__________   _____V____  */
  58                         -20,   23,     /* |          | |          |   |          | |          | */
  59                           2,    0,     /* | Hh,30(f) | | Hh,330(f)|   | Hh,330(f)| | Hh,30(f) | */
  60                           1,   -6,     /* |__________| |__________|   |__________| |__________| */
  61                         -14,   -5,     /*      \     ___      /           \      ___     /      */
  62                          15,  -18,     /*       \   /   \    /    _____    \    /   \   /       */
  63                           6,    7,     /*        `->| + |<--'    /     \    `-->| + |<-'        */
  64                          15,  -10,     /*           \___/      _/       \_      \___/           */
  65                         -14,   22,     /*               \     / \       / \     /               */
  66                          -7,   -2,     /*                `--->| |       | |<---'                */
  67                          -4,    9,     /*                     \_/       \_/                     */
  68                           6,  -12,     /*                                                       */
  69                           6,   -6,     /*                       Headphones                      */
  70                           0,  -11,
  71                           0,   -5,
  72                           4,    0};
  73
  74                       typedef struct EarwaxContext {
  75                           int16_t filter[2][NUMTAPS];
  76                           int16_t taps[4][NUMTAPS * 2];
  77
  78                           AVFrame *frame[2];
  79                       } EarwaxContext;
  80
  81                  1  static int query_formats(AVFilterContext *ctx)
  82                       {
  83                           static const int sample_rates[] = { 44100, -1 };
  84                           int ret;
  85
  86                  1      AVFilterFormats *formats = NULL;
  87                  1      AVFilterChannelLayouts *layout = NULL;
  88
  89                  2      if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_S16P                )) < 0 ||
  90                  2          (ret = ff_set_common_formats         (ctx     , formats                           )) < 0 ||
  91                  2          (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO               )) < 0 ||
  92                  2          (ret = ff_set_common_channel_layouts (ctx     , layout                            )) < 0 ||
  93                  1          (ret = ff_set_common_samplerates     (ctx     , ff_make_format_list(sample_rates) )) < 0)
  94                               return ret;
  95
  96                  1      return 0;
  97                       }
  98
  99                       //FIXME: replace with DSPContext.scalarproduct_int16
 100                160  static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin,
 101                                                           const int16_t *filt, int16_t *out)
 102                       {
 103                           int32_t sample;
 104                           int16_t j;
 105
 106              82080      while (in < endin) {
 107              81920          sample = 0;
 108            2703360          for (j = 0; j < NUMTAPS; j++)
 109            2621440              sample += in[j] * filt[j];
 110              81920          *out = av_clip_int16(sample >> 7);
 111              81920          out++;
 112              81920          in++;
 113                           }
 114
 115                160      return out;
 116                       }
 117
 118                  1  static int config_input(AVFilterLink *inlink)
 119                       {
 120                  1      EarwaxContext *s = inlink->dst->priv;
 121
 122                 33      for (int i = 0; i < NUMTAPS; i++) {
 123                 32          s->filter[0][i] = filt[i * 2];
 124                 32          s->filter[1][i] = filt[i * 2 + 1];
 125                           }
 126
 127                  1      return 0;
 128                       }
 129
 130                 80  static void convolve(AVFilterContext *ctx, AVFrame *in,
 131                                            int input_ch, int output_ch,
 132                                            int filter_ch, int tap_ch)
 133                       {
 134                 80      EarwaxContext *s = ctx->priv;
 135                           int16_t *taps, *endin, *dst, *src;
 136                           int len;
 137
 138                 80      taps  = s->taps[tap_ch];
 139                 80      dst   = (int16_t *)s->frame[input_ch]->data[output_ch];
 140                 80      src   = (int16_t *)in->data[input_ch];
 141
 142                 80      len = FFMIN(NUMTAPS, in->nb_samples);
 143                           // copy part of new input and process with saved input
 144                 80      memcpy(taps+NUMTAPS, src, len * sizeof(*taps));
 145                 80      dst = scalarproduct(taps, taps + len, s->filter[filter_ch], dst);
 146
 147                           // process current input
 148                 80      if (in->nb_samples >= NUMTAPS) {
 149                 80          endin = src + in->nb_samples - NUMTAPS;
 150                 80          scalarproduct(src, endin, s->filter[filter_ch], dst);
 151
 152                               // save part of input for next round
 153                 80          memcpy(taps, endin, NUMTAPS * sizeof(*taps));
 154                           } else {
 155                               memmove(taps, taps + in->nb_samples, NUMTAPS * sizeof(*taps));
 156                           }
 157                 80  }
 158
 159                 40  static void mix(AVFilterContext *ctx, AVFrame *out,
 160                                       int output_ch, int f0, int f1, int i0, int i1)
 161                       {
 162                 40      EarwaxContext *s = ctx->priv;
 163                 40      const int16_t *srcl = (const int16_t *)s->frame[f0]->data[i0];
 164                 40      const int16_t *srcr = (const int16_t *)s->frame[f1]->data[i1];
 165                 40      int16_t *dst = (int16_t *)out->data[output_ch];
 166
 167              41000      for (int n = 0; n < out->nb_samples; n++)
 168              40960          dst[n] = av_clip_int16(srcl[n] + srcr[n]);
 169                 40  }
 170
 171                 20  static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 172                       {
 173                 20      AVFilterContext *ctx = inlink->dst;
 174                 20      EarwaxContext *s = ctx->priv;
 175                 20      AVFilterLink *outlink = ctx->outputs[0];
 176                 20      AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);
 177
 178                 60      for (int ch = 0; ch < 2; ch++) {
 179                 40          if (!s->frame[ch] || s->frame[ch]->nb_samples < in->nb_samples) {
 180                  2              av_frame_free(&s->frame[ch]);
 181                  2              s->frame[ch] = ff_get_audio_buffer(outlink, in->nb_samples);
 182                  2              if (!s->frame[ch]) {
 183                                       av_frame_free(&in);
 184                                       av_frame_free(&out);
 185                                       return AVERROR(ENOMEM);
 186                                   }
 187                               }
 188                           }
 189
 190                 20      if (!out) {
 191                               av_frame_free(&in);
 192                               return AVERROR(ENOMEM);
 193                           }
 194                 20      av_frame_copy_props(out, in);
 195
 196                 20      convolve(ctx, in, 0, 0, 0, 0);
 197                 20      convolve(ctx, in, 0, 1, 1, 1);
 198                 20      convolve(ctx, in, 1, 0, 0, 2);
 199                 20      convolve(ctx, in, 1, 1, 1, 3);
 200
 201                 20      mix(ctx, out, 0, 0, 1, 1, 0);
 202                 20      mix(ctx, out, 1, 0, 1, 0, 1);
 203
 204                 20      av_frame_free(&in);
 205                 20      return ff_filter_frame(outlink, out);
 206                       }
 207
 208                  1  static av_cold void uninit(AVFilterContext *ctx)
 209                       {
 210                  1      EarwaxContext *s = ctx->priv;
 211
 212                  1      av_frame_free(&s->frame[0]);
 213                  1      av_frame_free(&s->frame[1]);
 214                  1  }
 215
 216                       static const AVFilterPad earwax_inputs[] = {
 217                           {
 218                               .name         = "default",
 219                               .type         = AVMEDIA_TYPE_AUDIO,
 220                               .filter_frame = filter_frame,
 221                               .config_props = config_input,
 222                           },
 223                           { NULL }
 224                       };
 225
 226                       static const AVFilterPad earwax_outputs[] = {
 227                           {
 228                               .name = "default",
 229                               .type = AVMEDIA_TYPE_AUDIO,
 230                           },
 231                           { NULL }
 232                       };
 233
 234                       AVFilter ff_af_earwax = {
 235                           .name           = "earwax",
 236                           .description    = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
 237                           .query_formats  = query_formats,
 238                           .priv_size      = sizeof(EarwaxContext),
 239                           .uninit         = uninit,
 240                           .inputs         = earwax_inputs,
 241                           .outputs        = earwax_outputs,
 242                       };