/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

typedef struct ASubBoostContext {
    const AVClass *class;

    double dry_gain;
    double wet_gain;
    double feedback;
    double decay;
    double delay;
    double cutoff;
    double slope;

    double a0, a1, a2;
    double b0, b1, b2;

    int *write_pos;
    int buffer_samples;

    AVFrame *w;
    AVFrame *buffer;
} ASubBoostContext;

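/* The filter operates on planar double-precision samples and accepts any
 * channel layout and sample rate. */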
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);

    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}

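/* Compute normalized second-order (biquad) low-pass coefficients for the
 * configured cutoff; alpha follows the audio-EQ-cookbook shelf-slope form,
 * so the slope option controls the steepness of the roll-off. The delay
 * option (in milliseconds) is also converted to a delay-line length in
 * samples at the current sample rate. */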
static int get_coeffs(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double w0 = 2 * M_PI * s->cutoff / inlink->sample_rate;
    double alpha = sin(w0) / 2 * sqrt(2. * (1. / s->slope - 1.) + 2.);

    s->a0 =  1 + alpha;
    s->a1 = -2 * cos(w0);
    s->a2 =  1 - alpha;
    s->b0 = (1 - cos(w0)) / 2;
    s->b1 =  1 - cos(w0);
    s->b2 = (1 - cos(w0)) / 2;

    s->a1 /= s->a0;
    s->a2 /= s->a0;
    s->b0 /= s->a0;
    s->b1 /= s->a0;
    s->b2 /= s->a0;

    s->buffer_samples = inlink->sample_rate * s->delay / 1000;

    return 0;
}

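/* Allocate per-channel state: a delay-line buffer sized for the maximum
 * delay (sample_rate / 10 samples, i.e. 100 ms), two biquad state values
 * per channel, and one delay-line write position per channel. */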
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ASubBoostContext *s = ctx->priv;

    s->buffer = ff_get_audio_buffer(inlink, inlink->sample_rate / 10);
    s->w = ff_get_audio_buffer(inlink, 2);
    s->write_pos = av_calloc(inlink->channels, sizeof(*s->write_pos));
    if (!s->buffer || !s->w || !s->write_pos)
        return AVERROR(ENOMEM);

    return get_coeffs(ctx);
}

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

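/* Process one slice of channels: each channel is low-pass filtered, fed into
 * a decaying feedback delay line, and the delayed signal is mixed back with
 * the dry input. When the filter is disabled on the timeline, dry and wet
 * fall back to unity gain and the boosted signal is muted (mix == 0). */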
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ASubBoostContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    const double mix = ctx->is_disabled ? 0. : 1.;
    const double wet = ctx->is_disabled ? 1. : s->wet_gain;
    const double dry = ctx->is_disabled ? 1. : s->dry_gain;
    const double feedback = s->feedback, decay = s->decay;
    const double b0 = s->b0;
    const double b1 = s->b1;
    const double b2 = s->b2;
    const double a1 = -s->a1;
    const double a2 = -s->a2;
    const int start = (in->channels * jobnr) / nb_jobs;
    const int end = (in->channels * (jobnr+1)) / nb_jobs;
    const int buffer_samples = s->buffer_samples;

    for (int ch = start; ch < end; ch++) {
        const double *src = (const double *)in->extended_data[ch];
        double *dst = (double *)out->extended_data[ch];
        double *buffer = (double *)s->buffer->extended_data[ch];
        double *w = (double *)s->w->extended_data[ch];
        int write_pos = s->write_pos[ch];

        for (int n = 0; n < in->nb_samples; n++) {
            double out_sample;

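            /* Transposed direct form II biquad; a1 and a2 were negated above
             * so the recursion uses additions only. */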
            out_sample = src[n] * b0 + w[0];
            w[0] = b1 * src[n] + w[1] + a1 * out_sample;
            w[1] = b2 * src[n] + a2 * out_sample;

            buffer[write_pos] = buffer[write_pos] * decay + out_sample * feedback;
            dst[n] = (src[n] * dry + buffer[write_pos] * mix) * wet;

            if (++write_pos >= buffer_samples)
                write_pos = 0;
        }

        s->write_pos[ch] = write_pos;
    }

    return 0;
}

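/* Reuse the input frame in place when it is writable; otherwise allocate a
 * separate output frame, copy the frame properties and free the input once
 * filtering is done. */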
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in; td.out = out;
    ctx->internal->execute(ctx, filter_channels, &td, NULL, FFMIN(inlink->channels,
                                                                  ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;

    av_frame_free(&s->buffer);
    av_frame_free(&s->w);
    av_freep(&s->write_pos);
}

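/* Apply runtime option changes through the generic handler, then recompute
 * the filter coefficients and delay-line length. */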
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    return get_coeffs(ctx);
}

#define OFFSET(x) offsetof(ASubBoostContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

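/* Gains, feedback and decay are linear factors in the 0..1 range; cutoff is
 * in Hz and delay in milliseconds. */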
static const AVOption asubboost_options[] = {
    { "dry",      "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0.7},      0,   1, FLAGS },
    { "wet",      "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0.7},      0,   1, FLAGS },
    { "decay",    "set decay",    OFFSET(decay),    AV_OPT_TYPE_DOUBLE, {.dbl=0.7},      0,   1, FLAGS },
    { "feedback", "set feedback", OFFSET(feedback), AV_OPT_TYPE_DOUBLE, {.dbl=0.9},      0,   1, FLAGS },
    { "cutoff",   "set cutoff",   OFFSET(cutoff),   AV_OPT_TYPE_DOUBLE, {.dbl=100},     50, 900, FLAGS },
    { "slope",    "set slope",    OFFSET(slope),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.0001,   1, FLAGS },
    { "delay",    "set delay",    OFFSET(delay),    AV_OPT_TYPE_DOUBLE, {.dbl=20},       1, 100, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(asubboost);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_asubboost = {
    .name            = "asubboost",
    .description     = NULL_IF_CONFIG_SMALL("Boost subwoofer frequencies."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(ASubBoostContext),
    .priv_class      = &asubboost_class,
    .uninit          = uninit,
    .inputs          = inputs,
    .outputs         = outputs,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                       AVFILTER_FLAG_SLICE_THREADS,
};