/*
 * Copyright (c) 2023 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
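
/*
 * arls: adaptive Recursive Least Squares (RLS) audio filter.
 *
 * The filter takes two inputs, "input" and "desired", adapts a kernel of the
 * configured order so that the filtered first stream approximates the second,
 * and writes the signal selected by the out_mode option to the single output.
 */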

#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"

enum OutModes {
    IN_MODE,
    DESIRED_MODE,
    OUT_MODE,
    NOISE_MODE,
    ERROR_MODE,
    NB_OMODES
};

typedef struct AudioRLSContext {
    const AVClass *class;

    int order;
    float lambda;
    float delta;
    int output_mode;
    int precision;

    int kernel_size;   ///< order aligned up to a multiple of 16
    AVFrame *offset;
    AVFrame *delay;
    AVFrame *coeffs;
    AVFrame *p, *dp;   ///< kernel_size x kernel_size per-channel state matrices
    AVFrame *gains;
    AVFrame *u, *tmp;

    AVFrame *frame[2]; ///< queued frames from the "input" and "desired" pads

    int (*filter_channels)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);

    AVFloatDSPContext *fdsp;
} AudioRLSContext;

#define OFFSET(x) offsetof(AudioRLSContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption arls_options[] = {
    { "order",    "set the filter order",  OFFSET(order),       AV_OPT_TYPE_INT,   {.i64=16},       1, INT16_MAX,   A },
    { "lambda",   "set the filter lambda", OFFSET(lambda),      AV_OPT_TYPE_FLOAT, {.dbl=1.f},      0, 1,           AT },
    { "delta",    "set the filter delta",  OFFSET(delta),       AV_OPT_TYPE_FLOAT, {.dbl=2.f},      0, INT16_MAX,   A },
    { "out_mode", "set output mode",       OFFSET(output_mode), AV_OPT_TYPE_INT,   {.i64=OUT_MODE}, 0, NB_OMODES-1, AT, .unit = "mode" },
    {  "i", "input",   0, AV_OPT_TYPE_CONST, {.i64=IN_MODE},      0, 0, AT, .unit = "mode" },
    {  "d", "desired", 0, AV_OPT_TYPE_CONST, {.i64=DESIRED_MODE}, 0, 0, AT, .unit = "mode" },
    {  "o", "output",  0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE},     0, 0, AT, .unit = "mode" },
    {  "n", "noise",   0, AV_OPT_TYPE_CONST, {.i64=NOISE_MODE},   0, 0, AT, .unit = "mode" },
    {  "e", "error",   0, AV_OPT_TYPE_CONST, {.i64=ERROR_MODE},   0, 0, AT, .unit = "mode" },
    { "precision", "set processing precision", OFFSET(precision), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, A, .unit = "precision" },
    {  "auto",   "set auto processing precision",                  0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, .unit = "precision" },
    {  "float",  "set single-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, .unit = "precision" },
    {  "double", "set double-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, A, .unit = "precision" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(arls);
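
/*
 * Illustrative use (hypothetical file names): feed the stream to be filtered
 * as the first input and the desired/reference signal as the second, e.g.
 *
 *   ffmpeg -i input.wav -i desired.wav \
 *          -filter_complex "[0:a][1:a]arls=order=32:lambda=0.999:out_mode=o" out.wav
 */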

static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    const AudioRLSContext *s = ctx->priv;
    static const enum AVSampleFormat sample_fmts[3][3] = {
        { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
        { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE },
        { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
    };
    int ret;

    if ((ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out,
                                                 sample_fmts[s->precision])) < 0)
        return ret;

    return 0;
}
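
/*
 * activate(): consume the same number of samples from both inputs, run the
 * per-channel RLS kernel over the pair via ff_filter_execute(), and forward
 * status (EOF) and frame requests between the links.
 */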
static int activate(AVFilterContext *ctx)
{
    AudioRLSContext *s = ctx->priv;
    int i, ret, status;
    int nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
                       ff_inlink_queued_samples(ctx->inputs[1]));
    for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
        if (s->frame[i])
            continue;

        if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frame[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->frame[0] && s->frame[1]) {
        AVFrame *out;

        out = ff_get_audio_buffer(ctx->outputs[0], s->frame[0]->nb_samples);
        if (!out) {
            av_frame_free(&s->frame[0]);
            av_frame_free(&s->frame[1]);
            return AVERROR(ENOMEM);
        }

        ff_filter_execute(ctx, s->filter_channels, out, NULL,
                          FFMIN(ctx->outputs[0]->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));

        out->pts = s->frame[0]->pts;
        out->duration = s->frame[0]->duration;

        av_frame_free(&s->frame[0]);
        av_frame_free(&s->frame[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }

    if (!nb_samples) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
                ff_outlink_set_status(ctx->outputs[0], status, pts);
                return 0;
            }
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        for (i = 0; i < 2; i++) {
            if (s->frame[i] || ff_inlink_queued_samples(ctx->inputs[i]) > 0)
                continue;
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }
    return 0;
}
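
/* arls_template.c contains the per-channel RLS kernels; including it with
 * DEPTH set to 32 and 64 generates the float and double variants that
 * config_output() assigns to filter_channels. */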
#define DEPTH 32
#include "arls_template.c"

#undef DEPTH
#define DEPTH 64
#include "arls_template.c"

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioRLSContext *s = ctx->priv;

    s->kernel_size = FFALIGN(s->order, 16);

    if (!s->offset)
        s->offset = ff_get_audio_buffer(outlink, 1);
    if (!s->delay)
        s->delay = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
    if (!s->coeffs)
        s->coeffs = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
    if (!s->gains)
        s->gains = ff_get_audio_buffer(outlink, s->kernel_size);
    if (!s->p)
        s->p = ff_get_audio_buffer(outlink, s->kernel_size * s->kernel_size);
    if (!s->dp)
        s->dp = ff_get_audio_buffer(outlink, s->kernel_size * s->kernel_size);
    if (!s->u)
        s->u = ff_get_audio_buffer(outlink, s->kernel_size);
    if (!s->tmp)
        s->tmp = ff_get_audio_buffer(outlink, s->kernel_size);

    if (!s->delay || !s->coeffs || !s->p || !s->dp || !s->gains || !s->offset || !s->u || !s->tmp)
        return AVERROR(ENOMEM);

    for (int ch = 0; ch < s->offset->ch_layout.nb_channels; ch++) {
        int *dst = (int *)s->offset->extended_data[ch];

        for (int i = 0; i < s->kernel_size; i++)
            dst[0] = s->kernel_size - 1;
    }
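
    /* Classic RLS initialisation: the per-channel matrix P starts as
     * delta * I, so the "delta" option regularises the early updates of the
     * inverse autocorrelation estimate. */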
    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBLP:
        for (int ch = 0; ch < s->p->ch_layout.nb_channels; ch++) {
            double *dst = (double *)s->p->extended_data[ch];

            for (int i = 0; i < s->kernel_size; i++)
                dst[i * s->kernel_size + i] = s->delta;
        }

        s->filter_channels = filter_channels_double;
        break;
    case AV_SAMPLE_FMT_FLTP:
        for (int ch = 0; ch < s->p->ch_layout.nb_channels; ch++) {
            float *dst = (float *)s->p->extended_data[ch];

            for (int i = 0; i < s->kernel_size; i++)
                dst[i * s->kernel_size + i] = s->delta;
        }

        s->filter_channels = filter_channels_float;
        break;
    }

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    AudioRLSContext *s = ctx->priv;

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioRLSContext *s = ctx->priv;

    av_freep(&s->fdsp);
    av_frame_free(&s->delay);
    av_frame_free(&s->coeffs);
    av_frame_free(&s->gains);
    av_frame_free(&s->offset);
    av_frame_free(&s->p);
    av_frame_free(&s->dp);
    av_frame_free(&s->u);
    av_frame_free(&s->tmp);
}

static const AVFilterPad inputs[] = {
    {
        .name = "input",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "desired",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
};

const AVFilter ff_af_arls = {
    .name            = "arls",
    .description     = NULL_IF_CONFIG_SMALL("Apply Recursive Least Squares algorithm to first audio stream."),
    .priv_size       = sizeof(AudioRLSContext),
    .priv_class      = &arls_class,
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_QUERY_FUNC2(query_formats),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                       AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};