GCC Code Coverage Report

Directory:  ../../../ffmpeg/
File:       src/libavfilter/vf_convolve.c
Date:       2020-09-25 14:59:26

            Exec    Total   Coverage
Lines:         0      312      0.0 %
Branches:      0      174      0.0 %

Source:
/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avfft.h"

#include "avfilter.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

#define MAX_THREADS 16
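/* Filter state: per-plane, per-thread FFT and inverse-FFT contexts, padded
 * n*n complex buffers for the main and impulse inputs, plane geometry, and
 * the user-visible options (planes, impulse, noise). */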
typedef struct ConvolveContext {
    const AVClass *class;
    FFFrameSync fs;

    FFTContext *fft[4][MAX_THREADS];
    FFTContext *ifft[4][MAX_THREADS];

    int fft_bits[4];
    int fft_len[4];
    int planewidth[4];
    int planeheight[4];

    FFTComplex *fft_hdata[4];
    FFTComplex *fft_vdata[4];
    FFTComplex *fft_hdata_impulse[4];
    FFTComplex *fft_vdata_impulse[4];

    int depth;
    int planes;
    int impulse;
    float noise;
    int nb_planes;
    int got_impulse[4];

    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolveContext;

#define OFFSET(x) offsetof(ConvolveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption convolve_options[] = {
    { "planes",  "set planes to convolve",                  OFFSET(planes),   AV_OPT_TYPE_INT,   {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses",                OFFSET(impulse),  AV_OPT_TYPE_INT,   {.i64=1}, 0,  1, FLAGS, "impulse" },
    {   "first", "process only first impulse, ignore rest", 0,                AV_OPT_TYPE_CONST, {.i64=0}, 0,  0, FLAGS, "impulse" },
    {   "all",   "process all impulses",                    0,                AV_OPT_TYPE_CONST, {.i64=1}, 0,  0, FLAGS, "impulse" },
    { "noise",   "set noise",                               OFFSET(noise),    AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0,  1, FLAGS },
    { NULL },
};
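/* The same option set is duplicated for deconvolve at the bottom of this
 * file.  As an illustrative example (not part of this source; file names
 * are placeholders), convolving a stream with a kernel image would look
 * roughly like:
 *
 *   ffmpeg -i input.mp4 -i kernel.png \
 *          -filter_complex "[0:v][1:v]convolve=planes=1:noise=1e-7" out.mp4
 */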
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
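/* Pick, per plane, the smallest power-of-two FFT size that covers
 * max(width, height) and allocate n*n complex buffers for the padded
 * horizontal/vertical transforms of both the main and impulse inputs. */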
static int config_input_main(AVFilterLink *inlink)
{
    ConvolveContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int fft_bits, i;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = desc->nb_components;
    s->depth = desc->comp[0].depth;

    for (i = 0; i < s->nb_planes; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];
        int n = FFMAX(w, h);

        for (fft_bits = 1; 1 << fft_bits < n; fft_bits++);

        s->fft_bits[i] = fft_bits;
        s->fft_len[i] = 1 << s->fft_bits[i];

        if (!(s->fft_hdata[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int config_input_impulse(AVFilterLink *inlink)
{
    AVFilterContext *ctx  = inlink->dst;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

typedef struct ThreadData {
    FFTComplex *hdata, *vdata;
    int plane, n;
} ThreadData;
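/* The 2D transforms are split into a horizontal and a vertical pass; each
 * slice-threaded job handles rows [n*jobnr/nb_jobs, n*(jobnr+1)/nb_jobs)
 * and uses its own per-job FFT context. */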
static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        av_fft_permute(s->fft[plane][jobnr], hdata + y * n);
        av_fft_calc(s->fft[plane][jobnr], hdata + y * n);
    }

    return 0;
}
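/* Copy one plane into the centre of the n*n complex buffer (scaled by
 * 'scale'), then pad the border by replicating the edge samples so the
 * transform does not see a hard discontinuity. */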
static void get_input(ConvolveContext *s, FFTComplex *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale)
{
    const int iw = (n - w) / 2, ih = (n - h) / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}
static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    FFTComplex *vdata = td->vdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        for (x = 0; x < n; x++) {
            vdata[y * n + x].re = hdata[x * n + y].re;
            vdata[y * n + x].im = hdata[x * n + y].im;
        }

        av_fft_permute(s->fft[plane][jobnr], vdata + y * n);
        av_fft_calc(s->fft[plane][jobnr], vdata + y * n);
    }

    return 0;
}

static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    FFTComplex *vdata = td->vdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        av_fft_permute(s->ifft[plane][jobnr], vdata + y * n);
        av_fft_calc(s->ifft[plane][jobnr], vdata + y * n);

        for (x = 0; x < n; x++) {
            hdata[x * n + y].re = vdata[y * n + x].re;
            hdata[x * n + y].im = vdata[y * n + x].im;
        }
    }

    return 0;
}

static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        av_fft_permute(s->ifft[plane][jobnr], hdata + y * n);
        av_fft_calc(s->ifft[plane][jobnr], hdata + y * n);
    }

    return 0;
}
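/* Write the real part of the inverse transform back into the frame,
 * swapping the four quadrants (the spatial result wraps around (0,0)
 * in the FFT buffer) and clipping to the plane's bit depth. */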
static void get_output(ConvolveContext *s, FFTComplex *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale)
{
    const int max = (1 << s->depth) - 1;
    const int hh = h / 2;
    const int hw = w / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
        }
    } else {
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
        }
    }
}
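/* Pointwise complex product of the two spectra (with 'noise' added to the
 * impulse spectrum's real part); multiplication in the frequency domain
 * corresponds to circular convolution in the spatial domain. */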
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *input = td->hdata;
    FFTComplex *filter = td->vdata;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            FFTSample re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re + noise;
            iim = filter[yn + x].im;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}
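/* Pointwise division by the impulse spectrum, regularized by adding 'noise'
 * to |H|^2 in the denominator (a Wiener-style guard against division by
 * near-zero frequency components); this implements deconvolution. */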
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *input = td->hdata;
    FFTComplex *filter = td->vdata;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            FFTSample re, im, ire, iim, div;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re;
            iim = filter[yn + x].im;
            div = ire * ire + iim * iim + noise;

            input[yn + x].re = (ire * re + iim * im) / div;
            input[yn + x].im = (ire * im - iim * re) / div;
        }
    }

    return 0;
}
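/* Per-frame framesync callback: forward-transform the main frame, transform
 * the (sum-normalized) impulse frame either once or on every frame depending
 * on the 'impulse' option, apply the selected frequency-domain operation,
 * then inverse-transform and write the result back, scaled by 1/(n*n). */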
static int do_convolve(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    ConvolveContext *s = ctx->priv;
    AVFrame *mainpic = NULL, *impulsepic = NULL;
    int ret, y, x, plane;

    ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
    if (ret < 0)
        return ret;
    if (!impulsepic)
        return ff_filter_frame(outlink, mainpic);

    for (plane = 0; plane < s->nb_planes; plane++) {
        FFTComplex *filter = s->fft_vdata_impulse[plane];
        FFTComplex *input = s->fft_vdata[plane];
        const int n = s->fft_len[plane];
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        float total = 0;
        ThreadData td;

        if (!(s->planes & (1 << plane))) {
            continue;
        }

        td.plane = plane, td.n = n;
        get_input(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f);

        td.hdata = s->fft_hdata[plane];
        td.vdata = s->fft_vdata[plane];

        ctx->internal->execute(ctx, fft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ctx->internal->execute(ctx, fft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
            if (s->depth == 8) {
                for (y = 0; y < h; y++) {
                    const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
                    for (x = 0; x < w; x++) {
                        total += src[x];
                    }
                }
            } else {
                for (y = 0; y < h; y++) {
                    const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
                    for (x = 0; x < w; x++) {
                        total += src[x];
                    }
                }
            }
            total = FFMAX(1, total);

            get_input(s, s->fft_hdata_impulse[plane], impulsepic, w, h, n, plane, 1.f / total);

            td.hdata = s->fft_hdata_impulse[plane];
            td.vdata = s->fft_vdata_impulse[plane];

            ctx->internal->execute(ctx, fft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
            ctx->internal->execute(ctx, fft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

            s->got_impulse[plane] = 1;
        }

        td.hdata = input;
        td.vdata = filter;

        ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata = s->fft_hdata[plane];
        td.vdata = s->fft_vdata[plane];

        ctx->internal->execute(ctx, ifft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ctx->internal->execute(ctx, ifft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        get_output(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f / (n * n));
    }

    return ff_filter_frame(outlink, mainpic);
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConvolveContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret, i, j;

    s->fs.on_event = do_convolve;
    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    for (i = 0; i < s->nb_planes; i++) {
        for (j = 0; j < MAX_THREADS; j++) {
            s->fft[i][j]  = av_fft_init(s->fft_bits[i], 0);
            s->ifft[i][j] = av_fft_init(s->fft_bits[i], 1);
            if (!s->fft[i][j] || !s->ifft[i][j])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold int init(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;

    if (!strcmp(ctx->filter->name, "convolve")) {
        s->filter = complex_multiply;
    } else if (!strcmp(ctx->filter->name, "deconvolve")) {
        s->filter = complex_divide;
    } else {
        return AVERROR_BUG;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    int i, j;

    for (i = 0; i < 4; i++) {
        av_freep(&s->fft_hdata[i]);
        av_freep(&s->fft_vdata[i]);
        av_freep(&s->fft_hdata_impulse[i]);
        av_freep(&s->fft_vdata_impulse[i]);

        for (j = 0; j < MAX_THREADS; j++) {
            av_fft_end(s->fft[i][j]);
            s->fft[i][j] = NULL;
            av_fft_end(s->ifft[i][j]);
            s->ifft[i][j] = NULL;
        }
    }

    ff_framesync_uninit(&s->fs);
}
static const AVFilterPad convolve_inputs[] = {
    {
        .name          = "main",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_main,
    },{
        .name          = "impulse",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_impulse,
    },
    { NULL }
};

static const AVFilterPad convolve_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

#if CONFIG_CONVOLVE_FILTER

FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);

AVFilter ff_vf_convolve = {
    .name          = "convolve",
    .description   = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
    .preinit       = convolve_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &convolve_class,
    .inputs        = convolve_inputs,
    .outputs       = convolve_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_CONVOLVE_FILTER */

#if CONFIG_DECONVOLVE_FILTER

static const AVOption deconvolve_options[] = {
    { "planes",  "set planes to deconvolve",                OFFSET(planes),   AV_OPT_TYPE_INT,   {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses",                OFFSET(impulse),  AV_OPT_TYPE_INT,   {.i64=1}, 0,  1, FLAGS, "impulse" },
    {   "first", "process only first impulse, ignore rest", 0,                AV_OPT_TYPE_CONST, {.i64=0}, 0,  0, FLAGS, "impulse" },
    {   "all",   "process all impulses",                    0,                AV_OPT_TYPE_CONST, {.i64=1}, 0,  0, FLAGS, "impulse" },
    { "noise",   "set noise",                               OFFSET(noise),    AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0,  1, FLAGS },
    { NULL },
};

FRAMESYNC_DEFINE_CLASS(deconvolve, ConvolveContext, fs);

AVFilter ff_vf_deconvolve = {
    .name          = "deconvolve",
    .description   = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
    .preinit       = deconvolve_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &deconvolve_class,
    .inputs        = convolve_inputs,
    .outputs       = convolve_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_DECONVOLVE_FILTER */