FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_vaguedenoiser.c
Date: 2024-11-20 23:03:26
             Exec    Total   Coverage
Lines:          0      291       0.0%
Functions:      0       15       0.0%
Branches:       0      136       0.0%
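
The zero counts mean this file was compiled into the instrumented build but never executed during the run that produced this report. The page layout matches a gcov-based generator such as gcovr; as a rough sketch only (the configure flag and gcovr options are assumptions about the local setup, not taken from this report), a report like this is typically produced along these lines:

    # build FFmpeg with gcov instrumentation (adjust for your checkout)
    ./configure --toolchain=gcov
    make -j$(nproc)

    # run whatever tests or commands should exercise the code, then collect results
    gcovr -r . --html --html-details -o coverage.html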

Line Branch Exec Source
1 /*
2 * Copyright (c) 2003 LeFunGus, lefungus@altern.org
3 *
4 * This file is part of FFmpeg
5 *
6 * FFmpeg is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #include <float.h>
22
23 #include "libavutil/imgutils.h"
24 #include "libavutil/attributes.h"
25 #include "libavutil/common.h"
26 #include "libavutil/mem.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavutil/opt.h"
29
30 #include "avfilter.h"
31 #include "filters.h"
32 #include "video.h"
33
34 typedef struct VagueDenoiserContext {
35 const AVClass *class;
36
37 float threshold;
38 float percent;
39 int method;
40 int type;
41 int nsteps;
42 int planes;
43
44 int depth;
45 int bpc;
46 int peak;
47 int nb_planes;
48 int planeheight[4];
49 int planewidth[4];
50
51 float *block;
52 float *in;
53 float *out;
54 float *tmp;
55
56 int hlowsize[4][32];
57 int hhighsize[4][32];
58 int vlowsize[4][32];
59 int vhighsize[4][32];
60
61 void (*thresholding)(float *block, const int width, const int height,
62 const int stride, const float threshold,
63 const float percent);
64 } VagueDenoiserContext;
65
66 #define OFFSET(x) offsetof(VagueDenoiserContext, x)
67 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
68 static const AVOption vaguedenoiser_options[] = {
69 { "threshold", "set filtering strength", OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.}, 0,DBL_MAX, FLAGS },
70 { "method", "set filtering method", OFFSET(method), AV_OPT_TYPE_INT, {.i64=2 }, 0, 2, FLAGS, .unit = "method" },
71 { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, .unit = "method" },
72 { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, .unit = "method" },
73 { "garrote", "garrote thresholding", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, .unit = "method" },
74 { "nsteps", "set number of steps", OFFSET(nsteps), AV_OPT_TYPE_INT, {.i64=6 }, 1, 32, FLAGS },
75 { "percent", "set percent of full denoising", OFFSET(percent),AV_OPT_TYPE_FLOAT, {.dbl=85}, 0,100, FLAGS },
76 { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15 }, 0, 15, FLAGS },
77 { "type", "set threshold type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=0 }, 0, 1, FLAGS, .unit = "type" },
78 { "universal", "universal (VisuShrink)", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, .unit = "type" },
79 { "bayes", "bayes (BayesShrink)", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, .unit = "type" },
80 { NULL }
81 };
82
83 AVFILTER_DEFINE_CLASS(vaguedenoiser);
84
85 #define NPAD 10
86
87 static const float analysis_low[9] = {
88 0.037828455506995f, -0.023849465019380f, -0.110624404418423f, 0.377402855612654f,
89 0.852698679009403f, 0.377402855612654f, -0.110624404418423f, -0.023849465019380f, 0.037828455506995f
90 };
91
92 static const float analysis_high[7] = {
93 -0.064538882628938f, 0.040689417609558f, 0.418092273222212f, -0.788485616405664f,
94 0.418092273222212f, 0.040689417609558f, -0.064538882628938f
95 };
96
97 static const float synthesis_low[7] = {
98 -0.064538882628938f, -0.040689417609558f, 0.418092273222212f, 0.788485616405664f,
99 0.418092273222212f, -0.040689417609558f, -0.064538882628938f
100 };
101
102 static const float synthesis_high[9] = {
103 -0.037828455506995f, -0.023849465019380f, 0.110624404418423f, 0.377402855612654f,
104 -0.852698679009403f, 0.377402855612654f, 0.110624404418423f, -0.023849465019380f, -0.037828455506995f
105 };
106
107 static const enum AVPixelFormat pix_fmts[] = {
108 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
109 AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
110 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
111 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
112 AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
113 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
114 AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
115 AV_PIX_FMT_YUVJ411P,
116 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
117 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
118 AV_PIX_FMT_YUV440P10,
119 AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
120 AV_PIX_FMT_YUV440P12,
121 AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
122 AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
123 AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
124 AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
125 AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
126 AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
127 AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
128 AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
129 AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
130 AV_PIX_FMT_NONE
131 };
132
133 static int config_input(AVFilterLink *inlink)
134 {
135 VagueDenoiserContext *s = inlink->dst->priv;
136 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
137 int p, i, nsteps_width, nsteps_height, nsteps_max;
138
139 s->depth = desc->comp[0].depth;
140 s->bpc = (s->depth + 7) / 8;
141 s->nb_planes = desc->nb_components;
142
143 s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
144 s->planeheight[0] = s->planeheight[3] = inlink->h;
145 s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
146 s->planewidth[0] = s->planewidth[3] = inlink->w;
147
148 s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
149 s->in = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
150 s->out = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
151 s->tmp = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));
152
153 if (!s->block || !s->in || !s->out || !s->tmp)
154 return AVERROR(ENOMEM);
155
156 s->threshold *= 1 << (s->depth - 8);
157 s->peak = (1 << s->depth) - 1;
158
159 nsteps_width = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1] : s->planewidth[0];
160 nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];
161
162 for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
163 if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
164 break;
165 }
166
167 s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);
168
169 for (p = 0; p < 4; p++) {
170 s->hlowsize[p][0] = (s->planewidth[p] + 1) >> 1;
171 s->hhighsize[p][0] = s->planewidth[p] >> 1;
172 s->vlowsize[p][0] = (s->planeheight[p] + 1) >> 1;
173 s->vhighsize[p][0] = s->planeheight[p] >> 1;
174
175 for (i = 1; i < s->nsteps; i++) {
176 s->hlowsize[p][i] = (s->hlowsize[p][i - 1] + 1) >> 1;
177 s->hhighsize[p][i] = s->hlowsize[p][i - 1] >> 1;
178 s->vlowsize[p][i] = (s->vlowsize[p][i - 1] + 1) >> 1;
179 s->vhighsize[p][i] = s->vlowsize[p][i - 1] >> 1;
180 }
181 }
182
183 return 0;
184 }
185
186 static inline void copy(const float *p1, float *p2, const int length)
187 {
188 memcpy(p2, p1, length * sizeof(float));
189 }
190
191 static inline void copyv(const float *p1, const int stride1, float *p2, const int length)
192 {
193 int i;
194
195 for (i = 0; i < length; i++) {
196 p2[i] = *p1;
197 p1 += stride1;
198 }
199 }
200
201 static inline void copyh(const float *p1, float *p2, const int stride2, const int length)
202 {
203 int i;
204
205 for (i = 0; i < length; i++) {
206 *p2 = p1[i];
207 p2 += stride2;
208 }
209 }
210
211 // Do symmetric extension of data using prescribed symmetries
212 // Original values are in output[npad] through output[npad+size-1]
213 // New values will be placed in output[0] through output[npad] and in output[npad+size] through output[2*npad+size-1] (note: end values may not be filled in)
214 // extension at left bdry is ... 3 2 1 0 | 0 1 2 3 ...
215 // same for right boundary
216 // if right_ext=1 then ... 3 2 1 0 | 1 2 3
217 static void symmetric_extension(float *output, const int size, const int left_ext, const int right_ext)
218 {
219 int first = NPAD;
220 int last = NPAD - 1 + size;
221 const int originalLast = last;
222 int i, nextend, idx;
223
224 if (left_ext == 2)
225 output[--first] = output[NPAD];
226 if (right_ext == 2)
227 output[++last] = output[originalLast];
228
229 // extend left end
230 nextend = first;
231 for (i = 0; i < nextend; i++)
232 output[--first] = output[NPAD + 1 + i];
233
234 idx = NPAD + NPAD - 1 + size;
235
236 // extend right end
237 nextend = idx - last;
238 for (i = 0; i < nextend; i++)
239 output[++last] = output[originalLast - 1 - i];
240 }
241
242 static void transform_step(float *input, float *output, const int size, const int low_size, VagueDenoiserContext *s)
243 {
244 int i;
245
246 symmetric_extension(input, size, 1, 1);
247
248 for (i = NPAD; i < NPAD + low_size; i++) {
249 const float a = input[2 * i - 14] * analysis_low[0];
250 const float b = input[2 * i - 13] * analysis_low[1];
251 const float c = input[2 * i - 12] * analysis_low[2];
252 const float d = input[2 * i - 11] * analysis_low[3];
253 const float e = input[2 * i - 10] * analysis_low[4];
254 const float f = input[2 * i - 9] * analysis_low[3];
255 const float g = input[2 * i - 8] * analysis_low[2];
256 const float h = input[2 * i - 7] * analysis_low[1];
257 const float k = input[2 * i - 6] * analysis_low[0];
258
259 output[i] = a + b + c + d + e + f + g + h + k;
260 }
261
262 for (i = NPAD; i < NPAD + low_size; i++) {
263 const float a = input[2 * i - 12] * analysis_high[0];
264 const float b = input[2 * i - 11] * analysis_high[1];
265 const float c = input[2 * i - 10] * analysis_high[2];
266 const float d = input[2 * i - 9] * analysis_high[3];
267 const float e = input[2 * i - 8] * analysis_high[2];
268 const float f = input[2 * i - 7] * analysis_high[1];
269 const float g = input[2 * i - 6] * analysis_high[0];
270
271 output[i + low_size] = a + b + c + d + e + f + g;
272 }
273 }
274
275 static void invert_step(const float *input, float *output, float *temp, const int size, VagueDenoiserContext *s)
276 {
277 const int low_size = (size + 1) >> 1;
278 const int high_size = size >> 1;
279 int left_ext = 1, right_ext, i;
280 int findex;
281
282 memcpy(temp + NPAD, input + NPAD, low_size * sizeof(float));
283
284 right_ext = (size % 2 == 0) ? 2 : 1;
285 symmetric_extension(temp, low_size, left_ext, right_ext);
286
287 memset(output, 0, (NPAD + NPAD + size) * sizeof(float));
288 findex = (size + 2) >> 1;
289
290 for (i = 9; i < findex + 11; i++) {
291 const float a = temp[i] * synthesis_low[0];
292 const float b = temp[i] * synthesis_low[1];
293 const float c = temp[i] * synthesis_low[2];
294 const float d = temp[i] * synthesis_low[3];
295
296 output[2 * i - 13] += a;
297 output[2 * i - 12] += b;
298 output[2 * i - 11] += c;
299 output[2 * i - 10] += d;
300 output[2 * i - 9] += c;
301 output[2 * i - 8] += b;
302 output[2 * i - 7] += a;
303 }
304
305 memcpy(temp + NPAD, input + NPAD + low_size, high_size * sizeof(float));
306
307 left_ext = 2;
308 right_ext = (size % 2 == 0) ? 1 : 2;
309 symmetric_extension(temp, high_size, left_ext, right_ext);
310
311 for (i = 8; i < findex + 11; i++) {
312 const float a = temp[i] * synthesis_high[0];
313 const float b = temp[i] * synthesis_high[1];
314 const float c = temp[i] * synthesis_high[2];
315 const float d = temp[i] * synthesis_high[3];
316 const float e = temp[i] * synthesis_high[4];
317
318 output[2 * i - 13] += a;
319 output[2 * i - 12] += b;
320 output[2 * i - 11] += c;
321 output[2 * i - 10] += d;
322 output[2 * i - 9] += e;
323 output[2 * i - 8] += d;
324 output[2 * i - 7] += c;
325 output[2 * i - 6] += b;
326 output[2 * i - 5] += a;
327 }
328 }
329
330 static void hard_thresholding(float *block, const int width, const int height,
331 const int stride, const float threshold,
332 const float percent)
333 {
334 const float frac = 1.f - percent * 0.01f;
335 int y, x;
336
337 for (y = 0; y < height; y++) {
338 for (x = 0; x < width; x++) {
339 if (FFABS(block[x]) <= threshold)
340 block[x] *= frac;
341 }
342 block += stride;
343 }
344 }
345
346 static void soft_thresholding(float *block, const int width, const int height, const int stride,
347 const float threshold, const float percent)
348 {
349 const float frac = 1.f - percent * 0.01f;
350 const float shift = threshold * 0.01f * percent;
351 int y, x;
352
353 for (y = 0; y < height; y++) {
354 for (x = 0; x < width; x++) {
355 const float temp = FFABS(block[x]);
356 if (temp <= threshold)
357 block[x] *= frac;
358 else
359 block[x] = (block[x] < 0.f ? -1.f : (block[x] > 0.f ? 1.f : 0.f)) * (temp - shift);
360 }
361 block += stride;
362 }
363 }
364
365 static void qian_thresholding(float *block, const int width, const int height,
366 const int stride, const float threshold,
367 const float percent)
368 {
369 const float percent01 = percent * 0.01f;
370 const float tr2 = threshold * threshold * percent01;
371 const float frac = 1.f - percent01;
372 int y, x;
373
374 for (y = 0; y < height; y++) {
375 for (x = 0; x < width; x++) {
376 const float temp = FFABS(block[x]);
377 if (temp <= threshold) {
378 block[x] *= frac;
379 } else {
380 const float tp2 = temp * temp;
381 block[x] *= (tp2 - tr2) / tp2;
382 }
383 }
384 block += stride;
385 }
386 }
387
388 static float bayes_threshold(float *block, const int width, const int height,
389 const int stride, const float threshold)
390 {
391 float mean = 0.f;
392
393 for (int y = 0; y < height; y++) {
394 for (int x = 0; x < width; x++) {
395 mean += block[x] * block[x];
396 }
397 block += stride;
398 }
399
400 mean /= width * height;
401
402 return threshold * threshold / (FFMAX(sqrtf(mean - threshold), FLT_EPSILON));
403 }
404
405 static void filter(VagueDenoiserContext *s, AVFrame *in, AVFrame *out)
406 {
407 int p, y, x, i, j;
408
409 for (p = 0; p < s->nb_planes; p++) {
410 const int height = s->planeheight[p];
411 const int width = s->planewidth[p];
412 const uint8_t *srcp8 = in->data[p];
413 const uint16_t *srcp16 = (const uint16_t *)in->data[p];
414 uint8_t *dstp8 = out->data[p];
415 uint16_t *dstp16 = (uint16_t *)out->data[p];
416 float *output = s->block;
417 int h_low_size0 = width;
418 int v_low_size0 = height;
419 int nsteps_transform = s->nsteps;
420 int nsteps_invert = s->nsteps;
421 const float *input = s->block;
422
423 if (!((1 << p) & s->planes)) {
424 av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p],
425 s->planewidth[p] * s->bpc, s->planeheight[p]);
426 continue;
427 }
428
429 if (s->depth <= 8) {
430 for (y = 0; y < height; y++) {
431 for (x = 0; x < width; x++)
432 output[x] = srcp8[x];
433 srcp8 += in->linesize[p];
434 output += width;
435 }
436 } else {
437 for (y = 0; y < height; y++) {
438 for (x = 0; x < width; x++)
439 output[x] = srcp16[x];
440 srcp16 += in->linesize[p] / 2;
441 output += width;
442 }
443 }
444
445 while (nsteps_transform--) {
446 int low_size = (h_low_size0 + 1) >> 1;
447 float *input = s->block;
448 for (j = 0; j < v_low_size0; j++) {
449 copy(input, s->in + NPAD, h_low_size0);
450 transform_step(s->in, s->out, h_low_size0, low_size, s);
451 copy(s->out + NPAD, input, h_low_size0);
452 input += width;
453 }
454
455 low_size = (v_low_size0 + 1) >> 1;
456 input = s->block;
457 for (j = 0; j < h_low_size0; j++) {
458 copyv(input, width, s->in + NPAD, v_low_size0);
459 transform_step(s->in, s->out, v_low_size0, low_size, s);
460 copyh(s->out + NPAD, input, width, v_low_size0);
461 input++;
462 }
463
464 h_low_size0 = (h_low_size0 + 1) >> 1;
465 v_low_size0 = (v_low_size0 + 1) >> 1;
466 }
467
468 if (s->type == 0) {
469 s->thresholding(s->block, width, height, width, s->threshold, s->percent);
470 } else {
471 for (int n = 0; n < s->nsteps; n++) {
472 float threshold;
473 float *block;
474
475 if (n == s->nsteps - 1) {
476 threshold = bayes_threshold(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, s->threshold);
477 s->thresholding(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
478 }
479 block = s->block + s->hlowsize[p][n];
480 threshold = bayes_threshold(block, s->hhighsize[p][n], s->vlowsize[p][n], width, s->threshold);
481 s->thresholding(block, s->hhighsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
482 block = s->block + s->vlowsize[p][n] * width;
483 threshold = bayes_threshold(block, s->hlowsize[p][n], s->vhighsize[p][n], width, s->threshold);
484 s->thresholding(block, s->hlowsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
485 block = s->block + s->hlowsize[p][n] + s->vlowsize[p][n] * width;
486 threshold = bayes_threshold(block, s->hhighsize[p][n], s->vhighsize[p][n], width, s->threshold);
487 s->thresholding(block, s->hhighsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
488 }
489 }
490
491 while (nsteps_invert--) {
492 const int idx = s->vlowsize[p][nsteps_invert] + s->vhighsize[p][nsteps_invert];
493 const int idx2 = s->hlowsize[p][nsteps_invert] + s->hhighsize[p][nsteps_invert];
494 float * idx3 = s->block;
495 for (i = 0; i < idx2; i++) {
496 copyv(idx3, width, s->in + NPAD, idx);
497 invert_step(s->in, s->out, s->tmp, idx, s);
498 copyh(s->out + NPAD, idx3, width, idx);
499 idx3++;
500 }
501
502 idx3 = s->block;
503 for (i = 0; i < idx; i++) {
504 copy(idx3, s->in + NPAD, idx2);
505 invert_step(s->in, s->out, s->tmp, idx2, s);
506 copy(s->out + NPAD, idx3, idx2);
507 idx3 += width;
508 }
509 }
510
511 if (s->depth <= 8) {
512 for (y = 0; y < height; y++) {
513 for (x = 0; x < width; x++)
514 dstp8[x] = av_clip_uint8(input[x] + 0.5f);
515 input += width;
516 dstp8 += out->linesize[p];
517 }
518 } else {
519 for (y = 0; y < height; y++) {
520 for (x = 0; x < width; x++)
521 dstp16[x] = av_clip(input[x] + 0.5f, 0, s->peak);
522 input += width;
523 dstp16 += out->linesize[p] / 2;
524 }
525 }
526 }
527 }
528
529 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
530 {
531 AVFilterContext *ctx = inlink->dst;
532 VagueDenoiserContext *s = ctx->priv;
533 AVFilterLink *outlink = ctx->outputs[0];
534 AVFrame *out;
535 int direct = av_frame_is_writable(in);
536
537 if (direct) {
538 out = in;
539 } else {
540 out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
541 if (!out) {
542 av_frame_free(&in);
543 return AVERROR(ENOMEM);
544 }
545
546 av_frame_copy_props(out, in);
547 }
548
549 filter(s, in, out);
550
551 if (!direct)
552 av_frame_free(&in);
553
554 return ff_filter_frame(outlink, out);
555 }
556
557 static av_cold int init(AVFilterContext *ctx)
558 {
559 VagueDenoiserContext *s = ctx->priv;
560
561 switch (s->method) {
562 case 0:
563 s->thresholding = hard_thresholding;
564 break;
565 case 1:
566 s->thresholding = soft_thresholding;
567 break;
568 case 2:
569 s->thresholding = qian_thresholding;
570 break;
571 }
572
573 return 0;
574 }
575
576 static av_cold void uninit(AVFilterContext *ctx)
577 {
578 VagueDenoiserContext *s = ctx->priv;
579
580 av_freep(&s->block);
581 av_freep(&s->in);
582 av_freep(&s->out);
583 av_freep(&s->tmp);
584 }
585
586 static const AVFilterPad vaguedenoiser_inputs[] = {
587 {
588 .name = "default",
589 .type = AVMEDIA_TYPE_VIDEO,
590 .config_props = config_input,
591 .filter_frame = filter_frame,
592 },
593 };
594
595
596 const AVFilter ff_vf_vaguedenoiser = {
597 .name = "vaguedenoiser",
598 .description = NULL_IF_CONFIG_SMALL("Apply a Wavelet based Denoiser."),
599 .priv_size = sizeof(VagueDenoiserContext),
600 .priv_class = &vaguedenoiser_class,
601 .init = init,
602 .uninit = uninit,
603 FILTER_INPUTS(vaguedenoiser_inputs),
604 FILTER_OUTPUTS(ff_video_default_filterpad),
605 FILTER_PIXFMTS_ARRAY(pix_fmts),
606 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
607 };
608
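
Every executable line above shows zero hits, which suggests no test in this run invoked the vaguedenoiser filter at all. As an illustration only (file names and option values are placeholders, not taken from this report), running the filter through the ffmpeg command-line tool under the instrumented build would exercise the bulk of the code listed here:

    ffmpeg -i input.mp4 -vf "vaguedenoiser=threshold=3:method=soft:nsteps=5:percent=85" denoised.mp4

The option names map directly to the vaguedenoiser_options table in the source above (threshold, method, nsteps, percent, planes, type); only the thresholding function selected by "method" would be hit by any single run.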