FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_guided.c
Date: 2024-11-20 23:03:26
Exec Total Coverage
Lines: 0 186 0.0%
Functions: 0 10 0.0%
Branches: 0 126 0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2021 Xuewei Meng
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include "libavutil/imgutils.h"
22 #include "libavutil/mem.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/pixdesc.h"
25 #include "avfilter.h"
26 #include "filters.h"
27 #include "framesync.h"
28 #include "video.h"
29
/* Filtering mode, selected with the "mode" option. */
enum FilterModes {
    BASIC,    // full-resolution guided filter (sub is forced to 1)
    FAST,     // subsampled guided filter, ratio set by the "sub" option
    NB_MODES, // sentinel: number of modes (option range bound)
};
35
/* Guidance mode, selected with the "guidance" option. */
enum GuidanceModes {
    OFF,               // single input: the frame guides itself
    ON,                // two inputs: second input is the guidance frame
    NB_GUIDANCE_MODES, // sentinel: number of modes (option range bound)
};
41
typedef struct GuidedContext {
    const AVClass *class;
    FFFrameSync fs;      // dual-input synchronizer, used only when guidance == ON

    /* user options */
    int radius;          // box radius (divided by sub in FAST mode)
    float eps;           // regularization parameter
    int mode;            // enum FilterModes
    int sub;             // subsampling ratio for FAST mode
    int guidance;        // enum GuidanceModes
    int planes;          // bitmask of planes to filter

    int width;           // main input width
    int height;          // main input height

    int nb_planes;
    int depth;           // bit depth of component 0
    int planewidth[4];   // per-plane width (chroma-subsampled for planes 1/2)
    int planeheight[4];  // per-plane height

    /* w*h float scratch buffers for the guided-filter intermediates,
     * allocated in config_output(), freed in uninit():
     * I = guide, P = input, and their products / box-filtered means. */
    float *I;
    float *II;
    float *P;
    float *IP;
    float *meanI;
    float *meanII;
    float *meanP;
    float *meanIP;

    float *A;            // per-pixel linear coefficient a
    float *B;            // per-pixel linear coefficient b
    float *meanA;
    float *meanB;

    /* slice-threaded box filter job */
    int (*box_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} GuidedContext;
77
#define OFFSET(x) offsetof(GuidedContext, x)
/* TFLAGS: runtime-settable options; FLAGS: fixed at init (guidance changes pad count) */
#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption guided_options[] = {
    { "radius",   "set the box radius",                               OFFSET(radius),   AV_OPT_TYPE_INT,   {.i64 = 3    },  1,                    20, TFLAGS },
    { "eps",      "set the regularization parameter (with square)",   OFFSET(eps),      AV_OPT_TYPE_FLOAT, {.dbl = 0.01 },  0.0,                   1, TFLAGS },
    { "mode",     "set filtering mode (0: basic mode; 1: fast mode)", OFFSET(mode),     AV_OPT_TYPE_INT,   {.i64 = BASIC}, BASIC,        NB_MODES - 1, TFLAGS, .unit = "mode" },
    { "basic",    "basic guided filter",                              0,                AV_OPT_TYPE_CONST, {.i64 = BASIC},  0,                     0, TFLAGS, .unit = "mode" },
    { "fast",     "fast guided filter",                               0,                AV_OPT_TYPE_CONST, {.i64 = FAST },  0,                     0, TFLAGS, .unit = "mode" },
    { "sub",      "subsampling ratio for fast mode",                  OFFSET(sub),      AV_OPT_TYPE_INT,   {.i64 = 4    },  2,                    64, TFLAGS },
    { "guidance", "set guidance mode (0: off mode; 1: on mode)",      OFFSET(guidance), AV_OPT_TYPE_INT,   {.i64 = OFF  },  OFF, NB_GUIDANCE_MODES - 1, FLAGS, .unit = "guidance" },
    { "off",      "only one input is enabled",                        0,                AV_OPT_TYPE_CONST, {.i64 = OFF  },  0,                     0, FLAGS, .unit = "guidance" },
    { "on",       "two inputs are required",                          0,                AV_OPT_TYPE_CONST, {.i64 = ON   },  0,                     0, FLAGS, .unit = "guidance" },
    { "planes",   "set planes to filter",                             OFFSET(planes),   AV_OPT_TYPE_INT,   {.i64 = 1    },  0,                   0xF, TFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(guided);
97
/* Per-job arguments for the slice-threaded box filter. */
typedef struct ThreadData {
    int width;      // plane width in floats
    int height;     // plane height; rows are split across jobs
    float *src;     // source float plane
    float *dst;     // destination float plane
    int srcStride;  // source stride in floats
    int dstStride;  // destination stride in floats
} ThreadData;
106
107 static int box_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
108 {
109 GuidedContext *s = ctx->priv;
110 ThreadData *t = arg;
111
112 const int width = t->width;
113 const int height = t->height;
114 const int src_stride = t->srcStride;
115 const int dst_stride = t->dstStride;
116 const int slice_start = (height * jobnr) / nb_jobs;
117 const int slice_end = (height * (jobnr + 1)) / nb_jobs;
118 const int radius = s->radius;
119 const float *src = t->src;
120 float *dst = t->dst;
121
122 int w;
123 int numPix;
124 w = (radius << 1) + 1;
125 numPix = w * w;
126 for (int i = slice_start;i < slice_end;i++) {
127 for (int j = 0;j < width;j++) {
128 float temp = 0.0;
129 for (int row = -radius;row <= radius;row++) {
130 for (int col = -radius;col <= radius;col++) {
131 int x = i + row;
132 int y = j + col;
133 x = (x < 0) ? 0 : (x >= height ? height - 1 : x);
134 y = (y < 0) ? 0 : (y >= width ? width - 1 : y);
135 temp += src[x * src_stride + y];
136 }
137 }
138 dst[i * dst_stride + j] = temp / numPix;
139 }
140 }
141 return 0;
142 }
143
/* Supported formats: planar YUV/GBR/gray, 8-16 bits, with or without alpha.
 * Each plane is processed independently as float data, so any planar
 * layout works; packed and bayer formats are excluded. */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_NONE
};
164
165 static int config_input(AVFilterLink *inlink)
166 {
167 AVFilterContext *ctx = inlink->dst;
168 GuidedContext *s = ctx->priv;
169 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
170
171 if (s->mode == BASIC) {
172 s->sub = 1;
173 } else if (s->mode == FAST) {
174 if (s->radius >= s->sub)
175 s->radius = s->radius / s->sub;
176 else {
177 s->radius = 1;
178 }
179 }
180
181 s->depth = desc->comp[0].depth;
182 s->width = ctx->inputs[0]->w;
183 s->height = ctx->inputs[0]->h;
184
185 s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
186 s->planewidth[0] = s->planewidth[3] = inlink->w;
187 s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
188 s->planeheight[0] = s->planeheight[3] = inlink->h;
189
190 s->nb_planes = av_pix_fmt_count_planes(inlink->format);
191 s->box_slice = box_slice;
192 return 0;
193 }
194
/*
 * GUIDED() expands to one depth-specific implementation of the guided
 * filter (guided_byte for 8-bit planes, guided_word for 9-16 bit planes).
 *
 * Algorithm (per plane, all in normalized float [0,1]):
 *   1. Subsample guide I (src) and input P (srcRef) by `sub` and build
 *      the products I*I and I*P.
 *   2. Box-filter I, II, P, IP via slice-threaded box_slice() to get
 *      their local means.
 *   3. Per sample: varI = meanII - meanI^2, covIP = meanIP - meanI*meanP;
 *      A = covIP / (varI + eps), B = meanP - A * meanI.
 *   4. Box-filter A and B, then form the full-resolution output
 *      dst = meanA * src + meanB (rescaled by maxval).
 *
 * All scratch buffers live in GuidedContext and are sized w*h at full
 * resolution (config_output), so the sub-sampled h*w usage always fits.
 * Strides are expressed in elements of `type`, not bytes.
 */
#define GUIDED(type, name)                                                          \
static int guided_##name(AVFilterContext *ctx, GuidedContext *s,                    \
                         const uint8_t *ssrc, const uint8_t *ssrcRef,               \
                         uint8_t *ddst, int radius, float eps, int width, int height,\
                         int src_stride, int src_ref_stride, int dst_stride,        \
                         float maxval)                                              \
{                                                                                   \
    int ret = 0;                                                                    \
    type *dst = (type *)ddst;                                                       \
    const type *src = (const type *)ssrc;                                           \
    const type *srcRef = (const type *)ssrcRef;                                     \
                                                                                    \
    int sub = s->sub;                                                               \
    int h = (height % sub) == 0 ? height / sub : height / sub + 1;                  \
    int w = (width % sub) == 0 ? width / sub : width / sub + 1;                     \
                                                                                    \
    ThreadData t;                                                                   \
    const int nb_threads = ff_filter_get_nb_threads(ctx);                           \
    float *I = s->I;                                                                \
    float *II = s->II;                                                              \
    float *P = s->P;                                                                \
    float *IP = s->IP;                                                              \
    float *meanI = s->meanI;                                                        \
    float *meanII = s->meanII;                                                      \
    float *meanP = s->meanP;                                                        \
    float *meanIP = s->meanIP;                                                      \
    float *A = s->A;                                                                \
    float *B = s->B;                                                                \
    float *meanA = s->meanA;                                                        \
    float *meanB = s->meanB;                                                        \
                                                                                    \
    for (int i = 0;i < h;i++) {                                                     \
        for (int j = 0;j < w;j++) {                                                 \
            int x = i * w + j;                                                      \
            I[x] = src[(i * src_stride + j) * sub] / maxval;                        \
            II[x] = I[x] * I[x];                                                    \
            P[x] = srcRef[(i * src_ref_stride + j) * sub] / maxval;                 \
            IP[x] = I[x] * P[x];                                                    \
        }                                                                           \
    }                                                                               \
                                                                                    \
    t.width  = w;                                                                   \
    t.height = h;                                                                   \
    t.srcStride = w;                                                                \
    t.dstStride = w;                                                                \
    t.src = I;                                                                      \
    t.dst = meanI;                                                                  \
    ff_filter_execute(ctx, s->box_slice, &t, NULL, FFMIN(h, nb_threads));           \
    t.src = II;                                                                     \
    t.dst = meanII;                                                                 \
    ff_filter_execute(ctx, s->box_slice, &t, NULL, FFMIN(h, nb_threads));           \
    t.src = P;                                                                      \
    t.dst = meanP;                                                                  \
    ff_filter_execute(ctx, s->box_slice, &t, NULL, FFMIN(h, nb_threads));           \
    t.src = IP;                                                                     \
    t.dst = meanIP;                                                                 \
    ff_filter_execute(ctx, s->box_slice, &t, NULL, FFMIN(h, nb_threads));           \
                                                                                    \
    for (int i = 0;i < h;i++) {                                                     \
        for (int j = 0;j < w;j++) {                                                 \
            int x = i * w + j;                                                      \
            float varI = meanII[x] - (meanI[x] * meanI[x]);                         \
            float covIP = meanIP[x] - (meanI[x] * meanP[x]);                        \
            A[x] = covIP / (varI + eps);                                            \
            B[x] = meanP[x] - A[x] * meanI[x];                                      \
        }                                                                           \
    }                                                                               \
                                                                                    \
    t.src = A;                                                                      \
    t.dst = meanA;                                                                  \
    ff_filter_execute(ctx, s->box_slice, &t, NULL, FFMIN(h, nb_threads));           \
    t.src = B;                                                                      \
    t.dst = meanB;                                                                  \
    ff_filter_execute(ctx, s->box_slice, &t, NULL, FFMIN(h, nb_threads));           \
                                                                                    \
    for (int i = 0;i < height;i++) {                                                \
        for (int j = 0;j < width;j++) {                                             \
            int x = i / sub * w + j / sub;                                          \
            dst[i * dst_stride + j] = meanA[x] * src[i * src_stride + j] +          \
                                      meanB[x] * maxval;                            \
        }                                                                           \
    }                                                                               \
                                                                                    \
    return ret;                                                                     \
}

/* Instantiate the 8-bit and 16-bit variants. */
GUIDED(uint8_t, byte)
GUIDED(uint16_t, word)
283
284 static int filter_frame(AVFilterContext *ctx, AVFrame **out, AVFrame *in, AVFrame *ref)
285 {
286 GuidedContext *s = ctx->priv;
287 AVFilterLink *outlink = ctx->outputs[0];
288 *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
289 if (!*out)
290 return AVERROR(ENOMEM);
291 av_frame_copy_props(*out, in);
292
293 for (int plane = 0; plane < s->nb_planes; plane++) {
294 if (!(s->planes & (1 << plane))) {
295 av_image_copy_plane((*out)->data[plane], (*out)->linesize[plane],
296 in->data[plane], in->linesize[plane],
297 s->planewidth[plane] * ((s->depth + 7) / 8), s->planeheight[plane]);
298 continue;
299 }
300 if (s->depth <= 8)
301 guided_byte(ctx, s, in->data[plane], ref->data[plane], (*out)->data[plane], s->radius, s->eps,
302 s->planewidth[plane], s->planeheight[plane],
303 in->linesize[plane], ref->linesize[plane], (*out)->linesize[plane], (1 << s->depth) - 1.f);
304 else
305 guided_word(ctx, s, in->data[plane], ref->data[plane], (*out)->data[plane], s->radius, s->eps,
306 s->planewidth[plane], s->planeheight[plane],
307 in->linesize[plane] / 2, ref->linesize[plane] / 2, (*out)->linesize[plane] / 2, (1 << s->depth) - 1.f);
308 }
309
310 return 0;
311 }
312
313 static int process_frame(FFFrameSync *fs)
314 {
315 AVFilterContext *ctx = fs->parent;
316 AVFilterLink *outlink = ctx->outputs[0];
317 AVFrame *out_frame = NULL, *main_frame = NULL, *ref_frame = NULL;
318 int ret;
319 ret = ff_framesync_dualinput_get(fs, &main_frame, &ref_frame);
320 if (ret < 0)
321 return ret;
322
323 if (ctx->is_disabled)
324 return ff_filter_frame(outlink, main_frame);
325
326 ret = filter_frame(ctx, &out_frame, main_frame, ref_frame);
327 if (ret < 0)
328 return ret;
329 av_frame_free(&main_frame);
330
331 return ff_filter_frame(outlink, out_frame);
332 }
333
334 static int config_output(AVFilterLink *outlink)
335 {
336 AVFilterContext *ctx = outlink->src;
337 GuidedContext *s = ctx->priv;
338 AVFilterLink *mainlink = ctx->inputs[0];
339 FilterLink *il = ff_filter_link(mainlink);
340 FilterLink *ol = ff_filter_link(outlink);
341 FFFrameSyncIn *in;
342 int w, h, ret;
343
344 if (s->guidance == ON) {
345 if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
346 ctx->inputs[0]->h != ctx->inputs[1]->h) {
347 av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
348 return AVERROR(EINVAL);
349 }
350 }
351
352 outlink->w = w = mainlink->w;
353 outlink->h = h = mainlink->h;
354 outlink->time_base = mainlink->time_base;
355 outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
356 ol->frame_rate = il->frame_rate;
357
358 s->I = av_calloc(w * h, sizeof(*s->I));
359 s->II = av_calloc(w * h, sizeof(*s->II));
360 s->P = av_calloc(w * h, sizeof(*s->P));
361 s->IP = av_calloc(w * h, sizeof(*s->IP));
362 s->meanI = av_calloc(w * h, sizeof(*s->meanI));
363 s->meanII = av_calloc(w * h, sizeof(*s->meanII));
364 s->meanP = av_calloc(w * h, sizeof(*s->meanP));
365 s->meanIP = av_calloc(w * h, sizeof(*s->meanIP));
366
367 s->A = av_calloc(w * h, sizeof(*s->A));
368 s->B = av_calloc(w * h, sizeof(*s->B));
369 s->meanA = av_calloc(w * h, sizeof(*s->meanA));
370 s->meanB = av_calloc(w * h, sizeof(*s->meanA));
371
372 if (!s->I || !s->II || !s->P || !s->IP || !s->meanI || !s->meanII || !s->meanP ||
373 !s->meanIP || !s->A || !s->B || !s->meanA || !s->meanB)
374 return AVERROR(ENOMEM);
375
376 if (s->guidance == OFF)
377 return 0;
378
379 if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
380 return ret;
381
382 outlink->time_base = s->fs.time_base;
383
384 in = s->fs.in;
385 in[0].time_base = mainlink->time_base;
386 in[1].time_base = ctx->inputs[1]->time_base;
387 in[0].sync = 2;
388 in[0].before = EXT_INFINITY;
389 in[0].after = EXT_INFINITY;
390 in[1].sync = 1;
391 in[1].before = EXT_INFINITY;
392 in[1].after = EXT_INFINITY;
393 s->fs.opaque = s;
394 s->fs.on_event = process_frame;
395
396 return ff_framesync_configure(&s->fs);
397 }
398
/**
 * Filter activation callback.
 *
 * With guidance ON all scheduling is delegated to the framesync; with a
 * single input this implements the standard manual activate pattern:
 * forward status back, consume one frame, filter it (the frame guides
 * itself), then propagate EOF/status or request more input.
 */
static int activate(AVFilterContext *ctx)
{
    GuidedContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *frame = NULL;
    AVFrame *out = NULL;
    int ret, status;
    int64_t pts;
    if (s->guidance)
        return ff_framesync_activate(&s->fs);

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if ((ret = ff_inlink_consume_frame(inlink, &frame)) > 0) {
        if (ctx->is_disabled)
            return ff_filter_frame(outlink, frame);

        /* single-input mode: the frame is its own guidance */
        ret = filter_frame(ctx, &out, frame, frame);
        av_frame_free(&frame);
        if (ret < 0)
            return ret;
        ret = ff_filter_frame(outlink, out);
    }
    /* ret here is either the consume result (<= 0) or the filter result */
    if (ret < 0)
        return ret;
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }
    if (ff_outlink_frame_wanted(outlink))
        ff_inlink_request_frame(inlink);
    return 0;
}
433
434 static av_cold int init(AVFilterContext *ctx)
435 {
436 GuidedContext *s = ctx->priv;
437 AVFilterPad pad = { 0 };
438 int ret;
439
440 pad.type = AVMEDIA_TYPE_VIDEO;
441 pad.name = "source";
442 pad.config_props = config_input;
443
444 if ((ret = ff_append_inpad(ctx, &pad)) < 0)
445 return ret;
446
447 if (s->guidance == ON) {
448 pad.type = AVMEDIA_TYPE_VIDEO;
449 pad.name = "guidance";
450 pad.config_props = NULL;
451
452 if ((ret = ff_append_inpad(ctx, &pad)) < 0)
453 return ret;
454 }
455
456 return 0;
457 }
458
459 static av_cold void uninit(AVFilterContext *ctx)
460 {
461 GuidedContext *s = ctx->priv;
462 if (s->guidance == ON)
463 ff_framesync_uninit(&s->fs);
464
465 av_freep(&s->I);
466 av_freep(&s->II);
467 av_freep(&s->P);
468 av_freep(&s->IP);
469 av_freep(&s->meanI);
470 av_freep(&s->meanII);
471 av_freep(&s->meanP);
472 av_freep(&s->meanIP);
473 av_freep(&s->A);
474 av_freep(&s->B);
475 av_freep(&s->meanA);
476 av_freep(&s->meanB);
477
478 return;
479 }
480
/* Single video output; config_output allocates buffers and wires framesync. */
static const AVFilterPad guided_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};
488
/* Filter definition. Inputs are dynamic (1 or 2, created in init()),
 * hence .inputs = NULL plus AVFILTER_FLAG_DYNAMIC_INPUTS. */
const AVFilter ff_vf_guided = {
    .name            = "guided",
    .description     = NULL_IF_CONFIG_SMALL("Apply Guided filter."),
    .init            = init,
    .uninit          = uninit,
    .priv_size       = sizeof(GuidedContext),
    .priv_class      = &guided_class,
    .activate        = activate,
    .inputs          = NULL,
    FILTER_OUTPUTS(guided_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags           = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_SLICE_THREADS |
                       AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = ff_filter_process_command,
};
504