FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_derain.c
Date: 2022-12-05 03:11:11

            Exec  Total  Coverage
Lines:         0     28      0.0%
Functions:     0      3      0.0%
Branches:      0      8      0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2019 Xuewei Meng
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * Filter implementing image derain filter using deep convolutional networks.
24 * http://openaccess.thecvf.com/content_ECCV_2018/html/Xia_Li_Recurrent_Squeeze-and-Excitation_Context_ECCV_2018_paper.html
25 */
26
27 #include "libavformat/avio.h"
28 #include "libavutil/opt.h"
29 #include "avfilter.h"
30 #include "dnn_filter_common.h"
31 #include "formats.h"
32 #include "internal.h"
33
34 typedef struct DRContext {
35     const AVClass *class;
36     DnnContext dnnctx;
37     int filter_type;
38 } DRContext;
39
40 #define OFFSET(x) offsetof(DRContext, x)
41 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
42 static const AVOption derain_options[] = {
43 { "filter_type", "filter type(derain/dehaze)", OFFSET(filter_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "type" },
44 { "derain", "derain filter flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "type" },
45 { "dehaze", "dehaze filter flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "type" },
46 { "dnn_backend", "DNN backend", OFFSET(dnnctx.backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
47 { "native", "native backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "backend" },
48 #if (CONFIG_LIBTENSORFLOW == 1)
49 { "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "backend" },
50 #endif
51 { "model", "path to model file", OFFSET(dnnctx.model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
52 { "input", "input name of the model", OFFSET(dnnctx.model_inputname), AV_OPT_TYPE_STRING, { .str = "x" }, 0, 0, FLAGS },
53 { "output", "output name of the model", OFFSET(dnnctx.model_outputnames_string), AV_OPT_TYPE_STRING, { .str = "y" }, 0, 0, FLAGS },
54 { NULL }
55 };
56
57 AVFILTER_DEFINE_CLASS(derain);
58
59 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
60 {
61     DNNAsyncStatusType async_state = 0;
62     AVFilterContext *ctx = inlink->dst;
63     AVFilterLink *outlink = ctx->outputs[0];
64     DRContext *dr_context = ctx->priv;
65     int dnn_result;
66     AVFrame *out;
67
68     out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
69     if (!out) {
70         av_log(ctx, AV_LOG_ERROR, "could not allocate memory for output frame\n");
71         av_frame_free(&in);
72         return AVERROR(ENOMEM);
73     }
74     av_frame_copy_props(out, in);
75
76     dnn_result = ff_dnn_execute_model(&dr_context->dnnctx, in, out);
77     if (dnn_result != 0) {
78         av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
79         av_frame_free(&in);
80         return dnn_result;
81     }
82     do {
83         async_state = ff_dnn_get_result(&dr_context->dnnctx, &in, &out);
84     } while (async_state == DAST_NOT_READY);
85
86     if (async_state != DAST_SUCCESS)
87         return AVERROR(EINVAL);
88
89     av_frame_free(&in);
90
91     return ff_filter_frame(outlink, out);
92 }
93
94 static av_cold int init(AVFilterContext *ctx)
95 {
96     DRContext *dr_context = ctx->priv;
97     return ff_dnn_init(&dr_context->dnnctx, DFT_PROCESS_FRAME, ctx);
98 }
99
100 static av_cold void uninit(AVFilterContext *ctx)
101 {
102     DRContext *dr_context = ctx->priv;
103     ff_dnn_uninit(&dr_context->dnnctx);
104 }
105
106 static const AVFilterPad derain_inputs[] = {
107     {
108         .name         = "default",
109         .type         = AVMEDIA_TYPE_VIDEO,
110         .filter_frame = filter_frame,
111     },
112 };
113
114 static const AVFilterPad derain_outputs[] = {
115     {
116         .name = "default",
117         .type = AVMEDIA_TYPE_VIDEO,
118     },
119 };
120
121 const AVFilter ff_vf_derain = {
122     .name          = "derain",
123     .description   = NULL_IF_CONFIG_SMALL("Apply derain filter to the input."),
124     .priv_size     = sizeof(DRContext),
125     .init          = init,
126     .uninit        = uninit,
127     FILTER_INPUTS(derain_inputs),
128     FILTER_OUTPUTS(derain_outputs),
129     FILTER_SINGLE_PIXFMT(AV_PIX_FMT_RGB24),
130     .priv_class    = &derain_class,
131     .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
132 };
133
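
None of the instrumented lines above were executed, i.e. nothing in this run instantiated the derain filter. For orientation only, the sketch below shows one way the filter could be reached through the public libavfilter API so that init(), filter_frame() and uninit() would run; the helper name create_derain, the option string and the model path derain.model are illustrative placeholders, not values taken from this report or from the FFmpeg test suite.

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

/* Illustrative sketch: create a "derain" filter instance inside an existing
 * AVFilterGraph.  Pushing frames through such a graph is what would exercise
 * the uncovered lines in the listing above.  The option string and the model
 * path are placeholder assumptions. */
static int create_derain(AVFilterGraph *graph, AVFilterContext **filt_ctx)
{
    const AVFilter *derain = avfilter_get_by_name("derain");

    if (!derain)
        return AVERROR_FILTER_NOT_FOUND;

    return avfilter_graph_create_filter(filt_ctx, derain, "derain",
                                        "dnn_backend=native:model=derain.model",
                                        NULL, graph);
}

On the command line the same code path is typically reached with something like "ffmpeg -i in.mp4 -vf derain=model=derain.model out.mp4", assuming a model file trained for this filter is available.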