FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/dnn_filter_common.c
Date: 2024-03-29 11:55:30
            Exec    Total    Coverage
Lines:         0       89        0.0%
Functions:     0       12        0.0%
Branches:      0       44        0.0%

Source:

/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dnn_filter_common.h"
#include "libavutil/avstring.h"

#define MAX_SUPPORTED_OUTPUTS_NB 4

static char **separate_output_names(const char *expr, const char *val_sep, int *separated_nb)
{
    char *val, **parsed_vals = NULL;
    int val_num = 0;
    if (!expr || !val_sep || !separated_nb) {
        return NULL;
    }

    parsed_vals = av_calloc(MAX_SUPPORTED_OUTPUTS_NB, sizeof(*parsed_vals));
    if (!parsed_vals) {
        return NULL;
    }

    do {
        val = av_get_token(&expr, val_sep);
        if(val) {
            parsed_vals[val_num] = val;
            val_num++;
        }
        if (*expr) {
            expr++;
        }
    } while(*expr);

    parsed_vals[val_num] = NULL;
    *separated_nb = val_num;

    return parsed_vals;
}

int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
{
    DNNBackendType backend = ctx->backend_type;

    if (!ctx->model_filename) {
        av_log(filter_ctx, AV_LOG_ERROR, "model file for network is not specified\n");
        return AVERROR(EINVAL);
    }

    if (backend == DNN_TH) {
        if (ctx->model_inputname)
            av_log(filter_ctx, AV_LOG_WARNING, "LibTorch backend do not require inputname, "\
                                               "inputname will be ignored.\n");
        if (ctx->model_outputnames)
            av_log(filter_ctx, AV_LOG_WARNING, "LibTorch backend do not require outputname(s), "\
                                               "all outputname(s) will be ignored.\n");
        ctx->nb_outputs = 1;
    } else if (backend == DNN_TF) {
        if (!ctx->model_inputname) {
            av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
            return AVERROR(EINVAL);
        }
        ctx->model_outputnames = separate_output_names(ctx->model_outputnames_string, "&", &ctx->nb_outputs);
        if (!ctx->model_outputnames) {
            av_log(filter_ctx, AV_LOG_ERROR, "could not parse model output names\n");
            return AVERROR(EINVAL);
        }
    }

    ctx->dnn_module = ff_get_dnn_module(ctx->backend_type, filter_ctx);
    if (!ctx->dnn_module) {
        av_log(filter_ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
        return AVERROR(ENOMEM);
    }
    if (!ctx->dnn_module->load_model) {
        av_log(filter_ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
        return AVERROR(EINVAL);
    }

    ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename, func_type, ctx->backend_options, filter_ctx);
    if (!ctx->model) {
        av_log(filter_ctx, AV_LOG_ERROR, "could not load DNN model\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc)
{
    ctx->model->frame_pre_proc = pre_proc;
    ctx->model->frame_post_proc = post_proc;
    return 0;
}

int ff_dnn_set_detect_post_proc(DnnContext *ctx, DetectPostProc post_proc)
{
    ctx->model->detect_post_proc = post_proc;
    return 0;
}

int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc)
{
    ctx->model->classify_post_proc = post_proc;
    return 0;
}

int ff_dnn_get_input(DnnContext *ctx, DNNData *input)
{
    return ctx->model->get_input(ctx->model->model, input, ctx->model_inputname);
}

int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
{
    char * output_name = ctx->model_outputnames && ctx->backend_type != DNN_TH ?
                         ctx->model_outputnames[0] : NULL;
    return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
                                  (const char *)output_name, output_width, output_height);
}

int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
{
    DNNExecBaseParams exec_params = {
        .input_name = ctx->model_inputname,
        .output_names = (const char **)ctx->model_outputnames,
        .nb_output = ctx->nb_outputs,
        .in_frame = in_frame,
        .out_frame = out_frame,
    };
    return (ctx->dnn_module->execute_model)(ctx->model, &exec_params);
}

int ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target)
{
    DNNExecClassificationParams class_params = {
        {
            .input_name = ctx->model_inputname,
            .output_names = (const char **)ctx->model_outputnames,
            .nb_output = ctx->nb_outputs,
            .in_frame = in_frame,
            .out_frame = out_frame,
        },
        .target = target,
    };
    return (ctx->dnn_module->execute_model)(ctx->model, &class_params.base);
}

DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
{
    return (ctx->dnn_module->get_result)(ctx->model, in_frame, out_frame);
}

int ff_dnn_flush(DnnContext *ctx)
{
    return (ctx->dnn_module->flush)(ctx->model);
}

void ff_dnn_uninit(DnnContext *ctx)
{
    if (ctx->dnn_module) {
        (ctx->dnn_module->free_model)(&ctx->model);
    }
    if (ctx->model_outputnames) {
        for (int i = 0; i < ctx->nb_outputs; i++)
            av_free(ctx->model_outputnames[i]);

        av_freep(&ctx->model_outputnames);
    }
}
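
Usage note: the helpers above are the glue between DNN-based filters (dnn_processing, dnn_detect, dnn_classify) and the backend modules, and none of them were exercised in this run. The sketch below is illustrative only and is not part of the covered file; it assumes the DNN_FUNCTION_TYPE_PROCESS_FRAME constant and the DAST_* status values from libavfilter's dnn_interface.h, and a caller that already owns a configured DnnContext and a pair of allocated frames.

/* Illustrative sketch -- not part of dnn_filter_common.c. */
static int run_dnn_once(AVFilterContext *filter_ctx, DnnContext *dnn_ctx,
                        AVFrame *in, AVFrame *out)
{
    AVFrame *done_in = NULL, *done_out = NULL;
    DNNAsyncStatusType status;
    int ret;

    /* Pick the backend module, parse the output names and load the model. */
    ret = ff_dnn_init(dnn_ctx, DNN_FUNCTION_TYPE_PROCESS_FRAME, filter_ctx);
    if (ret < 0)
        return ret;

    /* Queue one inference request for this input/output frame pair. */
    ret = ff_dnn_execute_model(dnn_ctx, in, out);
    if (ret < 0)
        goto end;

    /* Backends may run asynchronously; poll until this request completes
     * (a real filter would sleep between polls instead of busy-waiting). */
    do {
        status = ff_dnn_get_result(dnn_ctx, &done_in, &done_out);
    } while (status == DAST_NOT_READY);
    if (status != DAST_SUCCESS)
        ret = AVERROR(EINVAL);

end:
    /* Frees the model and the parsed output-name array. */
    ff_dnn_uninit(dnn_ctx);
    return ret;
}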