FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_blend.c
Date: 2026-04-22 18:56:46
Exec Total Coverage
Lines: 100 156 64.1%
Functions: 7 15 46.7%
Branches: 31 90 34.4%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2013 Paul B Mahol
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include "config_components.h"
22
23 #include "libavutil/eval.h"
24 #include "libavutil/mem.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/pixfmt.h"
27 #include "avfilter.h"
28 #include "filters.h"
29 #include "framesync.h"
30 #include "vf_blend_init.h"
31 #include "video.h"
32 #include "blend.h"
33
/* Indices of the two inputs of the dual-input "blend" filter. */
#define TOP 0
#define BOTTOM 1

/**
 * Shared private state of the "blend" and "tblend" filters.
 */
typedef struct BlendContext {
    const AVClass *class;
    FFFrameSync fs;          ///< dual-input synchronizer; configured only when !tblend
    int hsub, vsub;          ///< chroma subsampling values
    int nb_planes;           ///< plane count of the negotiated pixel format
    char *all_expr;          ///< expression applied to every component (option "all_expr")
    /* enum BlendMode */
    int all_mode;            ///< mode for all components; -1 means use per-component modes
    double all_opacity;      ///< opacity for all components (option "all_opacity")

    int depth;               ///< bit depth of component 0 of the input format
    FilterParams params[4];  ///< per-component mode/opacity/expression state
    int tblend;              ///< nonzero when instantiated as the "tblend" filter
    AVFrame *prev_frame; /* only used with tblend */
    int nb_threads;          ///< number of slice threads (one parsed AVExpr per thread)
} BlendContext;

/* Variables usable in user expressions; order must match the VAR_* enum below. */
static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };

/* Per-job arguments for filter_slice(). */
typedef struct ThreadData {
    const AVFrame *top, *bottom; ///< input frames (for tblend: current and previous frame)
    AVFrame *dst;                ///< output frame
    AVFilterLink *inlink;
    int plane;                   ///< index of the plane being processed
    int w, h;                    ///< plane dimensions after chroma subsampling
    FilterParams *param;         ///< parameters of the component blended on this plane
} ThreadData;
65
#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/*
 * Option table shared by "blend" and "tblend".  Several names are aliases
 * mapping to the same BlendMode (addition128/grainmerge,
 * difference128/grainextract).  Expressions, when set, override the mode
 * for that component; "all_*" options fan out to every component in
 * config_params().
 */
static const AVOption blend_options[] = {
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "addition",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION},   0, 0, FLAGS, .unit = "mode" },
    { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" },
    { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" },
    { "and",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND},        0, 0, FLAGS, .unit = "mode" },
    { "average",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE},    0, 0, FLAGS, .unit = "mode" },
    { "burn",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN},       0, 0, FLAGS, .unit = "mode" },
    { "darken",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN},     0, 0, FLAGS, .unit = "mode" },
    { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, .unit = "mode" },
    { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" },
    { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" },
    { "divide",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE},     0, 0, FLAGS, .unit = "mode" },
    { "dodge",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE},      0, 0, FLAGS, .unit = "mode" },
    { "exclusion",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION},  0, 0, FLAGS, .unit = "mode" },
    { "extremity",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY},  0, 0, FLAGS, .unit = "mode" },
    { "freeze",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE},     0, 0, FLAGS, .unit = "mode" },
    { "glow",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW},       0, 0, FLAGS, .unit = "mode" },
    { "hardlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT},  0, 0, FLAGS, .unit = "mode" },
    { "hardmix",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX},    0, 0, FLAGS, .unit = "mode" },
    { "heat",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT},       0, 0, FLAGS, .unit = "mode" },
    { "lighten",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN},    0, 0, FLAGS, .unit = "mode" },
    { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, .unit = "mode" },
    { "multiply",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY},   0, 0, FLAGS, .unit = "mode" },
    { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, .unit = "mode" },
    { "negation",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION},   0, 0, FLAGS, .unit = "mode" },
    { "normal",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL},     0, 0, FLAGS, .unit = "mode" },
    { "or",         "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR},         0, 0, FLAGS, .unit = "mode" },
    { "overlay",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY},    0, 0, FLAGS, .unit = "mode" },
    { "phoenix",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX},    0, 0, FLAGS, .unit = "mode" },
    { "pinlight",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT},   0, 0, FLAGS, .unit = "mode" },
    { "reflect",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT},    0, 0, FLAGS, .unit = "mode" },
    { "screen",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN},     0, 0, FLAGS, .unit = "mode" },
    { "softlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT},  0, 0, FLAGS, .unit = "mode" },
    { "subtract",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT},   0, 0, FLAGS, .unit = "mode" },
    { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, .unit = "mode" },
    { "xor",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR},        0, 0, FLAGS, .unit = "mode" },
    { "softdifference","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTDIFFERENCE}, 0, 0, FLAGS, .unit = "mode" },
    { "geometric",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GEOMETRIC},  0, 0, FLAGS, .unit = "mode" },
    { "harmonic",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARMONIC},   0, 0, FLAGS, .unit = "mode" },
    { "bleach",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BLEACH},     0, 0, FLAGS, .unit = "mode" },
    { "stain",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_STAIN},      0, 0, FLAGS, .unit = "mode" },
    { "interpolate","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_INTERPOLATE},0, 0, FLAGS, .unit = "mode" },
    { "hardoverlay","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDOVERLAY},0, 0, FLAGS, .unit = "mode" },
    { "c0_expr",  "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c1_expr",  "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c2_expr",  "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c3_expr",  "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c0_opacity",  "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c1_opacity",  "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c2_opacity",  "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c3_opacity",  "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs);
131
/*
 * Generate a blend function that evaluates a user expression per pixel.
 * For each pixel the variables X, Y and A/TOP, B/BOTTOM are refreshed
 * before av_expr_eval(); the remaining variables (W, H, SW, SH, T, N)
 * were filled in once per slice by filter_slice().  Linesizes arrive in
 * bytes and are converted to sample units via `div` (bytes per sample).
 * `starty` offsets Y so that slice-threaded jobs report absolute rows.
 */
#define DEFINE_BLEND_EXPR(type, name, div)                                     \
static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize,    \
                               const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
                               uint8_t *_dst, ptrdiff_t dst_linesize,          \
                               ptrdiff_t width, ptrdiff_t height,              \
                               FilterParams *param, SliceParams *sliceparam)   \
{                                                                              \
    const type *top = (const type*)_top;                                       \
    const type *bottom = (const type*)_bottom;                                 \
    double *values = sliceparam->values;                                       \
    int starty = sliceparam->starty;                                           \
    type *dst = (type*)_dst;                                                   \
    AVExpr *e = sliceparam->e;                                                 \
    int y, x;                                                                  \
    dst_linesize /= div;                                                       \
    top_linesize /= div;                                                       \
    bottom_linesize /= div;                                                    \
                                                                               \
    for (y = 0; y < height; y++) {                                             \
        values[VAR_Y] = y + starty;                                            \
        for (x = 0; x < width; x++) {                                          \
            values[VAR_X]      = x;                                            \
            values[VAR_TOP]    = values[VAR_A] = top[x];                       \
            values[VAR_BOTTOM] = values[VAR_B] = bottom[x];                    \
            dst[x] = av_expr_eval(e, values, NULL);                            \
        }                                                                      \
        dst    += dst_linesize;                                                \
        top    += top_linesize;                                                \
        bottom += bottom_linesize;                                             \
    }                                                                          \
}

DEFINE_BLEND_EXPR(uint8_t, 8bit, 1)
DEFINE_BLEND_EXPR(uint16_t, 16bit, 2)
DEFINE_BLEND_EXPR(float, 32bit, 4)
167
168 1323 static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
169 {
170 1323 ThreadData *td = arg;
171 1323 int slice_start = (td->h * jobnr ) / nb_jobs;
172 1323 int slice_end = (td->h * (jobnr+1)) / nb_jobs;
173 1323 int height = slice_end - slice_start;
174 1323 const uint8_t *top = td->top->data[td->plane];
175 1323 const uint8_t *bottom = td->bottom->data[td->plane];
176 1323 uint8_t *dst = td->dst->data[td->plane];
177 1323 FilterLink *inl = ff_filter_link(td->inlink);
178 double values[VAR_VARS_NB];
179
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1323 times.
1323 SliceParams sliceparam = {.values = &values[0], .starty = slice_start, .e = td->param->e ? td->param->e[jobnr] : NULL};
180
181 1323 values[VAR_N] = inl->frame_count_out;
182
1/2
✓ Branch 0 taken 1323 times.
✗ Branch 1 not taken.
1323 values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
183 1323 values[VAR_W] = td->w;
184 1323 values[VAR_H] = td->h;
185 1323 values[VAR_SW] = td->w / (double)td->dst->width;
186 1323 values[VAR_SH] = td->h / (double)td->dst->height;
187
188 1323 td->param->blend(top + slice_start * td->top->linesize[td->plane],
189 1323 td->top->linesize[td->plane],
190 1323 bottom + slice_start * td->bottom->linesize[td->plane],
191 1323 td->bottom->linesize[td->plane],
192 1323 dst + slice_start * td->dst->linesize[td->plane],
193 1323 td->dst->linesize[td->plane],
194 1323 td->w, height, td->param, &sliceparam);
195 1323 return 0;
196 }
197
198 49 static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
199 const AVFrame *bottom_buf)
200 {
201 49 BlendContext *s = ctx->priv;
202 49 AVFilterLink *inlink = ctx->inputs[0];
203 49 AVFilterLink *outlink = ctx->outputs[0];
204 AVFrame *dst_buf;
205 int plane;
206
207 49 dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
208
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
49 if (!dst_buf)
209 return top_buf;
210
211
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 49 times.
49 if (av_frame_copy_props(dst_buf, top_buf) < 0) {
212 av_frame_free(&dst_buf);
213 return top_buf;
214 }
215
216
2/2
✓ Branch 0 taken 147 times.
✓ Branch 1 taken 49 times.
196 for (plane = 0; plane < s->nb_planes; plane++) {
217
4/4
✓ Branch 0 taken 98 times.
✓ Branch 1 taken 49 times.
✓ Branch 2 taken 49 times.
✓ Branch 3 taken 49 times.
147 int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
218
4/4
✓ Branch 0 taken 98 times.
✓ Branch 1 taken 49 times.
✓ Branch 2 taken 49 times.
✓ Branch 3 taken 49 times.
147 int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
219 147 int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub);
220 147 int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub);
221 147 FilterParams *param = &s->params[plane];
222 147 ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
223 .w = outw, .h = outh, .param = param, .plane = plane,
224 .inlink = inlink };
225
226 147 ff_filter_execute(ctx, filter_slice, &td, NULL,
227 147 FFMIN(outh, s->nb_threads));
228 }
229
230
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
49 if (!s->tblend)
231 av_frame_free(&top_buf);
232
233 49 return dst_buf;
234 }
235
236 static int blend_frame_for_dualinput(FFFrameSync *fs)
237 {
238 AVFilterContext *ctx = fs->parent;
239 AVFrame *top_buf, *bottom_buf, *dst_buf;
240 int ret;
241
242 ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf);
243 if (ret < 0)
244 return ret;
245 if (!bottom_buf)
246 return ff_filter_frame(ctx->outputs[0], top_buf);
247 dst_buf = blend_frame(ctx, top_buf, bottom_buf);
248 return ff_filter_frame(ctx->outputs[0], dst_buf);
249 }
250
251 2 static av_cold int init(AVFilterContext *ctx)
252 {
253 2 BlendContext *s = ctx->priv;
254
255 2 s->tblend = !strcmp(ctx->filter->name, "tblend");
256 2 s->nb_threads = ff_filter_get_nb_threads(ctx);
257
258 2 s->fs.on_event = blend_frame_for_dualinput;
259 2 return 0;
260 }
261
/* Supported formats: planar YUV(A)/GBR(A)/GRAY at 8-16 bits plus 32-bit float. */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GRAY10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GRAY12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_GBRP14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32,
    AV_PIX_FMT_NONE
};
282
283 2 static av_cold void uninit(AVFilterContext *ctx)
284 {
285 2 BlendContext *s = ctx->priv;
286 int i;
287
288 2 ff_framesync_uninit(&s->fs);
289 2 av_frame_free(&s->prev_frame);
290
291
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
10 for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++) {
292
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
8 if (s->params[i].e) {
293 for (int j = 0; j < s->nb_threads; j++)
294 av_expr_free(s->params[i].e[j]);
295 av_freep(&s->params[i].e);
296 }
297 }
298
299 2 }
300
301 1 static int config_params(AVFilterContext *ctx)
302 {
303 1 BlendContext *s = ctx->priv;
304 int ret;
305
306
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 1 times.
5 for (int plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
307 4 FilterParams *param = &s->params[plane];
308
309
1/2
✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
4 if (s->all_mode >= 0)
310 4 param->mode = s->all_mode;
311
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
4 if (s->all_opacity < 1)
312 param->opacity = s->all_opacity;
313
314 4 ff_blend_init(param, s->depth);
315
316
1/4
✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
4 if (s->all_expr && !param->expr_str) {
317 param->expr_str = av_strdup(s->all_expr);
318 if (!param->expr_str)
319 return AVERROR(ENOMEM);
320 }
321
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
4 if (param->expr_str) {
322 if (!param->e) {
323 param->e = av_calloc(s->nb_threads, sizeof(*param->e));
324 if (!param->e)
325 return AVERROR(ENOMEM);
326 }
327 for (int i = 0; i < s->nb_threads; i++) {
328 av_expr_free(param->e[i]);
329 param->e[i] = NULL;
330 ret = av_expr_parse(&param->e[i], param->expr_str, var_names,
331 NULL, NULL, NULL, NULL, 0, ctx);
332 if (ret < 0)
333 return ret;
334 }
335 param->blend = s->depth > 8 ? s->depth > 16 ? blend_expr_32bit : blend_expr_16bit : blend_expr_8bit;
336 }
337 }
338
339 1 return 0;
340 }
341
/**
 * Negotiate output properties from the top input, validate that both
 * inputs match (dual-input mode only), and initialize frame sync and the
 * per-plane parameters.
 *
 * @return 0 on success, a negative AVERROR on mismatched inputs or
 *         framesync/parameter setup failure
 */
static int config_output(AVFilterLink *outlink)
{
    FilterLink *outl = ff_filter_link(outlink);
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *toplink = ctx->inputs[TOP];
    FilterLink *tl = ff_filter_link(toplink);
    BlendContext *s = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
    int ret;

    /* tblend has a single input, so the size check only applies to "blend" */
    if (!s->tblend) {
        AVFilterLink *bottomlink = ctx->inputs[BOTTOM];

        if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) {
            av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
                   "(size %dx%d) do not match the corresponding "
                   "second input link %s parameters (size %dx%d)\n",
                   ctx->input_pads[TOP].name, toplink->w, toplink->h,
                   ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h);
            return AVERROR(EINVAL);
        }
    }

    /* output inherits geometry/timing from the top input */
    outlink->w = toplink->w;
    outlink->h = toplink->h;
    outlink->time_base = toplink->time_base;
    outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
    outl->frame_rate = tl->frame_rate;

    s->hsub = pix_desc->log2_chroma_w;
    s->vsub = pix_desc->log2_chroma_h;

    s->depth = pix_desc->comp[0].depth;
    s->nb_planes = av_pix_fmt_count_planes(toplink->format);

    /* frame sync must exist before config_params() only in dual-input mode */
    if (!s->tblend)
        if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
            return ret;

    ret = config_params(ctx);
    if (ret < 0)
        return ret;

    if (s->tblend)
        return 0;

    /* dual-input mode: framesync decides the final output time base */
    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}
393
394 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
395 char *res, int res_len, int flags)
396 {
397 int ret;
398
399 ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
400 if (ret < 0)
401 return ret;
402
403 return config_params(ctx);
404 }
405
/* Single video output pad, shared by "blend" and "tblend". */
static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};
413
#if CONFIG_BLEND_FILTER

/* Drive the dual-input frame sync; it invokes blend_frame_for_dualinput(). */
static int activate(AVFilterContext *ctx)
{
    BlendContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

/* Two video inputs: "top" is blended over "bottom". */
static const AVFilterPad blend_inputs[] = {
    {
        .name          = "top",
        .type          = AVMEDIA_TYPE_VIDEO,
    },{
        .name          = "bottom",
        .type          = AVMEDIA_TYPE_VIDEO,
    },
};

const FFFilter ff_vf_blend = {
    .p.name        = "blend",
    .p.description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .p.priv_class  = &blend_class,
    .p.flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .preinit       = blend_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(BlendContext),
    .activate      = activate,
    FILTER_INPUTS(blend_inputs),
    FILTER_OUTPUTS(blend_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = process_command,
};

#endif
449
450 #if CONFIG_TBLEND_FILTER
451
452 50 static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
453 {
454 50 AVFilterContext *ctx = inlink->dst;
455 50 BlendContext *s = ctx->priv;
456 50 AVFilterLink *outlink = ctx->outputs[0];
457
458
2/2
✓ Branch 0 taken 49 times.
✓ Branch 1 taken 1 times.
50 if (s->prev_frame) {
459 AVFrame *out;
460
461
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
49 if (ctx->is_disabled)
462 out = av_frame_clone(frame);
463 else
464 49 out = blend_frame(ctx, frame, s->prev_frame);
465 49 av_frame_free(&s->prev_frame);
466 49 s->prev_frame = frame;
467 49 return ff_filter_frame(outlink, out);
468 }
469 1 s->prev_frame = frame;
470 1 return 0;
471 }
472
/* tblend reuses the blend option table under its own class name. */
AVFILTER_DEFINE_CLASS_EXT(tblend, "tblend", blend_options);

/* Single input: successive frames are blended with each other. */
static const AVFilterPad tblend_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tblend_filter_frame,
    },
};

const FFFilter ff_vf_tblend = {
    .p.name        = "tblend",
    .p.description = NULL_IF_CONFIG_SMALL("Blend successive frames."),
    .p.priv_class  = &tblend_class,
    .p.flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .priv_size     = sizeof(BlendContext),
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(tblend_inputs),
    FILTER_OUTPUTS(blend_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = process_command,
};
496
497 #endif
498