Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2013 Paul B Mahol | ||
3 | * | ||
4 | * This file is part of FFmpeg. | ||
5 | * | ||
6 | * FFmpeg is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Lesser General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2.1 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * FFmpeg is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Lesser General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Lesser General Public | ||
17 | * License along with FFmpeg; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | */ | ||
20 | |||
21 | #include "config_components.h" | ||
22 | |||
23 | #include "libavutil/eval.h" | ||
24 | #include "libavutil/mem.h" | ||
25 | #include "libavutil/opt.h" | ||
26 | #include "libavutil/pixfmt.h" | ||
27 | #include "avfilter.h" | ||
28 | #include "framesync.h" | ||
29 | #include "internal.h" | ||
30 | #include "vf_blend_init.h" | ||
31 | #include "video.h" | ||
32 | #include "blend.h" | ||
33 | |||
34 | #define TOP 0 | ||
35 | #define BOTTOM 1 | ||
36 | |||
37 | typedef struct BlendContext { | ||
38 | const AVClass *class; | ||
39 | FFFrameSync fs; | ||
40 | int hsub, vsub; ///< chroma subsampling values | ||
41 | int nb_planes; | ||
42 | char *all_expr; | ||
43 | enum BlendMode all_mode; | ||
44 | double all_opacity; | ||
45 | |||
46 | int depth; | ||
47 | FilterParams params[4]; | ||
48 | int tblend; | ||
49 | AVFrame *prev_frame; /* only used with tblend */ | ||
50 | } BlendContext; | ||
51 | |||
/* Variables available inside user expressions; NULL-terminated.
 * The order must stay in lockstep with the VAR_* enum below. */
static const char *const var_names[] = {
    "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL
};
enum {
    VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N,
    VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB
};
54 | |||
55 | typedef struct ThreadData { | ||
56 | const AVFrame *top, *bottom; | ||
57 | AVFrame *dst; | ||
58 | AVFilterLink *inlink; | ||
59 | int plane; | ||
60 | int w, h; | ||
61 | FilterParams *param; | ||
62 | } ThreadData; | ||
63 | |||
64 | #define OFFSET(x) offsetof(BlendContext, x) | ||
65 | #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM | ||
66 | |||
67 | static const AVOption blend_options[] = { | ||
68 | { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
69 | { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
70 | { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
71 | { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
72 | { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
73 | { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, .unit = "mode" }, | ||
74 | { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
75 | { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
76 | { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, .unit = "mode" }, | ||
77 | { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
78 | { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, .unit = "mode" }, | ||
79 | { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, .unit = "mode" }, | ||
80 | { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, .unit = "mode" }, | ||
81 | { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" }, | ||
82 | { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" }, | ||
83 | { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, .unit = "mode" }, | ||
84 | { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
85 | { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, .unit = "mode" }, | ||
86 | { "extremity", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY}, 0, 0, FLAGS, .unit = "mode" }, | ||
87 | { "freeze", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE}, 0, 0, FLAGS, .unit = "mode" }, | ||
88 | { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, .unit = "mode" }, | ||
89 | { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
90 | { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, .unit = "mode" }, | ||
91 | { "heat", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT}, 0, 0, FLAGS, .unit = "mode" }, | ||
92 | { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, .unit = "mode" }, | ||
93 | { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, .unit = "mode" }, | ||
94 | { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, .unit = "mode" }, | ||
95 | { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, .unit = "mode" }, | ||
96 | { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, .unit = "mode" }, | ||
97 | { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, .unit = "mode" }, | ||
98 | { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, .unit = "mode" }, | ||
99 | { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, .unit = "mode" }, | ||
100 | { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, .unit = "mode" }, | ||
101 | { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
102 | { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, .unit = "mode" }, | ||
103 | { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, .unit = "mode" }, | ||
104 | { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
105 | { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, .unit = "mode" }, | ||
106 | { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
107 | { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, .unit = "mode" }, | ||
108 | { "softdifference","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTDIFFERENCE}, 0, 0, FLAGS, .unit = "mode" }, | ||
109 | { "geometric", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GEOMETRIC}, 0, 0, FLAGS, .unit = "mode" }, | ||
110 | { "harmonic", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARMONIC}, 0, 0, FLAGS, .unit = "mode" }, | ||
111 | { "bleach", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BLEACH}, 0, 0, FLAGS, .unit = "mode" }, | ||
112 | { "stain", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_STAIN}, 0, 0, FLAGS, .unit = "mode" }, | ||
113 | { "interpolate","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_INTERPOLATE},0, 0, FLAGS, .unit = "mode" }, | ||
114 | { "hardoverlay","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDOVERLAY},0, 0, FLAGS, .unit = "mode" }, | ||
115 | { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
116 | { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
117 | { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
118 | { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
119 | { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
120 | { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
121 | { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
122 | { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
123 | { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
124 | { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
125 | { NULL } | ||
126 | }; | ||
127 | |||
128 | ✗ | FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs); | |
129 | |||
130 | #define DEFINE_BLEND_EXPR(type, name, div) \ | ||
131 | static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize, \ | ||
132 | const uint8_t *_bottom, ptrdiff_t bottom_linesize, \ | ||
133 | uint8_t *_dst, ptrdiff_t dst_linesize, \ | ||
134 | ptrdiff_t width, ptrdiff_t height, \ | ||
135 | FilterParams *param, double *values, int starty) \ | ||
136 | { \ | ||
137 | const type *top = (const type*)_top; \ | ||
138 | const type *bottom = (const type*)_bottom; \ | ||
139 | type *dst = (type*)_dst; \ | ||
140 | AVExpr *e = param->e; \ | ||
141 | int y, x; \ | ||
142 | dst_linesize /= div; \ | ||
143 | top_linesize /= div; \ | ||
144 | bottom_linesize /= div; \ | ||
145 | \ | ||
146 | for (y = 0; y < height; y++) { \ | ||
147 | values[VAR_Y] = y + starty; \ | ||
148 | for (x = 0; x < width; x++) { \ | ||
149 | values[VAR_X] = x; \ | ||
150 | values[VAR_TOP] = values[VAR_A] = top[x]; \ | ||
151 | values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; \ | ||
152 | dst[x] = av_expr_eval(e, values, NULL); \ | ||
153 | } \ | ||
154 | dst += dst_linesize; \ | ||
155 | top += top_linesize; \ | ||
156 | bottom += bottom_linesize; \ | ||
157 | } \ | ||
158 | } | ||
159 | |||
160 | ✗ | DEFINE_BLEND_EXPR(uint8_t, 8bit, 1) | |
161 | ✗ | DEFINE_BLEND_EXPR(uint16_t, 16bit, 2) | |
162 | ✗ | DEFINE_BLEND_EXPR(float, 32bit, 4) | |
163 | |||
164 | 1323 | static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
165 | { | ||
166 | 1323 | ThreadData *td = arg; | |
167 | 1323 | int slice_start = (td->h * jobnr ) / nb_jobs; | |
168 | 1323 | int slice_end = (td->h * (jobnr+1)) / nb_jobs; | |
169 | 1323 | int height = slice_end - slice_start; | |
170 | 1323 | const uint8_t *top = td->top->data[td->plane]; | |
171 | 1323 | const uint8_t *bottom = td->bottom->data[td->plane]; | |
172 | 1323 | uint8_t *dst = td->dst->data[td->plane]; | |
173 | double values[VAR_VARS_NB]; | ||
174 | |||
175 | 1323 | values[VAR_N] = td->inlink->frame_count_out; | |
176 |
1/2✓ Branch 0 taken 1323 times.
✗ Branch 1 not taken.
|
1323 | values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base); |
177 | 1323 | values[VAR_W] = td->w; | |
178 | 1323 | values[VAR_H] = td->h; | |
179 | 1323 | values[VAR_SW] = td->w / (double)td->dst->width; | |
180 | 1323 | values[VAR_SH] = td->h / (double)td->dst->height; | |
181 | |||
182 | 1323 | td->param->blend(top + slice_start * td->top->linesize[td->plane], | |
183 | 1323 | td->top->linesize[td->plane], | |
184 | 1323 | bottom + slice_start * td->bottom->linesize[td->plane], | |
185 | 1323 | td->bottom->linesize[td->plane], | |
186 | 1323 | dst + slice_start * td->dst->linesize[td->plane], | |
187 | 1323 | td->dst->linesize[td->plane], | |
188 | 1323 | td->w, height, td->param, &values[0], slice_start); | |
189 | 1323 | return 0; | |
190 | } | ||
191 | |||
192 | 49 | static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf, | |
193 | const AVFrame *bottom_buf) | ||
194 | { | ||
195 | 49 | BlendContext *s = ctx->priv; | |
196 | 49 | AVFilterLink *inlink = ctx->inputs[0]; | |
197 | 49 | AVFilterLink *outlink = ctx->outputs[0]; | |
198 | AVFrame *dst_buf; | ||
199 | int plane; | ||
200 | |||
201 | 49 | dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
202 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
|
49 | if (!dst_buf) |
203 | ✗ | return top_buf; | |
204 | |||
205 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 49 times.
|
49 | if (av_frame_copy_props(dst_buf, top_buf) < 0) { |
206 | ✗ | av_frame_free(&dst_buf); | |
207 | ✗ | return top_buf; | |
208 | } | ||
209 | |||
210 |
2/2✓ Branch 0 taken 147 times.
✓ Branch 1 taken 49 times.
|
196 | for (plane = 0; plane < s->nb_planes; plane++) { |
211 |
4/4✓ Branch 0 taken 98 times.
✓ Branch 1 taken 49 times.
✓ Branch 2 taken 49 times.
✓ Branch 3 taken 49 times.
|
147 | int hsub = plane == 1 || plane == 2 ? s->hsub : 0; |
212 |
4/4✓ Branch 0 taken 98 times.
✓ Branch 1 taken 49 times.
✓ Branch 2 taken 49 times.
✓ Branch 3 taken 49 times.
|
147 | int vsub = plane == 1 || plane == 2 ? s->vsub : 0; |
213 | 147 | int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub); | |
214 | 147 | int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub); | |
215 | 147 | FilterParams *param = &s->params[plane]; | |
216 | 147 | ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf, | |
217 | .w = outw, .h = outh, .param = param, .plane = plane, | ||
218 | .inlink = inlink }; | ||
219 | |||
220 | 147 | ff_filter_execute(ctx, filter_slice, &td, NULL, | |
221 |
1/2✓ Branch 0 taken 147 times.
✗ Branch 1 not taken.
|
147 | FFMIN(outh, ff_filter_get_nb_threads(ctx))); |
222 | } | ||
223 | |||
224 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
|
49 | if (!s->tblend) |
225 | ✗ | av_frame_free(&top_buf); | |
226 | |||
227 | 49 | return dst_buf; | |
228 | } | ||
229 | |||
230 | ✗ | static int blend_frame_for_dualinput(FFFrameSync *fs) | |
231 | { | ||
232 | ✗ | AVFilterContext *ctx = fs->parent; | |
233 | AVFrame *top_buf, *bottom_buf, *dst_buf; | ||
234 | int ret; | ||
235 | |||
236 | ✗ | ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf); | |
237 | ✗ | if (ret < 0) | |
238 | ✗ | return ret; | |
239 | ✗ | if (!bottom_buf) | |
240 | ✗ | return ff_filter_frame(ctx->outputs[0], top_buf); | |
241 | ✗ | dst_buf = blend_frame(ctx, top_buf, bottom_buf); | |
242 | ✗ | return ff_filter_frame(ctx->outputs[0], dst_buf); | |
243 | } | ||
244 | |||
245 | 2 | static av_cold int init(AVFilterContext *ctx) | |
246 | { | ||
247 | 2 | BlendContext *s = ctx->priv; | |
248 | |||
249 | 2 | s->tblend = !strcmp(ctx->filter->name, "tblend"); | |
250 | |||
251 | 2 | s->fs.on_event = blend_frame_for_dualinput; | |
252 | 2 | return 0; | |
253 | } | ||
254 | |||
255 | static const enum AVPixelFormat pix_fmts[] = { | ||
256 | AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P, | ||
257 | AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P, | ||
258 | AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, | ||
259 | AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, | ||
260 | AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, | ||
261 | AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9, | ||
262 | AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10, | ||
263 | AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10, | ||
264 | AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GRAY10, | ||
265 | AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, | ||
266 | AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12, | ||
267 | AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GRAY12, | ||
268 | AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_GBRP14, | ||
269 | AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, | ||
270 | AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, | ||
271 | AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16, | ||
272 | AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32, | ||
273 | AV_PIX_FMT_NONE | ||
274 | }; | ||
275 | |||
276 | 2 | static av_cold void uninit(AVFilterContext *ctx) | |
277 | { | ||
278 | 2 | BlendContext *s = ctx->priv; | |
279 | int i; | ||
280 | |||
281 | 2 | ff_framesync_uninit(&s->fs); | |
282 | 2 | av_frame_free(&s->prev_frame); | |
283 | |||
284 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++) |
285 | 8 | av_expr_free(s->params[i].e); | |
286 | 2 | } | |
287 | |||
288 | 1 | static int config_params(AVFilterContext *ctx) | |
289 | { | ||
290 | 1 | BlendContext *s = ctx->priv; | |
291 | int ret; | ||
292 | |||
293 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 1 times.
|
5 | for (int plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) { |
294 | 4 | FilterParams *param = &s->params[plane]; | |
295 | |||
296 |
1/2✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
|
4 | if (s->all_mode >= 0) |
297 | 4 | param->mode = s->all_mode; | |
298 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
|
4 | if (s->all_opacity < 1) |
299 | ✗ | param->opacity = s->all_opacity; | |
300 | |||
301 | 4 | ff_blend_init(param, s->depth); | |
302 | |||
303 |
1/4✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
|
4 | if (s->all_expr && !param->expr_str) { |
304 | ✗ | param->expr_str = av_strdup(s->all_expr); | |
305 | ✗ | if (!param->expr_str) | |
306 | ✗ | return AVERROR(ENOMEM); | |
307 | } | ||
308 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
|
4 | if (param->expr_str) { |
309 | ✗ | ret = av_expr_parse(¶m->e, param->expr_str, var_names, | |
310 | NULL, NULL, NULL, NULL, 0, ctx); | ||
311 | ✗ | if (ret < 0) | |
312 | ✗ | return ret; | |
313 | ✗ | param->blend = s->depth > 8 ? s->depth > 16 ? blend_expr_32bit : blend_expr_16bit : blend_expr_8bit; | |
314 | } | ||
315 | } | ||
316 | |||
317 | 1 | return 0; | |
318 | } | ||
319 | |||
320 | 1 | static int config_output(AVFilterLink *outlink) | |
321 | { | ||
322 | 1 | AVFilterContext *ctx = outlink->src; | |
323 | 1 | AVFilterLink *toplink = ctx->inputs[TOP]; | |
324 | 1 | BlendContext *s = ctx->priv; | |
325 | 1 | const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format); | |
326 | int ret; | ||
327 | |||
328 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (!s->tblend) { |
329 | ✗ | AVFilterLink *bottomlink = ctx->inputs[BOTTOM]; | |
330 | |||
331 | ✗ | if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) { | |
332 | ✗ | av_log(ctx, AV_LOG_ERROR, "First input link %s parameters " | |
333 | "(size %dx%d) do not match the corresponding " | ||
334 | "second input link %s parameters (size %dx%d)\n", | ||
335 | ✗ | ctx->input_pads[TOP].name, toplink->w, toplink->h, | |
336 | ✗ | ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h); | |
337 | ✗ | return AVERROR(EINVAL); | |
338 | } | ||
339 | } | ||
340 | |||
341 | 1 | outlink->w = toplink->w; | |
342 | 1 | outlink->h = toplink->h; | |
343 | 1 | outlink->time_base = toplink->time_base; | |
344 | 1 | outlink->sample_aspect_ratio = toplink->sample_aspect_ratio; | |
345 | 1 | outlink->frame_rate = toplink->frame_rate; | |
346 | |||
347 | 1 | s->hsub = pix_desc->log2_chroma_w; | |
348 | 1 | s->vsub = pix_desc->log2_chroma_h; | |
349 | |||
350 | 1 | s->depth = pix_desc->comp[0].depth; | |
351 | 1 | s->nb_planes = av_pix_fmt_count_planes(toplink->format); | |
352 | |||
353 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (!s->tblend) |
354 | ✗ | if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0) | |
355 | ✗ | return ret; | |
356 | |||
357 | 1 | ret = config_params(ctx); | |
358 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (ret < 0) |
359 | ✗ | return ret; | |
360 | |||
361 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (s->tblend) |
362 | 1 | return 0; | |
363 | |||
364 | ✗ | ret = ff_framesync_configure(&s->fs); | |
365 | ✗ | outlink->time_base = s->fs.time_base; | |
366 | |||
367 | ✗ | return ret; | |
368 | } | ||
369 | |||
370 | ✗ | static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, | |
371 | char *res, int res_len, int flags) | ||
372 | { | ||
373 | int ret; | ||
374 | |||
375 | ✗ | ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags); | |
376 | ✗ | if (ret < 0) | |
377 | ✗ | return ret; | |
378 | |||
379 | ✗ | return config_params(ctx); | |
380 | } | ||
381 | |||
382 | static const AVFilterPad blend_outputs[] = { | ||
383 | { | ||
384 | .name = "default", | ||
385 | .type = AVMEDIA_TYPE_VIDEO, | ||
386 | .config_props = config_output, | ||
387 | }, | ||
388 | }; | ||
389 | |||
390 | #if CONFIG_BLEND_FILTER | ||
391 | |||
392 | ✗ | static int activate(AVFilterContext *ctx) | |
393 | { | ||
394 | ✗ | BlendContext *s = ctx->priv; | |
395 | ✗ | return ff_framesync_activate(&s->fs); | |
396 | } | ||
397 | |||
398 | static const AVFilterPad blend_inputs[] = { | ||
399 | { | ||
400 | .name = "top", | ||
401 | .type = AVMEDIA_TYPE_VIDEO, | ||
402 | },{ | ||
403 | .name = "bottom", | ||
404 | .type = AVMEDIA_TYPE_VIDEO, | ||
405 | }, | ||
406 | }; | ||
407 | |||
408 | const AVFilter ff_vf_blend = { | ||
409 | .name = "blend", | ||
410 | .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."), | ||
411 | .preinit = blend_framesync_preinit, | ||
412 | .init = init, | ||
413 | .uninit = uninit, | ||
414 | .priv_size = sizeof(BlendContext), | ||
415 | .activate = activate, | ||
416 | FILTER_INPUTS(blend_inputs), | ||
417 | FILTER_OUTPUTS(blend_outputs), | ||
418 | FILTER_PIXFMTS_ARRAY(pix_fmts), | ||
419 | .priv_class = &blend_class, | ||
420 | .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS, | ||
421 | .process_command = process_command, | ||
422 | }; | ||
423 | |||
424 | #endif | ||
425 | |||
426 | #if CONFIG_TBLEND_FILTER | ||
427 | |||
428 | 50 | static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame) | |
429 | { | ||
430 | 50 | AVFilterContext *ctx = inlink->dst; | |
431 | 50 | BlendContext *s = ctx->priv; | |
432 | 50 | AVFilterLink *outlink = ctx->outputs[0]; | |
433 | |||
434 |
2/2✓ Branch 0 taken 49 times.
✓ Branch 1 taken 1 times.
|
50 | if (s->prev_frame) { |
435 | AVFrame *out; | ||
436 | |||
437 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
|
49 | if (ctx->is_disabled) |
438 | ✗ | out = av_frame_clone(frame); | |
439 | else | ||
440 | 49 | out = blend_frame(ctx, frame, s->prev_frame); | |
441 | 49 | av_frame_free(&s->prev_frame); | |
442 | 49 | s->prev_frame = frame; | |
443 | 49 | return ff_filter_frame(outlink, out); | |
444 | } | ||
445 | 1 | s->prev_frame = frame; | |
446 | 1 | return 0; | |
447 | } | ||
448 | |||
449 | AVFILTER_DEFINE_CLASS_EXT(tblend, "tblend", blend_options); | ||
450 | |||
451 | static const AVFilterPad tblend_inputs[] = { | ||
452 | { | ||
453 | .name = "default", | ||
454 | .type = AVMEDIA_TYPE_VIDEO, | ||
455 | .filter_frame = tblend_filter_frame, | ||
456 | }, | ||
457 | }; | ||
458 | |||
459 | const AVFilter ff_vf_tblend = { | ||
460 | .name = "tblend", | ||
461 | .description = NULL_IF_CONFIG_SMALL("Blend successive frames."), | ||
462 | .priv_size = sizeof(BlendContext), | ||
463 | .priv_class = &tblend_class, | ||
464 | .init = init, | ||
465 | .uninit = uninit, | ||
466 | FILTER_INPUTS(tblend_inputs), | ||
467 | FILTER_OUTPUTS(blend_outputs), | ||
468 | FILTER_PIXFMTS_ARRAY(pix_fmts), | ||
469 | .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS, | ||
470 | .process_command = process_command, | ||
471 | }; | ||
472 | |||
473 | #endif | ||
474 |