Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2013 Paul B Mahol | ||
3 | * | ||
4 | * This file is part of FFmpeg. | ||
5 | * | ||
6 | * FFmpeg is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Lesser General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2.1 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * FFmpeg is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Lesser General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Lesser General Public | ||
17 | * License along with FFmpeg; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | */ | ||
20 | |||
21 | #include "config_components.h" | ||
22 | |||
23 | #include "libavutil/eval.h" | ||
24 | #include "libavutil/mem.h" | ||
25 | #include "libavutil/opt.h" | ||
26 | #include "libavutil/pixfmt.h" | ||
27 | #include "avfilter.h" | ||
28 | #include "framesync.h" | ||
29 | #include "internal.h" | ||
30 | #include "vf_blend_init.h" | ||
31 | #include "video.h" | ||
32 | #include "blend.h" | ||
33 | |||
/* Input pad indices for the two-input "blend" filter. */
#define TOP    0
#define BOTTOM 1

typedef struct BlendContext {
    const AVClass *class;
    FFFrameSync fs;          ///< synchronizes the two inputs (unused by tblend)
    int hsub, vsub;          ///< chroma subsampling values
    int nb_planes;           ///< plane count of the negotiated pixel format
    char *all_expr;          ///< expression option applied to every component
    enum BlendMode all_mode; ///< mode applied to every component, -1 = per-component modes
    double all_opacity;      ///< opacity applied to every component when < 1

    int depth;               ///< bit depth of the negotiated pixel format
    FilterParams params[4];  ///< per-component blend parameters
    int tblend;              ///< non-zero when running as the "tblend" filter
    AVFrame *prev_frame;     /* only used with tblend */
    int nb_threads;          ///< number of slice-threading jobs
} BlendContext;
52 | |||
/* Variables usable in user expressions; the name list and the enum below
 * must stay index-for-index in sync. */
static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
55 | |||
/* Per-frame job description handed to filter_slice(). */
typedef struct ThreadData {
    const AVFrame *top, *bottom; ///< the two source frames
    AVFrame *dst;                ///< destination frame
    AVFilterLink *inlink;        ///< link providing frame count and time base
    int plane;                   ///< plane index to process
    int w, h;                    ///< plane dimensions (after chroma subsampling)
    FilterParams *param;         ///< blend parameters for this component
} ThreadData;
64 | |||
#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* Options shared by "blend" and "tblend" (see AVFILTER_DEFINE_CLASS_EXT
 * below).  Several names are aliases kept for compatibility, e.g.
 * addition128 == grainmerge and difference128 == grainextract. */
static const AVOption blend_options[] = {
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, .unit = "mode" },
    { "addition",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION},   0, 0, FLAGS, .unit = "mode" },
    { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" },
    { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" },
    { "and",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND},        0, 0, FLAGS, .unit = "mode" },
    { "average",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE},    0, 0, FLAGS, .unit = "mode" },
    { "burn",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN},       0, 0, FLAGS, .unit = "mode" },
    { "darken",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN},     0, 0, FLAGS, .unit = "mode" },
    { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, .unit = "mode" },
    { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" },
    { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" },
    { "divide",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE},     0, 0, FLAGS, .unit = "mode" },
    { "dodge",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE},      0, 0, FLAGS, .unit = "mode" },
    { "exclusion",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION},  0, 0, FLAGS, .unit = "mode" },
    { "extremity",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY},  0, 0, FLAGS, .unit = "mode" },
    { "freeze",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE},     0, 0, FLAGS, .unit = "mode" },
    { "glow",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW},       0, 0, FLAGS, .unit = "mode" },
    { "hardlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT},  0, 0, FLAGS, .unit = "mode" },
    { "hardmix",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX},    0, 0, FLAGS, .unit = "mode" },
    { "heat",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT},       0, 0, FLAGS, .unit = "mode" },
    { "lighten",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN},    0, 0, FLAGS, .unit = "mode" },
    { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, .unit = "mode" },
    { "multiply",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY},   0, 0, FLAGS, .unit = "mode" },
    { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, .unit = "mode" },
    { "negation",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION},   0, 0, FLAGS, .unit = "mode" },
    { "normal",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL},     0, 0, FLAGS, .unit = "mode" },
    { "or",         "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR},         0, 0, FLAGS, .unit = "mode" },
    { "overlay",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY},    0, 0, FLAGS, .unit = "mode" },
    { "phoenix",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX},    0, 0, FLAGS, .unit = "mode" },
    { "pinlight",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT},   0, 0, FLAGS, .unit = "mode" },
    { "reflect",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT},    0, 0, FLAGS, .unit = "mode" },
    { "screen",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN},     0, 0, FLAGS, .unit = "mode" },
    { "softlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT},  0, 0, FLAGS, .unit = "mode" },
    { "subtract",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT},   0, 0, FLAGS, .unit = "mode" },
    { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, .unit = "mode" },
    { "xor",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR},        0, 0, FLAGS, .unit = "mode" },
    { "softdifference","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTDIFFERENCE}, 0, 0, FLAGS, .unit = "mode" },
    { "geometric",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GEOMETRIC},  0, 0, FLAGS, .unit = "mode" },
    { "harmonic",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARMONIC},   0, 0, FLAGS, .unit = "mode" },
    { "bleach",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BLEACH},     0, 0, FLAGS, .unit = "mode" },
    { "stain",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_STAIN},      0, 0, FLAGS, .unit = "mode" },
    { "interpolate","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_INTERPOLATE},0, 0, FLAGS, .unit = "mode" },
    { "hardoverlay","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDOVERLAY},0, 0, FLAGS, .unit = "mode" },
    { "c0_expr",  "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c1_expr",  "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c2_expr",  "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c3_expr",  "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "c0_opacity",  "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c1_opacity",  "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c2_opacity",  "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c3_opacity",  "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { NULL }
};
128 | |||
FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs);

/*
 * Generate blend_expr_{8bit,16bit,32bit}(): evaluate the user expression
 * once per pixel with A/B (aliased as TOP/BOTTOM) set to the two source
 * samples.  Generic but slow compared with the mode-based kernels.
 * starty offsets VAR_Y so slice jobs report absolute plane coordinates.
 */
#define DEFINE_BLEND_EXPR(type, name, div)                                     \
static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize,    \
                               const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
                               uint8_t *_dst, ptrdiff_t dst_linesize,          \
                               ptrdiff_t width, ptrdiff_t height,              \
                               FilterParams *param, SliceParams *sliceparam)   \
{                                                                              \
    const type *top = (const type*)_top;                                       \
    const type *bottom = (const type*)_bottom;                                 \
    double *values = sliceparam->values;                                       \
    int starty = sliceparam->starty;                                           \
    type *dst = (type*)_dst;                                                   \
    AVExpr *e = sliceparam->e;                                                 \
    int y, x;                                                                  \
    dst_linesize /= div;                                                       \
    top_linesize /= div;                                                       \
    bottom_linesize /= div;                                                    \
                                                                               \
    for (y = 0; y < height; y++) {                                             \
        values[VAR_Y] = y + starty;                                            \
        for (x = 0; x < width; x++) {                                          \
            values[VAR_X]      = x;                                            \
            values[VAR_TOP]    = values[VAR_A] = top[x];                       \
            values[VAR_BOTTOM] = values[VAR_B] = bottom[x];                    \
            dst[x] = av_expr_eval(e, values, NULL);                            \
        }                                                                      \
        dst    += dst_linesize;                                                \
        top    += top_linesize;                                                \
        bottom += bottom_linesize;                                             \
    }                                                                          \
}

DEFINE_BLEND_EXPR(uint8_t, 8bit, 1)
DEFINE_BLEND_EXPR(uint16_t, 16bit, 2)
DEFINE_BLEND_EXPR(float, 32bit, 4)
166 | |||
167 | 1323 | static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
168 | { | ||
169 | 1323 | ThreadData *td = arg; | |
170 | 1323 | int slice_start = (td->h * jobnr ) / nb_jobs; | |
171 | 1323 | int slice_end = (td->h * (jobnr+1)) / nb_jobs; | |
172 | 1323 | int height = slice_end - slice_start; | |
173 | 1323 | const uint8_t *top = td->top->data[td->plane]; | |
174 | 1323 | const uint8_t *bottom = td->bottom->data[td->plane]; | |
175 | 1323 | uint8_t *dst = td->dst->data[td->plane]; | |
176 | double values[VAR_VARS_NB]; | ||
177 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1323 times.
|
1323 | SliceParams sliceparam = {.values = &values[0], .starty = slice_start, .e = td->param->e ? td->param->e[jobnr] : NULL}; |
178 | |||
179 | 1323 | values[VAR_N] = td->inlink->frame_count_out; | |
180 |
1/2✓ Branch 0 taken 1323 times.
✗ Branch 1 not taken.
|
1323 | values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base); |
181 | 1323 | values[VAR_W] = td->w; | |
182 | 1323 | values[VAR_H] = td->h; | |
183 | 1323 | values[VAR_SW] = td->w / (double)td->dst->width; | |
184 | 1323 | values[VAR_SH] = td->h / (double)td->dst->height; | |
185 | |||
186 | 1323 | td->param->blend(top + slice_start * td->top->linesize[td->plane], | |
187 | 1323 | td->top->linesize[td->plane], | |
188 | 1323 | bottom + slice_start * td->bottom->linesize[td->plane], | |
189 | 1323 | td->bottom->linesize[td->plane], | |
190 | 1323 | dst + slice_start * td->dst->linesize[td->plane], | |
191 | 1323 | td->dst->linesize[td->plane], | |
192 | 1323 | td->w, height, td->param, &sliceparam); | |
193 | 1323 | return 0; | |
194 | } | ||
195 | |||
196 | 49 | static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf, | |
197 | const AVFrame *bottom_buf) | ||
198 | { | ||
199 | 49 | BlendContext *s = ctx->priv; | |
200 | 49 | AVFilterLink *inlink = ctx->inputs[0]; | |
201 | 49 | AVFilterLink *outlink = ctx->outputs[0]; | |
202 | AVFrame *dst_buf; | ||
203 | int plane; | ||
204 | |||
205 | 49 | dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
206 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
|
49 | if (!dst_buf) |
207 | ✗ | return top_buf; | |
208 | |||
209 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 49 times.
|
49 | if (av_frame_copy_props(dst_buf, top_buf) < 0) { |
210 | ✗ | av_frame_free(&dst_buf); | |
211 | ✗ | return top_buf; | |
212 | } | ||
213 | |||
214 |
2/2✓ Branch 0 taken 147 times.
✓ Branch 1 taken 49 times.
|
196 | for (plane = 0; plane < s->nb_planes; plane++) { |
215 |
4/4✓ Branch 0 taken 98 times.
✓ Branch 1 taken 49 times.
✓ Branch 2 taken 49 times.
✓ Branch 3 taken 49 times.
|
147 | int hsub = plane == 1 || plane == 2 ? s->hsub : 0; |
216 |
4/4✓ Branch 0 taken 98 times.
✓ Branch 1 taken 49 times.
✓ Branch 2 taken 49 times.
✓ Branch 3 taken 49 times.
|
147 | int vsub = plane == 1 || plane == 2 ? s->vsub : 0; |
217 | 147 | int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub); | |
218 | 147 | int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub); | |
219 | 147 | FilterParams *param = &s->params[plane]; | |
220 | 147 | ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf, | |
221 | .w = outw, .h = outh, .param = param, .plane = plane, | ||
222 | .inlink = inlink }; | ||
223 | |||
224 | 147 | ff_filter_execute(ctx, filter_slice, &td, NULL, | |
225 | 147 | FFMIN(outh, s->nb_threads)); | |
226 | } | ||
227 | |||
228 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
|
49 | if (!s->tblend) |
229 | ✗ | av_frame_free(&top_buf); | |
230 | |||
231 | 49 | return dst_buf; | |
232 | } | ||
233 | |||
234 | ✗ | static int blend_frame_for_dualinput(FFFrameSync *fs) | |
235 | { | ||
236 | ✗ | AVFilterContext *ctx = fs->parent; | |
237 | AVFrame *top_buf, *bottom_buf, *dst_buf; | ||
238 | int ret; | ||
239 | |||
240 | ✗ | ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf); | |
241 | ✗ | if (ret < 0) | |
242 | ✗ | return ret; | |
243 | ✗ | if (!bottom_buf) | |
244 | ✗ | return ff_filter_frame(ctx->outputs[0], top_buf); | |
245 | ✗ | dst_buf = blend_frame(ctx, top_buf, bottom_buf); | |
246 | ✗ | return ff_filter_frame(ctx->outputs[0], dst_buf); | |
247 | } | ||
248 | |||
249 | 2 | static av_cold int init(AVFilterContext *ctx) | |
250 | { | ||
251 | 2 | BlendContext *s = ctx->priv; | |
252 | |||
253 | 2 | s->tblend = !strcmp(ctx->filter->name, "tblend"); | |
254 | 2 | s->nb_threads = ff_filter_get_nb_threads(ctx); | |
255 | |||
256 | 2 | s->fs.on_event = blend_frame_for_dualinput; | |
257 | 2 | return 0; | |
258 | } | ||
259 | |||
/* Planar YUV/GBR/gray formats at 8-16 bit plus 32-bit float variants;
 * packed and bayer formats are not supported. */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GRAY10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GRAY12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_GBRP14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32,
    AV_PIX_FMT_NONE
};
280 | |||
281 | 2 | static av_cold void uninit(AVFilterContext *ctx) | |
282 | { | ||
283 | 2 | BlendContext *s = ctx->priv; | |
284 | int i; | ||
285 | |||
286 | 2 | ff_framesync_uninit(&s->fs); | |
287 | 2 | av_frame_free(&s->prev_frame); | |
288 | |||
289 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++) { |
290 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
|
8 | if (s->params[i].e) { |
291 | ✗ | for (int j = 0; j < s->nb_threads; j++) | |
292 | ✗ | av_expr_free(s->params[i].e[j]); | |
293 | ✗ | av_freep(&s->params[i].e); | |
294 | } | ||
295 | } | ||
296 | |||
297 | 2 | } | |
298 | |||
/**
 * Derive the per-component blend callbacks from the current option values.
 *
 * Called from config_output() and again after each process_command(), so
 * it must be safe to run repeatedly: expressions are freed and re-parsed,
 * and already-allocated per-thread arrays are reused.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int config_params(AVFilterContext *ctx)
{
    BlendContext *s = ctx->priv;
    int ret;

    for (int plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
        FilterParams *param = &s->params[plane];

        /* the "all_*" convenience options override per-component settings */
        if (s->all_mode >= 0)
            param->mode = s->all_mode;
        if (s->all_opacity < 1)
            param->opacity = s->all_opacity;

        ff_blend_init(param, s->depth);

        if (s->all_expr && !param->expr_str) {
            param->expr_str = av_strdup(s->all_expr);
            if (!param->expr_str)
                return AVERROR(ENOMEM);
        }
        if (param->expr_str) {
            /* one AVExpr per slice-thread job: evaluation is stateful */
            if (!param->e) {
                param->e = av_calloc(s->nb_threads, sizeof(*param->e));
                if (!param->e)
                    return AVERROR(ENOMEM);
            }
            for (int i = 0; i < s->nb_threads; i++) {
                av_expr_free(param->e[i]);
                param->e[i] = NULL;
                ret = av_expr_parse(&param->e[i], param->expr_str, var_names,
                                    NULL, NULL, NULL, NULL, 0, ctx);
                if (ret < 0)
                    return ret;
            }
            /* an expression replaces the mode-based blend function chosen
             * by ff_blend_init() above */
            param->blend = s->depth > 8 ? s->depth > 16 ? blend_expr_32bit : blend_expr_16bit : blend_expr_8bit;
        }
    }

    return 0;
}
339 | |||
/**
 * Negotiate the output link from the top input and set up blending.
 *
 * For "blend", validates that both inputs have identical dimensions and
 * configures framesync (which then owns the output time base).  For
 * "tblend" only the single input's properties are propagated.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *toplink = ctx->inputs[TOP];
    BlendContext *s = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
    int ret;

    if (!s->tblend) {
        AVFilterLink *bottomlink = ctx->inputs[BOTTOM];

        if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) {
            av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
                   "(size %dx%d) do not match the corresponding "
                   "second input link %s parameters (size %dx%d)\n",
                   ctx->input_pads[TOP].name, toplink->w, toplink->h,
                   ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h);
            return AVERROR(EINVAL);
        }
    }

    outlink->w = toplink->w;
    outlink->h = toplink->h;
    outlink->time_base = toplink->time_base;
    outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
    outlink->frame_rate = toplink->frame_rate;

    s->hsub = pix_desc->log2_chroma_w;
    s->vsub = pix_desc->log2_chroma_h;

    s->depth = pix_desc->comp[0].depth;
    s->nb_planes = av_pix_fmt_count_planes(toplink->format);

    if (!s->tblend)
        if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
            return ret;

    ret = config_params(ctx);
    if (ret < 0)
        return ret;

    /* tblend has no second input to synchronize */
    if (s->tblend)
        return 0;

    ret = ff_framesync_configure(&s->fs);
    /* framesync chooses the common time base for both inputs */
    outlink->time_base = s->fs.time_base;

    return ret;
}
389 | |||
390 | ✗ | static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, | |
391 | char *res, int res_len, int flags) | ||
392 | { | ||
393 | int ret; | ||
394 | |||
395 | ✗ | ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags); | |
396 | ✗ | if (ret < 0) | |
397 | ✗ | return ret; | |
398 | |||
399 | ✗ | return config_params(ctx); | |
400 | } | ||
401 | |||
/* Single video output shared by both the "blend" and "tblend" filters. */
static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};
409 | |||
#if CONFIG_BLEND_FILTER

/* Drive the two-input filter through framesync, which calls
 * blend_frame_for_dualinput() when a pair of frames is ready. */
static int activate(AVFilterContext *ctx)
{
    BlendContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static const AVFilterPad blend_inputs[] = {
    {
        .name          = "top",
        .type          = AVMEDIA_TYPE_VIDEO,
    },{
        .name          = "bottom",
        .type          = AVMEDIA_TYPE_VIDEO,
    },
};
427 | |||
const AVFilter ff_vf_blend = {
    .name          = "blend",
    .description   = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .preinit       = blend_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(BlendContext),
    .activate      = activate,
    FILTER_INPUTS(blend_inputs),
    FILTER_OUTPUTS(blend_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .priv_class    = &blend_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};

#endif
445 | |||
446 | #if CONFIG_TBLEND_FILTER | ||
447 | |||
448 | 50 | static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame) | |
449 | { | ||
450 | 50 | AVFilterContext *ctx = inlink->dst; | |
451 | 50 | BlendContext *s = ctx->priv; | |
452 | 50 | AVFilterLink *outlink = ctx->outputs[0]; | |
453 | |||
454 |
2/2✓ Branch 0 taken 49 times.
✓ Branch 1 taken 1 times.
|
50 | if (s->prev_frame) { |
455 | AVFrame *out; | ||
456 | |||
457 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 49 times.
|
49 | if (ctx->is_disabled) |
458 | ✗ | out = av_frame_clone(frame); | |
459 | else | ||
460 | 49 | out = blend_frame(ctx, frame, s->prev_frame); | |
461 | 49 | av_frame_free(&s->prev_frame); | |
462 | 49 | s->prev_frame = frame; | |
463 | 49 | return ff_filter_frame(outlink, out); | |
464 | } | ||
465 | 1 | s->prev_frame = frame; | |
466 | 1 | return 0; | |
467 | } | ||
468 | |||
/* tblend reuses blend's option table under its own class name. */
AVFILTER_DEFINE_CLASS_EXT(tblend, "tblend", blend_options);

static const AVFilterPad tblend_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tblend_filter_frame,
    },
};
478 | |||
const AVFilter ff_vf_tblend = {
    .name          = "tblend",
    .description   = NULL_IF_CONFIG_SMALL("Blend successive frames."),
    .priv_size     = sizeof(BlendContext),
    .priv_class    = &tblend_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(tblend_inputs),
    FILTER_OUTPUTS(blend_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};

#endif
494 |