Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2013 Paul B Mahol | ||
3 | * | ||
4 | * This file is part of FFmpeg. | ||
5 | * | ||
6 | * FFmpeg is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Lesser General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2.1 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * FFmpeg is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Lesser General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Lesser General Public | ||
17 | * License along with FFmpeg; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | */ | ||
20 | |||
21 | #include "config_components.h" | ||
22 | |||
23 | #include "libavutil/eval.h" | ||
24 | #include "libavutil/mem.h" | ||
25 | #include "libavutil/opt.h" | ||
26 | #include "libavutil/pixfmt.h" | ||
27 | #include "avfilter.h" | ||
28 | #include "filters.h" | ||
29 | #include "framesync.h" | ||
30 | #include "vf_blend_init.h" | ||
31 | #include "video.h" | ||
32 | #include "blend.h" | ||
33 | |||
34 | #define TOP 0 | ||
35 | #define BOTTOM 1 | ||
36 | |||
37 | typedef struct BlendContext { | ||
38 | const AVClass *class; | ||
39 | FFFrameSync fs; | ||
40 | int hsub, vsub; ///< chroma subsampling values | ||
41 | int nb_planes; | ||
42 | char *all_expr; | ||
43 | enum BlendMode all_mode; | ||
44 | double all_opacity; | ||
45 | |||
46 | int depth; | ||
47 | FilterParams params[4]; | ||
48 | int tblend; | ||
49 | AVFrame *prev_frame; /* only used with tblend */ | ||
50 | int nb_threads; | ||
51 | } BlendContext; | ||
52 | |||
53 | static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL }; | ||
54 | enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB }; | ||
55 | |||
56 | typedef struct ThreadData { | ||
57 | const AVFrame *top, *bottom; | ||
58 | AVFrame *dst; | ||
59 | AVFilterLink *inlink; | ||
60 | int plane; | ||
61 | int w, h; | ||
62 | FilterParams *param; | ||
63 | } ThreadData; | ||
64 | |||
65 | #define OFFSET(x) offsetof(BlendContext, x) | ||
66 | #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM | ||
67 | |||
68 | static const AVOption blend_options[] = { | ||
69 | { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
70 | { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
71 | { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
72 | { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
73 | { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, .unit = "mode" }, | ||
74 | { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, .unit = "mode" }, | ||
75 | { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
76 | { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
77 | { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, .unit = "mode" }, | ||
78 | { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
79 | { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, .unit = "mode" }, | ||
80 | { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, .unit = "mode" }, | ||
81 | { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, .unit = "mode" }, | ||
82 | { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" }, | ||
83 | { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, .unit = "mode" }, | ||
84 | { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, .unit = "mode" }, | ||
85 | { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, .unit = "mode" }, | ||
86 | { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, .unit = "mode" }, | ||
87 | { "extremity", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY}, 0, 0, FLAGS, .unit = "mode" }, | ||
88 | { "freeze", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE}, 0, 0, FLAGS, .unit = "mode" }, | ||
89 | { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, .unit = "mode" }, | ||
90 | { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
91 | { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, .unit = "mode" }, | ||
92 | { "heat", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT}, 0, 0, FLAGS, .unit = "mode" }, | ||
93 | { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, .unit = "mode" }, | ||
94 | { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, .unit = "mode" }, | ||
95 | { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, .unit = "mode" }, | ||
96 | { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, .unit = "mode" }, | ||
97 | { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, .unit = "mode" }, | ||
98 | { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, .unit = "mode" }, | ||
99 | { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, .unit = "mode" }, | ||
100 | { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, .unit = "mode" }, | ||
101 | { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, .unit = "mode" }, | ||
102 | { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
103 | { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, .unit = "mode" }, | ||
104 | { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, .unit = "mode" }, | ||
105 | { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
106 | { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, .unit = "mode" }, | ||
107 | { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, .unit = "mode" }, | ||
108 | { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, .unit = "mode" }, | ||
109 | { "softdifference","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTDIFFERENCE}, 0, 0, FLAGS, .unit = "mode" }, | ||
110 | { "geometric", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GEOMETRIC}, 0, 0, FLAGS, .unit = "mode" }, | ||
111 | { "harmonic", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARMONIC}, 0, 0, FLAGS, .unit = "mode" }, | ||
112 | { "bleach", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BLEACH}, 0, 0, FLAGS, .unit = "mode" }, | ||
113 | { "stain", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_STAIN}, 0, 0, FLAGS, .unit = "mode" }, | ||
114 | { "interpolate","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_INTERPOLATE},0, 0, FLAGS, .unit = "mode" }, | ||
115 | { "hardoverlay","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDOVERLAY},0, 0, FLAGS, .unit = "mode" }, | ||
116 | { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
117 | { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
118 | { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
119 | { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
120 | { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, | ||
121 | { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
122 | { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
123 | { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
124 | { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
125 | { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, | ||
126 | { NULL } | ||
127 | }; | ||
128 | |||
129 | ✗ | FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs); | |
130 | |||
131 | #define DEFINE_BLEND_EXPR(type, name, div) \ | ||
132 | static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize, \ | ||
133 | const uint8_t *_bottom, ptrdiff_t bottom_linesize, \ | ||
134 | uint8_t *_dst, ptrdiff_t dst_linesize, \ | ||
135 | ptrdiff_t width, ptrdiff_t height, \ | ||
136 | FilterParams *param, SliceParams *sliceparam) \ | ||
137 | { \ | ||
138 | const type *top = (const type*)_top; \ | ||
139 | const type *bottom = (const type*)_bottom; \ | ||
140 | double *values = sliceparam->values; \ | ||
141 | int starty = sliceparam->starty; \ | ||
142 | type *dst = (type*)_dst; \ | ||
143 | AVExpr *e = sliceparam->e; \ | ||
144 | int y, x; \ | ||
145 | dst_linesize /= div; \ | ||
146 | top_linesize /= div; \ | ||
147 | bottom_linesize /= div; \ | ||
148 | \ | ||
149 | for (y = 0; y < height; y++) { \ | ||
150 | values[VAR_Y] = y + starty; \ | ||
151 | for (x = 0; x < width; x++) { \ | ||
152 | values[VAR_X] = x; \ | ||
153 | values[VAR_TOP] = values[VAR_A] = top[x]; \ | ||
154 | values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; \ | ||
155 | dst[x] = av_expr_eval(e, values, NULL); \ | ||
156 | } \ | ||
157 | dst += dst_linesize; \ | ||
158 | top += top_linesize; \ | ||
159 | bottom += bottom_linesize; \ | ||
160 | } \ | ||
161 | } | ||
162 | |||
163 | ✗ | DEFINE_BLEND_EXPR(uint8_t, 8bit, 1) | |
164 | ✗ | DEFINE_BLEND_EXPR(uint16_t, 16bit, 2) | |
165 | ✗ | DEFINE_BLEND_EXPR(float, 32bit, 4) | |
166 | |||
167 | 1323 | static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
168 | { | ||
169 | 1323 | ThreadData *td = arg; | |
170 | 1323 | int slice_start = (td->h * jobnr ) / nb_jobs; | |
171 | 1323 | int slice_end = (td->h * (jobnr+1)) / nb_jobs; | |
172 | 1323 | int height = slice_end - slice_start; | |
173 | 1323 | const uint8_t *top = td->top->data[td->plane]; | |
174 | 1323 | const uint8_t *bottom = td->bottom->data[td->plane]; | |
175 | 1323 | uint8_t *dst = td->dst->data[td->plane]; | |
176 | 1323 | FilterLink *inl = ff_filter_link(td->inlink); | |
177 | double values[VAR_VARS_NB]; | ||
178 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1323 times. | 1323 | SliceParams sliceparam = {.values = &values[0], .starty = slice_start, .e = td->param->e ? td->param->e[jobnr] : NULL}; |
179 | |||
180 | 1323 | values[VAR_N] = inl->frame_count_out; | |
181 | 1/2 ✓ Branch 0 taken 1323 times. ✗ Branch 1 not taken. | 1323 | values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base); |
182 | 1323 | values[VAR_W] = td->w; | |
183 | 1323 | values[VAR_H] = td->h; | |
184 | 1323 | values[VAR_SW] = td->w / (double)td->dst->width; | |
185 | 1323 | values[VAR_SH] = td->h / (double)td->dst->height; | |
186 | |||
187 | 1323 | td->param->blend(top + slice_start * td->top->linesize[td->plane], | |
188 | 1323 | td->top->linesize[td->plane], | |
189 | 1323 | bottom + slice_start * td->bottom->linesize[td->plane], | |
190 | 1323 | td->bottom->linesize[td->plane], | |
191 | 1323 | dst + slice_start * td->dst->linesize[td->plane], | |
192 | 1323 | td->dst->linesize[td->plane], | |
193 | 1323 | td->w, height, td->param, &sliceparam); | |
194 | 1323 | return 0; | |
195 | } | ||
196 | |||
197 | 49 | static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf, | |
198 | const AVFrame *bottom_buf) | ||
199 | { | ||
200 | 49 | BlendContext *s = ctx->priv; | |
201 | 49 | AVFilterLink *inlink = ctx->inputs[0]; | |
202 | 49 | AVFilterLink *outlink = ctx->outputs[0]; | |
203 | AVFrame *dst_buf; | ||
204 | int plane; | ||
205 | |||
206 | 49 | dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
207 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 49 times. | 49 | if (!dst_buf) |
208 | ✗ | return top_buf; | |
209 | |||
210 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 49 times. | 49 | if (av_frame_copy_props(dst_buf, top_buf) < 0) { |
211 | ✗ | av_frame_free(&dst_buf); | |
212 | ✗ | return top_buf; | |
213 | } | ||
214 | |||
215 | 2/2 ✓ Branch 0 taken 147 times. ✓ Branch 1 taken 49 times. | 196 | for (plane = 0; plane < s->nb_planes; plane++) { |
216 | 4/4 ✓ Branch 0 taken 98 times. ✓ Branch 1 taken 49 times. ✓ Branch 2 taken 49 times. ✓ Branch 3 taken 49 times. | 147 | int hsub = plane == 1 || plane == 2 ? s->hsub : 0; |
217 | 4/4 ✓ Branch 0 taken 98 times. ✓ Branch 1 taken 49 times. ✓ Branch 2 taken 49 times. ✓ Branch 3 taken 49 times. | 147 | int vsub = plane == 1 || plane == 2 ? s->vsub : 0; |
218 | 147 | int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub); | |
219 | 147 | int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub); | |
220 | 147 | FilterParams *param = &s->params[plane]; | |
221 | 147 | ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf, | |
222 | .w = outw, .h = outh, .param = param, .plane = plane, | ||
223 | .inlink = inlink }; | ||
224 | |||
225 | 147 | ff_filter_execute(ctx, filter_slice, &td, NULL, | |
226 | 147 | FFMIN(outh, s->nb_threads)); | |
227 | } | ||
228 | |||
229 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 49 times. | 49 | if (!s->tblend) |
230 | ✗ | av_frame_free(&top_buf); | |
231 | |||
232 | 49 | return dst_buf; | |
233 | } | ||
234 | |||
235 | ✗ | static int blend_frame_for_dualinput(FFFrameSync *fs) | |
236 | { | ||
237 | ✗ | AVFilterContext *ctx = fs->parent; | |
238 | AVFrame *top_buf, *bottom_buf, *dst_buf; | ||
239 | int ret; | ||
240 | |||
241 | ✗ | ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf); | |
242 | ✗ | if (ret < 0) | |
243 | ✗ | return ret; | |
244 | ✗ | if (!bottom_buf) | |
245 | ✗ | return ff_filter_frame(ctx->outputs[0], top_buf); | |
246 | ✗ | dst_buf = blend_frame(ctx, top_buf, bottom_buf); | |
247 | ✗ | return ff_filter_frame(ctx->outputs[0], dst_buf); | |
248 | } | ||
249 | |||
250 | 2 | static av_cold int init(AVFilterContext *ctx) | |
251 | { | ||
252 | 2 | BlendContext *s = ctx->priv; | |
253 | |||
254 | 2 | s->tblend = !strcmp(ctx->filter->name, "tblend"); | |
255 | 2 | s->nb_threads = ff_filter_get_nb_threads(ctx); | |
256 | |||
257 | 2 | s->fs.on_event = blend_frame_for_dualinput; | |
258 | 2 | return 0; | |
259 | } | ||
260 | |||
261 | static const enum AVPixelFormat pix_fmts[] = { | ||
262 | AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P, | ||
263 | AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P, | ||
264 | AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, | ||
265 | AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, | ||
266 | AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, | ||
267 | AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9, | ||
268 | AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10, | ||
269 | AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10, | ||
270 | AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GRAY10, | ||
271 | AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, | ||
272 | AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12, | ||
273 | AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GRAY12, | ||
274 | AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_GBRP14, | ||
275 | AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, | ||
276 | AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, | ||
277 | AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16, | ||
278 | AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32, | ||
279 | AV_PIX_FMT_NONE | ||
280 | }; | ||
281 | |||
282 | 2 | static av_cold void uninit(AVFilterContext *ctx) | |
283 | { | ||
284 | 2 | BlendContext *s = ctx->priv; | |
285 | int i; | ||
286 | |||
287 | 2 | ff_framesync_uninit(&s->fs); | |
288 | 2 | av_frame_free(&s->prev_frame); | |
289 | |||
290 | 2/2 ✓ Branch 0 taken 8 times. ✓ Branch 1 taken 2 times. | 10 | for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++) { |
291 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8 times. | 8 | if (s->params[i].e) { |
292 | ✗ | for (int j = 0; j < s->nb_threads; j++) | |
293 | ✗ | av_expr_free(s->params[i].e[j]); | |
294 | ✗ | av_freep(&s->params[i].e); | |
295 | } | ||
296 | } | ||
297 | |||
298 | 2 | } | |
299 | |||
300 | 1 | static int config_params(AVFilterContext *ctx) | |
301 | { | ||
302 | 1 | BlendContext *s = ctx->priv; | |
303 | int ret; | ||
304 | |||
305 | 2/2 ✓ Branch 0 taken 4 times. ✓ Branch 1 taken 1 times. | 5 | for (int plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) { |
306 | 4 | FilterParams *param = &s->params[plane]; | |
307 | |||
308 | 1/2 ✓ Branch 0 taken 4 times. ✗ Branch 1 not taken. | 4 | if (s->all_mode >= 0) |
309 | 4 | param->mode = s->all_mode; | |
310 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 4 times. | 4 | if (s->all_opacity < 1) |
311 | ✗ | param->opacity = s->all_opacity; | |
312 | |||
313 | 4 | ff_blend_init(param, s->depth); | |
314 | |||
315 | 1/4 ✗ Branch 0 not taken. ✓ Branch 1 taken 4 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. | 4 | if (s->all_expr && !param->expr_str) { |
316 | ✗ | param->expr_str = av_strdup(s->all_expr); | |
317 | ✗ | if (!param->expr_str) | |
318 | ✗ | return AVERROR(ENOMEM); | |
319 | } | ||
320 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 4 times. | 4 | if (param->expr_str) { |
321 | ✗ | if (!param->e) { | |
322 | ✗ | param->e = av_calloc(s->nb_threads, sizeof(*param->e)); | |
323 | ✗ | if (!param->e) | |
324 | ✗ | return AVERROR(ENOMEM); | |
325 | } | ||
326 | ✗ | for (int i = 0; i < s->nb_threads; i++) { | |
327 | ✗ | av_expr_free(param->e[i]); | |
328 | ✗ | param->e[i] = NULL; | |
329 | ✗ | ret = av_expr_parse(¶m->e[i], param->expr_str, var_names, | |
330 | NULL, NULL, NULL, NULL, 0, ctx); | ||
331 | ✗ | if (ret < 0) | |
332 | ✗ | return ret; | |
333 | } | ||
334 | ✗ | param->blend = s->depth > 8 ? s->depth > 16 ? blend_expr_32bit : blend_expr_16bit : blend_expr_8bit; | |
335 | } | ||
336 | } | ||
337 | |||
338 | 1 | return 0; | |
339 | } | ||
340 | |||
341 | 1 | static int config_output(AVFilterLink *outlink) | |
342 | { | ||
343 | 1 | FilterLink *outl = ff_filter_link(outlink); | |
344 | 1 | AVFilterContext *ctx = outlink->src; | |
345 | 1 | AVFilterLink *toplink = ctx->inputs[TOP]; | |
346 | 1 | FilterLink *tl = ff_filter_link(toplink); | |
347 | 1 | BlendContext *s = ctx->priv; | |
348 | 1 | const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format); | |
349 | int ret; | ||
350 | |||
351 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (!s->tblend) { |
352 | ✗ | AVFilterLink *bottomlink = ctx->inputs[BOTTOM]; | |
353 | |||
354 | ✗ | if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) { | |
355 | ✗ | av_log(ctx, AV_LOG_ERROR, "First input link %s parameters " | |
356 | "(size %dx%d) do not match the corresponding " | ||
357 | "second input link %s parameters (size %dx%d)\n", | ||
358 | ✗ | ctx->input_pads[TOP].name, toplink->w, toplink->h, | |
359 | ✗ | ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h); | |
360 | ✗ | return AVERROR(EINVAL); | |
361 | } | ||
362 | } | ||
363 | |||
364 | 1 | outlink->w = toplink->w; | |
365 | 1 | outlink->h = toplink->h; | |
366 | 1 | outlink->time_base = toplink->time_base; | |
367 | 1 | outlink->sample_aspect_ratio = toplink->sample_aspect_ratio; | |
368 | 1 | outl->frame_rate = tl->frame_rate; | |
369 | |||
370 | 1 | s->hsub = pix_desc->log2_chroma_w; | |
371 | 1 | s->vsub = pix_desc->log2_chroma_h; | |
372 | |||
373 | 1 | s->depth = pix_desc->comp[0].depth; | |
374 | 1 | s->nb_planes = av_pix_fmt_count_planes(toplink->format); | |
375 | |||
376 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (!s->tblend) |
377 | ✗ | if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0) | |
378 | ✗ | return ret; | |
379 | |||
380 | 1 | ret = config_params(ctx); | |
381 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1 times. | 1 | if (ret < 0) |
382 | ✗ | return ret; | |
383 | |||
384 | 1/2 ✓ Branch 0 taken 1 times. ✗ Branch 1 not taken. | 1 | if (s->tblend) |
385 | 1 | return 0; | |
386 | |||
387 | ✗ | ret = ff_framesync_configure(&s->fs); | |
388 | ✗ | outlink->time_base = s->fs.time_base; | |
389 | |||
390 | ✗ | return ret; | |
391 | } | ||
392 | |||
393 | ✗ | static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, | |
394 | char *res, int res_len, int flags) | ||
395 | { | ||
396 | int ret; | ||
397 | |||
398 | ✗ | ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags); | |
399 | ✗ | if (ret < 0) | |
400 | ✗ | return ret; | |
401 | |||
402 | ✗ | return config_params(ctx); | |
403 | } | ||
404 | |||
405 | static const AVFilterPad blend_outputs[] = { | ||
406 | { | ||
407 | .name = "default", | ||
408 | .type = AVMEDIA_TYPE_VIDEO, | ||
409 | .config_props = config_output, | ||
410 | }, | ||
411 | }; | ||
412 | |||
413 | #if CONFIG_BLEND_FILTER | ||
414 | |||
415 | ✗ | static int activate(AVFilterContext *ctx) | |
416 | { | ||
417 | ✗ | BlendContext *s = ctx->priv; | |
418 | ✗ | return ff_framesync_activate(&s->fs); | |
419 | } | ||
420 | |||
421 | static const AVFilterPad blend_inputs[] = { | ||
422 | { | ||
423 | .name = "top", | ||
424 | .type = AVMEDIA_TYPE_VIDEO, | ||
425 | },{ | ||
426 | .name = "bottom", | ||
427 | .type = AVMEDIA_TYPE_VIDEO, | ||
428 | }, | ||
429 | }; | ||
430 | |||
431 | const AVFilter ff_vf_blend = { | ||
432 | .name = "blend", | ||
433 | .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."), | ||
434 | .preinit = blend_framesync_preinit, | ||
435 | .init = init, | ||
436 | .uninit = uninit, | ||
437 | .priv_size = sizeof(BlendContext), | ||
438 | .activate = activate, | ||
439 | FILTER_INPUTS(blend_inputs), | ||
440 | FILTER_OUTPUTS(blend_outputs), | ||
441 | FILTER_PIXFMTS_ARRAY(pix_fmts), | ||
442 | .priv_class = &blend_class, | ||
443 | .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS, | ||
444 | .process_command = process_command, | ||
445 | }; | ||
446 | |||
447 | #endif | ||
448 | |||
449 | #if CONFIG_TBLEND_FILTER | ||
450 | |||
451 | 50 | static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame) | |
452 | { | ||
453 | 50 | AVFilterContext *ctx = inlink->dst; | |
454 | 50 | BlendContext *s = ctx->priv; | |
455 | 50 | AVFilterLink *outlink = ctx->outputs[0]; | |
456 | |||
457 | 2/2 ✓ Branch 0 taken 49 times. ✓ Branch 1 taken 1 times. | 50 | if (s->prev_frame) { |
458 | AVFrame *out; | ||
459 | |||
460 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 49 times. | 49 | if (ctx->is_disabled) |
461 | ✗ | out = av_frame_clone(frame); | |
462 | else | ||
463 | 49 | out = blend_frame(ctx, frame, s->prev_frame); | |
464 | 49 | av_frame_free(&s->prev_frame); | |
465 | 49 | s->prev_frame = frame; | |
466 | 49 | return ff_filter_frame(outlink, out); | |
467 | } | ||
468 | 1 | s->prev_frame = frame; | |
469 | 1 | return 0; | |
470 | } | ||
471 | |||
472 | AVFILTER_DEFINE_CLASS_EXT(tblend, "tblend", blend_options); | ||
473 | |||
474 | static const AVFilterPad tblend_inputs[] = { | ||
475 | { | ||
476 | .name = "default", | ||
477 | .type = AVMEDIA_TYPE_VIDEO, | ||
478 | .filter_frame = tblend_filter_frame, | ||
479 | }, | ||
480 | }; | ||
481 | |||
482 | const AVFilter ff_vf_tblend = { | ||
483 | .name = "tblend", | ||
484 | .description = NULL_IF_CONFIG_SMALL("Blend successive frames."), | ||
485 | .priv_size = sizeof(BlendContext), | ||
486 | .priv_class = &tblend_class, | ||
487 | .init = init, | ||
488 | .uninit = uninit, | ||
489 | FILTER_INPUTS(tblend_inputs), | ||
490 | FILTER_OUTPUTS(blend_outputs), | ||
491 | FILTER_PIXFMTS_ARRAY(pix_fmts), | ||
492 | .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS, | ||
493 | .process_command = process_command, | ||
494 | }; | ||
495 | |||
496 | #endif | ||
497 |
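
The expression path in this file (the `DEFINE_BLEND_EXPR` bodies and the `av_expr_parse` loop in `config_params`, all marked ✗ above) is exercised only when a `cN_expr`/`all_expr` option is set, which the covered run did not do. As a minimal standalone sketch of that path, the program below parses and evaluates one per-pixel expression with the same public libavutil eval API the filter uses; the expression `(A+B)/2`, the sample pixel values, and the compile command in the comment are illustrative assumptions, not taken from the listing.

```c
/* Minimal sketch of the expression path used by vf_blend's c0_expr..c3_expr /
 * all_expr options. Assumes an installed libavutil; a typical build line is
 *   cc blend_expr_sketch.c $(pkg-config --cflags --libs libavutil)
 */
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    /* Same constant names the filter exposes to user expressions. */
    static const char *const var_names[] = {
        "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL
    };
    enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N,
           VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };

    double values[VAR_VARS_NB] = { 0 };
    AVExpr *e = NULL;

    /* Parse once (config_params does this per worker thread), then evaluate
     * per pixel, as blend_expr_8bit/16bit/32bit do in their inner loop. */
    if (av_expr_parse(&e, "(A+B)/2", var_names,
                      NULL, NULL, NULL, NULL, 0, NULL) < 0)
        return 1;

    values[VAR_TOP]    = values[VAR_A] = 200; /* hypothetical top pixel    */
    values[VAR_BOTTOM] = values[VAR_B] = 100; /* hypothetical bottom pixel */
    printf("blended = %f\n", av_expr_eval(e, values, NULL)); /* prints 150.0 */

    av_expr_free(e);
    return 0;
}
```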