Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * Copyright (c) 2011 Stefano Sabatini | ||
3 | * | ||
4 | * This file is part of FFmpeg. | ||
5 | * | ||
6 | * FFmpeg is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Lesser General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2.1 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * FFmpeg is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Lesser General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Lesser General Public | ||
17 | * License along with FFmpeg; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | */ | ||
20 | |||
21 | /** | ||
22 | * @file | ||
23 | * Compute a look-up table that maps each input value to an output | ||
24 | * value, and apply it to the input video. | ||
25 | */ | ||
26 | |||
27 | #include "config_components.h" | ||
28 | |||
29 | #include "libavutil/attributes.h" | ||
30 | #include "libavutil/bswap.h" | ||
31 | #include "libavutil/common.h" | ||
32 | #include "libavutil/eval.h" | ||
33 | #include "libavutil/mem.h" | ||
34 | #include "libavutil/opt.h" | ||
35 | #include "libavutil/pixdesc.h" | ||
36 | #include "avfilter.h" | ||
37 | #include "drawutils.h" | ||
38 | #include "filters.h" | ||
39 | #include "formats.h" | ||
40 | #include "video.h" | ||
41 | |||
42 | static const char *const var_names[] = { | ||
43 | "w", ///< width of the input video | ||
44 | "h", ///< height of the input video | ||
45 | "val", ///< input value for the pixel | ||
46 | "maxval", ///< max value for the pixel | ||
47 | "minval", ///< min value for the pixel | ||
48 | "negval", ///< negated value | ||
49 | "clipval", | ||
50 | NULL | ||
51 | }; | ||
52 | |||
53 | enum var_name { | ||
54 | VAR_W, | ||
55 | VAR_H, | ||
56 | VAR_VAL, | ||
57 | VAR_MAXVAL, | ||
58 | VAR_MINVAL, | ||
59 | VAR_NEGVAL, | ||
60 | VAR_CLIPVAL, | ||
61 | VAR_VARS_NB | ||
62 | }; | ||
63 | |||
64 | typedef struct LutContext { | ||
65 | const AVClass *class; | ||
66 | uint16_t lut[4][256 * 256]; ///< lookup table for each component | ||
67 | char *comp_expr_str[4]; | ||
68 | AVExpr *comp_expr[4]; | ||
69 | int hsub, vsub; | ||
70 | double var_values[VAR_VARS_NB]; | ||
71 | int is_rgb, is_yuv; | ||
72 | int is_planar; | ||
73 | int is_16bit; | ||
74 | int step; | ||
75 | } LutContext; | ||
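
The per-component tables above are plain value-to-value maps: once they are filled, applying the filter is just `dst = lut[comp][src]` for every sample, and each table is sized for 16-bit input (256*256 entries) so the same storage serves both 8-bit and 16-bit formats. A minimal standalone sketch of the idea (not FFmpeg code; the packed RGB24 layout and the negation mapping are assumptions made for illustration):

```c
/* Hypothetical sketch: build a per-component "negate" table and apply it to
 * two packed RGB24 pixels, mirroring the lut[4][256*256] layout used above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    static uint16_t lut[3][256 * 256];             /* oversized, like the filter's table */
    uint8_t pix[6] = { 10, 128, 250, 0, 255, 42 }; /* two RGB24 pixels */

    for (int comp = 0; comp < 3; comp++)
        for (int val = 0; val < 256; val++)
            lut[comp][val] = 255 - val;            /* "negval" with minval=0, maxval=255 */

    for (int i = 0; i < 6; i += 3)                 /* step = 3 bytes per RGB24 pixel */
        for (int comp = 0; comp < 3; comp++)
            pix[i + comp] = lut[comp][pix[i + comp]];

    printf("%d %d %d  %d %d %d\n", pix[0], pix[1], pix[2], pix[3], pix[4], pix[5]);
    return 0;
}
```
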
76 | |||
77 | #define Y 0 | ||
78 | #define U 1 | ||
79 | #define V 2 | ||
80 | #define R 0 | ||
81 | #define G 1 | ||
82 | #define B 2 | ||
83 | #define A 3 | ||
84 | |||
85 | #define OFFSET(x) offsetof(LutContext, x) | ||
86 | #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM | ||
87 | |||
88 | static const AVOption options[] = { | ||
89 | { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
90 | { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
91 | { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
92 | { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
93 | { "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
94 | { "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
95 | { "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
96 | { "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
97 | { "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
98 | { "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
99 | { "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS }, | ||
100 | { NULL } | ||
101 | }; | ||
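
Every option above is a per-component expression string; the default "clipval" simply passes the clipped input value through. In a filtergraph the options are used along the lines of the examples in the FFmpeg filter documentation (shown here for orientation, not taken from this file):

```
lutrgb="r=negval:g=negval:b=negval"
lutyuv="y=gammaval(0.5)"
```

The first negates each RGB component; the second remaps luma through the gammaval() helper defined further down while leaving chroma at the default clipval.
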
102 | |||
103 | 115 | static av_cold void uninit(AVFilterContext *ctx) | |
104 | { | ||
105 | 115 | LutContext *s = ctx->priv; | |
106 | int i; | ||
107 | |||
108 | 2/2 ✓ Branch 0 taken 460 times. ✓ Branch 1 taken 115 times. | 575 | for (i = 0; i < 4; i++) { |
109 | 460 | av_expr_free(s->comp_expr[i]); | |
110 | 460 | s->comp_expr[i] = NULL; | |
111 | 460 | av_freep(&s->comp_expr_str[i]); | |
112 | } | ||
113 | 115 | } | |
114 | |||
115 | #define YUV_FORMATS \ | ||
116 | AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, \ | ||
117 | AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, \ | ||
118 | AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \ | ||
119 | AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \ | ||
120 | AV_PIX_FMT_YUVJ440P, \ | ||
121 | AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUV420P9LE, \ | ||
122 | AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV440P10LE, \ | ||
123 | AV_PIX_FMT_YUV444P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV440P12LE, \ | ||
124 | AV_PIX_FMT_YUV444P14LE, AV_PIX_FMT_YUV422P14LE, AV_PIX_FMT_YUV420P14LE, \ | ||
125 | AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV420P16LE, \ | ||
126 | AV_PIX_FMT_YUVA444P16LE, AV_PIX_FMT_YUVA422P16LE, AV_PIX_FMT_YUVA420P16LE | ||
127 | |||
128 | #define RGB_FORMATS \ | ||
129 | AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, \ | ||
130 | AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, \ | ||
131 | AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, \ | ||
132 | AV_PIX_FMT_RGB48LE, AV_PIX_FMT_RGBA64LE, \ | ||
133 | AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, \ | ||
134 | AV_PIX_FMT_GBRP9LE, AV_PIX_FMT_GBRP10LE, \ | ||
135 | AV_PIX_FMT_GBRAP10LE, \ | ||
136 | AV_PIX_FMT_GBRP12LE, AV_PIX_FMT_GBRP14LE, \ | ||
137 | AV_PIX_FMT_GBRP16LE, AV_PIX_FMT_GBRAP12LE, \ | ||
138 | AV_PIX_FMT_GBRAP16LE | ||
139 | |||
140 | #define GRAY_FORMATS \ | ||
141 | AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9LE, AV_PIX_FMT_GRAY10LE, \ | ||
142 | AV_PIX_FMT_GRAY12LE, AV_PIX_FMT_GRAY14LE, AV_PIX_FMT_GRAY16LE | ||
143 | |||
144 | static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE }; | ||
145 | static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE }; | ||
146 | static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, GRAY_FORMATS, AV_PIX_FMT_NONE }; | ||
147 | |||
148 | 58 | static int query_formats(const AVFilterContext *ctx, | |
149 | AVFilterFormatsConfig **cfg_in, | ||
150 | AVFilterFormatsConfig **cfg_out) | ||
151 | { | ||
152 | 58 | const LutContext *s = ctx->priv; | |
153 | |||
154 | 1/2 ✓ Branch 0 taken 58 times. ✗ Branch 1 not taken. | 116 | const enum AVPixelFormat *pix_fmts = s->is_rgb ? rgb_pix_fmts : |
155 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 58 times. | 58 | s->is_yuv ? yuv_pix_fmts : |
156 | all_pix_fmts; | ||
157 | 58 | return ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, pix_fmts); | |
158 | } | ||
159 | |||
160 | /** | ||
161 | * Clip value val in the minval - maxval range. | ||
162 | */ | ||
163 | ✗ | static double clip(void *opaque, double val) | |
164 | { | ||
165 | ✗ | LutContext *s = opaque; | |
166 | ✗ | double minval = s->var_values[VAR_MINVAL]; | |
167 | ✗ | double maxval = s->var_values[VAR_MAXVAL]; | |
168 | |||
169 | ✗ | return av_clip(val, minval, maxval); | |
170 | } | ||
171 | |||
172 | /** | ||
173 | * Compute gamma correction for value val, assuming the minval-maxval | ||
174 | * range, val is clipped to a value contained in the same interval. | ||
175 | */ | ||
176 | ✗ | static double compute_gammaval(void *opaque, double gamma) | |
177 | { | ||
178 | ✗ | LutContext *s = opaque; | |
179 | ✗ | double val = s->var_values[VAR_CLIPVAL]; | |
180 | ✗ | double minval = s->var_values[VAR_MINVAL]; | |
181 | ✗ | double maxval = s->var_values[VAR_MAXVAL]; | |
182 | |||
183 | ✗ | return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval; | |
184 | } | ||
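
Restating the return expression above as a formula (my reading of the code; val here is the already-clipped input stored in VAR_CLIPVAL):

$$
\mathrm{gammaval}(\gamma) = \left(\frac{\mathrm{val}-\mathrm{minval}}{\mathrm{maxval}-\mathrm{minval}}\right)^{\gamma}(\mathrm{maxval}-\mathrm{minval}) + \mathrm{minval}
$$

i.e. the value is normalised to [0,1] over the per-component range, raised to the power gamma, then rescaled back into the range.
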
185 | |||
186 | /** | ||
187 | * Compute ITU Rec.709 gamma correction of value val. | ||
188 | */ | ||
189 | ✗ | static double compute_gammaval709(void *opaque, double gamma) | |
190 | { | ||
191 | ✗ | LutContext *s = opaque; | |
192 | ✗ | double val = s->var_values[VAR_CLIPVAL]; | |
193 | ✗ | double minval = s->var_values[VAR_MINVAL]; | |
194 | ✗ | double maxval = s->var_values[VAR_MAXVAL]; | |
195 | ✗ | double level = (val - minval) / (maxval - minval); | |
196 | ✗ | level = level < 0.018 ? 4.5 * level | |
197 | ✗ | : 1.099 * pow(level, 1.0 / gamma) - 0.099; | |
198 | ✗ | return level * (maxval - minval) + minval; | |
199 | } | ||
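
Written out, this helper normalises the clipped value and applies the two-piece Rec.709-style curve (my restatement of the code above):

$$
L = \frac{\mathrm{val}-\mathrm{minval}}{\mathrm{maxval}-\mathrm{minval}},\qquad
L' = \begin{cases} 4.5\,L, & L < 0.018 \\ 1.099\,L^{1/\gamma} - 0.099, & L \ge 0.018 \end{cases},\qquad
\mathrm{gammaval709}(\gamma) = L'\,(\mathrm{maxval}-\mathrm{minval}) + \mathrm{minval}
$$

With 1/γ = 0.45 this is the familiar Rec.709 opto-electronic transfer characteristic.
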
200 | |||
201 | static double (* const funcs1[])(void *, double) = { | ||
202 | clip, | ||
203 | compute_gammaval, | ||
204 | compute_gammaval709, | ||
205 | NULL | ||
206 | }; | ||
207 | |||
208 | static const char * const funcs1_names[] = { | ||
209 | "clip", | ||
210 | "gammaval", | ||
211 | "gammaval709", | ||
212 | NULL | ||
213 | }; | ||
214 | |||
215 | 57 | static int config_props(AVFilterLink *inlink) | |
216 | { | ||
217 | 57 | AVFilterContext *ctx = inlink->dst; | |
218 | 57 | LutContext *s = ctx->priv; | |
219 | 57 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); | |
220 | uint8_t rgba_map[4]; /* component index -> RGBA color index map */ | ||
221 | int min[4], max[4]; | ||
222 | int val, color, ret; | ||
223 | |||
224 | 57 | s->hsub = desc->log2_chroma_w; | |
225 | 57 | s->vsub = desc->log2_chroma_h; | |
226 | |||
227 | 57 | s->var_values[VAR_W] = inlink->w; | |
228 | 57 | s->var_values[VAR_H] = inlink->h; | |
229 | 57 | s->is_16bit = desc->comp[0].depth > 8; | |
230 | |||
231 | 3/3 ✓ Branch 0 taken 29 times. ✓ Branch 1 taken 2 times. ✓ Branch 2 taken 26 times. | 57 | switch (inlink->format) { |
232 | 29 | case AV_PIX_FMT_YUV410P: | |
233 | case AV_PIX_FMT_YUV411P: | ||
234 | case AV_PIX_FMT_YUV420P: | ||
235 | case AV_PIX_FMT_YUV422P: | ||
236 | case AV_PIX_FMT_YUV440P: | ||
237 | case AV_PIX_FMT_YUV444P: | ||
238 | case AV_PIX_FMT_YUVA420P: | ||
239 | case AV_PIX_FMT_YUVA422P: | ||
240 | case AV_PIX_FMT_YUVA444P: | ||
241 | case AV_PIX_FMT_YUV420P9LE: | ||
242 | case AV_PIX_FMT_YUV422P9LE: | ||
243 | case AV_PIX_FMT_YUV444P9LE: | ||
244 | case AV_PIX_FMT_YUVA420P9LE: | ||
245 | case AV_PIX_FMT_YUVA422P9LE: | ||
246 | case AV_PIX_FMT_YUVA444P9LE: | ||
247 | case AV_PIX_FMT_YUV420P10LE: | ||
248 | case AV_PIX_FMT_YUV422P10LE: | ||
249 | case AV_PIX_FMT_YUV440P10LE: | ||
250 | case AV_PIX_FMT_YUV444P10LE: | ||
251 | case AV_PIX_FMT_YUVA420P10LE: | ||
252 | case AV_PIX_FMT_YUVA422P10LE: | ||
253 | case AV_PIX_FMT_YUVA444P10LE: | ||
254 | case AV_PIX_FMT_YUV420P12LE: | ||
255 | case AV_PIX_FMT_YUV422P12LE: | ||
256 | case AV_PIX_FMT_YUV440P12LE: | ||
257 | case AV_PIX_FMT_YUV444P12LE: | ||
258 | case AV_PIX_FMT_YUV420P14LE: | ||
259 | case AV_PIX_FMT_YUV422P14LE: | ||
260 | case AV_PIX_FMT_YUV444P14LE: | ||
261 | case AV_PIX_FMT_YUV420P16LE: | ||
262 | case AV_PIX_FMT_YUV422P16LE: | ||
263 | case AV_PIX_FMT_YUV444P16LE: | ||
264 | case AV_PIX_FMT_YUVA420P16LE: | ||
265 | case AV_PIX_FMT_YUVA422P16LE: | ||
266 | case AV_PIX_FMT_YUVA444P16LE: | ||
267 | 29 | min[Y] = 16 * (1 << (desc->comp[0].depth - 8)); | |
268 | 29 | min[U] = 16 * (1 << (desc->comp[1].depth - 8)); | |
269 | 29 | min[V] = 16 * (1 << (desc->comp[2].depth - 8)); | |
270 | 29 | min[A] = 0; | |
271 | 29 | max[Y] = 235 * (1 << (desc->comp[0].depth - 8)); | |
272 | 29 | max[U] = 240 * (1 << (desc->comp[1].depth - 8)); | |
273 | 29 | max[V] = 240 * (1 << (desc->comp[2].depth - 8)); | |
274 | 29 | max[A] = (1 << desc->comp[0].depth) - 1; | |
275 | 29 | break; | |
276 | 2 | case AV_PIX_FMT_RGB48LE: | |
277 | case AV_PIX_FMT_RGBA64LE: | ||
278 | 2 | min[0] = min[1] = min[2] = min[3] = 0; | |
279 | 2 | max[0] = max[1] = max[2] = max[3] = 65535; | |
280 | 2 | break; | |
281 | 26 | default: | |
282 | 26 | min[0] = min[1] = min[2] = min[3] = 0; | |
283 | 26 | max[0] = max[1] = max[2] = max[3] = 255 * (1 << (desc->comp[0].depth - 8)); | |
284 | } | ||
285 | |||
286 | 57 | s->is_yuv = s->is_rgb = 0; | |
287 | 57 | s->is_planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR; | |
288 | 2/2 ✓ Branch 1 taken 33 times. ✓ Branch 2 taken 24 times. | 57 | if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1; |
289 | 2/2 ✓ Branch 1 taken 18 times. ✓ Branch 2 taken 6 times. | 24 | else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1; |
290 | |||
291 | 2/2 ✓ Branch 0 taken 18 times. ✓ Branch 1 taken 39 times. | 57 | if (s->is_rgb) { |
292 | 18 | ff_fill_rgba_map(rgba_map, inlink->format); | |
293 | 18 | s->step = av_get_bits_per_pixel(desc) >> 3; | |
294 | 2/2 ✓ Branch 0 taken 10 times. ✓ Branch 1 taken 8 times. | 18 | if (s->is_16bit) { |
295 | 10 | s->step = s->step >> 1; | |
296 | } | ||
297 | } | ||
298 | |||
299 | 2/2 ✓ Branch 0 taken 174 times. ✓ Branch 1 taken 57 times. | 231 | for (color = 0; color < desc->nb_components; color++) { |
300 | double res; | ||
301 | 2/2 ✓ Branch 0 taken 63 times. ✓ Branch 1 taken 111 times. | 174 | int comp = s->is_rgb ? rgba_map[color] : color; |
302 | |||
303 | /* create the parsed expression */ | ||
304 | 174 | av_expr_free(s->comp_expr[color]); | |
305 | 174 | s->comp_expr[color] = NULL; | |
306 | 174 | ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color], | |
307 | var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx); | ||
308 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 174 times. | 174 | if (ret < 0) { |
309 | ✗ | av_log(ctx, AV_LOG_ERROR, | |
310 | "Error when parsing the expression '%s' for the component %d and color %d.\n", | ||
311 | s->comp_expr_str[comp], comp, color); | ||
312 | ✗ | return AVERROR(EINVAL); | |
313 | } | ||
314 | |||
315 | /* compute the lut */ | ||
316 | 174 | s->var_values[VAR_MAXVAL] = max[color]; | |
317 | 174 | s->var_values[VAR_MINVAL] = min[color]; | |
318 | |||
319 | 2/2 ✓ Branch 0 taken 11403264 times. ✓ Branch 1 taken 174 times. | 11403438 | for (val = 0; val < FF_ARRAY_ELEMS(s->lut[comp]); val++) { |
320 | 11403264 | s->var_values[VAR_VAL] = val; | |
321 | 11403264 | s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]); | |
322 | 11403264 | s->var_values[VAR_NEGVAL] = | |
323 | 11403264 | av_clip(min[color] + max[color] - s->var_values[VAR_VAL], | |
324 | min[color], max[color]); | ||
325 | |||
326 | 11403264 | res = av_expr_eval(s->comp_expr[color], s->var_values, s); | |
327 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 11403264 times. | 11403264 | if (isnan(res)) { |
328 | ✗ | av_log(ctx, AV_LOG_ERROR, | |
329 | "Error when evaluating the expression '%s' for the value %d for the component %d.\n", | ||
330 | s->comp_expr_str[color], val, comp); | ||
331 | ✗ | return AVERROR(EINVAL); | |
332 | } | ||
333 | 11403264 | s->lut[comp][val] = av_clip((int)res, 0, max[A]); | |
334 | 11403264 | av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]); | |
335 | } | ||
336 | } | ||
337 | |||
338 | 57 | return 0; | |
339 | } | ||
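
As a quick worked example of the range selection above (numbers derived from the switch, assuming a 10-bit limited-range input such as yuv420p10le):

```
min[Y] = 16  * (1 << (10 - 8)) = 64      max[Y] = 235 * 4 = 940
min[U] = min[V] = 64                     max[U] = max[V] = 240 * 4 = 960
min[A] = 0                               max[A] = (1 << 10) - 1 = 1023
```

Formats not listed in the switch fall through to the default branch with a full 0..255 range scaled by bit depth, while the packed 16-bit RGB formats are handled explicitly with 0..65535.
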
340 | |||
341 | struct thread_data { | ||
342 | AVFrame *in; | ||
343 | AVFrame *out; | ||
344 | |||
345 | int w; | ||
346 | int h; | ||
347 | }; | ||
348 | |||
349 | #define LOAD_PACKED_COMMON\ | ||
350 | LutContext *s = ctx->priv;\ | ||
351 | const struct thread_data *td = arg;\ | ||
352 | \ | ||
353 | int i, j;\ | ||
354 | const int w = td->w;\ | ||
355 | const int h = td->h;\ | ||
356 | AVFrame *in = td->in;\ | ||
357 | AVFrame *out = td->out;\ | ||
358 | const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;\ | ||
359 | const int step = s->step;\ | ||
360 | \ | ||
361 | const int slice_start = (h * jobnr ) / nb_jobs;\ | ||
362 | const int slice_end = (h * (jobnr+1)) / nb_jobs;\ | ||
363 | |||
364 | /* packed, 16-bit */ | ||
365 | 2 | static int lut_packed_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
366 | { | ||
367 | 2 | LOAD_PACKED_COMMON | |
368 | |||
369 | uint16_t *inrow, *outrow, *inrow0, *outrow0; | ||
370 | 2 | const int in_linesize = in->linesize[0] / 2; | |
371 | 2 | const int out_linesize = out->linesize[0] / 2; | |
372 | 2 | inrow0 = (uint16_t *)in ->data[0]; | |
373 | 2 | outrow0 = (uint16_t *)out->data[0]; | |
374 | |||
375 | 2/2 ✓ Branch 0 taken 576 times. ✓ Branch 1 taken 2 times. | 578 | for (i = slice_start; i < slice_end; i++) { |
376 | 576 | inrow = inrow0 + i * in_linesize; | |
377 | 576 | outrow = outrow0 + i * out_linesize; | |
378 | 2/2 ✓ Branch 0 taken 202752 times. ✓ Branch 1 taken 576 times. | 203328 | for (j = 0; j < w; j++) { |
379 | |||
380 | 2/4 ✓ Branch 0 taken 101376 times. ✓ Branch 1 taken 101376 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. | 202752 | switch (step) { |
381 | #if HAVE_BIGENDIAN | ||
382 | case 4: outrow[3] = av_bswap16(tab[3][av_bswap16(inrow[3])]); // Fall-through | ||
383 | case 3: outrow[2] = av_bswap16(tab[2][av_bswap16(inrow[2])]); // Fall-through | ||
384 | case 2: outrow[1] = av_bswap16(tab[1][av_bswap16(inrow[1])]); // Fall-through | ||
385 | default: outrow[0] = av_bswap16(tab[0][av_bswap16(inrow[0])]); | ||
386 | #else | ||
387 | 101376 | case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through | |
388 | 202752 | case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through | |
389 | 202752 | case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through | |
390 | 202752 | default: outrow[0] = tab[0][inrow[0]]; | |
391 | #endif | ||
392 | } | ||
393 | 202752 | outrow += step; | |
394 | 202752 | inrow += step; | |
395 | } | ||
396 | } | ||
397 | |||
398 | 2 | return 0; | |
399 | } | ||
400 | |||
401 | /* packed, 8-bit */ | ||
402 | 6 | static int lut_packed_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
403 | { | ||
404 | 6 | LOAD_PACKED_COMMON | |
405 | |||
406 | uint8_t *inrow, *outrow, *inrow0, *outrow0; | ||
407 | 6 | const int in_linesize = in->linesize[0]; | |
408 | 6 | const int out_linesize = out->linesize[0]; | |
409 | 6 | inrow0 = in ->data[0]; | |
410 | 6 | outrow0 = out->data[0]; | |
411 | |||
412 | 2/2 ✓ Branch 0 taken 1728 times. ✓ Branch 1 taken 6 times. | 1734 | for (i = slice_start; i < slice_end; i++) { |
413 | 1728 | inrow = inrow0 + i * in_linesize; | |
414 | 1728 | outrow = outrow0 + i * out_linesize; | |
415 | 2/2 ✓ Branch 0 taken 608256 times. ✓ Branch 1 taken 1728 times. | 609984 | for (j = 0; j < w; j++) { |
416 | 2/4 ✓ Branch 0 taken 405504 times. ✓ Branch 1 taken 202752 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. | 608256 | switch (step) { |
417 | 405504 | case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through | |
418 | 608256 | case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through | |
419 | 608256 | case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through | |
420 | 608256 | default: outrow[0] = tab[0][inrow[0]]; | |
421 | } | ||
422 | 608256 | outrow += step; | |
423 | 608256 | inrow += step; | |
424 | } | ||
425 | } | ||
426 | |||
427 | 6 | return 0; | |
428 | } | ||
429 | |||
430 | #define LOAD_PLANAR_COMMON\ | ||
431 | LutContext *s = ctx->priv;\ | ||
432 | const struct thread_data *td = arg;\ | ||
433 | int i, j, plane;\ | ||
434 | AVFrame *in = td->in;\ | ||
435 | AVFrame *out = td->out;\ | ||
436 | |||
437 | #define PLANAR_COMMON\ | ||
438 | int vsub = plane == 1 || plane == 2 ? s->vsub : 0;\ | ||
439 | int hsub = plane == 1 || plane == 2 ? s->hsub : 0;\ | ||
440 | int h = AV_CEIL_RSHIFT(td->h, vsub);\ | ||
441 | int w = AV_CEIL_RSHIFT(td->w, hsub);\ | ||
442 | const uint16_t *tab = s->lut[plane];\ | ||
443 | \ | ||
444 | const int slice_start = (h * jobnr ) / nb_jobs;\ | ||
445 | const int slice_end = (h * (jobnr+1)) / nb_jobs;\ | ||
446 | |||
447 | /* planar >8 bit depth */ | ||
448 | 33 | static int lut_planar_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
449 | { | ||
450 | 33 | LOAD_PLANAR_COMMON | |
451 | |||
452 | uint16_t *inrow, *outrow; | ||
453 | |||
454 | 5/6 ✓ Branch 0 taken 122 times. ✓ Branch 1 taken 6 times. ✓ Branch 2 taken 95 times. ✓ Branch 3 taken 27 times. ✓ Branch 4 taken 95 times. ✗ Branch 5 not taken. | 128 | for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { |
455 | 8/8 ✓ Branch 0 taken 67 times. ✓ Branch 1 taken 28 times. ✓ Branch 2 taken 28 times. ✓ Branch 3 taken 39 times. ✓ Branch 4 taken 67 times. ✓ Branch 5 taken 28 times. ✓ Branch 6 taken 28 times. ✓ Branch 7 taken 39 times. | 95 | PLANAR_COMMON |
456 | |||
457 | 95 | const int in_linesize = in->linesize[plane] / 2; | |
458 | 95 | const int out_linesize = out->linesize[plane] / 2; | |
459 | |||
460 | 95 | inrow = (uint16_t *)in ->data[plane] + slice_start * in_linesize; | |
461 | 95 | outrow = (uint16_t *)out->data[plane] + slice_start * out_linesize; | |
462 | |||
463 | 2/2 ✓ Branch 0 taken 25056 times. ✓ Branch 1 taken 95 times. | 25151 | for (i = slice_start; i < slice_end; i++) { |
464 | 2/2 ✓ Branch 0 taken 7907328 times. ✓ Branch 1 taken 25056 times. | 7932384 | for (j = 0; j < w; j++) { |
465 | #if HAVE_BIGENDIAN | ||
466 | outrow[j] = av_bswap16(tab[av_bswap16(inrow[j])]); | ||
467 | #else | ||
468 | 7907328 | outrow[j] = tab[inrow[j]]; | |
469 | #endif | ||
470 | } | ||
471 | 25056 | inrow += in_linesize; | |
472 | 25056 | outrow += out_linesize; | |
473 | } | ||
474 | } | ||
475 | |||
476 | 33 | return 0; | |
477 | } | ||
478 | |||
479 | /* planar 8bit depth */ | ||
480 | 16 | static int lut_planar_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
481 | { | ||
482 | 16 | LOAD_PLANAR_COMMON | |
483 | |||
484 | uint8_t *inrow, *outrow; | ||
485 | |||
486 | 5/6 ✓ Branch 0 taken 62 times. ✓ Branch 1 taken 4 times. ✓ Branch 2 taken 50 times. ✓ Branch 3 taken 12 times. ✓ Branch 4 taken 50 times. ✗ Branch 5 not taken. | 66 | for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { |
487 | 8/8 ✓ Branch 0 taken 35 times. ✓ Branch 1 taken 15 times. ✓ Branch 2 taken 15 times. ✓ Branch 3 taken 20 times. ✓ Branch 4 taken 35 times. ✓ Branch 5 taken 15 times. ✓ Branch 6 taken 15 times. ✓ Branch 7 taken 20 times. | 50 | PLANAR_COMMON |
488 | |||
489 | 50 | const int in_linesize = in->linesize[plane]; | |
490 | 50 | const int out_linesize = out->linesize[plane]; | |
491 | |||
492 | 50 | inrow = in ->data[plane] + slice_start * in_linesize; | |
493 | 50 | outrow = out->data[plane] + slice_start * out_linesize; | |
494 | |||
495 | 2/2 ✓ Branch 0 taken 12528 times. ✓ Branch 1 taken 50 times. | 12578 | for (i = slice_start; i < slice_end; i++) { |
496 | 2/2 ✓ Branch 0 taken 3763584 times. ✓ Branch 1 taken 12528 times. | 3776112 | for (j = 0; j < w; j++) |
497 | 3763584 | outrow[j] = tab[inrow[j]]; | |
498 | 12528 | inrow += in_linesize; | |
499 | 12528 | outrow += out_linesize; | |
500 | } | ||
501 | } | ||
502 | |||
503 | 16 | return 0; | |
504 | } | ||
505 | |||
506 | #define PACKED_THREAD_DATA\ | ||
507 | struct thread_data td = {\ | ||
508 | .in = in,\ | ||
509 | .out = out,\ | ||
510 | .w = inlink->w,\ | ||
511 | .h = in->height,\ | ||
512 | };\ | ||
513 | |||
514 | #define PLANAR_THREAD_DATA\ | ||
515 | struct thread_data td = {\ | ||
516 | .in = in,\ | ||
517 | .out = out,\ | ||
518 | .w = inlink->w,\ | ||
519 | .h = inlink->h,\ | ||
520 | };\ | ||
521 | |||
522 | 57 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |
523 | { | ||
524 | 57 | AVFilterContext *ctx = inlink->dst; | |
525 | 57 | LutContext *s = ctx->priv; | |
526 | 57 | AVFilterLink *outlink = ctx->outputs[0]; | |
527 | AVFrame *out; | ||
528 | 57 | int direct = 0; | |
529 | |||
530 | 1/2 ✓ Branch 1 taken 57 times. ✗ Branch 2 not taken. | 57 | if (av_frame_is_writable(in)) { |
531 | 57 | direct = 1; | |
532 | 57 | out = in; | |
533 | } else { | ||
534 | ✗ | out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
535 | ✗ | if (!out) { | |
536 | ✗ | av_frame_free(&in); | |
537 | ✗ | return AVERROR(ENOMEM); | |
538 | } | ||
539 | ✗ | av_frame_copy_props(out, in); | |
540 | } | ||
541 | |||
542 | 6/6 ✓ Branch 0 taken 18 times. ✓ Branch 1 taken 39 times. ✓ Branch 2 taken 10 times. ✓ Branch 3 taken 8 times. ✓ Branch 4 taken 2 times. ✓ Branch 5 taken 8 times. | 59 | if (s->is_rgb && s->is_16bit && !s->is_planar) { |
543 | /* packed, 16-bit */ | ||
544 | 2 | PACKED_THREAD_DATA | |
545 | 2 | ff_filter_execute(ctx, lut_packed_16bits, &td, NULL, | |
546 | 1/2 ✓ Branch 0 taken 2 times. ✗ Branch 1 not taken. | 2 | FFMIN(in->height, ff_filter_get_nb_threads(ctx))); |
547 | 4/4 ✓ Branch 0 taken 16 times. ✓ Branch 1 taken 39 times. ✓ Branch 2 taken 6 times. ✓ Branch 3 taken 10 times. | 61 | } else if (s->is_rgb && !s->is_planar) { |
548 | /* packed 8 bits */ | ||
549 | 6 | PACKED_THREAD_DATA | |
550 | 6 | ff_filter_execute(ctx, lut_packed_8bits, &td, NULL, | |
551 | 1/2 ✓ Branch 0 taken 6 times. ✗ Branch 1 not taken. | 6 | FFMIN(in->height, ff_filter_get_nb_threads(ctx))); |
552 | 2/2 ✓ Branch 0 taken 33 times. ✓ Branch 1 taken 16 times. | 49 | } else if (s->is_16bit) { |
553 | /* planar >8 bit depth */ | ||
554 | 33 | PLANAR_THREAD_DATA | |
555 | 33 | ff_filter_execute(ctx, lut_planar_16bits, &td, NULL, | |
556 | 1/2 ✓ Branch 0 taken 33 times. ✗ Branch 1 not taken. | 33 | FFMIN(in->height, ff_filter_get_nb_threads(ctx))); |
557 | } else { | ||
558 | /* planar 8bit depth */ | ||
559 | 16 | PLANAR_THREAD_DATA | |
560 | 16 | ff_filter_execute(ctx, lut_planar_8bits, &td, NULL, | |
561 | 1/2 ✓ Branch 0 taken 16 times. ✗ Branch 1 not taken. | 16 | FFMIN(in->height, ff_filter_get_nb_threads(ctx))); |
562 | } | ||
563 | |||
564 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 57 times. | 57 | if (!direct) |
565 | ✗ | av_frame_free(&in); | |
566 | |||
567 | 57 | return ff_filter_frame(outlink, out); | |
568 | } | ||
569 | |||
570 | ✗ | static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, | |
571 | char *res, int res_len, int flags) | ||
572 | { | ||
573 | ✗ | int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags); | |
574 | |||
575 | ✗ | if (ret < 0) | |
576 | ✗ | return ret; | |
577 | |||
578 | ✗ | return config_props(ctx->inputs[0]); | |
579 | } | ||
580 | |||
581 | static const AVFilterPad inputs[] = { | ||
582 | { .name = "default", | ||
583 | .type = AVMEDIA_TYPE_VIDEO, | ||
584 | .filter_frame = filter_frame, | ||
585 | .config_props = config_props, | ||
586 | }, | ||
587 | }; | ||
588 | |||
589 | #define DEFINE_LUT_FILTER(name_, description_, priv_class_) \ | ||
590 | const AVFilter ff_vf_##name_ = { \ | ||
591 | .name = #name_, \ | ||
592 | .description = NULL_IF_CONFIG_SMALL(description_), \ | ||
593 | .priv_class = &priv_class_ ## _class, \ | ||
594 | .priv_size = sizeof(LutContext), \ | ||
595 | .init = name_##_init, \ | ||
596 | .uninit = uninit, \ | ||
597 | FILTER_INPUTS(inputs), \ | ||
598 | FILTER_OUTPUTS(ff_video_default_filterpad), \ | ||
599 | FILTER_QUERY_FUNC2(query_formats), \ | ||
600 | .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | \ | ||
601 | AVFILTER_FLAG_SLICE_THREADS, \ | ||
602 | .process_command = process_command, \ | ||
603 | } | ||
604 | |||
605 | AVFILTER_DEFINE_CLASS_EXT(lut, "lut/lutyuv/lutrgb", options); | ||
606 | |||
607 | #if CONFIG_LUT_FILTER | ||
608 | |||
609 | #define lut_init NULL | ||
610 | DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.", | ||
611 | lut); | ||
612 | #undef lut_init | ||
613 | #endif | ||
614 | |||
615 | #if CONFIG_LUTYUV_FILTER | ||
616 | |||
617 | ✗ | static av_cold int lutyuv_init(AVFilterContext *ctx) | |
618 | { | ||
619 | ✗ | LutContext *s = ctx->priv; | |
620 | |||
621 | ✗ | s->is_yuv = 1; | |
622 | |||
623 | ✗ | return 0; | |
624 | } | ||
625 | |||
626 | DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.", | ||
627 | lut); | ||
628 | #endif | ||
629 | |||
630 | #if CONFIG_LUTRGB_FILTER | ||
631 | |||
632 | ✗ | static av_cold int lutrgb_init(AVFilterContext *ctx) | |
633 | { | ||
634 | ✗ | LutContext *s = ctx->priv; | |
635 | |||
636 | ✗ | s->is_rgb = 1; | |
637 | |||
638 | ✗ | return 0; | |
639 | } | ||
640 | |||
641 | DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.", | ||
642 | lut); | ||
643 | #endif | ||
644 |