FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_lenscorrection.c
Date: 2024-04-19 17:50:32
            Exec    Total   Coverage
Lines:         0       87       0.0%
Functions:     0       10       0.0%
Branches:      0      100       0.0%
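
No line, function or branch of this filter was hit by the run that produced
this report. A minimal command that would exercise it (the parameter values
are only illustrative) is, for example:

    ffmpeg -f lavfi -i testsrc2=duration=1 -vf lenscorrection=k1=-0.227:k2=-0.022 -f null -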

Line Branch Exec Source
1 /*
2 * Copyright (C) 2007 Richard Spindler (author of frei0r plugin from which this was derived)
3 * Copyright (C) 2014 Daniel Oberhoff
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /**
23 * @file
24 * Lenscorrection filter, algorithm from the frei0r plugin with the same name
25 */
26 #include <stdlib.h>
27 #include <math.h>
28
29 #include "libavutil/colorspace.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33
34 #include "avfilter.h"
35 #include "drawutils.h"
36 #include "internal.h"
37 #include "video.h"
38
39 typedef struct LenscorrectionCtx {
40 const AVClass *av_class;
41 int planewidth[4];
42 int planeheight[4];
43 int depth;
44 int nb_planes;
45 double cx, cy, k1, k2;
46 int interpolation;
47 uint8_t fill_rgba[4];
48 int fill_color[4];
49
50 int32_t *correction[4];
51
52 int (*filter_slice)(AVFilterContext *ctx, void *arg, int job, int nb_jobs, int plane);
53 } LenscorrectionCtx;
54
55 #define OFFSET(x) offsetof(LenscorrectionCtx, x)
56 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
57 static const AVOption lenscorrection_options[] = {
58 { "cx", "set relative center x", OFFSET(cx), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
59 { "cy", "set relative center y", OFFSET(cy), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
60 { "k1", "set quadratic distortion factor", OFFSET(k1), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
61 { "k2", "set double quadratic distortion factor", OFFSET(k2), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
62 { "i", "set interpolation type", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=0}, 0, 64, .flags=FLAGS, .unit = "i" },
63 { "nearest", "nearest neighbour", 0, AV_OPT_TYPE_CONST, {.i64=0},0, 0, .flags=FLAGS, .unit = "i" },
64 { "bilinear", "bilinear", 0, AV_OPT_TYPE_CONST, {.i64=1},0, 0, .flags=FLAGS, .unit = "i" },
65 { "fc", "set the color of the unmapped pixels", OFFSET(fill_rgba), AV_OPT_TYPE_COLOR, {.str="black@0"}, .flags = FLAGS },
66 { NULL }
67 };
68
69 AVFILTER_DEFINE_CLASS(lenscorrection);
70
71 typedef struct ThreadData {
72 AVFrame *in, *out;
73 } ThreadData;
74
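/*
 * Nearest-neighbour remap. rect->correction[plane] holds one scale factor
 * per pixel in 24-bit fixed point (1.0 == 1 << 24). For an output pixel at
 * offset (off_x, off_y) from the configured center, the source sample is
 * taken at center + ((radius_mult * offset + (1<<23)) >> 24), i.e. the
 * offset is scaled and rounded to the nearest integer coordinate. Source
 * coordinates that fall outside the plane are replaced by fill_color.
 */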
75 #define NEAREST(type, name) \
76 static int filter##name##_slice(AVFilterContext *ctx, void *arg, int job, \
77 int nb_jobs, int plane) \
78 { \
79 LenscorrectionCtx *rect = ctx->priv; \
80 ThreadData *td = arg; \
81 AVFrame *in = td->in; \
82 AVFrame *out = td->out; \
83 \
84 const int32_t *correction = rect->correction[plane]; \
85 const int fill_color = rect->fill_color[plane]; \
86 const int w = rect->planewidth[plane], h = rect->planeheight[plane]; \
87 const int xcenter = rect->cx * w; \
88 const int ycenter = rect->cy * h; \
89 const int start = (h * job ) / nb_jobs; \
90 const int end = (h * (job+1)) / nb_jobs; \
91 const int inlinesize = in->linesize[plane] / sizeof(type); \
92 const int outlinesize = out->linesize[plane] / sizeof(type); \
93 const type *indata = (const type *)in->data[plane]; \
94 type *outrow = (type *)out->data[plane] + start * outlinesize; \
95 for (int i = start; i < end; i++, outrow += outlinesize) { \
96 const int off_y = i - ycenter; \
97 type *out = outrow; \
98 for (int j = 0; j < w; j++) { \
99 const int off_x = j - xcenter; \
100 const int64_t radius_mult = correction[j + i*w]; \
101 const int x = xcenter + ((radius_mult * off_x + (1<<23))>>24); \
102 const int y = ycenter + ((radius_mult * off_y + (1<<23))>>24); \
103 const char isvalid = x >= 0 && x < w && y >= 0 && y < h; \
104 *out++ = isvalid ? indata[y * inlinesize + x] : fill_color; \
105 } \
106 } \
107 return 0; \
108 }
109
110
111 NEAREST(uint8_t, 8)
112 NEAREST(uint16_t, 16)
113
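/*
 * Bilinear remap. The integer part of the scaled offset selects the
 * top-left source sample (x, y); du and dv are the fractional parts of the
 * horizontal and vertical offsets, kept in 24-bit fixed point
 * (max == (1<<24) - 1). The four neighbours at (x, y), (nx, y), (x, ny) and
 * (nx, ny) are blended with weights (max-du)*(max-dv), du*(max-dv),
 * (max-du)*dv and du*dv; the product of two 24-bit weights is a 48-bit
 * value, hence the rounding with (1ULL << 47) and the final >> 48.
 * nx/ny are clamped to the last column/row, and samples that fall outside
 * the plane again receive fill_color.
 */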
114 #define BILINEAR(type, name) \
115 static int filter##name##_slice_bilinear(AVFilterContext *ctx, void *arg, \
116 int job, int nb_jobs, int plane) \
117 { \
118 LenscorrectionCtx *rect = ctx->priv; \
119 ThreadData *td = arg; \
120 AVFrame *in = td->in; \
121 AVFrame *out = td->out; \
122 \
123 const int32_t *correction = rect->correction[plane]; \
124 const int fill_color = rect->fill_color[plane]; \
125 const int depth = rect->depth; \
126 const uint64_t max = (1 << 24) - 1; \
127 const uint64_t add = (1 << 23); \
128 const int w = rect->planewidth[plane], h = rect->planeheight[plane]; \
129 const int xcenter = rect->cx * w; \
130 const int ycenter = rect->cy * h; \
131 const int start = (h * job ) / nb_jobs; \
132 const int end = (h * (job+1)) / nb_jobs; \
133 const int inlinesize = in->linesize[plane] / sizeof(type); \
134 const int outlinesize = out->linesize[plane] / sizeof(type); \
135 const type *indata = (const type *)in->data[plane]; \
136 type *outrow = (type *)out->data[plane] + start * outlinesize; \
137 \
138 for (int i = start; i < end; i++, outrow += outlinesize) { \
139 const int off_y = i - ycenter; \
140 type *out = outrow; \
141 \
142 for (int j = 0; j < w; j++) { \
143 const int off_x = j - xcenter; \
144 const int64_t radius_mult = correction[j + i*w]; \
145 const int x = xcenter + ((radius_mult * off_x + (1<<23)) >> 24); \
146 const int y = ycenter + ((radius_mult * off_y + (1<<23)) >> 24); \
147 const char isvalid = x >= 0 && x <= w - 1 && y >= 0 && y <= h - 1; \
148 \
149 if (isvalid) { \
150 const int nx = FFMIN(x + 1, w - 1); \
151 const int ny = FFMIN(y + 1, h - 1); \
152 const uint64_t du = off_x >= 0 ? (radius_mult * off_x + add) & max : max - ((radius_mult * -off_x + add) & max); \
153 const uint64_t dv = off_y >= 0 ? (radius_mult * off_y + add) & max : max - ((radius_mult * -off_y + add) & max); \
154 const uint64_t p0 = indata[ y * inlinesize + x]; \
155 const uint64_t p1 = indata[ y * inlinesize + nx]; \
156 const uint64_t p2 = indata[ny * inlinesize + x]; \
157 const uint64_t p3 = indata[ny * inlinesize + nx]; \
158 uint64_t sum = 0; \
159 \
160 sum += (max - du) * (max - dv) * p0; \
161 sum += ( du) * (max - dv) * p1; \
162 sum += (max - du) * ( dv) * p2; \
163 sum += ( du) * ( dv) * p3; \
164 \
165 out[j] = av_clip_uintp2_c((sum + (1ULL << 47)) >> 48, depth); \
166 } else { \
167 out[j] = fill_color; \
168 } \
169 } \
170 } \
171 \
172 return 0; \
173 }
174
175 BILINEAR(uint8_t, 8)
176 BILINEAR(uint16_t, 16)
177
178 static const enum AVPixelFormat pix_fmts[] = {
179 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9,
180 AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14,
181 AV_PIX_FMT_GRAY16,
182 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
183 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
184 AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
185 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
186 AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
187 AV_PIX_FMT_YUVJ411P,
188 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
189 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
190 AV_PIX_FMT_YUV440P10,
191 AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
192 AV_PIX_FMT_YUV440P12,
193 AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
194 AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
195 AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
196 AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
197 AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
198 AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
199 AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
200 AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
201 AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
202 AV_PIX_FMT_NONE
203 };
204
205 static av_cold void uninit(AVFilterContext *ctx)
206 {
207 LenscorrectionCtx *rect = ctx->priv;
208 int i;
209
210 for (i = 0; i < FF_ARRAY_ELEMS(rect->correction); i++) {
211 av_freep(&rect->correction[i]);
212 }
213 }
214
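/*
 * Precompute one distortion factor per pixel of the plane. In floating
 * point the factor is 1 + k1*r^2 + k2*r^4, where r is the distance from
 * the configured center divided by half the frame diagonal
 * (r2inv == 4 / (w*w + h*h)). Everything is kept in fixed point: k1, k2
 * and the result carry 24 fractional bits, r2 and r4 carry 28, and the
 * (1LL << 52) term is the constant 1.0 before the final >> 28
 * renormalization. The table is indexed as correction[plane][y*w + x].
 */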
215 static void calc_correction(AVFilterContext *ctx, int plane)
216 {
217 LenscorrectionCtx *rect = ctx->priv;
218 int w = rect->planewidth[plane];
219 int h = rect->planeheight[plane];
220 int xcenter = rect->cx * w;
221 int ycenter = rect->cy * h;
222 int k1 = rect->k1 * (1<<24);
223 int k2 = rect->k2 * (1<<24);
224 const int64_t r2inv = (4LL<<60) / (w * w + h * h);
225
226 for (int j = 0; j < h; j++) {
227 const int off_y = j - ycenter;
228 const int off_y2 = off_y * off_y;
229 for (int i = 0; i < w; i++) {
230 const int off_x = i - xcenter;
231 const int64_t r2 = ((off_x * off_x + off_y2) * r2inv + (1LL<<31)) >> 32;
232 const int64_t r4 = (r2 * r2 + (1<<27)) >> 28;
233 const int radius_mult = (r2 * k1 + r4 * k2 + (1LL<<27) + (1LL<<52))>>28;
234 rect->correction[plane][j * w + i] = radius_mult;
235 }
236 }
237 }
238
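/*
 * (Re)configure the filter for the negotiated format: per-plane
 * dimensions, bit depth and the slice function (8- vs 16-bit, nearest vs
 * bilinear). The RGBA fill color is mapped to per-plane values: copied
 * through rgba_map for RGB-layout formats, converted with the BT.709
 * coefficients for YUV formats, and scaled from 8 bits to the format's
 * depth with factor = 1 << (depth - 8). Finally the per-plane correction
 * tables are (re)computed; this also happens whenever an option is changed
 * at runtime through process_command() below.
 */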
239 static int config_output(AVFilterLink *outlink)
240 {
241 AVFilterContext *ctx = outlink->src;
242 LenscorrectionCtx *rect = ctx->priv;
243 AVFilterLink *inlink = ctx->inputs[0];
244 const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
245 int is_rgb = !!(pixdesc->flags & AV_PIX_FMT_FLAG_RGB);
246 uint8_t rgba_map[4];
247 int factor;
248
249 ff_fill_rgba_map(rgba_map, inlink->format);
250 rect->depth = pixdesc->comp[0].depth;
251 factor = 1 << (rect->depth - 8);
252 rect->planeheight[1] = rect->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, pixdesc->log2_chroma_h);
253 rect->planeheight[0] = rect->planeheight[3] = inlink->h;
254 rect->planewidth[1] = rect->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, pixdesc->log2_chroma_w);
255 rect->planewidth[0] = rect->planewidth[3] = inlink->w;
256 rect->nb_planes = av_pix_fmt_count_planes(inlink->format);
257 rect->filter_slice = rect->depth <= 8 ? filter8_slice : filter16_slice;
258 if (rect->interpolation)
259 rect->filter_slice = rect->depth <= 8 ? filter8_slice_bilinear : filter16_slice_bilinear;
260
261 if (is_rgb) {
262 rect->fill_color[rgba_map[0]] = rect->fill_rgba[0] * factor;
263 rect->fill_color[rgba_map[1]] = rect->fill_rgba[1] * factor;
264 rect->fill_color[rgba_map[2]] = rect->fill_rgba[2] * factor;
265 rect->fill_color[rgba_map[3]] = rect->fill_rgba[3] * factor;
266 } else {
267 rect->fill_color[0] = RGB_TO_Y_BT709(rect->fill_rgba[0], rect->fill_rgba[1], rect->fill_rgba[2]) * factor;
268 rect->fill_color[1] = RGB_TO_U_BT709(rect->fill_rgba[0], rect->fill_rgba[1], rect->fill_rgba[2], 0) * factor;
269 rect->fill_color[2] = RGB_TO_V_BT709(rect->fill_rgba[0], rect->fill_rgba[1], rect->fill_rgba[2], 0) * factor;
270 rect->fill_color[3] = rect->fill_rgba[3] * factor;
271 }
272
273 for (int plane = 0; plane < rect->nb_planes; plane++) {
274 int w = rect->planewidth[plane];
275 int h = rect->planeheight[plane];
276
277 if (!rect->correction[plane])
278 rect->correction[plane] = av_malloc_array(w, h * sizeof(**rect->correction));
279 if (!rect->correction[plane])
280 return AVERROR(ENOMEM);
281 calc_correction(ctx, plane);
282 }
283
284 return 0;
285 }
286
287 static int filter_slice(AVFilterContext *ctx, void *arg, int job,
288 int nb_jobs)
289 {
290 LenscorrectionCtx *rect = ctx->priv;
291
292 for (int plane = 0; plane < rect->nb_planes; plane++)
293 rect->filter_slice(ctx, arg, job, nb_jobs, plane);
294
295 return 0;
296 }
297
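/*
 * Per-frame entry point: allocate the output frame, copy the frame
 * properties from the input and run filter_slice() in parallel. Each job
 * handles a horizontal band of every plane; the job count is capped by the
 * (sub-sampled) chroma plane height so that every job covers at least one
 * row of the smallest plane.
 */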
298 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
299 {
300 AVFilterContext *ctx = inlink->dst;
301 AVFilterLink *outlink = ctx->outputs[0];
302 LenscorrectionCtx *rect = ctx->priv;
303 AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
304 ThreadData td;
305
306 if (!out) {
307 av_frame_free(&in);
308 return AVERROR(ENOMEM);
309 }
310
311 av_frame_copy_props(out, in);
312
313 td.in = in; td.out = out;
314 ff_filter_execute(ctx, filter_slice, &td, NULL,
315 FFMIN(rect->planeheight[1], ff_filter_get_nb_threads(ctx)));
316
317 av_frame_free(&in);
318 return ff_filter_frame(outlink, out);
319 }
320
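/*
 * All options are declared with AV_OPT_FLAG_RUNTIME_PARAM, so they can be
 * changed while the graph is running (for example through the sendcmd
 * filter). The generic option handling applies the new value, then
 * config_output() is rerun so the fill colors and correction tables are
 * rebuilt to match.
 */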
321 static int process_command(AVFilterContext *ctx,
322 const char *cmd,
323 const char *arg,
324 char *res,
325 int res_len,
326 int flags)
327 {
328 int ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
329
330 if (ret < 0)
331 return ret;
332
333 return config_output(ctx->outputs[0]);
334 }
335
336 static const AVFilterPad lenscorrection_inputs[] = {
337 {
338 .name = "default",
339 .type = AVMEDIA_TYPE_VIDEO,
340 .filter_frame = filter_frame,
341 },
342 };
343
344 static const AVFilterPad lenscorrection_outputs[] = {
345 {
346 .name = "default",
347 .type = AVMEDIA_TYPE_VIDEO,
348 .config_props = config_output,
349 },
350 };
351
352 const AVFilter ff_vf_lenscorrection = {
353 .name = "lenscorrection",
354 .description = NULL_IF_CONFIG_SMALL("Rectify the image by correcting for lens distortion."),
355 .priv_size = sizeof(LenscorrectionCtx),
356 FILTER_INPUTS(lenscorrection_inputs),
357 FILTER_OUTPUTS(lenscorrection_outputs),
358 FILTER_PIXFMTS_ARRAY(pix_fmts),
359 .priv_class = &lenscorrection_class,
360 .uninit = uninit,
361 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
362 .process_command = process_command,
363 };
364