FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_colorconstancy.c
Date: 2024-04-18 20:30:25
           Exec   Total   Coverage
Lines:        0     285       0.0%
Functions:    0      16       0.0%
Branches:     0     132       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2018 Mina Sami
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * Color Constancy filter
24 *
25 * @see http://colorconstancy.com/
26 *
27 * @cite
28 * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29 */
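/*
 * For reference: the grey-edge framework from the cited paper is usually
 * written, per channel c, as
 *
 *     e_c  proportional to  ( integral | d^n f_{c,sigma}(x) / dx^n |^p dx )^(1/p)
 *
 * where n is the differentiation order, p the Minkowski norm and sigma the
 * Gaussian smoothing scale; these correspond to the difford, minknorm and
 * sigma options of this filter, with minknorm == 0 meaning "take the maximum".
 */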
30
31 #include "libavutil/mem.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34
35 #include "avfilter.h"
36 #include "internal.h"
37 #include "video.h"
38
39 #include <math.h>
40
41 #define GREY_EDGE "greyedge"
42
43 #define SQRT3 1.73205080757
44
45 #define NUM_PLANES 3
46 #define MAX_DIFF_ORD 2
47 #define MAX_META_DATA 4
48 #define MAX_DATA 4
49
50 #define INDEX_TEMP 0
51 #define INDEX_DX 1
52 #define INDEX_DY 2
53 #define INDEX_DXY 3
54 #define INDEX_NORM INDEX_DX
55 #define INDEX_SRC 0
56 #define INDEX_DST 1
57 #define INDEX_ORD 2
58 #define INDEX_DIR 3
59 #define DIR_X 0
60 #define DIR_Y 1
61
62 /**
63 * Used for passing data between threads.
64 */
65 typedef struct ThreadData {
66 AVFrame *in, *out;
67 int meta_data[MAX_META_DATA];
68 double *data[MAX_DATA][NUM_PLANES];
69 } ThreadData;
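/*
 * Each entry of data[] holds one double buffer per plane (allocated in
 * setup_derivative_buffers), while meta_data[] carries the parameters of a
 * derivative pass (source/destination buffer index, differentiation order
 * and direction) consumed by slice_get_derivative.
 */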
70
71 /**
72 * Common struct for all algorithm contexts.
73 */
74 typedef struct ColorConstancyContext {
75 const AVClass *class;
76
77 int difford;
78 int minknorm; /**< minknorm == 0 : use the maximum instead */
79 double sigma;
80
81 int nb_threads;
82 int planeheight[4];
83 int planewidth[4];
84
85 int filtersize;
86 double *gauss[MAX_DIFF_ORD+1];
87
88 double white[NUM_PLANES];
89 } ColorConstancyContext;
90
91 #define OFFSET(x) offsetof(ColorConstancyContext, x)
92 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
93
94 #define GINDX(s, i) ( (i) - ((s) >> 2) )
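/*
 * GINDX maps a filter tap index i to the signed offset i - (filtersize >> 2),
 * i.e. i minus filtersize / 4; for example GINDX(7, 0) == -1 and
 * GINDX(7, 6) == 5.
 */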
95
96 /**
97 * Sets the Gaussian filters used for calculating Gaussian derivatives. The
98 * filter size depends on sigma, which is a user option, so the filters are
99 * recalculated each time. Each higher order also depends on the lower ones.
100 * Sigma can be zero only when difford = 0; in that case the data is merely
101 * converted to double instead of convolved.
102 *
103 * @param ctx the filter context.
104 *
105 * @return 0 in case of success, a negative value corresponding to an
106 * AVERROR code in case of failure.
107 */
108 static int set_gauss(AVFilterContext *ctx)
109 {
110 ColorConstancyContext *s = ctx->priv;
111 int filtersize = s->filtersize;
112 int difford = s->difford;
113 double sigma = s->sigma;
114 double sum1, sum2;
115 int i;
116
117 for (i = 0; i <= difford; ++i) {
118 s->gauss[i] = av_calloc(filtersize, sizeof(*s->gauss[i]));
119 if (!s->gauss[i]) {
120 for (; i >= 0; --i) {
121 av_freep(&s->gauss[i]);
122 }
123 return AVERROR(ENOMEM);
124 }
125 }
126
127 // Order 0
128 av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
129 sum1 = 0.0;
130 if (!sigma) {
131 s->gauss[0][0] = 1; // Copying data to double instead of convolution
132 } else {
133 for (i = 0; i < filtersize; ++i) {
134 s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
135 sum1 += s->gauss[0][i];
136 }
137 for (i = 0; i < filtersize; ++i) {
138 s->gauss[0][i] /= sum1;
139 }
140 }
141 // Order 1
142 if (difford > 0) {
143 av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
144 sum1 = 0.0;
145 for (i = 0; i < filtersize; ++i) {
146 s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
147 sum1 += s->gauss[1][i] * GINDX(filtersize, i);
148 }
149
150 for (i = 0; i < filtersize; ++i) {
151 s->gauss[1][i] /= sum1;
152 }
153
154 // Order 2
155 if (difford > 1) {
156 av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
157 sum1 = 0.0;
158 for (i = 0; i < filtersize; ++i) {
159 s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
160 * s->gauss[0][i];
161 sum1 += s->gauss[2][i];
162 }
163
164 sum2 = 0.0;
165 for (i = 0; i < filtersize; ++i) {
166 s->gauss[2][i] -= sum1 / (filtersize);
167 sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
168 }
169 for (i = 0; i < filtersize ; ++i) {
170 s->gauss[2][i] /= sum2;
171 }
172 }
173 }
174 return 0;
175 }
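/*
 * After set_gauss() returns successfully:
 *   gauss[0] is the smoothing kernel, normalized so its taps sum to 1
 *            (or a single unit tap when sigma == 0);
 *   gauss[1] is the first-derivative kernel, scaled so that
 *            sum_i GINDX(filtersize, i) * gauss[1][i] == 1;
 *   gauss[2] is the second-derivative kernel, made zero-mean and scaled so
 *            that sum_i 0.5 * GINDX(filtersize, i)^2 * gauss[2][i] == 1.
 */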
176
177 /**
178 * Frees the buffers used by grey edge for storing final and
179 * intermediate derivative results. The number of buffers and the number
180 * of planes for the last buffer are given so it can be safely called
181 * on allocation failure.
182 *
183 * @param td holds the buffers.
184 * @param nb_buff number of buffers to be freed.
185 * @param nb_planes number of planes for last buffer to be freed.
186 */
187 static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
188 {
189 int b, p;
190
191 for (b = 0; b < nb_buff; ++b) {
192 for (p = 0; p < NUM_PLANES; ++p) {
193 av_freep(&td->data[b][p]);
194 }
195 }
196 // Final buffer may not be fully allocated at fail cases
197 for (p = 0; p < nb_planes; ++p) {
198 av_freep(&td->data[b][p]);
199 }
200 }
201
202 /**
203 * Allocates the buffers used by grey edge for storing final and
204 * intermediate derivative results.
205 *
206 * @param ctx the filter context.
207 * @param td holds the buffers.
208 *
209 * @return 0 in case of success, a negative value corresponding to an
210 * AVERROR code in case of failure.
211 */
212 static int setup_derivative_buffers(AVFilterContext* ctx, ThreadData *td)
213 {
214 ColorConstancyContext *s = ctx->priv;
215 int nb_buff = s->difford + 1;
216 int b, p;
217
218 av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
219 for (b = 0; b <= nb_buff; ++b) { // nb_buff + 1 buffers: indices 0 .. difford + 1
220 for (p = 0; p < NUM_PLANES; ++p) {
221 td->data[b][p] = av_calloc(s->planeheight[p] * s->planewidth[p],
222 sizeof(*td->data[b][p]));
223 if (!td->data[b][p]) {
224 cleanup_derivative_buffers(td, b + 1, p);
225 return AVERROR(ENOMEM);
226 }
227 }
228 }
229 return 0;
230 }
231
232 #define CLAMP(x, mx) av_clip((x), 0, (mx-1))
233 #define INDX2D(r, c, w) ( (r) * (w) + (c) )
234 #define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
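/*
 * GAUSS reads source s at row sr / column sc, clamping both coordinates into
 * the plane (replicate padding at the borders), and scales the sample by the
 * filter coefficient g. sls is the line stride of the source: the frame
 * linesize for the byte input and the plane width for the double buffers.
 */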
235
236 /**
237 * Slice calculation of Gaussian derivatives. Applies a 1-D Gaussian derivative
238 * filter either horizontally or vertically according to the metadata in the
239 * thread data. When convolving horizontally the source is always the input frame
240 * within the thread data; when convolving vertically the source is a buffer.
241 *
242 * @param ctx the filter context.
243 * @param arg data to be passed between threads.
244 * @param jobnr current job number.
245 * @param nb_jobs total number of jobs.
246 *
247 * @return 0.
248 */
249 static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
250 {
251 ColorConstancyContext *s = ctx->priv;
252 ThreadData *td = arg;
253 AVFrame *in = td->in;
254 const int ord = td->meta_data[INDEX_ORD];
255 const int dir = td->meta_data[INDEX_DIR];
256 const int src_index = td->meta_data[INDEX_SRC];
257 const int dst_index = td->meta_data[INDEX_DST];
258 const int filtersize = s->filtersize;
259 const double *gauss = s->gauss[ord];
260 int plane;
261
262 for (plane = 0; plane < NUM_PLANES; ++plane) {
263 const int height = s->planeheight[plane];
264 const int width = s->planewidth[plane];
265 const int in_linesize = in->linesize[plane];
266 double *dst = td->data[dst_index][plane];
267 int slice_start, slice_end;
268 int r, c, g;
269
270 if (dir == DIR_X) {
271 /** Applying gauss horizontally along each row */
272 const uint8_t *src = in->data[plane];
273 slice_start = (height * jobnr ) / nb_jobs;
274 slice_end = (height * (jobnr + 1)) / nb_jobs;
275
276 for (r = slice_start; r < slice_end; ++r) {
277 for (c = 0; c < width; ++c) {
278 dst[INDX2D(r, c, width)] = 0;
279 for (g = 0; g < filtersize; ++g) {
280 dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
281 in_linesize, height, width, gauss[g]);
282 }
283 }
284 }
285 } else {
286 /** Applying gauss vertically along each column */
287 const double *src = td->data[src_index][plane];
288 slice_start = (width * jobnr ) / nb_jobs;
289 slice_end = (width * (jobnr + 1)) / nb_jobs;
290
291 for (c = slice_start; c < slice_end; ++c) {
292 for (r = 0; r < height; ++r) {
293 dst[INDX2D(r, c, width)] = 0;
294 for (g = 0; g < filtersize; ++g) {
295 dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
296 width, height, width, gauss[g]);
297 }
298 }
299 }
300 }
301
302 }
303 return 0;
304 }
305
306 /**
307 * Slice Frobenius normalization of Gaussian derivatives. Only called for
308 * difford values of 1 or 2.
309 *
310 * @param ctx the filter context.
311 * @param arg data to be passed between threads.
312 * @param jobnr current job number.
313 * @param nb_jobs total number of jobs.
314 *
315 * @return 0.
316 */
317 static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
318 {
319 ColorConstancyContext *s = ctx->priv;
320 ThreadData *td = arg;
321 const int difford = s->difford;
322 int plane;
323
324 for (plane = 0; plane < NUM_PLANES; ++plane) {
325 const int height = s->planeheight[plane];
326 const int width = s->planewidth[plane];
327 const int64_t numpixels = width * (int64_t)height;
328 const int slice_start = (numpixels * jobnr ) / nb_jobs;
329 const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
330 const double *dx = td->data[INDEX_DX][plane];
331 const double *dy = td->data[INDEX_DY][plane];
332 double *norm = td->data[INDEX_NORM][plane];
333 int i;
334
335 if (difford == 1) {
336 for (i = slice_start; i < slice_end; ++i) {
337 norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
338 }
339 } else {
340 const double *dxy = td->data[INDEX_DXY][plane];
341 for (i = slice_start; i < slice_end; ++i) {
342 norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
343 }
344 }
345 }
346
347 return 0;
348 }
349
350 /**
351 * Utility function for setting up differentiation data/metadata.
352 *
353 * @param ctx the filter context.
354 * @param td to be used for passing data between threads.
355 * @param ord order of differentiation.
356 * @param dir direction of differentiation.
357 * @param src index of the source used for differentiation.
358 * @param dst index of the destination used for saving the differentiation result.
359 * @param dim maximum dimension in current direction.
360 * @param nb_threads number of threads to use.
361 */
362 static void av_always_inline
363 get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
364 int src, int dst, int dim, int nb_threads) {
365 td->meta_data[INDEX_ORD] = ord;
366 td->meta_data[INDEX_DIR] = dir;
367 td->meta_data[INDEX_SRC] = src;
368 td->meta_data[INDEX_DST] = dst;
369 ff_filter_execute(ctx, slice_get_derivative, td,
370 NULL, FFMIN(dim, nb_threads));
371 }
372
373 /**
374 * Main control function for calculating Gaussian derivatives.
375 *
376 * @param ctx the filter context.
377 * @param td holds the buffers used for storing results.
378 *
379 * @return 0 in case of success, a negative value corresponding to an
380 * AVERROR code in case of failure.
381 */
382 static int get_derivative(AVFilterContext *ctx, ThreadData *td)
383 {
384 ColorConstancyContext *s = ctx->priv;
385 int nb_threads = s->nb_threads;
386 int height = s->planeheight[1];
387 int width = s->planewidth[1];
388
389 switch(s->difford) {
390 case 0:
391 if (!s->sigma) { // Only copy once
392 get_deriv(ctx, td, 0, DIR_X, 0 , INDEX_NORM, height, nb_threads);
393 } else {
394 get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
395 get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
396 // save to INDEX_NORM because this will not be normalized and
397 // the grey edge filter expects the result to be found in INDEX_NORM
398 }
399 return 0;
400
401 case 1:
402 get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
403 get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
404
405 get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
406 get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
407 return 0;
408
409 case 2:
410 get_deriv(ctx, td, 2, DIR_X, 0, INDEX_TEMP, height, nb_threads);
411 get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
412
413 get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
414 get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
415
416 get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
417 get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY, width , nb_threads);
418 return 0;
419
420 default:
421 av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
422 return AVERROR(EINVAL);
423 }
424
425 }
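/*
 * In each case above the 2-D derivative is computed separably: the requested
 * order is applied along one axis and order-0 smoothing along the other, with
 * INDEX_TEMP holding the intermediate result. For difford == 2 the mixed
 * derivative is additionally stored in INDEX_DXY for the Frobenius norm.
 */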
426
427 /**
428 * Slice function for the grey edge algorithm that does partial summing/maximizing
429 * of Gaussian derivatives.
430 *
431 * @param ctx the filter context.
432 * @param arg data to be passed between threads.
433 * @param jobnr current job number.
434 * @param nb_jobs total number of jobs.
435 *
436 * @return 0.
437 */
438 static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
439 {
440 ColorConstancyContext *s = ctx->priv;
441 ThreadData *td = arg;
442 AVFrame *in = td->in;
443 int minknorm = s->minknorm;
444 const uint8_t thresh = 255;
445 int plane;
446
447 for (plane = 0; plane < NUM_PLANES; ++plane) {
448 const int height = s->planeheight[plane];
449 const int width = s->planewidth[plane];
450 const int in_linesize = in->linesize[plane];
451 const int slice_start = (height * jobnr) / nb_jobs;
452 const int slice_end = (height * (jobnr+1)) / nb_jobs;
453 const uint8_t *img_data = in->data[plane];
454 const double *src = td->data[INDEX_NORM][plane];
455 double *dst = td->data[INDEX_DST][plane];
456 int r, c;
457
458 dst[jobnr] = 0;
459 if (!minknorm) {
460 for (r = slice_start; r < slice_end; ++r) {
461 for (c = 0; c < width; ++c) {
462 dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
463 * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
464 }
465 }
466 } else {
467 for (r = slice_start; r < slice_end; ++r) {
468 for (c = 0; c < width; ++c) {
469 dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
470 * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
471 }
472 }
473 }
474 }
475 return 0;
476 }
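/*
 * Each job accumulates its partial result into dst[jobnr]: with minknorm == 0
 * a running maximum of |norm| is kept, otherwise |norm / 255|^minknorm is
 * summed. Pixels whose input value reaches thresh (255) are excluded in both
 * cases. filter_grey_edge() then merges the per-job values, taking the overall
 * maximum or applying the final 1/minknorm power to the sum.
 */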
477
478 /**
479 * Main control function for grey edge algorithm.
480 *
481 * @param ctx the filter context.
482 * @param in frame to perform grey edge on.
483 *
484 * @return 0 in case of success, a negative value corresponding to an
485 * AVERROR code in case of failure.
486 */
487 static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
488 {
489 ColorConstancyContext *s = ctx->priv;
490 ThreadData td;
491 int minknorm = s->minknorm;
492 int difford = s->difford;
493 double *white = s->white;
494 int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
495 int plane, job, ret;
496
497 td.in = in;
498 ret = setup_derivative_buffers(ctx, &td);
499 if (ret) {
500 return ret;
501 }
502 get_derivative(ctx, &td);
503 if (difford > 0) {
504 ff_filter_execute(ctx, slice_normalize, &td, NULL, nb_jobs);
505 }
506
507 ff_filter_execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
508 if (!minknorm) {
509 for (plane = 0; plane < NUM_PLANES; ++plane) {
510 white[plane] = 0; // All values are absolute
511 for (job = 0; job < nb_jobs; ++job) {
512 white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
513 }
514 }
515 } else {
516 for (plane = 0; plane < NUM_PLANES; ++plane) {
517 white[plane] = 0;
518 for (job = 0; job < nb_jobs; ++job) {
519 white[plane] += td.data[INDEX_DST][plane][job];
520 }
521 white[plane] = pow(white[plane], 1./minknorm);
522 }
523 }
524
525 cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
526 return 0;
527 }
528
529 /**
530 * Normalizes estimated illumination since only illumination vector
531 * direction is required for color constancy.
532 *
533 * @param light the estimated illumination to be normalized in place
534 */
535 static void normalize_light(double *light)
536 {
537 double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
538 int plane;
539
540 // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
541
542 if (!abs_val) {
543 for (plane = 0; plane < NUM_PLANES; ++plane) {
544 light[plane] = 1.0;
545 }
546 } else {
547 for (plane = 0; plane < NUM_PLANES; ++plane) {
548 light[plane] = (light[plane] / abs_val);
549 if (!light[plane]) { // to avoid division by zero when correcting
550 light[plane] = 1.0;
551 }
552 }
553 }
554 }
555
556 /**
557 * Redirects to corresponding algorithm estimation function and performs normalization
558 * after estimation.
559 *
560 * @param ctx the filter context.
561 * @param in frame to perform estimation on.
562 *
563 * @return 0 in case of success, a negative value corresponding to an
564 * AVERROR code in case of failure.
565 */
566 static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
567 {
568 ColorConstancyContext *s = ctx->priv;
569 int ret;
570
571 ret = filter_grey_edge(ctx, in);
572
573 av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
574 s->white[0], s->white[1], s->white[2]);
575 normalize_light(s->white);
576 av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
577 s->white[0], s->white[1], s->white[2]);
578
579 return ret;
580 }
581
582 /**
583 * Performs simple correction via diagonal transformation model.
584 *
585 * @param ctx the filter context.
586 * @param arg data to be passed between threads.
587 * @param jobnr current job number.
588 * @param nb_jobs total number of jobs.
589 *
590 * @return 0.
591 */
592 static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
593 {
594 ColorConstancyContext *s = ctx->priv;
595 ThreadData *td = arg;
596 AVFrame *in = td->in;
597 AVFrame *out = td->out;
598 int plane;
599
600 for (plane = 0; plane < NUM_PLANES; ++plane) {
601 const int height = s->planeheight[plane];
602 const int width = s->planewidth[plane];
603 const int64_t numpixels = width * (int64_t)height;
604 const int slice_start = (numpixels * jobnr) / nb_jobs;
605 const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
606 const uint8_t *src = in->data[plane];
607 uint8_t *dst = out->data[plane];
608 double temp;
609 unsigned i;
610
611 for (i = slice_start; i < slice_end; ++i) {
612 temp = src[i] / (s->white[plane] * SQRT3);
613 dst[i] = av_clip_uint8((int)(temp + 0.5));
614 }
615 }
616 return 0;
617 }
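/*
 * Because normalize_light() scales the estimate to unit length, dividing by
 * white[plane] * SQRT3 leaves the frame untouched for a neutral estimate
 * (1/sqrt(3) per channel) and otherwise rescales each channel so the scene
 * appears as if lit by white light.
 */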
618
619 /**
620 * Main control function for correcting scene illumination based on
621 * estimated illumination.
622 *
623 * @param ctx the filter context.
624 * @param in holds frame to correct
625 * @param out holds corrected frame
626 */
627 static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
628 {
629 ColorConstancyContext *s = ctx->priv;
630 ThreadData td;
631 int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
632
633 td.in = in;
634 td.out = out;
635 ff_filter_execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
636 }
637
638 static int config_props(AVFilterLink *inlink)
639 {
640 AVFilterContext *ctx = inlink->dst;
641 ColorConstancyContext *s = ctx->priv;
642 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
643 const double break_off_sigma = 3.0;
644 double sigma = s->sigma;
645 int ret;
646
647 if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
648 av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
649 return AVERROR(EINVAL);
650 }
651
652 s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
653 if ((ret = set_gauss(ctx))) {
654 return ret;
655 }
656
657 s->nb_threads = ff_filter_get_nb_threads(ctx);
658 s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
659 s->planewidth[0] = s->planewidth[3] = inlink->w;
660 s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
661 s->planeheight[0] = s->planeheight[3] = inlink->h;
662
663 return 0;
664 }
665
666 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
667 {
668 AVFilterContext *ctx = inlink->dst;
669 AVFilterLink *outlink = ctx->outputs[0];
670 AVFrame *out;
671 int ret;
672 int direct = 0;
673
674 ret = illumination_estimation(ctx, in);
675 if (ret) {
676 av_frame_free(&in);
677 return ret;
678 }
679
680 if (av_frame_is_writable(in)) {
681 direct = 1;
682 out = in;
683 } else {
684 out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
685 if (!out) {
686 av_frame_free(&in);
687 return AVERROR(ENOMEM);
688 }
689 av_frame_copy_props(out, in);
690 }
691 chromatic_adaptation(ctx, in, out);
692
693 if (!direct)
694 av_frame_free(&in);
695
696 return ff_filter_frame(outlink, out);
697 }
698
699 static av_cold void uninit(AVFilterContext *ctx)
700 {
701 ColorConstancyContext *s = ctx->priv;
702 int difford = s->difford;
703 int i;
704
705 for (i = 0; i <= difford; ++i) {
706 av_freep(&s->gauss[i]);
707 }
708 }
709
710 static const AVFilterPad colorconstancy_inputs[] = {
711 {
712 .name = "default",
713 .type = AVMEDIA_TYPE_VIDEO,
714 .config_props = config_props,
715 .filter_frame = filter_frame,
716 },
717 };
718
719 static const AVOption greyedge_options[] = {
720 { "difford", "set differentiation order", OFFSET(difford), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS },
721 { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 0, 20, FLAGS },
722 { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
723 { NULL }
724 };
725
726 AVFILTER_DEFINE_CLASS(greyedge);
727
728 const AVFilter ff_vf_greyedge = {
729 .name = GREY_EDGE,
730 .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
731 .priv_size = sizeof(ColorConstancyContext),
732 .priv_class = &greyedge_class,
733 .uninit = uninit,
734 FILTER_INPUTS(colorconstancy_inputs),
735 FILTER_OUTPUTS(ff_video_default_filterpad),
736 // TODO: support more formats
737 // FIXME: error when saving to .jpg
738 FILTER_SINGLE_PIXFMT(AV_PIX_FMT_GBRP),
739 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
740 };
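/*
 * Illustrative invocation (a sketch; file names and option values are
 * placeholders, only the filter and option names come from this file):
 *
 *     ffmpeg -i input.mp4 -vf greyedge=difford=1:minknorm=5:sigma=2 output.mp4
 *
 * difford, minknorm and sigma correspond to the greyedge_options table above.
 */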
741