FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/vf_colorconstancy.c
Date: 2022-12-09 07:38:14
            Exec   Total   Coverage
Lines:         0     280       0.0%
Functions:     0      16       0.0%
Branches:      0     132       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2018 Mina Sami
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * Color Constancy filter
24 *
25 * @see http://colorconstancy.com/
26 *
27 * @cite
28 * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29 */
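For orientation, the cited edge-based color constancy framework estimates the
illuminant per color channel from Gaussian-smoothed image derivatives. In the
notation assumed here (n = difford, p = minknorm, sigma = the sigma option),
the estimate implemented below has the general form

    \left( \int \left| \frac{\partial^n f_\sigma(\mathbf{x})}{\partial \mathbf{x}^n} \right|^p \, d\mathbf{x} \right)^{1/p} = k \, e

where f_\sigma is one channel of the frame smoothed with a Gaussian of standard
deviation sigma, and e is the (not yet normalized) illuminant component that the
code below accumulates in ColorConstancyContext.white.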
30
31 #include "config_components.h"
32
33 #include "libavutil/imgutils.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36
37 #include "avfilter.h"
38 #include "formats.h"
39 #include "internal.h"
40 #include "video.h"
41
42 #include <math.h>
43
44 #define GREY_EDGE "greyedge"
45
46 #define SQRT3 1.73205080757
47
48 #define NUM_PLANES 3
49 #define MAX_DIFF_ORD 2
50 #define MAX_META_DATA 4
51 #define MAX_DATA 4
52
53 #define INDEX_TEMP 0
54 #define INDEX_DX 1
55 #define INDEX_DY 2
56 #define INDEX_DXY 3
57 #define INDEX_NORM INDEX_DX
58 #define INDEX_SRC 0
59 #define INDEX_DST 1
60 #define INDEX_ORD 2
61 #define INDEX_DIR 3
62 #define DIR_X 0
63 #define DIR_Y 1
64
65 /**
66 * Used for passing data between threads.
67 */
68 typedef struct ThreadData {
69 AVFrame *in, *out;
70 int meta_data[MAX_META_DATA];
71 double *data[MAX_DATA][NUM_PLANES];
72 } ThreadData;
73
74 /**
75 * Common struct for all algorithms contexts.
76 */
77 typedef struct ColorConstancyContext {
78 const AVClass *class;
79
80 int difford;
81 int minknorm; /**< minknorm = 0 : use max instead of a Minkowski norm */
82 double sigma;
83
84 int nb_threads;
85 int planeheight[4];
86 int planewidth[4];
87
88 int filtersize;
89 double *gauss[MAX_DIFF_ORD+1];
90
91 double white[NUM_PLANES];
92 } ColorConstancyContext;
93
94 #define OFFSET(x) offsetof(ColorConstancyContext, x)
95 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
96
97 #define GINDX(s, i) ( (i) - ((s) >> 2) )
98
99 /**
100 * Sets the Gaussian filters used for calculating Gaussian derivatives. Filter size
101 * depends on sigma, which is a user option, hence these filters are calculated on
102 * each configuration. Each higher order also depends on the lower ones. Sigma
103 * can be zero only when difford = 0; in that case the data is merely converted
104 * to double instead of being convolved.
105 *
106 * @param ctx the filter context.
107 *
108 * @return 0 in case of success, a negative value corresponding to an
109 * AVERROR code in case of failure.
110 */
111 static int set_gauss(AVFilterContext *ctx)
112 {
113 ColorConstancyContext *s = ctx->priv;
114 int filtersize = s->filtersize;
115 int difford = s->difford;
116 double sigma = s->sigma;
117 double sum1, sum2;
118 int i;
119
120 for (i = 0; i <= difford; ++i) {
121 s->gauss[i] = av_calloc(filtersize, sizeof(*s->gauss[i]));
122 if (!s->gauss[i]) {
123 for (; i >= 0; --i) {
124 av_freep(&s->gauss[i]);
125 }
126 return AVERROR(ENOMEM);
127 }
128 }
129
130 // Order 0
131 av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
132 sum1 = 0.0;
133 if (!sigma) {
134 s->gauss[0][0] = 1; // Copying data to double instead of convolution
135 } else {
136 for (i = 0; i < filtersize; ++i) {
137 s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
138 sum1 += s->gauss[0][i];
139 }
140 for (i = 0; i < filtersize; ++i) {
141 s->gauss[0][i] /= sum1;
142 }
143 }
144 // Order 1
145 if (difford > 0) {
146 av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
147 sum1 = 0.0;
148 for (i = 0; i < filtersize; ++i) {
149 s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
150 sum1 += s->gauss[1][i] * GINDX(filtersize, i);
151 }
152
153 for (i = 0; i < filtersize; ++i) {
154 s->gauss[1][i] /= sum1;
155 }
156
157 // Order 2
158 if (difford > 1) {
159 av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
160 sum1 = 0.0;
161 for (i = 0; i < filtersize; ++i) {
162 s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
163 * s->gauss[0][i];
164 sum1 += s->gauss[2][i];
165 }
166
167 sum2 = 0.0;
168 for (i = 0; i < filtersize; ++i) {
169 s->gauss[2][i] -= sum1 / (filtersize);
170 sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
171 }
172 for (i = 0; i < filtersize ; ++i) {
173 s->gauss[2][i] /= sum2;
174 }
175 }
176 }
177 return 0;
178 }
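With the default sigma = 1.0, config_props() further below gives filtersize = 2 * floor(3.0 * 1.0 + 0.5) + 1 = 7.
The following standalone sketch (not part of the filter; the GIDX macro, the file name and the test sigma
are assumptions made for the demo) builds the order-0 and order-1 kernels the same way set_gauss() does,
which can be handy for printing and inspecting them:

/* gauss_demo.c -- compile with: cc gauss_demo.c -lm */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Mirrors GINDX(): maps tap i to a signed offset around the kernel centre. */
#define GIDX(size, i) ((i) - ((size) >> 2))

int main(void)
{
    const double sigma   = 1.0;                                    /* assumed test value */
    const int filtersize = 2 * (int)floor(3.0 * sigma + 0.5) + 1;  /* as in config_props() */
    double *g0 = calloc(filtersize, sizeof(*g0));
    double *g1 = calloc(filtersize, sizeof(*g1));
    double sum = 0.0;
    int i;

    if (!g0 || !g1)
        return 1;

    /* Order 0: sampled Gaussian, normalized so the taps sum to 1. */
    for (i = 0; i < filtersize; i++) {
        g0[i] = exp(-pow(GIDX(filtersize, i), 2.) / (2 * sigma * sigma)) / (sqrt(2 * M_PI) * sigma);
        sum  += g0[i];
    }
    for (i = 0; i < filtersize; i++)
        g0[i] /= sum;

    /* Order 1: g1(x) = -(x / sigma^2) * g0(x), rescaled so sum(x * g1(x)) == 1. */
    sum = 0.0;
    for (i = 0; i < filtersize; i++) {
        g1[i] = -(GIDX(filtersize, i) / (sigma * sigma)) * g0[i];
        sum  += g1[i] * GIDX(filtersize, i);
    }
    for (i = 0; i < filtersize; i++)
        g1[i] /= sum;

    for (i = 0; i < filtersize; i++)
        printf("tap %d (offset %2d): g0 = %9.6f  g1 = %9.6f\n", i, GIDX(filtersize, i), g0[i], g1[i]);

    free(g0);
    free(g1);
    return 0;
}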
179
180 /**
181 * Frees up the buffers used by grey edge for storing the final and
182 * intermediate derivative results. The number of buffers and the number of
183 * planes of the last buffer are given so it can be safely called on
184 * allocation failure.
185 *
186 * @param td holds the buffers.
187 * @param nb_buff number of buffers to be freed.
188 * @param nb_planes number of planes for last buffer to be freed.
189 */
190 static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
191 {
192 int b, p;
193
194 for (b = 0; b < nb_buff; ++b) {
195 for (p = 0; p < NUM_PLANES; ++p) {
196 av_freep(&td->data[b][p]);
197 }
198 }
199 // Final buffer may not be fully allocated at fail cases
200 for (p = 0; p < nb_planes; ++p) {
201 av_freep(&td->data[b][p]);
202 }
203 }
204
205 /**
206 * Allocates the buffers used by grey edge for storing the final and
207 * intermediate derivative results.
208 *
209 * @param ctx the filter context.
210 * @param td holds the buffers.
211 *
212 * @return 0 in case of success, a negative value corresponding to an
213 * AVERROR code in case of failure.
214 */
215 static int setup_derivative_buffers(AVFilterContext* ctx, ThreadData *td)
216 {
217 ColorConstancyContext *s = ctx->priv;
218 int nb_buff = s->difford + 1;
219 int b, p;
220
221 av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
222 for (b = 0; b <= nb_buff; ++b) { // We need difford + 1 buffers
223 for (p = 0; p < NUM_PLANES; ++p) {
224 td->data[b][p] = av_calloc(s->planeheight[p] * s->planewidth[p],
225 sizeof(*td->data[b][p]));
226 if (!td->data[b][p]) {
227 cleanup_derivative_buffers(td, b + 1, p);
228 return AVERROR(ENOMEM);
229 }
230 }
231 }
232 return 0;
233 }
234
235 #define CLAMP(x, mx) av_clip((x), 0, (mx-1))
236 #define INDX2D(r, c, w) ( (r) * (w) + (c) )
237 #define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
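These three helpers give the convolution replicate padding at the plane borders: any tap
whose row or column falls outside the plane is clamped back to the nearest edge pixel
before being weighted by the Gaussian tap. A minimal, self-contained illustration (the
CLAMP_/INDX2D_ macros and the sample values are made up for the demo; CLAMP above relies
on av_clip() from libavutil instead):

/* clamp_demo.c -- out-of-range taps re-read the nearest edge pixel. */
#include <stdio.h>

#define CLAMP_(x, mx)    ((x) < 0 ? 0 : ((x) >= (mx) ? (mx) - 1 : (x)))
#define INDX2D_(r, c, w) ((r) * (w) + (c))

int main(void)
{
    const int w = 4, h = 1;
    const unsigned char row[4] = { 10, 20, 30, 40 };

    /* A tap two columns left of the image reads column 0 (prints 10),
     * one column right of the image reads column 3 (prints 40). */
    printf("%d %d\n",
           row[INDX2D_(CLAMP_(0, h), CLAMP_(-2, w), w)],
           row[INDX2D_(CLAMP_(0, h), CLAMP_(4, w), w)]);
    return 0;
}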
238
239 /**
240 * Slice calculation of Gaussian derivatives. Applies a 1-D Gaussian derivative filter
241 * either horizontally or vertically according to the metadata given in the thread data.
242 * When convolving horizontally the source is always the input frame within the thread data,
243 * while when convolving vertically the source is a buffer.
244 *
245 * @param ctx the filter context.
246 * @param arg data to be passed between threads.
247 * @param jobnr current job number.
248 * @param nb_jobs total number of jobs.
249 *
250 * @return 0.
251 */
252 static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
253 {
254 ColorConstancyContext *s = ctx->priv;
255 ThreadData *td = arg;
256 AVFrame *in = td->in;
257 const int ord = td->meta_data[INDEX_ORD];
258 const int dir = td->meta_data[INDEX_DIR];
259 const int src_index = td->meta_data[INDEX_SRC];
260 const int dst_index = td->meta_data[INDEX_DST];
261 const int filtersize = s->filtersize;
262 const double *gauss = s->gauss[ord];
263 int plane;
264
265 for (plane = 0; plane < NUM_PLANES; ++plane) {
266 const int height = s->planeheight[plane];
267 const int width = s->planewidth[plane];
268 const int in_linesize = in->linesize[plane];
269 double *dst = td->data[dst_index][plane];
270 int slice_start, slice_end;
271 int r, c, g;
272
273 if (dir == DIR_X) {
274 /** Applying gauss horizontally along each row */
275 const uint8_t *src = in->data[plane];
276 slice_start = (height * jobnr ) / nb_jobs;
277 slice_end = (height * (jobnr + 1)) / nb_jobs;
278
279 for (r = slice_start; r < slice_end; ++r) {
280 for (c = 0; c < width; ++c) {
281 dst[INDX2D(r, c, width)] = 0;
282 for (g = 0; g < filtersize; ++g) {
283 dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
284 in_linesize, height, width, gauss[g]);
285 }
286 }
287 }
288 } else {
289 /** Applying gauss vertically along each column */
290 const double *src = td->data[src_index][plane];
291 slice_start = (width * jobnr ) / nb_jobs;
292 slice_end = (width * (jobnr + 1)) / nb_jobs;
293
294 for (c = slice_start; c < slice_end; ++c) {
295 for (r = 0; r < height; ++r) {
296 dst[INDX2D(r, c, width)] = 0;
297 for (g = 0; g < filtersize; ++g) {
298 dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
299 width, height, width, gauss[g]);
300 }
301 }
302 }
303 }
304
305 }
306 return 0;
307 }
308
309 /**
310 * Slice Frobenius normalization of Gaussian derivatives. Only called for difford values of
311 * 1 or 2.
312 *
313 * @param ctx the filter context.
314 * @param arg data to be passed between threads.
315 * @param jobnr current job number.
316 * @param nb_jobs total number of jobs.
317 *
318 * @return 0.
319 */
320 static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
321 {
322 ColorConstancyContext *s = ctx->priv;
323 ThreadData *td = arg;
324 const int difford = s->difford;
325 int plane;
326
327 for (plane = 0; plane < NUM_PLANES; ++plane) {
328 const int height = s->planeheight[plane];
329 const int width = s->planewidth[plane];
330 const int64_t numpixels = width * (int64_t)height;
331 const int slice_start = (numpixels * jobnr ) / nb_jobs;
332 const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
333 const double *dx = td->data[INDEX_DX][plane];
334 const double *dy = td->data[INDEX_DY][plane];
335 double *norm = td->data[INDEX_NORM][plane];
336 int i;
337
338 if (difford == 1) {
339 for (i = slice_start; i < slice_end; ++i) {
340 norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
341 }
342 } else {
343 const double *dxy = td->data[INDEX_DXY][plane];
344 for (i = slice_start; i < slice_end; ++i) {
345 norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
346 }
347 }
348 }
349
350 return 0;
351 }
352
353 /**
354 * Utility function for setting up differentiation data/metadata.
355 *
356 * @param ctx the filter context.
357 * @param td to be used for passing data between threads.
358 * @param ord order of differentiation.
359 * @param dir direction of differentiation.
360 * @param src index of source used for differentiation.
361 * @param dst index of destination used for saving the differentiation result.
362 * @param dim maximum dimension in current direction.
363 * @param nb_threads number of threads to use.
364 */
365 static void av_always_inline
366 get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
367 int src, int dst, int dim, int nb_threads) {
368 td->meta_data[INDEX_ORD] = ord;
369 td->meta_data[INDEX_DIR] = dir;
370 td->meta_data[INDEX_SRC] = src;
371 td->meta_data[INDEX_DST] = dst;
372 ff_filter_execute(ctx, slice_get_derivative, td,
373 NULL, FFMIN(dim, nb_threads));
374 }
375
376 /**
377 * Main control function for calculating gaussian derivatives.
378 *
379 * @param ctx the filter context.
380 * @param td holds the buffers used for storing results.
381 *
382 * @return 0 in case of success, a negative value corresponding to an
383 * AVERROR code in case of failure.
384 */
385 static int get_derivative(AVFilterContext *ctx, ThreadData *td)
386 {
387 ColorConstancyContext *s = ctx->priv;
388 int nb_threads = s->nb_threads;
389 int height = s->planeheight[1];
390 int width = s->planewidth[1];
391
392 switch(s->difford) {
393 case 0:
394 if (!s->sigma) { // Only copy once
395 get_deriv(ctx, td, 0, DIR_X, 0 , INDEX_NORM, height, nb_threads);
396 } else {
397 get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
398 get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
399 // save to INDEX_NORM because this will not be normalized and
400 // the grey edge filter expects the result to be found in INDEX_NORM
401 }
402 return 0;
403
404 case 1:
405 get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
406 get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
407
408 get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
409 get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
410 return 0;
411
412 case 2:
413 get_deriv(ctx, td, 2, DIR_X, 0, INDEX_TEMP, height, nb_threads);
414 get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
415
416 get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
417 get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
418
419 get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
420 get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY, width , nb_threads);
421 return 0;
422
423 default:
424 av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
425 return AVERROR(EINVAL);
426 }
427
428 }
429
430 /**
431 * Slice function for grey edge algorithm that does partial summing/maximizing
432 * of gaussian derivatives.
433 *
434 * @param ctx the filter context.
435 * @param arg data to be passed between threads.
436 * @param jobnr current job number.
437 * @param nb_jobs total number of jobs.
438 *
439 * @return 0.
440 */
441 static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
442 {
443 ColorConstancyContext *s = ctx->priv;
444 ThreadData *td = arg;
445 AVFrame *in = td->in;
446 int minknorm = s->minknorm;
447 const uint8_t thresh = 255;
448 int plane;
449
450 for (plane = 0; plane < NUM_PLANES; ++plane) {
451 const int height = s->planeheight[plane];
452 const int width = s->planewidth[plane];
453 const int in_linesize = in->linesize[plane];
454 const int slice_start = (height * jobnr) / nb_jobs;
455 const int slice_end = (height * (jobnr+1)) / nb_jobs;
456 const uint8_t *img_data = in->data[plane];
457 const double *src = td->data[INDEX_NORM][plane];
458 double *dst = td->data[INDEX_DST][plane];
459 int r, c;
460
461 dst[jobnr] = 0;
462 if (!minknorm) {
463 for (r = slice_start; r < slice_end; ++r) {
464 for (c = 0; c < width; ++c) {
465 dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
466 * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
467 }
468 }
469 } else {
470 for (r = slice_start; r < slice_end; ++r) {
471 for (c = 0; c < width; ++c) {
472 dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
473 * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
474 }
475 }
476 }
477 }
478 return 0;
479 }
480
481 /**
482 * Main control function for grey edge algorithm.
483 *
484 * @param ctx the filter context.
485 * @param in frame to perform grey edge on.
486 *
487 * @return 0 in case of success, a negative value corresponding to an
488 * AVERROR code in case of failure.
489 */
490 static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
491 {
492 ColorConstancyContext *s = ctx->priv;
493 ThreadData td;
494 int minknorm = s->minknorm;
495 int difford = s->difford;
496 double *white = s->white;
497 int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
498 int plane, job, ret;
499
500 td.in = in;
501 ret = setup_derivative_buffers(ctx, &td);
502 if (ret) {
503 return ret;
504 }
505 get_derivative(ctx, &td);
506 if (difford > 0) {
507 ff_filter_execute(ctx, slice_normalize, &td, NULL, nb_jobs);
508 }
509
510 ff_filter_execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
511 if (!minknorm) {
512 for (plane = 0; plane < NUM_PLANES; ++plane) {
513 white[plane] = 0; // All values are absolute
514 for (job = 0; job < nb_jobs; ++job) {
515 white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
516 }
517 }
518 } else {
519 for (plane = 0; plane < NUM_PLANES; ++plane) {
520 white[plane] = 0;
521 for (job = 0; job < nb_jobs; ++job) {
522 white[plane] += td.data[INDEX_DST][plane][job];
523 }
524 white[plane] = pow(white[plane], 1./minknorm);
525 }
526 }
527
528 cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
529 return 0;
530 }
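The pooling step above is what the minknorm option selects: minknorm = 0 keeps the
per-plane maximum of the derivative magnitudes, while minknorm = p >= 1 accumulates
|d / 255|^p per slice and the p-th root is taken once the slices are summed. Since the
p-norm approaches the maximum as p grows, large minknorm values behave similarly to the
max path. A tiny sketch of the two pooling modes, with made-up magnitudes:

/* pooling_demo.c -- compile with: cc pooling_demo.c -lm */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double mag[4] = { 0.1, 0.4, 0.2, 0.8 };  /* assumed |derivative| / 255 values */
    const int    p      = 5;                       /* assumed minknorm value */
    double max = 0.0, acc = 0.0;
    int i;

    for (i = 0; i < 4; i++) {
        max  = fmax(max, mag[i]);   /* minknorm = 0 path: plain maximum */
        acc += pow(mag[i], p);      /* minknorm = p path: accumulate |d|^p */
    }
    /* Prints roughly: max pooling: 0.800, Minkowski p=5 pooling: 0.805 */
    printf("max pooling: %.3f, Minkowski p=%d pooling: %.3f\n", max, p, pow(acc, 1.0 / p));
    return 0;
}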
531
532 /**
533 * Normalizes estimated illumination since only illumination vector
534 * direction is required for color constancy.
535 *
536 * @param light the estimated illumination to be normalized in place
537 */
538 static void normalize_light(double *light)
539 {
540 double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
541 int plane;
542
543 // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
544
545 if (!abs_val) {
546 for (plane = 0; plane < NUM_PLANES; ++plane) {
547 light[plane] = 1.0;
548 }
549 } else {
550 for (plane = 0; plane < NUM_PLANES; ++plane) {
551 light[plane] = (light[plane] / abs_val);
552 if (!light[plane]) { // to avoid division by zero when correcting
553 light[plane] = 1.0;
554 }
555 }
556 }
557 }
558
559 /**
560 * Redirects to corresponding algorithm estimation function and performs normalization
561 * after estimation.
562 *
563 * @param ctx the filter context.
564 * @param in frame to perform estimation on.
565 *
566 * @return 0 in case of success, a negative value corresponding to an
567 * AVERROR code in case of failure.
568 */
569 static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
570 {
571 ColorConstancyContext *s = ctx->priv;
572 int ret;
573
574 ret = filter_grey_edge(ctx, in);
575
576 av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
577 s->white[0], s->white[1], s->white[2]);
578 normalize_light(s->white);
579 av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
580 s->white[0], s->white[1], s->white[2]);
581
582 return ret;
583 }
584
585 /**
586 * Performs simple correction via diagonal transformation model.
587 *
588 * @param ctx the filter context.
589 * @param arg data to be passed between threads.
590 * @param jobnr current job number.
591 * @param nb_jobs total number of jobs.
592 *
593 * @return 0.
594 */
595 static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
596 {
597 ColorConstancyContext *s = ctx->priv;
598 ThreadData *td = arg;
599 AVFrame *in = td->in;
600 AVFrame *out = td->out;
601 int plane;
602
603 for (plane = 0; plane < NUM_PLANES; ++plane) {
604 const int height = s->planeheight[plane];
605 const int width = s->planewidth[plane];
606 const int64_t numpixels = width * (int64_t)height;
607 const int slice_start = (numpixels * jobnr) / nb_jobs;
608 const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
609 const uint8_t *src = in->data[plane];
610 uint8_t *dst = out->data[plane];
611 double temp;
612 unsigned i;
613
614 for (i = slice_start; i < slice_end; ++i) {
615 temp = src[i] / (s->white[plane] * SQRT3);
616 dst[i] = av_clip_uint8((int)(temp + 0.5));
617 }
618 }
619 return 0;
620 }
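Because normalize_light() above reduces the estimate to a unit vector, a perfectly
neutral illuminant has components 1/SQRT3 (about 0.577) in every plane, so dividing by
white[plane] * SQRT3 leaves such frames untouched; planes whose estimate is above that
value are attenuated and the rest are boosted. A self-contained sketch of the same
per-pixel arithmetic with made-up sample values:

/* diag_demo.c -- compile with: cc diag_demo.c -lm */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double        white[3] = { 0.80, 0.42, 0.42 };  /* assumed (roughly unit-length) reddish estimate */
    const unsigned char src[3]   = { 200, 120, 110 };     /* one sample pixel */
    int p;

    for (p = 0; p < 3; p++) {
        double corrected = src[p] / (white[p] * sqrt(3.0));
        int out = (int)(corrected + 0.5);                  /* same round-and-clip as above */
        if (out > 255) out = 255;
        if (out < 0)   out = 0;
        printf("plane %d: %3d -> %3d\n", p, src[p], out);  /* 200->144, 120->165, 110->151 */
    }
    return 0;
}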
621
622 /**
623 * Main control function for correcting scene illumination based on
624 * estimated illumination.
625 *
626 * @param ctx the filter context.
627 * @param in holds frame to correct
628 * @param out holds corrected frame
629 */
630 static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
631 {
632 ColorConstancyContext *s = ctx->priv;
633 ThreadData td;
634 int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
635
636 td.in = in;
637 td.out = out;
638 ff_filter_execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
639 }
640
641 static int config_props(AVFilterLink *inlink)
642 {
643 AVFilterContext *ctx = inlink->dst;
644 ColorConstancyContext *s = ctx->priv;
645 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
646 const double break_off_sigma = 3.0;
647 double sigma = s->sigma;
648 int ret;
649
650 if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
651 av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
652 return AVERROR(EINVAL);
653 }
654
655 s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
656 if ((ret = set_gauss(ctx))) {
657 return ret;
658 }
659
660 s->nb_threads = ff_filter_get_nb_threads(ctx);
661 s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
662 s->planewidth[0] = s->planewidth[3] = inlink->w;
663 s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
664 s->planeheight[0] = s->planeheight[3] = inlink->h;
665
666 return 0;
667 }
668
669 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
670 {
671 AVFilterContext *ctx = inlink->dst;
672 AVFilterLink *outlink = ctx->outputs[0];
673 AVFrame *out;
674 int ret;
675 int direct = 0;
676
677 ret = illumination_estimation(ctx, in);
678 if (ret) {
679 av_frame_free(&in);
680 return ret;
681 }
682
683 if (av_frame_is_writable(in)) {
684 direct = 1;
685 out = in;
686 } else {
687 out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
688 if (!out) {
689 av_frame_free(&in);
690 return AVERROR(ENOMEM);
691 }
692 av_frame_copy_props(out, in);
693 }
694 chromatic_adaptation(ctx, in, out);
695
696 if (!direct)
697 av_frame_free(&in);
698
699 return ff_filter_frame(outlink, out);
700 }
701
702 static av_cold void uninit(AVFilterContext *ctx)
703 {
704 ColorConstancyContext *s = ctx->priv;
705 int difford = s->difford;
706 int i;
707
708 for (i = 0; i <= difford; ++i) {
709 av_freep(&s->gauss[i]);
710 }
711 }
712
713 static const AVFilterPad colorconstancy_inputs[] = {
714 {
715 .name = "default",
716 .type = AVMEDIA_TYPE_VIDEO,
717 .config_props = config_props,
718 .filter_frame = filter_frame,
719 },
720 };
721
722 static const AVFilterPad colorconstancy_outputs[] = {
723 {
724 .name = "default",
725 .type = AVMEDIA_TYPE_VIDEO,
726 },
727 };
728
729 #if CONFIG_GREYEDGE_FILTER
730
731 static const AVOption greyedge_options[] = {
732 { "difford", "set differentiation order", OFFSET(difford), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS },
733 { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 0, 20, FLAGS },
734 { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
735 { NULL }
736 };
737
738 AVFILTER_DEFINE_CLASS(greyedge);
739
740 const AVFilter ff_vf_greyedge = {
741 .name = GREY_EDGE,
742 .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
743 .priv_size = sizeof(ColorConstancyContext),
744 .priv_class = &greyedge_class,
745 .uninit = uninit,
746 FILTER_INPUTS(colorconstancy_inputs),
747 FILTER_OUTPUTS(colorconstancy_outputs),
748 // TODO: support more formats
749 // FIXME: error when saving to .jpg
750 FILTER_SINGLE_PIXFMT(AV_PIX_FMT_GBRP),
751 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
752 };
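For reference, once this filter is compiled in, it would typically be invoked along the
following lines (file names and option values are illustrative only); the options map
directly to the greyedge_options table above, with difford picking the derivative order,
minknorm the Minkowski pooling (0 = max) and sigma the Gaussian smoothing:

ffmpeg -i input.mp4 -vf greyedge=difford=1:minknorm=5:sigma=2 output.mp4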
753
754 #endif /* CONFIG_GREYEDGE_FILTER */
755