GCC Code Coverage Report
Directory: ../../../ffmpeg/
File:      src/libavfilter/vf_colorconstancy.c
Date:      2020-08-13 15:06:06

               Exec    Total    Coverage
Lines:            0      282       0.0 %
Branches:         0      132       0.0 %

Line Branch Exec Source
1
/*
2
 * Copyright (c) 2018 Mina Sami
3
 *
4
 * This file is part of FFmpeg.
5
 *
6
 * FFmpeg is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
10
 *
11
 * FFmpeg is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with FFmpeg; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
21
/**
22
 * @file
23
 * Color Constancy filter
24
 *
25
 * @see http://colorconstancy.com/
26
 *
27
 * @cite
28
 * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29
 */
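/*
 * Illustrative note on the method (a rough sketch of the cited grey-edge
 * framework of van de Weijer, Gevers and Gijsenij): the illuminant colour e is
 * estimated per channel k from a Minkowski norm of gaussian image derivatives,
 *
 *     e_k ~ ( integral |d^n f_k,sigma(x) / dx^n|^p dx )^(1/p)
 *
 * where n is the differentiation order (difford), p the Minkowski norm
 * (minknorm) and sigma the scale of the gaussian smoothing. The code below
 * provides exactly these ingredients: gaussian (derivative) filters,
 * per-plane derivative norms, and a Minkowski/max pooling step.
 */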
30
31
#include "libavutil/imgutils.h"
32
#include "libavutil/opt.h"
33
#include "libavutil/pixdesc.h"
34
35
#include "avfilter.h"
36
#include "formats.h"
37
#include "internal.h"
38
#include "video.h"
39
40
#include <math.h>
41
42
#define GREY_EDGE "greyedge"
43
44
#define SQRT3 1.73205080757
45
46
#define NUM_PLANES    3
47
#define MAX_DIFF_ORD  2
48
#define MAX_META_DATA 4
49
#define MAX_DATA      4
50
51
#define INDEX_TEMP 0
52
#define INDEX_DX   1
53
#define INDEX_DY   2
54
#define INDEX_DXY  3
55
#define INDEX_NORM INDEX_DX
56
#define INDEX_SRC  0
57
#define INDEX_DST  1
58
#define INDEX_ORD  2
59
#define INDEX_DIR  3
60
#define DIR_X 0
61
#define DIR_Y 1
62
63
/**
64
 * Used for passing data between threads.
65
 */
66
typedef struct ThreadData {
67
    AVFrame *in, *out;
68
    int meta_data[MAX_META_DATA];
69
    double  *data[MAX_DATA][NUM_PLANES];
70
} ThreadData;
71
72
/**
73
 * Common struct for all algorithm contexts.
74
 */
75
typedef struct ColorConstancyContext {
76
    const AVClass *class;
77
78
    int difford;
79
    int minknorm; /**< minknorm = 0 : take the max instead of a Minkowski sum */
80
    double sigma;
81
82
    int nb_threads;
83
    int planeheight[4];
84
    int planewidth[4];
85
86
    int filtersize;
87
    double *gauss[MAX_DIFF_ORD+1];
88
89
    double white[NUM_PLANES];
90
} ColorConstancyContext;
91
92
#define OFFSET(x) offsetof(ColorConstancyContext, x)
93
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
94
95
#define GINDX(s, i) ( (i) - ((s) >> 2) )
96
97
/**
98
 * Sets the gauss filters used for calculating gauss derivatives. The filter
99
 * size depends on sigma, which is a user option, so these filters are
100
 * recalculated each time. Each higher order also depends on the lower ones.
101
 * Sigma can be zero only when difford = 0; in that case the data is only
102
 * converted to double instead.
103
 *
104
 * @param ctx the filter context.
105
 *
106
 * @return 0 in case of success, a negative value corresponding to an
107
 * AVERROR code in case of failure.
108
 */
109
static int set_gauss(AVFilterContext *ctx)
110
{
111
    ColorConstancyContext *s = ctx->priv;
112
    int filtersize = s->filtersize;
113
    int difford    = s->difford;
114
    double sigma   = s->sigma;
115
    double sum1, sum2;
116
    int i;
117
118
    for (i = 0; i <= difford; ++i) {
119
        s->gauss[i] = av_mallocz_array(filtersize, sizeof(*s->gauss[i]));
120
        if (!s->gauss[i]) {
121
            for (; i >= 0; --i) {
122
                av_freep(&s->gauss[i]);
123
            }
124
            return AVERROR(ENOMEM);
125
        }
126
    }
127
128
    // Order 0
129
    av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
130
    sum1 = 0.0;
131
    if (!sigma) {
132
        s->gauss[0][0] = 1; // Copying data to double instead of convolution
133
    } else {
134
        for (i = 0; i < filtersize; ++i) {
135
            s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
136
            sum1 += s->gauss[0][i];
137
        }
138
        for (i = 0; i < filtersize; ++i) {
139
            s->gauss[0][i] /= sum1;
140
        }
141
    }
142
    // Order 1
143
    if (difford > 0) {
144
        av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
145
        sum1 = 0.0;
146
        for (i = 0; i < filtersize; ++i) {
147
            s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
148
            sum1 += s->gauss[1][i] * GINDX(filtersize, i);
149
        }
150
151
        for (i = 0; i < filtersize; ++i) {
152
            s->gauss[1][i] /= sum1;
153
        }
154
155
        // Order 2
156
        if (difford > 1) {
157
            av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
158
            sum1 = 0.0;
159
            for (i = 0; i < filtersize; ++i) {
160
                s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
161
                                 * s->gauss[0][i];
162
                sum1 += s->gauss[2][i];
163
            }
164
165
            sum2 = 0.0;
166
            for (i = 0; i < filtersize; ++i) {
167
                s->gauss[2][i] -= sum1 / (filtersize);
168
                sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
169
            }
170
            for (i = 0; i < filtersize ; ++i) {
171
                s->gauss[2][i] /= sum2;
172
            }
173
        }
174
    }
175
    return 0;
176
}
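/*
 * Illustrative note: with x = GINDX(filtersize, i) = i - (filtersize >> 2),
 * the loops above build, up to the normalizations applied afterwards,
 *
 *     gauss[0][i] ~ exp(-x^2 / (2*sigma^2))                      (smoothing)
 *     gauss[1][i] ~ -(x / sigma^2)              * gauss[0][i]    (1st derivative)
 *     gauss[2][i] ~ (x^2 / sigma^4 - 1/sigma^2) * gauss[0][i]    (2nd derivative)
 *
 * i.e. samples of a gaussian and of its first and second derivatives.
 */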
177
178
/**
179
 * Frees the buffers used by grey edge for storing final and intermediate
180
 * derivative results. The number of buffers and the number of planes of the
181
 * last buffer are given so the function can be safely called after an
182
 * allocation failure.
183
 *
184
 * @param td holds the buffers.
185
 * @param nb_buff number of buffers to be freed.
186
 * @param nb_planes number of planes for last buffer to be freed.
187
 */
188
static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
189
{
190
    int b, p;
191
192
    for (b = 0; b < nb_buff; ++b) {
193
        for (p = 0; p < NUM_PLANES; ++p) {
194
            av_freep(&td->data[b][p]);
195
        }
196
    }
197
    // Final buffer may not be fully allocated at fail cases
198
    for (p = 0; p < nb_planes; ++p) {
199
        av_freep(&td->data[b][p]);
200
    }
201
}
202
203
/**
204
 * Allocates the buffers used by grey edge for storing final and intermediate
205
 * derivative results.
206
 *
207
 * @param ctx the filter context.
208
 * @param td holds the buffers.
209
 *
210
 * @return 0 in case of success, a negative value corresponding to an
211
 * AVERROR code in case of failure.
212
 */
213
static int setup_derivative_buffers(AVFilterContext* ctx, ThreadData *td)
214
{
215
    ColorConstancyContext *s = ctx->priv;
216
    int nb_buff = s->difford + 1;
217
    int b, p;
218
219
    av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
220
    for (b = 0; b <= nb_buff; ++b) { // We need difford + 2 buffers (indices 0 .. difford + 1)
221
        for (p = 0; p < NUM_PLANES; ++p) {
222
            td->data[b][p] = av_mallocz_array(s->planeheight[p] * s->planewidth[p], sizeof(*td->data[b][p]));
223
            if (!td->data[b][p]) {
224
                cleanup_derivative_buffers(td, b + 1, p);
225
                return AVERROR(ENOMEM);
226
            }
227
        }
228
    }
229
    return 0;
230
}
231
232
#define CLAMP(x, mx) av_clip((x), 0, (mx-1))
233
#define INDX2D(r, c, w) ( (r) * (w) + (c) )
234
#define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
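/*
 * Descriptive note: CLAMP implements replicate ("clamp to edge") border
 * handling, so GAUSS reads the sample at row sr, column sc from source s
 * (row stride sls), with sr clamped to plane height sh and sc to plane
 * width sw, and weights it with the filter tap g.
 */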
235
236
/**
237
 * Slice calculation of gaussian derivatives. Applies a 1-D gaussian derivative
238
 * filter either horizontally or vertically, according to the metadata given in
239
 * the thread data. When convolving horizontally the source is always the input
240
 * frame within the thread data; when convolving vertically the source is a buffer.
241
 *
242
 * @param ctx the filter context.
243
 * @param arg data to be passed between threads.
244
 * @param jobnr current job number.
245
 * @param nb_jobs total number of jobs.
246
 *
247
 * @return 0.
248
 */
249
static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
250
{
251
    ColorConstancyContext *s = ctx->priv;
252
    ThreadData *td = arg;
253
    AVFrame *in = td->in;
254
    const int ord = td->meta_data[INDEX_ORD];
255
    const int dir = td->meta_data[INDEX_DIR];
256
    const int src_index  = td->meta_data[INDEX_SRC];
257
    const int dst_index  = td->meta_data[INDEX_DST];
258
    const int filtersize = s->filtersize;
259
    const double *gauss  = s->gauss[ord];
260
    int plane;
261
262
    for (plane = 0; plane < NUM_PLANES; ++plane) {
263
        const int height      = s->planeheight[plane];
264
        const int width       = s->planewidth[plane];
265
        const int in_linesize = in->linesize[plane];
266
        double *dst = td->data[dst_index][plane];
267
        int slice_start, slice_end;
268
        int r, c, g;
269
270
        if (dir == DIR_X) {
271
            /** Applying gauss horizontally along each row */
272
            const uint8_t *src = in->data[plane];
273
            slice_start = (height * jobnr      ) / nb_jobs;
274
            slice_end   = (height * (jobnr + 1)) / nb_jobs;
275
276
            for (r = slice_start; r < slice_end; ++r) {
277
                for (c = 0; c < width; ++c) {
278
                    dst[INDX2D(r, c, width)] = 0;
279
                    for (g = 0; g < filtersize; ++g) {
280
                        dst[INDX2D(r, c, width)] += GAUSS(src, r,                        c + GINDX(filtersize, g),
281
                                                          in_linesize, height, width, gauss[g]);
282
                    }
283
                }
284
            }
285
        } else {
286
            /** Applying gauss vertically along each column */
287
            const double *src = td->data[src_index][plane];
288
            slice_start = (width * jobnr      ) / nb_jobs;
289
            slice_end   = (width * (jobnr + 1)) / nb_jobs;
290
291
            for (c = slice_start; c < slice_end; ++c) {
292
                for (r = 0; r < height; ++r) {
293
                    dst[INDX2D(r, c, width)] = 0;
294
                    for (g = 0; g < filtersize; ++g) {
295
                        dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
296
                                                          width, height, width, gauss[g]);
297
                    }
298
                }
299
            }
300
        }
301
302
    }
303
    return 0;
304
}
305
306
/**
307
 * Slice Frobenius normalization of gaussian derivatives. Only called for difford
308
 * values of 1 or 2.
309
 *
310
 * @param ctx the filter context.
311
 * @param arg data to be passed between threads.
312
 * @param jobnr current job number.
313
 * @param nb_jobs total number of jobs.
314
 *
315
 * @return 0.
316
 */
317
static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
318
{
319
    ColorConstancyContext *s = ctx->priv;
320
    ThreadData *td = arg;
321
    const int difford = s->difford;
322
    int plane;
323
324
    for (plane = 0; plane < NUM_PLANES; ++plane) {
325
        const int height = s->planeheight[plane];
326
        const int width  = s->planewidth[plane];
327
        const int64_t numpixels = width * (int64_t)height;
328
        const int slice_start   = (numpixels * jobnr    ) / nb_jobs;
329
        const int slice_end     = (numpixels * (jobnr+1)) / nb_jobs;
330
        const double *dx = td->data[INDEX_DX][plane];
331
        const double *dy = td->data[INDEX_DY][plane];
332
        double *norm = td->data[INDEX_NORM][plane];
333
        int i;
334
335
        if (difford == 1) {
336
            for (i = slice_start; i < slice_end; ++i) {
337
                norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
338
            }
339
        } else {
340
            const double *dxy = td->data[INDEX_DXY][plane];
341
            for (i = slice_start; i < slice_end; ++i) {
342
                norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
343
            }
344
        }
345
    }
346
347
    return 0;
348
}
349
350
/**
351
 * Utility function for setting up differentiation data/metadata.
352
 *
353
 * @param ctx the filter context.
354
 * @param td to be used for passing data between threads.
355
 * @param ord order of differentiation.
356
 * @param dir direction of differentiation.
357
 * @param src index of source used for differentiation.
358
 * @param dst index of destination used for saving the differentiation result.
359
 * @param dim maximum dimension in current direction.
360
 * @param nb_threads number of threads to use.
361
 */
362
static void av_always_inline
363
get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
364
          int src, int dst, int dim, int nb_threads) {
365
    td->meta_data[INDEX_ORD] = ord;
366
    td->meta_data[INDEX_DIR] = dir;
367
    td->meta_data[INDEX_SRC] = src;
368
    td->meta_data[INDEX_DST] = dst;
369
    ctx->internal->execute(ctx, slice_get_derivative, td, NULL, FFMIN(dim, nb_threads));
370
}
371
372
/**
373
 * Main control function for calculating gaussian derivatives.
374
 *
375
 * @param ctx the filter context.
376
 * @param td holds the buffers used for storing results.
377
 *
378
 * @return 0 in case of success, a negative value corresponding to an
379
 * AVERROR code in case of failure.
380
 */
381
static int get_derivative(AVFilterContext *ctx, ThreadData *td)
382
{
383
    ColorConstancyContext *s = ctx->priv;
384
    int nb_threads = s->nb_threads;
385
    int height = s->planeheight[1];
386
    int width  = s->planewidth[1];
387
388
    switch(s->difford) {
389
    case 0:
390
        if (!s->sigma) { // Only copy once
391
            get_deriv(ctx, td, 0, DIR_X, 0         , INDEX_NORM, height, nb_threads);
392
        } else {
393
            get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
394
            get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
395
            // save to INDEX_NORM because this will not be normalized and
396
            // the grey edge filter expects the result to be found in INDEX_NORM
397
        }
398
        return 0;
399
400
    case 1:
401
        get_deriv(ctx, td, 1, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
402
        get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX,   width , nb_threads);
403
404
        get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
405
        get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY,   width , nb_threads);
406
        return 0;
407
408
    case 2:
409
        get_deriv(ctx, td, 2, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
410
        get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX,   width , nb_threads);
411
412
        get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
413
        get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY,   width , nb_threads);
414
415
        get_deriv(ctx, td, 1, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
416
        get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY,  width , nb_threads);
417
        return 0;
418
419
    default:
420
        av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
421
        return AVERROR(EINVAL);
422
    }
423
424
}
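/*
 * Descriptive note: each 2-D gaussian (derivative) filter is applied as two
 * separable 1-D passes. For example, d/dx is a 1st-order pass along rows
 * (DIR_X) followed by a 0th-order smoothing pass along columns (DIR_Y), which
 * is what the pairs of get_deriv() calls above implement.
 */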
425
426
/**
427
 * Slice function for grey edge algorithm that does partial summing/maximizing
428
 * of gaussian derivatives.
429
 *
430
 * @param ctx the filter context.
431
 * @param arg data to be passed between threads.
432
 * @param jobnr current job number.
433
 * @param nb_jobs total number of jobs.
434
 *
435
 * @return 0.
436
 */
437
static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
438
{
439
    ColorConstancyContext *s = ctx->priv;
440
    ThreadData *td = arg;
441
    AVFrame *in    = td->in;
442
    int minknorm   = s->minknorm;
443
    const uint8_t thresh = 255;
444
    int plane;
445
446
    for (plane = 0; plane < NUM_PLANES; ++plane) {
447
        const int height        = s->planeheight[plane];
448
        const int width         = s->planewidth[plane];
449
        const int in_linesize   = in->linesize[plane];
450
        const int slice_start   = (height * jobnr) / nb_jobs;
451
        const int slice_end     = (height * (jobnr+1)) / nb_jobs;
452
        const uint8_t *img_data = in->data[plane];
453
        const double *src       = td->data[INDEX_NORM][plane];
454
        double *dst             = td->data[INDEX_DST][plane];
455
        int r, c;
456
457
        dst[jobnr] = 0;
458
        if (!minknorm) {
459
            for (r = slice_start; r < slice_end; ++r) {
460
                for (c = 0; c < width; ++c) {
461
                    dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
462
                                        * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
463
                }
464
            }
465
        } else {
466
            for (r = slice_start; r < slice_end; ++r) {
467
                for (c = 0; c < width; ++c) {
468
                    dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
469
                                    * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
470
                }
471
            }
472
        }
473
    }
474
    return 0;
475
}
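/*
 * Descriptive note: together with the per-plane reduction in
 * filter_grey_edge() below, this computes, per colour plane k and
 * with p = minknorm,
 *
 *     white[k] = ( sum over pixels |norm_k / 255|^p )^(1/p)
 *
 * or the plain maximum of |norm_k| (without the 1/255 scaling) when minknorm
 * is 0; pixels at the maximum value (255) are excluded via the thresh mask.
 */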
476
477
/**
478
 * Main control function for grey edge algorithm.
479
 *
480
 * @param ctx the filter context.
481
 * @param in frame to perform grey edge on.
482
 *
483
 * @return 0 in case of success, a negative value corresponding to an
484
 * AVERROR code in case of failure.
485
 */
486
static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
487
{
488
    ColorConstancyContext *s = ctx->priv;
489
    ThreadData td;
490
    int minknorm  = s->minknorm;
491
    int difford   = s->difford;
492
    double *white = s->white;
493
    int nb_jobs   = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
494
    int plane, job, ret;
495
496
    td.in = in;
497
    ret = setup_derivative_buffers(ctx, &td);
498
    if (ret) {
499
        return ret;
500
    }
501
    get_derivative(ctx, &td);
502
    if (difford > 0) {
503
        ctx->internal->execute(ctx, slice_normalize, &td, NULL, nb_jobs);
504
    }
505
506
    ctx->internal->execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
507
    if (!minknorm) {
508
        for (plane = 0; plane < NUM_PLANES; ++plane) {
509
            white[plane] = 0; // All values are absolute
510
            for (job = 0; job < nb_jobs; ++job) {
511
                white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
512
            }
513
        }
514
    } else {
515
        for (plane = 0; plane < NUM_PLANES; ++plane) {
516
            white[plane] = 0;
517
            for (job = 0; job < nb_jobs; ++job) {
518
                white[plane] += td.data[INDEX_DST][plane][job];
519
            }
520
            white[plane] = pow(white[plane], 1./minknorm);
521
        }
522
    }
523
524
    cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
525
    return 0;
526
}
527
528
/**
529
 * Normalizes the estimated illumination since only the direction of the
530
 * illumination vector is required for color constancy.
531
 *
532
 * @param light the estimated illumination to be normalized in place
533
 */
534
static void normalize_light(double *light)
535
{
536
    double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
537
    int plane;
538
539
    // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
540
541
    if (!abs_val) {
542
        for (plane = 0; plane < NUM_PLANES; ++plane) {
543
            light[plane] = 1.0;
544
        }
545
    } else {
546
        for (plane = 0; plane < NUM_PLANES; ++plane) {
547
            light[plane] = (light[plane] / abs_val);
548
            if (!light[plane]) { // to avoid division by zero when correcting
549
                light[plane] = 1.0;
550
            }
551
        }
552
    }
553
}
554
555
/**
556
 * Redirects to the corresponding algorithm's estimation function and performs
557
 * normalization after estimation.
558
 *
559
 * @param ctx the filter context.
560
 * @param in frame to perform estimation on.
561
 *
562
 * @return 0 in case of success, a negative value corresponding to an
563
 * AVERROR code in case of failure.
564
 */
565
static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
566
{
567
    ColorConstancyContext *s = ctx->priv;
568
    int ret;
569
570
    ret = filter_grey_edge(ctx, in);
571
572
    av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
573
           s->white[0], s->white[1], s->white[2]);
574
    normalize_light(s->white);
575
    av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
576
           s->white[0], s->white[1], s->white[2]);
577
578
    return ret;
579
}
580
581
/**
582
 * Performs simple correction via diagonal transformation model.
583
 *
584
 * @param ctx the filter context.
585
 * @param arg data to be passed between threads.
586
 * @param jobnr current job number.
587
 * @param nb_jobs total number of jobs.
588
 *
589
 * @return 0.
590
 */
591
static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
592
{
593
    ColorConstancyContext *s = ctx->priv;
594
    ThreadData *td = arg;
595
    AVFrame *in = td->in;
596
    AVFrame *out = td->out;
597
    int plane;
598
599
    for (plane = 0; plane < NUM_PLANES; ++plane) {
600
        const int height = s->planeheight[plane];
601
        const int width  = s->planewidth[plane];
602
        const int64_t numpixels = width * (int64_t)height;
603
        const int slice_start   = (numpixels * jobnr) / nb_jobs;
604
        const int slice_end     = (numpixels * (jobnr+1)) / nb_jobs;
605
        const uint8_t *src = in->data[plane];
606
        uint8_t *dst       = out->data[plane];
607
        double temp;
608
        unsigned i;
609
610
        for (i = slice_start; i < slice_end; ++i) {
611
            temp = src[i] / (s->white[plane] * SQRT3);
612
            dst[i] = av_clip_uint8((int)(temp + 0.5));
613
        }
614
    }
615
    return 0;
616
}
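/*
 * Worked example: the diagonal (von Kries-like) model divides each plane by
 * its estimated illuminant component, rescaled by sqrt(3). A neutral estimate
 * white = (1/sqrt(3), 1/sqrt(3), 1/sqrt(3)) gives white[plane] * SQRT3 = 1 and
 * leaves the frame unchanged, while a reddish estimate (larger white[0])
 * scales the red plane down accordingly.
 */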
617
618
/**
619
 * Main control function for correcting scene illumination based on
620
 * estimated illumination.
621
 *
622
 * @param ctx the filter context.
623
 * @param in holds frame to correct
624
 * @param out holds corrected frame
625
 */
626
static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
627
{
628
    ColorConstancyContext *s = ctx->priv;
629
    ThreadData td;
630
    int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
631
632
    td.in  = in;
633
    td.out = out;
634
    ctx->internal->execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
635
}
636
637
static int query_formats(AVFilterContext *ctx)
638
{
639
    static const enum AVPixelFormat pix_fmts[] = {
640
        // TODO: support more formats
641
        // FIXME: error when saving to .jpg
642
        AV_PIX_FMT_GBRP,
643
        AV_PIX_FMT_NONE
644
    };
645
646
    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
647
}
648
649
static int config_props(AVFilterLink *inlink)
650
{
651
    AVFilterContext *ctx = inlink->dst;
652
    ColorConstancyContext *s = ctx->priv;
653
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
654
    const double break_off_sigma = 3.0;
655
    double sigma = s->sigma;
656
    int ret;
657
658
    if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
659
        av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
660
        return AVERROR(EINVAL);
661
    }
662
663
    s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
664
    if ((ret = set_gauss(ctx))) {
665
        return ret;
666
    }
667
668
    s->nb_threads = ff_filter_get_nb_threads(ctx);
669
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
670
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
671
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
672
    s->planeheight[0] = s->planeheight[3] = inlink->h;
673
674
    return 0;
675
}
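/*
 * Worked example: with the default sigma = 1.0 and break_off_sigma = 3.0 the
 * filter size above becomes 2 * floor(3.0 * 1.0 + 0.5) + 1 = 7 taps; with
 * sigma = 2.0 it becomes 2 * floor(6.5) + 1 = 13 taps.
 */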
676
677
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
678
{
679
    AVFilterContext *ctx = inlink->dst;
680
    AVFilterLink *outlink = ctx->outputs[0];
681
    AVFrame *out;
682
    int ret;
683
    int direct = 0;
684
685
    ret = illumination_estimation(ctx, in);
686
    if (ret) {
687
        av_frame_free(&in);
688
        return ret;
689
    }
690
691
    if (av_frame_is_writable(in)) {
692
        direct = 1;
693
        out = in;
694
    } else {
695
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
696
        if (!out) {
697
            av_frame_free(&in);
698
            return AVERROR(ENOMEM);
699
        }
700
        av_frame_copy_props(out, in);
701
    }
702
    chromatic_adaptation(ctx, in, out);
703
704
    if (!direct)
705
        av_frame_free(&in);
706
707
    return ff_filter_frame(outlink, out);
708
}
709
710
static av_cold void uninit(AVFilterContext *ctx)
711
{
712
    ColorConstancyContext *s = ctx->priv;
713
    int difford = s->difford;
714
    int i;
715
716
    for (i = 0; i <= difford; ++i) {
717
        av_freep(&s->gauss[i]);
718
    }
719
}
720
721
static const AVFilterPad colorconstancy_inputs[] = {
722
    {
723
        .name         = "default",
724
        .type         = AVMEDIA_TYPE_VIDEO,
725
        .config_props = config_props,
726
        .filter_frame = filter_frame,
727
    },
728
    { NULL }
729
};
730
731
static const AVFilterPad colorconstancy_outputs[] = {
732
    {
733
        .name = "default",
734
        .type = AVMEDIA_TYPE_VIDEO,
735
    },
736
    { NULL }
737
};
738
739
#if CONFIG_GREYEDGE_FILTER
740
741
static const AVOption greyedge_options[] = {
742
    { "difford",  "set differentiation order", OFFSET(difford),  AV_OPT_TYPE_INT,    {.i64=1}, 0,   2,      FLAGS },
743
    { "minknorm", "set Minkowski norm",        OFFSET(minknorm), AV_OPT_TYPE_INT,    {.i64=1}, 0,   20,     FLAGS },
744
    { "sigma",    "set sigma",                 OFFSET(sigma),    AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
745
    { NULL }
746
};
747
748
AVFILTER_DEFINE_CLASS(greyedge);
749
750
AVFilter ff_vf_greyedge = {
751
    .name          = GREY_EDGE,
752
    .description   = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
753
    .priv_size     = sizeof(ColorConstancyContext),
754
    .priv_class    = &greyedge_class,
755
    .query_formats = query_formats,
756
    .uninit        = uninit,
757
    .inputs        = colorconstancy_inputs,
758
    .outputs       = colorconstancy_outputs,
759
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
760
};
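/*
 * Usage sketch (options as defined above; the command line is illustrative):
 *
 *     ffmpeg -i input.mkv -vf greyedge=difford=1:minknorm=5:sigma=2 output.mkv
 *
 * Roughly, difford=0 with minknorm=0 corresponds to a max-RGB style estimate,
 * difford=0 with minknorm=p to a shades-of-grey estimate, and difford=1 or 2
 * to the grey-edge estimates proper.
 */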
761
762
#endif /* CONFIG_GREYEDGE_FILTER */