GCC Code Coverage Report
Directory: ../../../ffmpeg/
File:      src/libavfilter/vf_colormatrix.c
Date:      2020-08-14 10:39:37
Lines:     141 of 279 executed (50.5 %)
Branches:  40 of 80 executed (50.0 %)

/*
 * ColorMatrix v2.2 for Avisynth 2.5.x
 *
 * Copyright (C) 2006-2007 Kevin Stone
 *
 * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
 * Dijkhof.  It adds the ability to convert between any of: Rec.709, FCC,
 * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
 * adds an option to use scaled or non-scaled coefficients, and more...
 */
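
/*
 * Example invocation (file names are placeholders): convert an SD stream
 * known to use the BT.601 matrix to the BT.709 matrix expected by HD output:
 *
 *     ffmpeg -i in.avi -vf colormatrix=src=bt601:dst=bt709 out.mkv
 *
 * If "src" is omitted, the filter falls back to the input frame's colorspace
 * tag (see filter_frame() below).
 */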

#include <float.h>
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"

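/* NS() converts a floating-point coefficient to 16.16 fixed point, rounding
 * half away from zero (DBL_EPSILON compensates for representation error on
 * negative values); CB() clips the final 8-bit result. */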
#define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) : (int)((n)*65536.0+0.5))
#define CB(n) av_clip_uint8(n)

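/* Per-standard luma weights, stored as { Kg, Kb, Kr }; the two chroma rows of
 * each full 3x3 RGB->YUV matrix are derived from them in calc_coefficients(). */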
static const double yuv_coeff_luma[5][3] = {
    { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
    { +0.5900, +0.1100, +0.3000 }, // FCC (1)
    { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
    { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
    { +0.6780, +0.0593, +0.2627 }, // Rec.2020 (4)
};

enum ColorMode {
    COLOR_MODE_NONE = -1,
    COLOR_MODE_BT709,
    COLOR_MODE_FCC,
    COLOR_MODE_BT601,
    COLOR_MODE_SMPTE240M,
    COLOR_MODE_BT2020,
    COLOR_MODE_COUNT
};

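/* yuv_convert holds one 16.16 fixed-point matrix for every source/destination
 * pair: 5 x 5 = 25 entries, indexed as source * 5 + dest (see filter_frame()). */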
typedef struct ColorMatrixContext {
    const AVClass *class;
    int yuv_convert[25][3][3];
    int interlaced;
    int source, dest;        ///< ColorMode
    int mode;
    int hsub, vsub;
} ColorMatrixContext;

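/* Arguments for one slice job: the frame pair plus the six fixed-point
 * coefficients of the selected matrix that multiply the chroma inputs. */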
typedef struct ThreadData {
    AVFrame *dst;
    const AVFrame *src;
    int c2;
    int c3;
    int c4;
    int c5;
    int c6;
    int c7;
} ThreadData;

#define OFFSET(x) offsetof(ColorMatrixContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption colormatrix_options[] = {
    { "src", "set source color matrix",      OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
    { "dst", "set destination color matrix", OFFSET(dest),   AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
    { "bt709",     "set BT.709 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709},       .flags=FLAGS, .unit="color_mode" },
    { "fcc",       "set FCC colorspace   ",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC},         .flags=FLAGS, .unit="color_mode" },
    { "bt601",     "set BT.601 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
    { "bt470",     "set BT.470 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
    { "bt470bg",   "set BT.470 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
    { "smpte170m", "set SMPTE-170M colorspace",  0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
    { "smpte240m", "set SMPTE-240M colorspace",  0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M},   .flags=FLAGS, .unit="color_mode" },
    { "bt2020",    "set BT.2020 colorspace",     0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT2020},      .flags=FLAGS, .unit="color_mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(colormatrix);

#define ma m[0][0]
#define mb m[0][1]
#define mc m[0][2]
#define md m[1][0]
#define me m[1][1]
#define mf m[1][2]
#define mg m[2][0]
#define mh m[2][1]
#define mi m[2][2]

#define ima im[0][0]
#define imb im[0][1]
#define imc im[0][2]
#define imd im[1][0]
#define ime im[1][1]
#define imf im[1][2]
#define img im[2][0]
#define imh im[2][1]
#define imi im[2][2]

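/* Invert the 3x3 matrix m into im via the classic adjugate / determinant
 * formula; used to obtain each standard's YUV->RGB matrix. */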
static void inverse3x3(double im[3][3], double m[3][3])
{
    double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
    det = 1.0 / det;
    ima = det * (me * mi - mf * mh);
    imb = det * (mc * mh - mb * mi);
    imc = det * (mb * mf - mc * me);
    imd = det * (mf * mg - md * mi);
    ime = det * (ma * mi - mc * mg);
    imf = det * (mc * md - ma * mf);
    img = det * (md * mh - me * mg);
    imh = det * (mb * mg - ma * mh);
    imi = det * (ma * me - mb * md);
}

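/* cm = yuv * rgb (plain 3x3 matrix product); with rgb = inverse(M_src) and
 * yuv = M_dst this yields the direct YUV(src) -> YUV(dst) conversion matrix. */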
static void solve_coefficients(double cm[3][3], double rgb[3][3], double yuv[3][3])
{
    int i, j;
    for (i = 0; i < 3; i++)
        for (j = 0; j < 3; j++)
            cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
}

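/*
 * Expand each luma triple { Kg, Kb, Kr } into a full RGB->YUV matrix (columns
 * ordered G, B, R) using U = 0.5 * (B - Y) / (1 - Kb) and
 * V = 0.5 * (R - Y) / (1 - Kr), invert it to get YUV->RGB, and precompute the
 * 25 source/destination products in 16.16 fixed point. In every product the
 * first column must come out as (65536, 0, 0): the source luma passes through
 * with unit gain and must not leak into chroma, which is what the sanity
 * check below verifies.
 */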
static void calc_coefficients(AVFilterContext *ctx)
{
    ColorMatrixContext *color = ctx->priv;
    double yuv_coeff[5][3][3];
    double rgb_coeffd[5][3][3];
    double yuv_convertd[25][3][3];
    double bscale, rscale;
    int v = 0;
    int i, j, k;
    for (i = 0; i < 5; i++) {
        yuv_coeff[i][0][0] = yuv_coeff_luma[i][0];
        yuv_coeff[i][0][1] = yuv_coeff_luma[i][1];
        yuv_coeff[i][0][2] = yuv_coeff_luma[i][2];
        bscale = 0.5 / (yuv_coeff[i][0][1] - 1.0);
        rscale = 0.5 / (yuv_coeff[i][0][2] - 1.0);
        yuv_coeff[i][1][0] = bscale * yuv_coeff[i][0][0];
        yuv_coeff[i][1][1] = 0.5;
        yuv_coeff[i][1][2] = bscale * yuv_coeff[i][0][2];
        yuv_coeff[i][2][0] = rscale * yuv_coeff[i][0][0];
        yuv_coeff[i][2][1] = rscale * yuv_coeff[i][0][1];
        yuv_coeff[i][2][2] = 0.5;
    }
    for (i = 0; i < 5; i++)
        inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
    for (i = 0; i < 5; i++) {
        for (j = 0; j < 5; j++) {
            solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
            for (k = 0; k < 3; k++) {
                color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
                color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
                color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
            }
            if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
                color->yuv_convert[v][2][0] != 0) {
                av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
            }
            v++;
        }
    }
}

static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m", "bt2020"};

static av_cold int init(AVFilterContext *ctx)
{
    ColorMatrixContext *color = ctx->priv;

    if (color->dest == COLOR_MODE_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
        return AVERROR(EINVAL);
    }

    if (color->source == color->dest) {
        av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
        return AVERROR(EINVAL);
    }

    calc_coefficients(ctx);

    return 0;
}

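/*
 * Slice workers. Each applies the selected 16.16 fixed-point matrix per pixel:
 *
 *     Y' = (65536 * (Y - 16) + c2 * (U - 128) + c3 * (V - 128) + 1081344) >> 16
 *     U' = (                   c4 * (U - 128) + c5 * (V - 128) + 8421376) >> 16
 *     V' = (                   c6 * (U - 128) + c7 * (V - 128) + 8421376) >> 16
 *
 * where 1081344 = (16 + 0.5) * 65536 and 8421376 = (128 + 0.5) * 65536, i.e.
 * the luma/chroma offsets are added back together with 0.5 for rounding
 * before the final shift.
 */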
static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const ThreadData *td = arg;
    const AVFrame *src = td->src;
    AVFrame *dst = td->dst;
    const int height = src->height;
    const int width = src->width*2;
    const int src_pitch = src->linesize[0];
    const int dst_pitch = dst->linesize[0];
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const unsigned char *srcp = src->data[0] + slice_start * src_pitch;
    unsigned char *dstp = dst->data[0] + slice_start * dst_pitch;
    const int c2 = td->c2;
    const int c3 = td->c3;
    const int c4 = td->c4;
    const int c5 = td->c5;
    const int c6 = td->c6;
    const int c7 = td->c7;
    int x, y;

    for (y = slice_start; y < slice_end; y++) {
        for (x = 0; x < width; x += 4) {
            const int u = srcp[x + 0] - 128;
            const int v = srcp[x + 2] - 128;
            const int uvval = c2 * u + c3 * v + 1081344;
            dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
            dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
            dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
            dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
        }
        srcp += src_pitch;
        dstp += dst_pitch;
    }

    return 0;
}

static int process_slice_yuv444p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const ThreadData *td = arg;
    const AVFrame *src = td->src;
    AVFrame *dst = td->dst;
    const int height = src->height;
    const int width = src->width;
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const int src_pitchY  = src->linesize[0];
    const int src_pitchUV = src->linesize[1];
    const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
    const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
    const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
    const int dst_pitchY  = dst->linesize[0];
    const int dst_pitchUV = dst->linesize[1];
    unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
    unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
    unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
    const int c2 = td->c2;
    const int c3 = td->c3;
    const int c4 = td->c4;
    const int c5 = td->c5;
    const int c6 = td->c6;
    const int c7 = td->c7;
    int x, y;

    for (y = slice_start; y < slice_end; y++) {
        for (x = 0; x < width; x++) {
            const int u = srcpU[x] - 128;
            const int v = srcpV[x] - 128;
            const int uvval = c2 * u + c3 * v + 1081344;
            dstpY[x] = CB((65536 * (srcpY[x] - 16) + uvval) >> 16);
            dstpU[x] = CB((c4 * u + c5 * v + 8421376) >> 16);
            dstpV[x] = CB((c6 * u + c7 * v + 8421376) >> 16);
        }
        srcpY += src_pitchY;
        dstpY += dst_pitchY;
        srcpU += src_pitchUV;
        srcpV += src_pitchUV;
        dstpU += dst_pitchUV;
        dstpV += dst_pitchUV;
    }

    return 0;
}

static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const ThreadData *td = arg;
    const AVFrame *src = td->src;
    AVFrame *dst = td->dst;
    const int height = src->height;
    const int width = src->width;
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const int src_pitchY  = src->linesize[0];
    const int src_pitchUV = src->linesize[1];
    const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
    const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
    const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
    const int dst_pitchY  = dst->linesize[0];
    const int dst_pitchUV = dst->linesize[1];
    unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
    unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
    unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
    const int c2 = td->c2;
    const int c3 = td->c3;
    const int c4 = td->c4;
    const int c5 = td->c5;
    const int c6 = td->c6;
    const int c7 = td->c7;
    int x, y;

    for (y = slice_start; y < slice_end; y++) {
        for (x = 0; x < width; x += 2) {
            const int u = srcpU[x >> 1] - 128;
            const int v = srcpV[x >> 1] - 128;
            const int uvval = c2 * u + c3 * v + 1081344;
            dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
            dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
            dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
            dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
        }
        srcpY += src_pitchY;
        dstpY += dst_pitchY;
        srcpU += src_pitchUV;
        srcpV += src_pitchUV;
        dstpU += dst_pitchUV;
        dstpV += dst_pitchUV;
    }

    return 0;
}

60
static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
339
{
340
60
    const ThreadData *td = arg;
341
60
    const AVFrame *src = td->src;
342
60
    AVFrame *dst = td->dst;
343
60
    const int height = FFALIGN(src->height, 2) >> 1;
344
60
    const int width = src->width;
345
60
    const int slice_start = ((height *  jobnr   ) / nb_jobs) << 1;
346
60
    const int slice_end   = ((height * (jobnr+1)) / nb_jobs) << 1;
347
60
    const int src_pitchY  = src->linesize[0];
348
60
    const int src_pitchUV = src->linesize[1];
349
60
    const int dst_pitchY  = dst->linesize[0];
350
60
    const int dst_pitchUV = dst->linesize[1];
351
60
    const unsigned char *srcpY = src->data[0] + src_pitchY * slice_start;
352
60
    const unsigned char *srcpU = src->data[1] + src_pitchUV * (slice_start >> 1);
353
60
    const unsigned char *srcpV = src->data[2] + src_pitchUV * (slice_start >> 1);
354
60
    const unsigned char *srcpN = src->data[0] + src_pitchY * (slice_start + 1);
355
60
    unsigned char *dstpU = dst->data[1] + dst_pitchUV * (slice_start >> 1);
356
60
    unsigned char *dstpV = dst->data[2] + dst_pitchUV * (slice_start >> 1);
357
60
    unsigned char *dstpY = dst->data[0] + dst_pitchY * slice_start;
358
60
    unsigned char *dstpN = dst->data[0] + dst_pitchY * (slice_start + 1);
359
60
    const int c2 = td->c2;
360
60
    const int c3 = td->c3;
361
60
    const int c4 = td->c4;
362
60
    const int c5 = td->c5;
363
60
    const int c6 = td->c6;
364
60
    const int c7 = td->c7;
365
    int x, y;
366
367
8700
    for (y = slice_start; y < slice_end; y += 2) {
368
1529280
        for (x = 0; x < width; x += 2) {
369
1520640
            const int u = srcpU[x >> 1] - 128;
370
1520640
            const int v = srcpV[x >> 1] - 128;
371
1520640
            const int uvval = c2 * u + c3 * v + 1081344;
372
1520640
            dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
373
1520640
            dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
374
1520640
            dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
375
1520640
            dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
376
1520640
            dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
377
1520640
            dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
378
        }
379
8640
        srcpY += src_pitchY << 1;
380
8640
        dstpY += dst_pitchY << 1;
381
8640
        srcpN += src_pitchY << 1;
382
8640
        dstpN += dst_pitchY << 1;
383
8640
        srcpU += src_pitchUV;
384
8640
        srcpV += src_pitchUV;
385
8640
        dstpU += dst_pitchUV;
386
8640
        dstpV += dst_pitchUV;
387
    }
388
389
60
    return 0;
390
}
391
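/* Cache the chroma subsampling of the negotiated input format. Note that the
 * verbose log reads color_modes[color->source] even when "src" was left
 * unset (COLOR_MODE_NONE == -1); automatic source selection only happens
 * later, per frame, in filter_frame(). */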
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ColorMatrixContext *color = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);

    color->hsub = pix_desc->log2_chroma_w;
    color->vsub = pix_desc->log2_chroma_h;

    av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
           color_modes[color->source], color_modes[color->dest]);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_UYVY422,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

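/* Pick the conversion matrix for this frame: if no source matrix was given,
 * derive it from the frame's colorspace tag, then index yuv_convert with
 * mode = source * 5 + dest, tag the output frame with the destination
 * colorspace, and hand the pixel work to the per-format slice workers. */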
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    ThreadData td = {0};

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (color->source == COLOR_MODE_NONE) {
        enum AVColorSpace cs = in->colorspace;
        enum ColorMode source;

        switch(cs) {
        case AVCOL_SPC_BT709     : source = COLOR_MODE_BT709     ; break;
        case AVCOL_SPC_FCC       : source = COLOR_MODE_FCC       ; break;
        case AVCOL_SPC_SMPTE240M : source = COLOR_MODE_SMPTE240M ; break;
        case AVCOL_SPC_BT470BG   : source = COLOR_MODE_BT601     ; break;
        case AVCOL_SPC_SMPTE170M : source = COLOR_MODE_BT601     ; break;
        case AVCOL_SPC_BT2020_NCL: source = COLOR_MODE_BT2020    ; break;
        case AVCOL_SPC_BT2020_CL : source = COLOR_MODE_BT2020    ; break;
        default :
            av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
            av_frame_free(&out);
            return AVERROR(EINVAL);
        }
        color->mode = source * 5 + color->dest;
    } else
        color->mode = color->source * 5 + color->dest;

    switch(color->dest) {
    case COLOR_MODE_BT709    : out->colorspace = AVCOL_SPC_BT709     ; break;
    case COLOR_MODE_FCC      : out->colorspace = AVCOL_SPC_FCC       ; break;
    case COLOR_MODE_SMPTE240M: out->colorspace = AVCOL_SPC_SMPTE240M ; break;
    case COLOR_MODE_BT601    : out->colorspace = AVCOL_SPC_BT470BG   ; break;
    case COLOR_MODE_BT2020   : out->colorspace = AVCOL_SPC_BT2020_NCL; break;
    }

    td.src = in;
    td.dst = out;
    td.c2 = color->yuv_convert[color->mode][0][1];
    td.c3 = color->yuv_convert[color->mode][0][2];
    td.c4 = color->yuv_convert[color->mode][1][1];
    td.c5 = color->yuv_convert[color->mode][1][2];
    td.c6 = color->yuv_convert[color->mode][2][1];
    td.c7 = color->yuv_convert[color->mode][2][2];

    if (in->format == AV_PIX_FMT_YUV444P)
        ctx->internal->execute(ctx, process_slice_yuv444p, &td, NULL,
                               FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
    else if (in->format == AV_PIX_FMT_YUV422P)
        ctx->internal->execute(ctx, process_slice_yuv422p, &td, NULL,
                               FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
    else if (in->format == AV_PIX_FMT_YUV420P)
        ctx->internal->execute(ctx, process_slice_yuv420p, &td, NULL,
                               FFMIN(in->height / 2, ff_filter_get_nb_threads(ctx)));
    else
        ctx->internal->execute(ctx, process_slice_uyvy422, &td, NULL,
                               FFMIN(in->height, ff_filter_get_nb_threads(ctx)));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static const AVFilterPad colormatrix_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad colormatrix_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_colormatrix = {
    .name          = "colormatrix",
    .description   = NULL_IF_CONFIG_SMALL("Convert color matrix."),
    .priv_size     = sizeof(ColorMatrixContext),
    .init          = init,
    .query_formats = query_formats,
    .inputs        = colormatrix_inputs,
    .outputs       = colormatrix_outputs,
    .priv_class    = &colormatrix_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};