/*
 * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Convert between colorspaces.
 */

#include "libavutil/avassert.h"
#include "libavutil/csp.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"

#include "avfilter.h"
#include "colorspacedsp.h"
#include "filters.h"
#include "formats.h"
#include "video.h"
#include "colorspace.h"

enum DitherMode {
    DITHER_NONE,
    DITHER_FSB,
    DITHER_NB,
};

enum Colorspace {
    CS_UNSPECIFIED,
    CS_BT470M,
    CS_BT470BG,
    CS_BT601_6_525,
    CS_BT601_6_625,
    CS_BT709,
    CS_SMPTE170M,
    CS_SMPTE240M,
    CS_BT2020,
    CS_NB,
};

enum WhitepointAdaptation {
    WP_ADAPT_BRADFORD,
    WP_ADAPT_VON_KRIES,
    NB_WP_ADAPT_NON_IDENTITY,
    WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
    NB_WP_ADAPT,
};

static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_TRC_GAMMA22,
    [CS_BT470BG]     = AVCOL_TRC_GAMMA28,
    [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
    [CS_BT709]       = AVCOL_TRC_BT709,
    [CS_SMPTE170M]   = AVCOL_TRC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_TRC_SMPTE240M,
    [CS_BT2020]      = AVCOL_TRC_BT2020_10,
    [CS_NB]          = AVCOL_TRC_UNSPECIFIED,
};

static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_PRI_BT470M,
    [CS_BT470BG]     = AVCOL_PRI_BT470BG,
    [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
    [CS_BT709]       = AVCOL_PRI_BT709,
    [CS_SMPTE170M]   = AVCOL_PRI_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_PRI_SMPTE240M,
    [CS_BT2020]      = AVCOL_PRI_BT2020,
    [CS_NB]          = AVCOL_PRI_UNSPECIFIED,
};

static const enum AVColorSpace default_csp[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_SPC_SMPTE170M,
    [CS_BT470BG]     = AVCOL_SPC_BT470BG,
    [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
    [CS_BT709]       = AVCOL_SPC_BT709,
    [CS_SMPTE170M]   = AVCOL_SPC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_SPC_SMPTE240M,
    [CS_BT2020]      = AVCOL_SPC_BT2020_NCL,
    [CS_NB]          = AVCOL_SPC_UNSPECIFIED,
};

struct TransferCharacteristics {
    double alpha, beta, gamma, delta;
};

typedef struct ColorSpaceContext {
    const AVClass *class;

    ColorSpaceDSPContext dsp;

    enum Colorspace user_all, user_iall;
    enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
    enum AVColorRange in_rng, out_rng, user_rng, user_irng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
    enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
    enum AVPixelFormat in_format, user_format;
    int fast_mode;
    enum DitherMode dither;
    enum WhitepointAdaptation wp_adapt;

    int16_t *rgb[3];
    ptrdiff_t rgb_stride;
    unsigned rgb_sz;
    int *dither_scratch[3][2], *dither_scratch_base[3][2];

    const AVColorPrimariesDesc *in_primaries, *out_primaries;
    int lrgb2lrgb_passthrough;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];

    const struct TransferCharacteristics *in_txchr, *out_txchr;
    int rgb2rgb_passthrough;
    int16_t *lin_lut, *delin_lut;

    const AVLumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
    yuv2rgb_fn yuv2rgb;
    rgb2yuv_fn rgb2yuv;
    rgb2yuv_fsb_fn rgb2yuv_fsb;
    yuv2yuv_fn yuv2yuv;
    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;

    int did_warn_range;
} ColorSpaceContext;

// FIXME deal with odd width/heights
// FIXME faster linearize/delinearize implementation (integer pow)
// FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
// FIXME test that the values in (de)lin_lut don't exceed their container storage
// type size (only useful if we keep the LUT and don't move to fast integer pow)
// FIXME dithering if bitdepth goes down?
// FIXME bitexact for fate integration?

// FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
// find any actual tables that document their real values...
// See the first graph at http://www.13thmonkey.org/~boris/gammacorrection/
// for why this matters.
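// The table below parametrizes the piecewise transfer function applied by
// fill_gamma_table(): for linear E >= beta the encoded value is
// alpha * pow(E, gamma) - (alpha - 1), and below beta it is the linear segment
// delta * E (negative input is mirrored). BT.709, for example, becomes
// E' = 1.099 * E^0.45 - 0.099 above E = 0.018 and E' = 4.5 * E below it.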
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709]        = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_GAMMA22]      = { 1.0,    0.0,       1.0 / 2.2, 0.0 },
    [AVCOL_TRC_GAMMA28]      = { 1.0,    0.0,       1.0 / 2.8, 0.0 },
    [AVCOL_TRC_SMPTE170M]    = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_SMPTE240M]    = { 1.1115, 0.0228,    0.45,      4.0 },
    [AVCOL_TRC_LINEAR]       = { 1.0,    0.0,       1.0,       0.0 },
    [AVCOL_TRC_IEC61966_2_1] = { 1.055,  0.0031308, 1.0 / 2.4, 12.92 },
    [AVCOL_TRC_IEC61966_2_4] = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_BT2020_10]    = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_BT2020_12]    = { 1.0993, 0.0181,    0.45,      4.5 },
};

static const struct TransferCharacteristics *
get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
{
    const struct TransferCharacteristics *coeffs;

    if (trc >= AVCOL_TRC_NB)
        return NULL;
    coeffs = &transfer_characteristics[trc];
    if (!coeffs->alpha)
        return NULL;

    return coeffs;
}

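/*
 * Both LUTs are indexed by the internal 15-bit RGB representation: index n
 * stands for the value (n - 2048) / 28672, i.e. roughly [-0.07, 1.07], so a
 * little head- and footroom around the nominal [0.0, 1.0] range survives the
 * round trip. Results are clipped back into int16_t, with 28672 == 1.0.
 */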
static int fill_gamma_table(ColorSpaceContext *s)
{
    int n;
    double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
    double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
    double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
    double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
    double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;

    s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
    if (!s->lin_lut)
        return AVERROR(ENOMEM);
    s->delin_lut = &s->lin_lut[32768];
    for (n = 0; n < 32768; n++) {
        double v = (n - 2048.0) / 28672.0, d, l;

        // delinearize
        if (v <= -out_beta) {
            d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
        } else if (v < out_beta) {
            d = out_delta * v;
        } else {
            d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
        }
        s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));

        // linearize
        if (v <= -in_beta * in_delta) {
            l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
        } else if (v < in_beta * in_delta) {
            l = v * in_idelta;
        } else {
            l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
        }
        s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
    }

    return 0;
}

/*
 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
 * This function applies the Bradford or Von Kries mechanism, depending on
 * wp_adapt.
 */
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
                                       const AVWhitepointCoefficients *wp_src,
                                       const AVWhitepointCoefficients *wp_dst)
{
    static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
        [WP_ADAPT_BRADFORD] = {
            {  0.8951,  0.2664, -0.1614 },
            { -0.7502,  1.7135,  0.0367 },
            {  0.0389, -0.0685,  1.0296 },
        }, [WP_ADAPT_VON_KRIES] = {
            {  0.40024,  0.70760, -0.08081 },
            { -0.22630,  1.16532,  0.04570 },
            {  0.00000,  0.00000,  0.91822 },
        },
    };
    const double (*ma)[3] = ma_tbl[wp_adapt];
    double xw_src = av_q2d(wp_src->x), yw_src = av_q2d(wp_src->y);
    double xw_dst = av_q2d(wp_dst->x), yw_dst = av_q2d(wp_dst->y);
    double zw_src = 1.0 - xw_src - yw_src;
    double zw_dst = 1.0 - xw_dst - yw_dst;
    double mai[3][3], fac[3][3], tmp[3][3];
    double rs, gs, bs, rd, gd, bd;

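    /*
     * Following Lindbloom's formulation: take both whitepoints into the cone
     * response domain through ma, build the diagonal scaling
     * fac = diag(rd / rs, gd / gs, bd / bs) that maps the source response onto
     * the destination response, and wrap it with ma and its inverse to obtain
     * the adaptation matrix in XYZ space.
     */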
    ff_matrix_invert_3x3(ma, mai);
    rs = ma[0][0] * xw_src + ma[0][1] * yw_src + ma[0][2] * zw_src;
    gs = ma[1][0] * xw_src + ma[1][1] * yw_src + ma[1][2] * zw_src;
    bs = ma[2][0] * xw_src + ma[2][1] * yw_src + ma[2][2] * zw_src;
    rd = ma[0][0] * xw_dst + ma[0][1] * yw_dst + ma[0][2] * zw_dst;
    gd = ma[1][0] * xw_dst + ma[1][1] * yw_dst + ma[1][2] * zw_dst;
    bd = ma[2][0] * xw_dst + ma[2][1] * yw_dst + ma[2][2] * zw_dst;
    fac[0][0] = rd / rs;
    fac[1][1] = gd / gs;
    fac[2][2] = bd / bs;
    fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
    ff_matrix_mul_3x3(tmp, ma, fac);
    ff_matrix_mul_3x3(out, tmp, mai);
}

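/*
 * In-place LUT application on the internal RGB planes; the 2048 bias mirrors
 * the (n - 2048) / 28672 indexing used when the LUTs were built, and
 * av_clip_uintp2(..., 15) keeps out-of-range samples inside the table.
 */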
static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    int y, x, n;

    for (n = 0; n < 3; n++) {
        int16_t *data = buf[n];

        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++)
                data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];

            data += stride;
        }
    }
}

typedef struct ThreadData {
    AVFrame *in, *out;
    ptrdiff_t in_linesize[3], out_linesize[3];
    int in_ss_h, out_ss_h;
} ThreadData;

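/*
 * Slice worker for ff_filter_execute(): the frame is split on even row
 * boundaries (h_in counts pairs of lines), so 4:2:0 chroma rows are never
 * shared between two jobs.
 */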
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
{
    const ThreadData *td = data;
    ColorSpaceContext *s = ctx->priv;
    uint8_t *in_data[3], *out_data[3];
    int16_t *rgb[3];
    int h_in = (td->in->height + 1) >> 1;
    int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
    int w = td->in->width, h = h2 - h1;

    in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
    in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
    in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
    out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
    out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
    out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
    rgb[0] = s->rgb[0] + s->rgb_stride * h1;
    rgb[1] = s->rgb[1] + s->rgb_stride * h1;
    rgb[2] = s->rgb[2] + s->rgb_stride * h1;

    // FIXME for simd, also make sure we do pictures with negative stride
    // top-down so we don't overwrite lines with padding of data before it
    // in the same buffer (same as swscale)

    if (s->yuv2yuv_fastmode) {
        // FIXME possibly use a fast mode in case only the y range changes?
        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
        // are non-zero
        s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
                   s->yuv2yuv_coeffs, s->yuv_offset);
    } else {
        // FIXME maybe (for caching efficiency) do pipeline per-line instead of
        // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
        // 2 lines, for yuv420.)
        /*
         * General design:
         * - yuv2rgb converts from whatever range the input was ([16-235/240] or
         *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
         *   of RGB in pseudo-restricted 15+sign bits. That means that the float
         *   range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
         *   range is used for overflow/underflow outside the representable
         *   range of this RGB type. rgb2yuv is the exact opposite.
         * - gamma correction is done using a LUT since that appears to work
         *   fairly fast.
         * - If the input is chroma-subsampled (420/422), the yuv2rgb (or
         *   rgb2yuv) conversion uses nearest-neighbour sampling to read chroma
         *   pixels at luma resolution. If you want a fancier chroma filter,
         *   use swscale to convert to yuv444p first.
         * - all coefficients are 14bit (so in the [-2.0,2.0] range).
         */
        s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
                   s->yuv2rgb_coeffs, s->yuv_offset[0]);
        if (!s->rgb2rgb_passthrough) {
            apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
            if (!s->lrgb2lrgb_passthrough)
                s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
            apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
        }
        if (s->dither == DITHER_FSB) {
            s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                           s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
        } else {
            s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                       s->rgb2yuv_coeffs, s->yuv_offset[1]);
        }
    }

    return 0;
}

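/*
 * For tv/mpeg range this yields off = 16 << (depth - 8), y_rng = 219 << (depth - 8)
 * and uv_rng = 224 << (depth - 8), i.e. 16/219/224 at 8 bits; for pc/jpeg range
 * the offset is 0 and both ranges span the full (256 << (depth - 8)) - 1 codes.
 */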
static int get_range_off(AVFilterContext *ctx, int *off,
                         int *y_rng, int *uv_rng,
                         enum AVColorRange rng, int depth)
{
    switch (rng) {
    case AVCOL_RANGE_UNSPECIFIED: {
        ColorSpaceContext *s = ctx->priv;

        if (!s->did_warn_range) {
            av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
            s->did_warn_range = 1;
        }
    }
        // fall-through
    case AVCOL_RANGE_MPEG:
        *off = 16 << (depth - 8);
        *y_rng = 219 << (depth - 8);
        *uv_rng = 224 << (depth - 8);
        break;
    case AVCOL_RANGE_JPEG:
        *off = 0;
        *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

static int create_filtergraph(AVFilterContext *ctx,
                              const AVFrame *in, const AVFrame *out)
{
    ColorSpaceContext *s = ctx->priv;
    const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
    int m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;

#define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
#define supported_subsampling(lcw, lch) \
    (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
#define supported_format(d) \
    ((d) != NULL && (d)->nb_components == 3 && \
     !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
     supported_depth((d)->comp[0].depth) && \
     supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))

    if (!supported_format(in_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported input format %d (%s) or bitdepth (%d)\n",
               in->format, av_get_pix_fmt_name(in->format),
               in_desc ? in_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    }
    if (!supported_format(out_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported output format %d (%s) or bitdepth (%d)\n",
               out->format, av_get_pix_fmt_name(out->format),
               out_desc ? out_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    }

    if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
    if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
    if (in->color_trc != s->in_trc) s->in_txchr = NULL;
    if (out->color_trc != s->out_trc) s->out_txchr = NULL;
    if (in->colorspace != s->in_csp ||
        in->color_range != s->in_rng) s->in_lumacoef = NULL;
    if (out->color_range != s->out_rng) s->rgb2yuv = NULL;

    if (!s->out_primaries || !s->in_primaries) {
        s->in_prm = in->color_primaries;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
        if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
            s->in_prm = s->user_iprm;
        s->in_primaries = av_csp_primaries_desc_from_id(s->in_prm);
        if (!s->in_primaries) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input primaries %d (%s)\n",
                   s->in_prm, av_color_primaries_name(s->in_prm));
            return AVERROR(EINVAL);
        }
        s->out_prm = out->color_primaries;
        s->out_primaries = av_csp_primaries_desc_from_id(s->out_prm);
        if (!s->out_primaries) {
            if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output primaries %d (%s)\n",
                       s->out_prm, av_color_primaries_name(s->out_prm));
            }
            return AVERROR(EINVAL);
        }
        s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
                                           sizeof(*s->in_primaries));
        if (!s->lrgb2lrgb_passthrough) {
            double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
            const AVWhitepointCoefficients *wp_out, *wp_in;

            wp_out = &s->out_primaries->wp;
            wp_in = &s->in_primaries->wp;
            ff_fill_rgb2xyz_table(&s->out_primaries->prim, wp_out, rgb2xyz);
            ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
            ff_fill_rgb2xyz_table(&s->in_primaries->prim, wp_in, rgb2xyz);
            if (memcmp(wp_in, wp_out, sizeof(*wp_in)) != 0 &&
                s->wp_adapt != WP_ADAPT_IDENTITY) {
                double wpconv[3][3], tmp[3][3];

                fill_whitepoint_conv_table(wpconv, s->wp_adapt, &s->in_primaries->wp,
                                           &s->out_primaries->wp);
                ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
                ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
            } else {
                ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
            }
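            /*
             * Store the combined gamut-mapping matrix as Q14 fixed point
             * (16384 == 1.0); each entry is replicated 8 times so the DSP
             * routines can load a full SIMD vector per coefficient.
             */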
            for (m = 0; m < 3; m++)
                for (n = 0; n < 3; n++) {
                    s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
                    for (o = 1; o < 8; o++)
                        s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
                }

        }
    }

    if (!s->in_txchr) {
        av_freep(&s->lin_lut);
        s->in_trc = in->color_trc;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
        if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
            s->in_trc = s->user_itrc;
        s->in_txchr = get_transfer_characteristics(s->in_trc);
        if (!s->in_txchr) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input transfer characteristics %d (%s)\n",
                   s->in_trc, av_color_transfer_name(s->in_trc));
            return AVERROR(EINVAL);
        }
    }

    if (!s->out_txchr) {
        av_freep(&s->lin_lut);
        s->out_trc = out->color_trc;
        s->out_txchr = get_transfer_characteristics(s->out_trc);
        if (!s->out_txchr) {
            if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Please specify output transfer characteristics\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output transfer characteristics %d (%s)\n",
                       s->out_trc, av_color_transfer_name(s->out_trc));
            }
            return AVERROR(EINVAL);
        }
    }

    s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
                             !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
    if (!s->rgb2rgb_passthrough && !s->lin_lut) {
        res = fill_gamma_table(s);
        if (res < 0)
            return res;
    }

    if (!s->in_lumacoef) {
        s->in_csp = in->colorspace;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
        if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
            s->in_csp = s->user_icsp;
        s->in_rng = in->color_range;
        if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
            s->in_rng = s->user_irng;
        s->in_lumacoef = av_csp_luma_coeffs_from_avcsp(s->in_csp);
        if (!s->in_lumacoef) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input colorspace %d (%s)\n",
                   s->in_csp, av_color_space_name(s->in_csp));
            return AVERROR(EINVAL);
        }
        redo_yuv2rgb = 1;
    }

    if (!s->rgb2yuv) {
        s->out_rng = out->color_range;
        redo_rgb2yuv = 1;
    }

    fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
                    in_desc->log2_chroma_w == out_desc->log2_chroma_w;
    s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
    s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
                             !memcmp(s->in_lumacoef, s->out_lumacoef,
                                     sizeof(*s->in_lumacoef)) &&
                             in_desc->comp[0].depth == out_desc->comp[0].depth;
    if (!s->yuv2yuv_passthrough) {
        if (redo_yuv2rgb) {
            double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
            int off, bits, in_rng;

            res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
                                s->in_rng, in_desc->comp[0].depth);
            if (res < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported input color range %d (%s)\n",
                       s->in_rng, av_color_range_name(s->in_rng));
                return res;
            }
            for (n = 0; n < 8; n++)
                s->yuv_offset[0][n] = off;
            ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
            ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
            bits = 1 << (in_desc->comp[0].depth - 1);
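            /*
             * Rescale the floating-point YUV->RGB matrix from normalized input
             * (in_rng code values per unit) onto the internal RGB scale where
             * 28672 represents 1.0, in the fixed-point layout expected by the
             * DSP routines.
             */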
            for (n = 0; n < 3; n++) {
                for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
                    s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
                    for (o = 1; o < 8; o++)
                        s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
                }
            }
            av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
            av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
            av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
            av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
            s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
        }

        if (redo_rgb2yuv) {
            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
            int off, out_rng, bits;

            res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
                                s->out_rng, out_desc->comp[0].depth);
            if (res < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output color range %d (%s)\n",
                       s->out_rng, av_color_range_name(s->out_rng));
                return res;
            }
            for (n = 0; n < 8; n++)
                s->yuv_offset[1][n] = off;
            ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
            bits = 1 << (29 - out_desc->comp[0].depth);
            for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
                for (m = 0; m < 3; m++) {
                    s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
                    for (o = 1; o < 8; o++)
                        s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
                }
            }
            av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
            s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
                                       [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
            s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
                                       [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
        }

        if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
            int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
            double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
            double yuv2yuv[3][3];
            int in_rng, out_rng;

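            /*
             * Fast path: fold yuv2rgb and rgb2yuv into a single YUV->YUV matrix
             * stored in Q14 (16384 == 1.0), rescaled from the input to the
             * output code range and bit depth so it can be applied directly to
             * the (subsampled) planes.
             */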
            ff_matrix_mul_3x3(yuv2yuv, yuv2rgb, rgb2yuv);
            for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
                for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
                    s->yuv2yuv_coeffs[m][n][0] =
                        lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
                              (in_rng * (1 << odepth)));
                    for (o = 1; o < 8; o++)
                        s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
                }
            }
            av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
            av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
            s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
        }
    }

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    ColorSpaceContext *s = ctx->priv;

    s->out_csp = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
                 default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
    s->out_lumacoef = av_csp_luma_coeffs_from_avcsp(s->out_csp);
    if (!s->out_lumacoef) {
        if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
            if (s->user_all == CS_UNSPECIFIED) {
                av_log(ctx, AV_LOG_ERROR,
                       "Please specify output colorspace\n");
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output color property %d\n", s->user_all);
            }
        } else {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported output colorspace %d (%s)\n", s->out_csp,
                   av_color_space_name(s->out_csp));
        }
        return AVERROR(EINVAL);
    }

    ff_colorspacedsp_init(&s->dsp);

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    ColorSpaceContext *s = ctx->priv;

    av_freep(&s->rgb[0]);
    av_freep(&s->rgb[1]);
    av_freep(&s->rgb[2]);
    s->rgb_sz = 0;
    av_freep(&s->dither_scratch_base[0][0]);
    av_freep(&s->dither_scratch_base[0][1]);
    av_freep(&s->dither_scratch_base[1][0]);
    av_freep(&s->dither_scratch_base[1][1]);
    av_freep(&s->dither_scratch_base[2][0]);
    av_freep(&s->dither_scratch_base[2][1]);

    av_freep(&s->lin_lut);
}

static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ColorSpaceContext *s = ctx->priv;
    // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
    // input one if it is writable *OR* the actual literal values of in_*
    // and out_* are identical (not just their respective properties)
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    int res;
    ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
    unsigned rgb_sz = rgb_stride * in->height;
    ThreadData td;

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    res = av_frame_copy_props(out, in);
    if (res < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return res;
    }

    out->colorspace = s->out_csp;
    out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
                       in->color_range : s->user_rng;
    out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
                           default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
    if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);

        out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
        if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
            out->color_trc = AVCOL_TRC_BT2020_12;
    } else {
        out->color_trc = s->user_trc;
    }
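    /*
     * (Re)allocate the intermediate int16_t RGB planes and the dither scratch
     * rows only when the required size changed since the previous frame.
     */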
    if (rgb_sz != s->rgb_sz) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
        int uvw = in->width >> desc->log2_chroma_w;

        av_freep(&s->rgb[0]);
        av_freep(&s->rgb[1]);
        av_freep(&s->rgb[2]);
        s->rgb_sz = 0;
        av_freep(&s->dither_scratch_base[0][0]);
        av_freep(&s->dither_scratch_base[0][1]);
        av_freep(&s->dither_scratch_base[1][0]);
        av_freep(&s->dither_scratch_base[1][1]);
        av_freep(&s->dither_scratch_base[2][0]);
        av_freep(&s->dither_scratch_base[2][1]);

        s->rgb[0] = av_malloc(rgb_sz);
        s->rgb[1] = av_malloc(rgb_sz);
        s->rgb[2] = av_malloc(rgb_sz);
        s->dither_scratch_base[0][0] =
            av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
        s->dither_scratch_base[0][1] =
            av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
        s->dither_scratch_base[1][0] =
            av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
        s->dither_scratch_base[1][1] =
            av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
        s->dither_scratch_base[2][0] =
            av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
        s->dither_scratch_base[2][1] =
            av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
        s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
        s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
        s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
        s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
        s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
        s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
        if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
            !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
            !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
            !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
            uninit(ctx);
            av_frame_free(&in);
            av_frame_free(&out);
            return AVERROR(ENOMEM);
        }
        s->rgb_sz = rgb_sz;
    }
    res = create_filtergraph(ctx, in, out);
    if (res < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return res;
    }
    s->rgb_stride = rgb_stride / sizeof(int16_t);
    td.in = in;
    td.out = out;
    td.in_linesize[0] = in->linesize[0];
    td.in_linesize[1] = in->linesize[1];
    td.in_linesize[2] = in->linesize[2];
    td.out_linesize[0] = out->linesize[0];
    td.out_linesize[1] = out->linesize[1];
    td.out_linesize[2] = out->linesize[2];
    td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
    td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
    if (s->yuv2yuv_passthrough) {
        res = av_frame_copy(out, in);
        if (res < 0) {
            av_frame_free(&in);
            av_frame_free(&out);
            return res;
        }
    } else {
        ff_filter_execute(ctx, convert, &td, NULL,
                          FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
    }
    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

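/*
 * Advertise the conversion result during format negotiation: the output link
 * is pinned to the configured colorspace (and range, when one was requested),
 * and the pixel-format list is either shared between input and output or
 * split when the user forced a specific output format.
 */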
static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUVJ420P,  AV_PIX_FMT_YUVJ422P,  AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_NONE
    };
    int res;
    const ColorSpaceContext *s = ctx->priv;
    AVFilterFormats *formats;

    res = ff_formats_ref(ff_make_formats_list_singleton(s->out_csp), &cfg_out[0]->color_spaces);
    if (res < 0)
        return res;
    if (s->user_rng != AVCOL_RANGE_UNSPECIFIED) {
        res = ff_formats_ref(ff_make_formats_list_singleton(s->user_rng), &cfg_out[0]->color_ranges);
        if (res < 0)
            return res;
    }

    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if (s->user_format == AV_PIX_FMT_NONE)
        return ff_set_common_formats2(ctx, cfg_in, cfg_out, formats);

    res = ff_formats_ref(formats, &cfg_in[0]->formats);
    if (res < 0)
        return res;

    formats = NULL;
    res = ff_add_format(&formats, s->user_format);
    if (res < 0)
        return res;

    return ff_formats_ref(formats, &cfg_out[0]->formats);
}

static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->dst;
    AVFilterLink *inlink = outlink->src->inputs[0];

    if (inlink->w % 2 || inlink->h % 2) {
        av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
               inlink->w, inlink->h);
        return AVERROR_PATCHWELCOME;
    }

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;

    return 0;
}

#define OFFSET(x) offsetof(ColorSpaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, .unit = z }

static const AVOption colorspace_options[] = {
    { "all", "Set all color properties together",
      OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
    ENUM("bt470m",      CS_BT470M,      "all"),
    ENUM("bt470bg",     CS_BT470BG,     "all"),
    ENUM("bt601-6-525", CS_BT601_6_525, "all"),
    ENUM("bt601-6-625", CS_BT601_6_625, "all"),
    ENUM("bt709",       CS_BT709,       "all"),
    ENUM("smpte170m",   CS_SMPTE170M,   "all"),
    ENUM("smpte240m",   CS_SMPTE240M,   "all"),
    ENUM("bt2020",      CS_BT2020,      "all"),

    { "space", "Output colorspace",
      OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, .unit = "csp" },
    ENUM("bt709",     AVCOL_SPC_BT709,      "csp"),
    ENUM("fcc",       AVCOL_SPC_FCC,        "csp"),
    ENUM("bt470bg",   AVCOL_SPC_BT470BG,    "csp"),
    ENUM("smpte170m", AVCOL_SPC_SMPTE170M,  "csp"),
    ENUM("smpte240m", AVCOL_SPC_SMPTE240M,  "csp"),
    ENUM("ycgco",     AVCOL_SPC_YCGCO,      "csp"),
    ENUM("gbr",       AVCOL_SPC_RGB,        "csp"),
    ENUM("bt2020nc",  AVCOL_SPC_BT2020_NCL, "csp"),
    ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),

    { "range", "Output color range",
      OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
    ENUM("tv",   AVCOL_RANGE_MPEG, "rng"),
    ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
    ENUM("pc",   AVCOL_RANGE_JPEG, "rng"),
    ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),

    { "primaries", "Output color primaries",
      OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, .unit = "prm" },
    ENUM("bt709",     AVCOL_PRI_BT709,     "prm"),
    ENUM("bt470m",    AVCOL_PRI_BT470M,    "prm"),
    ENUM("bt470bg",   AVCOL_PRI_BT470BG,   "prm"),
    ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
    ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
    ENUM("smpte428",  AVCOL_PRI_SMPTE428,  "prm"),
    ENUM("film",      AVCOL_PRI_FILM,      "prm"),
    ENUM("smpte431",  AVCOL_PRI_SMPTE431,  "prm"),
    ENUM("smpte432",  AVCOL_PRI_SMPTE432,  "prm"),
    ENUM("bt2020",    AVCOL_PRI_BT2020,    "prm"),
    ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
    ENUM("ebu3213",   AVCOL_PRI_EBU3213,   "prm"),

    { "trc", "Output transfer characteristics",
      OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, .unit = "trc" },
    ENUM("bt709",        AVCOL_TRC_BT709,        "trc"),
    ENUM("bt470m",       AVCOL_TRC_GAMMA22,      "trc"),
    ENUM("gamma22",      AVCOL_TRC_GAMMA22,      "trc"),
    ENUM("bt470bg",      AVCOL_TRC_GAMMA28,      "trc"),
    ENUM("gamma28",      AVCOL_TRC_GAMMA28,      "trc"),
    ENUM("smpte170m",    AVCOL_TRC_SMPTE170M,    "trc"),
    ENUM("smpte240m",    AVCOL_TRC_SMPTE240M,    "trc"),
    ENUM("linear",       AVCOL_TRC_LINEAR,       "trc"),
    ENUM("srgb",         AVCOL_TRC_IEC61966_2_1, "trc"),
    ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
    ENUM("xvycc",        AVCOL_TRC_IEC61966_2_4, "trc"),
    ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
    ENUM("bt2020-10",    AVCOL_TRC_BT2020_10,    "trc"),
    ENUM("bt2020-12",    AVCOL_TRC_BT2020_12,    "trc"),

    { "format", "Output pixel format",
      OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
      AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, .unit = "fmt" },
    ENUM("yuv420p",   AV_PIX_FMT_YUV420P,   "fmt"),
    ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
    ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
    ENUM("yuv422p",   AV_PIX_FMT_YUV422P,   "fmt"),
    ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
    ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
    ENUM("yuv444p",   AV_PIX_FMT_YUV444P,   "fmt"),
    ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
    ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),

    { "fast", "Ignore primary chromaticity and gamma correction",
      OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
      0, 1, FLAGS },

    { "dither", "Dithering mode",
      OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
      DITHER_NONE, DITHER_NB - 1, FLAGS, .unit = "dither" },
    ENUM("none", DITHER_NONE, "dither"),
    ENUM("fsb",  DITHER_FSB,  "dither"),

    { "wpadapt", "Whitepoint adaptation method",
      OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
      WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, .unit = "wpadapt" },
    ENUM("bradford", WP_ADAPT_BRADFORD,  "wpadapt"),
    ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
    ENUM("identity", WP_ADAPT_IDENTITY,  "wpadapt"),

    { "iall", "Set all input color properties together",
      OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
    { "ispace", "Input colorspace",
      OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, .unit = "csp" },
    { "irange", "Input color range",
      OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
    { "iprimaries", "Input color primaries",
      OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, .unit = "prm" },
    { "itrc", "Input transfer characteristics",
      OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, .unit = "trc" },

    { NULL }
};

AVFILTER_DEFINE_CLASS(colorspace);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
};

const AVFilter ff_vf_colorspace = {
    .name        = "colorspace",
    .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(ColorSpaceContext),
    .priv_class  = &colorspace_class,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_QUERY_FUNC2(query_formats),
    .flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};