GCC Code Coverage Report

Directory: ../../../ffmpeg/
File:      src/libavcodec/huffyuvenc.c
Date:      2019-11-18 18:00:01

            Exec   Total   Coverage
Lines:       341     560     60.9 %
Branches:    180     377     47.7 %

Line  Branch  Exec  Source
1
/*
2
 * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3
 *
4
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
5
 * the algorithm used
6
 *
7
 * This file is part of FFmpeg.
8
 *
9
 * FFmpeg is free software; you can redistribute it and/or
10
 * modify it under the terms of the GNU Lesser General Public
11
 * License as published by the Free Software Foundation; either
12
 * version 2.1 of the License, or (at your option) any later version.
13
 *
14
 * FFmpeg is distributed in the hope that it will be useful,
15
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
 * Lesser General Public License for more details.
18
 *
19
 * You should have received a copy of the GNU Lesser General Public
20
 * License along with FFmpeg; if not, write to the Free Software
21
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22
 *
23
 * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24
 */
25
26
/**
27
 * @file
28
 * huffyuv encoder
29
 */
30
31
#include "avcodec.h"
32
#include "huffyuv.h"
33
#include "huffman.h"
34
#include "huffyuvencdsp.h"
35
#include "internal.h"
36
#include "lossless_videoencdsp.h"
37
#include "put_bits.h"
38
#include "libavutil/opt.h"
39
#include "libavutil/pixdesc.h"
40
41
134100
static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
42
                              const uint8_t *src0, const uint8_t *src1, int w)
43
{
44
134100
    if (s->bps <= 8) {
45
        s->llvidencdsp.diff_bytes(dst, src0, src1, w);
46
    } else {
47
134100
        s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
48
    }
49
134100
}
50
51
718400
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
52
                                      const uint8_t *src, int w, int left)
53
{
54
    int i;
55
718400
    int min_width = FFMIN(w, 32);
56
57
718400
    if (s->bps <= 8) {
58
11777100
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
59
11417900
            const int temp = src[i];
60
11417900
            dst[i] = temp - left;
61
11417900
            left   = temp;
62
        }
63
359200
        if (w < 32)
64
5100
            return left;
65
354100
        s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
66
354100
        return src[w-1];
67
    } else {
68
359200
        const uint16_t *src16 = (const uint16_t *)src;
69
359200
        uint16_t       *dst16 = (      uint16_t *)dst;
70
11777100
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
71
11417900
            const int temp = src16[i];
72
11417900
            dst16[i] = temp - left;
73
11417900
            left   = temp;
74
        }
75
359200
        if (w < 32)
76
5100
            return left;
77
354100
        s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
78
354100
        return src16[w-1];
79
    }
80
}
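Editor's note: sub_left_prediction() above stores each sample as its difference from the previous one (wrapping modulo 2^bps), handling the first 32 samples in scalar code before handing the aligned remainder to the DSP routine. Below is a minimal standalone sketch of the 8-bit case and of the inverse a decoder would apply; the helper names are illustrative, not FFmpeg API.

#include <stddef.h>
#include <stdint.h>

/* Left prediction for 8-bit samples: emit residuals src[i] - previous sample,
 * wrapping modulo 256, exactly as the scalar prologue above does. */
static void left_predict_u8(uint8_t *dst, const uint8_t *src, size_t w, int left)
{
    for (size_t i = 0; i < w; i++) {
        dst[i] = (uint8_t)(src[i] - left);
        left   = src[i];
    }
}

/* The inverse: accumulate residuals to recover the original samples. */
static void left_reconstruct_u8(uint8_t *dst, const uint8_t *res, size_t w, int left)
{
    for (size_t i = 0; i < w; i++) {
        left   = (uint8_t)(left + res[i]);
        dst[i] = (uint8_t)left;
    }
}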
81
82
44900
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
83
                                             const uint8_t *src, int w,
84
                                             int *red, int *green, int *blue,
85
                                             int *alpha)
86
{
87
    int i;
88
    int r, g, b, a;
89
44900
    int min_width = FFMIN(w, 8);
90
44900
    r = *red;
91
44900
    g = *green;
92
44900
    b = *blue;
93
44900
    a = *alpha;
94
95
404100
    for (i = 0; i < min_width; i++) {
96
359200
        const int rt = src[i * 4 + R];
97
359200
        const int gt = src[i * 4 + G];
98
359200
        const int bt = src[i * 4 + B];
99
359200
        const int at = src[i * 4 + A];
100
359200
        dst[i * 4 + R] = rt - r;
101
359200
        dst[i * 4 + G] = gt - g;
102
359200
        dst[i * 4 + B] = bt - b;
103
359200
        dst[i * 4 + A] = at - a;
104
359200
        r = rt;
105
359200
        g = gt;
106
359200
        b = bt;
107
359200
        a = at;
108
    }
109
110
44900
    s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
111
112
44900
    *red   = src[(w - 1) * 4 + R];
113
44900
    *green = src[(w - 1) * 4 + G];
114
44900
    *blue  = src[(w - 1) * 4 + B];
115
44900
    *alpha = src[(w - 1) * 4 + A];
116
44900
}
117
118
44900
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
119
                                             uint8_t *src, int w,
120
                                             int *red, int *green, int *blue)
121
{
122
    int i;
123
    int r, g, b;
124
44900
    r = *red;
125
44900
    g = *green;
126
44900
    b = *blue;
127
763300
    for (i = 0; i < FFMIN(w, 16); i++) {
128
718400
        const int rt = src[i * 3 + 0];
129
718400
        const int gt = src[i * 3 + 1];
130
718400
        const int bt = src[i * 3 + 2];
131
718400
        dst[i * 3 + 0] = rt - r;
132
718400
        dst[i * 3 + 1] = gt - g;
133
718400
        dst[i * 3 + 2] = bt - b;
134
718400
        r = rt;
135
718400
        g = gt;
136
718400
        b = bt;
137
    }
138
139
44900
    s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
140
141
44900
    *red   = src[(w - 1) * 3 + 0];
142
44900
    *green = src[(w - 1) * 3 + 1];
143
44900
    *blue  = src[(w - 1) * 3 + 2];
144
44900
}
145
146
static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
147
{
148
    if (s->bps <= 8) {
149
        s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
150
    } else {
151
        s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
152
    }
153
}
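Editor's note: sub_median_prediction() only dispatches to the DSP routines; the predictor itself (per the huffyuv description linked in the file header) is the median of the left neighbour, the sample above, and left + above - above-left. Below is a scalar sketch of the 8-bit subtraction under that assumption; the DSP implementations remain authoritative and the helper names are illustrative.

#include <stdint.h>

/* Median of three values. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }     /* ensure a <= b */
    return c < a ? a : (c > b ? b : c);
}

/* Subtract the median prediction from the current row `cur`, using the row
 * above `top`; `left`/`left_top` carry state across calls as in the code above. */
static void sub_median_u8(uint8_t *dst, const uint8_t *top, const uint8_t *cur,
                          int w, int *left, int *left_top)
{
    int l = *left, lt = *left_top;
    for (int i = 0; i < w; i++) {
        const int pred = median3(l, top[i], (l + top[i] - lt) & 0xFF);
        dst[i] = (uint8_t)(cur[i] - pred);
        lt = top[i];
        l  = cur[i];
    }
    *left     = l;
    *left_top = lt;
}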
154
155
96
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
156
{
157
    int i;
158
96
    int index = 0;
159
96
    int n = s->vlc_n;
160
161
3972
    for (i = 0; i < n;) {
162
3876
        int val = len[i];
163
3876
        int repeat = 0;
164
165

277284
        for (; i < n && len[i] == val && repeat < 255; i++)
166
273408
            repeat++;
167
168


3876
        av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
169
3876
        if (repeat > 7) {
170
2244
            buf[index++] = val;
171
2244
            buf[index++] = repeat;
172
        } else {
173
1632
            buf[index++] = val | (repeat << 5);
174
        }
175
    }
176
177
96
    return index;
178
}
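Editor's note: store_table() run-length codes the table of Huffman code lengths: a run of up to 7 equal lengths packs into one byte as val | (repeat << 5), while longer runs store the length byte with a zero repeat field followed by an 8-bit run count. A byte-oriented sketch of reading that format back is shown below (the real decoder consumes it through its bitreader); the helper name is illustrative.

#include <stdint.h>

/* Expand the run-length coded length table produced by store_table().
 * Returns the number of bytes consumed, or -1 on malformed input. */
static int load_len_table(uint8_t *len, int n, const uint8_t *buf, int size)
{
    int i = 0, pos = 0;

    while (i < n && pos < size) {
        int val    = buf[pos] & 31;        /* code length, 1..31 */
        int repeat = buf[pos++] >> 5;      /* 1..7, or 0 => extended run */

        if (repeat == 0) {
            if (pos >= size)
                return -1;
            repeat = buf[pos++];           /* 8..255 */
        }
        while (repeat-- && i < n)
            len[i++] = (uint8_t)val;
    }
    return i == n ? pos : -1;
}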
179
180
32
static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
181
{
182
    int i, ret;
183
32
    int size = 0;
184
32
    int count = 3;
185
186
32
    if (s->version > 2)
187
16
        count = 1 + s->alpha + 2*s->chroma;
188
189
128
    for (i = 0; i < count; i++) {
190
96
        if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
191
            return ret;
192
193
96
        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
194
            return -1;
195
        }
196
197
96
        size += store_table(s, s->len[i], buf + size);
198
    }
199
32
    return size;
200
}
201
202
32
static av_cold int encode_init(AVCodecContext *avctx)
203
{
204
32
    HYuvContext *s = avctx->priv_data;
205
    int i, j;
206
    int ret;
207
32
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
208
209
32
    ff_huffyuv_common_init(avctx);
210
32
    ff_huffyuvencdsp_init(&s->hencdsp, avctx);
211
32
    ff_llvidencdsp_init(&s->llvidencdsp);
212
213
32
    avctx->extradata = av_mallocz(3*MAX_N + 4);
214
32
    if (s->flags&AV_CODEC_FLAG_PASS1) {
215
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
216
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
217
        if (!avctx->stats_out)
218
            return AVERROR(ENOMEM);
219
    }
220
32
    s->version = 2;
221
222
32
    if (!avctx->extradata)
223
        return AVERROR(ENOMEM);
224
225
#if FF_API_CODED_FRAME
226
FF_DISABLE_DEPRECATION_WARNINGS
227
32
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
228
32
    avctx->coded_frame->key_frame = 1;
229
FF_ENABLE_DEPRECATION_WARNINGS
230
#endif
231
#if FF_API_PRIVATE_OPT
232
FF_DISABLE_DEPRECATION_WARNINGS
233
32
    if (avctx->context_model == 1)
234
        s->context = avctx->context_model;
235
FF_ENABLE_DEPRECATION_WARNINGS
236
#endif
237
238
32
    s->bps = desc->comp[0].depth;
239

32
    s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
240
32
    s->chroma = desc->nb_components > 2;
241
32
    s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
242
32
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
243
                                     &s->chroma_h_shift,
244
                                     &s->chroma_v_shift);
245
246

32
    switch (avctx->pix_fmt) {
247
8
    case AV_PIX_FMT_YUV420P:
248
    case AV_PIX_FMT_YUV422P:
249
8
        if (s->width & 1) {
250
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
251
            return AVERROR(EINVAL);
252
        }
253
8
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
254
8
        break;
255
16
    case AV_PIX_FMT_YUV444P:
256
    case AV_PIX_FMT_YUV410P:
257
    case AV_PIX_FMT_YUV411P:
258
    case AV_PIX_FMT_YUV440P:
259
    case AV_PIX_FMT_GBRP:
260
    case AV_PIX_FMT_GBRP9:
261
    case AV_PIX_FMT_GBRP10:
262
    case AV_PIX_FMT_GBRP12:
263
    case AV_PIX_FMT_GBRP14:
264
    case AV_PIX_FMT_GBRP16:
265
    case AV_PIX_FMT_GRAY8:
266
    case AV_PIX_FMT_GRAY16:
267
    case AV_PIX_FMT_YUVA444P:
268
    case AV_PIX_FMT_YUVA420P:
269
    case AV_PIX_FMT_YUVA422P:
270
    case AV_PIX_FMT_GBRAP:
271
    case AV_PIX_FMT_YUV420P9:
272
    case AV_PIX_FMT_YUV420P10:
273
    case AV_PIX_FMT_YUV420P12:
274
    case AV_PIX_FMT_YUV420P14:
275
    case AV_PIX_FMT_YUV420P16:
276
    case AV_PIX_FMT_YUV422P9:
277
    case AV_PIX_FMT_YUV422P10:
278
    case AV_PIX_FMT_YUV422P12:
279
    case AV_PIX_FMT_YUV422P14:
280
    case AV_PIX_FMT_YUV422P16:
281
    case AV_PIX_FMT_YUV444P9:
282
    case AV_PIX_FMT_YUV444P10:
283
    case AV_PIX_FMT_YUV444P12:
284
    case AV_PIX_FMT_YUV444P14:
285
    case AV_PIX_FMT_YUV444P16:
286
    case AV_PIX_FMT_YUVA420P9:
287
    case AV_PIX_FMT_YUVA420P10:
288
    case AV_PIX_FMT_YUVA420P16:
289
    case AV_PIX_FMT_YUVA422P9:
290
    case AV_PIX_FMT_YUVA422P10:
291
    case AV_PIX_FMT_YUVA422P16:
292
    case AV_PIX_FMT_YUVA444P9:
293
    case AV_PIX_FMT_YUVA444P10:
294
    case AV_PIX_FMT_YUVA444P16:
295
16
        s->version = 3;
296
16
        break;
297
4
    case AV_PIX_FMT_RGB32:
298
4
        s->bitstream_bpp = 32;
299
4
        break;
300
4
    case AV_PIX_FMT_RGB24:
301
4
        s->bitstream_bpp = 24;
302
4
        break;
303
    default:
304
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
305
        return AVERROR(EINVAL);
306
    }
307
32
    s->n = 1<<s->bps;
308
32
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);
309
310
32
    avctx->bits_per_coded_sample = s->bitstream_bpp;
311

32
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
312
#if FF_API_PRIVATE_OPT
313
FF_DISABLE_DEPRECATION_WARNINGS
314
32
    if (avctx->prediction_method)
315
        s->predictor = avctx->prediction_method;
316
FF_ENABLE_DEPRECATION_WARNINGS
317
#endif
318
32
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
319
32
    if (s->context) {
320
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
321
            av_log(avctx, AV_LOG_ERROR,
322
                   "context=1 is not compatible with "
323
                   "2 pass huffyuv encoding\n");
324
            return AVERROR(EINVAL);
325
        }
326
    }
327
328
32
    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
329
12
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
330
            av_log(avctx, AV_LOG_ERROR,
331
                   "Error: YV12 is not supported by huffyuv; use "
332
                   "vcodec=ffvhuff or format=422p\n");
333
            return AVERROR(EINVAL);
334
        }
335
#if FF_API_PRIVATE_OPT
336
12
        if (s->context) {
337
            av_log(avctx, AV_LOG_ERROR,
338
                   "Error: per-frame huffman tables are not supported "
339
                   "by huffyuv; use vcodec=ffvhuff\n");
340
            return AVERROR(EINVAL);
341
        }
342
12
        if (s->version > 2) {
343
            av_log(avctx, AV_LOG_ERROR,
344
                   "Error: ver>2 is not supported "
345
                   "by huffyuv; use vcodec=ffvhuff\n");
346
            return AVERROR(EINVAL);
347
        }
348
#endif
349
12
        if (s->interlaced != ( s->height > 288 ))
350
            av_log(avctx, AV_LOG_INFO,
351
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
352
    }
353
354

32
    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
355
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
356
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
357
        return AVERROR(EINVAL);
358
    }
359
360

32
    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
361
        av_log(avctx, AV_LOG_ERROR,
362
               "Error: RGB is incompatible with median predictor\n");
363
        return AVERROR(EINVAL);
364
    }
365
366
32
    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
367
32
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
368
32
    if (s->context)
369
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
370
32
    if (s->version < 3) {
371
16
        ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
372
16
        ((uint8_t*)avctx->extradata)[3] = 0;
373
    } else {
374
16
        ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
375
16
        if (s->chroma)
376
16
            ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
377
16
        if (s->alpha)
378
            ((uint8_t*)avctx->extradata)[2] |= 4;
379
16
        ((uint8_t*)avctx->extradata)[3] = 1;
380
    }
381
32
    s->avctx->extradata_size = 4;
382
383
32
    if (avctx->stats_in) {
384
        char *p = avctx->stats_in;
385
386
        for (i = 0; i < 4; i++)
387
            for (j = 0; j < s->vlc_n; j++)
388
                s->stats[i][j] = 1;
389
390
        for (;;) {
391
            for (i = 0; i < 4; i++) {
392
                char *next;
393
394
                for (j = 0; j < s->vlc_n; j++) {
395
                    s->stats[i][j] += strtol(p, &next, 0);
396
                    if (next == p) return -1;
397
                    p = next;
398
                }
399
            }
400
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
401
        }
402
    } else {
403
160
        for (i = 0; i < 4; i++)
404
364672
            for (j = 0; j < s->vlc_n; j++) {
405
364544
                int d = FFMIN(j, s->vlc_n - j);
406
407
364544
                s->stats[i][j] = 100000000 / (d*d + 1);
408
            }
409
    }
410
411
32
    ret = store_huffman_tables(s, s->avctx->extradata + s->avctx->extradata_size);
412
32
    if (ret < 0)
413
        return ret;
414
32
    s->avctx->extradata_size += ret;
415
416
32
    if (s->context) {
417
        for (i = 0; i < 4; i++) {
418
            int pels = s->width * s->height / (i ? 40 : 10);
419
            for (j = 0; j < s->vlc_n; j++) {
420
                int d = FFMIN(j, s->vlc_n - j);
421
                s->stats[i][j] = pels/(d*d + 1);
422
            }
423
        }
424
    } else {
425
160
        for (i = 0; i < 4; i++)
426
364672
            for (j = 0; j < s->vlc_n; j++)
427
364544
                s->stats[i][j]= 0;
428
    }
429
430
32
    if (ff_huffyuv_alloc_temp(s)) {
431
        ff_huffyuv_common_end(s);
432
        return AVERROR(ENOMEM);
433
    }
434
435
32
    s->picture_number=0;
436
437
32
    return 0;
438
}
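Editor's note: the global header written into avctx->extradata above is only four bytes, followed by the stored Huffman tables. The sketch below restates that packing as a standalone routine, using exactly the byte layout from encode_init(); the function name and parameter list are illustrative.

#include <stdint.h>

/* Pack the 4-byte huffyuv/ffvhuff extradata header as encode_init() does.
 * Byte 0: predictor | decorrelate flag; byte 2: interlace/context/plane flags;
 * bytes 1 and 3 depend on the stream version. */
static void pack_extradata_header(uint8_t out[4],
                                  int version, int predictor, int decorrelate,
                                  int interlaced, int context,
                                  int bitstream_bpp, int bps,
                                  int chroma_h_shift, int chroma_v_shift,
                                  int chroma, int yuv, int alpha)
{
    out[0] = (uint8_t)(predictor | (decorrelate << 6));
    out[2] = (uint8_t)((interlaced ? 0x10 : 0x20) | (context ? 0x40 : 0));

    if (version < 3) {
        out[1] = (uint8_t)bitstream_bpp;   /* 12, 16, 24 or 32 */
        out[3] = 0;
    } else {
        out[1] = (uint8_t)(((bps - 1) << 4) | chroma_h_shift | (chroma_v_shift << 2));
        if (chroma)
            out[2] |= yuv ? 1 : 2;
        if (alpha)
            out[2] |= 4;
        out[3] = 1;                        /* marks a version-3 header */
    }
}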
439
67350
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
440
{
441
    int i;
442
67350
    const uint8_t *y = s->temp[0] + offset;
443
67350
    const uint8_t *u = s->temp[1] + offset / 2;
444
67350
    const uint8_t *v = s->temp[2] + offset / 2;
445
446
67350
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
447
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
448
        return -1;
449
    }
450
451
#define LOAD4\
452
            int y0 = y[2 * i];\
453
            int y1 = y[2 * i + 1];\
454
            int u0 = u[i];\
455
            int v0 = v[i];
456
457
67350
    count /= 2;
458
459
67350
    if (s->flags & AV_CODEC_FLAG_PASS1) {
460
        for(i = 0; i < count; i++) {
461
            LOAD4;
462
            s->stats[0][y0]++;
463
            s->stats[1][u0]++;
464
            s->stats[0][y1]++;
465
            s->stats[2][v0]++;
466
        }
467
    }
468
67350
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
469
        return 0;
470
67350
    if (s->context) {
471
        for (i = 0; i < count; i++) {
472
            LOAD4;
473
            s->stats[0][y0]++;
474
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
475
            s->stats[1][u0]++;
476
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
477
            s->stats[0][y1]++;
478
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
479
            s->stats[2][v0]++;
480
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
481
        }
482
    } else {
483
11515100
        for(i = 0; i < count; i++) {
484
11447750
            LOAD4;
485
11447750
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
486
11447750
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
487
11447750
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
488
11447750
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
489
        }
490
    }
491
67350
    return 0;
492
}
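Editor's note: the guard at the top of encode_422_bitstream() reserves 2 * 4 bytes per pixel, which appears to be a worst-case bound: each pixel pair emits four symbols (y0, u0, y1, v0), and with code lengths below 32 bits (store_table() asserts val < 32) each symbol needs at most 4 bytes. A small sketch of that check, with illustrative names:

#include <stddef.h>
#include <stdint.h>

/* Worst-case space check mirroring the test above: `bits_used` is what
 * put_bits_count() would report, `pixels` the number of pixels to encode. */
static int enough_space_422(const uint8_t *buf, const uint8_t *buf_end,
                            size_t bits_used, int pixels)
{
    size_t used      = bits_used >> 3;                  /* bytes already written */
    size_t remaining = (size_t)(buf_end - buf) - used;

    return remaining >= (size_t)(2 * 4 * pixels);       /* 4 symbols x <=4 bytes per pair */
}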
493
494
493900
static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
495
{
496
493900
    int i, count = width/2;
497
498
493900
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < count * s->bps / 2) {
499
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
500
        return -1;
501
    }
502
503
#define LOADEND\
504
            int y0 = s->temp[0][width-1];
505
#define LOADEND_14\
506
            int y0 = s->temp16[0][width-1] & mask;
507
#define LOADEND_16\
508
            int y0 = s->temp16[0][width-1];
509
#define STATEND\
510
            s->stats[plane][y0]++;
511
#define STATEND_16\
512
            s->stats[plane][y0>>2]++;
513
#define WRITEEND\
514
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
515
#define WRITEEND_16\
516
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
517
            put_bits(&s->pb, 2, y0&3);
518
519
#define LOAD2\
520
            int y0 = s->temp[0][2 * i];\
521
            int y1 = s->temp[0][2 * i + 1];
522
#define LOAD2_14\
523
            int y0 = s->temp16[0][2 * i] & mask;\
524
            int y1 = s->temp16[0][2 * i + 1] & mask;
525
#define LOAD2_16\
526
            int y0 = s->temp16[0][2 * i];\
527
            int y1 = s->temp16[0][2 * i + 1];
528
#define STAT2\
529
            s->stats[plane][y0]++;\
530
            s->stats[plane][y1]++;
531
#define STAT2_16\
532
            s->stats[plane][y0>>2]++;\
533
            s->stats[plane][y1>>2]++;
534
#define WRITE2\
535
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
536
            put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
537
#define WRITE2_16\
538
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
539
            put_bits(&s->pb, 2, y0&3);\
540
            put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
541
            put_bits(&s->pb, 2, y1&3);
542
543
493900
    if (s->bps <= 8) {
544
134700
    if (s->flags & AV_CODEC_FLAG_PASS1) {
545
        for (i = 0; i < count; i++) {
546
            LOAD2;
547
            STAT2;
548
        }
549
        if (width&1) {
550
            LOADEND;
551
            STATEND;
552
        }
553
    }
554
134700
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
555
        return 0;
556
557
134700
    if (s->context) {
558
        for (i = 0; i < count; i++) {
559
            LOAD2;
560
            STAT2;
561
            WRITE2;
562
        }
563
        if (width&1) {
564
            LOADEND;
565
            STATEND;
566
            WRITEEND;
567
        }
568
    } else {
569
23031000
        for (i = 0; i < count; i++) {
570
22896300
            LOAD2;
571
22896300
            WRITE2;
572
        }
573
134700
        if (width&1) {
574
            LOADEND;
575
            WRITEEND;
576
        }
577
    }
578
359200
    } else if (s->bps <= 14) {
579
224500
        int mask = s->n - 1;
580
224500
        if (s->flags & AV_CODEC_FLAG_PASS1) {
581
            for (i = 0; i < count; i++) {
582
                LOAD2_14;
583
                STAT2;
584
            }
585
            if (width&1) {
586
                LOADEND_14;
587
                STATEND;
588
            }
589
        }
590
224500
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
591
            return 0;
592
593
224500
        if (s->context) {
594
            for (i = 0; i < count; i++) {
595
                LOAD2_14;
596
                STAT2;
597
                WRITE2;
598
            }
599
            if (width&1) {
600
                LOADEND_14;
601
                STATEND;
602
                WRITEEND;
603
            }
604
        } else {
605
26934300
            for (i = 0; i < count; i++) {
606
26709800
                LOAD2_14;
607
26709800
                WRITE2;
608
            }
609
224500
            if (width&1) {
610
5100
                LOADEND_14;
611
5100
                WRITEEND;
612
            }
613
        }
614
    } else {
615
134700
        if (s->flags & AV_CODEC_FLAG_PASS1) {
616
            for (i = 0; i < count; i++) {
617
                LOAD2_16;
618
                STAT2_16;
619
            }
620
            if (width&1) {
621
                LOADEND_16;
622
                STATEND_16;
623
            }
624
        }
625
134700
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
626
            return 0;
627
628
134700
        if (s->context) {
629
            for (i = 0; i < count; i++) {
630
                LOAD2_16;
631
                STAT2_16;
632
                WRITE2_16;
633
            }
634
            if (width&1) {
635
                LOADEND_16;
636
                STATEND_16;
637
                WRITEEND_16;
638
            }
639
        } else {
640
23031000
            for (i = 0; i < count; i++) {
641
22896300
                LOAD2_16;
642
22896300
                WRITE2_16;
643
            }
644
134700
            if (width&1) {
645
                LOADEND_16;
646
                WRITEEND_16;
647
            }
648
        }
649
    }
650
#undef LOAD2
651
#undef STAT2
652
#undef WRITE2
653
493900
    return 0;
654
}
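Editor's note: for depths above 14 bits the plane coder above cannot index a Huffman table with the full sample (the alphabet is capped at s->vlc_n = FFMIN(s->n, MAX_VLC_N)), so the WRITE*_16 macros send y >> 2 through the table and append the two low bits verbatim. A tiny sketch of the split and its reassembly:

/* Split a 15/16-bit sample the way the WRITE*_16 macros do: the high part is
 * Huffman coded, the 2 low bits are stored raw right after the code. */
static void split_sample16(unsigned y, unsigned *vlc_symbol, unsigned *raw2)
{
    *vlc_symbol = y >> 2;
    *raw2       = y & 3;
}

/* Reassembly on the decoder side: (symbol << 2) | raw2 yields y again. */
static unsigned join_sample16(unsigned vlc_symbol, unsigned raw2)
{
    return (vlc_symbol << 2) | raw2;
}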
655
656
22450
static int encode_gray_bitstream(HYuvContext *s, int count)
657
{
658
    int i;
659
660
22450
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
661
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
662
        return -1;
663
    }
664
665
#define LOAD2\
666
            int y0 = s->temp[0][2 * i];\
667
            int y1 = s->temp[0][2 * i + 1];
668
#define STAT2\
669
            s->stats[0][y0]++;\
670
            s->stats[0][y1]++;
671
#define WRITE2\
672
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
673
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
674
675
22450
    count /= 2;
676
677
22450
    if (s->flags & AV_CODEC_FLAG_PASS1) {
678
        for (i = 0; i < count; i++) {
679
            LOAD2;
680
            STAT2;
681
        }
682
    }
683
22450
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
684
        return 0;
685
686
22450
    if (s->context) {
687
        for (i = 0; i < count; i++) {
688
            LOAD2;
689
            STAT2;
690
            WRITE2;
691
        }
692
    } else {
693
3838500
        for (i = 0; i < count; i++) {
694
3816050
            LOAD2;
695
3816050
            WRITE2;
696
        }
697
    }
698
22450
    return 0;
699
}
700
701
89800
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
702
{
703
    int i;
704
705
89800
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
706
89800
        4 * planes * count) {
707
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
708
        return -1;
709
    }
710
711
#define LOAD_GBRA                                                       \
712
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];            \
713
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
714
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
715
    int a = s->temp[0][planes * i + A];
716
717
#define STAT_BGRA                                                       \
718
    s->stats[0][b]++;                                                   \
719
    s->stats[1][g]++;                                                   \
720
    s->stats[2][r]++;                                                   \
721
    if (planes == 4)                                                    \
722
        s->stats[2][a]++;
723
724
#define WRITE_GBRA                                                      \
725
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);                      \
726
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);                      \
727
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);                      \
728
    if (planes == 4)                                                    \
729
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
730
731
89800
    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
732
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
733
        for (i = 0; i < count; i++) {
734
            LOAD_GBRA;
735
            STAT_BGRA;
736
        }
737

89800
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
738
        for (i = 0; i < count; i++) {
739
            LOAD_GBRA;
740
            STAT_BGRA;
741
            WRITE_GBRA;
742
        }
743
    } else {
744
30617800
        for (i = 0; i < count; i++) {
745

30528000
            LOAD_GBRA;
746
30528000
            WRITE_GBRA;
747
        }
748
    }
749
89800
    return 0;
750
}
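Editor's note: LOAD_GBRA above codes blue and red as offsets from green (modulo 256) before entropy coding. A minimal sketch of that forward transform and its inverse on one pixel; helper names are illustrative.

#include <stdint.h>

/* Forward transform used by LOAD_GBRA: keep g, store b and r relative to g. */
static void decorrelate_rgb(uint8_t *r, uint8_t g, uint8_t *b)
{
    *b = (uint8_t)(*b - g);
    *r = (uint8_t)(*r - g);
}

/* Inverse transform a decoder applies to recover the original b and r. */
static void recorrelate_rgb(uint8_t *r, uint8_t g, uint8_t *b)
{
    *b = (uint8_t)(*b + g);
    *r = (uint8_t)(*r + g);
}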
751
752
1600
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
753
                        const AVFrame *pict, int *got_packet)
754
{
755
1600
    HYuvContext *s = avctx->priv_data;
756
1600
    const int width = s->width;
757
1600
    const int width2 = s->width>>1;
758
1600
    const int height = s->height;
759
1600
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
760
1600
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
761
1600
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
762
1600
    const AVFrame * const p = pict;
763
1600
    int i, j, size = 0, ret;
764
765
1600
    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
766
        return ret;
767
768
1600
    if (s->context) {
769
        size = store_huffman_tables(s, pkt->data);
770
        if (size < 0)
771
            return size;
772
773
        for (i = 0; i < 4; i++)
774
            for (j = 0; j < s->vlc_n; j++)
775
                s->stats[i][j] >>= 1;
776
    }
777
778
1600
    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
779
780
1600
    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
781
1800
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
782
        int lefty, leftu, leftv, y, cy;
783
784
400
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
785
400
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
786
400
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
787
400
        put_bits(&s->pb, 8,         p->data[0][0]);
788
789
400
        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
790
400
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
791
400
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
792
793
400
        encode_422_bitstream(s, 2, width-2);
794
795
400
        if (s->predictor==MEDIAN) {
796
            int lefttopy, lefttopu, lefttopv;
797
            cy = y = 1;
798
            if (s->interlaced) {
799
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
800
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
801
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
802
803
                encode_422_bitstream(s, 0, width);
804
                y++; cy++;
805
            }
806
807
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
808
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
809
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
810
811
            encode_422_bitstream(s, 0, 4);
812
813
            lefttopy = p->data[0][3];
814
            lefttopu = p->data[1][1];
815
            lefttopv = p->data[2][1];
816
            s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width  - 4, &lefty, &lefttopy);
817
            s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
818
            s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
819
            encode_422_bitstream(s, 0, width - 4);
820
            y++; cy++;
821
822
            for (; y < height; y++,cy++) {
823
                uint8_t *ydst, *udst, *vdst;
824
825
                if (s->bitstream_bpp == 12) {
826
                    while (2 * cy > y) {
827
                        ydst = p->data[0] + p->linesize[0] * y;
828
                        s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
829
                        encode_gray_bitstream(s, width);
830
                        y++;
831
                    }
832
                    if (y >= height) break;
833
                }
834
                ydst = p->data[0] + p->linesize[0] * y;
835
                udst = p->data[1] + p->linesize[1] * cy;
836
                vdst = p->data[2] + p->linesize[2] * cy;
837
838
                s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width,  &lefty, &lefttopy);
839
                s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
840
                s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
841
842
                encode_422_bitstream(s, 0, width);
843
            }
844
        } else {
845
67350
            for (cy = y = 1; y < height; y++, cy++) {
846
                uint8_t *ydst, *udst, *vdst;
847
848
                /* encode a luma only line & y++ */
849
67150
                if (s->bitstream_bpp == 12) {
850
22450
                    ydst = p->data[0] + p->linesize[0] * y;
851
852

22450
                    if (s->predictor == PLANE && s->interlaced < y) {
853
                        s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
854
855
                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
856
                    } else {
857
22450
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
858
                    }
859
22450
                    encode_gray_bitstream(s, width);
860
22450
                    y++;
861
22450
                    if (y >= height) break;
862
                }
863
864
66950
                ydst = p->data[0] + p->linesize[0] * y;
865
66950
                udst = p->data[1] + p->linesize[1] * cy;
866
66950
                vdst = p->data[2] + p->linesize[2] * cy;
867
868

66950
                if (s->predictor == PLANE && s->interlaced < cy) {
869
                    s->llvidencdsp.diff_bytes(s->temp[1],          ydst, ydst - fake_ystride, width);
870
                    s->llvidencdsp.diff_bytes(s->temp[2],          udst, udst - fake_ustride, width2);
871
                    s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
872
873
                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
874
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
875
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
876
                } else {
877
66950
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
878
66950
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
879
66950
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
880
                }
881
882
66950
                encode_422_bitstream(s, 0, width);
883
            }
884
        }
885
1200
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
886
200
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
887
200
        const int stride = -p->linesize[0];
888
200
        const int fake_stride = -fake_ystride;
889
        int y;
890
        int leftr, leftg, leftb, lefta;
891
892
200
        put_bits(&s->pb, 8, lefta = data[A]);
893
200
        put_bits(&s->pb, 8, leftr = data[R]);
894
200
        put_bits(&s->pb, 8, leftg = data[G]);
895
200
        put_bits(&s->pb, 8, leftb = data[B]);
896
897
200
        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
898
                                  &leftr, &leftg, &leftb, &lefta);
899
200
        encode_bgra_bitstream(s, width - 1, 4);
900
901
44900
        for (y = 1; y < s->height; y++) {
902
44700
            uint8_t *dst = data + y*stride;
903

44700
            if (s->predictor == PLANE && s->interlaced < y) {
904
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
905
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
906
                                          &leftr, &leftg, &leftb, &lefta);
907
            } else {
908
44700
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
909
                                          &leftr, &leftg, &leftb, &lefta);
910
            }
911
44700
            encode_bgra_bitstream(s, width, 4);
912
        }
913
1000
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
914
200
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
915
200
        const int stride = -p->linesize[0];
916
200
        const int fake_stride = -fake_ystride;
917
        int y;
918
        int leftr, leftg, leftb;
919
920
200
        put_bits(&s->pb, 8, leftr = data[0]);
921
200
        put_bits(&s->pb, 8, leftg = data[1]);
922
200
        put_bits(&s->pb, 8, leftb = data[2]);
923
200
        put_bits(&s->pb, 8, 0);
924
925
200
        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
926
                                  &leftr, &leftg, &leftb);
927
200
        encode_bgra_bitstream(s, width-1, 3);
928
929
44900
        for (y = 1; y < s->height; y++) {
930
44700
            uint8_t *dst = data + y * stride;
931

44700
            if (s->predictor == PLANE && s->interlaced < y) {
932
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
933
                                      width * 3);
934
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
935
                                          &leftr, &leftg, &leftb);
936
            } else {
937
44700
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
938
                                          &leftr, &leftg, &leftb);
939
            }
940
44700
            encode_bgra_bitstream(s, width, 3);
941
        }
942
800
    } else if (s->version > 2) {
943
        int plane;
944
3200
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
945
            int left, y;
946
2400
            int w = width;
947
2400
            int h = height;
948
2400
            int fake_stride = fake_ystride;
949
950

2400
            if (s->chroma && (plane == 1 || plane == 2)) {
951
1600
                w >>= s->chroma_h_shift;
952
1600
                h >>= s->chroma_v_shift;
953
1600
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
954
            }
955
956
2400
            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);
957
958
2400
            encode_plane_bitstream(s, w, plane);
959
960
2400
            if (s->predictor==MEDIAN) {
961
                int lefttop;
962
                y = 1;
963
                if (s->interlaced) {
964
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);
965
966
                    encode_plane_bitstream(s, w, plane);
967
                    y++;
968
                }
969
970
                lefttop = p->data[plane][0];
971
972
                for (; y < h; y++) {
973
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
974
975
                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);
976
977
                    encode_plane_bitstream(s, w, plane);
978
                }
979
            } else {
980
493900
                for (y = 1; y < h; y++) {
981
491500
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
982
983

491500
                    if (s->predictor == PLANE && s->interlaced < y) {
984
134100
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);
985
986
134100
                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
987
                    } else {
988
357400
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
989
                    }
990
991
491500
                    encode_plane_bitstream(s, w, plane);
992
                }
993
            }
994
        }
995
    } else {
996
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
997
    }
998
1600
    emms_c();
999
1000
1600
    size += (put_bits_count(&s->pb) + 31) / 8;
1001
1600
    put_bits(&s->pb, 16, 0);
1002
1600
    put_bits(&s->pb, 15, 0);
1003
1600
    size /= 4;
1004
1005

1600
    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1006
        int j;
1007
        char *p = avctx->stats_out;
1008
        char *end = p + STATS_OUT_SIZE;
1009
        for (i = 0; i < 4; i++) {
1010
            for (j = 0; j < s->vlc_n; j++) {
1011
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1012
                p += strlen(p);
1013
                s->stats[i][j]= 0;
1014
            }
1015
            snprintf(p, end-p, "\n");
1016
            p++;
1017
            if (end <= p)
1018
                return AVERROR(ENOMEM);
1019
        }
1020
1600
    } else if (avctx->stats_out)
1021
        avctx->stats_out[0] = '\0';
1022
1600
    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
1023
1600
        flush_put_bits(&s->pb);
1024
1600
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
1025
    }
1026
1027
1600
    s->picture_number++;
1028
1029
1600
    pkt->size   = size * 4;
1030
1600
    pkt->flags |= AV_PKT_FLAG_KEY;
1031
1600
    *got_packet = 1;
1032
1033
1600
    return 0;
1034
}
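Editor's note: at the end of encode_frame() the packet payload is sized as a whole number of 32-bit words covering every bit written; the 16-bit and 15-bit zero writes that follow pad the stream so the last counted word is completely emitted before the flush, and bswap_buf() then byte-swaps the buffer word by word. The size arithmetic alone, as a sketch:

/* Size arithmetic from the end of encode_frame(): number of 32-bit words
 * needed to hold `bits_written` bits. */
static int words_for_bits(unsigned bits_written)
{
    unsigned bytes = (bits_written + 31) / 8;   /* as in the code above        */
    return bytes / 4;                           /* == ceil(bits_written / 32)  */
}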
1035
1036
32
static av_cold int encode_end(AVCodecContext *avctx)
1037
{
1038
32
    HYuvContext *s = avctx->priv_data;
1039
1040
32
    ff_huffyuv_common_end(s);
1041
1042
32
    av_freep(&avctx->extradata);
1043
32
    av_freep(&avctx->stats_out);
1044
1045
32
    return 0;
1046
}
1047
1048
#define OFFSET(x) offsetof(HYuvContext, x)
1049
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1050
1051
#define COMMON_OPTIONS \
1052
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
1053
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 1 }, \
1054
      0, 1, VE }, \
1055
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
1056
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
1057
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
1058
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \
1059
1060
static const AVOption normal_options[] = {
1061
    COMMON_OPTIONS
1062
    { NULL },
1063
};
1064
1065
static const AVOption ff_options[] = {
1066
    COMMON_OPTIONS
1067
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
1068
    { NULL },
1069
};
1070
1071
static const AVClass normal_class = {
1072
    .class_name = "huffyuv",
1073
    .item_name  = av_default_item_name,
1074
    .option     = normal_options,
1075
    .version    = LIBAVUTIL_VERSION_INT,
1076
};
1077
1078
static const AVClass ff_class = {
1079
    .class_name = "ffvhuff",
1080
    .item_name  = av_default_item_name,
1081
    .option     = ff_options,
1082
    .version    = LIBAVUTIL_VERSION_INT,
1083
};
1084
1085
AVCodec ff_huffyuv_encoder = {
1086
    .name           = "huffyuv",
1087
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1088
    .type           = AVMEDIA_TYPE_VIDEO,
1089
    .id             = AV_CODEC_ID_HUFFYUV,
1090
    .priv_data_size = sizeof(HYuvContext),
1091
    .init           = encode_init,
1092
    .encode2        = encode_frame,
1093
    .close          = encode_end,
1094
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
1095
    .priv_class     = &normal_class,
1096
    .pix_fmts       = (const enum AVPixelFormat[]){
1097
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
1098
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1099
    },
1100
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
1101
                      FF_CODEC_CAP_INIT_CLEANUP,
1102
};
1103
1104
#if CONFIG_FFVHUFF_ENCODER
1105
AVCodec ff_ffvhuff_encoder = {
1106
    .name           = "ffvhuff",
1107
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1108
    .type           = AVMEDIA_TYPE_VIDEO,
1109
    .id             = AV_CODEC_ID_FFVHUFF,
1110
    .priv_data_size = sizeof(HYuvContext),
1111
    .init           = encode_init,
1112
    .encode2        = encode_frame,
1113
    .close          = encode_end,
1114
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
1115
    .priv_class     = &ff_class,
1116
    .pix_fmts       = (const enum AVPixelFormat[]){
1117
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,
1118
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
1119
        AV_PIX_FMT_GBRP,
1120
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
1121
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
1122
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
1123
        AV_PIX_FMT_GBRAP,
1124
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
1125
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
1126
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
1127
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
1128
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
1129
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
1130
        AV_PIX_FMT_RGB24,
1131
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1132
    },
1133
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
1134
                      FF_CODEC_CAP_INIT_CLEANUP,
1135
};
1136
#endif