GCC Code Coverage Report
Directory: ../../../ffmpeg/

                                                 Exec    Total    Coverage
File: src/libavcodec/huffyuvenc.c    Lines:       340      559      60.8 %
Date: 2021-04-18 21:26:34            Branches:    180      377      47.7 %

Line   Branch   Exec   Source
1
/*
2
 * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3
 *
4
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
5
 * the algorithm used
6
 *
7
 * This file is part of FFmpeg.
8
 *
9
 * FFmpeg is free software; you can redistribute it and/or
10
 * modify it under the terms of the GNU Lesser General Public
11
 * License as published by the Free Software Foundation; either
12
 * version 2.1 of the License, or (at your option) any later version.
13
 *
14
 * FFmpeg is distributed in the hope that it will be useful,
15
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
 * Lesser General Public License for more details.
18
 *
19
 * You should have received a copy of the GNU Lesser General Public
20
 * License along with FFmpeg; if not, write to the Free Software
21
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22
 *
23
 * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24
 */
25
26
/**
27
 * @file
28
 * huffyuv encoder
29
 */
30
31
#include "avcodec.h"
32
#include "huffyuv.h"
33
#include "huffman.h"
34
#include "huffyuvencdsp.h"
35
#include "internal.h"
36
#include "lossless_videoencdsp.h"
37
#include "put_bits.h"
38
#include "libavutil/opt.h"
39
#include "libavutil/pixdesc.h"
40
41
134100
static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
42
                              const uint8_t *src0, const uint8_t *src1, int w)
43
{
44
134100
    if (s->bps <= 8) {
45
        s->llvidencdsp.diff_bytes(dst, src0, src1, w);
46
    } else {
47
134100
        s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
48
    }
49
134100
}
50
51
718400
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
52
                                      const uint8_t *src, int w, int left)
53
{
54
    int i;
55
718400
    int min_width = FFMIN(w, 32);
56
57
718400
    if (s->bps <= 8) {
58
11777100
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
59
11417900
            const int temp = src[i];
60
11417900
            dst[i] = temp - left;
61
11417900
            left   = temp;
62
        }
63
359200
        if (w < 32)
64
5100
            return left;
65
354100
        s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
66
354100
        return src[w-1];
67
    } else {
68
359200
        const uint16_t *src16 = (const uint16_t *)src;
69
359200
        uint16_t       *dst16 = (      uint16_t *)dst;
70
11777100
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
71
11417900
            const int temp = src16[i];
72
11417900
            dst16[i] = temp - left;
73
11417900
            left   = temp;
74
        }
75
359200
        if (w < 32)
76
5100
            return left;
77
354100
        s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
78
354100
        return src16[w-1];
79
    }
80
}
81
82
44900
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
83
                                             const uint8_t *src, int w,
84
                                             int *red, int *green, int *blue,
85
                                             int *alpha)
86
{
87
    int i;
88
    int r, g, b, a;
89
44900
    int min_width = FFMIN(w, 8);
90
44900
    r = *red;
91
44900
    g = *green;
92
44900
    b = *blue;
93
44900
    a = *alpha;
94
95
404100
    for (i = 0; i < min_width; i++) {
96
359200
        const int rt = src[i * 4 + R];
97
359200
        const int gt = src[i * 4 + G];
98
359200
        const int bt = src[i * 4 + B];
99
359200
        const int at = src[i * 4 + A];
100
359200
        dst[i * 4 + R] = rt - r;
101
359200
        dst[i * 4 + G] = gt - g;
102
359200
        dst[i * 4 + B] = bt - b;
103
359200
        dst[i * 4 + A] = at - a;
104
359200
        r = rt;
105
359200
        g = gt;
106
359200
        b = bt;
107
359200
        a = at;
108
    }
109
110
44900
    s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
111
112
44900
    *red   = src[(w - 1) * 4 + R];
113
44900
    *green = src[(w - 1) * 4 + G];
114
44900
    *blue  = src[(w - 1) * 4 + B];
115
44900
    *alpha = src[(w - 1) * 4 + A];
116
44900
}
117
118
44900
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
119
                                             uint8_t *src, int w,
120
                                             int *red, int *green, int *blue)
121
{
122
    int i;
123
    int r, g, b;
124
44900
    r = *red;
125
44900
    g = *green;
126
44900
    b = *blue;
127
763300
    for (i = 0; i < FFMIN(w, 16); i++) {
128
718400
        const int rt = src[i * 3 + 0];
129
718400
        const int gt = src[i * 3 + 1];
130
718400
        const int bt = src[i * 3 + 2];
131
718400
        dst[i * 3 + 0] = rt - r;
132
718400
        dst[i * 3 + 1] = gt - g;
133
718400
        dst[i * 3 + 2] = bt - b;
134
718400
        r = rt;
135
718400
        g = gt;
136
718400
        b = bt;
137
    }
138
139
44900
    s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
140
141
44900
    *red   = src[(w - 1) * 3 + 0];
142
44900
    *green = src[(w - 1) * 3 + 1];
143
44900
    *blue  = src[(w - 1) * 3 + 2];
144
44900
}
145
146
static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
147
{
148
    if (s->bps <= 8) {
149
        s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
150
    } else {
151
        s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
152
    }
153
}
154
155
96
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
156
{
157
    int i;
158
96
    int index = 0;
159
96
    int n = s->vlc_n;
160
161
3972
    for (i = 0; i < n;) {
162
3876
        int val = len[i];
163
3876
        int repeat = 0;
164
165

277284
        for (; i < n && len[i] == val && repeat < 255; i++)
166
273408
            repeat++;
167
168


3876
        av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
169
3876
        if (repeat > 7) {
170
2244
            buf[index++] = val;
171
2244
            buf[index++] = repeat;
172
        } else {
173
1632
            buf[index++] = val | (repeat << 5);
174
        }
175
    }
176
177
96
    return index;
178
}
179
180
32
static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
181
{
182
    int i, ret;
183
32
    int size = 0;
184
32
    int count = 3;
185
186
32
    if (s->version > 2)
187
16
        count = 1 + s->alpha + 2*s->chroma;
188
189
128
    for (i = 0; i < count; i++) {
190
96
        if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
191
            return ret;
192
193
96
        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
194
            return -1;
195
        }
196
197
96
        size += store_table(s, s->len[i], buf + size);
198
    }
199
32
    return size;
200
}
201
202
32
static av_cold int encode_init(AVCodecContext *avctx)
203
{
204
32
    HYuvContext *s = avctx->priv_data;
205
    int i, j;
206
    int ret;
207
32
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
208
209
32
    ff_huffyuv_common_init(avctx);
210
32
    ff_huffyuvencdsp_init(&s->hencdsp, avctx);
211
32
    ff_llvidencdsp_init(&s->llvidencdsp);
212
213
32
    avctx->extradata = av_mallocz(3*MAX_N + 4);
214
32
    if (s->flags&AV_CODEC_FLAG_PASS1) {
215
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
216
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
217
        if (!avctx->stats_out)
218
            return AVERROR(ENOMEM);
219
    }
220
32
    s->version = 2;
221
222
32
    if (!avctx->extradata)
223
        return AVERROR(ENOMEM);
224
225
#if FF_API_CODED_FRAME
226
FF_DISABLE_DEPRECATION_WARNINGS
227
32
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
228
32
    avctx->coded_frame->key_frame = 1;
229
FF_ENABLE_DEPRECATION_WARNINGS
230
#endif
231
#if FF_API_PRIVATE_OPT
232
FF_DISABLE_DEPRECATION_WARNINGS
233
32
    if (avctx->context_model == 1)
234
        s->context = avctx->context_model;
235
FF_ENABLE_DEPRECATION_WARNINGS
236
#endif
237
238
32
    s->bps = desc->comp[0].depth;
239

32
    s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
240
32
    s->chroma = desc->nb_components > 2;
241
32
    s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
242
32
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
243
                                     &s->chroma_h_shift,
244
                                     &s->chroma_v_shift);
245
246

32
    switch (avctx->pix_fmt) {
247
8
    case AV_PIX_FMT_YUV420P:
248
    case AV_PIX_FMT_YUV422P:
249
8
        if (s->width & 1) {
250
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
251
            return AVERROR(EINVAL);
252
        }
253
8
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
254
8
        break;
255
16
    case AV_PIX_FMT_YUV444P:
256
    case AV_PIX_FMT_YUV410P:
257
    case AV_PIX_FMT_YUV411P:
258
    case AV_PIX_FMT_YUV440P:
259
    case AV_PIX_FMT_GBRP:
260
    case AV_PIX_FMT_GBRP9:
261
    case AV_PIX_FMT_GBRP10:
262
    case AV_PIX_FMT_GBRP12:
263
    case AV_PIX_FMT_GBRP14:
264
    case AV_PIX_FMT_GBRP16:
265
    case AV_PIX_FMT_GRAY8:
266
    case AV_PIX_FMT_GRAY16:
267
    case AV_PIX_FMT_YUVA444P:
268
    case AV_PIX_FMT_YUVA420P:
269
    case AV_PIX_FMT_YUVA422P:
270
    case AV_PIX_FMT_GBRAP:
271
    case AV_PIX_FMT_YUV420P9:
272
    case AV_PIX_FMT_YUV420P10:
273
    case AV_PIX_FMT_YUV420P12:
274
    case AV_PIX_FMT_YUV420P14:
275
    case AV_PIX_FMT_YUV420P16:
276
    case AV_PIX_FMT_YUV422P9:
277
    case AV_PIX_FMT_YUV422P10:
278
    case AV_PIX_FMT_YUV422P12:
279
    case AV_PIX_FMT_YUV422P14:
280
    case AV_PIX_FMT_YUV422P16:
281
    case AV_PIX_FMT_YUV444P9:
282
    case AV_PIX_FMT_YUV444P10:
283
    case AV_PIX_FMT_YUV444P12:
284
    case AV_PIX_FMT_YUV444P14:
285
    case AV_PIX_FMT_YUV444P16:
286
    case AV_PIX_FMT_YUVA420P9:
287
    case AV_PIX_FMT_YUVA420P10:
288
    case AV_PIX_FMT_YUVA420P16:
289
    case AV_PIX_FMT_YUVA422P9:
290
    case AV_PIX_FMT_YUVA422P10:
291
    case AV_PIX_FMT_YUVA422P16:
292
    case AV_PIX_FMT_YUVA444P9:
293
    case AV_PIX_FMT_YUVA444P10:
294
    case AV_PIX_FMT_YUVA444P16:
295
16
        s->version = 3;
296
16
        break;
297
4
    case AV_PIX_FMT_RGB32:
298
4
        s->bitstream_bpp = 32;
299
4
        break;
300
4
    case AV_PIX_FMT_RGB24:
301
4
        s->bitstream_bpp = 24;
302
4
        break;
303
    default:
304
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
305
        return AVERROR(EINVAL);
306
    }
307
32
    s->n = 1<<s->bps;
308
32
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);
309
310
32
    avctx->bits_per_coded_sample = s->bitstream_bpp;
311

32
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
312
#if FF_API_PRIVATE_OPT
313
FF_DISABLE_DEPRECATION_WARNINGS
314
32
    if (avctx->prediction_method)
315
        s->predictor = avctx->prediction_method;
316
FF_ENABLE_DEPRECATION_WARNINGS
317
#endif
318
32
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
319
32
    if (s->context) {
320
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
321
            av_log(avctx, AV_LOG_ERROR,
322
                   "context=1 is not compatible with "
323
                   "2 pass huffyuv encoding\n");
324
            return AVERROR(EINVAL);
325
        }
326
    }
327
328
32
    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
329
12
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
330
            av_log(avctx, AV_LOG_ERROR,
331
                   "Error: YV12 is not supported by huffyuv; use "
332
                   "vcodec=ffvhuff or format=422p\n");
333
            return AVERROR(EINVAL);
334
        }
335
#if FF_API_PRIVATE_OPT
336
12
        if (s->context) {
337
            av_log(avctx, AV_LOG_ERROR,
338
                   "Error: per-frame huffman tables are not supported "
339
                   "by huffyuv; use vcodec=ffvhuff\n");
340
            return AVERROR(EINVAL);
341
        }
342
12
        if (s->version > 2) {
343
            av_log(avctx, AV_LOG_ERROR,
344
                   "Error: ver>2 is not supported "
345
                   "by huffyuv; use vcodec=ffvhuff\n");
346
            return AVERROR(EINVAL);
347
        }
348
#endif
349
12
        if (s->interlaced != ( s->height > 288 ))
350
            av_log(avctx, AV_LOG_INFO,
351
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
352
    }
353
354

32
    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
355
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
356
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
357
        return AVERROR(EINVAL);
358
    }
359
360

32
    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
361
        av_log(avctx, AV_LOG_ERROR,
362
               "Error: RGB is incompatible with median predictor\n");
363
        return AVERROR(EINVAL);
364
    }
365
366
32
    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
367
32
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
368
32
    if (s->context)
369
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
370
32
    if (s->version < 3) {
371
16
        ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
372
16
        ((uint8_t*)avctx->extradata)[3] = 0;
373
    } else {
374
16
        ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
375
16
        if (s->chroma)
376
16
            ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
377
16
        if (s->alpha)
378
            ((uint8_t*)avctx->extradata)[2] |= 4;
379
16
        ((uint8_t*)avctx->extradata)[3] = 1;
380
    }
381
32
    s->avctx->extradata_size = 4;
382
383
32
    if (avctx->stats_in) {
384
        char *p = avctx->stats_in;
385
386
        for (i = 0; i < 4; i++)
387
            for (j = 0; j < s->vlc_n; j++)
388
                s->stats[i][j] = 1;
389
390
        for (;;) {
391
            for (i = 0; i < 4; i++) {
392
                char *next;
393
394
                for (j = 0; j < s->vlc_n; j++) {
395
                    s->stats[i][j] += strtol(p, &next, 0);
396
                    if (next == p) return -1;
397
                    p = next;
398
                }
399
            }
400
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
401
        }
402
    } else {
403
160
        for (i = 0; i < 4; i++)
404
364672
            for (j = 0; j < s->vlc_n; j++) {
405
364544
                int d = FFMIN(j, s->vlc_n - j);
406
407
364544
                s->stats[i][j] = 100000000 / (d*d + 1);
408
            }
409
    }
410
411
32
    ret = store_huffman_tables(s, s->avctx->extradata + s->avctx->extradata_size);
412
32
    if (ret < 0)
413
        return ret;
414
32
    s->avctx->extradata_size += ret;
415
416
32
    if (s->context) {
417
        for (i = 0; i < 4; i++) {
418
            int pels = s->width * s->height / (i ? 40 : 10);
419
            for (j = 0; j < s->vlc_n; j++) {
420
                int d = FFMIN(j, s->vlc_n - j);
421
                s->stats[i][j] = pels/(d*d + 1);
422
            }
423
        }
424
    } else {
425
160
        for (i = 0; i < 4; i++)
426
364672
            for (j = 0; j < s->vlc_n; j++)
427
364544
                s->stats[i][j]= 0;
428
    }
429
430
32
    if (ff_huffyuv_alloc_temp(s)) {
431
        ff_huffyuv_common_end(s);
432
        return AVERROR(ENOMEM);
433
    }
434
435
32
    s->picture_number=0;
436
437
32
    return 0;
438
}
439
67350
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
440
{
441
    int i;
442
67350
    const uint8_t *y = s->temp[0] + offset;
443
67350
    const uint8_t *u = s->temp[1] + offset / 2;
444
67350
    const uint8_t *v = s->temp[2] + offset / 2;
445
446
67350
    if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
447
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
448
        return -1;
449
    }
450
451
#define LOAD4\
452
            int y0 = y[2 * i];\
453
            int y1 = y[2 * i + 1];\
454
            int u0 = u[i];\
455
            int v0 = v[i];
456
457
67350
    count /= 2;
458
459
67350
    if (s->flags & AV_CODEC_FLAG_PASS1) {
460
        for(i = 0; i < count; i++) {
461
            LOAD4;
462
            s->stats[0][y0]++;
463
            s->stats[1][u0]++;
464
            s->stats[0][y1]++;
465
            s->stats[2][v0]++;
466
        }
467
    }
468
67350
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
469
        return 0;
470
67350
    if (s->context) {
471
        for (i = 0; i < count; i++) {
472
            LOAD4;
473
            s->stats[0][y0]++;
474
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
475
            s->stats[1][u0]++;
476
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
477
            s->stats[0][y1]++;
478
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
479
            s->stats[2][v0]++;
480
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
481
        }
482
    } else {
483
11515100
        for(i = 0; i < count; i++) {
484
11447750
            LOAD4;
485
11447750
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
486
11447750
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
487
11447750
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
488
11447750
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
489
        }
490
    }
491
67350
    return 0;
492
}
493
494
493900
static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
495
{
496
493900
    int i, count = width/2;
497
498
493900
    if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
499
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
500
        return -1;
501
    }
502
503
#define LOADEND\
504
            int y0 = s->temp[0][width-1];
505
#define LOADEND_14\
506
            int y0 = s->temp16[0][width-1] & mask;
507
#define LOADEND_16\
508
            int y0 = s->temp16[0][width-1];
509
#define STATEND\
510
            s->stats[plane][y0]++;
511
#define STATEND_16\
512
            s->stats[plane][y0>>2]++;
513
#define WRITEEND\
514
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
515
#define WRITEEND_16\
516
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
517
            put_bits(&s->pb, 2, y0&3);
518
519
#define LOAD2\
520
            int y0 = s->temp[0][2 * i];\
521
            int y1 = s->temp[0][2 * i + 1];
522
#define LOAD2_14\
523
            int y0 = s->temp16[0][2 * i] & mask;\
524
            int y1 = s->temp16[0][2 * i + 1] & mask;
525
#define LOAD2_16\
526
            int y0 = s->temp16[0][2 * i];\
527
            int y1 = s->temp16[0][2 * i + 1];
528
#define STAT2\
529
            s->stats[plane][y0]++;\
530
            s->stats[plane][y1]++;
531
#define STAT2_16\
532
            s->stats[plane][y0>>2]++;\
533
            s->stats[plane][y1>>2]++;
534
#define WRITE2\
535
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
536
            put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
537
#define WRITE2_16\
538
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
539
            put_bits(&s->pb, 2, y0&3);\
540
            put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
541
            put_bits(&s->pb, 2, y1&3);
542
543
493900
    if (s->bps <= 8) {
544
134700
    if (s->flags & AV_CODEC_FLAG_PASS1) {
545
        for (i = 0; i < count; i++) {
546
            LOAD2;
547
            STAT2;
548
        }
549
        if (width&1) {
550
            LOADEND;
551
            STATEND;
552
        }
553
    }
554
134700
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
555
        return 0;
556
557
134700
    if (s->context) {
558
        for (i = 0; i < count; i++) {
559
            LOAD2;
560
            STAT2;
561
            WRITE2;
562
        }
563
        if (width&1) {
564
            LOADEND;
565
            STATEND;
566
            WRITEEND;
567
        }
568
    } else {
569
23031000
        for (i = 0; i < count; i++) {
570
22896300
            LOAD2;
571
22896300
            WRITE2;
572
        }
573
134700
        if (width&1) {
574
            LOADEND;
575
            WRITEEND;
576
        }
577
    }
578
359200
    } else if (s->bps <= 14) {
579
224500
        int mask = s->n - 1;
580
224500
        if (s->flags & AV_CODEC_FLAG_PASS1) {
581
            for (i = 0; i < count; i++) {
582
                LOAD2_14;
583
                STAT2;
584
            }
585
            if (width&1) {
586
                LOADEND_14;
587
                STATEND;
588
            }
589
        }
590
224500
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
591
            return 0;
592
593
224500
        if (s->context) {
594
            for (i = 0; i < count; i++) {
595
                LOAD2_14;
596
                STAT2;
597
                WRITE2;
598
            }
599
            if (width&1) {
600
                LOADEND_14;
601
                STATEND;
602
                WRITEEND;
603
            }
604
        } else {
605
26934300
            for (i = 0; i < count; i++) {
606
26709800
                LOAD2_14;
607
26709800
                WRITE2;
608
            }
609
224500
            if (width&1) {
610
5100
                LOADEND_14;
611
5100
                WRITEEND;
612
            }
613
        }
614
    } else {
615
134700
        if (s->flags & AV_CODEC_FLAG_PASS1) {
616
            for (i = 0; i < count; i++) {
617
                LOAD2_16;
618
                STAT2_16;
619
            }
620
            if (width&1) {
621
                LOADEND_16;
622
                STATEND_16;
623
            }
624
        }
625
134700
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
626
            return 0;
627
628
134700
        if (s->context) {
629
            for (i = 0; i < count; i++) {
630
                LOAD2_16;
631
                STAT2_16;
632
                WRITE2_16;
633
            }
634
            if (width&1) {
635
                LOADEND_16;
636
                STATEND_16;
637
                WRITEEND_16;
638
            }
639
        } else {
640
23031000
            for (i = 0; i < count; i++) {
641
22896300
                LOAD2_16;
642
22896300
                WRITE2_16;
643
            }
644
134700
            if (width&1) {
645
                LOADEND_16;
646
                WRITEEND_16;
647
            }
648
        }
649
    }
650
#undef LOAD2
651
#undef STAT2
652
#undef WRITE2
653
493900
    return 0;
654
}
655
656
22450
static int encode_gray_bitstream(HYuvContext *s, int count)
657
{
658
    int i;
659
660
22450
    if (put_bytes_left(&s->pb, 0) < 4 * count) {
661
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
662
        return -1;
663
    }
664
665
#define LOAD2\
666
            int y0 = s->temp[0][2 * i];\
667
            int y1 = s->temp[0][2 * i + 1];
668
#define STAT2\
669
            s->stats[0][y0]++;\
670
            s->stats[0][y1]++;
671
#define WRITE2\
672
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
673
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
674
675
22450
    count /= 2;
676
677
22450
    if (s->flags & AV_CODEC_FLAG_PASS1) {
678
        for (i = 0; i < count; i++) {
679
            LOAD2;
680
            STAT2;
681
        }
682
    }
683
22450
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
684
        return 0;
685
686
22450
    if (s->context) {
687
        for (i = 0; i < count; i++) {
688
            LOAD2;
689
            STAT2;
690
            WRITE2;
691
        }
692
    } else {
693
3838500
        for (i = 0; i < count; i++) {
694
3816050
            LOAD2;
695
3816050
            WRITE2;
696
        }
697
    }
698
22450
    return 0;
699
}
700
701
89800
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
702
{
703
    int i;
704
705
89800
    if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
706
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
707
        return -1;
708
    }
709
710
#define LOAD_GBRA                                                       \
711
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];            \
712
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
713
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
714
    int a = s->temp[0][planes * i + A];
715
716
#define STAT_BGRA                                                       \
717
    s->stats[0][b]++;                                                   \
718
    s->stats[1][g]++;                                                   \
719
    s->stats[2][r]++;                                                   \
720
    if (planes == 4)                                                    \
721
        s->stats[2][a]++;
722
723
#define WRITE_GBRA                                                      \
724
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);                      \
725
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);                      \
726
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);                      \
727
    if (planes == 4)                                                    \
728
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
729
730
89800
    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
731
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
732
        for (i = 0; i < count; i++) {
733
            LOAD_GBRA;
734
            STAT_BGRA;
735
        }
736

89800
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
737
        for (i = 0; i < count; i++) {
738
            LOAD_GBRA;
739
            STAT_BGRA;
740
            WRITE_GBRA;
741
        }
742
    } else {
743
30617800
        for (i = 0; i < count; i++) {
744

30528000
            LOAD_GBRA;
745
30528000
            WRITE_GBRA;
746
        }
747
    }
748
89800
    return 0;
749
}
750
751
1600
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
752
                        const AVFrame *pict, int *got_packet)
753
{
754
1600
    HYuvContext *s = avctx->priv_data;
755
1600
    const int width = s->width;
756
1600
    const int width2 = s->width>>1;
757
1600
    const int height = s->height;
758
1600
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
759
1600
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
760
1600
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
761
1600
    const AVFrame * const p = pict;
762
1600
    int i, j, size = 0, ret;
763
764
1600
    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
765
        return ret;
766
767
1600
    if (s->context) {
768
        size = store_huffman_tables(s, pkt->data);
769
        if (size < 0)
770
            return size;
771
772
        for (i = 0; i < 4; i++)
773
            for (j = 0; j < s->vlc_n; j++)
774
                s->stats[i][j] >>= 1;
775
    }
776
777
1600
    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
778
779
1600
    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
780
1800
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
781
        int lefty, leftu, leftv, y, cy;
782
783
400
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
784
400
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
785
400
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
786
400
        put_bits(&s->pb, 8,         p->data[0][0]);
787
788
400
        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
789
400
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
790
400
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
791
792
400
        encode_422_bitstream(s, 2, width-2);
793
794
400
        if (s->predictor==MEDIAN) {
795
            int lefttopy, lefttopu, lefttopv;
796
            cy = y = 1;
797
            if (s->interlaced) {
798
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
799
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
800
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
801
802
                encode_422_bitstream(s, 0, width);
803
                y++; cy++;
804
            }
805
806
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
807
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
808
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
809
810
            encode_422_bitstream(s, 0, 4);
811
812
            lefttopy = p->data[0][3];
813
            lefttopu = p->data[1][1];
814
            lefttopv = p->data[2][1];
815
            s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width  - 4, &lefty, &lefttopy);
816
            s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
817
            s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
818
            encode_422_bitstream(s, 0, width - 4);
819
            y++; cy++;
820
821
            for (; y < height; y++,cy++) {
822
                uint8_t *ydst, *udst, *vdst;
823
824
                if (s->bitstream_bpp == 12) {
825
                    while (2 * cy > y) {
826
                        ydst = p->data[0] + p->linesize[0] * y;
827
                        s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
828
                        encode_gray_bitstream(s, width);
829
                        y++;
830
                    }
831
                    if (y >= height) break;
832
                }
833
                ydst = p->data[0] + p->linesize[0] * y;
834
                udst = p->data[1] + p->linesize[1] * cy;
835
                vdst = p->data[2] + p->linesize[2] * cy;
836
837
                s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width,  &lefty, &lefttopy);
838
                s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
839
                s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
840
841
                encode_422_bitstream(s, 0, width);
842
            }
843
        } else {
844
67350
            for (cy = y = 1; y < height; y++, cy++) {
845
                uint8_t *ydst, *udst, *vdst;
846
847
                /* encode a luma only line & y++ */
848
67150
                if (s->bitstream_bpp == 12) {
849
22450
                    ydst = p->data[0] + p->linesize[0] * y;
850
851

22450
                    if (s->predictor == PLANE && s->interlaced < y) {
852
                        s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
853
854
                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
855
                    } else {
856
22450
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
857
                    }
858
22450
                    encode_gray_bitstream(s, width);
859
22450
                    y++;
860
22450
                    if (y >= height) break;
861
                }
862
863
66950
                ydst = p->data[0] + p->linesize[0] * y;
864
66950
                udst = p->data[1] + p->linesize[1] * cy;
865
66950
                vdst = p->data[2] + p->linesize[2] * cy;
866
867

66950
                if (s->predictor == PLANE && s->interlaced < cy) {
868
                    s->llvidencdsp.diff_bytes(s->temp[1],          ydst, ydst - fake_ystride, width);
869
                    s->llvidencdsp.diff_bytes(s->temp[2],          udst, udst - fake_ustride, width2);
870
                    s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
871
872
                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
873
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
874
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
875
                } else {
876
66950
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
877
66950
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
878
66950
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
879
                }
880
881
66950
                encode_422_bitstream(s, 0, width);
882
            }
883
        }
884
1200
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
885
200
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
886
200
        const int stride = -p->linesize[0];
887
200
        const int fake_stride = -fake_ystride;
888
        int y;
889
        int leftr, leftg, leftb, lefta;
890
891
200
        put_bits(&s->pb, 8, lefta = data[A]);
892
200
        put_bits(&s->pb, 8, leftr = data[R]);
893
200
        put_bits(&s->pb, 8, leftg = data[G]);
894
200
        put_bits(&s->pb, 8, leftb = data[B]);
895
896
200
        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
897
                                  &leftr, &leftg, &leftb, &lefta);
898
200
        encode_bgra_bitstream(s, width - 1, 4);
899
900
44900
        for (y = 1; y < s->height; y++) {
901
44700
            uint8_t *dst = data + y*stride;
902

44700
            if (s->predictor == PLANE && s->interlaced < y) {
903
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
904
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
905
                                          &leftr, &leftg, &leftb, &lefta);
906
            } else {
907
44700
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
908
                                          &leftr, &leftg, &leftb, &lefta);
909
            }
910
44700
            encode_bgra_bitstream(s, width, 4);
911
        }
912
1000
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
913
200
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
914
200
        const int stride = -p->linesize[0];
915
200
        const int fake_stride = -fake_ystride;
916
        int y;
917
        int leftr, leftg, leftb;
918
919
200
        put_bits(&s->pb, 8, leftr = data[0]);
920
200
        put_bits(&s->pb, 8, leftg = data[1]);
921
200
        put_bits(&s->pb, 8, leftb = data[2]);
922
200
        put_bits(&s->pb, 8, 0);
923
924
200
        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
925
                                  &leftr, &leftg, &leftb);
926
200
        encode_bgra_bitstream(s, width-1, 3);
927
928
44900
        for (y = 1; y < s->height; y++) {
929
44700
            uint8_t *dst = data + y * stride;
930

44700
            if (s->predictor == PLANE && s->interlaced < y) {
931
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
932
                                      width * 3);
933
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
934
                                          &leftr, &leftg, &leftb);
935
            } else {
936
44700
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
937
                                          &leftr, &leftg, &leftb);
938
            }
939
44700
            encode_bgra_bitstream(s, width, 3);
940
        }
941
800
    } else if (s->version > 2) {
942
        int plane;
943
3200
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
944
            int left, y;
945
2400
            int w = width;
946
2400
            int h = height;
947
2400
            int fake_stride = fake_ystride;
948
949

2400
            if (s->chroma && (plane == 1 || plane == 2)) {
950
1600
                w >>= s->chroma_h_shift;
951
1600
                h >>= s->chroma_v_shift;
952
1600
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
953
            }
954
955
2400
            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);
956
957
2400
            encode_plane_bitstream(s, w, plane);
958
959
2400
            if (s->predictor==MEDIAN) {
960
                int lefttop;
961
                y = 1;
962
                if (s->interlaced) {
963
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);
964
965
                    encode_plane_bitstream(s, w, plane);
966
                    y++;
967
                }
968
969
                lefttop = p->data[plane][0];
970
971
                for (; y < h; y++) {
972
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
973
974
                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);
975
976
                    encode_plane_bitstream(s, w, plane);
977
                }
978
            } else {
979
493900
                for (y = 1; y < h; y++) {
980
491500
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
981
982

491500
                    if (s->predictor == PLANE && s->interlaced < y) {
983
134100
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);
984
985
134100
                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
986
                    } else {
987
357400
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
988
                    }
989
990
491500
                    encode_plane_bitstream(s, w, plane);
991
                }
992
            }
993
        }
994
    } else {
995
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
996
    }
997
1600
    emms_c();
998
999
1600
    size += (put_bits_count(&s->pb) + 31) / 8;
1000
1600
    put_bits(&s->pb, 16, 0);
1001
1600
    put_bits(&s->pb, 15, 0);
1002
1600
    size /= 4;
1003
1004

1600
    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1005
        int j;
1006
        char *p = avctx->stats_out;
1007
        char *end = p + STATS_OUT_SIZE;
1008
        for (i = 0; i < 4; i++) {
1009
            for (j = 0; j < s->vlc_n; j++) {
1010
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1011
                p += strlen(p);
1012
                s->stats[i][j]= 0;
1013
            }
1014
            snprintf(p, end-p, "\n");
1015
            p++;
1016
            if (end <= p)
1017
                return AVERROR(ENOMEM);
1018
        }
1019
1600
    } else if (avctx->stats_out)
1020
        avctx->stats_out[0] = '\0';
1021
1600
    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
1022
1600
        flush_put_bits(&s->pb);
1023
1600
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
1024
    }
1025
1026
1600
    s->picture_number++;
1027
1028
1600
    pkt->size   = size * 4;
1029
1600
    pkt->flags |= AV_PKT_FLAG_KEY;
1030
1600
    *got_packet = 1;
1031
1032
1600
    return 0;
1033
}
1034
1035
32
static av_cold int encode_end(AVCodecContext *avctx)
1036
{
1037
32
    HYuvContext *s = avctx->priv_data;
1038
1039
32
    ff_huffyuv_common_end(s);
1040
1041
32
    av_freep(&avctx->extradata);
1042
32
    av_freep(&avctx->stats_out);
1043
1044
32
    return 0;
1045
}
1046
1047
#define OFFSET(x) offsetof(HYuvContext, x)
1048
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1049
1050
#define COMMON_OPTIONS \
1051
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
1052
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 1 }, \
1053
      0, 1, VE }, \
1054
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
1055
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
1056
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
1057
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \
1058
1059
static const AVOption normal_options[] = {
1060
    COMMON_OPTIONS
1061
    { NULL },
1062
};
1063
1064
static const AVOption ff_options[] = {
1065
    COMMON_OPTIONS
1066
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
1067
    { NULL },
1068
};
1069
1070
static const AVClass normal_class = {
1071
    .class_name = "huffyuv",
1072
    .item_name  = av_default_item_name,
1073
    .option     = normal_options,
1074
    .version    = LIBAVUTIL_VERSION_INT,
1075
};
1076
1077
static const AVClass ff_class = {
1078
    .class_name = "ffvhuff",
1079
    .item_name  = av_default_item_name,
1080
    .option     = ff_options,
1081
    .version    = LIBAVUTIL_VERSION_INT,
1082
};
1083
1084
AVCodec ff_huffyuv_encoder = {
1085
    .name           = "huffyuv",
1086
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1087
    .type           = AVMEDIA_TYPE_VIDEO,
1088
    .id             = AV_CODEC_ID_HUFFYUV,
1089
    .priv_data_size = sizeof(HYuvContext),
1090
    .init           = encode_init,
1091
    .encode2        = encode_frame,
1092
    .close          = encode_end,
1093
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS,
1094
    .priv_class     = &normal_class,
1095
    .pix_fmts       = (const enum AVPixelFormat[]){
1096
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
1097
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1098
    },
1099
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
1100
                      FF_CODEC_CAP_INIT_CLEANUP,
1101
};
1102
1103
#if CONFIG_FFVHUFF_ENCODER
1104
AVCodec ff_ffvhuff_encoder = {
1105
    .name           = "ffvhuff",
1106
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1107
    .type           = AVMEDIA_TYPE_VIDEO,
1108
    .id             = AV_CODEC_ID_FFVHUFF,
1109
    .priv_data_size = sizeof(HYuvContext),
1110
    .init           = encode_init,
1111
    .encode2        = encode_frame,
1112
    .close          = encode_end,
1113
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS,
1114
    .priv_class     = &ff_class,
1115
    .pix_fmts       = (const enum AVPixelFormat[]){
1116
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,
1117
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
1118
        AV_PIX_FMT_GBRP,
1119
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
1120
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
1121
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
1122
        AV_PIX_FMT_GBRAP,
1123
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
1124
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
1125
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
1126
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
1127
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
1128
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
1129
        AV_PIX_FMT_RGB24,
1130
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1131
    },
1132
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
1133
                      FF_CODEC_CAP_INIT_CLEANUP,
1134
};
1135
#endif
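
Usage sketch (not part of the coverage listing above): the AVCodec entries registered at the end of the file expose these code paths through the standard libavcodec API, so a minimal caller like the one below is what exercises encode_init(), encode_frame() and encode_end() during a coverage run. This is an illustrative sketch assuming the FFmpeg 4.x-era public send/receive API in use when this report was generated; the frame dimensions, pixel format and the encode_one_frame() helper name are invented for the example, and error handling is abbreviated.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Hypothetical helper: push one YUV422P frame through the "ffvhuff" encoder. */
static int encode_one_frame(void)
{
    const AVCodec  *codec = avcodec_find_encoder_by_name("ffvhuff");
    AVCodecContext *ctx   = codec ? avcodec_alloc_context3(codec) : NULL;
    AVFrame        *frame = av_frame_alloc();
    AVPacket       *pkt   = av_packet_alloc();
    int ret = AVERROR(ENOMEM);

    if (!codec || !ctx || !frame || !pkt)
        goto end;

    /* encode_init() above validates these, e.g. even width for 4:2:2/4:2:0. */
    ctx->width     = 320;
    ctx->height    = 240;
    ctx->pix_fmt   = AV_PIX_FMT_YUV422P;
    ctx->time_base = (AVRational){ 1, 25 };

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)      /* runs encode_init() */
        goto end;

    frame->format = ctx->pix_fmt;
    frame->width  = ctx->width;
    frame->height = ctx->height;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        goto end;
    /* ... fill frame->data[0..2] with picture data here ... */

    if ((ret = avcodec_send_frame(ctx, frame)) < 0)       /* drives encode_frame() */
        goto end;
    while ((ret = avcodec_receive_packet(ctx, pkt)) == 0)
        av_packet_unref(pkt);                             /* consume pkt->data here */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;                                          /* draining (send NULL) omitted */

end:
    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&ctx);                           /* runs encode_end() */
    return ret;
}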