Directory: ../../../ffmpeg/
File: src/libavcodec/utvideoenc.c
Date: 2022-07-05 19:52:29

 | Exec | Total | Coverage
---|---|---|---
Lines | 246 | 292 | 84.2%
Branches | 101 | 131 | 77.1%

Line | Branch | Exec | Source
---|---|---|---
1 | /* | ||
2 | * Ut Video encoder | ||
3 | * Copyright (c) 2012 Jan Ekström | ||
4 | * | ||
5 | * This file is part of FFmpeg. | ||
6 | * | ||
7 | * FFmpeg is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU Lesser General Public | ||
9 | * License as published by the Free Software Foundation; either | ||
10 | * version 2.1 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * FFmpeg is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public | ||
18 | * License along with FFmpeg; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
20 | */ | ||
21 | |||
22 | /** | ||
23 | * @file | ||
24 | * Ut Video encoder | ||
25 | */ | ||
26 | |||
27 | #include "libavutil/imgutils.h" | ||
28 | #include "libavutil/intreadwrite.h" | ||
29 | #include "libavutil/opt.h" | ||
30 | |||
31 | #include "avcodec.h" | ||
32 | #include "codec_internal.h" | ||
33 | #include "encode.h" | ||
34 | #include "bswapdsp.h" | ||
35 | #include "bytestream.h" | ||
36 | #include "put_bits.h" | ||
37 | #include "mathops.h" | ||
38 | #include "utvideo.h" | ||
39 | #include "huffman.h" | ||
40 | |||
41 | typedef struct HuffEntry { | ||
42 | uint16_t sym; | ||
43 | uint8_t len; | ||
44 | uint32_t code; | ||
45 | } HuffEntry; | ||
46 | |||
47 | /* Compare huffman tree nodes */ | ||
48 | 3199615 | static int ut_huff_cmp_len(const void *a, const void *b) | |
49 | { | ||
50 | 3199615 | const HuffEntry *aa = a, *bb = b; | |
51 | 3199615 | return (aa->len - bb->len)*256 + aa->sym - bb->sym; | |
52 | } | ||
53 | |||
54 | /* Compare huffentry symbols */ | ||
55 | 3154892 | static int huff_cmp_sym(const void *a, const void *b) | |
56 | { | ||
57 | 3154892 | const HuffEntry *aa = a, *bb = b; | |
58 | 3154892 | return aa->sym - bb->sym; | |
59 | } | ||
60 | |||
61 | 135 | static av_cold int utvideo_encode_close(AVCodecContext *avctx) | |
62 | { | ||
63 | 135 | UtvideoContext *c = avctx->priv_data; | |
64 | int i; | ||
65 | |||
66 | 135 | av_freep(&c->slice_bits); | |
67 | 2/2 ✓ Branch 0 taken 540 times. ✓ Branch 1 taken 135 times. | 675 | for (i = 0; i < 4; i++)
68 | 540 | av_freep(&c->slice_buffer[i]); | |
69 | |||
70 | 135 | return 0; | |
71 | } | ||
72 | |||
73 | 135 | static av_cold int utvideo_encode_init(AVCodecContext *avctx) | |
74 | { | ||
75 | 135 | UtvideoContext *c = avctx->priv_data; | |
76 | int i, subsampled_height; | ||
77 | uint32_t original_format; | ||
78 | |||
79 | 135 | c->avctx = avctx; | |
80 | 135 | c->frame_info_size = 4; | |
81 | 135 | c->slice_stride = FFALIGN(avctx->width, 32); | |
82 | |||
83 | 5/6 ✓ Branch 0 taken 27 times. ✓ Branch 1 taken 27 times. ✓ Branch 2 taken 27 times. ✓ Branch 3 taken 27 times. ✓ Branch 4 taken 27 times. ✗ Branch 5 not taken. | 135 | switch (avctx->pix_fmt) {
84 | 27 | case AV_PIX_FMT_GBRP: | |
85 | 27 | c->planes = 3; | |
86 | 27 | avctx->codec_tag = MKTAG('U', 'L', 'R', 'G'); | |
87 | 27 | original_format = UTVIDEO_RGB; | |
88 | 27 | break; | |
89 | 27 | case AV_PIX_FMT_GBRAP: | |
90 | 27 | c->planes = 4; | |
91 | 27 | avctx->codec_tag = MKTAG('U', 'L', 'R', 'A'); | |
92 | 27 | original_format = UTVIDEO_RGBA; | |
93 | 27 | avctx->bits_per_coded_sample = 32; | |
94 | 27 | break; | |
95 | 27 | case AV_PIX_FMT_YUV420P: | |
96 | 2/4 ✓ Branch 0 taken 27 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 27 times. | 27 | if (avctx->width & 1 || avctx->height & 1) {
97 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
98 | "4:2:0 video requires even width and height.\n"); | ||
99 | ✗ | return AVERROR_INVALIDDATA; | |
100 | } | ||
101 | 27 | c->planes = 3; | |
102 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 27 times. | 27 | if (avctx->colorspace == AVCOL_SPC_BT709)
103 | ✗ | avctx->codec_tag = MKTAG('U', 'L', 'H', '0'); | |
104 | else | ||
105 | 27 | avctx->codec_tag = MKTAG('U', 'L', 'Y', '0'); | |
106 | 27 | original_format = UTVIDEO_420; | |
107 | 27 | break; | |
108 | 27 | case AV_PIX_FMT_YUV422P: | |
109 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 27 times. | 27 | if (avctx->width & 1) {
110 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
111 | "4:2:2 video requires even width.\n"); | ||
112 | ✗ | return AVERROR_INVALIDDATA; | |
113 | } | ||
114 | 27 | c->planes = 3; | |
115 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 27 times. | 27 | if (avctx->colorspace == AVCOL_SPC_BT709)
116 | ✗ | avctx->codec_tag = MKTAG('U', 'L', 'H', '2'); | |
117 | else | ||
118 | 27 | avctx->codec_tag = MKTAG('U', 'L', 'Y', '2'); | |
119 | 27 | original_format = UTVIDEO_422; | |
120 | 27 | break; | |
121 | 27 | case AV_PIX_FMT_YUV444P: | |
122 | 27 | c->planes = 3; | |
123 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 27 times. | 27 | if (avctx->colorspace == AVCOL_SPC_BT709)
124 | ✗ | avctx->codec_tag = MKTAG('U', 'L', 'H', '4'); | |
125 | else | ||
126 | 27 | avctx->codec_tag = MKTAG('U', 'L', 'Y', '4'); | |
127 | 27 | original_format = UTVIDEO_444; | |
128 | 27 | break; | |
129 | ✗ | default: | |
130 | ✗ | av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n", | |
131 | ✗ | avctx->pix_fmt); | |
132 | ✗ | return AVERROR_INVALIDDATA; | |
133 | } | ||
134 | |||
135 | 135 | ff_bswapdsp_init(&c->bdsp); | |
136 | 135 | ff_llvidencdsp_init(&c->llvidencdsp); | |
137 | |||
138 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 135 times. | 135 | if (c->frame_pred == PRED_GRADIENT) {
139 | ✗ | av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n"); | |
140 | ✗ | return AVERROR_OPTION_NOT_FOUND; | |
141 | } | ||
142 | |||
143 | /* | ||
144 | * Check the asked slice count for obviously invalid | ||
145 | * values (> 256 or negative). | ||
146 | */ | ||
147 | 2/4 ✓ Branch 0 taken 135 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. ✓ Branch 3 taken 135 times. | 135 | if (avctx->slices > 256 || avctx->slices < 0) {
148 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
149 | "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n", | ||
150 | avctx->slices); | ||
151 | ✗ | return AVERROR(EINVAL); | |
152 | } | ||
153 | |||
154 | /* Check that the slice count is not larger than the subsampled height */ | ||
155 | 135 | subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h; | |
156 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 135 times. | 135 | if (avctx->slices > subsampled_height) {
157 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
158 | "Slice count %d is larger than the subsampling-applied height %d.\n", | ||
159 | avctx->slices, subsampled_height); | ||
160 | ✗ | return AVERROR(EINVAL); | |
161 | } | ||
162 | |||
163 | /* extradata size is 4 * 32 bits */ | ||
164 | 135 | avctx->extradata_size = 16; | |
165 | |||
166 | 135 | avctx->extradata = av_mallocz(avctx->extradata_size + | |
167 | AV_INPUT_BUFFER_PADDING_SIZE); | ||
168 | |||
169 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 135 times. | 135 | if (!avctx->extradata) {
170 | ✗ | av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n"); | |
171 | ✗ | return AVERROR(ENOMEM); | |
172 | } | ||
173 | |||
174 | 2/2 ✓ Branch 0 taken 432 times. ✓ Branch 1 taken 135 times. | 567 | for (i = 0; i < c->planes; i++) {
175 | 432 | c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) + | |
176 | AV_INPUT_BUFFER_PADDING_SIZE); | ||
177 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 432 times. | 432 | if (!c->slice_buffer[i]) {
178 | ✗ | av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n"); | |
179 | ✗ | return AVERROR(ENOMEM); | |
180 | } | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * Set the version of the encoder. | ||
185 | * Last byte is "implementation ID", which is | ||
186 | * obtained from the creator of the format. | ||
187 | * Libavcodec has been assigned with the ID 0xF0. | ||
188 | */ | ||
189 | 135 | AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0)); | |
190 | |||
191 | /* | ||
192 | * Set the "original format" | ||
193 | * Not used for anything during decoding. | ||
194 | */ | ||
195 | 135 | AV_WL32(avctx->extradata + 4, original_format); | |
196 | |||
197 | /* Write 4 as the 'frame info size' */ | ||
198 | 135 | AV_WL32(avctx->extradata + 8, c->frame_info_size); | |
199 | |||
200 | /* | ||
201 | * Set how many slices are going to be used. | ||
202 | * By default uses multiple slices depending on the subsampled height. | ||
203 | * This enables multithreading in the official decoder. | ||
204 | */ | ||
205 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 135 times. | 135 | if (!avctx->slices) {
206 | ✗ | c->slices = subsampled_height / 120; | |
207 | |||
208 | ✗ | if (!c->slices) | |
209 | ✗ | c->slices = 1; | |
210 | ✗ | else if (c->slices > 256) | |
211 | ✗ | c->slices = 256; | |
212 | } else { | ||
213 | 135 | c->slices = avctx->slices; | |
214 | } | ||
215 | |||
216 | /* Set compression mode */ | ||
217 | 135 | c->compression = COMP_HUFF; | |
218 | |||
219 | /* | ||
220 | * Set the encoding flags: | ||
221 | * - Slice count minus 1 | ||
222 | * - Interlaced encoding mode flag, set to zero for now. | ||
223 | * - Compression mode (none/huff) | ||
224 | * And write the flags. | ||
225 | */ | ||
226 | 135 | c->flags = (c->slices - 1) << 24; | |
227 | 135 | c->flags |= 0 << 11; // bit field to signal interlaced encoding mode | |
228 | 135 | c->flags |= c->compression; | |
229 | |||
230 | 135 | AV_WL32(avctx->extradata + 12, c->flags); | |
231 | |||
232 | 135 | return 0; | |
233 | } | ||
234 | |||
235 | 300 | static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride, | |
236 | uint8_t *const src[4], int planes, const int stride[4], | ||
237 | int width, int height) | ||
238 | { | ||
239 | int i, j; | ||
240 | 300 | int k = 2 * dst_stride; | |
241 | 300 | const uint8_t *sg = src[0]; | |
242 | 300 | const uint8_t *sb = src[1]; | |
243 | 300 | const uint8_t *sr = src[2]; | |
244 | 300 | const uint8_t *sa = src[3]; | |
245 | unsigned int g; | ||
246 | |||
247 | 2/2 ✓ Branch 0 taken 86400 times. ✓ Branch 1 taken 300 times. | 86700 | for (j = 0; j < height; j++) {
248 | 2/2 ✓ Branch 0 taken 43200 times. ✓ Branch 1 taken 43200 times. | 86400 | if (planes == 3) {
249 | 2/2 ✓ Branch 0 taken 15206400 times. ✓ Branch 1 taken 43200 times. | 15249600 | for (i = 0; i < width; i++) {
250 | 15206400 | g = sg[i]; | |
251 | 15206400 | dst[0][k] = g; | |
252 | 15206400 | g += 0x80; | |
253 | 15206400 | dst[1][k] = sb[i] - g; | |
254 | 15206400 | dst[2][k] = sr[i] - g; | |
255 | 15206400 | k++; | |
256 | } | ||
257 | } else { | ||
258 | 2/2 ✓ Branch 0 taken 15206400 times. ✓ Branch 1 taken 43200 times. | 15249600 | for (i = 0; i < width; i++) {
259 | 15206400 | g = sg[i]; | |
260 | 15206400 | dst[0][k] = g; | |
261 | 15206400 | g += 0x80; | |
262 | 15206400 | dst[1][k] = sb[i] - g; | |
263 | 15206400 | dst[2][k] = sr[i] - g; | |
264 | 15206400 | dst[3][k] = sa[i]; | |
265 | 15206400 | k++; | |
266 | } | ||
267 | 43200 | sa += stride[3]; | |
268 | } | ||
269 | 86400 | k += dst_stride - width; | |
270 | 86400 | sg += stride[0]; | |
271 | 86400 | sb += stride[1]; | |
272 | 86400 | sr += stride[2]; | |
273 | } | ||
274 | 300 | } | |
275 | |||
276 | #undef A | ||
277 | #undef B | ||
278 | |||
279 | /* Write data to a plane with median prediction */ | ||
280 | 800 | static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, | |
281 | ptrdiff_t stride, int width, int height) | ||
282 | { | ||
283 | int i, j; | ||
284 | int A, B; | ||
285 | uint8_t prev; | ||
286 | |||
287 | /* First line uses left neighbour prediction */ | ||
288 | 800 | prev = 0x80; /* Set the initial value */ | |
289 | 2/2 ✓ Branch 0 taken 246400 times. ✓ Branch 1 taken 800 times. | 247200 | for (i = 0; i < width; i++) {
290 | 246400 | *dst++ = src[i] - prev; | |
291 | 246400 | prev = src[i]; | |
292 | } | ||
293 | |||
294 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 800 times. | 800 | if (height == 1)
295 | ✗ | return; | |
296 | |||
297 | 800 | src += stride; | |
298 | |||
299 | /* | ||
300 | * Second line uses top prediction for the first sample, | ||
301 | * and median for the rest. | ||
302 | */ | ||
303 | 800 | A = B = 0; | |
304 | |||
305 | /* Rest of the coded part uses median prediction */ | ||
306 | 2/2 ✓ Branch 0 taken 215200 times. ✓ Branch 1 taken 800 times. | 216000 | for (j = 1; j < height; j++) {
307 | 215200 | c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B); | |
308 | 215200 | dst += width; | |
309 | 215200 | src += stride; | |
310 | } | ||
311 | } | ||
312 | |||
313 | /* Count the usage of values in a plane */ | ||
314 | 2400 | static void count_usage(uint8_t *src, int width, | |
315 | int height, uint64_t *counts) | ||
316 | { | ||
317 | int i, j; | ||
318 | |||
319 | 2/2 ✓ Branch 0 taken 648000 times. ✓ Branch 1 taken 2400 times. | 650400 | for (j = 0; j < height; j++) {
320 | 2/2 ✓ Branch 0 taken 205286400 times. ✓ Branch 1 taken 648000 times. | 205934400 | for (i = 0; i < width; i++) {
321 | 205286400 | counts[src[i]]++; | |
322 | } | ||
323 | 648000 | src += width; | |
324 | } | ||
325 | 2400 | } | |
326 | |||
327 | /* Calculate the actual huffman codes from the code lengths */ | ||
328 | 2350 | static void calculate_codes(HuffEntry *he) | |
329 | { | ||
330 | int last, i; | ||
331 | uint32_t code; | ||
332 | |||
333 | 2350 | qsort(he, 256, sizeof(*he), ut_huff_cmp_len); | |
334 | |||
335 | 2350 | last = 255; | |
336 | 3/4 ✓ Branch 0 taken 83691 times. ✓ Branch 1 taken 2350 times. ✓ Branch 2 taken 83691 times. ✗ Branch 3 not taken. | 86041 | while (he[last].len == 255 && last)
337 | 83691 | last--; | |
338 | |||
339 | 2350 | code = 0; | |
340 | 2/2 ✓ Branch 0 taken 517909 times. ✓ Branch 1 taken 2350 times. | 520259 | for (i = last; i >= 0; i--) {
341 | 517909 | he[i].code = code >> (32 - he[i].len); | |
342 | 517909 | code += 0x80000000u >> (he[i].len - 1); | |
343 | } | ||
344 | |||
345 | 2350 | qsort(he, 256, sizeof(*he), huff_cmp_sym); | |
346 | 2350 | } | |
347 | |||
348 | /* Write huffman bit codes to a memory block */ | ||
349 | 2350 | static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size, | |
350 | int width, int height, HuffEntry *he) | ||
351 | { | ||
352 | PutBitContext pb; | ||
353 | int i, j; | ||
354 | int count; | ||
355 | |||
356 | 2350 | init_put_bits(&pb, dst, dst_size); | |
357 | |||
358 | /* Write the codes */ | ||
359 | 2/2 ✓ Branch 0 taken 633600 times. ✓ Branch 1 taken 2350 times. | 635950 | for (j = 0; j < height; j++) {
360 | 2/2 ✓ Branch 0 taken 200217600 times. ✓ Branch 1 taken 633600 times. | 200851200 | for (i = 0; i < width; i++)
361 | 200217600 | put_bits(&pb, he[src[i]].len, he[src[i]].code); | |
362 | |||
363 | 633600 | src += width; | |
364 | } | ||
365 | |||
366 | /* Pad output to a 32-bit boundary */ | ||
367 | 2350 | count = put_bits_count(&pb) & 0x1F; | |
368 | |||
369 | 2/2 ✓ Branch 0 taken 2170 times. ✓ Branch 1 taken 180 times. | 2350 | if (count)
370 | 2170 | put_bits(&pb, 32 - count, 0); | |
371 | |||
372 | /* Flush the rest with zeroes */ | ||
373 | 2350 | flush_put_bits(&pb); | |
374 | |||
375 | /* Return the amount of bytes written */ | ||
376 | 2350 | return put_bytes_output(&pb); | |
377 | } | ||
378 | |||
379 | 2400 | static int encode_plane(AVCodecContext *avctx, uint8_t *src, | |
380 | uint8_t *dst, ptrdiff_t stride, int plane_no, | ||
381 | int width, int height, PutByteContext *pb) | ||
382 | { | ||
383 | 2400 | UtvideoContext *c = avctx->priv_data; | |
384 | uint8_t lengths[256]; | ||
385 | 2400 | uint64_t counts[256] = { 0 }; | |
386 | |||
387 | HuffEntry he[256]; | ||
388 | |||
389 | 2400 | uint32_t offset = 0, slice_len = 0; | |
390 | 4/4 ✓ Branch 0 taken 750 times. ✓ Branch 1 taken 1650 times. ✓ Branch 2 taken 150 times. ✓ Branch 3 taken 600 times. | 2400 | const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
391 | 2400 | int i, sstart, send = 0; | |
392 | int symbol; | ||
393 | int ret; | ||
394 | |||
395 | /* Do prediction / make planes */ | ||
396 | 3/4 ✓ Branch 0 taken 800 times. ✓ Branch 1 taken 800 times. ✓ Branch 2 taken 800 times. ✗ Branch 3 not taken. | 2400 | switch (c->frame_pred) {
397 | 800 | case PRED_NONE: | |
398 | 2/2 ✓ Branch 0 taken 800 times. ✓ Branch 1 taken 800 times. | 1600 | for (i = 0; i < c->slices; i++) {
399 | 800 | sstart = send; | |
400 | 800 | send = height * (i + 1) / c->slices & cmask; | |
401 | 800 | av_image_copy_plane(dst + sstart * width, width, | |
402 | 800 | src + sstart * stride, stride, | |
403 | width, send - sstart); | ||
404 | } | ||
405 | 800 | break; | |
406 | 800 | case PRED_LEFT: | |
407 | 2/2 ✓ Branch 0 taken 800 times. ✓ Branch 1 taken 800 times. | 1600 | for (i = 0; i < c->slices; i++) {
408 | 800 | sstart = send; | |
409 | 800 | send = height * (i + 1) / c->slices & cmask; | |
410 | 800 | c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart); | |
411 | } | ||
412 | 800 | break; | |
413 | 800 | case PRED_MEDIAN: | |
414 | 2/2 ✓ Branch 0 taken 800 times. ✓ Branch 1 taken 800 times. | 1600 | for (i = 0; i < c->slices; i++) {
415 | 800 | sstart = send; | |
416 | 800 | send = height * (i + 1) / c->slices & cmask; | |
417 | 800 | median_predict(c, src + sstart * stride, dst + sstart * width, | |
418 | stride, width, send - sstart); | ||
419 | } | ||
420 | 800 | break; | |
421 | ✗ | default: | |
422 | ✗ | av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n", | |
423 | c->frame_pred); | ||
424 | ✗ | return AVERROR_OPTION_NOT_FOUND; | |
425 | } | ||
426 | |||
427 | /* Count the usage of values */ | ||
428 | 2400 | count_usage(dst, width, height, counts); | |
429 | |||
430 | /* Check for a special case where only one symbol was used */ | ||
431 | 1/2 ✓ Branch 0 taken 16840 times. ✗ Branch 1 not taken. | 16840 | for (symbol = 0; symbol < 256; symbol++) {
432 | /* If non-zero count is found, see if it matches width * height */ | ||
433 | 2/2 ✓ Branch 0 taken 2400 times. ✓ Branch 1 taken 14440 times. | 16840 | if (counts[symbol]) {
434 | /* Special case if only one symbol was used */ | ||
435 | 2/2 ✓ Branch 0 taken 50 times. ✓ Branch 1 taken 2350 times. | 2400 | if (counts[symbol] == width * (int64_t)height) {
436 | /* | ||
437 | * Write a zero for the single symbol | ||
438 | * used in the plane, else 0xFF. | ||
439 | */ | ||
440 | 2/2 ✓ Branch 0 taken 12800 times. ✓ Branch 1 taken 50 times. | 12850 | for (i = 0; i < 256; i++) {
441 | 2/2 ✓ Branch 0 taken 50 times. ✓ Branch 1 taken 12750 times. | 12800 | if (i == symbol)
442 | 50 | bytestream2_put_byte(pb, 0); | |
443 | else | ||
444 | 12750 | bytestream2_put_byte(pb, 0xFF); | |
445 | } | ||
446 | |||
447 | /* Write zeroes for lengths */ | ||
448 | 2/2 ✓ Branch 0 taken 50 times. ✓ Branch 1 taken 50 times. | 100 | for (i = 0; i < c->slices; i++)
449 | 50 | bytestream2_put_le32(pb, 0); | |
450 | |||
451 | /* And that's all for that plane folks */ | ||
452 | 50 | return 0; | |
453 | } | ||
454 | 2350 | break; | |
455 | } | ||
456 | } | ||
457 | |||
458 | /* Calculate huffman lengths */ | ||
459 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 2350 times. | 2350 | if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
460 | ✗ | return ret; | |
461 | |||
462 | /* | ||
463 | * Write the plane's header into the output packet: | ||
464 | * - huffman code lengths (256 bytes) | ||
465 | * - slice end offsets (gotten from the slice lengths) | ||
466 | */ | ||
467 | 2/2 ✓ Branch 0 taken 601600 times. ✓ Branch 1 taken 2350 times. | 603950 | for (i = 0; i < 256; i++) {
468 | 601600 | bytestream2_put_byte(pb, lengths[i]); | |
469 | |||
470 | 601600 | he[i].len = lengths[i]; | |
471 | 601600 | he[i].sym = i; | |
472 | } | ||
473 | |||
474 | /* Calculate the huffman codes themselves */ | ||
475 | 2350 | calculate_codes(he); | |
476 | |||
477 | 2350 | send = 0; | |
478 | 2/2 ✓ Branch 0 taken 2350 times. ✓ Branch 1 taken 2350 times. | 4700 | for (i = 0; i < c->slices; i++) {
479 | 2350 | sstart = send; | |
480 | 2350 | send = height * (i + 1) / c->slices & cmask; | |
481 | |||
482 | /* | ||
483 | * Write the huffman codes to a buffer, | ||
484 | * get the offset in bytes. | ||
485 | */ | ||
486 | 4700 | offset += write_huff_codes(dst + sstart * width, c->slice_bits, | |
487 | 2350 | width * height + 4, width, | |
488 | send - sstart, he); | ||
489 | |||
490 | 2350 | slice_len = offset - slice_len; | |
491 | |||
492 | /* Byteswap the written huffman codes */ | ||
493 | 2350 | c->bdsp.bswap_buf((uint32_t *) c->slice_bits, | |
494 | 2350 | (uint32_t *) c->slice_bits, | |
495 | 2350 | slice_len >> 2); | |
496 | |||
497 | /* Write the offset to the stream */ | ||
498 | 2350 | bytestream2_put_le32(pb, offset); | |
499 | |||
500 | /* Seek to the data part of the packet */ | ||
501 | 2350 | bytestream2_seek_p(pb, 4 * (c->slices - i - 1) + | |
502 | 2350 | offset - slice_len, SEEK_CUR); | |
503 | |||
504 | /* Write the slices' data into the output packet */ | ||
505 | 2350 | bytestream2_put_buffer(pb, c->slice_bits, slice_len); | |
506 | |||
507 | /* Seek back to the slice offsets */ | ||
508 | 2350 | bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset, | |
509 | SEEK_CUR); | ||
510 | |||
511 | 2350 | slice_len = offset; | |
512 | } | ||
513 | |||
514 | /* And at the end seek to the end of written slice(s) */ | ||
515 | 2350 | bytestream2_seek_p(pb, offset, SEEK_CUR); | |
516 | |||
517 | 2350 | return 0; | |
518 | } | ||
519 | |||
520 | 750 | static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt, | |
521 | const AVFrame *pic, int *got_packet) | ||
522 | { | ||
523 | 750 | UtvideoContext *c = avctx->priv_data; | |
524 | PutByteContext pb; | ||
525 | |||
526 | uint32_t frame_info; | ||
527 | |||
528 | uint8_t *dst; | ||
529 | |||
530 | 750 | int width = avctx->width, height = avctx->height; | |
531 | 750 | int i, ret = 0; | |
532 | |||
533 | /* Allocate a new packet if needed, and set it to the pointer dst */ | ||
534 | 750 | ret = ff_alloc_packet(avctx, pkt, (256 + 4 * c->slices + width * height) | |
535 | 750 | * c->planes + 4); | |
536 | |||
537 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 750 times. | 750 | if (ret < 0)
538 | ✗ | return ret; | |
539 | |||
540 | 750 | dst = pkt->data; | |
541 | |||
542 | 750 | bytestream2_init_writer(&pb, dst, pkt->size); | |
543 | |||
544 | 750 | av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4); | |
545 | |||
546 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 750 times. | 750 | if (!c->slice_bits) {
547 | ✗ | av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n"); | |
548 | ✗ | return AVERROR(ENOMEM); | |
549 | } | ||
550 | |||
551 | /* In case of RGB, mangle the planes to Ut Video's format */ | ||
552 | 4/4 ✓ Branch 0 taken 600 times. ✓ Branch 1 taken 150 times. ✓ Branch 2 taken 150 times. ✓ Branch 3 taken 450 times. | 750 | if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
553 | 300 | mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data, | |
554 | 300 | c->planes, pic->linesize, width, height); | |
555 | |||
556 | /* Deal with the planes */ | ||
557 | 4/5 ✓ Branch 0 taken 300 times. ✓ Branch 1 taken 150 times. ✓ Branch 2 taken 150 times. ✓ Branch 3 taken 150 times. ✗ Branch 4 not taken. | 750 | switch (avctx->pix_fmt) {
558 | 300 | case AV_PIX_FMT_GBRP: | |
559 | case AV_PIX_FMT_GBRAP: | ||
560 | 2/2 ✓ Branch 0 taken 1050 times. ✓ Branch 1 taken 300 times. | 1350 | for (i = 0; i < c->planes; i++) {
561 | 1050 | ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride, | |
562 | c->slice_buffer[i], c->slice_stride, i, | ||
563 | width, height, &pb); | ||
564 | |||
565 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 1050 times. | 1050 | if (ret) {
566 | ✗ | av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i); | |
567 | ✗ | return ret; | |
568 | } | ||
569 | } | ||
570 | 300 | break; | |
571 | 150 | case AV_PIX_FMT_YUV444P: | |
572 | 2/2 ✓ Branch 0 taken 450 times. ✓ Branch 1 taken 150 times. | 600 | for (i = 0; i < c->planes; i++) {
573 | 450 | ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0], | |
574 | 450 | pic->linesize[i], i, width, height, &pb); | |
575 | |||
576 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 450 times. | 450 | if (ret) {
577 | ✗ | av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i); | |
578 | ✗ | return ret; | |
579 | } | ||
580 | } | ||
581 | 150 | break; | |
582 | 150 | case AV_PIX_FMT_YUV422P: | |
583 | 2/2 ✓ Branch 0 taken 450 times. ✓ Branch 1 taken 150 times. | 600 | for (i = 0; i < c->planes; i++) {
584 | 450 | ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0], | |
585 | 450 | pic->linesize[i], i, width >> !!i, height, &pb); | |
586 | |||
587 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 450 times. | 450 | if (ret) {
588 | ✗ | av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i); | |
589 | ✗ | return ret; | |
590 | } | ||
591 | } | ||
592 | 150 | break; | |
593 | 150 | case AV_PIX_FMT_YUV420P: | |
594 | 2/2 ✓ Branch 0 taken 450 times. ✓ Branch 1 taken 150 times. | 600 | for (i = 0; i < c->planes; i++) {
595 | 450 | ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0], | |
596 | 450 | pic->linesize[i], i, width >> !!i, height >> !!i, | |
597 | &pb); | ||
598 | |||
599 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 450 times. | 450 | if (ret) {
600 | ✗ | av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i); | |
601 | ✗ | return ret; | |
602 | } | ||
603 | } | ||
604 | 150 | break; | |
605 | ✗ | default: | |
606 | ✗ | av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n", | |
607 | ✗ | avctx->pix_fmt); | |
608 | ✗ | return AVERROR_INVALIDDATA; | |
609 | } | ||
610 | |||
611 | /* | ||
612 | * Write frame information (LE 32-bit unsigned) | ||
613 | * into the output packet. | ||
614 | * Contains the prediction method. | ||
615 | */ | ||
616 | 750 | frame_info = c->frame_pred << 8; | |
617 | 750 | bytestream2_put_le32(&pb, frame_info); | |
618 | |||
619 | 750 | pkt->size = bytestream2_tell_p(&pb); | |
620 | |||
621 | /* Packet should be done */ | ||
622 | 750 | *got_packet = 1; | |
623 | |||
624 | 750 | return 0; | |
625 | } | ||
626 | |||
627 | #define OFFSET(x) offsetof(UtvideoContext, x) | ||
628 | #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM | ||
629 | static const AVOption options[] = { | ||
630 | { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" }, | ||
631 | { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" }, | ||
632 | { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" }, | ||
633 | { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" }, | ||
634 | { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, | ||
635 | |||
636 | { NULL}, | ||
637 | }; | ||
638 | |||
639 | static const AVClass utvideo_class = { | ||
640 | .class_name = "utvideo", | ||
641 | .item_name = av_default_item_name, | ||
642 | .option = options, | ||
643 | .version = LIBAVUTIL_VERSION_INT, | ||
644 | }; | ||
645 | |||
646 | const FFCodec ff_utvideo_encoder = { | ||
647 | .p.name = "utvideo", | ||
648 | .p.long_name = NULL_IF_CONFIG_SMALL("Ut Video"), | ||
649 | .p.type = AVMEDIA_TYPE_VIDEO, | ||
650 | .p.id = AV_CODEC_ID_UTVIDEO, | ||
651 | .priv_data_size = sizeof(UtvideoContext), | ||
652 | .p.priv_class = &utvideo_class, | ||
653 | .init = utvideo_encode_init, | ||
654 | FF_CODEC_ENCODE_CB(utvideo_encode_frame), | ||
655 | .close = utvideo_encode_close, | ||
656 | .p.capabilities = AV_CODEC_CAP_FRAME_THREADS, | ||
657 | .p.pix_fmts = (const enum AVPixelFormat[]) { | ||
658 | AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P, | ||
659 | AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE | ||
660 | }, | ||
661 | .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP, | ||
662 | }; | ||
663 |
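
The PRED_MEDIAN path in the listing hands each row to c->llvidencdsp.sub_median_pred(), whose body lives outside this file and so does not appear in the coverage above. For reference, the stand-alone sketch below shows the median (MED) predictor that call applies; it is illustrative only, the function and parameter names are invented for the example, and it is not the libavcodec DSP implementation.

```c
#include <stdint.h>

/* Median of three integers. */
static int mid3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }
    if (b > c)
        b = c;
    return a > b ? a : b;
}

/*
 * One row of median prediction: each output byte is the current sample minus
 * the median of its left, top and (left + top - topleft) neighbours, with
 * 8-bit wraparound. 'left' and 'topleft' carry state from row to row, which
 * is why median_predict() in the listing passes &A and &B through every call.
 */
static void sub_median_row(uint8_t *dst, const uint8_t *top_row,
                           const uint8_t *cur_row, int width,
                           int *left, int *topleft)
{
    for (int i = 0; i < width; i++) {
        int top  = top_row[i];
        int pred = mid3(*left, top, (*left + top - *topleft) & 0xFF);

        dst[i]   = cur_row[i] - pred; /* uint8_t keeps the modular difference */
        *topleft = top;
        *left    = cur_row[i];
    }
}
```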
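
calculate_codes() in the listing derives canonical Huffman codes from the code lengths alone, which is why encode_plane() only has to store the 256 per-symbol length bytes in each plane header: a decoder can rebuild exactly the same codes from them. A minimal stand-alone sketch of that derivation, assuming the same "length 255 means unused symbol" convention, could look like this:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint16_t sym;
    uint8_t  len;   /* code length in bits, 255 = symbol not used */
    uint32_t code;
} Entry;

/* Order by length first, then by symbol, as ut_huff_cmp_len() does. */
static int cmp_len_sym(const void *a, const void *b)
{
    const Entry *aa = a, *bb = b;
    return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
}

/* Fill in canonical codes for 256 entries whose 'len' fields are already set. */
static void assign_codes(Entry he[256])
{
    qsort(he, 256, sizeof(*he), cmp_len_sym);

    int last = 255;
    while (last && he[last].len == 255)   /* skip unused symbols */
        last--;

    uint32_t code = 0;
    for (int i = last; i >= 0; i--) {     /* longest codes get the lowest values */
        he[i].code = code >> (32 - he[i].len);
        code      += 0x80000000u >> (he[i].len - 1);
    }
    /* The encoder then re-sorts by symbol so a code can be looked up directly. */
}
```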