GCC Code Coverage Report

Line | Branch | Exec | Source
   1 |  |  | /*
   2 |  |  |  * Ut Video decoder
   3 |  |  |  * Copyright (c) 2011 Konstantin Shishkov
   4 |  |  |  *
   5 |  |  |  * This file is part of FFmpeg.
   6 |  |  |  *
   7 |  |  |  * FFmpeg is free software; you can redistribute it and/or
   8 |  |  |  * modify it under the terms of the GNU Lesser General Public
   9 |  |  |  * License as published by the Free Software Foundation; either
  10 |  |  |  * version 2.1 of the License, or (at your option) any later version.
  11 |  |  |  *
  12 |  |  |  * FFmpeg is distributed in the hope that it will be useful,
  13 |  |  |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 |  |  |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15 |  |  |  * Lesser General Public License for more details.
  16 |  |  |  *
  17 |  |  |  * You should have received a copy of the GNU Lesser General Public
  18 |  |  |  * License along with FFmpeg; if not, write to the Free Software
  19 |  |  |  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20 |  |  |  */
  21 |  |  |
  22 |  |  | /**
  23 |  |  |  * @file
  24 |  |  |  * Ut Video decoder
  25 |  |  |  */
  26 |  |  |
  27 |  |  | #include <inttypes.h>
  28 |  |  | #include <stdlib.h>
  29 |  |  |
  30 |  |  | #define CACHED_BITSTREAM_READER !ARCH_X86_32
  31 |  |  | #define UNCHECKED_BITSTREAM_READER 1
  32 |  |  |
  33 |  |  | #include "libavutil/intreadwrite.h"
  34 |  |  | #include "libavutil/pixdesc.h"
  35 |  |  | #include "avcodec.h"
  36 |  |  | #include "bswapdsp.h"
  37 |  |  | #include "bytestream.h"
  38 |  |  | #include "get_bits.h"
  39 |  |  | #include "internal.h"
  40 |  |  | #include "thread.h"
  41 |  |  | #include "utvideo.h"
  42 |  |  |
  43 |  |  | typedef struct HuffEntry {
  44 |  |  |     uint8_t  len;
  45 |  |  |     uint16_t sym;
  46 |  |  | } HuffEntry;
  47 |  |  |
  48 |  | 183 | static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc,
  49 |  |  |                       int *fsym, unsigned nb_elems)
  50 |  |  | {
  51 |  |  |     int i;
  52 |  |  |     HuffEntry he[1024];
  53 |  |  |     uint8_t bits[1024];
  54 |  | 183 |     uint16_t codes_count[33] = { 0 };
  55 |  |  |
  56 |  | 183 |     *fsym = -1;
  57 | ✓✓ | 46566 |     for (i = 0; i < nb_elems; i++) {
  58 | ✓✓ | 46387 |         if (src[i] == 0) {
  59 |  | 4 |             *fsym = i;
  60 |  | 4 |             return 0;
  61 | ✓✓ | 46383 |         } else if (src[i] == 255) {
  62 |  | 18630 |             bits[i] = 0;
  63 | ✓✗ | 27753 |         } else if (src[i] <= 32) {
  64 |  | 27753 |             bits[i] = src[i];
  65 |  |  |         } else
  66 |  |  |             return AVERROR_INVALIDDATA;
  67 |  |  |
  68 |  | 46383 |         codes_count[bits[i]]++;
  69 |  |  |     }
  70 | ✗✓ | 179 |     if (codes_count[0] == nb_elems)
  71 |  |  |         return AVERROR_INVALIDDATA;
  72 |  |  |
  73 |  |  |     /* For Ut Video, longer codes are to the left of the tree and
  74 |  |  |      * for codes with the same length the symbol is descending from
  75 |  |  |      * left to right. So after the next loop --codes_count[i] will
  76 |  |  |      * be the index of the first (lowest) symbol of length i when
  77 |  |  |      * indexed by the position in the tree with left nodes being first. */
  78 | ✓✓ | 5907 |     for (int i = 31; i >= 0; i--)
  79 |  | 5728 |         codes_count[i] += codes_count[i + 1];
  80 |  |  |
  81 | ✓✓ | 46003 |     for (unsigned i = 0; i < nb_elems; i++)
  82 |  | 45824 |         he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i };
  83 |  |  |
  84 |  |  | #define VLC_BITS 11
  85 |  | 179 |     return ff_init_vlc_from_lengths(vlc, VLC_BITS, codes_count[0],
  86 |  |  |                                     &he[0].len, sizeof(*he),
  87 |  | 179 |                                     &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx);
  88 |  |  | }
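Note: the comment before the two loops at lines 73-82 is the heart of build_huff(). The following standalone sketch (not part of the file being reported on; the code-length values are made up) illustrates how the length histogram plus the descending prefix sum hand out table slots so that longer codes land first and, within one length, higher symbol values come first:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: hypothetical code lengths, same slot-assignment
     * trick as build_huff() above (histogram, descending prefix sum,
     * then --codes_count[len] picks each symbol's slot). */
    int main(void)
    {
        uint8_t  bits[6]         = { 2, 3, 3, 2, 3, 3 };
        uint16_t codes_count[33] = { 0 };
        int slot[6];

        for (int i = 0; i < 6; i++)
            codes_count[bits[i]]++;
        for (int i = 31; i >= 0; i--)
            codes_count[i] += codes_count[i + 1];
        for (int i = 0; i < 6; i++)
            slot[i] = --codes_count[bits[i]];

        for (int i = 0; i < 6; i++)
            printf("symbol %d (len %d) -> table index %d\n", i, bits[i], slot[i]);
        return 0;
    }

Run, this places the four length-3 symbols at slots 0-3 and the two length-2 symbols at slots 4-5, which is the ordering that he[] is filled with before being handed to ff_init_vlc_from_lengths().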
  89 |  |  |
  90 |  |  | static int decode_plane10(UtvideoContext *c, int plane_no,
  91 |  |  |                           uint16_t *dst, ptrdiff_t stride,
  92 |  |  |                           int width, int height,
  93 |  |  |                           const uint8_t *src, const uint8_t *huff,
  94 |  |  |                           int use_pred)
  95 |  |  | {
  96 |  |  |     int i, j, slice, pix, ret;
  97 |  |  |     int sstart, send;
  98 |  |  |     VLC vlc;
  99 |  |  |     GetBitContext gb;
 100 |  |  |     int prev, fsym;
 101 |  |  |
 102 |  |  |     if ((ret = build_huff(c, huff, &vlc, &fsym, 1024)) < 0) {
 103 |  |  |         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
 104 |  |  |         return ret;
 105 |  |  |     }
 106 |  |  |     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
 107 |  |  |         send = 0;
 108 |  |  |         for (slice = 0; slice < c->slices; slice++) {
 109 |  |  |             uint16_t *dest;
 110 |  |  |
 111 |  |  |             sstart = send;
 112 |  |  |             send   = (height * (slice + 1) / c->slices);
 113 |  |  |             dest   = dst + sstart * stride;
 114 |  |  |
 115 |  |  |             prev = 0x200;
 116 |  |  |             for (j = sstart; j < send; j++) {
 117 |  |  |                 for (i = 0; i < width; i++) {
 118 |  |  |                     pix = fsym;
 119 |  |  |                     if (use_pred) {
 120 |  |  |                         prev += pix;
 121 |  |  |                         prev &= 0x3FF;
 122 |  |  |                         pix   = prev;
 123 |  |  |                     }
 124 |  |  |                     dest[i] = pix;
 125 |  |  |                 }
 126 |  |  |                 dest += stride;
 127 |  |  |             }
 128 |  |  |         }
 129 |  |  |         return 0;
 130 |  |  |     }
 131 |  |  |
 132 |  |  |     send = 0;
 133 |  |  |     for (slice = 0; slice < c->slices; slice++) {
 134 |  |  |         uint16_t *dest;
 135 |  |  |         int slice_data_start, slice_data_end, slice_size;
 136 |  |  |
 137 |  |  |         sstart = send;
 138 |  |  |         send   = (height * (slice + 1) / c->slices);
 139 |  |  |         dest   = dst + sstart * stride;
 140 |  |  |
 141 |  |  |         // slice offset and size validation was done earlier
 142 |  |  |         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
 143 |  |  |         slice_data_end   = AV_RL32(src + slice * 4);
 144 |  |  |         slice_size       = slice_data_end - slice_data_start;
 145 |  |  |
 146 |  |  |         if (!slice_size) {
 147 |  |  |             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
 148 |  |  |                    "yet a slice has a length of zero.\n");
 149 |  |  |             goto fail;
 150 |  |  |         }
 151 |  |  |
 152 |  |  |         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
 153 |  |  |         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
 154 |  |  |                           (uint32_t *)(src + slice_data_start + c->slices * 4),
 155 |  |  |                           (slice_data_end - slice_data_start + 3) >> 2);
 156 |  |  |         init_get_bits(&gb, c->slice_bits, slice_size * 8);
 157 |  |  |
 158 |  |  |         prev = 0x200;
 159 |  |  |         for (j = sstart; j < send; j++) {
 160 |  |  |             for (i = 0; i < width; i++) {
 161 |  |  |                 pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
 162 |  |  |                 if (pix < 0) {
 163 |  |  |                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
 164 |  |  |                     goto fail;
 165 |  |  |                 }
 166 |  |  |                 if (use_pred) {
 167 |  |  |                     prev += pix;
 168 |  |  |                     prev &= 0x3FF;
 169 |  |  |                     pix   = prev;
 170 |  |  |                 }
 171 |  |  |                 dest[i] = pix;
 172 |  |  |             }
 173 |  |  |             dest += stride;
 174 |  |  |             if (get_bits_left(&gb) < 0) {
 175 |  |  |                 av_log(c->avctx, AV_LOG_ERROR,
 176 |  |  |                        "Slice decoding ran out of bits\n");
 177 |  |  |                 goto fail;
 178 |  |  |             }
 179 |  |  |         }
 180 |  |  |         if (get_bits_left(&gb) > 32)
 181 |  |  |             av_log(c->avctx, AV_LOG_WARNING,
 182 |  |  |                    "%d bits left after decoding slice\n", get_bits_left(&gb));
 183 |  |  |     }
 184 |  |  |
 185 |  |  |     ff_free_vlc(&vlc);
 186 |  |  |
 187 |  |  |     return 0;
 188 |  |  | fail:
 189 |  |  |     ff_free_vlc(&vlc);
 190 |  |  |     return AVERROR_INVALIDDATA;
 191 |  |  | }
 192 |  |  |
 193 |  | 183 | static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
 194 |  |  | {
 195 | ✓✓✓✓ | 183 |     const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
 196 |  |  |
 197 | ✓✓ | 183 |     if (interlaced)
 198 |  | 24 |         return ~(1 + 2 * is_luma);
 199 |  |  |
 200 |  | 159 |     return ~is_luma;
 201 |  |  | }
 202 |  |  |
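Note: the mask returned by compute_cmask() is ANDed onto slice boundary rows in decode_plane(): ~0 leaves them untouched, ~1 rounds them down to even rows (the luma plane of YUV420P, so its slice boundaries stay aligned with the half-height chroma planes), and interlacing doubles both cases to ~1 and ~3. A standalone illustration with an arbitrary boundary value:

    #include <stdio.h>

    int main(void)
    {
        int boundary = 135;                      /* hypothetical slice end row */
        printf("%d\n", boundary & ~0);           /* 135: no rounding            */
        printf("%d\n", boundary & ~1);           /* 134: even rows only         */
        printf("%d\n", boundary & ~(1 + 2 * 1)); /* 132: multiples of 4         */
        return 0;
    }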
 203 |  | 183 | static int decode_plane(UtvideoContext *c, int plane_no,
 204 |  |  |                         uint8_t *dst, ptrdiff_t stride,
 205 |  |  |                         int width, int height,
 206 |  |  |                         const uint8_t *src, int use_pred)
 207 |  |  | {
 208 |  |  |     int i, j, slice, pix;
 209 |  |  |     int sstart, send;
 210 |  |  |     VLC vlc;
 211 |  |  |     GetBitContext gb;
 212 |  |  |     int ret, prev, fsym;
 213 |  | 183 |     const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);
 214 |  |  |
 215 | ✗✓ | 183 |     if (c->pack) {
 216 |  |  |         send = 0;
 217 |  |  |         for (slice = 0; slice < c->slices; slice++) {
 218 |  |  |             GetBitContext cbit, pbit;
 219 |  |  |             uint8_t *dest, *p;
 220 |  |  |
 221 |  |  |             ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
 222 |  |  |             if (ret < 0)
 223 |  |  |                 return ret;
 224 |  |  |
 225 |  |  |             ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
 226 |  |  |             if (ret < 0)
 227 |  |  |                 return ret;
 228 |  |  |
 229 |  |  |             sstart = send;
 230 |  |  |             send   = (height * (slice + 1) / c->slices) & cmask;
 231 |  |  |             dest   = dst + sstart * stride;
 232 |  |  |
 233 |  |  |             if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
 234 |  |  |                 return AVERROR_INVALIDDATA;
 235 |  |  |
 236 |  |  |             for (p = dest; p < dst + send * stride; p += 8) {
 237 |  |  |                 int bits = get_bits_le(&cbit, 3);
 238 |  |  |
 239 |  |  |                 if (bits == 0) {
 240 |  |  |                     *(uint64_t *) p = 0;
 241 |  |  |                 } else {
 242 |  |  |                     uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
 243 |  |  |                     int k;
 244 |  |  |
 245 |  |  |                     if ((bits + 1) * 8 > get_bits_left(&pbit))
 246 |  |  |                         return AVERROR_INVALIDDATA;
 247 |  |  |
 248 |  |  |                     for (k = 0; k < 8; k++) {
 249 |  |  |
 250 |  |  |                         p[k] = get_bits_le(&pbit, bits + 1);
 251 |  |  |                         add = (~p[k] & sub) << (8 - bits);
 252 |  |  |                         p[k] -= sub;
 253 |  |  |                         p[k] += add;
 254 |  |  |                     }
 255 |  |  |                 }
 256 |  |  |             }
 257 |  |  |         }
 258 |  |  |
 259 |  |  |         return 0;
 260 |  |  |     }
 261 |  |  |
 262 | ✗✓ | 183 |     if (build_huff(c, src, &vlc, &fsym, 256)) {
 263 |  |  |         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
 264 |  |  |         return AVERROR_INVALIDDATA;
 265 |  |  |     }
 266 | ✓✓ | 183 |     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
 267 |  | 4 |         send = 0;
 268 | ✓✓ | 20 |         for (slice = 0; slice < c->slices; slice++) {
 269 |  |  |             uint8_t *dest;
 270 |  |  |
 271 |  | 16 |             sstart = send;
 272 |  | 16 |             send   = (height * (slice + 1) / c->slices) & cmask;
 273 |  | 16 |             dest   = dst + sstart * stride;
 274 |  |  |
 275 |  | 16 |             prev = 0x80;
 276 | ✓✓ | 3088 |             for (j = sstart; j < send; j++) {
 277 | ✓✓ | 3148800 |                 for (i = 0; i < width; i++) {
 278 |  | 3145728 |                     pix = fsym;
 279 | ✗✓ | 3145728 |                     if (use_pred) {
 280 |  |  |                         prev += (unsigned)pix;
 281 |  |  |                         pix   = prev;
 282 |  |  |                     }
 283 |  | 3145728 |                     dest[i] = pix;
 284 |  |  |                 }
 285 |  | 3072 |                 dest += stride;
 286 |  |  |             }
 287 |  |  |         }
 288 |  | 4 |         return 0;
 289 |  |  |     }
 290 |  |  |
 291 |  | 179 |     src += 256;
 292 |  |  |
 293 |  | 179 |     send = 0;
 294 | ✓✓ | 895 |     for (slice = 0; slice < c->slices; slice++) {
 295 |  |  |         uint8_t *dest;
 296 |  |  |         int slice_data_start, slice_data_end, slice_size;
 297 |  |  |
 298 |  | 716 |         sstart = send;
 299 |  | 716 |         send   = (height * (slice + 1) / c->slices) & cmask;
 300 |  | 716 |         dest   = dst + sstart * stride;
 301 |  |  |
 302 |  |  |         // slice offset and size validation was done earlier
 303 | ✓✓ | 716 |         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
 304 |  | 716 |         slice_data_end   = AV_RL32(src + slice * 4);
 305 |  | 716 |         slice_size       = slice_data_end - slice_data_start;
 306 |  |  |
 307 | ✗✓ | 716 |         if (!slice_size) {
 308 |  |  |             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
 309 |  |  |                    "yet a slice has a length of zero.\n");
 310 |  |  |             goto fail;
 311 |  |  |         }
 312 |  |  |
 313 |  | 716 |         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
 314 |  | 716 |         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
 315 |  | 716 |                           (uint32_t *)(src + slice_data_start + c->slices * 4),
 316 |  | 716 |                           (slice_data_end - slice_data_start + 3) >> 2);
 317 |  | 716 |         init_get_bits(&gb, c->slice_bits, slice_size * 8);
 318 |  |  |
 319 |  | 716 |         prev = 0x80;
 320 | ✓✓ | 63858 |         for (j = sstart; j < send; j++) {
 321 | ✓✓ | 35325686 |             for (i = 0; i < width; i++) {
 322 |  | 35262544 |                 pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
 323 | ✗✓ | 35262544 |                 if (pix < 0) {
 324 |  |  |                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
 325 |  |  |                     goto fail;
 326 |  |  |                 }
 327 | ✓✓ | 35262544 |                 if (use_pred) {
 328 |  | 15513600 |                     prev += pix;
 329 |  | 15513600 |                     pix   = prev;
 330 |  |  |                 }
 331 |  | 35262544 |                 dest[i] = pix;
 332 |  |  |             }
 333 | ✗✓ | 63142 |             if (get_bits_left(&gb) < 0) {
 334 |  |  |                 av_log(c->avctx, AV_LOG_ERROR,
 335 |  |  |                        "Slice decoding ran out of bits\n");
 336 |  |  |                 goto fail;
 337 |  |  |             }
 338 |  | 63142 |             dest += stride;
 339 |  |  |         }
 340 | ✗✓ | 716 |         if (get_bits_left(&gb) > 32)
 341 |  |  |             av_log(c->avctx, AV_LOG_WARNING,
 342 |  |  |                    "%d bits left after decoding slice\n", get_bits_left(&gb));
 343 |  |  |     }
 344 |  |  |
 345 |  | 179 |     ff_free_vlc(&vlc);
 346 |  |  |
 347 |  | 179 |     return 0;
 348 |  |  | fail:
 349 |  |  |     ff_free_vlc(&vlc);
 350 |  |  |     return AVERROR_INVALIDDATA;
 351 |  |  | }
 352 |  |  |
 353 |  |  | #undef A
 354 |  |  | #undef B
 355 |  |  | #undef C
 356 |  |  |
 357 |  | 77 | static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
 358 |  |  |                                   int width, int height, int slices, int rmode)
 359 |  |  | {
 360 |  |  |     int i, j, slice;
 361 |  |  |     int A, B, C;
 362 |  |  |     uint8_t *bsrc;
 363 |  |  |     int slice_start, slice_height;
 364 |  | 77 |     const int cmask = ~rmode;
 365 |  |  |
 366 | ✓✓ | 385 |     for (slice = 0; slice < slices; slice++) {
 367 |  | 308 |         slice_start  = ((slice * height) / slices) & cmask;
 368 |  | 308 |         slice_height = ((((slice + 1) * height) / slices) & cmask) -
 369 |  |  |                        slice_start;
 370 |  |  |
 371 | ✗✓ | 308 |         if (!slice_height)
 372 |  |  |             continue;
 373 |  | 308 |         bsrc = src + slice_start * stride;
 374 |  |  |
 375 |  |  |         // first line - left neighbour prediction
 376 |  | 308 |         bsrc[0] += 0x80;
 377 |  | 308 |         c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
 378 |  | 308 |         bsrc += stride;
 379 | ✗✓ | 308 |         if (slice_height <= 1)
 380 |  |  |             continue;
 381 |  |  |         // second line - first element has top prediction, the rest uses median
 382 |  | 308 |         C        = bsrc[-stride];
 383 |  | 308 |         bsrc[0] += C;
 384 |  | 308 |         A        = bsrc[0];
 385 | ✓✓ | 4928 |         for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
 386 |  | 4620 |             B        = bsrc[i - stride];
 387 |  | 4620 |             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
 388 |  | 4620 |             C        = B;
 389 |  | 4620 |             A        = bsrc[i];
 390 |  |  |         }
 391 | ✓✗ | 308 |         if (width > 16)
 392 |  | 308 |             c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
 393 |  | 308 |                                         bsrc + 16, width - 16, &A, &B);
 394 |  |  |
 395 |  | 308 |         bsrc += stride;
 396 |  |  |         // the rest of lines use continuous median prediction
 397 | ✓✓ | 33436 |         for (j = 2; j < slice_height; j++) {
 398 |  | 33128 |             c->llviddsp.add_median_pred(bsrc, bsrc - stride,
 399 |  |  |                                         bsrc, width, &A, &B);
 400 |  | 33128 |             bsrc += stride;
 401 |  |  |         }
 402 |  |  |     }
 403 |  | 77 | }
 404 |  |  |
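Note: restore_median_planar() relies on FFmpeg's mid_pred(), the median of the left neighbour A, the top neighbour B and the gradient A + B - C (C being the top-left neighbour; the decoder additionally wraps the gradient to 8 bits). A standalone sketch of that predictor with made-up sample values, using a local median-of-three helper rather than the real mid_pred():

    #include <stdio.h>

    /* Median of three values; mirrors what mid_pred() computes. */
    static int mid_pred3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b            */
        if (b > c)  b = c;                        /* b = min(max(a, b), c)    */
        if (a > b)  b = a;                        /* clamp from below with a  */
        return b;
    }

    int main(void)
    {
        int A = 100, B = 90, C = 95;              /* left, top, top-left */
        printf("prediction = %d\n", mid_pred3(A, B, A + B - C));  /* 95 */
        return 0;
    }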
 405 |  |  | /* UtVideo interlaced mode treats every two lines as a single one,
 406 |  |  |  * so restoring function should take care of possible padding between
 407 |  |  |  * two parts of the same "line".
 408 |  |  |  */
 409 |  | 12 | static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
 410 |  |  |                                      int width, int height, int slices, int rmode)
 411 |  |  | {
 412 |  |  |     int i, j, slice;
 413 |  |  |     int A, B, C;
 414 |  |  |     uint8_t *bsrc;
 415 |  |  |     int slice_start, slice_height;
 416 | ✓✓ | 12 |     const int cmask = ~(rmode ? 3 : 1);
 417 |  | 12 |     const ptrdiff_t stride2 = stride << 1;
 418 |  |  |
 419 | ✓✓ | 60 |     for (slice = 0; slice < slices; slice++) {
 420 |  | 48 |         slice_start  = ((slice * height) / slices) & cmask;
 421 |  | 48 |         slice_height = ((((slice + 1) * height) / slices) & cmask) -
 422 |  |  |                        slice_start;
 423 |  | 48 |         slice_height >>= 1;
 424 | ✗✓ | 48 |         if (!slice_height)
 425 |  |  |             continue;
 426 |  |  |
 427 |  | 48 |         bsrc = src + slice_start * stride;
 428 |  |  |
 429 |  |  |         // first line - left neighbour prediction
 430 |  | 48 |         bsrc[0] += 0x80;
 431 |  | 48 |         A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
 432 |  | 48 |         c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
 433 |  | 48 |         bsrc += stride2;
 434 | ✗✓ | 48 |         if (slice_height <= 1)
 435 |  |  |             continue;
 436 |  |  |         // second line - first element has top prediction, the rest uses median
 437 |  | 48 |         C        = bsrc[-stride2];
 438 |  | 48 |         bsrc[0] += C;
 439 |  | 48 |         A        = bsrc[0];
 440 | ✓✓ | 768 |         for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
 441 |  | 720 |             B        = bsrc[i - stride2];
 442 |  | 720 |             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
 443 |  | 720 |             C        = B;
 444 |  | 720 |             A        = bsrc[i];
 445 |  |  |         }
 446 | ✓✗ | 48 |         if (width > 16)
 447 |  | 48 |             c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
 448 |  | 48 |                                         bsrc + 16, width - 16, &A, &B);
 449 |  |  |
 450 |  | 48 |         c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
 451 |  | 48 |                                     bsrc + stride, width, &A, &B);
 452 |  | 48 |         bsrc += stride2;
 453 |  |  |         // the rest of lines use continuous median prediction
 454 | ✓✓ | 216 |         for (j = 2; j < slice_height; j++) {
 455 |  | 168 |             c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
 456 |  |  |                                         bsrc, width, &A, &B);
 457 |  | 168 |             c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
 458 |  | 168 |                                         bsrc + stride, width, &A, &B);
 459 |  | 168 |             bsrc += stride2;
 460 |  |  |         }
 461 |  |  |     }
 462 |  | 12 | }
 463 |  |  |
 464 |  | 13 | static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
 465 |  |  |                                     int width, int height, int slices, int rmode)
 466 |  |  | {
 467 |  |  |     int i, j, slice;
 468 |  |  |     int A, B, C;
 469 |  |  |     uint8_t *bsrc;
 470 |  |  |     int slice_start, slice_height;
 471 |  | 13 |     const int cmask = ~rmode;
 472 |  | 13 |     int min_width = FFMIN(width, 32);
 473 |  |  |
 474 | ✓✓ | 65 |     for (slice = 0; slice < slices; slice++) {
 475 |  | 52 |         slice_start  = ((slice * height) / slices) & cmask;
 476 |  | 52 |         slice_height = ((((slice + 1) * height) / slices) & cmask) -
 477 |  |  |                        slice_start;
 478 |  |  |
 479 | ✗✓ | 52 |         if (!slice_height)
 480 |  |  |             continue;
 481 |  | 52 |         bsrc = src + slice_start * stride;
 482 |  |  |
 483 |  |  |         // first line - left neighbour prediction
 484 |  | 52 |         bsrc[0] += 0x80;
 485 |  | 52 |         c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
 486 |  | 52 |         bsrc += stride;
 487 | ✗✓ | 52 |         if (slice_height <= 1)
 488 |  |  |             continue;
 489 | ✓✓ | 502 |         for (j = 1; j < slice_height; j++) {
 490 |  |  |             // second line - first element has top prediction, the rest uses gradient
 491 |  | 450 |             bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
 492 | ✓✓ | 14400 |             for (i = 1; i < min_width; i++) { /* dsp need align 32 */
 493 |  | 13950 |                 A = bsrc[i - stride];
 494 |  | 13950 |                 B = bsrc[i - (stride + 1)];
 495 |  | 13950 |                 C = bsrc[i - 1];
 496 |  | 13950 |                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
 497 |  |  |             }
 498 | ✓✓ | 450 |             if (width > 32)
 499 |  | 322 |                 c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
 500 |  | 450 |             bsrc += stride;
 501 |  |  |         }
 502 |  |  |     }
 503 |  | 13 | }
 504 |  |  |
 505 |  | 12 | static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
 506 |  |  |                                        int width, int height, int slices, int rmode)
 507 |  |  | {
 508 |  |  |     int i, j, slice;
 509 |  |  |     int A, B, C;
 510 |  |  |     uint8_t *bsrc;
 511 |  |  |     int slice_start, slice_height;
 512 | ✓✓ | 12 |     const int cmask = ~(rmode ? 3 : 1);
 513 |  | 12 |     const ptrdiff_t stride2 = stride << 1;
 514 |  | 12 |     int min_width = FFMIN(width, 32);
 515 |  |  |
 516 | ✓✓ | 60 |     for (slice = 0; slice < slices; slice++) {
 517 |  | 48 |         slice_start  = ((slice * height) / slices) & cmask;
 518 |  | 48 |         slice_height = ((((slice + 1) * height) / slices) & cmask) -
 519 |  |  |                        slice_start;
 520 |  | 48 |         slice_height >>= 1;
 521 | ✗✓ | 48 |         if (!slice_height)
 522 |  |  |             continue;
 523 |  |  |
 524 |  | 48 |         bsrc = src + slice_start * stride;
 525 |  |  |
 526 |  |  |         // first line - left neighbour prediction
 527 |  | 48 |         bsrc[0] += 0x80;
 528 |  | 48 |         A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
 529 |  | 48 |         c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
 530 |  | 48 |         bsrc += stride2;
 531 | ✗✓ | 48 |         if (slice_height <= 1)
 532 |  |  |             continue;
 533 | ✓✓ | 264 |         for (j = 1; j < slice_height; j++) {
 534 |  |  |             // second line - first element has top prediction, the rest uses gradient
 535 |  | 216 |             bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
 536 | ✓✓ | 6912 |             for (i = 1; i < min_width; i++) { /* dsp need align 32 */
 537 |  | 6696 |                 A = bsrc[i - stride2];
 538 |  | 6696 |                 B = bsrc[i - (stride2 + 1)];
 539 |  | 6696 |                 C = bsrc[i - 1];
 540 |  | 6696 |                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
 541 |  |  |             }
 542 | ✓✓ | 216 |             if (width > 32)
 543 |  | 160 |                 c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);
 544 |  |  |
 545 |  | 216 |             A = bsrc[-stride];
 546 |  | 216 |             B = bsrc[-(1 + stride + stride - width)];
 547 |  | 216 |             C = bsrc[width - 1];
 548 |  | 216 |             bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
 549 | ✓✓ | 12032 |             for (i = 1; i < width; i++) {
 550 |  | 11816 |                 A = bsrc[i - stride];
 551 |  | 11816 |                 B = bsrc[i - (1 + stride)];
 552 |  | 11816 |                 C = bsrc[i - 1 + stride];
 553 |  | 11816 |                 bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
 554 |  |  |             }
 555 |  | 216 |             bsrc += stride2;
 556 |  |  |         }
 557 |  |  |     }
 558 |  | 12 | }
 559 |  |  |
 560 |  | 57 | static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 561 |  |  |                         AVPacket *avpkt)
 562 |  |  | {
 563 |  | 57 |     const uint8_t *buf = avpkt->data;
 564 |  | 57 |     int buf_size = avpkt->size;
 565 |  | 57 |     UtvideoContext *c = avctx->priv_data;
 566 |  |  |     int i, j;
 567 |  |  |     const uint8_t *plane_start[5];
 568 |  | 57 |     int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
 569 |  |  |     int ret;
 570 |  |  |     GetByteContext gb;
 571 |  | 57 |     ThreadFrame frame = { .f = data };
 572 |  |  |
 573 | ✗✓ | 57 |     if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
 574 |  |  |         return ret;
 575 |  |  |
 576 |  |  |     /* parse plane structure to get frame flags and validate slice offsets */
 577 |  | 57 |     bytestream2_init(&gb, buf, buf_size);
 578 |  |  |
 579 | ✗✓ | 57 |     if (c->pack) {
 580 |  |  |         const uint8_t *packed_stream;
 581 |  |  |         const uint8_t *control_stream;
 582 |  |  |         GetByteContext pb;
 583 |  |  |         uint32_t nb_cbs;
 584 |  |  |         int left;
 585 |  |  |
 586 |  |  |         c->frame_info = PRED_GRADIENT << 8;
 587 |  |  |
 588 |  |  |         if (bytestream2_get_byte(&gb) != 1)
 589 |  |  |             return AVERROR_INVALIDDATA;
 590 |  |  |         bytestream2_skip(&gb, 3);
 591 |  |  |         c->offset = bytestream2_get_le32(&gb);
 592 |  |  |
 593 |  |  |         if (buf_size <= c->offset + 8LL)
 594 |  |  |             return AVERROR_INVALIDDATA;
 595 |  |  |
 596 |  |  |         bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
 597 |  |  |
 598 |  |  |         nb_cbs = bytestream2_get_le32(&pb);
 599 |  |  |         if (nb_cbs > c->offset)
 600 |  |  |             return AVERROR_INVALIDDATA;
 601 |  |  |
 602 |  |  |         packed_stream = buf + 8;
 603 |  |  |         control_stream = packed_stream + (c->offset - nb_cbs);
 604 |  |  |         left = control_stream - packed_stream;
 605 |  |  |
 606 |  |  |         for (i = 0; i < c->planes; i++) {
 607 |  |  |             for (j = 0; j < c->slices; j++) {
 608 |  |  |                 c->packed_stream[i][j] = packed_stream;
 609 |  |  |                 c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
 610 |  |  |                 if (c->packed_stream_size[i][j] > left)
 611 |  |  |                     return AVERROR_INVALIDDATA;
 612 |  |  |                 left -= c->packed_stream_size[i][j];
 613 |  |  |                 packed_stream += c->packed_stream_size[i][j];
 614 |  |  |             }
 615 |  |  |         }
 616 |  |  |
 617 |  |  |         left = buf + buf_size - control_stream;
 618 |  |  |
 619 |  |  |         for (i = 0; i < c->planes; i++) {
 620 |  |  |             for (j = 0; j < c->slices; j++) {
 621 |  |  |                 c->control_stream[i][j] = control_stream;
 622 |  |  |                 c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
 623 |  |  |                 if (c->control_stream_size[i][j] > left)
 624 |  |  |                     return AVERROR_INVALIDDATA;
 625 |  |  |                 left -= c->control_stream_size[i][j];
 626 |  |  |                 control_stream += c->control_stream_size[i][j];
 627 |  |  |             }
 628 |  |  |         }
 629 | ✗✓ | 57 |     } else if (c->pro) {
 630 |  |  |         if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
 631 |  |  |             av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
 632 |  |  |             return AVERROR_INVALIDDATA;
 633 |  |  |         }
 634 |  |  |         c->frame_info = bytestream2_get_le32u(&gb);
 635 |  |  |         c->slices = ((c->frame_info >> 16) & 0xff) + 1;
 636 |  |  |         for (i = 0; i < c->planes; i++) {
 637 |  |  |             plane_start[i] = gb.buffer;
 638 |  |  |             if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
 639 |  |  |                 av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
 640 |  |  |                 return AVERROR_INVALIDDATA;
 641 |  |  |             }
 642 |  |  |             slice_start = 0;
 643 |  |  |             slice_end = 0;
 644 |  |  |             for (j = 0; j < c->slices; j++) {
 645 |  |  |                 slice_end = bytestream2_get_le32u(&gb);
 646 |  |  |                 if (slice_end < 0 || slice_end < slice_start ||
 647 |  |  |                     bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
 648 |  |  |                     av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
 649 |  |  |                     return AVERROR_INVALIDDATA;
 650 |  |  |                 }
 651 |  |  |                 slice_size  = slice_end - slice_start;
 652 |  |  |                 slice_start = slice_end;
 653 |  |  |                 max_slice_size = FFMAX(max_slice_size, slice_size);
 654 |  |  |             }
 655 |  |  |             plane_size = slice_end;
 656 |  |  |             bytestream2_skipu(&gb, plane_size);
 657 |  |  |             bytestream2_skipu(&gb, 1024);
 658 |  |  |         }
 659 |  |  |         plane_start[c->planes] = gb.buffer;
 660 |  |  |     } else {
 661 | ✓✓ | 240 |         for (i = 0; i < c->planes; i++) {
 662 |  | 183 |             plane_start[i] = gb.buffer;
 663 | ✗✓ | 183 |             if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
 664 |  |  |                 av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
 665 |  |  |                 return AVERROR_INVALIDDATA;
 666 |  |  |             }
 667 |  | 183 |             bytestream2_skipu(&gb, 256);
 668 |  | 183 |             slice_start = 0;
 669 |  | 183 |             slice_end   = 0;
 670 | ✓✓ | 915 |             for (j = 0; j < c->slices; j++) {
 671 |  | 732 |                 slice_end   = bytestream2_get_le32u(&gb);
 672 | ✓✗✓✗ ✗✓ | 1464 |                 if (slice_end < 0 || slice_end < slice_start ||
 673 |  | 732 |                     bytestream2_get_bytes_left(&gb) < slice_end) {
 674 |  |  |                     av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
 675 |  |  |                     return AVERROR_INVALIDDATA;
 676 |  |  |                 }
 677 |  | 732 |                 slice_size  = slice_end - slice_start;
 678 |  | 732 |                 slice_start = slice_end;
 679 |  | 732 |                 max_slice_size = FFMAX(max_slice_size, slice_size);
 680 |  |  |             }
 681 |  | 183 |             plane_size = slice_end;
 682 |  | 183 |             bytestream2_skipu(&gb, plane_size);
 683 |  |  |         }
 684 |  | 57 |         plane_start[c->planes] = gb.buffer;
 685 | ✗✓ | 57 |         if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
 686 |  |  |             av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
 687 |  |  |             return AVERROR_INVALIDDATA;
 688 |  |  |         }
 689 |  | 57 |         c->frame_info = bytestream2_get_le32u(&gb);
 690 |  |  |     }
 691 |  | 57 |     av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
 692 |  |  |            c->frame_info);
 693 |  |  |
 694 |  | 57 |     c->frame_pred = (c->frame_info >> 8) & 3;
 695 |  |  |
 696 |  | 57 |     max_slice_size += 4*avctx->width;
 697 |  |  |
 698 | ✓✗ | 57 |     if (!c->pack) {
 699 |  | 57 |         av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
 700 |  | 57 |                        max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
 701 |  |  |
 702 | ✗✓ | 57 |         if (!c->slice_bits) {
 703 |  |  |             av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
 704 |  |  |             return AVERROR(ENOMEM);
 705 |  |  |         }
 706 |  |  |     }
 707 |  |  |
 708 | ✓✗✓✓ ✓✗✗✗ | 57 |     switch (c->avctx->pix_fmt) {
 709 |  | 28 |     case AV_PIX_FMT_GBRP:
 710 |  |  |     case AV_PIX_FMT_GBRAP:
 711 | ✓✓ | 124 |         for (i = 0; i < c->planes; i++) {
 712 |  | 96 |             ret = decode_plane(c, i, frame.f->data[i],
 713 |  | 96 |                                frame.f->linesize[i], avctx->width,
 714 |  |  |                                avctx->height, plane_start[i],
 715 |  | 96 |                                c->frame_pred == PRED_LEFT);
 716 | ✗✓ | 96 |             if (ret)
 717 |  |  |                 return ret;
 718 | ✓✓ | 96 |             if (c->frame_pred == PRED_MEDIAN) {
 719 | ✓✓ | 53 |                 if (!c->interlaced) {
 720 |  | 50 |                     restore_median_planar(c, frame.f->data[i],
 721 |  | 50 |                                           frame.f->linesize[i], avctx->width,
 722 |  |  |                                           avctx->height, c->slices, 0);
 723 |  |  |                 } else {
 724 |  | 3 |                     restore_median_planar_il(c, frame.f->data[i],
 725 |  | 3 |                                              frame.f->linesize[i],
 726 |  |  |                                              avctx->width, avctx->height, c->slices,
 727 |  |  |                                              0);
 728 |  |  |                 }
 729 | ✓✓ | 43 |             } else if (c->frame_pred == PRED_GRADIENT) {
 730 | ✓✓ | 7 |                 if (!c->interlaced) {
 731 |  | 4 |                     restore_gradient_planar(c, frame.f->data[i],
 732 |  | 4 |                                             frame.f->linesize[i], avctx->width,
 733 |  |  |                                             avctx->height, c->slices, 0);
 734 |  |  |                 } else {
 735 |  | 3 |                     restore_gradient_planar_il(c, frame.f->data[i],
 736 |  | 3 |                                                frame.f->linesize[i],
 737 |  |  |                                                avctx->width, avctx->height, c->slices,
 738 |  |  |                                                0);
 739 |  |  |                 }
 740 |  |  |             }
 741 |  |  |         }
 742 |  | 28 |         c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
 743 |  | 28 |                                     frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
 744 |  |  |                                     avctx->width, avctx->height);
 745 |  | 28 |         break;
 746 |  |  |     case AV_PIX_FMT_GBRAP10:
 747 |  |  |     case AV_PIX_FMT_GBRP10:
 748 |  |  |         for (i = 0; i < c->planes; i++) {
 749 |  |  |             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
 750 |  |  |                                  frame.f->linesize[i] / 2, avctx->width,
 751 |  |  |                                  avctx->height, plane_start[i],
 752 |  |  |                                  plane_start[i + 1] - 1024,
 753 |  |  |                                  c->frame_pred == PRED_LEFT);
 754 |  |  |             if (ret)
 755 |  |  |                 return ret;
 756 |  |  |         }
 757 |  |  |         c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
 758 |  |  |                                       frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
 759 |  |  |                                       avctx->width, avctx->height);
 760 |  |  |         break;
 761 |  | 14 |     case AV_PIX_FMT_YUV420P:
 762 | ✓✓ | 56 |         for (i = 0; i < 3; i++) {
 763 |  | 42 |             ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
 764 |  | 42 |                                avctx->width >> !!i, avctx->height >> !!i,
 765 |  | 42 |                                plane_start[i], c->frame_pred == PRED_LEFT);
 766 | ✗✓ | 42 |             if (ret)
 767 |  |  |                 return ret;
 768 | ✓✓ | 42 |             if (c->frame_pred == PRED_MEDIAN) {
 769 | ✓✓ | 15 |                 if (!c->interlaced) {
 770 |  | 12 |                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
 771 |  | 12 |                                           avctx->width >> !!i, avctx->height >> !!i,
 772 |  |  |                                           c->slices, !i);
 773 |  |  |                 } else {
 774 |  | 3 |                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
 775 |  | 3 |                                              avctx->width  >> !!i,
 776 |  | 3 |                                              avctx->height >> !!i,
 777 |  |  |                                              c->slices, !i);
 778 |  |  |                 }
 779 | ✓✓ | 27 |             } else if (c->frame_pred == PRED_GRADIENT) {
 780 | ✓✓ | 6 |                 if (!c->interlaced) {
 781 |  | 3 |                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
 782 |  | 3 |                                             avctx->width >> !!i, avctx->height >> !!i,
 783 |  |  |                                             c->slices, !i);
 784 |  |  |                 } else {
 785 |  | 3 |                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
 786 |  | 3 |                                                avctx->width  >> !!i,
 787 |  | 3 |                                                avctx->height >> !!i,
 788 |  |  |                                                c->slices, !i);
 789 |  |  |                 }
 790 |  |  |             }
 791 |  |  |         }
 792 |  | 14 |         break;
 793 |  | 11 |     case AV_PIX_FMT_YUV422P:
 794 | ✓✓ | 44 |         for (i = 0; i < 3; i++) {
 795 |  | 33 |             ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
 796 |  | 33 |                                avctx->width >> !!i, avctx->height,
 797 |  | 33 |                                plane_start[i], c->frame_pred == PRED_LEFT);
 798 | ✗✓ | 33 |             if (ret)
 799 |  |  |                 return ret;
 800 | ✓✓ | 33 |             if (c->frame_pred == PRED_MEDIAN) {
 801 | ✓✓ | 15 |                 if (!c->interlaced) {
 802 |  | 12 |                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
 803 |  | 12 |                                           avctx->width >> !!i, avctx->height,
 804 |  |  |                                           c->slices, 0);
 805 |  |  |                 } else {
 806 |  | 3 |                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
 807 |  | 3 |                                              avctx->width >> !!i, avctx->height,
 808 |  |  |                                              c->slices, 0);
 809 |  |  |                 }
 810 | ✓✓ | 18 |             } else if (c->frame_pred == PRED_GRADIENT) {
 811 | ✓✓ | 6 |                 if (!c->interlaced) {
 812 |  | 3 |                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
 813 |  | 3 |                                             avctx->width >> !!i, avctx->height,
 814 |  |  |                                             c->slices, 0);
 815 |  |  |                 } else {
 816 |  | 3 |                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
 817 |  | 3 |                                                avctx->width >> !!i, avctx->height,
 818 |  |  |                                                c->slices, 0);
 819 |  |  |                 }
 820 |  |  |             }
 821 |  |  |         }
 822 |  | 11 |         break;
 823 |  | 4 |     case AV_PIX_FMT_YUV444P:
 824 | ✓✓ | 16 |         for (i = 0; i < 3; i++) {
 825 |  | 12 |             ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
 826 |  |  |                                avctx->width, avctx->height,
 827 |  | 12 |                                plane_start[i], c->frame_pred == PRED_LEFT);
 828 | ✗✓ | 12 |             if (ret)
 829 |  |  |                 return ret;
 830 | ✓✓ | 12 |             if (c->frame_pred == PRED_MEDIAN) {
 831 | ✓✓ | 6 |                 if (!c->interlaced) {
 832 |  | 3 |                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
 833 |  |  |                                           avctx->width, avctx->height,
 834 |  |  |                                           c->slices, 0);
 835 |  |  |                 } else {
 836 |  | 3 |                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
 837 |  |  |                                              avctx->width, avctx->height,
 838 |  |  |                                              c->slices, 0);
 839 |  |  |                 }
 840 | ✓✗ | 6 |             } else if (c->frame_pred == PRED_GRADIENT) {
 841 | ✓✓ | 6 |                 if (!c->interlaced) {
 842 |  | 3 |                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
 843 |  |  |                                             avctx->width, avctx->height,
 844 |  |  |                                             c->slices, 0);
 845 |  |  |                 } else {
 846 |  | 3 |                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
 847 |  |  |                                                avctx->width, avctx->height,
 848 |  |  |                                                c->slices, 0);
 849 |  |  |                 }
 850 |  |  |             }
 851 |  |  |         }
 852 |  | 4 |         break;
 853 |  |  |     case AV_PIX_FMT_YUV420P10:
 854 |  |  |         for (i = 0; i < 3; i++) {
 855 |  |  |             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
 856 |  |  |                                  avctx->width >> !!i, avctx->height >> !!i,
 857 |  |  |                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
 858 |  |  |             if (ret)
 859 |  |  |                 return ret;
 860 |  |  |         }
 861 |  |  |         break;
 862 |  |  |     case AV_PIX_FMT_YUV422P10:
 863 |  |  |         for (i = 0; i < 3; i++) {
 864 |  |  |             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
 865 |  |  |                                  avctx->width >> !!i, avctx->height,
 866 |  |  |                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
 867 |  |  |             if (ret)
 868 |  |  |                 return ret;
 869 |  |  |         }
 870 |  |  |         break;
 871 |  |  |     }
 872 |  |  |
 873 |  | 57 |     frame.f->key_frame = 1;
 874 |  | 57 |     frame.f->pict_type = AV_PICTURE_TYPE_I;
 875 |  | 57 |     frame.f->interlaced_frame = !!c->interlaced;
 876 |  |  |
 877 |  | 57 |     *got_frame = 1;
 878 |  |  |
 879 |  |  |     /* always report that the buffer was completely consumed */
 880 |  | 57 |     return buf_size;
 881 |  |  | }
 882 |  |  |
 883 |  | 46 | static av_cold int decode_init(AVCodecContext *avctx)
 884 |  |  | {
 885 |  | 46 |     UtvideoContext * const c = avctx->priv_data;
 886 |  |  |     int h_shift, v_shift;
 887 |  |  |
 888 |  | 46 |     c->avctx = avctx;
 889 |  |  |
 890 |  | 46 |     ff_utvideodsp_init(&c->utdsp);
 891 |  | 46 |     ff_bswapdsp_init(&c->bdsp);
 892 |  | 46 |     ff_llviddsp_init(&c->llviddsp);
 893 |  |  |
 894 |  | 46 |     c->slice_bits_size = 0;
 895 |  |  |
 896 | ✓✓✓✓ ✗✗✗✗ ✗✓✓✓ ✗✗✗✗ ✗✗✗ | 46 |     switch (avctx->codec_tag) {
 897 |  | 10 |     case MKTAG('U', 'L', 'R', 'G'):
 898 |  | 10 |         c->planes = 3;
 899 |  | 10 |         avctx->pix_fmt = AV_PIX_FMT_GBRP;
 900 |  | 10 |         break;
 901 |  | 8 |     case MKTAG('U', 'L', 'R', 'A'):
 902 |  | 8 |         c->planes = 4;
 903 |  | 8 |         avctx->pix_fmt = AV_PIX_FMT_GBRAP;
 904 |  | 8 |         break;
 905 |  | 4 |     case MKTAG('U', 'L', 'Y', '0'):
 906 |  | 4 |         c->planes = 3;
 907 |  | 4 |         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 908 |  | 4 |         avctx->colorspace = AVCOL_SPC_BT470BG;
 909 |  | 4 |         break;
 910 |  | 4 |     case MKTAG('U', 'L', 'Y', '2'):
 911 |  | 4 |         c->planes = 3;
 912 |  | 4 |         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
 913 |  | 4 |         avctx->colorspace = AVCOL_SPC_BT470BG;
 914 |  | 4 |         break;
 915 |  |  |     case MKTAG('U', 'L', 'Y', '4'):
 916 |  |  |         c->planes = 3;
 917 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
 918 |  |  |         avctx->colorspace = AVCOL_SPC_BT470BG;
 919 |  |  |         break;
 920 |  |  |     case MKTAG('U', 'Q', 'Y', '0'):
 921 |  |  |         c->planes = 3;
 922 |  |  |         c->pro = 1;
 923 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
 924 |  |  |         break;
 925 |  |  |     case MKTAG('U', 'Q', 'Y', '2'):
 926 |  |  |         c->planes = 3;
 927 |  |  |         c->pro = 1;
 928 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
 929 |  |  |         break;
 930 |  |  |     case MKTAG('U', 'Q', 'R', 'G'):
 931 |  |  |         c->planes = 3;
 932 |  |  |         c->pro = 1;
 933 |  |  |         avctx->pix_fmt = AV_PIX_FMT_GBRP10;
 934 |  |  |         break;
 935 |  |  |     case MKTAG('U', 'Q', 'R', 'A'):
 936 |  |  |         c->planes = 4;
 937 |  |  |         c->pro = 1;
 938 |  |  |         avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
 939 |  |  |         break;
 940 |  | 6 |     case MKTAG('U', 'L', 'H', '0'):
 941 |  | 6 |         c->planes = 3;
 942 |  | 6 |         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 943 |  | 6 |         avctx->colorspace = AVCOL_SPC_BT709;
 944 |  | 6 |         break;
 945 |  | 6 |     case MKTAG('U', 'L', 'H', '2'):
 946 |  | 6 |         c->planes = 3;
 947 |  | 6 |         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
 948 |  | 6 |         avctx->colorspace = AVCOL_SPC_BT709;
 949 |  | 6 |         break;
 950 |  | 8 |     case MKTAG('U', 'L', 'H', '4'):
 951 |  | 8 |         c->planes = 3;
 952 |  | 8 |         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
 953 |  | 8 |         avctx->colorspace = AVCOL_SPC_BT709;
 954 |  | 8 |         break;
 955 |  |  |     case MKTAG('U', 'M', 'Y', '2'):
 956 |  |  |         c->planes = 3;
 957 |  |  |         c->pack = 1;
 958 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
 959 |  |  |         avctx->colorspace = AVCOL_SPC_BT470BG;
 960 |  |  |         break;
 961 |  |  |     case MKTAG('U', 'M', 'H', '2'):
 962 |  |  |         c->planes = 3;
 963 |  |  |         c->pack = 1;
 964 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
 965 |  |  |         avctx->colorspace = AVCOL_SPC_BT709;
 966 |  |  |         break;
 967 |  |  |     case MKTAG('U', 'M', 'Y', '4'):
 968 |  |  |         c->planes = 3;
 969 |  |  |         c->pack = 1;
 970 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
 971 |  |  |         avctx->colorspace = AVCOL_SPC_BT470BG;
 972 |  |  |         break;
 973 |  |  |     case MKTAG('U', 'M', 'H', '4'):
 974 |  |  |         c->planes = 3;
 975 |  |  |         c->pack = 1;
 976 |  |  |         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
 977 |  |  |         avctx->colorspace = AVCOL_SPC_BT709;
 978 |  |  |         break;
 979 |  |  |     case MKTAG('U', 'M', 'R', 'G'):
 980 |  |  |         c->planes = 3;
 981 |  |  |         c->pack = 1;
 982 |  |  |         avctx->pix_fmt = AV_PIX_FMT_GBRP;
 983 |  |  |         break;
 984 |  |  |     case MKTAG('U', 'M', 'R', 'A'):
 985 |  |  |         c->planes = 4;
 986 |  |  |         c->pack = 1;
 987 |  |  |         avctx->pix_fmt = AV_PIX_FMT_GBRAP;
 988 |  |  |         break;
 989 |  |  |     default:
 990 |  |  |         av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
 991 |  |  |                avctx->codec_tag);
 992 |  |  |         return AVERROR_INVALIDDATA;
 993 |  |  |     }
 994 |  |  |
 995 |  | 46 |     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
 996 | ✓✗ | 46 |     if ((avctx->width  & ((1<<h_shift)-1)) ||
 997 | ✗✓ | 46 |         (avctx->height & ((1<<v_shift)-1))) {
 998 |  |  |         avpriv_request_sample(avctx, "Odd dimensions");
 999 |  |  |         return AVERROR_PATCHWELCOME;
1000 |  |  |     }
1001 |  |  |
1002 | ✗✓✗✗ | 46 |     if (c->pack && avctx->extradata_size >= 16) {
1003 |  |  |         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1004 |  |  |                avctx->extradata[3], avctx->extradata[2],
1005 |  |  |                avctx->extradata[1], avctx->extradata[0]);
1006 |  |  |         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1007 |  |  |                AV_RB32(avctx->extradata + 4));
1008 |  |  |         c->compression = avctx->extradata[8];
1009 |  |  |         if (c->compression != 2)
1010 |  |  |             avpriv_request_sample(avctx, "Unknown compression type");
1011 |  |  |         c->slices      = avctx->extradata[9] + 1;
1012 | ✓✗✓✗ | 46 |     } else if (!c->pro && avctx->extradata_size >= 16) {
1013 |  | 46 |         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1014 |  | 46 |                avctx->extradata[3], avctx->extradata[2],
1015 |  | 46 |                avctx->extradata[1], avctx->extradata[0]);
1016 |  | 46 |         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1017 |  | 46 |                AV_RB32(avctx->extradata + 4));
1018 |  | 46 |         c->frame_info_size = AV_RL32(avctx->extradata + 8);
1019 |  | 46 |         c->flags           = AV_RL32(avctx->extradata + 12);
1020 |  |  |
1021 | ✗✓ | 46 |         if (c->frame_info_size != 4)
1022 |  |  |             avpriv_request_sample(avctx, "Frame info not 4 bytes");
1023 |  | 46 |         av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1024 |  | 46 |         c->slices      = (c->flags >> 24) + 1;
1025 |  | 46 |         c->compression = c->flags & 1;
1026 |  | 46 |         c->interlaced  = c->flags & 0x800;
1027 |  |  |     } else if (c->pro && avctx->extradata_size == 8) {
1028 |  |  |         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1029 |  |  |                avctx->extradata[3], avctx->extradata[2],
1030 |  |  |                avctx->extradata[1], avctx->extradata[0]);
1031 |  |  |         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1032 |  |  |                AV_RB32(avctx->extradata + 4));
1033 |  |  |         c->interlaced = 0;
1034 |  |  |         c->frame_info_size = 4;
1035 |  |  |     } else {
1036 |  |  |         av_log(avctx, AV_LOG_ERROR,
1037 |  |  |                "Insufficient extradata size %d, should be at least 16\n",
1038 |  |  |                avctx->extradata_size);
1039 |  |  |         return AVERROR_INVALIDDATA;
1040 |  |  |     }
1041 |  |  |
1042 |  | 46 |     return 0;
1043 |  |  | }
1044 |  |  |
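Note: for the non-pro variants, decode_init() above derives its setup from the 32-bit flags word at extradata offset 12: the top byte holds the slice count minus one, bit 0 the compression mode and bit 11 the interlacing flag. A standalone sketch decoding a made-up flags value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t flags = 0x03000801;   /* hypothetical extradata flags word */
        printf("slices      = %u\n", (unsigned)((flags >> 24) + 1));   /* 4   */
        printf("compression = %u\n", (unsigned)(flags & 1));           /* 1   */
        printf("interlaced  = %s\n", (flags & 0x800) ? "yes" : "no");  /* yes */
        return 0;
    }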
1045 |  | 46 | static av_cold int decode_end(AVCodecContext *avctx)
1046 |  |  | {
1047 |  | 46 |     UtvideoContext * const c = avctx->priv_data;
1048 |  |  |
1049 |  | 46 |     av_freep(&c->slice_bits);
1050 |  |  |
1051 |  | 46 |     return 0;
1052 |  |  | }
1053 |  |  |
1054 |  |  | AVCodec ff_utvideo_decoder = {
1055 |  |  |     .name           = "utvideo",
1056 |  |  |     .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
1057 |  |  |     .type           = AVMEDIA_TYPE_VIDEO,
1058 |  |  |     .id             = AV_CODEC_ID_UTVIDEO,
1059 |  |  |     .priv_data_size = sizeof(UtvideoContext),
1060 |  |  |     .init           = decode_init,
1061 |  |  |     .close          = decode_end,
1062 |  |  |     .decode         = decode_frame,
1063 |  |  |     .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1064 |  |  |     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
1065 |  |  | };

Generated by: GCOVR (Version 4.2)