Line | Branch | Exec | Source |
---|---|---|---|
1 | /* | ||
2 | * This file is part of FFmpeg. | ||
3 | * | ||
4 | * FFmpeg is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU Lesser General Public | ||
6 | * License as published by the Free Software Foundation; either | ||
7 | * version 2.1 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * FFmpeg is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * Lesser General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU Lesser General Public | ||
15 | * License along with FFmpeg; if not, write to the Free Software | ||
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | |||
19 | #include <stdatomic.h> | ||
20 | |||
21 | #include "libavutil/attributes.h" | ||
22 | #include "libavutil/mastering_display_metadata.h" | ||
23 | #include "libavutil/mem_internal.h" | ||
24 | #include "libavutil/pixdesc.h" | ||
25 | #include "libavutil/thread.h" | ||
26 | |||
27 | #include "apv.h" | ||
28 | #include "apv_decode.h" | ||
29 | #include "apv_dsp.h" | ||
30 | #include "avcodec.h" | ||
31 | #include "cbs.h" | ||
32 | #include "cbs_apv.h" | ||
33 | #include "codec_internal.h" | ||
34 | #include "decode.h" | ||
35 | #include "internal.h" | ||
36 | #include "thread.h" | ||
37 | |||
38 | |||
39 | typedef struct APVDerivedTileInfo { | ||
40 | uint8_t tile_cols; | ||
41 | uint8_t tile_rows; | ||
42 | uint16_t num_tiles; | ||
43 | // The spec uses an extra element on the end of these arrays | ||
44 | // not corresponding to any tile. | ||
45 | uint16_t col_starts[APV_MAX_TILE_COLS + 1]; | ||
46 | uint16_t row_starts[APV_MAX_TILE_ROWS + 1]; | ||
47 | } APVDerivedTileInfo; | ||
48 | |||
49 | typedef struct APVDecodeContext { | ||
50 | CodedBitstreamContext *cbc; | ||
51 | APVDSPContext dsp; | ||
52 | |||
53 | CodedBitstreamFragment au; | ||
54 | APVDerivedTileInfo tile_info; | ||
55 | |||
56 | AVPacket *pkt; | ||
57 | AVFrame *output_frame; | ||
58 | atomic_int tile_errors; | ||
59 | |||
60 | int nb_unit; | ||
61 | |||
62 | uint8_t warned_additional_frames; | ||
63 | uint8_t warned_unknown_pbu_types; | ||
64 | } APVDecodeContext; | ||
65 | |||
66 | static const enum AVPixelFormat apv_format_table[5][5] = { | ||
67 | { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16 }, | ||
68 | { 0 }, // 4:2:0 is not valid. | ||
69 | { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16 }, | ||
70 | { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16 }, | ||
71 | { AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, 0, AV_PIX_FMT_YUVA444P16 }, | ||
72 | }; | ||
73 | |||
74 | static APVVLCLUT decode_lut; | ||
75 | |||
76 | 12 | static int apv_decode_check_format(AVCodecContext *avctx, | |
77 | const APVRawFrameHeader *header) | ||
78 | { | ||
79 | int err, bit_depth; | ||
80 | |||
81 | 12 | avctx->profile = header->frame_info.profile_idc; | |
82 | 12 | avctx->level = header->frame_info.level_idc; | |
83 | |||
84 | 12 | bit_depth = header->frame_info.bit_depth_minus8 + 8; | |
85 | 3/6 ✓ Branch 0 taken 12 times. ✗ Branch 1 not taken. ✓ Branch 2 taken 12 times. ✗ Branch 3 not taken. ✗ Branch 4 not taken. ✓ Branch 5 taken 12 times. | 12 | if (bit_depth < 8 || bit_depth > 16 || bit_depth % 2) {
86 | ✗ | avpriv_request_sample(avctx, "Bit depth %d", bit_depth); | |
87 | ✗ | return AVERROR_PATCHWELCOME; | |
88 | } | ||
89 | 12 | avctx->pix_fmt = | |
90 | 12 | apv_format_table[header->frame_info.chroma_format_idc][bit_depth - 4 >> 2]; | |
91 | |||
92 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 12 times. | 12 | if (!avctx->pix_fmt) {
93 | ✗ | avpriv_request_sample(avctx, "YUVA444P14"); | |
94 | ✗ | return AVERROR_PATCHWELCOME; | |
95 | } | ||
96 | |||
97 | 12 | err = ff_set_dimensions(avctx, | |
98 | 12 | FFALIGN(header->frame_info.frame_width, 16), | |
99 | 12 | FFALIGN(header->frame_info.frame_height, 16)); | |
100 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 12 times. | 12 | if (err < 0) {
101 | // Unsupported frame size. | ||
102 | ✗ | return err; | |
103 | } | ||
104 | 12 | avctx->width = header->frame_info.frame_width; | |
105 | 12 | avctx->height = header->frame_info.frame_height; | |
106 | |||
107 | 12 | avctx->sample_aspect_ratio = (AVRational){ 1, 1 }; | |
108 | |||
109 | 12 | avctx->color_primaries = header->color_primaries; | |
110 | 12 | avctx->color_trc = header->transfer_characteristics; | |
111 | 12 | avctx->colorspace = header->matrix_coefficients; | |
112 | 24 | avctx->color_range = header->full_range_flag ? AVCOL_RANGE_JPEG | |
113 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 12 times. | 12 | : AVCOL_RANGE_MPEG;
114 | 12 | avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; | |
115 | |||
116 | 12 | avctx->refs = 0; | |
117 | 12 | avctx->has_b_frames = 0; | |
118 | |||
119 | 12 | return 0; | |
120 | } | ||
121 | |||
122 | static const CodedBitstreamUnitType apv_decompose_unit_types[] = { | ||
123 | APV_PBU_PRIMARY_FRAME, | ||
124 | APV_PBU_METADATA, | ||
125 | }; | ||
126 | |||
127 | static AVOnce apv_entropy_once = AV_ONCE_INIT; | ||
128 | |||
129 | 6 | static av_cold void apv_entropy_build_decode_lut(void) | |
130 | { | ||
131 | 6 | ff_apv_entropy_build_decode_lut(&decode_lut); | |
132 | 6 | } | |
133 | |||
134 | 8 | static av_cold int apv_decode_init(AVCodecContext *avctx) | |
135 | { | ||
136 | 8 | APVDecodeContext *apv = avctx->priv_data; | |
137 | int err; | ||
138 | |||
139 | 8 | ff_thread_once(&apv_entropy_once, apv_entropy_build_decode_lut); | |
140 | |||
141 | 8 | err = ff_cbs_init(&apv->cbc, AV_CODEC_ID_APV, avctx); | |
142 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8 times. | 8 | if (err < 0)
143 | ✗ | return err; | |
144 | |||
145 | 8 | apv->cbc->decompose_unit_types = | |
146 | apv_decompose_unit_types; | ||
147 | 8 | apv->cbc->nb_decompose_unit_types = | |
148 | FF_ARRAY_ELEMS(apv_decompose_unit_types); | ||
149 | |||
150 | // Extradata could be set here, but is ignored by the decoder. | ||
151 | |||
152 | 8 | apv->pkt = avctx->internal->in_pkt; | |
153 | 8 | ff_apv_dsp_init(&apv->dsp); | |
154 | |||
155 | 8 | atomic_init(&apv->tile_errors, 0); | |
156 | |||
157 | 8 | return 0; | |
158 | } | ||
159 | |||
160 | ✗ | static av_cold void apv_decode_flush(AVCodecContext *avctx) | |
161 | { | ||
162 | ✗ | APVDecodeContext *apv = avctx->priv_data; | |
163 | |||
164 | ✗ | apv->nb_unit = 0; | |
165 | ✗ | av_packet_unref(apv->pkt); | |
166 | ✗ | ff_cbs_fragment_reset(&apv->au); | |
167 | ✗ | ff_cbs_flush(apv->cbc); | |
168 | ✗ | } | |
169 | |||
170 | 8 | static av_cold int apv_decode_close(AVCodecContext *avctx) | |
171 | { | ||
172 | 8 | APVDecodeContext *apv = avctx->priv_data; | |
173 | |||
174 | 8 | ff_cbs_fragment_free(&apv->au); | |
175 | 8 | ff_cbs_close(&apv->cbc); | |
176 | |||
177 | 8 | return 0; | |
178 | } | ||
179 | |||
180 | 8640 | static int apv_decode_block(AVCodecContext *avctx, | |
181 | void *output, | ||
182 | ptrdiff_t pitch, | ||
183 | GetBitContext *gbc, | ||
184 | APVEntropyState *entropy_state, | ||
185 | int bit_depth, | ||
186 | int qp_shift, | ||
187 | const uint16_t *qmatrix) | ||
188 | { | ||
189 | 8640 | APVDecodeContext *apv = avctx->priv_data; | |
190 | int err; | ||
191 | |||
192 | 8640 | LOCAL_ALIGNED_32(int16_t, coeff, [64]); | |
193 | 8640 | memset(coeff, 0, 64 * sizeof(int16_t)); | |
194 | |||
195 | 8640 | err = ff_apv_entropy_decode_block(coeff, gbc, entropy_state); | |
196 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8640 times. | 8640 | if (err < 0)
197 | ✗ | return err; | |
198 | |||
199 | 8640 | apv->dsp.decode_transquant(output, pitch, | |
200 | coeff, qmatrix, | ||
201 | bit_depth, qp_shift); | ||
202 | |||
203 | 8640 | return 0; | |
204 | } | ||
205 | |||
206 | 24 | static int apv_decode_tile_component(AVCodecContext *avctx, void *data, | |
207 | int job, int thread) | ||
208 | { | ||
209 | 24 | APVRawFrame *input = data; | |
210 | 24 | APVDecodeContext *apv = avctx->priv_data; | |
211 | 24 | const CodedBitstreamAPVContext *apv_cbc = apv->cbc->priv_data; | |
212 | 24 | const APVDerivedTileInfo *tile_info = &apv->tile_info; | |
213 | |||
214 | 24 | int tile_index = job / apv_cbc->num_comp; | |
215 | 24 | int comp_index = job % apv_cbc->num_comp; | |
216 | |||
217 | const AVPixFmtDescriptor *pix_fmt_desc = | ||
218 | 24 | av_pix_fmt_desc_get(avctx->pix_fmt); | |
219 | |||
220 | 2/2 ✓ Branch 0 taken 12 times. ✓ Branch 1 taken 12 times. | 24 | int sub_w_shift = comp_index == 0 ? 0 : pix_fmt_desc->log2_chroma_w;
221 | 2/2 ✓ Branch 0 taken 12 times. ✓ Branch 1 taken 12 times. | 24 | int sub_h_shift = comp_index == 0 ? 0 : pix_fmt_desc->log2_chroma_h;
222 | |||
223 | 24 | APVRawTile *tile = &input->tile[tile_index]; | |
224 | |||
225 | 24 | int tile_y = tile_index / tile_info->tile_cols; | |
226 | 24 | int tile_x = tile_index % tile_info->tile_cols; | |
227 | |||
228 | 24 | int tile_start_x = tile_info->col_starts[tile_x]; | |
229 | 24 | int tile_start_y = tile_info->row_starts[tile_y]; | |
230 | |||
231 | 24 | int tile_width = tile_info->col_starts[tile_x + 1] - tile_start_x; | |
232 | 24 | int tile_height = tile_info->row_starts[tile_y + 1] - tile_start_y; | |
233 | |||
234 | 24 | int tile_mb_width = tile_width / APV_MB_WIDTH; | |
235 | 24 | int tile_mb_height = tile_height / APV_MB_HEIGHT; | |
236 | |||
237 | 24 | int blk_mb_width = 2 >> sub_w_shift; | |
238 | 24 | int blk_mb_height = 2 >> sub_h_shift; | |
239 | |||
240 | int bit_depth; | ||
241 | int qp_shift; | ||
242 | 24 | LOCAL_ALIGNED_32(uint16_t, qmatrix_scaled, [64]); | |
243 | |||
244 | GetBitContext gbc; | ||
245 | |||
246 | 24 | APVEntropyState entropy_state = { | |
247 | .log_ctx = avctx, | ||
248 | .decode_lut = &decode_lut, | ||
249 | .prev_dc = 0, | ||
250 | .prev_k_dc = 5, | ||
251 | .prev_k_level = 0, | ||
252 | }; | ||
253 | |||
254 | int err; | ||
255 | |||
256 | 24 | err = init_get_bits8(&gbc, tile->tile_data[comp_index], | |
257 | 24 | tile->tile_header.tile_data_size[comp_index]); | |
258 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 24 times. | 24 | if (err < 0)
259 | ✗ | goto fail; | |
260 | |||
261 | // Combine the bitstream quantisation matrix with the qp scaling | ||
262 | // in advance. (Including qp_shift as well would overflow 16 bits.) | ||
263 | // Fix the row ordering at the same time. | ||
264 | { | ||
265 | static const uint8_t apv_level_scale[6] = { 40, 45, 51, 57, 64, 71 }; | ||
266 | 24 | int qp = tile->tile_header.tile_qp[comp_index]; | |
267 | 24 | int level_scale = apv_level_scale[qp % 6]; | |
268 | |||
269 | 24 | bit_depth = apv_cbc->bit_depth; | |
270 | 24 | qp_shift = qp / 6; | |
271 | |||
272 | 2/2 ✓ Branch 0 taken 192 times. ✓ Branch 1 taken 24 times. | 216 | for (int y = 0; y < 8; y++) {
273 | 2/2 ✓ Branch 0 taken 1536 times. ✓ Branch 1 taken 192 times. | 1728 | for (int x = 0; x < 8; x++)
274 | 1536 | qmatrix_scaled[y * 8 + x] = level_scale * | |
275 | 1536 | input->frame_header.quantization_matrix.q_matrix[comp_index][x][y]; | |
276 | } | ||
277 | } | ||
278 | |||
279 | 2/2 ✓ Branch 0 taken 288 times. ✓ Branch 1 taken 24 times. | 312 | for (int mb_y = 0; mb_y < tile_mb_height; mb_y++) {
280 | 2/2 ✓ Branch 0 taken 2880 times. ✓ Branch 1 taken 288 times. | 3168 | for (int mb_x = 0; mb_x < tile_mb_width; mb_x++) {
281 | 2/2 ✓ Branch 0 taken 5760 times. ✓ Branch 1 taken 2880 times. | 8640 | for (int blk_y = 0; blk_y < blk_mb_height; blk_y++) {
282 | 2/2 ✓ Branch 0 taken 8640 times. ✓ Branch 1 taken 5760 times. | 14400 | for (int blk_x = 0; blk_x < blk_mb_width; blk_x++) {
283 | 8640 | int frame_y = (tile_start_y + | |
284 | 8640 | APV_MB_HEIGHT * mb_y + | |
285 | 8640 | APV_TR_SIZE * blk_y) >> sub_h_shift; | |
286 | 8640 | int frame_x = (tile_start_x + | |
287 | 8640 | APV_MB_WIDTH * mb_x + | |
288 | 8640 | APV_TR_SIZE * blk_x) >> sub_w_shift; | |
289 | |||
290 | 8640 | ptrdiff_t frame_pitch = apv->output_frame->linesize[comp_index]; | |
291 | 8640 | uint8_t *block_start = apv->output_frame->data[comp_index] + | |
292 | 8640 | frame_y * frame_pitch + 2 * frame_x; | |
293 | |||
294 | 8640 | err = apv_decode_block(avctx, | |
295 | block_start, frame_pitch, | ||
296 | &gbc, &entropy_state, | ||
297 | bit_depth, | ||
298 | qp_shift, | ||
299 | qmatrix_scaled); | ||
300 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 8640 times. | 8640 | if (err < 0) {
301 | // Error in block decode means entropy desync, | ||
302 | // so this is not recoverable. | ||
303 | ✗ | goto fail; | |
304 | } | ||
305 | } | ||
306 | } | ||
307 | } | ||
308 | } | ||
309 | |||
310 | 24 | av_log(avctx, AV_LOG_DEBUG, | |
311 | "Decoded tile %d component %d: %dx%d MBs starting at (%d,%d)\n", | ||
312 | tile_index, comp_index, tile_mb_width, tile_mb_height, | ||
313 | tile_start_x, tile_start_y); | ||
314 | |||
315 | 24 | return 0; | |
316 | |||
317 | ✗ | fail: | |
318 | ✗ | av_log(avctx, AV_LOG_VERBOSE, | |
319 | "Decode error in tile %d component %d.\n", | ||
320 | tile_index, comp_index); | ||
321 | ✗ | atomic_fetch_add_explicit(&apv->tile_errors, 1, memory_order_relaxed); | |
322 | ✗ | return err; | |
323 | } | ||
324 | |||
325 | 6 | static void apv_derive_tile_info(APVDerivedTileInfo *ti, | |
326 | const APVRawFrameHeader *fh) | ||
327 | { | ||
328 | 6 | int frame_width_in_mbs = (fh->frame_info.frame_width + (APV_MB_WIDTH - 1)) >> 4; | |
329 | 6 | int frame_height_in_mbs = (fh->frame_info.frame_height + (APV_MB_HEIGHT - 1)) >> 4; | |
330 | int start_mb, i; | ||
331 | |||
332 | 6 | start_mb = 0; | |
333 | 2/2 ✓ Branch 0 taken 12 times. ✓ Branch 1 taken 6 times. | 18 | for (i = 0; start_mb < frame_width_in_mbs; i++) {
334 | 12 | ti->col_starts[i] = start_mb * APV_MB_WIDTH; | |
335 | 12 | start_mb += fh->tile_info.tile_width_in_mbs; | |
336 | } | ||
337 | 6 | ti->col_starts[i] = frame_width_in_mbs * APV_MB_WIDTH; | |
338 | 6 | ti->tile_cols = i; | |
339 | |||
340 | 6 | start_mb = 0; | |
341 | 2/2 ✓ Branch 0 taken 6 times. ✓ Branch 1 taken 6 times. | 12 | for (i = 0; start_mb < frame_height_in_mbs; i++) {
342 | 6 | ti->row_starts[i] = start_mb * APV_MB_HEIGHT; | |
343 | 6 | start_mb += fh->tile_info.tile_height_in_mbs; | |
344 | } | ||
345 | 6 | ti->row_starts[i] = frame_height_in_mbs * APV_MB_HEIGHT; | |
346 | 6 | ti->tile_rows = i; | |
347 | |||
348 | 6 | ti->num_tiles = ti->tile_cols * ti->tile_rows; | |
349 | 6 | } | |
350 | |||
351 | 12 | static int apv_decode(AVCodecContext *avctx, AVFrame *output, | |
352 | APVRawFrame *input) | ||
353 | { | ||
354 | 12 | APVDecodeContext *apv = avctx->priv_data; | |
355 | 12 | const AVPixFmtDescriptor *desc = NULL; | |
356 | 12 | APVDerivedTileInfo *tile_info = &apv->tile_info; | |
357 | int err, job_count; | ||
358 | |||
359 | 12 | err = apv_decode_check_format(avctx, &input->frame_header); | |
360 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 12 times. | 12 | if (err < 0) {
361 | ✗ | av_log(avctx, AV_LOG_ERROR, "Unsupported format parameters.\n"); | |
362 | ✗ | return err; | |
363 | } | ||
364 | |||
365 | 2/2 ✓ Branch 0 taken 6 times. ✓ Branch 1 taken 6 times. | 12 | if (avctx->skip_frame == AVDISCARD_ALL)
366 | 6 | return 0; | |
367 | |||
368 | 6 | desc = av_pix_fmt_desc_get(avctx->pix_fmt); | |
369 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6 times. | 6 | av_assert0(desc);
370 | |||
371 | 6 | err = ff_thread_get_buffer(avctx, output, 0); | |
372 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6 times. | 6 | if (err < 0)
373 | ✗ | return err; | |
374 | |||
375 | 6 | apv->output_frame = output; | |
376 | 6 | atomic_store_explicit(&apv->tile_errors, 0, memory_order_relaxed); | |
377 | |||
378 | 6 | apv_derive_tile_info(tile_info, &input->frame_header); | |
379 | |||
380 | // Each component within a tile is independent of every other, | ||
381 | // so we can decode all in parallel. | ||
382 | 6 | job_count = tile_info->num_tiles * desc->nb_components; | |
383 | |||
384 | 6 | avctx->execute2(avctx, apv_decode_tile_component, | |
385 | input, NULL, job_count); | ||
386 | |||
387 | 6 | err = atomic_load_explicit(&apv->tile_errors, memory_order_relaxed); | |
388 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 6 times. | 6 | if (err > 0) {
389 | ✗ | av_log(avctx, AV_LOG_ERROR, | |
390 | "Decode errors in %d tile components.\n", err); | ||
391 | ✗ | if (avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) { | |
392 | // Output the frame anyway. | ||
393 | ✗ | output->flags |= AV_FRAME_FLAG_CORRUPT; | |
394 | } else { | ||
395 | ✗ | return AVERROR_INVALIDDATA; | |
396 | } | ||
397 | } | ||
398 | |||
399 | 6 | return 0; | |
400 | } | ||
401 | |||
402 | 12 | static int apv_decode_metadata(AVCodecContext *avctx, AVFrame *frame, | |
403 | const APVRawMetadata *md) | ||
404 | { | ||
405 | int err; | ||
406 | |||
407 | 2/2 ✓ Branch 0 taken 24 times. ✓ Branch 1 taken 12 times. | 36 | for (int i = 0; i < md->metadata_count; i++) {
408 | 24 | const APVRawMetadataPayload *pl = &md->payloads[i]; | |
409 | |||
410 | 1/3 ✗ Branch 0 not taken. ✗ Branch 1 not taken. ✓ Branch 2 taken 24 times. | 24 | switch (pl->payload_type) {
411 | ✗ | case APV_METADATA_MDCV: | |
412 | { | ||
413 | ✗ | const APVRawMetadataMDCV *mdcv = &pl->mdcv; | |
414 | AVMasteringDisplayMetadata *mdm; | ||
415 | |||
416 | ✗ | err = ff_decode_mastering_display_new(avctx, frame, &mdm); | |
417 | ✗ | if (err < 0) | |
418 | ✗ | return err; | |
419 | |||
420 | ✗ | if (mdm) { | |
421 | ✗ | for (int j = 0; j < 3; j++) { | |
422 | ✗ | mdm->display_primaries[j][0] = | |
423 | ✗ | av_make_q(mdcv->primary_chromaticity_x[j], 1 << 16); | |
424 | ✗ | mdm->display_primaries[j][1] = | |
425 | ✗ | av_make_q(mdcv->primary_chromaticity_y[j], 1 << 16); | |
426 | } | ||
427 | |||
428 | ✗ | mdm->white_point[0] = | |
429 | ✗ | av_make_q(mdcv->white_point_chromaticity_x, 1 << 16); | |
430 | ✗ | mdm->white_point[1] = | |
431 | ✗ | av_make_q(mdcv->white_point_chromaticity_y, 1 << 16); | |
432 | |||
433 | ✗ | mdm->max_luminance = | |
434 | ✗ | av_make_q(mdcv->max_mastering_luminance, 1 << 8); | |
435 | ✗ | mdm->min_luminance = | |
436 | ✗ | av_make_q(mdcv->min_mastering_luminance, 1 << 14); | |
437 | |||
438 | ✗ | mdm->has_primaries = 1; | |
439 | ✗ | mdm->has_luminance = 1; | |
440 | } | ||
441 | } | ||
442 | ✗ | break; | |
443 | ✗ | case APV_METADATA_CLL: | |
444 | { | ||
445 | ✗ | const APVRawMetadataCLL *cll = &pl->cll; | |
446 | AVContentLightMetadata *clm; | ||
447 | |||
448 | ✗ | err = ff_decode_content_light_new(avctx, frame, &clm); | |
449 | ✗ | if (err < 0) | |
450 | ✗ | return err; | |
451 | |||
452 | ✗ | if (clm) { | |
453 | ✗ | clm->MaxCLL = cll->max_cll; | |
454 | ✗ | clm->MaxFALL = cll->max_fall; | |
455 | } | ||
456 | } | ||
457 | ✗ | break; | |
458 | 24 | default: | |
459 | // Ignore other types of metadata. | ||
460 | 24 | break; | |
461 | } | ||
462 | } | ||
463 | |||
464 | 12 | return 0; | |
465 | } | ||
466 | |||
467 | 24 | static int apv_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame) | |
468 | { | ||
469 | 24 | APVDecodeContext *apv = avctx->priv_data; | |
470 | 24 | CodedBitstreamFragment *au = &apv->au; | |
471 | int i, err; | ||
472 | |||
473 | 2/2 ✓ Branch 0 taken 24 times. ✓ Branch 1 taken 12 times. | 36 | for (i = apv->nb_unit; i < au->nb_units; i++) {
474 | 24 | CodedBitstreamUnit *pbu = &au->units[i]; | |
475 | |||
476 | 2/5 ✓ Branch 0 taken 12 times. ✓ Branch 1 taken 12 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. ✗ Branch 4 not taken. | 24 | switch (pbu->type) {
477 | 12 | case APV_PBU_PRIMARY_FRAME: | |
478 | 12 | err = apv_decode(avctx, frame, pbu->content); | |
479 | 12 | i++; | |
480 | 12 | goto end; | |
481 | 12 | case APV_PBU_METADATA: | |
482 | 12 | apv_decode_metadata(avctx, frame, pbu->content); | |
483 | 12 | break; | |
484 | ✗ | case APV_PBU_NON_PRIMARY_FRAME: | |
485 | case APV_PBU_PREVIEW_FRAME: | ||
486 | case APV_PBU_DEPTH_FRAME: | ||
487 | case APV_PBU_ALPHA_FRAME: | ||
488 | ✗ | if (!avctx->internal->is_copy && | |
489 | ✗ | !apv->warned_additional_frames) { | |
490 | ✗ | av_log(avctx, AV_LOG_WARNING, | |
491 | "Stream contains additional non-primary frames " | ||
492 | "which will be ignored by the decoder.\n"); | ||
493 | ✗ | apv->warned_additional_frames = 1; | |
494 | } | ||
495 | ✗ | break; | |
496 | ✗ | case APV_PBU_ACCESS_UNIT_INFORMATION: | |
497 | case APV_PBU_FILLER: | ||
498 | // Not relevant to the decoder. | ||
499 | ✗ | break; | |
500 | ✗ | default: | |
501 | ✗ | if (!avctx->internal->is_copy && | |
502 | ✗ | !apv->warned_unknown_pbu_types) { | |
503 | ✗ | av_log(avctx, AV_LOG_WARNING, | |
504 | "Stream contains PBUs with unknown types " | ||
505 | "which will be ignored by the decoder.\n"); | ||
506 | ✗ | apv->warned_unknown_pbu_types = 1; | |
507 | } | ||
508 | ✗ | break; | |
509 | } | ||
510 | } | ||
511 | |||
512 | 12 | err = AVERROR(EAGAIN); | |
513 | 24 | end: | |
514 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 24 times. | 24 | av_assert0(i <= apv->au.nb_units);
515 | 24 | apv->nb_unit = i; | |
516 | |||
517 | 5/6 ✓ Branch 0 taken 12 times. ✓ Branch 1 taken 12 times. ✓ Branch 2 taken 12 times. ✗ Branch 3 not taken. ✓ Branch 4 taken 12 times. ✓ Branch 5 taken 12 times. | 24 | if ((err < 0 && err != AVERROR(EAGAIN)) || apv->au.nb_units == i) {
518 | 12 | av_packet_unref(apv->pkt); | |
519 | 12 | ff_cbs_fragment_reset(&apv->au); | |
520 | 12 | apv->nb_unit = 0; | |
521 | } | ||
522 | 4/4 ✓ Branch 0 taken 12 times. ✓ Branch 1 taken 12 times. ✓ Branch 2 taken 6 times. ✓ Branch 3 taken 6 times. | 24 | if (!err && !frame->buf[0])
523 | 6 | err = AVERROR(EAGAIN); | |
524 | |||
525 | 24 | return err; | |
526 | } | ||
527 | |||
528 | 26 | static int apv_receive_frame(AVCodecContext *avctx, AVFrame *frame) | |
529 | { | ||
530 | 26 | APVDecodeContext *apv = avctx->priv_data; | |
531 | int err; | ||
532 | |||
533 | do { | ||
534 | 2/2 ✓ Branch 0 taken 32 times. ✓ Branch 1 taken 12 times. | 44 | if (!apv->au.nb_units) {
535 | 32 | err = ff_decode_get_packet(avctx, apv->pkt); | |
536 | 2/2 ✓ Branch 0 taken 20 times. ✓ Branch 1 taken 12 times. | 32 | if (err < 0)
537 | 20 | return err; | |
538 | |||
539 | 12 | err = ff_cbs_read_packet(apv->cbc, &apv->au, apv->pkt); | |
540 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 12 times. | 12 | if (err < 0) {
541 | ✗ | ff_cbs_fragment_reset(&apv->au); | |
542 | ✗ | av_packet_unref(apv->pkt); | |
543 | ✗ | av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n"); | |
544 | ✗ | return err; | |
545 | } | ||
546 | |||
547 | 12 | apv->nb_unit = 0; | |
548 | 12 | av_log(avctx, AV_LOG_DEBUG, "Total PBUs on this packet: %d.\n", | |
549 | apv->au.nb_units); | ||
550 | } | ||
551 | |||
552 | 24 | err = apv_receive_frame_internal(avctx, frame); | |
553 | 2/2 ✓ Branch 0 taken 18 times. ✓ Branch 1 taken 6 times. | 24 | } while (err == AVERROR(EAGAIN));
554 | |||
555 | 6 | return err; | |
556 | } | ||
557 | |||
558 | const FFCodec ff_apv_decoder = { | ||
559 | .p.name = "apv", | ||
560 | CODEC_LONG_NAME("Advanced Professional Video"), | ||
561 | .p.type = AVMEDIA_TYPE_VIDEO, | ||
562 | .p.id = AV_CODEC_ID_APV, | ||
563 | .priv_data_size = sizeof(APVDecodeContext), | ||
564 | .init = apv_decode_init, | ||
565 | .flush = apv_decode_flush, | ||
566 | .close = apv_decode_close, | ||
567 | FF_CODEC_RECEIVE_FRAME_CB(apv_receive_frame), | ||
568 | .p.capabilities = AV_CODEC_CAP_DR1 | | ||
569 | AV_CODEC_CAP_SLICE_THREADS | | ||
570 | AV_CODEC_CAP_FRAME_THREADS, | ||
571 | .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, | ||
572 | }; | ||
573 |