/*
 * AV1 video decoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/film_grain_params.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "av1dec.h"
#include "bytestream.h"
#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"
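
/**
 * Helpers for decoding the global motion parameters: inverse_recenter() and
 * the subexponential decoders below mirror the corresponding processes from
 * the AV1 specification, recovering a signed value relative to a reference
 * taken from the previous frame's parameters.
 */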
static uint32_t inverse_recenter(int r, uint32_t v)
{
    if (v > 2 * r)
        return v;
    else if (v & 1)
        return r - ((v + 1) >> 1);
    else
        return r + (v >> 1);
}

static uint32_t decode_unsigned_subexp_with_ref(uint32_t sub_exp,
                                                int mx, int r)
{
    if ((r << 1) <= mx) {
        return inverse_recenter(r, sub_exp);
    } else {
        return mx - 1 - inverse_recenter(mx - 1 - r, sub_exp);
    }
}

static int32_t decode_signed_subexp_with_ref(uint32_t sub_exp, int low,
                                             int high, int r)
{
    int32_t x = decode_unsigned_subexp_with_ref(sub_exp, high - low, r - low);
    return x + low;
}

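/**
 * Decode one global motion parameter for the given reference and index,
 * using the previous frame's value (or the defaults) as the prediction.
 */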
static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
{
    uint8_t primary_frame, prev_frame;
    uint32_t abs_bits, prec_bits, round, prec_diff, sub, mx;
    int32_t r, prev_gm_param;

    primary_frame = s->raw_frame_header->primary_ref_frame;
    prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
    abs_bits = AV1_GM_ABS_ALPHA_BITS;
    prec_bits = AV1_GM_ALPHA_PREC_BITS;

    /* setup_past_independence() sets PrevGmParams to default values. We can
     * simply point to the current frame's gm_params as they will be
     * initialized with defaults at this point.
     */
    if (s->raw_frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE)
        prev_gm_param = s->cur_frame.gm_params[ref][idx];
    else
        prev_gm_param = s->ref[prev_frame].gm_params[ref][idx];

    if (idx < 2) {
        if (type == AV1_WARP_MODEL_TRANSLATION) {
            abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS -
                !s->raw_frame_header->allow_high_precision_mv;
            prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS -
                !s->raw_frame_header->allow_high_precision_mv;
        } else {
            abs_bits = AV1_GM_ABS_TRANS_BITS;
            prec_bits = AV1_GM_TRANS_PREC_BITS;
        }
    }
    round = (idx % 3) == 2 ? (1 << AV1_WARPEDMODEL_PREC_BITS) : 0;
    prec_diff = AV1_WARPEDMODEL_PREC_BITS - prec_bits;
    sub = (idx % 3) == 2 ? (1 << prec_bits) : 0;
    mx = 1 << abs_bits;
    r = (prev_gm_param >> prec_diff) - sub;

    s->cur_frame.gm_params[ref][idx] =
        (decode_signed_subexp_with_ref(s->raw_frame_header->gm_params[ref][idx],
                                       -mx, mx + 1, r) << prec_diff) + round;
}

/**
 * Update the global motion type and parameters. CBS already implements part
 * of this process, so only the remaining steps of the specification need to
 * be performed here.
 */
static void global_motion_params(AV1DecContext *s)
{
    const AV1RawFrameHeader *header = s->raw_frame_header;
    int type, ref;

    for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        s->cur_frame.gm_type[ref] = AV1_WARP_MODEL_IDENTITY;
        for (int i = 0; i < 6; i++)
            s->cur_frame.gm_params[ref][i] = (i % 3 == 2) ?
                                             1 << AV1_WARPEDMODEL_PREC_BITS : 0;
    }
    if (header->frame_type == AV1_FRAME_KEY ||
        header->frame_type == AV1_FRAME_INTRA_ONLY)
        return;

    for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        if (header->is_global[ref]) {
            if (header->is_rot_zoom[ref]) {
                type = AV1_WARP_MODEL_ROTZOOM;
            } else {
                type = header->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
                                                   : AV1_WARP_MODEL_AFFINE;
            }
        } else {
            type = AV1_WARP_MODEL_IDENTITY;
        }
        s->cur_frame.gm_type[ref] = type;

        if (type >= AV1_WARP_MODEL_ROTZOOM) {
            read_global_param(s, type, ref, 2);
            read_global_param(s, type, ref, 3);
            if (type == AV1_WARP_MODEL_AFFINE) {
                read_global_param(s, type, ref, 4);
                read_global_param(s, type, ref, 5);
            } else {
                s->cur_frame.gm_params[ref][4] = -s->cur_frame.gm_params[ref][3];
                s->cur_frame.gm_params[ref][5] = s->cur_frame.gm_params[ref][2];
            }
        }
        if (type >= AV1_WARP_MODEL_TRANSLATION) {
            read_global_param(s, type, ref, 0);
            read_global_param(s, type, ref, 1);
        }
    }
}

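/**
 * Compute the signed distance between two order hints, wrapping according to
 * the number of order hint bits signalled in the sequence header.
 */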
static int get_relative_dist(const AV1RawSequenceHeader *seq,
                             unsigned int a, unsigned int b)
{
    unsigned int diff = a - b;
    unsigned int m = 1 << seq->order_hint_bits_minus_1;
    return (diff & (m - 1)) - (diff & m);
}

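/**
 * Select the two reference frames used by skip mode: the closest forward and
 * backward references, or the two closest forward references when no
 * backward reference exists.
 */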
static void skip_mode_params(AV1DecContext *s)
{
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawSequenceHeader *seq = s->raw_seq;

    int forward_idx, backward_idx;
    int forward_hint, backward_hint;
    int second_forward_idx, second_forward_hint;
    int ref_hint, dist, i;

    if (!header->skip_mode_present)
        return;

    forward_idx  = -1;
    backward_idx = -1;
    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        ref_hint = s->ref[header->ref_frame_idx[i]].raw_frame_header->order_hint;
        dist = get_relative_dist(seq, ref_hint, header->order_hint);
        if (dist < 0) {
            if (forward_idx < 0 ||
                get_relative_dist(seq, ref_hint, forward_hint) > 0) {
                forward_idx  = i;
                forward_hint = ref_hint;
            }
        } else if (dist > 0) {
            if (backward_idx < 0 ||
                get_relative_dist(seq, ref_hint, backward_hint) < 0) {
                backward_idx  = i;
                backward_hint = ref_hint;
            }
        }
    }

    if (forward_idx < 0) {
        return;
    } else if (backward_idx >= 0) {
        s->cur_frame.skip_mode_frame_idx[0] =
            AV1_REF_FRAME_LAST + FFMIN(forward_idx, backward_idx);
        s->cur_frame.skip_mode_frame_idx[1] =
            AV1_REF_FRAME_LAST + FFMAX(forward_idx, backward_idx);
        return;
    }

    second_forward_idx = -1;
    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        ref_hint = s->ref[header->ref_frame_idx[i]].raw_frame_header->order_hint;
        if (get_relative_dist(seq, ref_hint, forward_hint) < 0) {
            if (second_forward_idx < 0 ||
                get_relative_dist(seq, ref_hint, second_forward_hint) > 0) {
                second_forward_idx  = i;
                second_forward_hint = ref_hint;
            }
        }
    }

    if (second_forward_idx < 0)
        return;

    s->cur_frame.skip_mode_frame_idx[0] =
        AV1_REF_FRAME_LAST + FFMIN(forward_idx, second_forward_idx);
    s->cur_frame.skip_mode_frame_idx[1] =
        AV1_REF_FRAME_LAST + FFMAX(forward_idx, second_forward_idx);
}

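/**
 * Set cur_frame.coded_lossless: the frame is coded lossless only if every
 * segment ends up with a qindex of 0 and all delta quantizers are 0.
 */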
static void coded_lossless_param(AV1DecContext *s)
{
    const AV1RawFrameHeader *header = s->raw_frame_header;
    int i;

    if (header->delta_q_y_dc || header->delta_q_u_ac ||
        header->delta_q_u_dc || header->delta_q_v_ac ||
        header->delta_q_v_dc) {
        s->cur_frame.coded_lossless = 0;
        return;
    }

    s->cur_frame.coded_lossless = 1;
    for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
        int qindex;
        if (header->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
            qindex = (header->base_q_idx +
                      header->feature_value[i][AV1_SEG_LVL_ALT_Q]);
        } else {
            qindex = header->base_q_idx;
        }
        qindex = av_clip_uintp2(qindex, 8);

        if (qindex) {
            s->cur_frame.coded_lossless = 0;
            return;
        }
    }
}

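/**
 * Load the film grain parameters for the current frame, either taking the
 * newly signalled values or copying them from the indicated reference frame.
 */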
static void load_grain_params(AV1DecContext *s)
{
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &header->film_grain, *src;
    AV1RawFilmGrainParams *dst = &s->cur_frame.film_grain;

    if (!film_grain->apply_grain)
        return;

    if (film_grain->update_grain) {
        memcpy(dst, film_grain, sizeof(*dst));
        return;
    }

    src = &s->ref[film_grain->film_grain_params_ref_idx].film_grain;

    memcpy(dst, src, sizeof(*dst));
    dst->grain_seed = film_grain->grain_seed;
}

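/**
 * Make sure the tile_group_info array is large enough for the tile layout of
 * the current frame.
 */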
static int init_tile_data(AV1DecContext *s)
{
    int cur_tile_num =
        s->raw_frame_header->tile_cols * s->raw_frame_header->tile_rows;
    if (s->tile_num < cur_tile_num) {
        int ret = av_reallocp_array(&s->tile_group_info, cur_tile_num,
                                    sizeof(TileGroupInfo));
        if (ret < 0) {
            s->tile_num = 0;
            return ret;
        }
    }
    s->tile_num = cur_tile_num;

    return 0;
}

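/**
 * Parse the size, offset and position of every tile in a tile group so the
 * data can later be passed to the hardware accelerator slice by slice.
 */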
static int get_tiles_info(AVCodecContext *avctx, const AV1RawTileGroup *tile_group)
{
    AV1DecContext *s = avctx->priv_data;
    GetByteContext gb;
    uint16_t tile_num, tile_row, tile_col;
    uint32_t size = 0, size_bytes = 0;

    bytestream2_init(&gb, tile_group->tile_data.data,
                     tile_group->tile_data.data_size);
    s->tg_start = tile_group->tg_start;
    s->tg_end = tile_group->tg_end;

    for (tile_num = tile_group->tg_start; tile_num <= tile_group->tg_end; tile_num++) {
        tile_row = tile_num / s->raw_frame_header->tile_cols;
        tile_col = tile_num % s->raw_frame_header->tile_cols;

        if (tile_num == tile_group->tg_end) {
            s->tile_group_info[tile_num].tile_size = bytestream2_get_bytes_left(&gb);
            s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
            s->tile_group_info[tile_num].tile_row = tile_row;
            s->tile_group_info[tile_num].tile_column = tile_col;
            return 0;
        }
        size_bytes = s->raw_frame_header->tile_size_bytes_minus1 + 1;
        if (bytestream2_get_bytes_left(&gb) < size_bytes)
            return AVERROR_INVALIDDATA;
        size = 0;
        for (int i = 0; i < size_bytes; i++)
            size |= bytestream2_get_byteu(&gb) << 8 * i;
        if (bytestream2_get_bytes_left(&gb) <= size)
            return AVERROR_INVALIDDATA;
        size++;

        s->tile_group_info[tile_num].tile_size = size;
        s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
        s->tile_group_info[tile_num].tile_row = tile_row;
        s->tile_group_info[tile_num].tile_column = tile_col;

        bytestream2_skipu(&gb, size);
    }

    return 0;
}

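/**
 * Derive the software pixel format from the sequence header and negotiate a
 * hardware pixel format with ff_thread_get_format().
 */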
static int get_pixel_format(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    uint8_t bit_depth;
    int ret;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
#define HWACCEL_MAX (CONFIG_AV1_DXVA2_HWACCEL + \
                     CONFIG_AV1_D3D11VA_HWACCEL * 2 + \
                     CONFIG_AV1_NVDEC_HWACCEL + \
                     CONFIG_AV1_VAAPI_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Unknown AV1 profile %d.\n", seq->seq_profile);
        return -1;
    }

    if (!seq->color_config.mono_chrome) {
        // 4:4:4 x:0 y:0, 4:2:2 x:1 y:0, 4:2:0 x:1 y:1
        if (seq->color_config.subsampling_x == 0 &&
            seq->color_config.subsampling_y == 0) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV444P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV444P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV444P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        } else if (seq->color_config.subsampling_x == 1 &&
                   seq->color_config.subsampling_y == 0) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV422P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV422P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV422P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        } else if (seq->color_config.subsampling_x == 1 &&
                   seq->color_config.subsampling_y == 1) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV420P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV420P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV420P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        }
    } else {
        if (bit_depth == 8)
            pix_fmt = AV_PIX_FMT_GRAY8;
        else if (bit_depth == 10)
            pix_fmt = AV_PIX_FMT_GRAY10;
        else if (bit_depth == 12)
            pix_fmt = AV_PIX_FMT_GRAY12;
        else
            av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
    }

    av_log(avctx, AV_LOG_DEBUG, "AV1 decode get format: %s.\n",
           av_get_pix_fmt_name(pix_fmt));

    if (pix_fmt == AV_PIX_FMT_NONE)
        return -1;
    s->pix_fmt = pix_fmt;

    switch (s->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
#if CONFIG_AV1_DXVA2_HWACCEL
        *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_AV1_D3D11VA_HWACCEL
        *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
        *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_AV1_NVDEC_HWACCEL
        *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_AV1_VAAPI_HWACCEL
        *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
        break;
    case AV_PIX_FMT_YUV420P10:
#if CONFIG_AV1_DXVA2_HWACCEL
        *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_AV1_D3D11VA_HWACCEL
        *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
        *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_AV1_NVDEC_HWACCEL
        *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_AV1_VAAPI_HWACCEL
        *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
        break;
    case AV_PIX_FMT_GRAY8:
#if CONFIG_AV1_NVDEC_HWACCEL
        *fmtp++ = AV_PIX_FMT_CUDA;
#endif
        break;
    case AV_PIX_FMT_GRAY10:
#if CONFIG_AV1_NVDEC_HWACCEL
        *fmtp++ = AV_PIX_FMT_CUDA;
#endif
        break;
    }

    *fmtp++ = s->pix_fmt;
    *fmtp = AV_PIX_FMT_NONE;

    ret = ff_thread_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;

    /**
     * Check whether the hardware accelerator was initialized correctly; if
     * not, return "not implemented". The AV1 decoder currently has no native
     * decoding path, so this check must be removed once one is added.
     */
    if (!avctx->hwaccel) {
        av_log(avctx, AV_LOG_ERROR, "Your platform doesn't support"
               " hardware accelerated AV1 decoding.\n");
        return AVERROR(ENOSYS);
    }

    avctx->pix_fmt = ret;

    return 0;
}

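/** Release all buffers and reset the per-frame state of an AV1Frame. */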
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
{
    ff_thread_release_buffer(avctx, &f->tf);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->hwaccel_picture_private = NULL;
    av_buffer_unref(&f->header_ref);
    f->raw_frame_header = NULL;
    f->spatial_id = f->temporal_id = 0;
    memset(f->skip_mode_frame_idx, 0,
           2 * sizeof(uint8_t));
    memset(&f->film_grain, 0, sizeof(f->film_grain));
    f->coded_lossless = 0;
}

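/** Create a new reference to src in dst, duplicating the per-frame state. */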
static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    dst->header_ref = av_buffer_ref(src->header_ref);
    if (!dst->header_ref)
        goto fail;

    dst->raw_frame_header = src->raw_frame_header;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    dst->spatial_id = src->spatial_id;
    dst->temporal_id = src->temporal_id;
    memcpy(dst->gm_type,
           src->gm_type,
           AV1_NUM_REF_FRAMES * sizeof(uint8_t));
    memcpy(dst->gm_params,
           src->gm_params,
           AV1_NUM_REF_FRAMES * 6 * sizeof(int32_t));
    memcpy(dst->skip_mode_frame_idx,
           src->skip_mode_frame_idx,
           2 * sizeof(uint8_t));
    memcpy(&dst->film_grain,
           &src->film_grain,
           sizeof(dst->film_grain));
    dst->coded_lossless = src->coded_lossless;

    return 0;

fail:
    av1_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}

static av_cold int av1_decode_free(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
        av1_frame_unref(avctx, &s->ref[i]);
        av_frame_free(&s->ref[i].tf.f);
    }
    av1_frame_unref(avctx, &s->cur_frame);
    av_frame_free(&s->cur_frame.tf.f);

    av_buffer_unref(&s->seq_ref);
    av_buffer_unref(&s->header_ref);
    av_freep(&s->tile_group_info);

    ff_cbs_fragment_free(&s->current_obu);
    ff_cbs_close(&s->cbc);

    return 0;
}

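/**
 * Export sequence header information (profile, level, color description,
 * dimensions and frame rate) to the AVCodecContext.
 */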
static int set_context_with_sequence(AVCodecContext *avctx,
                                     const AV1RawSequenceHeader *seq)
{
    int width = seq->max_frame_width_minus_1 + 1;
    int height = seq->max_frame_height_minus_1 + 1;

    avctx->profile = seq->seq_profile;
    avctx->level = seq->seq_level_idx[0];

    avctx->color_range =
        seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    avctx->color_primaries = seq->color_config.color_primaries;
    /* the matrix coefficients, not the primaries, select the AVColorSpace */
    avctx->colorspace = seq->color_config.matrix_coefficients;
    avctx->color_trc = seq->color_config.transfer_characteristics;

    switch (seq->color_config.chroma_sample_position) {
    case AV1_CSP_VERTICAL:
        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case AV1_CSP_COLOCATED:
        avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }

    if (avctx->width != width || avctx->height != height) {
        int ret = ff_set_dimensions(avctx, width, height);
        if (ret < 0)
            return ret;
    }
    avctx->sample_aspect_ratio = (AVRational) { 1, 1 };

    if (seq->timing_info.num_units_in_display_tick &&
        seq->timing_info.time_scale) {
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  seq->timing_info.num_units_in_display_tick,
                  seq->timing_info.time_scale,
                  INT_MAX);
        if (seq->timing_info.equal_picture_interval)
            avctx->ticks_per_frame = seq->timing_info.num_ticks_per_picture_minus_1 + 1;
    }

    return 0;
}

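/**
 * Update the coded dimensions and the sample aspect ratio from the frame
 * header, deriving the SAR from the render size.
 */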
static int update_context_with_frame_header(AVCodecContext *avctx,
                                            const AV1RawFrameHeader *header)
{
    AVRational aspect_ratio;
    int width = header->frame_width_minus_1 + 1;
    int height = header->frame_height_minus_1 + 1;
    int r_width = header->render_width_minus_1 + 1;
    int r_height = header->render_height_minus_1 + 1;
    int ret;

    if (avctx->width != width || avctx->height != height) {
        ret = ff_set_dimensions(avctx, width, height);
        if (ret < 0)
            return ret;
    }

    av_reduce(&aspect_ratio.num, &aspect_ratio.den,
              (int64_t)height * r_width,
              (int64_t)width * r_height,
              INT_MAX);

    if (av_cmp_q(avctx->sample_aspect_ratio, aspect_ratio)) {
        ret = ff_set_sar(avctx, aspect_ratio);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static av_cold int av1_decode_init(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawSequenceHeader *seq;
    int ret;

    s->avctx = avctx;
    s->pix_fmt = AV_PIX_FMT_NONE;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
        s->ref[i].tf.f = av_frame_alloc();
        if (!s->ref[i].tf.f) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to allocate reference frame buffer %d.\n", i);
            return AVERROR(ENOMEM);
        }
    }

    s->cur_frame.tf.f = av_frame_alloc();
    if (!s->cur_frame.tf.f) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate current frame buffer.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
    if (ret < 0)
        return ret;

    av_opt_set_int(s->cbc->priv_data, "operating_point", s->operating_point, 0);

    if (avctx->extradata && avctx->extradata_size) {
        ret = ff_cbs_read_extradata_from_codec(s->cbc,
                                               &s->current_obu,
                                               avctx);
        if (ret < 0) {
            av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
            return ret;
        }

        seq = ((CodedBitstreamAV1Context *)(s->cbc->priv_data))->sequence_header;
        if (!seq) {
            av_log(avctx, AV_LOG_WARNING, "No sequence header available.\n");
            goto end;
        }

        ret = set_context_with_sequence(avctx, seq);
        if (ret < 0) {
            av_log(avctx, AV_LOG_WARNING, "Failed to set decoder context.\n");
            goto end;
        }

end:
        ff_cbs_fragment_reset(&s->current_obu);
    }

    return ret;
}

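/**
 * Allocate the buffers for the current frame, propagate the frame header and
 * set up the hardware accelerator private data.
 */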
static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawFrameHeader *header = s->raw_frame_header;
    AVFrame *frame;
    int ret;

    ret = update_context_with_frame_header(avctx, header);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to update context with frame header\n");
        return ret;
    }

    f->header_ref = av_buffer_ref(s->header_ref);
    if (!f->header_ref)
        return AVERROR(ENOMEM);

    f->raw_frame_header = s->raw_frame_header;

    if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
        goto fail;

    frame = f->tf.f;
    frame->key_frame = header->frame_type == AV1_FRAME_KEY;

    switch (header->frame_type) {
    case AV1_FRAME_KEY:
    case AV1_FRAME_INTRA_ONLY:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case AV1_FRAME_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case AV1_FRAME_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    }

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf =
                av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }
    return 0;

fail:
    av1_frame_unref(avctx, f);
    return ret;
}

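/**
 * Export the film grain parameters of the current frame as
 * AVFilmGrainParams side data on the output frame.
 */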
static int export_film_grain(AVCodecContext *avctx, AVFrame *frame)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    AVFilmGrainParams *fgp;
    AVFilmGrainAOMParams *aom;

    if (!film_grain->apply_grain)
        return 0;

    fgp = av_film_grain_params_create_side_data(frame);
    if (!fgp)
        return AVERROR(ENOMEM);

    fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
    fgp->seed = film_grain->grain_seed;

    aom = &fgp->codec.aom;
    aom->chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma;
    aom->scaling_shift = film_grain->grain_scaling_minus_8 + 8;
    aom->ar_coeff_lag = film_grain->ar_coeff_lag;
    aom->ar_coeff_shift = film_grain->ar_coeff_shift_minus_6 + 6;
    aom->grain_scale_shift = film_grain->grain_scale_shift;
    aom->overlap_flag = film_grain->overlap_flag;
    aom->limit_output_range = film_grain->clip_to_restricted_range;

    aom->num_y_points = film_grain->num_y_points;
    for (int i = 0; i < film_grain->num_y_points; i++) {
        aom->y_points[i][0] = film_grain->point_y_value[i];
        aom->y_points[i][1] = film_grain->point_y_scaling[i];
    }
    aom->num_uv_points[0] = film_grain->num_cb_points;
    for (int i = 0; i < film_grain->num_cb_points; i++) {
        aom->uv_points[0][i][0] = film_grain->point_cb_value[i];
        aom->uv_points[0][i][1] = film_grain->point_cb_scaling[i];
    }
    aom->num_uv_points[1] = film_grain->num_cr_points;
    for (int i = 0; i < film_grain->num_cr_points; i++) {
        aom->uv_points[1][i][0] = film_grain->point_cr_value[i];
        aom->uv_points[1][i][1] = film_grain->point_cr_scaling[i];
    }

    for (int i = 0; i < 24; i++) {
        aom->ar_coeffs_y[i] = film_grain->ar_coeffs_y_plus_128[i] - 128;
    }
    for (int i = 0; i < 25; i++) {
        aom->ar_coeffs_uv[0][i] = film_grain->ar_coeffs_cb_plus_128[i] - 128;
        aom->ar_coeffs_uv[1][i] = film_grain->ar_coeffs_cr_plus_128[i] - 128;
    }

    aom->uv_mult[0] = film_grain->cb_mult;
    aom->uv_mult[1] = film_grain->cr_mult;
    aom->uv_mult_luma[0] = film_grain->cb_luma_mult;
    aom->uv_mult_luma[1] = film_grain->cr_luma_mult;
    aom->uv_offset[0] = film_grain->cb_offset;
    aom->uv_offset[1] = film_grain->cr_offset;

    return 0;
}

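/**
 * Return the current frame to the caller: reference it into the output
 * frame, attach side data and copy the packet timestamps.
 */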
static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
                            const AVPacket *pkt, int *got_frame)
{
    AV1DecContext *s = avctx->priv_data;
    const AVFrame *srcframe = s->cur_frame.tf.f;
    int ret;

    // TODO: all layers
    if (s->operating_point_idc &&
        av_log2(s->operating_point_idc >> 8) > s->cur_frame.spatial_id)
        return 0;

    ret = av_frame_ref(frame, srcframe);
    if (ret < 0)
        return ret;

    if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) {
        ret = export_film_grain(avctx, frame);
        if (ret < 0) {
            av_frame_unref(frame);
            return ret;
        }
    }

    frame->pts = pkt->pts;
    frame->pkt_dts = pkt->dts;
    frame->pkt_size = pkt->size;

    *got_frame = 1;

    return 0;
}

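/**
 * Store the current frame in every reference slot selected by
 * refresh_frame_flags.
 */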
static int update_reference_list(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    int ret;

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (s->ref[i].tf.f->buf[0])
                av1_frame_unref(avctx, &s->ref[i]);
            if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
                av_log(avctx, AV_LOG_ERROR,
                       "Failed to update frame %d in reference list\n", i);
                return ret;
            }
        }
    }
    return 0;
}

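/**
 * Set up the current frame: allocate its buffers, the tile bookkeeping and
 * the per-frame parameters derived from the frame header.
 */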
static int get_current_frame(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    int ret;

    if (s->cur_frame.tf.f->buf[0])
        av1_frame_unref(avctx, &s->cur_frame);

    ret = av1_frame_alloc(avctx, &s->cur_frame);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate space for current frame.\n");
        return ret;
    }

    ret = init_tile_data(s);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
        return ret;
    }

    global_motion_params(s);
    skip_mode_params(s);
    coded_lossless_param(s);
    load_grain_params(s);

    return ret;
}

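/**
 * Decode one packet: split it into OBUs with CBS and dispatch sequence
 * headers, frame headers and tile groups to the hardware accelerator.
 */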
static int av1_decode_frame(AVCodecContext *avctx, void *frame,
                            int *got_frame, AVPacket *pkt)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawTileGroup *raw_tile_group = NULL;
    int ret;

    ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
        goto end;
    }
    av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
           s->current_obu.nb_units);

    for (int i = 0; i < s->current_obu.nb_units; i++) {
        CodedBitstreamUnit *unit = &s->current_obu.units[i];
        AV1RawOBU *obu = unit->content;
        const AV1RawOBUHeader *header;

        if (!obu)
            continue;

        header = &obu->header;
        av_log(avctx, AV_LOG_DEBUG, "Obu idx:%d, obu type:%d.\n", i, unit->type);

        switch (unit->type) {
        case AV1_OBU_SEQUENCE_HEADER:
            av_buffer_unref(&s->seq_ref);
            s->seq_ref = av_buffer_ref(unit->content_ref);
            if (!s->seq_ref) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            s->raw_seq = &obu->obu.sequence_header;

            ret = set_context_with_sequence(avctx, s->raw_seq);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to set context.\n");
                s->raw_seq = NULL;
                goto end;
            }

            s->operating_point_idc = s->raw_seq->operating_point_idc[s->operating_point];

            if (s->pix_fmt == AV_PIX_FMT_NONE) {
                ret = get_pixel_format(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Failed to get pixel format.\n");
                    s->raw_seq = NULL;
                    goto end;
                }
            }

            if (avctx->hwaccel && avctx->hwaccel->decode_params) {
                ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
                                                    unit->data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
                    s->raw_seq = NULL;
                    goto end;
                }
            }
            break;
        case AV1_OBU_REDUNDANT_FRAME_HEADER:
            if (s->raw_frame_header)
                break;
        // fall-through
        case AV1_OBU_FRAME:
        case AV1_OBU_FRAME_HEADER:
            if (!s->raw_seq) {
                av_log(avctx, AV_LOG_ERROR, "Missing Sequence Header.\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            av_buffer_unref(&s->header_ref);
            s->header_ref = av_buffer_ref(unit->content_ref);
            if (!s->header_ref) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            if (unit->type == AV1_OBU_FRAME)
                s->raw_frame_header = &obu->obu.frame.header;
            else
                s->raw_frame_header = &obu->obu.frame_header;

            if (s->raw_frame_header->show_existing_frame) {
                if (s->cur_frame.tf.f->buf[0])
                    av1_frame_unref(avctx, &s->cur_frame);

                ret = av1_frame_ref(avctx, &s->cur_frame,
                                    &s->ref[s->raw_frame_header->frame_to_show_map_idx]);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to get reference frame.\n");
                    goto end;
                }

                ret = update_reference_list(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
                    goto end;
                }

                ret = set_output_frame(avctx, frame, pkt, got_frame);
                if (ret < 0)
                    av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");

                s->raw_frame_header = NULL;

                goto end;
            }

            ret = get_current_frame(avctx);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Get current frame error\n");
                goto end;
            }

            s->cur_frame.spatial_id  = header->spatial_id;
            s->cur_frame.temporal_id = header->temporal_id;

            if (avctx->hwaccel) {
                ret = avctx->hwaccel->start_frame(avctx, unit->data,
                                                  unit->data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
                    goto end;
                }
            }
            if (unit->type != AV1_OBU_FRAME)
                break;
        // fall-through
        case AV1_OBU_TILE_GROUP:
            if (!s->raw_frame_header) {
                av_log(avctx, AV_LOG_ERROR, "Missing Frame Header.\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            if (unit->type == AV1_OBU_FRAME)
                raw_tile_group = &obu->obu.frame.tile_group;
            else
                raw_tile_group = &obu->obu.tile_group;

            ret = get_tiles_info(avctx, raw_tile_group);
            if (ret < 0)
                goto end;

            if (avctx->hwaccel) {
                ret = avctx->hwaccel->decode_slice(avctx,
                                                   raw_tile_group->tile_data.data,
                                                   raw_tile_group->tile_data.data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "HW accel decode slice fail.\n");
                    goto end;
                }
            }
            break;
        case AV1_OBU_TILE_LIST:
        case AV1_OBU_TEMPORAL_DELIMITER:
        case AV1_OBU_PADDING:
        case AV1_OBU_METADATA:
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG,
                   "Unknown obu type: %d (%"SIZE_SPECIFIER" bits).\n",
                   unit->type, unit->data_size);
        }

        if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
            if (avctx->hwaccel) {
                ret = avctx->hwaccel->end_frame(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
                    goto end;
                }
            }

            ret = update_reference_list(avctx);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
                goto end;
            }

            if (s->raw_frame_header->show_frame) {
                ret = set_output_frame(avctx, frame, pkt, got_frame);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
                    goto end;
                }
            }
            raw_tile_group = NULL;
            s->raw_frame_header = NULL;
        }
    }

end:
    ff_cbs_fragment_reset(&s->current_obu);
    if (ret < 0)
        s->raw_frame_header = NULL;
    return ret;
}

static void av1_decode_flush(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
        av1_frame_unref(avctx, &s->ref[i]);

    av1_frame_unref(avctx, &s->cur_frame);
    s->operating_point_idc = 0;
    s->raw_frame_header = NULL;
    s->raw_seq = NULL;

    ff_cbs_flush(s->cbc);
}

#define OFFSET(x) offsetof(AV1DecContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption av1_options[] = {
    { "operating_point", "Select an operating point of the scalable bitstream",
      OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, AV1_MAX_OPERATING_POINTS - 1, VD },
    { NULL }
};

static const AVClass av1_class = {
    .class_name = "AV1 decoder",
    .item_name  = av_default_item_name,
    .option     = av1_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_av1_decoder = {
    .name           = "av1",
    .long_name      = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .priv_data_size = sizeof(AV1DecContext),
    .init           = av1_decode_init,
    .close          = av1_decode_free,
    .decode         = av1_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP |
                      FF_CODEC_CAP_SETS_PKT_DTS,
    .flush          = av1_decode_flush,
    .profiles       = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
    .priv_class     = &av1_class,
    .hw_configs     = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_AV1_DXVA2_HWACCEL
        HWACCEL_DXVA2(av1),
#endif
#if CONFIG_AV1_D3D11VA_HWACCEL
        HWACCEL_D3D11VA(av1),
#endif
#if CONFIG_AV1_D3D11VA2_HWACCEL
        HWACCEL_D3D11VA2(av1),
#endif
#if CONFIG_AV1_NVDEC_HWACCEL
        HWACCEL_NVDEC(av1),
#endif
#if CONFIG_AV1_VAAPI_HWACCEL
        HWACCEL_VAAPI(av1),
#endif
        NULL
    },
};