/*
 * AV1 HW decode acceleration through VA API
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
#include "thread.h"

typedef struct VAAPIAV1FrameRef {
    AVFrame *frame;
    int valid;
} VAAPIAV1FrameRef;

typedef struct VAAPIAV1DecContext {
    VAAPIDecodeContext base;

    /**
     * In the film grain case, VAAPI generates two outputs for each frame:
     * current_frame has no film grain applied and is used as a reference
     * for subsequent frames, so the reference list maintained here stays
     * grain-free, while current_display_picture has film grain applied
     * and is the one pushed downstream.
     */
    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
    AVFrame *tmp_frame;

    int nb_slice_params;
    VASliceParameterBufferAV1 *slice_params;
} VAAPIAV1DecContext;

static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
    if (vf->f)
        return ff_vaapi_get_surface_id(vf->f);
    else
        return VA_INVALID_SURFACE;
}

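/* Map the coded bit depth from the sequence header to the index stored in
 * VADecPictureParameterBufferAV1.bit_depth_idx (0 = 8-bit, 1 = 10-bit,
 * 2 = 12-bit); returns -1 for an unsupported profile/bit-depth combination. */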
static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    int8_t bit_depth = 8;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Couldn't get bit depth from profile:%d.\n", seq->seq_profile);
        return -1;
    }
    return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
}

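/* Allocate the scratch frame used as the decode target when film grain is
 * applied, plus one AVFrame per reference slot to hold grain-free copies. */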
static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame)
        return AVERROR(ENOMEM);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        ctx->ref_tab[i].frame = av_frame_alloc();
        if (!ctx->ref_tab[i].frame)
            return AVERROR(ENOMEM);
        ctx->ref_tab[i].valid = 0;
    }

    return ff_vaapi_decode_init(avctx);
}

static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    av_frame_free(&ctx->tmp_frame);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++)
        av_frame_free(&ctx->ref_tab[i].frame);

    av_freep(&ctx->slice_params);

    return ff_vaapi_decode_uninit(avctx);
}

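/*
 * Build the VADecPictureParameterBufferAV1 for the current frame. When film
 * grain is applied by the driver, current_frame refers to the grain-free
 * scratch surface and current_display_picture to the frame that is output
 * with grain applied.
 */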
static int vaapi_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    const AV1RawFrameHeader *frame_header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
    VADecPictureParameterBufferAV1 pic_param;
    int8_t bit_depth_idx;
    int err = 0;
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ};
    uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0};
    uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER,
        AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, 7, 0, 0};

    bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx);
    if (bit_depth_idx < 0)
        goto fail;

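    /* With film grain, decode into the scratch frame so the grain-free
     * reconstruction can still serve as a reference; otherwise decode
     * directly into the user-visible frame. */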
    if (apply_grain) {
        if (ctx->tmp_frame->buf[0])
            av_frame_unref(ctx->tmp_frame);
        err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
        if (err < 0)
            goto fail;
        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
    } else {
        pic->output_surface = ff_vaapi_get_surface_id(s->cur_frame.f);
    }

    memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1));
    pic_param = (VADecPictureParameterBufferAV1) {
        .profile                    = seq->seq_profile,
        .order_hint_bits_minus_1    = seq->order_hint_bits_minus_1,
        .bit_depth_idx              = bit_depth_idx,
        .matrix_coefficients        = seq->color_config.matrix_coefficients,
        .current_frame              = pic->output_surface,
        .current_display_picture    = ff_vaapi_get_surface_id(s->cur_frame.f),
        .frame_width_minus1         = frame_header->frame_width_minus_1,
        .frame_height_minus1        = frame_header->frame_height_minus_1,
        .primary_ref_frame          = frame_header->primary_ref_frame,
        .order_hint                 = frame_header->order_hint,
        .tile_cols                  = frame_header->tile_cols,
        .tile_rows                  = frame_header->tile_rows,
        .context_update_tile_id     = frame_header->context_update_tile_id,
        .superres_scale_denominator = frame_header->use_superres ?
                                          frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN :
                                          AV1_SUPERRES_NUM,
        .interp_filter              = frame_header->interpolation_filter,
        .filter_level[0]            = frame_header->loop_filter_level[0],
        .filter_level[1]            = frame_header->loop_filter_level[1],
        .filter_level_u             = frame_header->loop_filter_level[2],
        .filter_level_v             = frame_header->loop_filter_level[3],
        .base_qindex                = frame_header->base_q_idx,
        .y_dc_delta_q               = frame_header->delta_q_y_dc,
        .u_dc_delta_q               = frame_header->delta_q_u_dc,
        .u_ac_delta_q               = frame_header->delta_q_u_ac,
        .v_dc_delta_q               = frame_header->delta_q_v_dc,
        .v_ac_delta_q               = frame_header->delta_q_v_ac,
        .cdef_damping_minus_3       = frame_header->cdef_damping_minus_3,
        .cdef_bits                  = frame_header->cdef_bits,
        .seq_info_fields.fields = {
            .still_picture              = seq->still_picture,
            .use_128x128_superblock     = seq->use_128x128_superblock,
            .enable_filter_intra        = seq->enable_filter_intra,
            .enable_intra_edge_filter   = seq->enable_intra_edge_filter,
            .enable_interintra_compound = seq->enable_interintra_compound,
            .enable_masked_compound     = seq->enable_masked_compound,
            .enable_dual_filter         = seq->enable_dual_filter,
            .enable_order_hint          = seq->enable_order_hint,
            .enable_jnt_comp            = seq->enable_jnt_comp,
            .enable_cdef                = seq->enable_cdef,
            .mono_chrome                = seq->color_config.mono_chrome,
            .color_range                = seq->color_config.color_range,
            .subsampling_x              = seq->color_config.subsampling_x,
            .subsampling_y              = seq->color_config.subsampling_y,
            .chroma_sample_position     = seq->color_config.chroma_sample_position,
            .film_grain_params_present  = seq->film_grain_params_present &&
                                          !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN),
        },
        .seg_info.segment_info_fields.bits = {
            .enabled         = frame_header->segmentation_enabled,
            .update_map      = frame_header->segmentation_update_map,
            .temporal_update = frame_header->segmentation_temporal_update,
            .update_data     = frame_header->segmentation_update_data,
        },
        .film_grain_info = {
            .film_grain_info_fields.bits = {
                .apply_grain              = apply_grain,
                .chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma,
                .grain_scaling_minus_8    = film_grain->grain_scaling_minus_8,
                .ar_coeff_lag             = film_grain->ar_coeff_lag,
                .ar_coeff_shift_minus_6   = film_grain->ar_coeff_shift_minus_6,
                .grain_scale_shift        = film_grain->grain_scale_shift,
                .overlap_flag             = film_grain->overlap_flag,
                .clip_to_restricted_range = film_grain->clip_to_restricted_range,
            },
            .grain_seed    = film_grain->grain_seed,
            .num_y_points  = film_grain->num_y_points,
            .num_cb_points = film_grain->num_cb_points,
            .num_cr_points = film_grain->num_cr_points,
            .cb_mult       = film_grain->cb_mult,
            .cb_luma_mult  = film_grain->cb_luma_mult,
            .cb_offset     = film_grain->cb_offset,
            .cr_mult       = film_grain->cr_mult,
            .cr_luma_mult  = film_grain->cr_luma_mult,
            .cr_offset     = film_grain->cr_offset,
        },
        .pic_info_fields.bits = {
            .frame_type                   = frame_header->frame_type,
            .show_frame                   = frame_header->show_frame,
            .showable_frame               = frame_header->showable_frame,
            .error_resilient_mode         = frame_header->error_resilient_mode,
            .disable_cdf_update           = frame_header->disable_cdf_update,
            .allow_screen_content_tools   = frame_header->allow_screen_content_tools,
            .force_integer_mv             = s->cur_frame.force_integer_mv,
            .allow_intrabc                = frame_header->allow_intrabc,
            .use_superres                 = frame_header->use_superres,
            .allow_high_precision_mv      = frame_header->allow_high_precision_mv,
            .is_motion_mode_switchable    = frame_header->is_motion_mode_switchable,
            .use_ref_frame_mvs            = frame_header->use_ref_frame_mvs,
            .disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf,
            .uniform_tile_spacing_flag    = frame_header->uniform_tile_spacing_flag,
            .allow_warped_motion          = frame_header->allow_warped_motion,
        },
        .loop_filter_info_fields.bits = {
            .sharpness_level        = frame_header->loop_filter_sharpness,
            .mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled,
            .mode_ref_delta_update  = frame_header->loop_filter_delta_update,
        },
        .mode_control_fields.bits = {
            .delta_q_present_flag  = frame_header->delta_q_present,
            .log2_delta_q_res      = frame_header->delta_q_res,
            .delta_lf_present_flag = frame_header->delta_lf_present,
            .log2_delta_lf_res     = frame_header->delta_lf_res,
            .delta_lf_multi        = frame_header->delta_lf_multi,
            .tx_mode               = frame_header->tx_mode,
            .reference_select      = frame_header->reference_select,
            .reduced_tx_set_used   = frame_header->reduced_tx_set,
            .skip_mode_present     = frame_header->skip_mode_present,
        },
        .loop_restoration_fields.bits = {
            .yframe_restoration_type  = remap_lr_type[frame_header->lr_type[0]],
            .cbframe_restoration_type = remap_lr_type[frame_header->lr_type[1]],
            .crframe_restoration_type = remap_lr_type[frame_header->lr_type[2]],
            .lr_unit_shift            = frame_header->lr_unit_shift,
            .lr_uv_shift              = frame_header->lr_uv_shift,
        },
        .qmatrix_fields.bits = {
            .using_qmatrix = frame_header->using_qmatrix,
            .qm_y          = frame_header->qm_y,
            .qm_u          = frame_header->qm_u,
            .qm_v          = frame_header->qm_v,
        }
    };

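    /* For a shown key frame all reference slots are invalidated; otherwise
     * prefer the grain-free copy kept in ref_tab over the decoder's own
     * reference frame for each slot. */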
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
            pic_param.ref_frame_map[i] = VA_INVALID_ID;
        else
            pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
                                         vaapi_av1_surface_id(&s->ref[i]);
    }
    for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
        pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
    }
    for (int i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pic_param.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }
    for (int i = 0; i < 2; i++) {
        pic_param.mode_deltas[i] = frame_header->loop_filter_mode_deltas[i];
    }
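    /* CDEF strengths are packed as (primary << 2) + secondary for each of
     * the 1 << cdef_bits strength presets. */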
    for (int i = 0; i < (1 << frame_header->cdef_bits); i++) {
        pic_param.cdef_y_strengths[i] =
            (frame_header->cdef_y_pri_strength[i] << 2) +
             frame_header->cdef_y_sec_strength[i];
        pic_param.cdef_uv_strengths[i] =
            (frame_header->cdef_uv_pri_strength[i] << 2) +
             frame_header->cdef_uv_sec_strength[i];
    }
    for (int i = 0; i < frame_header->tile_cols; i++) {
        pic_param.width_in_sbs_minus_1[i] =
            frame_header->width_in_sbs_minus_1[i];
    }
    for (int i = 0; i < frame_header->tile_rows; i++) {
        pic_param.height_in_sbs_minus_1[i] =
            frame_header->height_in_sbs_minus_1[i];
    }
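    /* Global motion (warped motion) parameters: pic_param.wm[] is indexed
     * from 0 and covers the inter references LAST..ALTREF, hence the i - 1. */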
    for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) {
        pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i];
        pic_param.wm[i - 1].wmtype  = s->cur_frame.gm_type[i];
        for (int j = 0; j < 6; j++)
            pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j];
    }
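    /* Build the per-segment feature mask and clamp each feature value to its
     * legal range (signed for the first five features, unsigned otherwise). */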
    for (int i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (int j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j);
            if (segmentation_feature_signed[j])
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                    -segmentation_feature_max[j], segmentation_feature_max[j]);
            else
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                    0, segmentation_feature_max[j]);
        }
    }
    if (apply_grain) {
        for (int i = 0; i < film_grain->num_y_points; i++) {
            pic_param.film_grain_info.point_y_value[i] =
                film_grain->point_y_value[i];
            pic_param.film_grain_info.point_y_scaling[i] =
                film_grain->point_y_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cb_points; i++) {
            pic_param.film_grain_info.point_cb_value[i] =
                film_grain->point_cb_value[i];
            pic_param.film_grain_info.point_cb_scaling[i] =
                film_grain->point_cb_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cr_points; i++) {
            pic_param.film_grain_info.point_cr_value[i] =
                film_grain->point_cr_value[i];
            pic_param.film_grain_info.point_cr_scaling[i] =
                film_grain->point_cr_scaling[i];
        }
        for (int i = 0; i < 24; i++) {
            pic_param.film_grain_info.ar_coeffs_y[i] =
                film_grain->ar_coeffs_y_plus_128[i] - 128;
        }
        for (int i = 0; i < 25; i++) {
            pic_param.film_grain_info.ar_coeffs_cb[i] =
                film_grain->ar_coeffs_cb_plus_128[i] - 128;
            pic_param.film_grain_info.ar_coeffs_cr[i] =
                film_grain->ar_coeffs_cr_plus_128[i] - 128;
        }
    }
    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

static int vaapi_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    int ret;
    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        return ret;

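    /* Update the reference slots selected by refresh_frame_flags: when film
     * grain was applied, keep the grain-free scratch frame as the reference
     * copy; otherwise mark the slot invalid so the decoder's reference frame
     * is used instead. */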
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (ctx->ref_tab[i].frame->buf[0])
                av_frame_unref(ctx->ref_tab[i].frame);

            if (apply_grain) {
                ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
                if (ret < 0)
                    return ret;
                ctx->ref_tab[i].valid = 1;
            } else {
                ctx->ref_tab[i].valid = 0;
            }
        }
    }

    return 0;
}

static int vaapi_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
    int err, nb_params;

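    /* One VASliceParameterBufferAV1 entry per tile in this tile group; the
     * cached array is only grown when a larger tile group arrives. */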
    nb_params = s->tg_end - s->tg_start + 1;
    if (ctx->nb_slice_params < nb_params) {
        VASliceParameterBufferAV1 *tmp = av_realloc_array(ctx->slice_params,
                                                          nb_params,
                                                          sizeof(*ctx->slice_params));
        if (!tmp) {
            ctx->nb_slice_params = 0;
            err = AVERROR(ENOMEM);
            goto fail;
        }
        ctx->slice_params    = tmp;
        ctx->nb_slice_params = nb_params;
    }

    for (int i = s->tg_start; i <= s->tg_end; i++) {
        ctx->slice_params[i - s->tg_start] = (VASliceParameterBufferAV1) {
            .slice_data_size   = s->tile_group_info[i].tile_size,
            .slice_data_offset = s->tile_group_info[i].tile_offset,
            .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,
            .tile_row          = s->tile_group_info[i].tile_row,
            .tile_column       = s->tile_group_info[i].tile_column,
            .tg_start          = s->tg_start,
            .tg_end            = s->tg_end,
        };
    }

    err = ff_vaapi_decode_make_slice_buffer(avctx, pic, ctx->slice_params, nb_params,
                                            sizeof(VASliceParameterBufferAV1),
                                            buffer,
                                            size);
    if (err)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

const FFHWAccel ff_av1_vaapi_hwaccel = {
    .p.name               = "av1_vaapi",
    .p.type               = AVMEDIA_TYPE_VIDEO,
    .p.id                 = AV_CODEC_ID_AV1,
    .p.pix_fmt            = AV_PIX_FMT_VAAPI,
    .start_frame          = vaapi_av1_start_frame,
    .end_frame            = vaapi_av1_end_frame,
    .decode_slice         = vaapi_av1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = vaapi_av1_decode_init,
    .uninit               = vaapi_av1_decode_uninit,
    .frame_params         = ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIAV1DecContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};