FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavcodec/vaapi_vp9.c
Date: 2025-03-23 22:11:24
Exec Total Coverage
Lines: 0 80 0.0%
Functions: 0 4 0.0%
Branches: 0 20 0.0%

Line Branch Exec Source
1 /*
2 * VP9 HW decode acceleration through VA API
3 *
4 * Copyright (C) 2015 Timo Rothenpieler <timo@rothenpieler.org>
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include "libavutil/pixdesc.h"
24
25 #include "hwaccel_internal.h"
26 #include "vaapi_decode.h"
27 #include "vp9shared.h"
28
29 static VASurfaceID vaapi_vp9_surface_id(const VP9Frame *vf)
30 {
31 if (vf)
32 return ff_vaapi_get_surface_id(vf->tf.f);
33 else
34 return VA_INVALID_SURFACE;
35 }
36
/* Begin decoding one VP9 frame: translate the parsed uncompressed-header
 * state in VP9SharedContext into a VADecPictureParameterBufferVP9 and
 * queue it on the current picture.
 *
 * The raw bitstream arguments are unused here; the slice data is
 * submitted later through vaapi_vp9_decode_slice().
 *
 * Returns 0 on success or a negative error code; on failure the
 * in-flight picture is cancelled.
 */
static int vaapi_vp9_start_frame(AVCodecContext *avctx,
                                 av_unused const AVBufferRef *buffer_ref,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    VADecPictureParameterBufferVP9 pic_param;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    int err, i;

    pic->output_surface = vaapi_vp9_surface_id(&h->frames[CUR_FRAME]);

    pic_param = (VADecPictureParameterBufferVP9) {
        .frame_width                      = avctx->width,
        .frame_height                     = avctx->height,

        .pic_fields.bits = {
            /* Chroma subsampling comes from the negotiated sw pixel format. */
            .subsampling_x                = pixdesc->log2_chroma_w,
            .subsampling_y                = pixdesc->log2_chroma_h,
            /* VAAPI frame_type: 0 = keyframe, 1 = inter, hence the negation. */
            .frame_type                   = !h->h.keyframe,
            .show_frame                   = !h->h.invisible,
            .error_resilient_mode         = h->h.errorres,
            .intra_only                   = h->h.intraonly,
            /* High-precision MVs are never signalled on keyframes. */
            .allow_high_precision_mv      = h->h.keyframe ? 0 : h->h.highprecisionmvs,
            /* XOR with (filtermode <= 1) swaps values 0 and 1: FFmpeg's and
             * VAAPI's interpolation-filter enums differ only in those two. */
            .mcomp_filter_type            = h->h.filtermode ^ (h->h.filtermode <= 1),
            .frame_parallel_decoding_mode = h->h.parallelmode,
            .reset_frame_context          = h->h.resetctx,
            .refresh_frame_context        = h->h.refreshctx,
            .frame_context_idx            = h->h.framectxid,

            .segmentation_enabled          = h->h.segmentation.enabled,
            .segmentation_temporal_update  = h->h.segmentation.temporal,
            .segmentation_update_map       = h->h.segmentation.update_map,

            /* The three active references (LAST/GOLDEN/ALTREF) index into
             * the 8-slot reference_frames array filled below. */
            .last_ref_frame               = h->h.refidx[0],
            .last_ref_frame_sign_bias     = h->h.signbias[0],
            .golden_ref_frame             = h->h.refidx[1],
            .golden_ref_frame_sign_bias   = h->h.signbias[1],
            .alt_ref_frame                = h->h.refidx[2],
            .alt_ref_frame_sign_bias      = h->h.signbias[2],
            .lossless_flag                = h->h.lossless,
        },

        .filter_level                     = h->h.filter.level,
        .sharpness_level                  = h->h.filter.sharpness,
        .log2_tile_rows                   = h->h.tiling.log2_tile_rows,
        .log2_tile_columns                = h->h.tiling.log2_tile_cols,

        .frame_header_length_in_bytes     = h->h.uncompressed_header_size,
        .first_partition_size             = h->h.compressed_header_size,

        .profile                          = h->h.profile,
        .bit_depth                        = h->h.bpp,
    };

    /* Segment-map tree probabilities (7 nodes for 8 segments). */
    for (i = 0; i < 7; i++)
        pic_param.mb_segment_tree_probs[i] = h->h.segmentation.prob[i];

    if (h->h.segmentation.temporal) {
        for (i = 0; i < 3; i++)
            pic_param.segment_pred_probs[i] = h->h.segmentation.pred_prob[i];
    } else {
        /* No temporal prediction: 255 marks the probabilities as unused. */
        memset(pic_param.segment_pred_probs, 255, sizeof(pic_param.segment_pred_probs));
    }

    /* Map all 8 VP9 reference slots to VA surfaces; empty slots get
     * VA_INVALID_ID so the driver knows they are unused. */
    for (i = 0; i < 8; i++) {
        if (h->refs[i].f)
            pic_param.reference_frames[i] = ff_vaapi_get_surface_id(h->refs[i].f);
        else
            pic_param.reference_frames[i] = VA_INVALID_ID;
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0) {
        ff_vaapi_decode_cancel(avctx, pic);
        return err;
    }

    return 0;
}
120
121 static int vaapi_vp9_end_frame(AVCodecContext *avctx)
122 {
123 const VP9SharedContext *h = avctx->priv_data;
124 VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
125
126 return ff_vaapi_decode_issue(avctx, pic);
127 }
128
/* Submit the frame's compressed data as a single VAAPI "slice"
 * (VA_SLICE_DATA_FLAG_ALL), together with the per-segment dequant and
 * loop-filter parameters derived from the parsed header.
 *
 * Returns 0 on success or a negative error code; on failure the
 * in-flight picture is cancelled.
 */
static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    VASliceParameterBufferVP9 slice_param;
    int err, i;

    slice_param = (VASliceParameterBufferVP9) {
        .slice_data_size   = size,
        .slice_data_offset = 0,
        .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,
    };

    /* Fill parameters for all 8 VP9 segments, whether or not
     * segmentation is active for this frame. */
    for (i = 0; i < 8; i++) {
        slice_param.seg_param[i] = (VASegmentParameterVP9) {
            .segment_flags.fields = {
                .segment_reference_enabled = h->h.segmentation.feat[i].ref_enabled,
                .segment_reference         = h->h.segmentation.feat[i].ref_val,
                .segment_reference_skipped = h->h.segmentation.feat[i].skip_enabled,
            },

            /* qmul[plane][dc/ac]: pre-resolved dequant multipliers for
             * this segment. */
            .luma_dc_quant_scale   = h->h.segmentation.feat[i].qmul[0][0],
            .luma_ac_quant_scale   = h->h.segmentation.feat[i].qmul[0][1],
            .chroma_dc_quant_scale = h->h.segmentation.feat[i].qmul[1][0],
            .chroma_ac_quant_scale = h->h.segmentation.feat[i].qmul[1][1],
        };

        /* Per-segment loop-filter levels (copied as a whole array). */
        memcpy(slice_param.seg_param[i].filter_level, h->h.segmentation.feat[i].lflvl, sizeof(slice_param.seg_param[i].filter_level));
    }

    err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
                                            &slice_param, 1, sizeof(slice_param),
                                            buffer, size);
    if (err) {
        ff_vaapi_decode_cancel(avctx, pic);
        return err;
    }

    return 0;
}
171
/* Hardware-acceleration descriptor registering this VAAPI backend for
 * the VP9 decoder. The generic VAAPI helpers handle init/uninit and
 * frame-parameter negotiation; the three callbacks above do the
 * VP9-specific parameter translation. */
const FFHWAccel ff_vp9_vaapi_hwaccel = {
    .p.name               = "vp9_vaapi",
    .p.type               = AVMEDIA_TYPE_VIDEO,
    .p.id                 = AV_CODEC_ID_VP9,
    .p.pix_fmt            = AV_PIX_FMT_VAAPI,
    .start_frame          = vaapi_vp9_start_frame,
    .end_frame            = vaapi_vp9_end_frame,
    .decode_slice         = vaapi_vp9_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = ff_vaapi_decode_init,
    .uninit               = ff_vaapi_decode_uninit,
    .frame_params         = ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};
187