FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavcodec/vdpau_vp9.c
Date: 2025-04-25 22:50:00
Exec Total Coverage
Lines: 0 133 0.0%
Functions: 0 4 0.0%
Branches: 0 45 0.0%

Line Branch Exec Source
1 /*
2 * VP9 HW decode acceleration through VDPAU
3 *
4 * Copyright (c) 2019 Manoj Gupta Bonda
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <vdpau/vdpau.h>
24 #include "libavutil/pixdesc.h"
25 #include "avcodec.h"
26 #include "hwaccel_internal.h"
27 #include "vp9dec.h"
28 #include "vdpau.h"
29 #include "vdpau_internal.h"
30
31 static int vdpau_vp9_start_frame(AVCodecContext *avctx,
32 const AVBufferRef *buffer_ref,
33 const uint8_t *buffer, uint32_t size)
34 {
35 VP9Context *s = avctx->priv_data;
36 VP9SharedContext *h = &(s->s);
37 VP9Frame pic = h->frames[CUR_FRAME];
38 struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
39 int i;
40
41 VdpPictureInfoVP9 *info = &pic_ctx->info.vp9;
42 const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
43 if (!pixdesc) {
44 return AV_PIX_FMT_NONE;
45 }
46
47 info->width = avctx->width;
48 info->height = avctx->height;
49 /* fill LvPictureInfoVP9 struct */
50 info->lastReference = VDP_INVALID_HANDLE;
51 info->goldenReference = VDP_INVALID_HANDLE;
52 info->altReference = VDP_INVALID_HANDLE;
53
54 if (h->refs[h->h.refidx[0]].f && h->refs[h->h.refidx[0]].f->private_ref) {
55 info->lastReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[0]].f);
56 }
57 if (h->refs[h->h.refidx[1]].f && h->refs[h->h.refidx[1]].f->private_ref) {
58 info->goldenReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[1]].f);
59 }
60 if (h->refs[h->h.refidx[2]].f && h->refs[h->h.refidx[2]].f->private_ref) {
61 info->altReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[2]].f);
62 }
63
64 info->profile = h->h.profile;
65 info->frameContextIdx = h->h.framectxid;
66 info->keyFrame = h->h.keyframe;
67 info->showFrame = !h->h.invisible;
68 info->errorResilient = h->h.errorres;
69 info->frameParallelDecoding = h->h.parallelmode;
70
71 info->subSamplingX = pixdesc->log2_chroma_w;
72 info->subSamplingY = pixdesc->log2_chroma_h;
73
74 info->intraOnly = h->h.intraonly;
75 info->allowHighPrecisionMv = h->h.keyframe ? 0 : h->h.highprecisionmvs;
76 info->refreshEntropyProbs = h->h.refreshctx;
77
78 info->bitDepthMinus8Luma = pixdesc->comp[0].depth - 8;
79 info->bitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;
80
81 info->loopFilterLevel = h->h.filter.level;
82 info->loopFilterSharpness = h->h.filter.sharpness;
83 info->modeRefLfEnabled = h->h.lf_delta.enabled;
84
85 info->log2TileColumns = h->h.tiling.log2_tile_cols;
86 info->log2TileRows = h->h.tiling.log2_tile_rows;
87
88 info->segmentEnabled = h->h.segmentation.enabled;
89 info->segmentMapUpdate = h->h.segmentation.update_map;
90 info->segmentMapTemporalUpdate = h->h.segmentation.temporal;
91 info->segmentFeatureMode = h->h.segmentation.absolute_vals;
92
93 info->qpYAc = h->h.yac_qi;
94 info->qpYDc = h->h.ydc_qdelta;
95 info->qpChDc = h->h.uvdc_qdelta;
96 info->qpChAc = h->h.uvac_qdelta;
97
98 info->resetFrameContext = h->h.resetctx;
99 info->mcompFilterType = h->h.filtermode ^ (h->h.filtermode <= 1);
100 info->uncompressedHeaderSize = h->h.uncompressed_header_size;
101 info->compressedHeaderSize = h->h.compressed_header_size;
102 info->refFrameSignBias[0] = 0;
103
104
105 for (i = 0; i < FF_ARRAY_ELEMS(info->mbModeLfDelta); i++)
106 info->mbModeLfDelta[i] = h->h.lf_delta.mode[i];
107
108 for (i = 0; i < FF_ARRAY_ELEMS(info->mbRefLfDelta); i++)
109 info->mbRefLfDelta[i] = h->h.lf_delta.ref[i];
110
111 for (i = 0; i < FF_ARRAY_ELEMS(info->mbSegmentTreeProbs); i++)
112 info->mbSegmentTreeProbs[i] = h->h.segmentation.prob[i];
113
114 for (i = 0; i < FF_ARRAY_ELEMS(info->activeRefIdx); i++) {
115 info->activeRefIdx[i] = h->h.refidx[i];
116 info->segmentPredProbs[i] = h->h.segmentation.pred_prob[i];
117 info->refFrameSignBias[i + 1] = h->h.signbias[i];
118 }
119
120 for (i = 0; i < FF_ARRAY_ELEMS(info->segmentFeatureEnable); i++) {
121 info->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
122 info->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
123 info->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
124 info->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;
125
126 info->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
127 info->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
128 info->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
129 info->segmentFeatureData[i][3] = 0;
130 }
131
132 switch (avctx->colorspace) {
133 default:
134 case AVCOL_SPC_UNSPECIFIED:
135 info->colorSpace = 0;
136 break;
137 case AVCOL_SPC_BT470BG:
138 info->colorSpace = 1;
139 break;
140 case AVCOL_SPC_BT709:
141 info->colorSpace = 2;
142 break;
143 case AVCOL_SPC_SMPTE170M:
144 info->colorSpace = 3;
145 break;
146 case AVCOL_SPC_SMPTE240M:
147 info->colorSpace = 4;
148 break;
149 case AVCOL_SPC_BT2020_NCL:
150 info->colorSpace = 5;
151 break;
152 case AVCOL_SPC_RESERVED:
153 info->colorSpace = 6;
154 break;
155 case AVCOL_SPC_RGB:
156 info->colorSpace = 7;
157 break;
158 }
159
160 return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
161
162 }
163
164 static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };
165
166 static int vdpau_vp9_decode_slice(AVCodecContext *avctx,
167 const uint8_t *buffer, uint32_t size)
168 {
169 VP9SharedContext *h = avctx->priv_data;
170 VP9Frame pic = h->frames[CUR_FRAME];
171 struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
172
173 int val;
174
175 val = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, 3);
176 if (val)
177 return val;
178
179 val = ff_vdpau_add_buffer(pic_ctx, buffer, size);
180 if (val)
181 return val;
182
183 return 0;
184 }
185
186 static int vdpau_vp9_end_frame(AVCodecContext *avctx)
187 {
188 VP9SharedContext *h = avctx->priv_data;
189 VP9Frame pic = h->frames[CUR_FRAME];
190 struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
191
192 int val;
193
194 val = ff_vdpau_common_end_frame(avctx, pic.tf.f, pic_ctx);
195 if (val < 0)
196 return val;
197
198 return 0;
199 }
200
201 static av_cold int vdpau_vp9_init(AVCodecContext *avctx)
202 {
203 VdpDecoderProfile profile;
204 uint32_t level = avctx->level;
205
206 switch (avctx->profile) {
207 case AV_PROFILE_VP9_0:
208 profile = VDP_DECODER_PROFILE_VP9_PROFILE_0;
209 break;
210 case AV_PROFILE_VP9_1:
211 profile = VDP_DECODER_PROFILE_VP9_PROFILE_1;
212 break;
213 case AV_PROFILE_VP9_2:
214 profile = VDP_DECODER_PROFILE_VP9_PROFILE_2;
215 break;
216 case AV_PROFILE_VP9_3:
217 profile = VDP_DECODER_PROFILE_VP9_PROFILE_3;
218 break;
219 default:
220 return AVERROR(ENOTSUP);
221 }
222
223 return ff_vdpau_common_init(avctx, profile, level);
224 }
225
/* Hardware-acceleration descriptor registering the VDPAU backend for the
 * VP9 decoder. The three frame callbacks above do the per-frame work; the
 * common VDPAU helpers handle init/uninit and frame parameter negotiation. */
const FFHWAccel ff_vp9_vdpau_hwaccel = {
    .p.name = "vp9_vdpau",
    .p.type = AVMEDIA_TYPE_VIDEO,
    .p.id = AV_CODEC_ID_VP9,
    .p.pix_fmt = AV_PIX_FMT_VDPAU,
    .start_frame = vdpau_vp9_start_frame,
    .end_frame = vdpau_vp9_end_frame,
    .decode_slice = vdpau_vp9_decode_slice,
    .frame_priv_data_size = sizeof(struct vdpau_picture_context), /* per-picture VdpPictureInfo + buffers */
    .init = vdpau_vp9_init,
    .uninit = ff_vdpau_common_uninit,
    .frame_params = ff_vdpau_common_frame_params,
    .priv_data_size = sizeof(VDPAUContext),
    .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
};
241