Line |
Branch |
Exec |
Source |
1 |
|
|
/* |
2 |
|
|
* VP9 HW decode acceleration through VDPAU |
3 |
|
|
* |
4 |
|
|
* Copyright (c) 2019 Manoj Gupta Bonda |
5 |
|
|
* |
6 |
|
|
* This file is part of FFmpeg. |
7 |
|
|
* |
8 |
|
|
* FFmpeg is free software; you can redistribute it and/or |
9 |
|
|
* modify it under the terms of the GNU Lesser General Public |
10 |
|
|
* License as published by the Free Software Foundation; either |
11 |
|
|
* version 2.1 of the License, or (at your option) any later version. |
12 |
|
|
* |
13 |
|
|
* FFmpeg is distributed in the hope that it will be useful, |
14 |
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 |
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 |
|
|
* Lesser General Public License for more details. |
17 |
|
|
* |
18 |
|
|
* You should have received a copy of the GNU Lesser General Public |
19 |
|
|
* License along with FFmpeg; if not, write to the Free Software Foundation, |
20 |
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
21 |
|
|
*/ |
22 |
|
|
|
23 |
|
|
#include <vdpau/vdpau.h> |
24 |
|
|
#include "libavutil/pixdesc.h" |
25 |
|
|
#include "avcodec.h" |
26 |
|
|
#include "hwaccel_internal.h" |
27 |
|
|
#include "vp9dec.h" |
28 |
|
|
#include "vdpau.h" |
29 |
|
|
#include "vdpau_internal.h" |
30 |
|
|
|
31 |
|
✗ |
/**
 * Begin decoding one VP9 frame: translate the parsed VP9 headers for the
 * current frame into a VdpPictureInfoVP9 and hand the (empty) bitstream
 * buffer list to the common VDPAU start-frame helper.
 *
 * @param avctx  codec context; priv_data is the decoder's VP9Context
 * @param buffer frame bitstream (forwarded to ff_vdpau_common_start_frame)
 * @param size   size of @p buffer in bytes
 * @return 0 on success, a negative AVERROR code on failure
 */
static int vdpau_vp9_start_frame(AVCodecContext *avctx,
                                 const uint8_t *buffer, uint32_t size)
{
    VP9Context *s = avctx->priv_data;
    VP9SharedContext *h = &(s->s);
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
    int i;

    VdpPictureInfoVP9 *info = &pic_ctx->info.vp9;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!pixdesc) {
        /* Bug fix: previously returned AV_PIX_FMT_NONE (a pixel-format enum
         * value) where the contract is an AVERROR code; report the invalid
         * sw_pix_fmt explicitly instead. */
        return AVERROR(EINVAL);
    }

    info->width = avctx->width;
    info->height = avctx->height;
    /* fill VdpPictureInfoVP9 struct */
    info->lastReference = VDP_INVALID_HANDLE;
    info->goldenReference = VDP_INVALID_HANDLE;
    info->altReference = VDP_INVALID_HANDLE;

    /* Resolve the three reference slots to VDPAU surfaces; a slot without a
     * mapped hardware frame keeps VDP_INVALID_HANDLE. */
    if (h->refs[h->h.refidx[0]].f && h->refs[h->h.refidx[0]].f->private_ref) {
        info->lastReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[0]].f);
    }
    if (h->refs[h->h.refidx[1]].f && h->refs[h->h.refidx[1]].f->private_ref) {
        info->goldenReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[1]].f);
    }
    if (h->refs[h->h.refidx[2]].f && h->refs[h->h.refidx[2]].f->private_ref) {
        info->altReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[2]].f);
    }

    info->profile = h->h.profile;
    info->frameContextIdx = h->h.framectxid;
    info->keyFrame = h->h.keyframe;
    info->showFrame = !h->h.invisible;
    info->errorResilient = h->h.errorres;
    info->frameParallelDecoding = h->h.parallelmode;

    info->subSamplingX = pixdesc->log2_chroma_w;
    info->subSamplingY = pixdesc->log2_chroma_h;

    info->intraOnly = h->h.intraonly;
    /* High-precision MVs are never signalled on keyframes. */
    info->allowHighPrecisionMv = h->h.keyframe ? 0 : h->h.highprecisionmvs;
    info->refreshEntropyProbs = h->h.refreshctx;

    info->bitDepthMinus8Luma = pixdesc->comp[0].depth - 8;
    info->bitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;

    info->loopFilterLevel = h->h.filter.level;
    info->loopFilterSharpness = h->h.filter.sharpness;
    info->modeRefLfEnabled = h->h.lf_delta.enabled;

    info->log2TileColumns = h->h.tiling.log2_tile_cols;
    info->log2TileRows = h->h.tiling.log2_tile_rows;

    info->segmentEnabled = h->h.segmentation.enabled;
    info->segmentMapUpdate = h->h.segmentation.update_map;
    info->segmentMapTemporalUpdate = h->h.segmentation.temporal;
    info->segmentFeatureMode = h->h.segmentation.absolute_vals;

    info->qpYAc = h->h.yac_qi;
    info->qpYDc = h->h.ydc_qdelta;
    info->qpChDc = h->h.uvdc_qdelta;
    info->qpChAc = h->h.uvac_qdelta;

    info->resetFrameContext = h->h.resetctx;
    /* FFmpeg and VDPAU number the first two interpolation filters in opposite
     * order; the XOR swaps indices 0 and 1 while leaving 2 and 3 intact. */
    info->mcompFilterType = h->h.filtermode ^ (h->h.filtermode <= 1);
    info->uncompressedHeaderSize = h->h.uncompressed_header_size;
    info->compressedHeaderSize = h->h.compressed_header_size;
    /* Entry 0 is the INTRA_FRAME slot, which has no sign bias. */
    info->refFrameSignBias[0] = 0;


    for (i = 0; i < FF_ARRAY_ELEMS(info->mbModeLfDelta); i++)
        info->mbModeLfDelta[i] = h->h.lf_delta.mode[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbRefLfDelta); i++)
        info->mbRefLfDelta[i] = h->h.lf_delta.ref[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbSegmentTreeProbs); i++)
        info->mbSegmentTreeProbs[i] = h->h.segmentation.prob[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->activeRefIdx); i++) {
        info->activeRefIdx[i] = h->h.refidx[i];
        info->segmentPredProbs[i] = h->h.segmentation.pred_prob[i];
        info->refFrameSignBias[i + 1] = h->h.signbias[i];
    }

    for (i = 0; i < FF_ARRAY_ELEMS(info->segmentFeatureEnable); i++) {
        info->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
        info->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
        info->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
        info->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;

        info->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
        info->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
        info->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
        /* The skip feature carries no data value. */
        info->segmentFeatureData[i][3] = 0;
    }

    /* Map AVColorSpace onto the VP9 bitstream color-space codes. */
    switch (avctx->colorspace) {
    default: /* fall through: treat unknown spaces as unspecified */
    case AVCOL_SPC_UNSPECIFIED:
        info->colorSpace = 0;
        break;
    case AVCOL_SPC_BT470BG:
        info->colorSpace = 1;
        break;
    case AVCOL_SPC_BT709:
        info->colorSpace = 2;
        break;
    case AVCOL_SPC_SMPTE170M:
        info->colorSpace = 3;
        break;
    case AVCOL_SPC_SMPTE240M:
        info->colorSpace = 4;
        break;
    case AVCOL_SPC_BT2020_NCL:
        info->colorSpace = 5;
        break;
    case AVCOL_SPC_RESERVED:
        info->colorSpace = 6;
        break;
    case AVCOL_SPC_RGB:
        info->colorSpace = 7;
        break;
    }

    return ff_vdpau_common_start_frame(pic_ctx, buffer, size);

}
162 |
|
|
|
163 |
|
|
static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 }; |
164 |
|
|
|
165 |
|
✗ |
static int vdpau_vp9_decode_slice(AVCodecContext *avctx, |
166 |
|
|
const uint8_t *buffer, uint32_t size) |
167 |
|
|
{ |
168 |
|
✗ |
VP9SharedContext *h = avctx->priv_data; |
169 |
|
✗ |
VP9Frame pic = h->frames[CUR_FRAME]; |
170 |
|
✗ |
struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private; |
171 |
|
|
|
172 |
|
|
int val; |
173 |
|
|
|
174 |
|
✗ |
val = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, 3); |
175 |
|
✗ |
if (val) |
176 |
|
✗ |
return val; |
177 |
|
|
|
178 |
|
✗ |
val = ff_vdpau_add_buffer(pic_ctx, buffer, size); |
179 |
|
✗ |
if (val) |
180 |
|
✗ |
return val; |
181 |
|
|
|
182 |
|
✗ |
return 0; |
183 |
|
|
} |
184 |
|
|
|
185 |
|
✗ |
/**
 * Finish the current VP9 frame: submit all queued bitstream buffers to the
 * VDPAU decoder via the common end-frame helper.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int vdpau_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    const VP9Frame *pic = &h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
    int ret = ff_vdpau_common_end_frame(avctx, pic->tf.f, pic_ctx);

    return ret < 0 ? ret : 0;
}
199 |
|
|
|
200 |
|
✗ |
/**
 * Initialize the VDPAU VP9 hwaccel: map the stream's VP9 profile to the
 * matching VdpDecoderProfile and create the decoder.
 *
 * @return 0 on success, AVERROR(ENOTSUP) for unsupported profiles, or any
 *         negative error from ff_vdpau_common_init()
 */
static int vdpau_vp9_init(AVCodecContext *avctx)
{
    VdpDecoderProfile vdp_profile;

    switch (avctx->profile) {
    case AV_PROFILE_VP9_0: vdp_profile = VDP_DECODER_PROFILE_VP9_PROFILE_0; break;
    case AV_PROFILE_VP9_1: vdp_profile = VDP_DECODER_PROFILE_VP9_PROFILE_1; break;
    case AV_PROFILE_VP9_2: vdp_profile = VDP_DECODER_PROFILE_VP9_PROFILE_2; break;
    case AV_PROFILE_VP9_3: vdp_profile = VDP_DECODER_PROFILE_VP9_PROFILE_3; break;
    default:
        return AVERROR(ENOTSUP);
    }

    return ff_vdpau_common_init(avctx, vdp_profile, avctx->level);
}
224 |
|
|
|
225 |
|
|
/* Hardware-acceleration descriptor registering VDPAU decode support for VP9
 * with the generic hwaccel machinery. */
const FFHWAccel ff_vp9_vdpau_hwaccel = {
    .p.name         = "vp9_vdpau",
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_VP9,
    .p.pix_fmt      = AV_PIX_FMT_VDPAU,
    .start_frame    = vdpau_vp9_start_frame,
    .end_frame      = vdpau_vp9_end_frame,
    .decode_slice   = vdpau_vp9_decode_slice,
    /* Per-frame private data holds the VdpPictureInfo and buffer list. */
    .frame_priv_data_size = sizeof(struct vdpau_picture_context),
    .init           = vdpau_vp9_init,
    .uninit         = ff_vdpau_common_uninit,
    .frame_params   = ff_vdpau_common_frame_params,
    .priv_data_size = sizeof(VDPAUContext),
    .caps_internal  = HWACCEL_CAP_ASYNC_SAFE,
};
240 |
|
|
|