/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <va/va.h>
#include <va/va_enc_vp9.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "vaapi_encode.h"

#define VP9_MAX_QUANT 255

#define VP9_MAX_TILE_WIDTH 4096
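
// Each reconstructed frame is tracked by the VP9 reference slot it occupies:
// keyframes take slot 0, P-frames alternate between slots 0 and 1, and
// referenced B-frames take slot 1 + depth; slot 8 marks a frame that is not
// stored at all (see vaapi_encode_vp9_init_picture_params() below).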
typedef struct VAAPIEncodeVP9Picture {
    int slot;
} VAAPIEncodeVP9Picture;

typedef struct VAAPIEncodeVP9Context {
    VAAPIEncodeContext common;

    // User options.
    int loop_filter_level;
    int loop_filter_sharpness;

    // Derived settings.
    int q_idx_idr;
    int q_idx_p;
    int q_idx_b;
} VAAPIEncodeVP9Context;


static int vaapi_encode_vp9_init_sequence_params(AVCodecContext *avctx)
{
    FFHWBaseEncodeContext *base_ctx = avctx->priv_data;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAEncSequenceParameterBufferVP9 *vseq = ctx->codec_sequence_params;
    VAEncPictureParameterBufferVP9 *vpic = ctx->codec_picture_params;

    vseq->max_frame_width  = avctx->width;
    vseq->max_frame_height = avctx->height;

    vseq->kf_auto = 0;

    if (!(ctx->va_rc_mode & VA_RC_CQP)) {
        vseq->bits_per_second = ctx->va_bit_rate;
        vseq->intra_period    = base_ctx->gop_size;
    }

    vpic->frame_width_src  = avctx->width;
    vpic->frame_height_src = avctx->height;
    vpic->frame_width_dst  = avctx->width;
    vpic->frame_height_dst = avctx->height;

    return 0;
}

static int vaapi_encode_vp9_init_picture_params(AVCodecContext *avctx,
                                                FFHWBaseEncodePicture *pic)
{
    FFHWBaseEncodeContext *base_ctx = avctx->priv_data;
    VAAPIEncodeVP9Context *priv = avctx->priv_data;
    VAAPIEncodePicture *vaapi_pic = pic->priv;
    VAAPIEncodeVP9Picture *hpic = pic->codec_priv;
    VAEncPictureParameterBufferVP9 *vpic = vaapi_pic->codec_picture_params;
    int i;
    int num_tile_columns;

    vpic->reconstructed_frame = vaapi_pic->recon_surface;
    vpic->coded_buf = vaapi_pic->output_buffer;

    // The maximum width of a tile is MAX_TILE_WIDTH_B64 (64) superblocks,
    // so the number of tile columns follows from the picture width.
    // Use the minimum possible number of tile columns by default.
    num_tile_columns = (vpic->frame_width_src + VP9_MAX_TILE_WIDTH - 1) / VP9_MAX_TILE_WIDTH;
    vpic->log2_tile_columns = num_tile_columns == 1 ? 0 : av_log2(num_tile_columns - 1) + 1;
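    // For example, frames up to 4096 pixels wide need a single tile column
    // (log2_tile_columns = 0), and 4097-8192 pixels need two (log2 = 1).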

    switch (pic->type) {
    case FF_HW_PICTURE_TYPE_IDR:
        av_assert0(pic->nb_refs[0] == 0 && pic->nb_refs[1] == 0);
        vpic->ref_flags.bits.force_kf = 1;
        vpic->refresh_frame_flags = 0xff;
        hpic->slot = 0;
        break;
    case FF_HW_PICTURE_TYPE_P:
        av_assert0(!pic->nb_refs[1]);
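        // With B-frames enabled, successive intra/P frames alternate between
        // slots 0 and 1 so both ends of a B-frame interval stay available;
        // all other slots are refreshed (0xfc plus the new slot).  Without
        // B-frames every slot is simply overwritten.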
        {
            VAAPIEncodeVP9Picture *href = pic->refs[0][0]->codec_priv;
            av_assert0(href->slot == 0 || href->slot == 1);

            if (base_ctx->max_b_depth > 0) {
                hpic->slot = !href->slot;
                vpic->refresh_frame_flags = 1 << hpic->slot | 0xfc;
            } else {
                hpic->slot = 0;
                vpic->refresh_frame_flags = 0xff;
            }
            vpic->ref_flags.bits.ref_frame_ctrl_l0 = 1;
            vpic->ref_flags.bits.ref_last_idx = href->slot;
            vpic->ref_flags.bits.ref_last_sign_bias = 1;
        }
        break;
    case FF_HW_PICTURE_TYPE_B:
        av_assert0(pic->nb_refs[0] && pic->nb_refs[1]);
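        // Referenced B-frames are stored in slot 1 + depth, keeping the
        // shallower references intact (e.g. depth 1: mask 0xfc preserves
        // slots 0 and 1).  B-frames at the maximum depth are never
        // referenced, so nothing is refreshed and slot 8 acts as an
        // "unstored" sentinel.  List 0 predicts from the LAST reference,
        // list 1 from GOLDEN.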
        {
            VAAPIEncodeVP9Picture *href0 = pic->refs[0][0]->codec_priv,
                                  *href1 = pic->refs[1][0]->codec_priv;
            av_assert0(href0->slot < pic->b_depth + 1 &&
                       href1->slot < pic->b_depth + 1);

            if (pic->b_depth == base_ctx->max_b_depth) {
                // Unreferenced frame.
                vpic->refresh_frame_flags = 0x00;
                hpic->slot = 8;
            } else {
                vpic->refresh_frame_flags = 0xfe << pic->b_depth & 0xff;
                hpic->slot = 1 + pic->b_depth;
            }
            vpic->ref_flags.bits.ref_frame_ctrl_l0 = 1;
            vpic->ref_flags.bits.ref_frame_ctrl_l1 = 2;
            vpic->ref_flags.bits.ref_last_idx = href0->slot;
            vpic->ref_flags.bits.ref_last_sign_bias = 1;
            vpic->ref_flags.bits.ref_gf_idx = href1->slot;
            vpic->ref_flags.bits.ref_gf_sign_bias = 0;
        }
        break;
    default:
        av_assert0(0 && "invalid picture type");
    }
    if (vpic->refresh_frame_flags == 0x00) {
        av_log(avctx, AV_LOG_DEBUG, "Pic %"PRId64" not stored.\n",
               pic->display_order);
    } else {
        av_log(avctx, AV_LOG_DEBUG, "Pic %"PRId64" stored in slot %d.\n",
               pic->display_order, hpic->slot);
    }

    for (i = 0; i < FF_ARRAY_ELEMS(vpic->reference_frames); i++)
        vpic->reference_frames[i] = VA_INVALID_SURFACE;

    for (i = 0; i < MAX_REFERENCE_LIST_NUM; i++) {
        for (int j = 0; j < pic->nb_refs[i]; j++) {
            FFHWBaseEncodePicture *ref_pic = pic->refs[i][j];
            int slot;
            slot = ((VAAPIEncodeVP9Picture*)ref_pic->codec_priv)->slot;
            av_assert0(vpic->reference_frames[slot] == VA_INVALID_SURFACE);
            vpic->reference_frames[slot] = ((VAAPIEncodePicture *)ref_pic->priv)->recon_surface;
        }
    }

    vpic->pic_flags.bits.frame_type = (pic->type != FF_HW_PICTURE_TYPE_IDR);
    vpic->pic_flags.bits.show_frame = pic->display_order <= pic->encode_order;
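    // Frames encoded ahead of their display time (future references for
    // B-frames) are coded with show_frame = 0.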

    if (pic->type == FF_HW_PICTURE_TYPE_IDR)
        vpic->luma_ac_qindex = priv->q_idx_idr;
    else if (pic->type == FF_HW_PICTURE_TYPE_P)
        vpic->luma_ac_qindex = priv->q_idx_p;
    else
        vpic->luma_ac_qindex = priv->q_idx_b;
    vpic->luma_dc_qindex_delta = 0;
    vpic->chroma_ac_qindex_delta = 0;
    vpic->chroma_dc_qindex_delta = 0;

    vpic->filter_level = priv->loop_filter_level;
    vpic->sharpness_level = priv->loop_filter_sharpness;

    return 0;
}

static av_cold int vaapi_encode_vp9_get_encoder_caps(AVCodecContext *avctx)
{
    FFHWBaseEncodeContext *base_ctx = avctx->priv_data;

    // Surfaces must be aligned to 64x64 superblock boundaries.
    base_ctx->surface_width = FFALIGN(avctx->width, 64);
    base_ctx->surface_height = FFALIGN(avctx->height, 64);

    return 0;
}

static av_cold int vaapi_encode_vp9_configure(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodeVP9Context *priv = avctx->priv_data;

    if (ctx->rc_mode->quality) {
        priv->q_idx_p = av_clip(ctx->rc_quality, 0, VP9_MAX_QUANT);
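        // Derive the keyframe and B-frame q indices from the P-frame value
        // via the generic quant factor/offset options; e.g. rc_quality 100
        // with i_quant_factor 0.9 and i_quant_offset 0 gives q_idx_idr 90.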
        if (avctx->i_quant_factor > 0.0)
            priv->q_idx_idr =
                av_clip((avctx->i_quant_factor * priv->q_idx_p +
                         avctx->i_quant_offset) + 0.5,
                        0, VP9_MAX_QUANT);
        else
            priv->q_idx_idr = priv->q_idx_p;
        if (avctx->b_quant_factor > 0.0)
            priv->q_idx_b =
                av_clip((avctx->b_quant_factor * priv->q_idx_p +
                         avctx->b_quant_offset) + 0.5,
                        0, VP9_MAX_QUANT);
        else
            priv->q_idx_b = priv->q_idx_p;
    } else {
        // Arbitrary value.
        priv->q_idx_idr = priv->q_idx_p = priv->q_idx_b = 100;
    }

    ctx->roi_quant_range = VP9_MAX_QUANT;

    return 0;
}
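
// Each entry lists the FFmpeg profile, bit depth, component count, log2
// chroma subsampling and the matching VAProfile: 8-bit 4:2:0 (profile 0),
// 8-bit 4:4:4 (profile 1), 10-bit 4:2:0 (profile 2), 10-bit 4:4:4 (profile 3).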
static const VAAPIEncodeProfile vaapi_encode_vp9_profiles[] = {
    { AV_PROFILE_VP9_0,  8, 3, 1, 1, VAProfileVP9Profile0 },
    { AV_PROFILE_VP9_1,  8, 3, 0, 0, VAProfileVP9Profile1 },
    { AV_PROFILE_VP9_2, 10, 3, 1, 1, VAProfileVP9Profile2 },
    { AV_PROFILE_VP9_3, 10, 3, 0, 0, VAProfileVP9Profile3 },
    { AV_PROFILE_UNKNOWN }
};

static const VAAPIEncodeType vaapi_encode_type_vp9 = {
    .profiles               = vaapi_encode_vp9_profiles,

    .flags                  = FF_HW_FLAG_B_PICTURES |
                              FF_HW_FLAG_B_PICTURE_REFERENCES,

    .default_quality        = 100,

    .picture_priv_data_size = sizeof(VAAPIEncodeVP9Picture),

    .get_encoder_caps       = &vaapi_encode_vp9_get_encoder_caps,
    .configure              = &vaapi_encode_vp9_configure,

    .sequence_params_size   = sizeof(VAEncSequenceParameterBufferVP9),
    .init_sequence_params   = &vaapi_encode_vp9_init_sequence_params,

    .picture_params_size    = sizeof(VAEncPictureParameterBufferVP9),
    .init_picture_params    = &vaapi_encode_vp9_init_picture_params,
};

static av_cold int vaapi_encode_vp9_init(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;

    ctx->codec = &vaapi_encode_type_vp9;

    // No packed headers are currently desired.  They could be written,
    // but there isn't any reason to do so - the one usable driver (i965)
    // can write its own headers and there is no metadata to include.
    ctx->desired_packed_headers = 0;

    return ff_vaapi_encode_init(avctx);
}

#define OFFSET(x) offsetof(VAAPIEncodeVP9Context, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
static const AVOption vaapi_encode_vp9_options[] = {
    HW_BASE_ENCODE_COMMON_OPTIONS,
    VAAPI_ENCODE_COMMON_OPTIONS,
    VAAPI_ENCODE_RC_OPTIONS,

    { "loop_filter_level", "Loop filter level",
      OFFSET(loop_filter_level), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 63, FLAGS },
    { "loop_filter_sharpness", "Loop filter sharpness",
      OFFSET(loop_filter_sharpness), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, 15, FLAGS },
    { NULL },
};

static const FFCodecDefault vaapi_encode_vp9_defaults[] = {
    { "b",    "0"   },
    { "bf",   "0"   },
    { "g",    "250" },
    { "qmin", "-1"  },
    { "qmax", "-1"  },
    { NULL },
};

static const AVClass vaapi_encode_vp9_class = {
    .class_name = "vp9_vaapi",
    .item_name  = av_default_item_name,
    .option     = vaapi_encode_vp9_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_vp9_vaapi_encoder = {
    .p.name         = "vp9_vaapi",
    CODEC_LONG_NAME("VP9 (VAAPI)"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_VP9,
    .priv_data_size = sizeof(VAAPIEncodeVP9Context),
    .init           = &vaapi_encode_vp9_init,
    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
    .close          = &ff_vaapi_encode_close,
    .p.priv_class   = &vaapi_encode_vp9_class,
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
                      AV_CODEC_CAP_DR1 | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
    .defaults       = vaapi_encode_vp9_defaults,
    .color_ranges   = AVCOL_RANGE_MPEG, /* FIXME: implement tagging */
    .p.pix_fmts     = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_VAAPI,
        AV_PIX_FMT_NONE,
    },
    .hw_configs     = ff_vaapi_encode_hw_configs,
    .p.wrapper_name = "vaapi",
};