Line |
Branch |
Exec |
Source |
1 |
|
|
/* |
2 |
|
|
* This file is part of FFmpeg. |
3 |
|
|
* |
4 |
|
|
* FFmpeg is free software; you can redistribute it and/or |
5 |
|
|
* modify it under the terms of the GNU Lesser General Public |
6 |
|
|
* License as published by the Free Software Foundation; either |
7 |
|
|
* version 2.1 of the License, or (at your option) any later version. |
8 |
|
|
* |
9 |
|
|
* FFmpeg is distributed in the hope that it will be useful, |
10 |
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 |
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 |
|
|
* Lesser General Public License for more details. |
13 |
|
|
* |
14 |
|
|
* You should have received a copy of the GNU Lesser General Public |
15 |
|
|
* License along with FFmpeg; if not, write to the Free Software |
16 |
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 |
|
|
*/ |
18 |
|
|
|
19 |
|
|
#include "libavutil/avassert.h" |
20 |
|
|
#include "libavutil/common.h" |
21 |
|
|
#include "libavutil/error.h" |
22 |
|
|
#include "libavutil/internal.h" |
23 |
|
|
#include "libavutil/log.h" |
24 |
|
|
#include "libavutil/mem.h" |
25 |
|
|
#include "libavutil/pixdesc.h" |
26 |
|
|
|
27 |
|
|
#include "encode.h" |
28 |
|
|
#include "avcodec.h" |
29 |
|
|
#include "hw_base_encode.h" |
30 |
|
|
|
31 |
|
✗ |
/* Release every resource owned by a picture, then the picture itself.
 * The codec-specific op->free callback must already have run. */
static int base_encode_pic_free(FFHWBaseEncodePicture *pic)
{
    /* Frames held by the picture. */
    av_frame_free(&pic->input_image);
    av_frame_free(&pic->recon_image);

    /* Opaque side data and per-codec private storage. */
    av_buffer_unref(&pic->opaque_ref);
    av_freep(&pic->codec_priv);
    av_freep(&pic->priv);

    /* Finally the picture structure itself. */
    av_free(pic);

    return 0;
}
43 |
|
|
|
44 |
|
✗ |
/* Record a dependency of pic on target in up to three roles at once:
 * as a list-0/list-1 reference (is_ref), as a DPB occupant (in_dpb),
 * and/or as the previous reference picture (prev).  target's two
 * reference counts are each incremented once per role taken. */
static void hw_base_encode_add_ref(FFHWBaseEncodePicture *pic,
                                   FFHWBaseEncodePicture *target,
                                   int is_ref, int in_dpb, int prev)
{
    int added = 0;

    if (is_ref) {
        av_assert0(pic != target);
        av_assert0(pic->nb_refs[0] < MAX_PICTURE_REFERENCES &&
                   pic->nb_refs[1] < MAX_PICTURE_REFERENCES);
        /* Past pictures go into list 0, future pictures into list 1. */
        if (target->display_order < pic->display_order)
            pic->refs[0][pic->nb_refs[0]++] = target;
        else
            pic->refs[1][pic->nb_refs[1]++] = target;
        ++added;
    }

    if (in_dpb) {
        av_assert0(pic->nb_dpb_pics < MAX_DPB_SIZE);
        pic->dpb[pic->nb_dpb_pics++] = target;
        ++added;
    }

    if (prev) {
        av_assert0(!pic->prev);
        pic->prev = target;
        ++added;
    }

    target->ref_count[0] += added;
    target->ref_count[1] += added;
}
76 |
|
|
|
77 |
|
✗ |
/* Undo the reference counting done by hw_base_encode_add_ref for pic,
 * at the given level (0 = direct, 1 = indirect).  Idempotent per level
 * via the ref_removed flag. */
static void hw_base_encode_remove_refs(FFHWBaseEncodePicture *pic, int level)
{
    int list, i;

    if (pic->ref_removed[level])
        return;

    /* Drop counts held through both reference lists. */
    for (list = 0; list < 2; list++) {
        for (i = 0; i < pic->nb_refs[list]; i++) {
            av_assert0(pic->refs[list][i]);
            --pic->refs[list][i]->ref_count[level];
            av_assert0(pic->refs[list][i]->ref_count[level] >= 0);
        }
    }

    /* Drop counts held through the DPB. */
    for (i = 0; i < pic->nb_dpb_pics; i++) {
        av_assert0(pic->dpb[i]);
        --pic->dpb[i]->ref_count[level];
        av_assert0(pic->dpb[i]->ref_count[level] >= 0);
    }

    /* Only an IDR picture may legitimately lack a previous reference. */
    av_assert0(pic->prev || pic->type == FF_HW_PICTURE_TYPE_IDR);
    if (pic->prev) {
        --pic->prev->ref_count[level];
        av_assert0(pic->prev->ref_count[level] >= 0);
    }

    pic->ref_removed[level] = 1;
}
110 |
|
|
|
111 |
|
✗ |
/**
 * Assign B-picture types and references to the pictures strictly between
 * start and end, building a B-pyramid recursively.
 *
 * @param ctx           Base encode context (supplies max_b_depth).
 * @param start         Already-typed picture before the run (exclusive).
 * @param end           Already-typed picture after the run (exclusive).
 * @param prev          Previous reference picture for this layer.
 * @param current_depth Current pyramid depth (1 at the top-level call).
 * @param last          Output: the last reference picture of this subtree,
 *                      to be used as prev by the caller's next segment.
 */
static void hw_base_encode_set_b_pictures(FFHWBaseEncodeContext *ctx,
                                          FFHWBaseEncodePicture *start,
                                          FFHWBaseEncodePicture *end,
                                          FFHWBaseEncodePicture *prev,
                                          int current_depth,
                                          FFHWBaseEncodePicture **last)
{
    FFHWBaseEncodePicture *pic, *next, *ref;
    int i, len;

    // The run must be non-empty: at least one picture between start and end.
    av_assert0(start && end && start != end && start->next != end);

    // If we are at the maximum depth then encode all pictures as
    // non-referenced B-pictures. Also do this if there is exactly one
    // picture left, since there will be nothing to reference it.
    if (current_depth == ctx->max_b_depth || start->next->next == end) {
        for (pic = start->next; pic; pic = pic->next) {
            if (pic == end)
                break;
            pic->type    = FF_HW_PICTURE_TYPE_B;
            pic->b_depth = current_depth;

            hw_base_encode_add_ref(pic, start, 1, 1, 0);
            hw_base_encode_add_ref(pic, end,   1, 1, 0);
            hw_base_encode_add_ref(pic, prev,  0, 0, 1);

            // Inherit the forward-reference chain of end into the DPB.
            for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
                hw_base_encode_add_ref(pic, ref, 0, 1, 0);
        }
        *last = prev;

    } else {
        // Split the current list at the midpoint with a referenced
        // B-picture, then descend into each side separately.
        len = 0;
        for (pic = start->next; pic != end; pic = pic->next)
            ++len;
        // Advance pic to the midpoint element (i counts pictures passed).
        for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);

        pic->type    = FF_HW_PICTURE_TYPE_B;
        pic->b_depth = current_depth;

        pic->is_reference = 1;

        // Self-reference keeps the picture in its own DPB set.
        hw_base_encode_add_ref(pic, pic,   0, 1, 0);
        hw_base_encode_add_ref(pic, start, 1, 1, 0);
        hw_base_encode_add_ref(pic, end,   1, 1, 0);
        hw_base_encode_add_ref(pic, prev,  0, 0, 1);

        for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
            hw_base_encode_add_ref(pic, ref, 0, 1, 0);

        // Recurse into the left half (if non-empty), then the right half.
        if (i > 1)
            hw_base_encode_set_b_pictures(ctx, start, pic, pic,
                                          current_depth + 1, &next);
        else
            next = pic;

        hw_base_encode_set_b_pictures(ctx, pic, end, next,
                                      current_depth + 1, last);
    }
}
173 |
|
|
|
174 |
|
✗ |
static void hw_base_encode_add_next_prev(FFHWBaseEncodeContext *ctx, |
175 |
|
|
FFHWBaseEncodePicture *pic) |
176 |
|
|
{ |
177 |
|
|
int i; |
178 |
|
|
|
179 |
|
✗ |
if (!pic) |
180 |
|
✗ |
return; |
181 |
|
|
|
182 |
|
✗ |
if (pic->type == FF_HW_PICTURE_TYPE_IDR) { |
183 |
|
✗ |
for (i = 0; i < ctx->nb_next_prev; i++) { |
184 |
|
✗ |
--ctx->next_prev[i]->ref_count[0]; |
185 |
|
✗ |
ctx->next_prev[i] = NULL; |
186 |
|
|
} |
187 |
|
✗ |
ctx->next_prev[0] = pic; |
188 |
|
✗ |
++pic->ref_count[0]; |
189 |
|
✗ |
ctx->nb_next_prev = 1; |
190 |
|
|
|
191 |
|
✗ |
return; |
192 |
|
|
} |
193 |
|
|
|
194 |
|
✗ |
if (ctx->nb_next_prev < ctx->ref_l0) { |
195 |
|
✗ |
ctx->next_prev[ctx->nb_next_prev++] = pic; |
196 |
|
✗ |
++pic->ref_count[0]; |
197 |
|
|
} else { |
198 |
|
✗ |
--ctx->next_prev[0]->ref_count[0]; |
199 |
|
✗ |
for (i = 0; i < ctx->ref_l0 - 1; i++) |
200 |
|
✗ |
ctx->next_prev[i] = ctx->next_prev[i + 1]; |
201 |
|
✗ |
ctx->next_prev[i] = pic; |
202 |
|
✗ |
++pic->ref_count[0]; |
203 |
|
|
} |
204 |
|
|
} |
205 |
|
|
|
206 |
|
✗ |
/**
 * Choose the next picture to issue to the hardware encoder.
 *
 * Selection order: first any queued B-picture whose references have all
 * been issued; otherwise the next top-layer picture, whose type (IDR, I
 * or P) is decided here based on GOP state.  On picking a top-layer
 * picture, its reference structure and any intervening B-pyramid are
 * also built.
 *
 * @param avctx   Codec context (for logging and codec_id).
 * @param ctx     Base encode context holding the pending-picture list.
 * @param pic_out Output: the selected picture.
 * @return 0 on success, AVERROR(EAGAIN) if more input is needed,
 *         AVERROR_EOF at end of stream.
 */
static int hw_base_encode_pick_next(AVCodecContext *avctx,
                                    FFHWBaseEncodeContext *ctx,
                                    FFHWBaseEncodePicture **pic_out)
{
    FFHWBaseEncodePicture *pic = NULL, *prev = NULL, *next, *start;
    int i, b_counter, closed_gop_end;

    // If there are any B-frames already queued, the next one to encode
    // is the earliest not-yet-issued frame for which all references are
    // available.
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (pic->encode_issued)
            continue;
        if (pic->type != FF_HW_PICTURE_TYPE_B)
            continue;
        // All list-0 references must already be issued...
        for (i = 0; i < pic->nb_refs[0]; i++) {
            if (!pic->refs[0][i]->encode_issued)
                break;
        }
        if (i != pic->nb_refs[0])
            continue;

        // ...and all list-1 references too.
        for (i = 0; i < pic->nb_refs[1]; i++) {
            if (!pic->refs[1][i]->encode_issued)
                break;
        }
        if (i == pic->nb_refs[1])
            break;
    }

    if (pic) {
        av_log(avctx, AV_LOG_DEBUG, "Pick B-picture at depth %d to "
               "encode next.\n", pic->b_depth);
        *pic_out = pic;
        return 0;
    }

    // Find the B-per-Pth available picture to become the next picture
    // on the top layer.
    start = NULL;
    b_counter = 0;
    closed_gop_end = ctx->closed_gop ||
                     ctx->idr_counter == ctx->gop_per_idr;
    for (pic = ctx->pic_start; pic; pic = next) {
        next = pic->next;
        if (pic->encode_issued) {
            // Remember the last issued picture: it anchors the B run.
            start = pic;
            continue;
        }
        // If the next available picture is force-IDR, encode it to start
        // a new GOP immediately.
        if (pic->force_idr)
            break;
        if (b_counter == ctx->b_per_p)
            break;
        // If this picture ends a closed GOP or starts a new GOP then it
        // needs to be in the top layer.
        if (ctx->gop_counter + b_counter + closed_gop_end >= ctx->gop_size)
            break;
        // If the picture after this one is force-IDR, we need to encode
        // this one in the top layer.
        if (next && next->force_idr)
            break;
        ++b_counter;
    }

    // At the end of the stream the last picture must be in the top layer.
    if (!pic && ctx->end_of_stream) {
        --b_counter;
        pic = ctx->pic_end;
        if (pic->encode_complete)
            return AVERROR_EOF;
        else if (pic->encode_issued)
            return AVERROR(EAGAIN);
    }

    if (!pic) {
        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
               "need more input for reference pictures.\n");
        return AVERROR(EAGAIN);
    }
    if (ctx->input_order <= ctx->decode_delay && !ctx->end_of_stream) {
        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
               "need more input for timestamps.\n");
        return AVERROR(EAGAIN);
    }

    // Decide the picture type from GOP state: forced IDR, GOP boundary
    // (IDR or open-GOP I), or a regular P.
    if (pic->force_idr) {
        av_log(avctx, AV_LOG_DEBUG, "Pick forced IDR-picture to "
               "encode next.\n");
        pic->type = FF_HW_PICTURE_TYPE_IDR;
        ctx->idr_counter = 1;
        ctx->gop_counter = 1;

    } else if (ctx->gop_counter + b_counter >= ctx->gop_size) {
        if (ctx->idr_counter == ctx->gop_per_idr) {
            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP IDR-picture to "
                   "encode next.\n");
            pic->type = FF_HW_PICTURE_TYPE_IDR;
            ctx->idr_counter = 1;
        } else {
            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP I-picture to "
                   "encode next.\n");
            pic->type = FF_HW_PICTURE_TYPE_I;
            ++ctx->idr_counter;
        }
        ctx->gop_counter = 1;

    } else {
        if (ctx->gop_counter + b_counter + closed_gop_end == ctx->gop_size) {
            av_log(avctx, AV_LOG_DEBUG, "Pick group-end P-picture to "
                   "encode next.\n");
        } else {
            av_log(avctx, AV_LOG_DEBUG, "Pick normal P-picture to "
                   "encode next.\n");
        }
        pic->type = FF_HW_PICTURE_TYPE_P;
        av_assert0(start);
        ctx->gop_counter += 1 + b_counter;
    }
    pic->is_reference = 1;
    *pic_out = pic;

    // Self-reference keeps the picture in its own DPB set.
    hw_base_encode_add_ref(pic, pic, 0, 1, 0);
    if (pic->type != FF_HW_PICTURE_TYPE_IDR) {
        // TODO: apply both previous and forward multi reference for all vaapi encoders.
        // And L0/L1 reference frame number can be set dynamically through query
        // VAConfigAttribEncMaxRefFrames attribute.
        if (avctx->codec_id == AV_CODEC_ID_AV1) {
            // AV1 can use the whole tracked reference window.
            for (i = 0; i < ctx->nb_next_prev; i++)
                hw_base_encode_add_ref(pic, ctx->next_prev[i],
                                       pic->type == FF_HW_PICTURE_TYPE_P,
                                       b_counter > 0, 0);
        } else
            hw_base_encode_add_ref(pic, start,
                                   pic->type == FF_HW_PICTURE_TYPE_P,
                                   b_counter > 0, 0);

        hw_base_encode_add_ref(pic, ctx->next_prev[ctx->nb_next_prev - 1], 0, 0, 1);
    }

    // Build the B-pyramid over any pictures skipped between start and pic.
    if (b_counter > 0) {
        hw_base_encode_set_b_pictures(ctx, start, pic, pic, 1,
                                      &prev);
    } else {
        prev = pic;
    }
    hw_base_encode_add_next_prev(ctx, prev);

    return 0;
}
357 |
|
|
|
358 |
|
✗ |
/* Garbage-collect the pending-picture list in three passes: drop direct
 * references of completed pictures, then indirect references of
 * pictures nothing points at, then unlink and free pictures with no
 * remaining references at all. */
static int hw_base_encode_clear_old(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
{
    FFHWBaseEncodePicture *cur, *last, *tmp;

    av_assert0(ctx->pic_start);

    /* Pass 1: completed pictures (except the list tail) release their
     * direct (level-0) references. */
    for (cur = ctx->pic_start; cur; cur = cur->next) {
        if (cur->encode_complete && cur->next)
            hw_base_encode_remove_refs(cur, 0);
    }

    /* Pass 2: once nothing references a completed picture directly,
     * release its indirect (level-1) references too. */
    for (cur = ctx->pic_start; cur; cur = cur->next) {
        if (cur->encode_complete && cur->ref_count[0] == 0)
            hw_base_encode_remove_refs(cur, 1);
    }

    /* Pass 3: unlink and free every completed picture with no
     * references left at either level. */
    last = NULL;
    for (cur = ctx->pic_start; cur; cur = tmp) {
        tmp = cur->next;
        if (cur->encode_complete && cur->ref_count[1] == 0) {
            av_assert0(cur->ref_removed[0] && cur->ref_removed[1]);
            if (last)
                last->next = tmp;
            else
                ctx->pic_start = tmp;
            ctx->op->free(avctx, cur);
            base_encode_pic_free(cur);
        } else {
            last = cur;
        }
    }

    return 0;
}
395 |
|
|
|
396 |
|
✗ |
static int hw_base_encode_check_frame(FFHWBaseEncodeContext *ctx, |
397 |
|
|
const AVFrame *frame) |
398 |
|
|
{ |
399 |
|
✗ |
if ((frame->crop_top || frame->crop_bottom || |
400 |
|
✗ |
frame->crop_left || frame->crop_right) && !ctx->crop_warned) { |
401 |
|
✗ |
av_log(ctx->log_ctx, AV_LOG_WARNING, "Cropping information on input " |
402 |
|
|
"frames ignored due to lack of API support.\n"); |
403 |
|
✗ |
ctx->crop_warned = 1; |
404 |
|
|
} |
405 |
|
|
|
406 |
|
✗ |
if (!ctx->roi_allowed) { |
407 |
|
|
AVFrameSideData *sd = |
408 |
|
✗ |
av_frame_get_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST); |
409 |
|
|
|
410 |
|
✗ |
if (sd && !ctx->roi_warned) { |
411 |
|
✗ |
av_log(ctx->log_ctx, AV_LOG_WARNING, "ROI side data on input " |
412 |
|
|
"frames ignored due to lack of driver support.\n"); |
413 |
|
✗ |
ctx->roi_warned = 1; |
414 |
|
|
} |
415 |
|
|
} |
416 |
|
|
|
417 |
|
✗ |
return 0; |
418 |
|
|
} |
419 |
|
|
|
420 |
|
✗ |
static int hw_base_encode_send_frame(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx, |
421 |
|
|
AVFrame *frame) |
422 |
|
|
{ |
423 |
|
|
FFHWBaseEncodePicture *pic; |
424 |
|
|
int err; |
425 |
|
|
|
426 |
|
✗ |
if (frame) { |
427 |
|
✗ |
av_log(avctx, AV_LOG_DEBUG, "Input frame: %ux%u (%"PRId64").\n", |
428 |
|
|
frame->width, frame->height, frame->pts); |
429 |
|
|
|
430 |
|
✗ |
err = hw_base_encode_check_frame(ctx, frame); |
431 |
|
✗ |
if (err < 0) |
432 |
|
✗ |
return err; |
433 |
|
|
|
434 |
|
✗ |
pic = av_mallocz(sizeof(*pic)); |
435 |
|
✗ |
if (!pic) |
436 |
|
✗ |
return AVERROR(ENOMEM); |
437 |
|
|
|
438 |
|
✗ |
pic->input_image = av_frame_alloc(); |
439 |
|
✗ |
if (!pic->input_image) { |
440 |
|
✗ |
err = AVERROR(ENOMEM); |
441 |
|
✗ |
goto fail; |
442 |
|
|
} |
443 |
|
|
|
444 |
|
✗ |
if (ctx->recon_frames_ref) { |
445 |
|
✗ |
pic->recon_image = av_frame_alloc(); |
446 |
|
✗ |
if (!pic->recon_image) { |
447 |
|
✗ |
err = AVERROR(ENOMEM); |
448 |
|
✗ |
goto fail; |
449 |
|
|
} |
450 |
|
|
|
451 |
|
✗ |
err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0); |
452 |
|
✗ |
if (err < 0) { |
453 |
|
✗ |
err = AVERROR(ENOMEM); |
454 |
|
✗ |
goto fail; |
455 |
|
|
} |
456 |
|
|
} |
457 |
|
|
|
458 |
|
✗ |
pic->priv = av_mallocz(ctx->op->priv_size); |
459 |
|
✗ |
if (!pic->priv) { |
460 |
|
✗ |
err = AVERROR(ENOMEM); |
461 |
|
✗ |
goto fail; |
462 |
|
|
} |
463 |
|
|
|
464 |
|
✗ |
if (ctx->input_order == 0 || frame->pict_type == AV_PICTURE_TYPE_I) |
465 |
|
✗ |
pic->force_idr = 1; |
466 |
|
|
|
467 |
|
✗ |
pic->pts = frame->pts; |
468 |
|
✗ |
pic->duration = frame->duration; |
469 |
|
|
|
470 |
|
✗ |
if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) { |
471 |
|
✗ |
err = av_buffer_replace(&pic->opaque_ref, frame->opaque_ref); |
472 |
|
✗ |
if (err < 0) |
473 |
|
✗ |
goto fail; |
474 |
|
|
|
475 |
|
✗ |
pic->opaque = frame->opaque; |
476 |
|
|
} |
477 |
|
|
|
478 |
|
✗ |
av_frame_move_ref(pic->input_image, frame); |
479 |
|
|
|
480 |
|
✗ |
if (ctx->input_order == 0) |
481 |
|
✗ |
ctx->first_pts = pic->pts; |
482 |
|
✗ |
if (ctx->input_order == ctx->decode_delay) |
483 |
|
✗ |
ctx->dts_pts_diff = pic->pts - ctx->first_pts; |
484 |
|
✗ |
if (ctx->output_delay > 0) |
485 |
|
✗ |
ctx->ts_ring[ctx->input_order % |
486 |
|
✗ |
(3 * ctx->output_delay + ctx->async_depth)] = pic->pts; |
487 |
|
|
|
488 |
|
✗ |
pic->display_order = ctx->input_order; |
489 |
|
✗ |
++ctx->input_order; |
490 |
|
|
|
491 |
|
✗ |
if (ctx->pic_start) { |
492 |
|
✗ |
ctx->pic_end->next = pic; |
493 |
|
✗ |
ctx->pic_end = pic; |
494 |
|
|
} else { |
495 |
|
✗ |
ctx->pic_start = pic; |
496 |
|
✗ |
ctx->pic_end = pic; |
497 |
|
|
} |
498 |
|
|
|
499 |
|
✗ |
err = ctx->op->init(avctx, pic); |
500 |
|
✗ |
if (err < 0) |
501 |
|
✗ |
goto fail; |
502 |
|
|
} else { |
503 |
|
✗ |
ctx->end_of_stream = 1; |
504 |
|
|
|
505 |
|
|
// Fix timestamps if we hit end-of-stream before the initial decode |
506 |
|
|
// delay has elapsed. |
507 |
|
✗ |
if (ctx->input_order <= ctx->decode_delay) |
508 |
|
✗ |
ctx->dts_pts_diff = ctx->pic_end->pts - ctx->first_pts; |
509 |
|
|
} |
510 |
|
|
|
511 |
|
✗ |
return 0; |
512 |
|
|
|
513 |
|
✗ |
fail: |
514 |
|
✗ |
ctx->op->free(avctx, pic); |
515 |
|
✗ |
base_encode_pic_free(pic); |
516 |
|
✗ |
return err; |
517 |
|
|
} |
518 |
|
|
|
519 |
|
✗ |
/**
 * Fill the output packet's timing and metadata fields from the picture.
 *
 * Sets the keyframe flag for IDR pictures, copies pts/duration,
 * transfers opaque data when AV_CODEC_FLAG_COPY_OPAQUE is set, and
 * derives dts: equal to pts for no-delay operation, otherwise shifted
 * back through the timestamp ring buffer by the configured decode delay.
 *
 * @param flag_no_delay If non-zero, dts is simply pts (no reordering).
 * @return 0 (always succeeds).
 */
int ff_hw_base_encode_set_output_property(FFHWBaseEncodeContext *ctx,
                                          AVCodecContext *avctx,
                                          FFHWBaseEncodePicture *pic,
                                          AVPacket *pkt, int flag_no_delay)
{
    if (pic->type == FF_HW_PICTURE_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pic->pts;
    pkt->duration = pic->duration;

    // for no-delay encoders this is handled in generic codec
    if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY &&
        avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
        pkt->opaque = pic->opaque;
        // Ownership of the opaque buffer moves to the packet.
        pkt->opaque_ref = pic->opaque_ref;
        pic->opaque_ref = NULL;
    }

    if (flag_no_delay) {
        pkt->dts = pkt->pts;
        return 0;
    }

    if (ctx->output_delay == 0) {
        pkt->dts = pkt->pts;
    } else if (pic->encode_order < ctx->decode_delay) {
        // Early pictures: dts derived from the ring entry minus the
        // initial pts/dts offset, clamped against int64 underflow.
        if (ctx->ts_ring[pic->encode_order] < INT64_MIN + ctx->dts_pts_diff)
            pkt->dts = INT64_MIN;
        else
            pkt->dts = ctx->ts_ring[pic->encode_order] - ctx->dts_pts_diff;
    } else {
        // Steady state: read the pts recorded decode_delay inputs ago.
        pkt->dts = ctx->ts_ring[(pic->encode_order - ctx->decode_delay) %
                                (3 * ctx->output_delay + ctx->async_depth)];
    }

    return 0;
}
557 |
|
|
|
558 |
|
✗ |
/**
 * Generic receive_packet implementation for hardware base encoders.
 *
 * Pulls input frames from the generic encode API, schedules pictures
 * via hw_base_encode_pick_next(), issues them to the codec-specific
 * backend (ctx->op) either synchronously or through the async fifo,
 * and returns the next output packet.
 *
 * @return 0 with a packet on success, AVERROR(EAGAIN) when more input
 *         is needed, AVERROR_EOF at end of stream, or another negative
 *         AVERROR code on failure.
 */
int ff_hw_base_encode_receive_packet(FFHWBaseEncodeContext *ctx,
                                     AVCodecContext *avctx, AVPacket *pkt)
{
    FFHWBaseEncodePicture *pic = NULL;
    AVFrame *frame = ctx->frame;
    int err;

    // The backend must provide the full set of callbacks.
    av_assert0(ctx->op && ctx->op->init && ctx->op->issue &&
               ctx->op->output && ctx->op->free);

start:
    /** if no B frame before repeat P frame, sent repeat P frame out. */
    if (ctx->tail_pkt->size) {
        for (FFHWBaseEncodePicture *tmp = ctx->pic_start; tmp; tmp = tmp->next) {
            if (tmp->type == FF_HW_PICTURE_TYPE_B && tmp->pts < ctx->tail_pkt->pts)
                break;
            else if (!tmp->next) {
                av_packet_move_ref(pkt, ctx->tail_pkt);
                goto end;
            }
        }
    }

    err = ff_encode_get_frame(avctx, frame);
    if (err == AVERROR_EOF) {
        // NULL frame signals end-of-stream to send_frame below.
        frame = NULL;
    } else if (err < 0)
        return err;

    err = hw_base_encode_send_frame(avctx, ctx, frame);
    if (err < 0)
        return err;

    if (!ctx->pic_start) {
        if (ctx->end_of_stream)
            return AVERROR_EOF;
        else
            return AVERROR(EAGAIN);
    }

    if (ctx->async_encode) {
        // Async path: keep the fifo filled with issued pictures and only
        // drain once it is full (or the stream has ended).
        if (av_fifo_can_write(ctx->encode_fifo)) {
            err = hw_base_encode_pick_next(avctx, ctx, &pic);
            if (!err) {
                av_assert0(pic);
                // encode_order accounts for pictures still queued ahead.
                pic->encode_order = ctx->encode_order +
                    av_fifo_can_read(ctx->encode_fifo);
                err = ctx->op->issue(avctx, pic);
                if (err < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Encode failed: %s.\n", av_err2str(err));
                    return err;
                }
                pic->encode_issued = 1;
                av_fifo_write(ctx->encode_fifo, &pic, 1);
            }
        }

        if (!av_fifo_can_read(ctx->encode_fifo))
            return err;

        // More frames can be buffered
        if (av_fifo_can_write(ctx->encode_fifo) && !ctx->end_of_stream)
            return AVERROR(EAGAIN);

        av_fifo_read(ctx->encode_fifo, &pic, 1);
        ctx->encode_order = pic->encode_order + 1;
    } else {
        // Sync path: pick, issue and output one picture at a time.
        err = hw_base_encode_pick_next(avctx, ctx, &pic);
        if (err < 0)
            return err;
        av_assert0(pic);

        pic->encode_order = ctx->encode_order++;

        err = ctx->op->issue(avctx, pic);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Encode failed: %s.\n", av_err2str(err));
            return err;
        }

        pic->encode_issued = 1;
    }

    err = ctx->op->output(avctx, pic, pkt);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
        return err;
    }

    ctx->output_order = pic->encode_order;
    hw_base_encode_clear_old(avctx, ctx);

    /** loop to get an available pkt in encoder flushing. */
    if (ctx->end_of_stream && !pkt->size)
        goto start;

end:
    if (pkt->size)
        av_log(avctx, AV_LOG_DEBUG, "Output packet: pts %"PRId64", dts %"PRId64", "
               "size %d bytes.\n", pkt->pts, pkt->dts, pkt->size);

    return 0;
}
661 |
|
|
|
662 |
|
✗ |
/**
 * Configure the GOP structure from driver capabilities and user options.
 *
 * @param ref_l0              Driver-supported list-0 reference count.
 * @param ref_l1              Driver-supported list-1 reference count.
 * @param flags               FF_HW_FLAG_* capability flags.
 * @param prediction_pre_only Non-zero if only backward prediction is
 *                            supported (disables B-frames).
 * @return 0 on success, AVERROR(EINVAL) if the driver supports no
 *         reference frames at all.
 */
int ff_hw_base_init_gop_structure(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx,
                                  uint32_t ref_l0, uint32_t ref_l1,
                                  int flags, int prediction_pre_only)
{
    // Clamp to what the generic layer can track.
    ctx->ref_l0 = FFMIN(ref_l0, MAX_PICTURE_REFERENCES);
    ctx->ref_l1 = FFMIN(ref_l1, MAX_PICTURE_REFERENCES);

    if (flags & FF_HW_FLAG_INTRA_ONLY || avctx->gop_size <= 1) {
        av_log(avctx, AV_LOG_VERBOSE, "Using intra frames only.\n");
        ctx->gop_size = 1;
    } else if (ref_l0 < 1) {
        av_log(avctx, AV_LOG_ERROR, "Driver does not support any "
               "reference frames.\n");
        return AVERROR(EINVAL);
    } else if (!(flags & FF_HW_FLAG_B_PICTURES) || ref_l1 < 1 ||
               avctx->max_b_frames < 1 || prediction_pre_only) {
        // No real B-frames; with p_to_gpb the P pictures are coded as
        // generalized-B, hence the different log message.
        if (ctx->p_to_gpb)
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
                   "(supported references: %d / %d).\n",
                   ref_l0, ref_l1);
        else
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and P-frames "
                   "(supported references: %d / %d).\n", ref_l0, ref_l1);
        ctx->gop_size = avctx->gop_size;
        ctx->p_per_i = INT_MAX;
        ctx->b_per_p = 0;
    } else {
        if (ctx->p_to_gpb)
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
                   "(supported references: %d / %d).\n",
                   ref_l0, ref_l1);
        else
            av_log(avctx, AV_LOG_VERBOSE, "Using intra, P- and B-frames "
                   "(supported references: %d / %d).\n", ref_l0, ref_l1);
        ctx->gop_size = avctx->gop_size;
        ctx->p_per_i = INT_MAX;
        ctx->b_per_p = avctx->max_b_frames;
        if (flags & FF_HW_FLAG_B_PICTURE_REFERENCES) {
            // B-pyramid depth is bounded by what b_per_p can fill.
            ctx->max_b_depth = FFMIN(ctx->desired_b_depth,
                                     av_log2(ctx->b_per_p) + 1);
        } else {
            ctx->max_b_depth = 1;
        }
    }

    if (flags & FF_HW_FLAG_NON_IDR_KEY_PICTURES) {
        ctx->closed_gop = !!(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP);
        ctx->gop_per_idr = ctx->idr_interval + 1;
    } else {
        // Codec requires every key picture to be an IDR: force closed
        // GOPs with an IDR each.
        ctx->closed_gop = 1;
        ctx->gop_per_idr = 1;
    }

    return 0;
}
717 |
|
|
|
718 |
|
✗ |
/**
 * Choose the software pixel format for reconstructed frames.
 *
 * Queries the device's hwframe constraints for the given hwconfig,
 * prefers the input frames' sw_format when the constraints allow it,
 * otherwise falls back to the first supported format.  Also validates
 * the configured surface size against the constraints.
 *
 * @param hwconfig Backend-specific configuration passed to the
 *                 constraints query (may be NULL).
 * @param fmt      Output: the chosen pixel format.
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_hw_base_get_recon_format(FFHWBaseEncodeContext *ctx, const void *hwconfig,
                                enum AVPixelFormat *fmt)
{
    AVHWFramesConstraints *constraints = NULL;
    enum AVPixelFormat recon_format;
    int err, i;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Probably we can use the input surface format as the surface format
    // of the reconstructed frames. If not, we just pick the first (only?)
    // format in the valid list and hope that it all works.
    recon_format = AV_PIX_FMT_NONE;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->input_frames->sw_format ==
                constraints->valid_sw_formats[i]) {
                recon_format = ctx->input_frames->sw_format;
                break;
            }
        }
        if (recon_format == AV_PIX_FMT_NONE) {
            // No match.  Just use the first in the supported list and
            // hope for the best.
            recon_format = constraints->valid_sw_formats[0];
        }
    } else {
        // No idea what to use; copy input format.
        recon_format = ctx->input_frames->sw_format;
    }
    av_log(ctx->log_ctx, AV_LOG_DEBUG, "Using %s as format of "
           "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));

    // Reject surface sizes outside the hardware's supported range.
    if (ctx->surface_width  < constraints->min_width  ||
        ctx->surface_height < constraints->min_height ||
        ctx->surface_width  > constraints->max_width ||
        ctx->surface_height > constraints->max_height) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Hardware does not support encoding at "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->surface_width, ctx->surface_height,
               constraints->min_width,  constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    *fmt = recon_format;
    err = 0;
fail:
    av_hwframe_constraints_free(&constraints);
    return err;
}
775 |
|
|
|
776 |
|
✗ |
/**
 * Initialize the base encode context: allocate the scratch frame and
 * tail packet, and take references to the input hwframes context and
 * its device.
 *
 * Requires avctx->hw_frames_ctx to be set by the caller.  Allocated
 * resources are released by ff_hw_base_encode_close(), including on
 * partial-failure paths.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_hw_base_encode_init(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
{
    ctx->log_ctx = (void *)avctx;

    // Scratch frame reused for every input in receive_packet.
    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    if (!avctx->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the encoding device.\n");
        return AVERROR(EINVAL);
    }

    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
    if (!ctx->input_frames_ref)
        return AVERROR(ENOMEM);

    ctx->input_frames = (AVHWFramesContext *)ctx->input_frames_ref->data;

    // Keep our own reference to the device backing the input frames.
    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    if (!ctx->device_ref)
        return AVERROR(ENOMEM);

    ctx->device = (AVHWDeviceContext *)ctx->device_ref->data;

    // Holds a deferred repeat-P packet during flushing.
    ctx->tail_pkt = av_packet_alloc();
    if (!ctx->tail_pkt)
        return AVERROR(ENOMEM);

    return 0;
}
808 |
|
|
|
809 |
|
✗ |
/* Tear down the base encode context: free all pending pictures, the
 * async fifo, scratch frame, tail packet and hwcontext references. */
int ff_hw_base_encode_close(FFHWBaseEncodeContext *ctx)
{
    FFHWBaseEncodePicture *pic = ctx->pic_start;

    /* Walk the pending-picture list, freeing each node. */
    while (pic) {
        FFHWBaseEncodePicture *next = pic->next;
        base_encode_pic_free(pic);
        pic = next;
    }

    av_fifo_freep2(&ctx->encode_fifo);

    av_frame_free(&ctx->frame);
    av_packet_free(&ctx->tail_pkt);

    av_buffer_unref(&ctx->device_ref);
    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->recon_frames_ref);

    return 0;
}
827 |
|
|
|