FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavcodec/hw_base_encode.c
Date: 2024-07-16 12:46:59
Exec Total Coverage
Lines: 0 438 0.0%
Functions: 0 15 0.0%
Branches: 0 346 0.0%

Line Branch Exec Source
1 /*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include "libavutil/avassert.h"
20 #include "libavutil/common.h"
21 #include "libavutil/internal.h"
22 #include "libavutil/log.h"
23 #include "libavutil/mem.h"
24 #include "libavutil/pixdesc.h"
25
26 #include "encode.h"
27 #include "avcodec.h"
28 #include "hw_base_encode.h"
29
30 static void hw_base_encode_add_ref(FFHWBaseEncodePicture *pic,
31 FFHWBaseEncodePicture *target,
32 int is_ref, int in_dpb, int prev)
33 {
34 int refs = 0;
35
36 if (is_ref) {
37 av_assert0(pic != target);
38 av_assert0(pic->nb_refs[0] < MAX_PICTURE_REFERENCES &&
39 pic->nb_refs[1] < MAX_PICTURE_REFERENCES);
40 if (target->display_order < pic->display_order)
41 pic->refs[0][pic->nb_refs[0]++] = target;
42 else
43 pic->refs[1][pic->nb_refs[1]++] = target;
44 ++refs;
45 }
46
47 if (in_dpb) {
48 av_assert0(pic->nb_dpb_pics < MAX_DPB_SIZE);
49 pic->dpb[pic->nb_dpb_pics++] = target;
50 ++refs;
51 }
52
53 if (prev) {
54 av_assert0(!pic->prev);
55 pic->prev = target;
56 ++refs;
57 }
58
59 target->ref_count[0] += refs;
60 target->ref_count[1] += refs;
61 }
62
63 static void hw_base_encode_remove_refs(FFHWBaseEncodePicture *pic, int level)
64 {
65 int i;
66
67 if (pic->ref_removed[level])
68 return;
69
70 for (i = 0; i < pic->nb_refs[0]; i++) {
71 av_assert0(pic->refs[0][i]);
72 --pic->refs[0][i]->ref_count[level];
73 av_assert0(pic->refs[0][i]->ref_count[level] >= 0);
74 }
75
76 for (i = 0; i < pic->nb_refs[1]; i++) {
77 av_assert0(pic->refs[1][i]);
78 --pic->refs[1][i]->ref_count[level];
79 av_assert0(pic->refs[1][i]->ref_count[level] >= 0);
80 }
81
82 for (i = 0; i < pic->nb_dpb_pics; i++) {
83 av_assert0(pic->dpb[i]);
84 --pic->dpb[i]->ref_count[level];
85 av_assert0(pic->dpb[i]->ref_count[level] >= 0);
86 }
87
88 av_assert0(pic->prev || pic->type == FF_HW_PICTURE_TYPE_IDR);
89 if (pic->prev) {
90 --pic->prev->ref_count[level];
91 av_assert0(pic->prev->ref_count[level] >= 0);
92 }
93
94 pic->ref_removed[level] = 1;
95 }
96
/**
 * Assign B-picture types and references to all pictures strictly
 * between start and end (two already-typed top-layer pictures).
 *
 * At the maximum B-depth, or when only a single picture remains, every
 * picture becomes a non-referenced B-frame referencing start and end.
 * Otherwise the midpoint becomes a referenced B-frame and each half of
 * the span is processed recursively one depth level down.
 *
 * prev is the picture recorded as each new B-picture's prev reference;
 * *last receives the prev picture to use after this span.
 */
static void hw_base_encode_set_b_pictures(FFHWBaseEncodeContext *ctx,
                                          FFHWBaseEncodePicture *start,
                                          FFHWBaseEncodePicture *end,
                                          FFHWBaseEncodePicture *prev,
                                          int current_depth,
                                          FFHWBaseEncodePicture **last)
{
    FFHWBaseEncodePicture *pic, *next, *ref;
    int i, len;

    av_assert0(start && end && start != end && start->next != end);

    // If we are at the maximum depth then encode all pictures as
    // non-referenced B-pictures.  Also do this if there is exactly one
    // picture left, since there will be nothing to reference it.
    if (current_depth == ctx->max_b_depth || start->next->next == end) {
        for (pic = start->next; pic; pic = pic->next) {
            if (pic == end)
                break;
            pic->type    = FF_HW_PICTURE_TYPE_B;
            pic->b_depth = current_depth;

            hw_base_encode_add_ref(pic, start, 1, 1, 0);
            hw_base_encode_add_ref(pic, end,   1, 1, 0);
            hw_base_encode_add_ref(pic, prev,  0, 0, 1);

            // Also keep the chain of future references (end's list-1
            // ancestors) in this picture's DPB.
            for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
                hw_base_encode_add_ref(pic, ref, 0, 1, 0);
        }
        *last = prev;

    } else {
        // Split the current list at the midpoint with a referenced
        // B-picture, then descend into each side separately.
        len = 0;
        for (pic = start->next; pic != end; pic = pic->next)
            ++len;
        // Walk pic to the middle of the span (i counts its position).
        for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);

        pic->type    = FF_HW_PICTURE_TYPE_B;
        pic->b_depth = current_depth;

        pic->is_reference = 1;

        // A referenced B-picture is kept in its own DPB.
        hw_base_encode_add_ref(pic, pic,   0, 1, 0);
        hw_base_encode_add_ref(pic, start, 1, 1, 0);
        hw_base_encode_add_ref(pic, end,   1, 1, 0);
        hw_base_encode_add_ref(pic, prev,  0, 0, 1);

        for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
            hw_base_encode_add_ref(pic, ref, 0, 1, 0);

        // Recurse into the left half only if it is non-empty.
        if (i > 1)
            hw_base_encode_set_b_pictures(ctx, start, pic, pic,
                                          current_depth + 1, &next);
        else
            next = pic;

        hw_base_encode_set_b_pictures(ctx, pic, end, next,
                                      current_depth + 1, last);
    }
}
159
160 static void hw_base_encode_add_next_prev(FFHWBaseEncodeContext *ctx,
161 FFHWBaseEncodePicture *pic)
162 {
163 int i;
164
165 if (!pic)
166 return;
167
168 if (pic->type == FF_HW_PICTURE_TYPE_IDR) {
169 for (i = 0; i < ctx->nb_next_prev; i++) {
170 --ctx->next_prev[i]->ref_count[0];
171 ctx->next_prev[i] = NULL;
172 }
173 ctx->next_prev[0] = pic;
174 ++pic->ref_count[0];
175 ctx->nb_next_prev = 1;
176
177 return;
178 }
179
180 if (ctx->nb_next_prev < MAX_PICTURE_REFERENCES) {
181 ctx->next_prev[ctx->nb_next_prev++] = pic;
182 ++pic->ref_count[0];
183 } else {
184 --ctx->next_prev[0]->ref_count[0];
185 for (i = 0; i < MAX_PICTURE_REFERENCES - 1; i++)
186 ctx->next_prev[i] = ctx->next_prev[i + 1];
187 ctx->next_prev[i] = pic;
188 ++pic->ref_count[0];
189 }
190 }
191
/**
 * Choose the next picture from the queue to encode.
 *
 * Preference order: a queued B-picture whose references have all been
 * issued already; otherwise the next available top-layer picture, whose
 * type (IDR/I/P) is decided here from the GOP state, and whose
 * reference structure (including any intervening B-pictures) is built.
 *
 * Returns 0 with *pic_out set, AVERROR(EAGAIN) if more input is
 * required first, or AVERROR_EOF when the stream is finished.
 */
static int hw_base_encode_pick_next(AVCodecContext *avctx,
                                    FFHWBaseEncodeContext *ctx,
                                    FFHWBaseEncodePicture **pic_out)
{
    FFHWBaseEncodePicture *pic = NULL, *prev = NULL, *next, *start;
    int i, b_counter, closed_gop_end;

    // If there are any B-frames already queued, the next one to encode
    // is the earliest not-yet-issued frame for which all references are
    // available.
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (pic->encode_issued)
            continue;
        if (pic->type != FF_HW_PICTURE_TYPE_B)
            continue;
        for (i = 0; i < pic->nb_refs[0]; i++) {
            if (!pic->refs[0][i]->encode_issued)
                break;
        }
        if (i != pic->nb_refs[0])
            continue;

        for (i = 0; i < pic->nb_refs[1]; i++) {
            if (!pic->refs[1][i]->encode_issued)
                break;
        }
        // All references in both lists are issued: this one is ready.
        if (i == pic->nb_refs[1])
            break;
    }

    if (pic) {
        av_log(avctx, AV_LOG_DEBUG, "Pick B-picture at depth %d to "
               "encode next.\n", pic->b_depth);
        *pic_out = pic;
        return 0;
    }

    // Find the B-per-Pth available picture to become the next picture
    // on the top layer.
    start = NULL;
    b_counter = 0;
    // closed_gop_end: the picture ending this GOP must not be a B-frame.
    closed_gop_end = ctx->closed_gop ||
                     ctx->idr_counter == ctx->gop_per_idr;
    for (pic = ctx->pic_start; pic; pic = next) {
        next = pic->next;
        if (pic->encode_issued) {
            // Remember the most recent issued picture: it anchors the
            // span of B-frames collected below.
            start = pic;
            continue;
        }
        // If the next available picture is force-IDR, encode it to start
        // a new GOP immediately.
        if (pic->force_idr)
            break;
        if (b_counter == ctx->b_per_p)
            break;
        // If this picture ends a closed GOP or starts a new GOP then it
        // needs to be in the top layer.
        if (ctx->gop_counter + b_counter + closed_gop_end >= ctx->gop_size)
            break;
        // If the picture after this one is force-IDR, we need to encode
        // this one in the top layer.
        if (next && next->force_idr)
            break;
        ++b_counter;
    }

    // At the end of the stream the last picture must be in the top layer.
    if (!pic && ctx->end_of_stream) {
        --b_counter;
        pic = ctx->pic_end;
        if (pic->encode_complete)
            return AVERROR_EOF;
        else if (pic->encode_issued)
            return AVERROR(EAGAIN);
    }

    if (!pic) {
        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
               "need more input for reference pictures.\n");
        return AVERROR(EAGAIN);
    }
    if (ctx->input_order <= ctx->decode_delay && !ctx->end_of_stream) {
        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
               "need more input for timestamps.\n");
        return AVERROR(EAGAIN);
    }

    // Decide the top-layer picture type and update the GOP counters.
    if (pic->force_idr) {
        av_log(avctx, AV_LOG_DEBUG, "Pick forced IDR-picture to "
               "encode next.\n");
        pic->type = FF_HW_PICTURE_TYPE_IDR;
        ctx->idr_counter = 1;
        ctx->gop_counter = 1;

    } else if (ctx->gop_counter + b_counter >= ctx->gop_size) {
        if (ctx->idr_counter == ctx->gop_per_idr) {
            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP IDR-picture to "
                   "encode next.\n");
            pic->type = FF_HW_PICTURE_TYPE_IDR;
            ctx->idr_counter = 1;
        } else {
            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP I-picture to "
                   "encode next.\n");
            pic->type = FF_HW_PICTURE_TYPE_I;
            ++ctx->idr_counter;
        }
        ctx->gop_counter = 1;

    } else {
        if (ctx->gop_counter + b_counter + closed_gop_end == ctx->gop_size) {
            av_log(avctx, AV_LOG_DEBUG, "Pick group-end P-picture to "
                   "encode next.\n");
        } else {
            av_log(avctx, AV_LOG_DEBUG, "Pick normal P-picture to "
                   "encode next.\n");
        }
        pic->type = FF_HW_PICTURE_TYPE_P;
        av_assert0(start);
        ctx->gop_counter += 1 + b_counter;
    }
    pic->is_reference = 1;
    *pic_out = pic;

    // The picture keeps itself in its own DPB.
    hw_base_encode_add_ref(pic, pic, 0, 1, 0);
    if (pic->type != FF_HW_PICTURE_TYPE_IDR) {
        // TODO: apply both previous and forward multi reference for all vaapi encoders.
        // And L0/L1 reference frame number can be set dynamically through query
        // VAConfigAttribEncMaxRefFrames attribute.
        if (avctx->codec_id == AV_CODEC_ID_AV1) {
            // AV1 references the whole next_prev list rather than just
            // the most recent issued picture.
            for (i = 0; i < ctx->nb_next_prev; i++)
                hw_base_encode_add_ref(pic, ctx->next_prev[i],
                                       pic->type == FF_HW_PICTURE_TYPE_P,
                                       b_counter > 0, 0);
        } else
            hw_base_encode_add_ref(pic, start,
                                   pic->type == FF_HW_PICTURE_TYPE_P,
                                   b_counter > 0, 0);

        hw_base_encode_add_ref(pic, ctx->next_prev[ctx->nb_next_prev - 1], 0, 0, 1);
    }

    // Fill in the B-pictures between the previous issued picture and
    // this one, if any were collected above.
    if (b_counter > 0) {
        hw_base_encode_set_b_pictures(ctx, start, pic, pic, 1,
                                      &prev);
    } else {
        prev = pic;
    }
    hw_base_encode_add_next_prev(ctx, prev);

    return 0;
}
343
344 static int hw_base_encode_clear_old(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
345 {
346 FFHWBaseEncodePicture *pic, *prev, *next;
347
348 av_assert0(ctx->pic_start);
349
350 // Remove direct references once each picture is complete.
351 for (pic = ctx->pic_start; pic; pic = pic->next) {
352 if (pic->encode_complete && pic->next)
353 hw_base_encode_remove_refs(pic, 0);
354 }
355
356 // Remove indirect references once a picture has no direct references.
357 for (pic = ctx->pic_start; pic; pic = pic->next) {
358 if (pic->encode_complete && pic->ref_count[0] == 0)
359 hw_base_encode_remove_refs(pic, 1);
360 }
361
362 // Clear out all complete pictures with no remaining references.
363 prev = NULL;
364 for (pic = ctx->pic_start; pic; pic = next) {
365 next = pic->next;
366 if (pic->encode_complete && pic->ref_count[1] == 0) {
367 av_assert0(pic->ref_removed[0] && pic->ref_removed[1]);
368 if (prev)
369 prev->next = next;
370 else
371 ctx->pic_start = next;
372 ctx->op->free(avctx, pic);
373 } else {
374 prev = pic;
375 }
376 }
377
378 return 0;
379 }
380
381 static int hw_base_encode_check_frame(FFHWBaseEncodeContext *ctx,
382 const AVFrame *frame)
383 {
384 if ((frame->crop_top || frame->crop_bottom ||
385 frame->crop_left || frame->crop_right) && !ctx->crop_warned) {
386 av_log(ctx->log_ctx, AV_LOG_WARNING, "Cropping information on input "
387 "frames ignored due to lack of API support.\n");
388 ctx->crop_warned = 1;
389 }
390
391 if (!ctx->roi_allowed) {
392 AVFrameSideData *sd =
393 av_frame_get_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST);
394
395 if (sd && !ctx->roi_warned) {
396 av_log(ctx->log_ctx, AV_LOG_WARNING, "ROI side data on input "
397 "frames ignored due to lack of driver support.\n");
398 ctx->roi_warned = 1;
399 }
400 }
401
402 return 0;
403 }
404
405 static int hw_base_encode_send_frame(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx,
406 AVFrame *frame)
407 {
408 FFHWBaseEncodePicture *pic;
409 int err;
410
411 if (frame) {
412 av_log(avctx, AV_LOG_DEBUG, "Input frame: %ux%u (%"PRId64").\n",
413 frame->width, frame->height, frame->pts);
414
415 err = hw_base_encode_check_frame(ctx, frame);
416 if (err < 0)
417 return err;
418
419 pic = ctx->op->alloc(avctx, frame);
420 if (!pic)
421 return AVERROR(ENOMEM);
422
423 pic->input_image = av_frame_alloc();
424 if (!pic->input_image) {
425 err = AVERROR(ENOMEM);
426 goto fail;
427 }
428
429 pic->recon_image = av_frame_alloc();
430 if (!pic->recon_image) {
431 err = AVERROR(ENOMEM);
432 goto fail;
433 }
434
435 if (ctx->input_order == 0 || frame->pict_type == AV_PICTURE_TYPE_I)
436 pic->force_idr = 1;
437
438 pic->pts = frame->pts;
439 pic->duration = frame->duration;
440
441 if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
442 err = av_buffer_replace(&pic->opaque_ref, frame->opaque_ref);
443 if (err < 0)
444 goto fail;
445
446 pic->opaque = frame->opaque;
447 }
448
449 av_frame_move_ref(pic->input_image, frame);
450
451 if (ctx->input_order == 0)
452 ctx->first_pts = pic->pts;
453 if (ctx->input_order == ctx->decode_delay)
454 ctx->dts_pts_diff = pic->pts - ctx->first_pts;
455 if (ctx->output_delay > 0)
456 ctx->ts_ring[ctx->input_order %
457 (3 * ctx->output_delay + ctx->async_depth)] = pic->pts;
458
459 pic->display_order = ctx->input_order;
460 ++ctx->input_order;
461
462 if (ctx->pic_start) {
463 ctx->pic_end->next = pic;
464 ctx->pic_end = pic;
465 } else {
466 ctx->pic_start = pic;
467 ctx->pic_end = pic;
468 }
469
470 } else {
471 ctx->end_of_stream = 1;
472
473 // Fix timestamps if we hit end-of-stream before the initial decode
474 // delay has elapsed.
475 if (ctx->input_order < ctx->decode_delay)
476 ctx->dts_pts_diff = ctx->pic_end->pts - ctx->first_pts;
477 }
478
479 return 0;
480
481 fail:
482 ctx->op->free(avctx, pic);
483 return err;
484 }
485
/**
 * Fill in the output packet's properties (key flag, pts/dts, duration,
 * opaque data) from the encoded picture.
 *
 * flag_no_delay indicates the encoder produces no reordering delay, in
 * which case dts simply equals pts; otherwise dts is reconstructed from
 * the timestamp ring written by the send path.
 */
int ff_hw_base_encode_set_output_property(FFHWBaseEncodeContext *ctx,
                                          AVCodecContext *avctx,
                                          FFHWBaseEncodePicture *pic,
                                          AVPacket *pkt, int flag_no_delay)
{
    if (pic->type == FF_HW_PICTURE_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pic->pts;
    pkt->duration = pic->duration;

    // for no-delay encoders this is handled in generic codec
    if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY &&
        avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
        // Transfer ownership of the opaque reference to the packet.
        pkt->opaque = pic->opaque;
        pkt->opaque_ref = pic->opaque_ref;
        pic->opaque_ref = NULL;
    }

    if (flag_no_delay) {
        pkt->dts = pkt->pts;
        return 0;
    }

    if (ctx->output_delay == 0) {
        pkt->dts = pkt->pts;
    } else if (pic->encode_order < ctx->decode_delay) {
        // Early pictures: shift dts back by the pts/dts difference,
        // clamping so the subtraction cannot underflow INT64_MIN.
        if (ctx->ts_ring[pic->encode_order] < INT64_MIN + ctx->dts_pts_diff)
            pkt->dts = INT64_MIN;
        else
            pkt->dts = ctx->ts_ring[pic->encode_order] - ctx->dts_pts_diff;
    } else {
        // Steady state: dts is the pts recorded decode_delay frames ago.
        pkt->dts = ctx->ts_ring[(pic->encode_order - ctx->decode_delay) %
                                (3 * ctx->output_delay + ctx->async_depth)];
    }

    return 0;
}
524
/**
 * Shared receive_packet implementation for hardware base encoders.
 *
 * Pulls input frames via ff_encode_get_frame(), queues them, picks the
 * next picture to encode (synchronously or through the async fifo),
 * issues it via ctx->op, and retrieves the finished packet.  Also
 * handles flushing a pending "tail" packet (repeat-P at EOS).
 */
int ff_hw_base_encode_receive_packet(FFHWBaseEncodeContext *ctx,
                                     AVCodecContext *avctx, AVPacket *pkt)
{
    FFHWBaseEncodePicture *pic = NULL;
    AVFrame *frame = ctx->frame;
    int err;

    // The per-codec operations table must be fully populated.
    av_assert0(ctx->op && ctx->op->alloc && ctx->op->issue &&
               ctx->op->output && ctx->op->free);

start:
    /** if no B frame before repeat P frame, sent repeat P frame out. */
    if (ctx->tail_pkt->size) {
        for (FFHWBaseEncodePicture *tmp = ctx->pic_start; tmp; tmp = tmp->next) {
            if (tmp->type == FF_HW_PICTURE_TYPE_B && tmp->pts < ctx->tail_pkt->pts)
                break;
            else if (!tmp->next) {
                av_packet_move_ref(pkt, ctx->tail_pkt);
                goto end;
            }
        }
    }

    err = ff_encode_get_frame(avctx, frame);
    if (err == AVERROR_EOF) {
        // NULL frame signals end-of-stream to the send path below.
        frame = NULL;
    } else if (err < 0)
        return err;

    err = hw_base_encode_send_frame(avctx, ctx, frame);
    if (err < 0)
        return err;

    if (!ctx->pic_start) {
        if (ctx->end_of_stream)
            return AVERROR_EOF;
        else
            return AVERROR(EAGAIN);
    }

    if (ctx->async_encode) {
        // Issue pictures ahead of output while the fifo has room.
        if (av_fifo_can_write(ctx->encode_fifo)) {
            err = hw_base_encode_pick_next(avctx, ctx, &pic);
            if (!err) {
                av_assert0(pic);
                // Encode order accounts for pictures already in flight.
                pic->encode_order = ctx->encode_order +
                    av_fifo_can_read(ctx->encode_fifo);
                err = ctx->op->issue(avctx, pic);
                if (err < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
                    return err;
                }
                pic->encode_issued = 1;
                av_fifo_write(ctx->encode_fifo, &pic, 1);
            }
        }

        if (!av_fifo_can_read(ctx->encode_fifo))
            return err;

        // More frames can be buffered
        if (av_fifo_can_write(ctx->encode_fifo) && !ctx->end_of_stream)
            return AVERROR(EAGAIN);

        av_fifo_read(ctx->encode_fifo, &pic, 1);
        ctx->encode_order = pic->encode_order + 1;
    } else {
        err = hw_base_encode_pick_next(avctx, ctx, &pic);
        if (err < 0)
            return err;
        av_assert0(pic);

        pic->encode_order = ctx->encode_order++;

        err = ctx->op->issue(avctx, pic);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
            return err;
        }

        pic->encode_issued = 1;
    }

    err = ctx->op->output(avctx, pic, pkt);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
        return err;
    }

    ctx->output_order = pic->encode_order;
    hw_base_encode_clear_old(avctx, ctx);

    /** loop to get an available pkt in encoder flushing. */
    if (ctx->end_of_stream && !pkt->size)
        goto start;

end:
    if (pkt->size)
        av_log(avctx, AV_LOG_DEBUG, "Output packet: pts %"PRId64", dts %"PRId64", "
               "size %d bytes.\n", pkt->pts, pkt->dts, pkt->size);

    return 0;
}
628
/**
 * Configure the GOP structure (gop_size, b_per_p, max_b_depth,
 * closed_gop, gop_per_idr) from codec options and driver capabilities.
 *
 * ref_l0/ref_l1 are the reference counts the driver supports for each
 * list; prediction_pre_only disallows backward prediction entirely.
 *
 * Returns 0 on success, AVERROR(EINVAL) if no references are supported.
 */
int ff_hw_base_init_gop_structure(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx,
                                  uint32_t ref_l0, uint32_t ref_l1,
                                  int flags, int prediction_pre_only)
{
    if (flags & FF_HW_FLAG_INTRA_ONLY || avctx->gop_size <= 1) {
        av_log(avctx, AV_LOG_VERBOSE, "Using intra frames only.\n");
        ctx->gop_size = 1;
    } else if (ref_l0 < 1) {
        av_log(avctx, AV_LOG_ERROR, "Driver does not support any "
               "reference frames.\n");
        return AVERROR(EINVAL);
    } else if (!(flags & FF_HW_FLAG_B_PICTURES) || ref_l1 < 1 ||
               avctx->max_b_frames < 1 || prediction_pre_only) {
        // No B-frames possible.  NOTE(review): with p_to_gpb set the log
        // says "B-frames" - presumably P-pictures are emitted as
        // generalized B (GPB) slices; confirm against the codec backends.
        if (ctx->p_to_gpb)
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
                   "(supported references: %d / %d).\n",
                   ref_l0, ref_l1);
        else
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and P-frames "
                   "(supported references: %d / %d).\n", ref_l0, ref_l1);
        ctx->gop_size = avctx->gop_size;
        ctx->p_per_i  = INT_MAX;
        ctx->b_per_p  = 0;
    } else {
        if (ctx->p_to_gpb)
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
                   "(supported references: %d / %d).\n",
                   ref_l0, ref_l1);
        else
            av_log(avctx, AV_LOG_VERBOSE, "Using intra, P- and B-frames "
                   "(supported references: %d / %d).\n", ref_l0, ref_l1);
        ctx->gop_size = avctx->gop_size;
        ctx->p_per_i  = INT_MAX;
        ctx->b_per_p  = avctx->max_b_frames;
        // Referenced B-frames allow a deeper B pyramid, capped so the
        // depth never exceeds what b_per_p frames can fill.
        if (flags & FF_HW_FLAG_B_PICTURE_REFERENCES) {
            ctx->max_b_depth = FFMIN(ctx->desired_b_depth,
                                     av_log2(ctx->b_per_p) + 1);
        } else {
            ctx->max_b_depth = 1;
        }
    }

    if (flags & FF_HW_FLAG_NON_IDR_KEY_PICTURES) {
        ctx->closed_gop   = !!(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP);
        ctx->gop_per_idr  = ctx->idr_interval + 1;
    } else {
        // Without non-IDR key pictures every GOP must start with an IDR.
        ctx->closed_gop   = 1;
        ctx->gop_per_idr  = 1;
    }

    return 0;
}
681
682 int ff_hw_base_get_recon_format(FFHWBaseEncodeContext *ctx, const void *hwconfig,
683 enum AVPixelFormat *fmt)
684 {
685 AVHWFramesConstraints *constraints = NULL;
686 enum AVPixelFormat recon_format;
687 int err, i;
688
689 constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
690 hwconfig);
691 if (!constraints) {
692 err = AVERROR(ENOMEM);
693 goto fail;
694 }
695
696 // Probably we can use the input surface format as the surface format
697 // of the reconstructed frames. If not, we just pick the first (only?)
698 // format in the valid list and hope that it all works.
699 recon_format = AV_PIX_FMT_NONE;
700 if (constraints->valid_sw_formats) {
701 for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
702 if (ctx->input_frames->sw_format ==
703 constraints->valid_sw_formats[i]) {
704 recon_format = ctx->input_frames->sw_format;
705 break;
706 }
707 }
708 if (recon_format == AV_PIX_FMT_NONE) {
709 // No match. Just use the first in the supported list and
710 // hope for the best.
711 recon_format = constraints->valid_sw_formats[0];
712 }
713 } else {
714 // No idea what to use; copy input format.
715 recon_format = ctx->input_frames->sw_format;
716 }
717 av_log(ctx->log_ctx, AV_LOG_DEBUG, "Using %s as format of "
718 "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));
719
720 if (ctx->surface_width < constraints->min_width ||
721 ctx->surface_height < constraints->min_height ||
722 ctx->surface_width > constraints->max_width ||
723 ctx->surface_height > constraints->max_height) {
724 av_log(ctx->log_ctx, AV_LOG_ERROR, "Hardware does not support encoding at "
725 "size %dx%d (constraints: width %d-%d height %d-%d).\n",
726 ctx->surface_width, ctx->surface_height,
727 constraints->min_width, constraints->max_width,
728 constraints->min_height, constraints->max_height);
729 err = AVERROR(EINVAL);
730 goto fail;
731 }
732
733 *fmt = recon_format;
734 err = 0;
735 fail:
736 av_hwframe_constraints_free(&constraints);
737 return err;
738 }
739
740 int ff_hw_base_encode_free(FFHWBaseEncodePicture *pic)
741 {
742 av_frame_free(&pic->input_image);
743 av_frame_free(&pic->recon_image);
744
745 av_buffer_unref(&pic->opaque_ref);
746 av_freep(&pic->priv_data);
747
748 return 0;
749 }
750
751 int ff_hw_base_encode_init(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
752 {
753 ctx->log_ctx = (void *)avctx;
754
755 ctx->frame = av_frame_alloc();
756 if (!ctx->frame)
757 return AVERROR(ENOMEM);
758
759 if (!avctx->hw_frames_ctx) {
760 av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
761 "required to associate the encoding device.\n");
762 return AVERROR(EINVAL);
763 }
764
765 ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
766 if (!ctx->input_frames_ref)
767 return AVERROR(ENOMEM);
768
769 ctx->input_frames = (AVHWFramesContext *)ctx->input_frames_ref->data;
770
771 ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
772 if (!ctx->device_ref)
773 return AVERROR(ENOMEM);
774
775 ctx->device = (AVHWDeviceContext *)ctx->device_ref->data;
776
777 ctx->tail_pkt = av_packet_alloc();
778 if (!ctx->tail_pkt)
779 return AVERROR(ENOMEM);
780
781 return 0;
782 }
783
784 int ff_hw_base_encode_close(FFHWBaseEncodeContext *ctx)
785 {
786 av_fifo_freep2(&ctx->encode_fifo);
787
788 av_frame_free(&ctx->frame);
789 av_packet_free(&ctx->tail_pkt);
790
791 av_buffer_unref(&ctx->device_ref);
792 av_buffer_unref(&ctx->input_frames_ref);
793 av_buffer_unref(&ctx->recon_frames_ref);
794
795 return 0;
796 }
797