/*
 * generic encoding-related code
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"

#include "avcodec.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "internal.h"
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
{
    if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
               size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
        return AVERROR(EINVAL);
    }

    av_assert0(!avpkt->data);

    if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
        av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
        avpkt->data = avctx->internal->byte_buffer;
        avpkt->size = size;
    }

    if (!avpkt->data) {
        int ret = av_new_packet(avpkt, size);
        if (ret < 0)
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
        return ret;
    }

    return 0;
}
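
/* Default AVCodecContext.get_encode_buffer() callback: allocates a
 * refcounted buffer of avpkt->size bytes plus zeroed input padding. */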
int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
{
    int ret;

    if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    if (avpkt->data || avpkt->buf) {
        av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
        return AVERROR(EINVAL);
    }

    ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
        return ret;
    }
    avpkt->data = avpkt->buf->data;
    memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}
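
/* Obtain a packet buffer of `size` bytes through the get_encode_buffer()
 * callback and verify that the callback returned a refcounted buffer;
 * the packet is unreferenced again on failure. */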
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
{
    int ret;

    if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    av_assert0(!avpkt->data && !avpkt->buf);

    avpkt->size = size;
    ret = avctx->get_encode_buffer(avctx, avpkt, flags);
    if (ret < 0)
        goto fail;

    if (!avpkt->data || !avpkt->buf) {
        av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    ret = 0;
fail:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
        av_packet_unref(avpkt);
    }

    return ret;
}

/**
 * Pad last frame with silence.
 */
static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
{
    int ret;

    frame->format         = src->format;
    frame->channel_layout = src->channel_layout;
    frame->channels       = src->channels;
    frame->nb_samples     = s->frame_size;
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        goto fail;

    ret = av_frame_copy_props(frame, src);
    if (ret < 0)
        goto fail;

    if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
                               src->nb_samples, s->channels, s->sample_fmt)) < 0)
        goto fail;
    if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
                                      frame->nb_samples - src->nb_samples,
                                      s->channels, s->sample_fmt)) < 0)
        goto fail;

    return 0;

fail:
    av_frame_unref(frame);
    return ret;
}

int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                            const AVSubtitle *sub)
{
    int ret;
    if (sub->start_display_time) {
        av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
        return -1;
    }

    ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
    avctx->frame_number++;
    return ret;
}
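
/* Called by encoders to fetch the next buffered input frame: returns
 * AVERROR_EOF once draining has started, AVERROR(EAGAIN) if no frame has
 * been submitted yet, and otherwise moves the buffered frame into `frame`. */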
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;

    if (avci->draining)
        return AVERROR_EOF;

    if (!avci->buffer_frame->buf[0])
        return AVERROR(EAGAIN);

    av_frame_move_ref(frame, avci->buffer_frame);

    return 0;
}
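
/* One step of the encode2()-based encoding path: fetch an input frame (or a
 * NULL flush frame when draining), call the codec's encode2() (possibly via
 * the frame-threading wrapper) and fill in missing packet timestamps, the
 * audio packet duration and the keyframe flag. */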
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    EncodeSimpleContext *es = &avci->es;
    AVFrame *frame = es->in_frame;
    int got_packet;
    int ret;

    if (avci->draining_done)
        return AVERROR_EOF;

    if (!frame->buf[0] && !avci->draining) {
        av_frame_unref(frame);
        ret = ff_encode_get_frame(avctx, frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (!frame->buf[0]) {
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
              (avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
            return AVERROR_EOF;

        // Flushing is signaled with a NULL frame
        frame = NULL;
    }

    got_packet = 0;

    av_assert0(avctx->codec->encode2);

    if (CONFIG_FRAME_THREAD_ENCODER &&
        avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
        /* This might modify frame, but it doesn't matter, because
         * the frame properties used below are not used for video
         * (due to the delay inherent in frame threaded encoding, it makes
         * no sense to use the properties of the current frame anyway). */
        ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
    else {
        ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
        if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
            !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            avpkt->pts = avpkt->dts = frame->pts;
    }

    av_assert0(ret <= 0);

    emms_c();

    if (!ret && got_packet) {
        if (avpkt->data) {
            ret = av_packet_make_refcounted(avpkt);
            if (ret < 0)
                goto end;
        }

        if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
            if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
                if (avpkt->pts == AV_NOPTS_VALUE)
                    avpkt->pts = frame->pts;
                if (!avpkt->duration)
                    avpkt->duration = ff_samples_to_time_base(avctx,
                                                              frame->nb_samples);
            }
        }
        if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
            /* NOTE: if we add any audio encoders which output non-keyframe packets,
             * this needs to be moved to the encoders, but for now we can do it
             * here to simplify things */
            avpkt->flags |= AV_PKT_FLAG_KEY;
            avpkt->dts = avpkt->pts;
        }
    }

    if (avci->draining && !got_packet)
        avci->draining_done = 1;

end:
    if (ret < 0 || !got_packet)
        av_packet_unref(avpkt);

    if (frame) {
        if (!ret)
            avctx->frame_number++;
        av_frame_unref(frame);
    }

    if (got_packet)
        // Encoders must always return ref-counted buffers.
        // Side-data only packets have no data and can be not ref-counted.
        av_assert0(!avpkt->data || avpkt->buf);

    return ret;
}
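
/* Keep calling encode_simple_internal() until it produces a packet with data
 * or side data, or fails. */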
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    int ret;

    while (!avpkt->data && !avpkt->side_data) {
        ret = encode_simple_internal(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    return 0;
}
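
/* Common packet-retrieval path behind avcodec_send_frame() and
 * avcodec_receive_packet(): dispatches to the codec's receive_packet()
 * callback when present, otherwise to the encode2()-based simple path,
 * and records when draining has finished. */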
static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (avci->draining_done)
        return AVERROR_EOF;

    av_assert0(!avpkt->data && !avpkt->side_data);

    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
            avctx->stats_out[0] = '\0';
        if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
            return AVERROR(EINVAL);
    }

    if (avctx->codec->receive_packet) {
        ret = avctx->codec->receive_packet(avctx, avpkt);
        if (ret < 0)
            av_packet_unref(avpkt);
        else
            // Encoders must always return ref-counted buffers.
            // Side-data only packets have no data and can be not ref-counted.
            av_assert0(!avpkt->data || avpkt->buf);
    } else
        ret = encode_simple_receive_packet(avctx, avpkt);

    if (ret == AVERROR_EOF)
        avci->draining_done = 1;

    return ret;
}
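
/* Validate the sample count of an incoming audio frame against frame_size,
 * padding an undersized last frame with silence where required, then
 * reference the frame into the internal buffer_frame. */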
static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
{
    AVCodecInternal *avci = avctx->internal;
    AVFrame *dst = avci->buffer_frame;
    int ret;

    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
        /* extract audio service type metadata */
        AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
        if (sd && sd->size >= sizeof(enum AVAudioServiceType))
            avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;

        /* check for valid frame size */
        if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
            if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
                return AVERROR(EINVAL);
            }
        } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
            /* if we already got an undersized frame, that must have been the last */
            if (avctx->internal->last_audio_frame) {
                av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
                return AVERROR(EINVAL);
            }

            if (src->nb_samples < avctx->frame_size) {
                ret = pad_last_frame(avctx, dst, src);
                if (ret < 0)
                    return ret;

                avctx->internal->last_audio_frame = 1;
            } else if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
                return AVERROR(EINVAL);
            }
        }
    }

    if (!dst->data[0]) {
        ret = av_frame_ref(dst, src);
        if (ret < 0)
            return ret;
    }

    return 0;
}
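
/* Send half of the public encode API: buffer one input frame (or enter
 * draining mode on a NULL frame) and, when the internal packet slot is free,
 * eagerly pull the next packet into it. Callers typically alternate
 * avcodec_send_frame() with avcodec_receive_packet() until EAGAIN/EOF. */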
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->draining)
        return AVERROR_EOF;

    if (avci->buffer_frame->data[0])
        return AVERROR(EAGAIN);

    if (!frame) {
        avci->draining = 1;
    } else {
        ret = encode_send_frame_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
        ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}
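
/* Receive half of the public encode API: hand out the packet buffered by
 * avcodec_send_frame() if one is pending, otherwise ask the encoder for a
 * new one. */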
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_packet_unref(avpkt);

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
        av_packet_move_ref(avpkt, avci->buffer_pkt);
    } else {
        ret = encode_receive_packet_internal(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    return 0;
}
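
/* compat_encode() implements the deprecated avcodec_encode_audio2() /
 * avcodec_encode_video2() API on top of avcodec_send_frame() and
 * avcodec_receive_packet(), copying the output into a caller-supplied
 * packet buffer when one was provided. */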
#if FF_API_OLD_ENCDEC
static int compat_encode(AVCodecContext *avctx, AVPacket *avpkt,
                         int *got_packet, const AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    AVPacket user_pkt;
    int ret;

    *got_packet = 0;

    if (frame && avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        if (frame->format == AV_PIX_FMT_NONE)
            av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
        if (frame->width == 0 || frame->height == 0)
            av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
    }

    if (avctx->codec->capabilities & AV_CODEC_CAP_DR1) {
        av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* API does not support "
                                      "AV_CODEC_CAP_DR1 encoders\n");
        return AVERROR(ENOSYS);
    }

    ret = avcodec_send_frame(avctx, frame);
    if (ret == AVERROR_EOF)
        ret = 0;
    else if (ret == AVERROR(EAGAIN)) {
        /* we fully drain all the output in each encode call, so this should not
         * ever happen */
        return AVERROR_BUG;
    } else if (ret < 0)
        return ret;

    av_packet_move_ref(&user_pkt, avpkt);
    while (ret >= 0) {
        ret = avcodec_receive_packet(avctx, avpkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            goto finish;
        }

        if (avpkt != avci->compat_encode_packet) {
            if (avpkt->data && user_pkt.data) {
                if (user_pkt.size >= avpkt->size) {
                    memcpy(user_pkt.data, avpkt->data, avpkt->size);
                    av_buffer_unref(&avpkt->buf);
                    avpkt->buf  = user_pkt.buf;
                    avpkt->data = user_pkt.data;
FF_DISABLE_DEPRECATION_WARNINGS
                    av_init_packet(&user_pkt);
FF_ENABLE_DEPRECATION_WARNINGS
                } else {
                    av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
                    av_packet_unref(avpkt);
                    ret = AVERROR(EINVAL);
                    goto finish;
                }
            }

            *got_packet = 1;
            avpkt = avci->compat_encode_packet;
        } else {
            if (!avci->compat_decode_warned) {
                av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* "
                       "API cannot return all the packets for this encoder. "
                       "Some packets will be dropped. Update your code to the "
                       "new encoding API to fix this.\n");
                avci->compat_decode_warned = 1;
                av_packet_unref(avpkt);
            }
        }

        if (avci->draining)
            break;
    }

finish:
    if (ret < 0)
        av_packet_unref(&user_pkt);

    return ret;
}

int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);

    if (ret < 0)
        av_packet_unref(avpkt);

    return ret;
}

int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);

    if (ret < 0)
        av_packet_unref(avpkt);

    return ret;
}
#endif
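
/* Sanity-check and fix up encoder parameters (sample/pixel formats, rates,
 * channel layouts, dimensions, timebase, hw frames context) before the
 * codec's init function runs. */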
int ff_encode_preinit(AVCodecContext *avctx)
{
    int i;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        return AVERROR(ENOMEM);
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
        av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
        return AVERROR(EINVAL);
    }

    if (avctx->codec->sample_fmts) {
        for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
            if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
                break;
            if (avctx->channels == 1 &&
                av_get_planar_sample_fmt(avctx->sample_fmt) ==
                av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
                avctx->sample_fmt = avctx->codec->sample_fmts[i];
                break;
            }
        }
        if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
            char buf[128];
            snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
            av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
                   (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
            return AVERROR(EINVAL);
        }
    }
    if (avctx->codec->pix_fmts) {
        for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
            if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
                break;
        if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
            char buf[128];
            snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
            av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
                   (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
            return AVERROR(EINVAL);
        }
        if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
            avctx->color_range = AVCOL_RANGE_JPEG;
    }
    if (avctx->codec->supported_samplerates) {
        for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
            if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
                break;
        if (avctx->codec->supported_samplerates[i] == 0) {
            av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
                   avctx->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    if (avctx->sample_rate < 0) {
        av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
               avctx->sample_rate);
        return AVERROR(EINVAL);
    }
    if (avctx->codec->channel_layouts) {
        if (!avctx->channel_layout) {
            av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
        } else {
            for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
                if (avctx->channel_layout == avctx->codec->channel_layouts[i])
                    break;
            if (avctx->codec->channel_layouts[i] == 0) {
                char buf[512];
                av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
                av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
                return AVERROR(EINVAL);
            }
        }
    }
    if (avctx->channel_layout && avctx->channels) {
        int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
        if (channels != avctx->channels) {
            char buf[512];
            av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
            av_log(avctx, AV_LOG_ERROR,
                   "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
                   buf, channels, avctx->channels);
            return AVERROR(EINVAL);
        }
    } else if (avctx->channel_layout) {
        avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
    }
    if (avctx->channels < 0) {
        av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
               avctx->channels);
        return AVERROR(EINVAL);
    }
    if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
        if (    avctx->bits_per_raw_sample < 0
            || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
            av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
                   avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
            avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
        }
        if (avctx->width <= 0 || avctx->height <= 0) {
            av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
            return AVERROR(EINVAL);
        }
    }
    if (   (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
        && avctx->bit_rate > 0 && avctx->bit_rate < 1000) {
        av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
    }

    if (!avctx->rc_initial_buffer_occupancy)
        avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;

    if (avctx->ticks_per_frame && avctx->time_base.num &&
        avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR,
               "ticks_per_frame %d too large for the timebase %d/%d.",
               avctx->ticks_per_frame,
               avctx->time_base.num,
               avctx->time_base.den);
        return AVERROR(EINVAL);
    }

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        if (frames_ctx->format != avctx->pix_fmt) {
            av_log(avctx, AV_LOG_ERROR,
                   "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
            return AVERROR(EINVAL);
        }
        if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
            avctx->sw_pix_fmt != frames_ctx->sw_format) {
            av_log(avctx, AV_LOG_ERROR,
                   "Mismatching AVCodecContext.sw_pix_fmt (%s) "
                   "and AVHWFramesContext.sw_format (%s)\n",
                   av_get_pix_fmt_name(avctx->sw_pix_fmt),
                   av_get_pix_fmt_name(frames_ctx->sw_format));
            return AVERROR(EINVAL);
        }
        avctx->sw_pix_fmt = frames_ctx->sw_format;
    }

    return 0;
}