Line | Source | Branch | Exec |
---|---|---|---|
1 | /* | ||
2 | * This file is part of FFmpeg. | ||
3 | * | ||
4 | * FFmpeg is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU Lesser General Public | ||
6 | * License as published by the Free Software Foundation; either | ||
7 | * version 2.1 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * FFmpeg is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * Lesser General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU Lesser General Public | ||
15 | * License along with FFmpeg; if not, write to the Free Software | ||
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef FFTOOLS_FFMPEG_H | ||
20 | #define FFTOOLS_FFMPEG_H | ||
21 | |||
22 | #include "config.h" | ||
23 | |||
24 | #include <stdatomic.h> | ||
25 | #include <stdint.h> | ||
26 | #include <stdio.h> | ||
27 | #include <signal.h> | ||
28 | |||
29 | #include "cmdutils.h" | ||
30 | #include "sync_queue.h" | ||
31 | |||
32 | #include "libavformat/avformat.h" | ||
33 | #include "libavformat/avio.h" | ||
34 | |||
35 | #include "libavcodec/avcodec.h" | ||
36 | #include "libavcodec/bsf.h" | ||
37 | |||
38 | #include "libavfilter/avfilter.h" | ||
39 | |||
40 | #include "libavutil/avutil.h" | ||
41 | #include "libavutil/dict.h" | ||
42 | #include "libavutil/eval.h" | ||
43 | #include "libavutil/fifo.h" | ||
44 | #include "libavutil/hwcontext.h" | ||
45 | #include "libavutil/pixfmt.h" | ||
46 | #include "libavutil/rational.h" | ||
47 | #include "libavutil/thread.h" | ||
48 | #include "libavutil/threadmessage.h" | ||
49 | |||
50 | #include "libswresample/swresample.h" | ||
51 | |||
52 | // deprecated features | ||
53 | #define FFMPEG_OPT_PSNR 1 | ||
54 | #define FFMPEG_OPT_MAP_CHANNEL 1 | ||
55 | #define FFMPEG_OPT_MAP_SYNC 1 | ||
56 | #define FFMPEG_ROTATION_METADATA 1 | ||
57 | #define FFMPEG_OPT_QPHIST 1 | ||
58 | #define FFMPEG_OPT_ADRIFT_THRESHOLD 1 | ||
59 | |||
60 | enum VideoSyncMethod { | ||
61 | VSYNC_AUTO = -1, | ||
62 | VSYNC_PASSTHROUGH, | ||
63 | VSYNC_CFR, | ||
64 | VSYNC_VFR, | ||
65 | VSYNC_VSCFR, | ||
66 | VSYNC_DROP, | ||
67 | }; | ||
68 | |||
69 | #define MAX_STREAMS 1024 /* arbitrary sanity check value */ | ||
70 | |||
71 | enum HWAccelID { | ||
72 | HWACCEL_NONE = 0, | ||
73 | HWACCEL_AUTO, | ||
74 | HWACCEL_GENERIC, | ||
75 | }; | ||
76 | |||
77 | typedef struct HWDevice { | ||
78 | const char *name; | ||
79 | enum AVHWDeviceType type; | ||
80 | AVBufferRef *device_ref; | ||
81 | } HWDevice; | ||
82 | |||
83 | /* select an input stream for an output stream */ | ||
84 | typedef struct StreamMap { | ||
85 | int disabled; /* 1 if this mapping is disabled by a negative map */ | ||
86 | int file_index; | ||
87 | int stream_index; | ||
88 | char *linklabel; /* name of an output link, for mapping lavfi outputs */ | ||
89 | } StreamMap; | ||
90 | |||
91 | #if FFMPEG_OPT_MAP_CHANNEL | ||
92 | typedef struct { | ||
93 | int file_idx, stream_idx, channel_idx; // input | ||
94 | int ofile_idx, ostream_idx; // output | ||
95 | } AudioChannelMap; | ||
96 | #endif | ||
97 | |||
98 | typedef struct DemuxPktData { | ||
99 | // estimated dts in AV_TIME_BASE_Q, | ||
100 | // to be used when real dts is missing | ||
101 | int64_t dts_est; | ||
102 | } DemuxPktData; | ||
103 | |||
104 | typedef struct OptionsContext { | ||
105 | OptionGroup *g; | ||
106 | |||
107 | /* input/output options */ | ||
108 | int64_t start_time; | ||
109 | int64_t start_time_eof; | ||
110 | int seek_timestamp; | ||
111 | const char *format; | ||
112 | |||
113 | SpecifierOpt *codec_names; | ||
114 | int nb_codec_names; | ||
115 | SpecifierOpt *audio_ch_layouts; | ||
116 | int nb_audio_ch_layouts; | ||
117 | SpecifierOpt *audio_channels; | ||
118 | int nb_audio_channels; | ||
119 | SpecifierOpt *audio_sample_rate; | ||
120 | int nb_audio_sample_rate; | ||
121 | SpecifierOpt *frame_rates; | ||
122 | int nb_frame_rates; | ||
123 | SpecifierOpt *max_frame_rates; | ||
124 | int nb_max_frame_rates; | ||
125 | SpecifierOpt *frame_sizes; | ||
126 | int nb_frame_sizes; | ||
127 | SpecifierOpt *frame_pix_fmts; | ||
128 | int nb_frame_pix_fmts; | ||
129 | |||
130 | /* input options */ | ||
131 | int64_t input_ts_offset; | ||
132 | int loop; | ||
133 | int rate_emu; | ||
134 | float readrate; | ||
135 | double readrate_initial_burst; | ||
136 | int accurate_seek; | ||
137 | int thread_queue_size; | ||
138 | int input_sync_ref; | ||
139 | int find_stream_info; | ||
140 | |||
141 | SpecifierOpt *ts_scale; | ||
142 | int nb_ts_scale; | ||
143 | SpecifierOpt *dump_attachment; | ||
144 | int nb_dump_attachment; | ||
145 | SpecifierOpt *hwaccels; | ||
146 | int nb_hwaccels; | ||
147 | SpecifierOpt *hwaccel_devices; | ||
148 | int nb_hwaccel_devices; | ||
149 | SpecifierOpt *hwaccel_output_formats; | ||
150 | int nb_hwaccel_output_formats; | ||
151 | SpecifierOpt *autorotate; | ||
152 | int nb_autorotate; | ||
153 | |||
154 | /* output options */ | ||
155 | StreamMap *stream_maps; | ||
156 | int nb_stream_maps; | ||
157 | #if FFMPEG_OPT_MAP_CHANNEL | ||
158 | AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */ | ||
159 | int nb_audio_channel_maps; /* number of (valid) -map_channel settings */ | ||
160 | #endif | ||
161 | const char **attachments; | ||
162 | int nb_attachments; | ||
163 | |||
164 | int chapters_input_file; | ||
165 | |||
166 | int64_t recording_time; | ||
167 | int64_t stop_time; | ||
168 | int64_t limit_filesize; | ||
169 | float mux_preload; | ||
170 | float mux_max_delay; | ||
171 | float shortest_buf_duration; | ||
172 | int shortest; | ||
173 | int bitexact; | ||
174 | |||
175 | int video_disable; | ||
176 | int audio_disable; | ||
177 | int subtitle_disable; | ||
178 | int data_disable; | ||
179 | |||
180 | /* indexed by output file stream index */ | ||
181 | int *streamid_map; | ||
182 | int nb_streamid_map; | ||
183 | |||
184 | SpecifierOpt *metadata; | ||
185 | int nb_metadata; | ||
186 | SpecifierOpt *max_frames; | ||
187 | int nb_max_frames; | ||
188 | SpecifierOpt *bitstream_filters; | ||
189 | int nb_bitstream_filters; | ||
190 | SpecifierOpt *codec_tags; | ||
191 | int nb_codec_tags; | ||
192 | SpecifierOpt *sample_fmts; | ||
193 | int nb_sample_fmts; | ||
194 | SpecifierOpt *qscale; | ||
195 | int nb_qscale; | ||
196 | SpecifierOpt *forced_key_frames; | ||
197 | int nb_forced_key_frames; | ||
198 | SpecifierOpt *fps_mode; | ||
199 | int nb_fps_mode; | ||
200 | SpecifierOpt *force_fps; | ||
201 | int nb_force_fps; | ||
202 | SpecifierOpt *frame_aspect_ratios; | ||
203 | int nb_frame_aspect_ratios; | ||
204 | SpecifierOpt *display_rotations; | ||
205 | int nb_display_rotations; | ||
206 | SpecifierOpt *display_hflips; | ||
207 | int nb_display_hflips; | ||
208 | SpecifierOpt *display_vflips; | ||
209 | int nb_display_vflips; | ||
210 | SpecifierOpt *rc_overrides; | ||
211 | int nb_rc_overrides; | ||
212 | SpecifierOpt *intra_matrices; | ||
213 | int nb_intra_matrices; | ||
214 | SpecifierOpt *inter_matrices; | ||
215 | int nb_inter_matrices; | ||
216 | SpecifierOpt *chroma_intra_matrices; | ||
217 | int nb_chroma_intra_matrices; | ||
218 | SpecifierOpt *top_field_first; | ||
219 | int nb_top_field_first; | ||
220 | SpecifierOpt *metadata_map; | ||
221 | int nb_metadata_map; | ||
222 | SpecifierOpt *presets; | ||
223 | int nb_presets; | ||
224 | SpecifierOpt *copy_initial_nonkeyframes; | ||
225 | int nb_copy_initial_nonkeyframes; | ||
226 | SpecifierOpt *copy_prior_start; | ||
227 | int nb_copy_prior_start; | ||
228 | SpecifierOpt *filters; | ||
229 | int nb_filters; | ||
230 | SpecifierOpt *filter_scripts; | ||
231 | int nb_filter_scripts; | ||
232 | SpecifierOpt *reinit_filters; | ||
233 | int nb_reinit_filters; | ||
234 | SpecifierOpt *fix_sub_duration; | ||
235 | int nb_fix_sub_duration; | ||
236 | SpecifierOpt *fix_sub_duration_heartbeat; | ||
237 | int nb_fix_sub_duration_heartbeat; | ||
238 | SpecifierOpt *canvas_sizes; | ||
239 | int nb_canvas_sizes; | ||
240 | SpecifierOpt *pass; | ||
241 | int nb_pass; | ||
242 | SpecifierOpt *passlogfiles; | ||
243 | int nb_passlogfiles; | ||
244 | SpecifierOpt *max_muxing_queue_size; | ||
245 | int nb_max_muxing_queue_size; | ||
246 | SpecifierOpt *muxing_queue_data_threshold; | ||
247 | int nb_muxing_queue_data_threshold; | ||
248 | SpecifierOpt *guess_layout_max; | ||
249 | int nb_guess_layout_max; | ||
250 | SpecifierOpt *apad; | ||
251 | int nb_apad; | ||
252 | SpecifierOpt *discard; | ||
253 | int nb_discard; | ||
254 | SpecifierOpt *disposition; | ||
255 | int nb_disposition; | ||
256 | SpecifierOpt *program; | ||
257 | int nb_program; | ||
258 | SpecifierOpt *time_bases; | ||
259 | int nb_time_bases; | ||
260 | SpecifierOpt *enc_time_bases; | ||
261 | int nb_enc_time_bases; | ||
262 | SpecifierOpt *autoscale; | ||
263 | int nb_autoscale; | ||
264 | SpecifierOpt *bits_per_raw_sample; | ||
265 | int nb_bits_per_raw_sample; | ||
266 | SpecifierOpt *enc_stats_pre; | ||
267 | int nb_enc_stats_pre; | ||
268 | SpecifierOpt *enc_stats_post; | ||
269 | int nb_enc_stats_post; | ||
270 | SpecifierOpt *mux_stats; | ||
271 | int nb_mux_stats; | ||
272 | SpecifierOpt *enc_stats_pre_fmt; | ||
273 | int nb_enc_stats_pre_fmt; | ||
274 | SpecifierOpt *enc_stats_post_fmt; | ||
275 | int nb_enc_stats_post_fmt; | ||
276 | SpecifierOpt *mux_stats_fmt; | ||
277 | int nb_mux_stats_fmt; | ||
278 | } OptionsContext; | ||
279 | |||
280 | typedef struct InputFilter { | ||
281 | struct FilterGraph *graph; | ||
282 | uint8_t *name; | ||
283 | } InputFilter; | ||
284 | |||
285 | typedef struct OutputFilter { | ||
286 | AVFilterContext *filter; | ||
287 | struct OutputStream *ost; | ||
288 | struct FilterGraph *graph; | ||
289 | uint8_t *name; | ||
290 | |||
291 | /* for filters that are not yet bound to an output stream, | ||
292 | * this stores the output linklabel, if any */ | ||
293 | uint8_t *linklabel; | ||
294 | |||
295 | enum AVMediaType type; | ||
296 | |||
297 | /* desired output stream properties */ | ||
298 | int width, height; | ||
299 | AVRational frame_rate; | ||
300 | int format; | ||
301 | int sample_rate; | ||
302 | AVChannelLayout ch_layout; | ||
303 | |||
304 | // these are only set if no format is specified and the encoder gives us multiple options; | ||
305 | // they point directly to the relevant lists of the encoder. | ||
306 | const int *formats; | ||
307 | const AVChannelLayout *ch_layouts; | ||
308 | const int *sample_rates; | ||
309 | |||
310 | /* pts of the last frame received from this filter, in AV_TIME_BASE_Q */ | ||
311 | int64_t last_pts; | ||
312 | } OutputFilter; | ||
313 | |||
314 | typedef struct FilterGraph { | ||
315 | int index; | ||
316 | |||
317 | AVFilterGraph *graph; | ||
318 | // true when the filtergraph contains only meta filters | ||
319 | // that do not modify the frame data | ||
320 | int is_meta; | ||
321 | |||
322 | InputFilter **inputs; | ||
323 | int nb_inputs; | ||
324 | OutputFilter **outputs; | ||
325 | int nb_outputs; | ||
326 | } FilterGraph; | ||
327 | |||
328 | typedef struct Decoder Decoder; | ||
329 | |||
330 | typedef struct InputStream { | ||
331 | const AVClass *class; | ||
332 | |||
333 | int file_index; | ||
334 | int index; | ||
335 | |||
336 | AVStream *st; | ||
337 | int discard; /* true if stream data should be discarded */ | ||
338 | int user_set_discard; | ||
339 | int decoding_needed; /* non zero if the packets must be decoded in 'raw_fifo', see DECODING_FOR_* */ | ||
340 | #define DECODING_FOR_OST 1 | ||
341 | #define DECODING_FOR_FILTER 2 | ||
342 | // should attach FrameData as opaque_ref after decoding | ||
343 | int want_frame_data; | ||
344 | |||
345 | /** | ||
346 | * Codec parameters - to be used by the decoding/streamcopy code. | ||
347 | * st->codecpar should not be accessed, because it may be modified | ||
348 | * concurrently by the demuxing thread. | ||
349 | */ | ||
350 | AVCodecParameters *par; | ||
351 | Decoder *decoder; | ||
352 | AVCodecContext *dec_ctx; | ||
353 | const AVCodec *dec; | ||
354 | const AVCodecDescriptor *codec_desc; | ||
355 | |||
356 | AVRational framerate_guessed; | ||
357 | |||
358 | int64_t nb_samples; /* number of samples in the last decoded audio frame before looping */ | ||
359 | |||
360 | AVDictionary *decoder_opts; | ||
361 | AVRational framerate; /* framerate forced with -r */ | ||
362 | int top_field_first; | ||
363 | |||
364 | int autorotate; | ||
365 | |||
366 | int fix_sub_duration; | ||
367 | struct { /* previous decoded subtitle and related variables */ | ||
368 | int got_output; | ||
369 | int ret; | ||
370 | AVSubtitle subtitle; | ||
371 | } prev_sub; | ||
372 | |||
373 | struct sub2video { | ||
374 | int w, h; | ||
375 | } sub2video; | ||
376 | |||
377 | /* decoded data from this stream goes into all these filters; | ||
378 | * currently video and audio only */ | ||
379 | InputFilter **filters; | ||
380 | int nb_filters; | ||
381 | |||
382 | /* | ||
383 | * Output targets that do not go through lavfi, i.e. subtitles or | ||
384 | * streamcopy. Those two cases are distinguished by the OutputStream | ||
385 | * having an encoder or not. | ||
386 | */ | ||
387 | struct OutputStream **outputs; | ||
388 | int nb_outputs; | ||
389 | |||
390 | int reinit_filters; | ||
391 | |||
392 | /* hwaccel options */ | ||
393 | enum HWAccelID hwaccel_id; | ||
394 | enum AVHWDeviceType hwaccel_device_type; | ||
395 | char *hwaccel_device; | ||
396 | enum AVPixelFormat hwaccel_output_format; | ||
397 | |||
398 | int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame); | ||
399 | enum AVPixelFormat hwaccel_pix_fmt; | ||
400 | |||
401 | /* stats */ | ||
402 | // number of frames/samples retrieved from the decoder | ||
403 | uint64_t frames_decoded; | ||
404 | uint64_t samples_decoded; | ||
405 | uint64_t decode_errors; | ||
406 | } InputStream; | ||
407 | |||
408 | typedef struct LastFrameDuration { | ||
409 | int stream_idx; | ||
410 | int64_t duration; | ||
411 | } LastFrameDuration; | ||
412 | |||
413 | typedef struct InputFile { | ||
414 | const AVClass *class; | ||
415 | |||
416 | int index; | ||
417 | |||
418 | // input format has no timestamps | ||
419 | int format_nots; | ||
420 | |||
421 | AVFormatContext *ctx; | ||
422 | int eof_reached; /* true if eof reached */ | ||
423 | int eagain; /* true if last read attempt returned EAGAIN */ | ||
424 | int64_t input_ts_offset; | ||
425 | int input_sync_ref; | ||
426 | /** | ||
427 | * Effective format start time based on enabled streams. | ||
428 | */ | ||
429 | int64_t start_time_effective; | ||
430 | int64_t ts_offset; | ||
431 | int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */ | ||
432 | int64_t recording_time; | ||
433 | |||
434 | /* streams that ffmpeg is aware of; | ||
435 | * there may be extra streams in ctx that are not mapped to an InputStream | ||
436 | * if new streams appear dynamically during demuxing */ | ||
437 | InputStream **streams; | ||
438 | int nb_streams; | ||
439 | |||
440 | float readrate; | ||
441 | int accurate_seek; | ||
442 | |||
443 | /* when looping the input file, this queue is used by decoders to report | ||
444 | * the last frame duration back to the demuxer thread */ | ||
445 | AVThreadMessageQueue *audio_duration_queue; | ||
446 | int audio_duration_queue_size; | ||
447 | } InputFile; | ||
448 | |||
449 | enum forced_keyframes_const { | ||
450 | FKF_N, | ||
451 | FKF_N_FORCED, | ||
452 | FKF_PREV_FORCED_N, | ||
453 | FKF_PREV_FORCED_T, | ||
454 | FKF_T, | ||
455 | FKF_NB | ||
456 | }; | ||
457 | |||
458 | #define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0) | ||
459 | #define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM (1 << 1) | ||
460 | |||
461 | enum EncStatsType { | ||
462 | ENC_STATS_LITERAL = 0, | ||
463 | ENC_STATS_FILE_IDX, | ||
464 | ENC_STATS_STREAM_IDX, | ||
465 | ENC_STATS_FRAME_NUM, | ||
466 | ENC_STATS_FRAME_NUM_IN, | ||
467 | ENC_STATS_TIMEBASE, | ||
468 | ENC_STATS_TIMEBASE_IN, | ||
469 | ENC_STATS_PTS, | ||
470 | ENC_STATS_PTS_TIME, | ||
471 | ENC_STATS_PTS_IN, | ||
472 | ENC_STATS_PTS_TIME_IN, | ||
473 | ENC_STATS_DTS, | ||
474 | ENC_STATS_DTS_TIME, | ||
475 | ENC_STATS_SAMPLE_NUM, | ||
476 | ENC_STATS_NB_SAMPLES, | ||
477 | ENC_STATS_PKT_SIZE, | ||
478 | ENC_STATS_BITRATE, | ||
479 | ENC_STATS_AVG_BITRATE, | ||
480 | }; | ||
481 | |||
482 | typedef struct EncStatsComponent { | ||
483 | enum EncStatsType type; | ||
484 | |||
485 | uint8_t *str; | ||
486 | size_t str_len; | ||
487 | } EncStatsComponent; | ||
488 | |||
489 | typedef struct EncStats { | ||
490 | EncStatsComponent *components; | ||
491 | int nb_components; | ||
492 | |||
493 | AVIOContext *io; | ||
494 | } EncStats; | ||
495 | |||
496 | extern const char *const forced_keyframes_const_names[]; | ||
497 | |||
498 | typedef enum { | ||
499 | ENCODER_FINISHED = 1, | ||
500 | MUXER_FINISHED = 2, | ||
501 | } OSTFinished ; | ||
502 | |||
503 | enum { | ||
504 | KF_FORCE_SOURCE = 1, | ||
505 | KF_FORCE_SOURCE_NO_DROP = 2, | ||
506 | }; | ||
507 | |||
508 | typedef struct KeyframeForceCtx { | ||
509 | int type; | ||
510 | |||
511 | int64_t ref_pts; | ||
512 | |||
513 | // timestamps of the forced keyframes, in AV_TIME_BASE_Q | ||
514 | int64_t *pts; | ||
515 | int nb_pts; | ||
516 | int index; | ||
517 | |||
518 | AVExpr *pexpr; | ||
519 | double expr_const_values[FKF_NB]; | ||
520 | |||
521 | int dropped_keyframe; | ||
522 | } KeyframeForceCtx; | ||
523 | |||
524 | typedef struct Encoder Encoder; | ||
525 | |||
526 | typedef struct OutputStream { | ||
527 | const AVClass *class; | ||
528 | |||
529 | enum AVMediaType type; | ||
530 | |||
531 | int file_index; /* file index */ | ||
532 | int index; /* stream index in the output file */ | ||
533 | |||
534 | /** | ||
535 | * Codec parameters for packets submitted to the muxer (i.e. before | ||
536 | * bitstream filtering, if any). | ||
537 | */ | ||
538 | AVCodecParameters *par_in; | ||
539 | |||
540 | /* input stream that is the source for this output stream; | ||
541 | * may be NULL for streams with no well-defined source, e.g. | ||
542 | * attachments or outputs from complex filtergraphs */ | ||
543 | InputStream *ist; | ||
544 | |||
545 | AVStream *st; /* stream in the output file */ | ||
546 | /* dts of the last packet sent to the muxing queue, in AV_TIME_BASE_Q */ | ||
547 | int64_t last_mux_dts; | ||
548 | |||
549 | // the timebase of the packets sent to the muxer | ||
550 | AVRational mux_timebase; | ||
551 | AVRational enc_timebase; | ||
552 | |||
553 | Encoder *enc; | ||
554 | AVCodecContext *enc_ctx; | ||
555 | AVPacket *pkt; | ||
556 | int64_t last_dropped; | ||
557 | |||
558 | /* video only */ | ||
559 | AVRational frame_rate; | ||
560 | AVRational max_frame_rate; | ||
561 | enum VideoSyncMethod vsync_method; | ||
562 | int is_cfr; | ||
563 | int force_fps; | ||
564 | int top_field_first; | ||
565 | #if FFMPEG_ROTATION_METADATA | ||
566 | int rotate_overridden; | ||
567 | #endif | ||
568 | int autoscale; | ||
569 | int bitexact; | ||
570 | int bits_per_raw_sample; | ||
571 | #if FFMPEG_ROTATION_METADATA | ||
572 | double rotate_override_value; | ||
573 | #endif | ||
574 | |||
575 | AVRational frame_aspect_ratio; | ||
576 | |||
577 | KeyframeForceCtx kf; | ||
578 | |||
579 | /* audio only */ | ||
580 | #if FFMPEG_OPT_MAP_CHANNEL | ||
581 | int *audio_channels_map; /* list of the channels id to pick from the source stream */ | ||
582 | int audio_channels_mapped; /* number of channels in audio_channels_map */ | ||
583 | #endif | ||
584 | |||
585 | char *logfile_prefix; | ||
586 | FILE *logfile; | ||
587 | |||
588 | OutputFilter *filter; | ||
589 | |||
590 | AVDictionary *encoder_opts; | ||
591 | AVDictionary *sws_dict; | ||
592 | AVDictionary *swr_opts; | ||
593 | char *apad; | ||
594 | OSTFinished finished; /* no more packets should be written for this stream */ | ||
595 | int unavailable; /* true if the stream is unavailable (possibly temporarily) */ | ||
596 | |||
597 | // init_output_stream() has been called for this stream | ||
598 | // The encoder and the bitstream filters have been initialized and the stream | ||
599 | // parameters are set in the AVStream. | ||
600 | int initialized; | ||
601 | |||
602 | int inputs_done; | ||
603 | |||
604 | const char *attachment_filename; | ||
605 | |||
606 | int keep_pix_fmt; | ||
607 | |||
608 | /* stats */ | ||
609 | // number of packets sent to the muxer | ||
610 | atomic_uint_least64_t packets_written; | ||
611 | // number of frames/samples sent to the encoder | ||
612 | uint64_t frames_encoded; | ||
613 | uint64_t samples_encoded; | ||
614 | |||
615 | /* packet quality factor */ | ||
616 | int quality; | ||
617 | |||
618 | int sq_idx_encode; | ||
619 | int sq_idx_mux; | ||
620 | |||
621 | EncStats enc_stats_pre; | ||
622 | EncStats enc_stats_post; | ||
623 | |||
624 | /* | ||
625 | * whether this stream should be used for splitting | ||
626 | * subtitles with fix_sub_duration at random access points. | ||
627 | */ | ||
628 | unsigned int fix_sub_duration_heartbeat; | ||
629 | } OutputStream; | ||
630 | |||
631 | typedef struct OutputFile { | ||
632 | const AVClass *class; | ||
633 | |||
634 | int index; | ||
635 | |||
636 | const AVOutputFormat *format; | ||
637 | const char *url; | ||
638 | |||
639 | OutputStream **streams; | ||
640 | int nb_streams; | ||
641 | |||
642 | SyncQueue *sq_encode; | ||
643 | |||
644 | int64_t recording_time; ///< desired length of the resulting file in microseconds == AV_TIME_BASE units | ||
645 | int64_t start_time; ///< start time in microseconds == AV_TIME_BASE units | ||
646 | |||
647 | int shortest; | ||
648 | int bitexact; | ||
649 | } OutputFile; | ||
650 | |||
651 | // optionally attached as opaque_ref to decoded AVFrames | ||
652 | typedef struct FrameData { | ||
653 | uint64_t idx; | ||
654 | int64_t pts; | ||
655 | AVRational tb; | ||
656 | } FrameData; | ||
657 | |||
658 | extern InputFile **input_files; | ||
659 | extern int nb_input_files; | ||
660 | |||
661 | extern OutputFile **output_files; | ||
662 | extern int nb_output_files; | ||
663 | |||
664 | extern FilterGraph **filtergraphs; | ||
665 | extern int nb_filtergraphs; | ||
666 | |||
667 | extern char *vstats_filename; | ||
668 | extern char *sdp_filename; | ||
669 | |||
670 | extern float dts_delta_threshold; | ||
671 | extern float dts_error_threshold; | ||
672 | |||
673 | extern enum VideoSyncMethod video_sync_method; | ||
674 | extern float frame_drop_threshold; | ||
675 | extern int do_benchmark; | ||
676 | extern int do_benchmark_all; | ||
677 | extern int do_hex_dump; | ||
678 | extern int do_pkt_dump; | ||
679 | extern int copy_ts; | ||
680 | extern int start_at_zero; | ||
681 | extern int copy_tb; | ||
682 | extern int debug_ts; | ||
683 | extern int exit_on_error; | ||
684 | extern int abort_on_flags; | ||
685 | extern int print_stats; | ||
686 | extern int64_t stats_period; | ||
687 | extern int stdin_interaction; | ||
688 | extern AVIOContext *progress_avio; | ||
689 | extern float max_error_rate; | ||
690 | |||
691 | extern char *filter_nbthreads; | ||
692 | extern int filter_complex_nbthreads; | ||
693 | extern int vstats_version; | ||
694 | extern int auto_conversion_filters; | ||
695 | |||
696 | extern const AVIOInterruptCB int_cb; | ||
697 | |||
698 | extern const OptionDef options[]; | ||
699 | extern HWDevice *filter_hw_device; | ||
700 | |||
701 | extern unsigned nb_output_dumped; | ||
702 | |||
703 | extern int ignore_unknown_streams; | ||
704 | extern int copy_unknown_streams; | ||
705 | |||
706 | extern int recast_media; | ||
707 | |||
708 | extern FILE *vstats_file; | ||
709 | |||
710 | extern int64_t nb_frames_dup; | ||
711 | extern int64_t nb_frames_drop; | ||
712 | |||
713 | #if FFMPEG_OPT_PSNR | ||
714 | extern int do_psnr; | ||
715 | #endif | ||
716 | |||
717 | void term_init(void); | ||
718 | void term_exit(void); | ||
719 | |||
720 | void show_usage(void); | ||
721 | |||
722 | void remove_avoptions(AVDictionary **a, AVDictionary *b); | ||
723 | void assert_avoptions(AVDictionary *m); | ||
724 | |||
725 | void assert_file_overwrite(const char *filename); | ||
726 | char *file_read(const char *filename); | ||
727 | AVDictionary *strip_specifiers(const AVDictionary *dict); | ||
728 | const AVCodec *find_codec_or_die(void *logctx, const char *name, | ||
729 | enum AVMediaType type, int encoder); | ||
730 | int parse_and_set_vsync(const char *arg, int *vsync_var, int file_idx, int st_idx, int is_global); | ||
731 | |||
732 | int configure_filtergraph(FilterGraph *fg); | ||
733 | void check_filter_outputs(void); | ||
734 | int filtergraph_is_simple(const FilterGraph *fg); | ||
735 | int init_simple_filtergraph(InputStream *ist, OutputStream *ost, | ||
736 | char *graph_desc); | ||
737 | int init_complex_filtergraph(FilterGraph *fg); | ||
738 | |||
739 | int copy_av_subtitle(AVSubtitle *dst, const AVSubtitle *src); | ||
740 | |||
741 | int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference); | ||
742 | int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb); | ||
743 | int ifilter_sub2video(InputFilter *ifilter, const AVSubtitle *sub); | ||
744 | void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb); | ||
745 | |||
746 | /** | ||
747 | * Set up fallback filtering parameters from a decoder context. They will only | ||
748 | * be used if no frames are ever sent on this input; otherwise the actual | ||
749 | * parameters are taken from the frame. | ||
750 | */ | ||
751 | int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec); | ||
752 | |||
753 | void ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost); | ||
754 | |||
755 | /** | ||
756 | * Create a new filtergraph in the global filtergraph list. | ||
757 | * | ||
758 | * @param graph_desc Graph description; an av_malloc()ed string, filtergraph | ||
759 | * takes ownership of it. | ||
760 | */ | ||
761 | FilterGraph *fg_create(char *graph_desc); | ||
762 | |||
763 | void fg_free(FilterGraph **pfg); | ||
764 | |||
765 | /** | ||
766 | * Perform a step of transcoding for the specified filter graph. | ||
767 | * | ||
768 | * @param[in] graph filter graph to consider | ||
769 | * @param[out] best_ist input stream that needs a frame for transcoding to continue | ||
770 | * @return 0 for success, <0 for error | ||
771 | */ | ||
772 | int fg_transcode_step(FilterGraph *graph, InputStream **best_ist); | ||
773 | |||
774 | /** | ||
775 | * Get and encode new output from any of the filtergraphs, without causing | ||
776 | * activity. | ||
777 | * | ||
778 | * @return 0 for success, <0 for severe errors | ||
779 | */ | ||
780 | int reap_filters(int flush); | ||
781 | |||
782 | int ffmpeg_parse_options(int argc, char **argv); | ||
783 | |||
784 | void enc_stats_write(OutputStream *ost, EncStats *es, | ||
785 | const AVFrame *frame, const AVPacket *pkt, | ||
786 | uint64_t frame_num); | ||
787 | |||
788 | HWDevice *hw_device_get_by_name(const char *name); | ||
789 | HWDevice *hw_device_get_by_type(enum AVHWDeviceType type); | ||
790 | int hw_device_init_from_string(const char *arg, HWDevice **dev); | ||
791 | int hw_device_init_from_type(enum AVHWDeviceType type, | ||
792 | const char *device, | ||
793 | HWDevice **dev_out); | ||
794 | void hw_device_free_all(void); | ||
795 | |||
796 | /** | ||
797 | * Get a hardware device to be used with this filtergraph. | ||
798 | * The returned reference is owned by the callee; the caller | ||
799 | * must ref it explicitly for long-term use. | ||
800 | */ | ||
801 | AVBufferRef *hw_device_for_filter(void); | ||
802 | |||
803 | int hwaccel_decode_init(AVCodecContext *avctx); | ||
804 | |||
805 | int dec_open(InputStream *ist); | ||
806 | void dec_free(Decoder **pdec); | ||
807 | |||
808 | /** | ||
809 | * Submit a packet for decoding | ||
810 | * | ||
811 | * When pkt==NULL and no_eof=0, there will be no more input. Flush decoders and | ||
812 | * mark all downstreams as finished. | ||
813 | * | ||
814 | * When pkt==NULL and no_eof=1, the stream was reset (e.g. after a seek). Flush | ||
815 | * decoders and await further input. | ||
816 | */ | ||
817 | int dec_packet(InputStream *ist, const AVPacket *pkt, int no_eof); | ||
818 | |||
819 | int enc_alloc(Encoder **penc, const AVCodec *codec); | ||
820 | void enc_free(Encoder **penc); | ||
821 | |||
822 | int enc_open(OutputStream *ost, AVFrame *frame); | ||
823 | void enc_subtitle(OutputFile *of, OutputStream *ost, AVSubtitle *sub); | ||
824 | void enc_frame(OutputStream *ost, AVFrame *frame); | ||
825 | void enc_flush(void); | ||
826 | |||
827 | /* | ||
828 | * Initialize muxing state for the given stream; should be called | ||
829 | * after the codec/streamcopy setup has been done. | ||
830 | * | ||
831 | * Open the muxer once all the streams have been initialized. | ||
832 | */ | ||
833 | int of_stream_init(OutputFile *of, OutputStream *ost); | ||
834 | int of_write_trailer(OutputFile *of); | ||
835 | int of_open(const OptionsContext *o, const char *filename); | ||
836 | void of_close(OutputFile **pof); | ||
837 | |||
838 | void of_enc_stats_close(void); | ||
839 | |||
840 | /* | ||
841 | * Send a single packet to the output, applying any bitstream filters | ||
842 | * associated with the output stream. This may result in any number | ||
843 | * of packets actually being written, depending on what bitstream | ||
844 | * filters are applied. The supplied packet is consumed and will be | ||
845 | * blank (as if newly-allocated) when this function returns. | ||
846 | * | ||
847 | * If eof is set, instead indicate EOF to all bitstream filters and | ||
848 | * therefore flush any delayed packets to the output. A blank packet | ||
849 | * must be supplied in this case. | ||
850 | */ | ||
851 | void of_output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof); | ||
852 | |||
853 | /** | ||
854 | * @param dts predicted packet dts in AV_TIME_BASE_Q | ||
855 | */ | ||
856 | void of_streamcopy(OutputStream *ost, const AVPacket *pkt, int64_t dts); | ||
857 | |||
858 | int64_t of_filesize(OutputFile *of); | ||
859 | |||
860 | int ifile_open(const OptionsContext *o, const char *filename); | ||
861 | void ifile_close(InputFile **f); | ||
862 | |||
863 | /** | ||
864 | * Get next input packet from the demuxer. | ||
865 | * | ||
866 | * @param pkt the packet is written here when this function returns 0 | ||
867 | * @return | ||
868 | * - 0 when a packet has been read successfully | ||
869 | * - 1 when stream end was reached, but the stream is looped; | ||
870 | * caller should flush decoders and read from this demuxer again | ||
871 | * - a negative error code on failure | ||
872 | */ | ||
873 | int ifile_get_packet(InputFile *f, AVPacket **pkt); | ||
874 | |||
875 | int ist_output_add(InputStream *ist, OutputStream *ost); | ||
876 | int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple); | ||
877 | |||
878 | /** | ||
879 | * Find an unused input stream of given type. | ||
880 | */ | ||
881 | InputStream *ist_find_unused(enum AVMediaType type); | ||
882 | |||
883 | /* iterate over all input streams in all input files; | ||
884 | * pass NULL to start iteration */ | ||
885 | InputStream *ist_iter(InputStream *prev); | ||
886 | |||
887 | /* iterate over all output streams in all output files; | ||
888 | * pass NULL to start iteration */ | ||
889 | OutputStream *ost_iter(OutputStream *prev); | ||
890 | |||
891 | void close_output_stream(OutputStream *ost); | ||
892 | int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt); | ||
893 | int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output); | ||
894 | void update_benchmark(const char *fmt, ...); | ||
895 | |||
896 | /** | ||
897 | * Merge two return codes - return one of the error codes if at least one of | ||
898 | * them was negative, 0 otherwise. | ||
899 | * Currently just picks the first one; eventually we might want to do something | ||
900 | * more sophisticated, like sorting them by priority. | ||
901 | */ | ||
902 | static inline int err_merge(int err0, int err1) | | 13132 |
903 | { | ||
904 | return (err0 < 0) ? err0 : FFMIN(err1, 0); | 1/2 (branch 0 taken 13132 times, branch 1 not taken) | 13132 |
905 | } | ||
906 | |||
907 | #define SPECIFIER_OPT_FMT_str "%s" | ||
908 | #define SPECIFIER_OPT_FMT_i "%i" | ||
909 | #define SPECIFIER_OPT_FMT_i64 "%"PRId64 | ||
910 | #define SPECIFIER_OPT_FMT_ui64 "%"PRIu64 | ||
911 | #define SPECIFIER_OPT_FMT_f "%f" | ||
912 | #define SPECIFIER_OPT_FMT_dbl "%lf" | ||
913 | |||
914 | #define WARN_MULTIPLE_OPT_USAGE(name, type, so, st)\ | ||
915 | {\ | ||
916 | char namestr[128] = "";\ | ||
917 | const char *spec = so->specifier && so->specifier[0] ? so->specifier : "";\ | ||
918 | for (int _i = 0; opt_name_##name[_i]; _i++)\ | ||
919 | av_strlcatf(namestr, sizeof(namestr), "-%s%s", opt_name_##name[_i], opt_name_##name[_i+1] ? (opt_name_##name[_i+2] ? ", " : " or ") : "");\ | ||
920 | av_log(NULL, AV_LOG_WARNING, "Multiple %s options specified for stream %d, only the last option '-%s%s%s "SPECIFIER_OPT_FMT_##type"' will be used.\n",\ | ||
921 | namestr, st->index, opt_name_##name[0], spec[0] ? ":" : "", spec, so->u.type);\ | ||
922 | } | ||
923 | |||
924 | #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\ | ||
925 | {\ | ||
926 | int _ret, _matches = 0;\ | ||
927 | SpecifierOpt *so;\ | ||
928 | for (int _i = 0; _i < o->nb_ ## name; _i++) {\ | ||
929 | char *spec = o->name[_i].specifier;\ | ||
930 | if ((_ret = check_stream_specifier(fmtctx, st, spec)) > 0) {\ | ||
931 | outvar = o->name[_i].u.type;\ | ||
932 | so = &o->name[_i];\ | ||
933 | _matches++;\ | ||
934 | } else if (_ret < 0)\ | ||
935 | exit_program(1);\ | ||
936 | }\ | ||
937 | if (_matches > 1)\ | ||
938 | WARN_MULTIPLE_OPT_USAGE(name, type, so, st);\ | ||
939 | } | ||
940 | |||
941 | #define MATCH_PER_TYPE_OPT(name, type, outvar, fmtctx, mediatype)\ | ||
942 | {\ | ||
943 | int i;\ | ||
944 | for (i = 0; i < o->nb_ ## name; i++) {\ | ||
945 | char *spec = o->name[i].specifier;\ | ||
946 | if (!strcmp(spec, mediatype))\ | ||
947 | outvar = o->name[i].u.type;\ | ||
948 | }\ | ||
949 | } | ||
950 | |||
951 | extern const char * const opt_name_codec_names[]; | ||
952 | extern const char * const opt_name_codec_tags[]; | ||
953 | extern const char * const opt_name_frame_rates[]; | ||
954 | extern const char * const opt_name_top_field_first[]; | ||
955 | |||
956 | #endif /* FFTOOLS_FFMPEG_H */ | ||
957 |
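
The only executed code in this header is the inline `err_merge()` helper (13132 hits, 1/2 branches taken). As a hedged, standalone sketch of its documented behaviour (not part of the covered file; it assumes compilation inside the fftools source tree so that `ffmpeg.h` and the libavutil error macros resolve):

```c
#include <assert.h>

#include "ffmpeg.h"   /* err_merge(), AVERROR*, FFMIN via the includes above */

/* Sketch: err_merge() keeps the first negative code and clamps positive
 * "success" values from err1 to 0, exactly as the covered return line shows. */
static void err_merge_examples(void)
{
    assert(err_merge(0, 0) == 0);                                    /* both ok        */
    assert(err_merge(AVERROR(ENOMEM), 0) == AVERROR(ENOMEM));        /* err0 negative  */
    assert(err_merge(0, AVERROR_EOF) == AVERROR_EOF);                /* err1 negative  */
    assert(err_merge(AVERROR_EOF, AVERROR(EINVAL)) == AVERROR_EOF);  /* err0 wins      */
    assert(err_merge(0, 1) == 0);                                    /* FFMIN(err1, 0) */
}
```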
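`ist_iter()` and `ost_iter()` are documented above as pass-NULL-to-start iterators over every stream in every input/output file. A minimal usage sketch under the same assumption (compiled inside fftools, after the global file lists have been populated; the wrapper function is hypothetical):

```c
#include "ffmpeg.h"

/* Sketch: walk all input and output streams with the iterator pattern
 * documented above (start with NULL, stop when NULL is returned). */
static void log_stream_counts(void)
{
    int n_in = 0, n_out = 0;

    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
        n_in++;
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
        n_out++;

    av_log(NULL, AV_LOG_INFO, "%d input stream(s), %d output stream(s)\n",
           n_in, n_out);
}
```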
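The `dec_packet()` documentation distinguishes two `pkt == NULL` flush cases via `no_eof`. A short sketch restating that contract in code (the wrapper names are hypothetical; the calls themselves use only the declaration above):

```c
#include "ffmpeg.h"

/* Sketch: the two pkt==NULL cases documented for dec_packet(). */
static int flush_at_eof(InputStream *ist)
{
    /* no more input will arrive: flush the decoder, finish all downstreams */
    return dec_packet(ist, NULL, /*no_eof=*/0);
}

static int flush_after_seek(InputStream *ist)
{
    /* the stream was reset (e.g. a seek): flush, but expect further input */
    return dec_packet(ist, NULL, /*no_eof=*/1);
}
```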
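Note that `MATCH_PER_STREAM_OPT()` expands against a local `OptionsContext` pointer that must literally be named `o`, because the macro body references `o->nb_##name` directly. A hedged usage sketch (the wrapper function is hypothetical; `codec_names` is chosen because its `opt_name_codec_names[]` table, needed by the multiple-use warning, is declared in this header):

```c
#include "libavutil/avstring.h"  /* av_strlcatf(), used by WARN_MULTIPLE_OPT_USAGE */

#include "ffmpeg.h"

/* Sketch: fetch the per-stream -c/-codec value matching one stream.
 * The OptionsContext pointer must be named 'o' for the macro to expand. */
static const char *stream_codec_name(const OptionsContext *o,
                                     AVFormatContext *fmtctx, AVStream *st)
{
    char *codec_name = NULL;

    MATCH_PER_STREAM_OPT(codec_names, str, codec_name, fmtctx, st);
    return codec_name;  /* NULL when no -c option matched this stream */
}
```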