GCC Code Coverage Report

Directory: ../../../ffmpeg/
File:      src/fftools/ffmpeg.c
Date:      2020-11-28 20:53:16

              Exec    Total   Coverage
Lines:        2092     2783     75.2 %
Branches:     1533     2187     70.1 %

Line Branch Exec Source
1
/*
2
 * Copyright (c) 2000-2003 Fabrice Bellard
3
 *
4
 * This file is part of FFmpeg.
5
 *
6
 * FFmpeg is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
10
 *
11
 * FFmpeg is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with FFmpeg; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
21
/**
22
 * @file
23
 * multimedia converter based on the FFmpeg libraries
24
 */
25
26
#include "config.h"
27
#include <ctype.h>
28
#include <string.h>
29
#include <math.h>
30
#include <stdlib.h>
31
#include <errno.h>
32
#include <limits.h>
33
#include <stdatomic.h>
34
#include <stdint.h>
35
36
#if HAVE_IO_H
37
#include <io.h>
38
#endif
39
#if HAVE_UNISTD_H
40
#include <unistd.h>
41
#endif
42
43
#include "libavformat/avformat.h"
44
#include "libavdevice/avdevice.h"
45
#include "libswresample/swresample.h"
46
#include "libavutil/opt.h"
47
#include "libavutil/channel_layout.h"
48
#include "libavutil/parseutils.h"
49
#include "libavutil/samplefmt.h"
50
#include "libavutil/fifo.h"
51
#include "libavutil/hwcontext.h"
52
#include "libavutil/internal.h"
53
#include "libavutil/intreadwrite.h"
54
#include "libavutil/dict.h"
55
#include "libavutil/display.h"
56
#include "libavutil/mathematics.h"
57
#include "libavutil/pixdesc.h"
58
#include "libavutil/avstring.h"
59
#include "libavutil/libm.h"
60
#include "libavutil/imgutils.h"
61
#include "libavutil/timestamp.h"
62
#include "libavutil/bprint.h"
63
#include "libavutil/time.h"
64
#include "libavutil/thread.h"
65
#include "libavutil/threadmessage.h"
66
#include "libavcodec/mathops.h"
67
#include "libavformat/os_support.h"
68
69
# include "libavfilter/avfilter.h"
70
# include "libavfilter/buffersrc.h"
71
# include "libavfilter/buffersink.h"
72
73
#if HAVE_SYS_RESOURCE_H
74
#include <sys/time.h>
75
#include <sys/types.h>
76
#include <sys/resource.h>
77
#elif HAVE_GETPROCESSTIMES
78
#include <windows.h>
79
#endif
80
#if HAVE_GETPROCESSMEMORYINFO
81
#include <windows.h>
82
#include <psapi.h>
83
#endif
84
#if HAVE_SETCONSOLECTRLHANDLER
85
#include <windows.h>
86
#endif
87
88
89
#if HAVE_SYS_SELECT_H
90
#include <sys/select.h>
91
#endif
92
93
#if HAVE_TERMIOS_H
94
#include <fcntl.h>
95
#include <sys/ioctl.h>
96
#include <sys/time.h>
97
#include <termios.h>
98
#elif HAVE_KBHIT
99
#include <conio.h>
100
#endif
101
102
#include <time.h>
103
104
#include "ffmpeg.h"
105
#include "cmdutils.h"
106
107
#include "libavutil/avassert.h"
108
109
const char program_name[] = "ffmpeg";
110
const int program_birth_year = 2000;
111
112
static FILE *vstats_file;
113
114
const char *const forced_keyframes_const_names[] = {
115
    "n",
116
    "n_forced",
117
    "prev_forced_n",
118
    "prev_forced_t",
119
    "t",
120
    NULL
121
};
122
123
typedef struct BenchmarkTimeStamps {
124
    int64_t real_usec;
125
    int64_t user_usec;
126
    int64_t sys_usec;
127
} BenchmarkTimeStamps;
128
129
static void do_video_stats(OutputStream *ost, int frame_size);
130
static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131
static int64_t getmaxrss(void);
132
static int ifilter_has_all_input_formats(FilterGraph *fg);
133
134
static int run_as_daemon  = 0;
135
static int nb_frames_dup = 0;
136
static unsigned dup_warning = 1000;
137
static int nb_frames_drop = 0;
138
static int64_t decode_error_stat[2];
139
140
static int want_sdp = 1;
141
142
static BenchmarkTimeStamps current_time;
143
AVIOContext *progress_avio = NULL;
144
145
static uint8_t *subtitle_out;
146
147
InputStream **input_streams = NULL;
148
int        nb_input_streams = 0;
149
InputFile   **input_files   = NULL;
150
int        nb_input_files   = 0;
151
152
OutputStream **output_streams = NULL;
153
int         nb_output_streams = 0;
154
OutputFile   **output_files   = NULL;
155
int         nb_output_files   = 0;
156
157
FilterGraph **filtergraphs;
158
int        nb_filtergraphs;
159
160
#if HAVE_TERMIOS_H
161
162
/* init terminal so that we can grab keys */
163
static struct termios oldtty;
164
static int restore_tty;
165
#endif
166
167
#if HAVE_THREADS
168
static void free_input_threads(void);
169
#endif
170
171
/* sub2video hack:
172
   Convert subtitles to video with alpha to insert them in filter graphs.
173
   This is a temporary solution until libavfilter gets real subtitles support.
174
 */
175
176
180
static int sub2video_get_blank_frame(InputStream *ist)
177
{
178
    int ret;
179
180
    AVFrame *frame = ist->sub2video.frame;
180
181
180
    av_frame_unref(frame);
182
180
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
183
180
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184
180
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185
180
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
186
        return ret;
187
180
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188
180
    return 0;
189
}
190
191
89
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192
                                AVSubtitleRect *r)
193
{
194
    uint32_t *pal, *dst2;
195
    uint8_t *src, *src2;
196
    int x, y;
197
198
89
    if (r->type != SUBTITLE_BITMAP) {
199
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200
        return;
201
    }
202


89
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204
            r->x, r->y, r->w, r->h, w, h
205
        );
206
        return;
207
    }
208
209
89
    dst += r->y * dst_linesize + r->x * 4;
210
89
    src = r->data[0];
211
89
    pal = (uint32_t *)r->data[1];
212
3380
    for (y = 0; y < r->h; y++) {
213
3291
        dst2 = (uint32_t *)dst;
214
3291
        src2 = src;
215
1039366
        for (x = 0; x < r->w; x++)
216
1036075
            *(dst2++) = pal[*(src2++)];
217
3291
        dst += dst_linesize;
218
3291
        src += r->linesize[0];
219
    }
220
}
221
222
429
static void sub2video_push_ref(InputStream *ist, int64_t pts)
223
{
224
429
    AVFrame *frame = ist->sub2video.frame;
225
    int i;
226
    int ret;
227
228
    av_assert1(frame->data[0]);
229
429
    ist->sub2video.last_pts = frame->pts = pts;
230
858
    for (i = 0; i < ist->nb_filters; i++) {
231
429
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232
                                           AV_BUFFERSRC_FLAG_KEEP_REF |
233
                                           AV_BUFFERSRC_FLAG_PUSH);
234

429
        if (ret != AVERROR_EOF && ret < 0)
235
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236
                   av_err2str(ret));
237
    }
238
429
}
239
240
210
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
241
{
242
210
    AVFrame *frame = ist->sub2video.frame;
243
    int8_t *dst;
244
    int     dst_linesize;
245
    int num_rects, i;
246
    int64_t pts, end_pts;
247
248
210
    if (!frame)
249
30
        return;
250
180
    if (sub) {
251
88
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252
88
                                 AV_TIME_BASE_Q, ist->st->time_base);
253
88
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
254
88
                                 AV_TIME_BASE_Q, ist->st->time_base);
255
88
        num_rects = sub->num_rects;
256
    } else {
257
        /* If we are initializing the system, utilize current heartbeat
258
           PTS as the start time, and show until the following subpicture
259
           is received. Otherwise, utilize the previous subpicture's end time
260
           as the fall-back value. */
261
184
        pts       = ist->sub2video.initialize ?
262
92
                    heartbeat_pts : ist->sub2video.end_pts;
263
92
        end_pts   = INT64_MAX;
264
92
        num_rects = 0;
265
    }
266
180
    if (sub2video_get_blank_frame(ist) < 0) {
267
        av_log(ist->dec_ctx, AV_LOG_ERROR,
268
               "Impossible to get a blank canvas.\n");
269
        return;
270
    }
271
180
    dst          = frame->data    [0];
272
180
    dst_linesize = frame->linesize[0];
273
269
    for (i = 0; i < num_rects; i++)
274
89
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275
180
    sub2video_push_ref(ist, pts);
276
180
    ist->sub2video.end_pts = end_pts;
277
180
    ist->sub2video.initialize = 0;
278
}
279
280
391342
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281
{
282
391342
    InputFile *infile = input_files[ist->file_index];
283
    int i, j, nb_reqs;
284
    int64_t pts2;
285
286
    /* When a frame is read from a file, examine all sub2video streams in
287
       the same file and send the sub2video frame again. Otherwise, decoded
288
       video frames could be accumulating in the filter graph while a filter
289
       (possibly overlay) is desperately waiting for a subtitle frame. */
290
821638
    for (i = 0; i < infile->nb_streams; i++) {
291
430296
        InputStream *ist2 = input_streams[infile->ist_index + i];
292
430296
        if (!ist2->sub2video.frame)
293
429351
            continue;
294
        /* subtitles seem to be usually muxed ahead of other streams;
295
           if not, subtracting a larger time here is necessary */
296
945
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297
        /* do not send the heartbeat frame if the subtitle is already ahead */
298
945
        if (pts2 <= ist2->sub2video.last_pts)
299
8
            continue;
300

937
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301
            /* if we have hit the end of the current displayed subpicture,
302
               or if we need to initialize the system, update the
303
               overlaid subpicture and its start/end times */
304
88
            sub2video_update(ist2, pts2 + 1, NULL);
305
1874
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306
937
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307
937
        if (nb_reqs)
308
249
            sub2video_push_ref(ist2, pts2);
309
    }
310
391342
}
311
312
34
static void sub2video_flush(InputStream *ist)
313
{
314
    int i;
315
    int ret;
316
317
34
    if (ist->sub2video.end_pts < INT64_MAX)
318
34
        sub2video_update(ist, INT64_MAX, NULL);
319
38
    for (i = 0; i < ist->nb_filters; i++) {
320
4
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321

4
        if (ret != AVERROR_EOF && ret < 0)
322
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323
    }
324
34
}
325
326
/* end of sub2video hack */
327
328
11956
static void term_exit_sigsafe(void)
329
{
330
#if HAVE_TERMIOS_H
331
11956
    if(restore_tty)
332
8
        tcsetattr (0, TCSANOW, &oldtty);
333
#endif
334
11956
}
335
336
11956
void term_exit(void)
337
{
338
11956
    av_log(NULL, AV_LOG_QUIET, "%s", "");
339
11956
    term_exit_sigsafe();
340
11956
}
341
342
static volatile int received_sigterm = 0;
343
static volatile int received_nb_signals = 0;
344
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
345
static volatile int ffmpeg_exited = 0;
346
static int main_return_code = 0;
347
static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
348
349
static void
350
sigterm_handler(int sig)
351
{
352
    int ret;
353
    received_sigterm = sig;
354
    received_nb_signals++;
355
    term_exit_sigsafe();
356
    if(received_nb_signals > 3) {
357
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
358
                    strlen("Received > 3 system signals, hard exiting\n"));
359
        if (ret < 0) { /* Do nothing */ };
360
        exit(123);
361
    }
362
}
363
364
#if HAVE_SETCONSOLECTRLHANDLER
365
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
366
{
367
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
368
369
    switch (fdwCtrlType)
370
    {
371
    case CTRL_C_EVENT:
372
    case CTRL_BREAK_EVENT:
373
        sigterm_handler(SIGINT);
374
        return TRUE;
375
376
    case CTRL_CLOSE_EVENT:
377
    case CTRL_LOGOFF_EVENT:
378
    case CTRL_SHUTDOWN_EVENT:
379
        sigterm_handler(SIGTERM);
380
        /* Basically, with these 3 events, when we return from this method the
381
           process is hard terminated, so stall as long as we need to,
382
           to try and let the main thread(s) clean up and gracefully terminate
383
           (we have at most 5 seconds, but should be done far before that). */
384
        while (!ffmpeg_exited) {
385
            Sleep(0);
386
        }
387
        return TRUE;
388
389
    default:
390
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
391
        return FALSE;
392
    }
393
}
394
#endif
395
396
5979
void term_init(void)
397
{
398
#if HAVE_TERMIOS_H
399

5979
    if (!run_as_daemon && stdin_interaction) {
400
        struct termios tty;
401
24
        if (tcgetattr (0, &tty) == 0) {
402
4
            oldtty = tty;
403
4
            restore_tty = 1;
404
405
4
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
406
                             |INLCR|IGNCR|ICRNL|IXON);
407
4
            tty.c_oflag |= OPOST;
408
4
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
409
4
            tty.c_cflag &= ~(CSIZE|PARENB);
410
4
            tty.c_cflag |= CS8;
411
4
            tty.c_cc[VMIN] = 1;
412
4
            tty.c_cc[VTIME] = 0;
413
414
4
            tcsetattr (0, TCSANOW, &tty);
415
        }
416
24
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
417
    }
418
#endif
419
420
5979
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
421
5979
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
422
#ifdef SIGXCPU
423
5979
    signal(SIGXCPU, sigterm_handler);
424
#endif
425
#ifdef SIGPIPE
426
5979
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
427
#endif
428
#if HAVE_SETCONSOLECTRLHANDLER
429
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
430
#endif
431
5979
}
432
433
/* read a key without blocking */
434
171
static int read_key(void)
435
{
436
    unsigned char ch;
437
#if HAVE_TERMIOS_H
438
171
    int n = 1;
439
    struct timeval tv;
440
    fd_set rfds;
441
442
171
    FD_ZERO(&rfds);
443
171
    FD_SET(0, &rfds);
444
171
    tv.tv_sec = 0;
445
171
    tv.tv_usec = 0;
446
171
    n = select(1, &rfds, NULL, NULL, &tv);
447
171
    if (n > 0) {
448
160
        n = read(0, &ch, 1);
449
160
        if (n == 1)
450
            return ch;
451
452
160
        return n;
453
    }
454
#elif HAVE_KBHIT
455
#    if HAVE_PEEKNAMEDPIPE
456
    static int is_pipe;
457
    static HANDLE input_handle;
458
    DWORD dw, nchars;
459
    if(!input_handle){
460
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
461
        is_pipe = !GetConsoleMode(input_handle, &dw);
462
    }
463
464
    if (is_pipe) {
465
        /* When running under a GUI, you will end here. */
466
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
467
            // input pipe may have been closed by the program that ran ffmpeg
468
            return -1;
469
        }
470
        //Read it
471
        if(nchars != 0) {
472
            read(0, &ch, 1);
473
            return ch;
474
        }else{
475
            return -1;
476
        }
477
    }
478
#    endif
479
    if(kbhit())
480
        return(getch());
481
#endif
482
11
    return -1;
483
}
484
485
596803
static int decode_interrupt_cb(void *ctx)
486
{
487
596803
    return received_nb_signals > atomic_load(&transcode_init_done);
488
}
489
490
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
491
492
5980
static void ffmpeg_cleanup(int ret)
493
{
494
    int i, j;
495
496
5980
    if (do_benchmark) {
497
        int maxrss = getmaxrss() / 1024;
498
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
499
    }
500
501
11773
    for (i = 0; i < nb_filtergraphs; i++) {
502
5793
        FilterGraph *fg = filtergraphs[i];
503
5793
        avfilter_graph_free(&fg->graph);
504
11566
        for (j = 0; j < fg->nb_inputs; j++) {
505
5773
            InputFilter *ifilter = fg->inputs[j];
506
5773
            struct InputStream *ist = ifilter->ist;
507
508
5773
            while (av_fifo_size(ifilter->frame_queue)) {
509
                AVFrame *frame;
510
                av_fifo_generic_read(ifilter->frame_queue, &frame,
511
                                     sizeof(frame), NULL);
512
                av_frame_free(&frame);
513
            }
514
5773
            av_fifo_freep(&ifilter->frame_queue);
515
5773
            if (ist->sub2video.sub_queue) {
516
                while (av_fifo_size(ist->sub2video.sub_queue)) {
517
                    AVSubtitle sub;
518
                    av_fifo_generic_read(ist->sub2video.sub_queue,
519
                                         &sub, sizeof(sub), NULL);
520
                    avsubtitle_free(&sub);
521
                }
522
                av_fifo_freep(&ist->sub2video.sub_queue);
523
            }
524
5773
            av_buffer_unref(&ifilter->hw_frames_ctx);
525
5773
            av_freep(&ifilter->name);
526
5773
            av_freep(&fg->inputs[j]);
527
        }
528
5793
        av_freep(&fg->inputs);
529
11589
        for (j = 0; j < fg->nb_outputs; j++) {
530
5796
            OutputFilter *ofilter = fg->outputs[j];
531
532
5796
            avfilter_inout_free(&ofilter->out_tmp);
533
5796
            av_freep(&ofilter->name);
534
5796
            av_freep(&ofilter->formats);
535
5796
            av_freep(&ofilter->channel_layouts);
536
5796
            av_freep(&ofilter->sample_rates);
537
5796
            av_freep(&fg->outputs[j]);
538
        }
539
5793
        av_freep(&fg->outputs);
540
5793
        av_freep(&fg->graph_desc);
541
542
5793
        av_freep(&filtergraphs[i]);
543
    }
544
5980
    av_freep(&filtergraphs);
545
546
5980
    av_freep(&subtitle_out);
547
548
    /* close files */
549
11959
    for (i = 0; i < nb_output_files; i++) {
550
5979
        OutputFile *of = output_files[i];
551
        AVFormatContext *s;
552
5979
        if (!of)
553
            continue;
554
5979
        s = of->ctx;
555

5979
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
556
5925
            avio_closep(&s->pb);
557
5979
        avformat_free_context(s);
558
5979
        av_dict_free(&of->opts);
559
560
5979
        av_freep(&output_files[i]);
561
    }
562
12106
    for (i = 0; i < nb_output_streams; i++) {
563
6126
        OutputStream *ost = output_streams[i];
564
565
6126
        if (!ost)
566
            continue;
567
568
6126
        av_bsf_free(&ost->bsf_ctx);
569
570
6126
        av_frame_free(&ost->filtered_frame);
571
6126
        av_frame_free(&ost->last_frame);
572
6126
        av_dict_free(&ost->encoder_opts);
573
574
6126
        av_freep(&ost->forced_keyframes);
575
6126
        av_expr_free(ost->forced_keyframes_pexpr);
576
6126
        av_freep(&ost->avfilter);
577
6126
        av_freep(&ost->logfile_prefix);
578
579
6126
        av_freep(&ost->audio_channels_map);
580
6126
        ost->audio_channels_mapped = 0;
581
582
6126
        av_dict_free(&ost->sws_dict);
583
6126
        av_dict_free(&ost->swr_opts);
584
585
6126
        avcodec_free_context(&ost->enc_ctx);
586
6126
        avcodec_parameters_free(&ost->ref_par);
587
588
6126
        if (ost->muxing_queue) {
589
6126
            while (av_fifo_size(ost->muxing_queue)) {
590
                AVPacket pkt;
591
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
592
                av_packet_unref(&pkt);
593
            }
594
6126
            av_fifo_freep(&ost->muxing_queue);
595
        }
596
597
6126
        av_freep(&output_streams[i]);
598
    }
599
#if HAVE_THREADS
600
5980
    free_input_threads();
601
#endif
602
11986
    for (i = 0; i < nb_input_files; i++) {
603
6006
        avformat_close_input(&input_files[i]->ctx);
604
6006
        av_freep(&input_files[i]);
605
    }
606
12315
    for (i = 0; i < nb_input_streams; i++) {
607
6335
        InputStream *ist = input_streams[i];
608
609
6335
        av_frame_free(&ist->decoded_frame);
610
6335
        av_frame_free(&ist->filter_frame);
611
6335
        av_dict_free(&ist->decoder_opts);
612
6335
        avsubtitle_free(&ist->prev_sub.subtitle);
613
6335
        av_frame_free(&ist->sub2video.frame);
614
6335
        av_freep(&ist->filters);
615
6335
        av_freep(&ist->hwaccel_device);
616
6335
        av_freep(&ist->dts_buffer);
617
618
6335
        avcodec_free_context(&ist->dec_ctx);
619
620
6335
        av_freep(&input_streams[i]);
621
    }
622
623
5980
    if (vstats_file) {
624
        if (fclose(vstats_file))
625
            av_log(NULL, AV_LOG_ERROR,
626
                   "Error closing vstats file, loss of information possible: %s\n",
627
                   av_err2str(AVERROR(errno)));
628
    }
629
5980
    av_freep(&vstats_filename);
630
631
5980
    av_freep(&input_streams);
632
5980
    av_freep(&input_files);
633
5980
    av_freep(&output_streams);
634
5980
    av_freep(&output_files);
635
636
5980
    uninit_opts();
637
638
5980
    avformat_network_deinit();
639
640
5980
    if (received_sigterm) {
641
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
642
               (int) received_sigterm);
643

5980
    } else if (ret && atomic_load(&transcode_init_done)) {
644
3
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
645
    }
646
5980
    term_exit();
647
5980
    ffmpeg_exited = 1;
648
5980
}
649
650
6006
void remove_avoptions(AVDictionary **a, AVDictionary *b)
651
{
652
6006
    AVDictionaryEntry *t = NULL;
653
654
24974
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
655
18968
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
656
    }
657
6006
}
658
659
17632
void assert_avoptions(AVDictionary *m)
660
{
661
    AVDictionaryEntry *t;
662
17632
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
663
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
664
        exit_program(1);
665
    }
666
17632
}
667
668
static void abort_codec_experimental(AVCodec *c, int encoder)
669
{
670
    exit_program(1);
671
}
672
673
2388898
static void update_benchmark(const char *fmt, ...)
674
{
675
2388898
    if (do_benchmark_all) {
676
        BenchmarkTimeStamps t = get_benchmark_time_stamps();
677
        va_list va;
678
        char buf[1024];
679
680
        if (fmt) {
681
            va_start(va, fmt);
682
            vsnprintf(buf, sizeof(buf), fmt, va);
683
            va_end(va);
684
            av_log(NULL, AV_LOG_INFO,
685
                   "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
686
                   t.user_usec - current_time.user_usec,
687
                   t.sys_usec - current_time.sys_usec,
688
                   t.real_usec - current_time.real_usec, buf);
689
        }
690
        current_time = t;
691
    }
692
2388898
}
693
694
1
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
695
{
696
    int i;
697
3
    for (i = 0; i < nb_output_streams; i++) {
698
2
        OutputStream *ost2 = output_streams[i];
699
2
        ost2->finished |= ost == ost2 ? this_stream : others;
700
    }
701
1
}
702
703
427433
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
704
{
705
427433
    AVFormatContext *s = of->ctx;
706
427433
    AVStream *st = ost->st;
707
    int ret;
708
709
    /*
710
     * Audio encoders may split the packets --  #frames in != #packets out.
711
     * But there is no reordering, so we can limit the number of output packets
712
     * by simply dropping them here.
713
     * Counting encoded video frames needs to be done separately because of
714
     * reordering, see do_video_out().
715
     * Do not count the packet when unqueued because it has been counted when queued.
716
     */
717

427433
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
718
327682
        if (ost->frame_number >= ost->max_frames) {
719
317
            av_packet_unref(pkt);
720
317
            return;
721
        }
722
327365
        ost->frame_number++;
723
    }
724
725
427116
    if (!of->header_written) {
726
293
        AVPacket tmp_pkt = {0};
727
        /* the muxer is not initialized yet, buffer the packet */
728
293
        if (!av_fifo_space(ost->muxing_queue)) {
729
16
            unsigned int are_we_over_size =
730
16
                (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
731
16
            int new_size = are_we_over_size ?
732
                           FFMIN(2 * av_fifo_size(ost->muxing_queue),
733
32
                                 ost->max_muxing_queue_size) :
734
16
                           2 * av_fifo_size(ost->muxing_queue);
735
736
16
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
737
                av_log(NULL, AV_LOG_ERROR,
738
                       "Too many packets buffered for output stream %d:%d.\n",
739
                       ost->file_index, ost->st->index);
740
                exit_program(1);
741
            }
742
16
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
743
16
            if (ret < 0)
744
                exit_program(1);
745
        }
746
293
        ret = av_packet_make_refcounted(pkt);
747
293
        if (ret < 0)
748
            exit_program(1);
749
293
        av_packet_move_ref(&tmp_pkt, pkt);
750
293
        ost->muxing_queue_data_size += tmp_pkt.size;
751
293
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
752
293
        return;
753
    }
754
755

426823
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
756

403495
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
757
23328
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
758
759
426823
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
760
        int i;
761
109387
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
762
                                              NULL);
763
109387
        ost->quality = sd ? AV_RL32(sd) : -1;
764
109387
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
765
766
546935
        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
767

437548
            if (sd && i < sd[5])
768
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
769
            else
770
437548
                ost->error[i] = -1;
771
        }
772
773

109387
        if (ost->frame_rate.num && ost->is_cfr) {
774
9789
            if (pkt->duration > 0)
775
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
776
9789
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
777
                                         ost->mux_timebase);
778
        }
779
    }
780
781
426823
    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
782
783
426823
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
784
185276
        if (pkt->dts != AV_NOPTS_VALUE &&
785
161948
            pkt->pts != AV_NOPTS_VALUE &&
786
160981
            pkt->dts > pkt->pts) {
787
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
788
                   pkt->dts, pkt->pts,
789
                   ost->file_index, ost->st->index);
790
            pkt->pts =
791
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
792
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
793
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
794
        }
795

185276
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
796
185263
            pkt->dts != AV_NOPTS_VALUE &&
797

161935
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
798
161702
            ost->last_mux_dts != AV_NOPTS_VALUE) {
799
157183
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
800
157183
            if (pkt->dts < max) {
801

2
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
802
2
                if (exit_on_error)
803
                    loglevel = AV_LOG_ERROR;
804
2
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
805
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
806
2
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
807
2
                if (exit_on_error) {
808
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
809
                    exit_program(1);
810
                }
811
2
                av_log(s, loglevel, "changing to %"PRId64". This may result "
812
                       "in incorrect timestamps in the output file.\n",
813
                       max);
814
2
                if (pkt->pts >= pkt->dts)
815
2
                    pkt->pts = FFMAX(pkt->pts, max);
816
2
                pkt->dts = max;
817
            }
818
        }
819
    }
820
426823
    ost->last_mux_dts = pkt->dts;
821
822
426823
    ost->data_size += pkt->size;
823
426823
    ost->packets_written++;
824
825
426823
    pkt->stream_index = ost->index;
826
827
426823
    if (debug_ts) {
828
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
829
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
830
                av_get_media_type_string(ost->enc_ctx->codec_type),
831
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
832
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
833
                pkt->size
834
              );
835
    }
836
837
426823
    ret = av_interleaved_write_frame(s, pkt);
838
426823
    if (ret < 0) {
839
1
        print_error("av_interleaved_write_frame()", ret);
840
1
        main_return_code = 1;
841
1
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
842
    }
843
426823
    av_packet_unref(pkt);
844
}
845
846
5807
static void close_output_stream(OutputStream *ost)
847
{
848
5807
    OutputFile *of = output_files[ost->file_index];
849
850
5807
    ost->finished |= ENCODER_FINISHED;
851
5807
    if (of->shortest) {
852
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
853
        of->recording_time = FFMIN(of->recording_time, end);
854
    }
855
5807
}
856
857
/*
858
 * Send a single packet to the output, applying any bitstream filters
859
 * associated with the output stream.  This may result in any number
860
 * of packets actually being written, depending on what bitstream
861
 * filters are applied.  The supplied packet is consumed and will be
862
 * blank (as if newly-allocated) when this function returns.
863
 *
864
 * If eof is set, instead indicate EOF to all bitstream filters and
865
 * therefore flush any delayed packets to the output.  A blank packet
866
 * must be supplied in this case.
867
 */
868
432935
static void output_packet(OutputFile *of, AVPacket *pkt,
869
                          OutputStream *ost, int eof)
870
{
871
432935
    int ret = 0;
872
873
    /* apply the output bitstream filters */
874
432935
    if (ost->bsf_ctx) {
875
6769
        ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
876
6769
        if (ret < 0)
877
            goto finish;
878
13534
        while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
879
6765
            write_packet(of, pkt, ost, 0);
880
6769
        if (ret == AVERROR(EAGAIN))
881
6765
            ret = 0;
882
426166
    } else if (!eof)
883
420375
        write_packet(of, pkt, ost, 0);
884
885
5791
finish:
886

432935
    if (ret < 0 && ret != AVERROR_EOF) {
887
1
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
888
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
889
1
        if(exit_on_error)
890
            exit_program(1);
891
    }
892
432935
}
893
894
401048
static int check_recording_time(OutputStream *ost)
895
{
896
401048
    OutputFile *of = output_files[ost->file_index];
897
898

408522
    if (of->recording_time != INT64_MAX &&
899
7474
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
900
7474
                      AV_TIME_BASE_Q) >= 0) {
901
1
        close_output_stream(ost);
902
1
        return 0;
903
    }
904
401047
    return 1;
905
}
906
907
402762
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
908
                                             AVFrame *frame)
909
{
910
402762
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
911
402762
    AVCodecContext *enc = ost->enc_ctx;
912

402762
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
913

400342
        !enc || !ost->filter || !ost->filter->graph->graph)
914
2420
        goto early_exit;
915
916
    {
917
400342
        AVFilterContext *filter = ost->filter->filter;
918
919
400342
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
920
400342
        AVRational filter_tb = av_buffersink_get_time_base(filter);
921
400342
        AVRational tb = enc->time_base;
922
400342
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
923
924
400342
        tb.den <<= extra_bits;
925
400342
        float_pts =
926
400342
            av_rescale_q(frame->pts, filter_tb, tb) -
927
400342
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
928
400342
        float_pts /= 1 << extra_bits;
929
        // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
930
400342
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
931
932
400342
        frame->pts =
933
400342
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
934
400342
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
935
    }
936
937
402762
early_exit:
938
939
402762
    if (debug_ts) {
940
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
941
               frame ? av_ts2str(frame->pts) : "NULL",
942
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
943
               float_pts,
944
               enc ? enc->time_base.num : -1,
945
               enc ? enc->time_base.den : -1);
946
    }
947
948
402762
    return float_pts;
949
}
950
951
static int init_output_stream(OutputStream *ost, AVFrame *frame,
952
                              char *error, int error_len);
953
954
618847
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
955
                                      unsigned int fatal)
956
{
957
618847
    int ret = AVERROR_BUG;
958
618847
    char error[1024] = {0};
959
960
618847
    if (ost->initialized)
961
612723
        return 0;
962
963
6124
    ret = init_output_stream(ost, frame, error, sizeof(error));
964
6124
    if (ret < 0) {
965
        av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
966
               ost->file_index, ost->index, error);
967
968
        if (fatal)
969
            exit_program(1);
970
    }
971
972
6124
    return ret;
973
}
974
975
300922
static void do_audio_out(OutputFile *of, OutputStream *ost,
976
                         AVFrame *frame)
977
{
978
300922
    AVCodecContext *enc = ost->enc_ctx;
979
    AVPacket pkt;
980
    int ret;
981
982
300922
    av_init_packet(&pkt);
983
300922
    pkt.data = NULL;
984
300922
    pkt.size = 0;
985
986
300922
    adjust_frame_pts_to_encoder_tb(of, ost, frame);
987
988
300922
    if (!check_recording_time(ost))
989
        return;
990
991

300922
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
992
        frame->pts = ost->sync_opts;
993
300922
    ost->sync_opts = frame->pts + frame->nb_samples;
994
300922
    ost->samples_encoded += frame->nb_samples;
995
300922
    ost->frames_encoded++;
996
997

300922
    av_assert0(pkt.size || !pkt.data);
998
300922
    update_benchmark(NULL);
999
300922
    if (debug_ts) {
1000
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1001
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1002
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1003
               enc->time_base.num, enc->time_base.den);
1004
    }
1005
1006
300922
    ret = avcodec_send_frame(enc, frame);
1007
300922
    if (ret < 0)
1008
        goto error;
1009
1010
    while (1) {
1011
595688
        ret = avcodec_receive_packet(enc, &pkt);
1012
595688
        if (ret == AVERROR(EAGAIN))
1013
300922
            break;
1014
294766
        if (ret < 0)
1015
            goto error;
1016
1017
294766
        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1018
1019
294766
        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1020
1021
294766
        if (debug_ts) {
1022
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1023
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1024
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1025
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1026
        }
1027
1028
294766
        output_packet(of, &pkt, ost, 0);
1029
    }
1030
1031
300922
    return;
1032
error:
1033
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1034
    exit_program(1);
1035
}
1036
1037
631
static void do_subtitle_out(OutputFile *of,
1038
                            OutputStream *ost,
1039
                            AVSubtitle *sub)
1040
{
1041
631
    int subtitle_out_max_size = 1024 * 1024;
1042
    int subtitle_out_size, nb, i;
1043
    AVCodecContext *enc;
1044
    AVPacket pkt;
1045
    int64_t pts;
1046
1047
631
    if (sub->pts == AV_NOPTS_VALUE) {
1048
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1049
        if (exit_on_error)
1050
            exit_program(1);
1051
        return;
1052
    }
1053
1054
631
    enc = ost->enc_ctx;
1055
1056
631
    if (!subtitle_out) {
1057
31
        subtitle_out = av_malloc(subtitle_out_max_size);
1058
31
        if (!subtitle_out) {
1059
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1060
            exit_program(1);
1061
        }
1062
    }
1063
1064
    /* Note: DVB subtitles need one packet to draw them and another
1065
       packet to clear them */
1066
    /* XXX: signal it in the codec context ? */
1067
631
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1068
36
        nb = 2;
1069
    else
1070
595
        nb = 1;
1071
1072
    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1073
631
    pts = sub->pts;
1074
631
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1075
        pts -= output_files[ost->file_index]->start_time;
1076
1298
    for (i = 0; i < nb; i++) {
1077
667
        unsigned save_num_rects = sub->num_rects;
1078
1079
667
        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1080
667
        if (!check_recording_time(ost))
1081
            return;
1082
1083
667
        sub->pts = pts;
1084
        // start_display_time is required to be 0
1085
667
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1086
667
        sub->end_display_time  -= sub->start_display_time;
1087
667
        sub->start_display_time = 0;
1088
667
        if (i == 1)
1089
36
            sub->num_rects = 0;
1090
1091
667
        ost->frames_encoded++;
1092
1093
667
        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1094
                                                    subtitle_out_max_size, sub);
1095
667
        if (i == 1)
1096
36
            sub->num_rects = save_num_rects;
1097
667
        if (subtitle_out_size < 0) {
1098
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1099
            exit_program(1);
1100
        }
1101
1102
667
        av_init_packet(&pkt);
1103
667
        pkt.data = subtitle_out;
1104
667
        pkt.size = subtitle_out_size;
1105
667
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1106
667
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1107
667
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1108
            /* XXX: the pts correction is handled here. Maybe handling
1109
               it in the codec would be better */
1110
72
            if (i == 0)
1111
36
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1112
            else
1113
36
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1114
        }
1115
667
        pkt.dts = pkt.pts;
1116
667
        output_packet(of, &pkt, ost, 0);
1117
    }
1118
}
1119
1120
101840
static void do_video_out(OutputFile *of,
1121
                         OutputStream *ost,
1122
                         AVFrame *next_picture)
1123
{
1124
    int ret, format_video_sync;
1125
    AVPacket pkt;
1126
101840
    AVCodecContext *enc = ost->enc_ctx;
1127
    AVRational frame_rate;
1128
    int nb_frames, nb0_frames, i;
1129
    double delta, delta0;
1130
101840
    double duration = 0;
1131
101840
    double sync_ipts = AV_NOPTS_VALUE;
1132
101840
    int frame_size = 0;
1133
101840
    InputStream *ist = NULL;
1134
101840
    AVFilterContext *filter = ost->filter->filter;
1135
1136
101840
    init_output_stream_wrapper(ost, next_picture, 1);
1137
101840
    sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1138
1139
101840
    if (ost->source_index >= 0)
1140
100324
        ist = input_streams[ost->source_index];
1141
1142
101840
    frame_rate = av_buffersink_get_frame_rate(filter);
1143

101840
    if (frame_rate.num > 0 && frame_rate.den > 0)
1144
101711
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1145
1146


101840
    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1147
65438
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1148
1149
101840
    if (!ost->filters_script &&
1150
101743
        !ost->filters &&
1151

85450
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1152
81283
        next_picture &&
1153
81283
        ist &&
1154
81283
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1155
76709
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1156
    }
1157
1158
101840
    if (!next_picture) {
1159
        //end, flushing
1160
2420
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1161
                                          ost->last_nb0_frames[1],
1162
                                          ost->last_nb0_frames[2]);
1163
    } else {
1164
99420
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1165
99420
        delta  = delta0 + duration;
1166
1167
        /* by default, we output a single frame */
1168
99420
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1169
99420
        nb_frames = 1;
1170
1171
99420
        format_video_sync = video_sync_method;
1172
99420
        if (format_video_sync == VSYNC_AUTO) {
1173
56393
            if(!strcmp(of->ctx->oformat->name, "avi")) {
1174
12650
                format_video_sync = VSYNC_VFR;
1175
            } else
1176

43743
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1177
56393
            if (   ist
1178
54922
                && format_video_sync == VSYNC_CFR
1179
9424
                && input_files[ist->file_index]->ctx->nb_streams == 1
1180
8349
                && input_files[ist->file_index]->input_ts_offset == 0) {
1181
8349
                format_video_sync = VSYNC_VSCFR;
1182
            }
1183

56393
            if (format_video_sync == VSYNC_CFR && copy_ts) {
1184
                format_video_sync = VSYNC_VSCFR;
1185
            }
1186
        }
1187

99420
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1188
1189

99420
        if (delta0 < 0 &&
1190
11212
            delta > 0 &&
1191
10786
            format_video_sync != VSYNC_PASSTHROUGH &&
1192
            format_video_sync != VSYNC_DROP) {
1193
8017
            if (delta0 < -0.6) {
1194
431
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1195
            } else
1196
7586
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1197
8017
            sync_ipts = ost->sync_opts;
1198
8017
            duration += delta0;
1199
8017
            delta0 = 0;
1200
        }
1201
1202

99420
        switch (format_video_sync) {
1203
8349
        case VSYNC_VSCFR:
1204

8349
            if (ost->frame_number == 0 && delta0 >= 0.5) {
1205
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1206
                delta = duration;
1207
                delta0 = 0;
1208
                ost->sync_opts = llrint(sync_ipts);
1209
            }
1210
        case VSYNC_CFR:
1211
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1212

9578
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1213
                nb_frames = 0;
1214
9578
            } else if (delta < -1.1)
1215
                nb_frames = 0;
1216
9578
            else if (delta > 1.1) {
1217
350
                nb_frames = lrintf(delta);
1218
350
                if (delta0 > 1.1)
1219
2
                    nb0_frames = llrintf(delta0 - 0.6);
1220
            }
1221
9578
            break;
1222
46962
        case VSYNC_VFR:
1223
46962
            if (delta <= -0.6)
1224
170
                nb_frames = 0;
1225
46792
            else if (delta > 0.6)
1226
46209
                ost->sync_opts = llrint(sync_ipts);
1227
46962
            break;
1228
42880
        case VSYNC_DROP:
1229
        case VSYNC_PASSTHROUGH:
1230
42880
            ost->sync_opts = llrint(sync_ipts);
1231
42880
            break;
1232
        default:
1233
            av_assert0(0);
1234
        }
1235
    }
1236
1237
101840
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1238
101840
    nb0_frames = FFMIN(nb0_frames, nb_frames);
1239
1240
101840
    memmove(ost->last_nb0_frames + 1,
1241
101840
            ost->last_nb0_frames,
1242
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1243
101840
    ost->last_nb0_frames[0] = nb0_frames;
1244
1245

101840
    if (nb0_frames == 0 && ost->last_dropped) {
1246
170
        nb_frames_drop++;
1247
170
        av_log(NULL, AV_LOG_VERBOSE,
1248
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1249
170
               ost->frame_number, ost->st->index, ost->last_frame->pts);
1250
    }
1251

101840
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1252
49
        if (nb_frames > dts_error_threshold * 30) {
1253
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1254
            nb_frames_drop++;
1255
            return;
1256
        }
1257

49
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1258
49
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1259
49
        if (nb_frames_dup > dup_warning) {
1260
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1261
            dup_warning *= 10;
1262
        }
1263
    }
1264

101840
    ost->last_dropped = nb_frames == nb0_frames && next_picture;
1265
1266
    /* duplicates frame if needed */
1267
201298
    for (i = 0; i < nb_frames; i++) {
1268
        AVFrame *in_picture;
1269
99459
        int forced_keyframe = 0;
1270
        double pts_time;
1271
99459
        av_init_packet(&pkt);
1272
99459
        pkt.data = NULL;
1273
99459
        pkt.size = 0;
1274
1275

99459
        if (i < nb0_frames && ost->last_frame) {
1276
69
            in_picture = ost->last_frame;
1277
        } else
1278
99390
            in_picture = next_picture;
1279
1280
99459
        if (!in_picture)
1281
            return;
1282
1283
99459
        in_picture->pts = ost->sync_opts;
1284
1285
99459
        if (!check_recording_time(ost))
1286
1
            return;
1287
1288
99458
        in_picture->quality = enc->global_quality;
1289
99458
        in_picture->pict_type = 0;
1290
1291
99458
        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1292
4670
            in_picture->pts != AV_NOPTS_VALUE)
1293
4670
            ost->forced_kf_ref_pts = in_picture->pts;
1294
1295
198916
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1296
99458
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1297
99458
        if (ost->forced_kf_index < ost->forced_kf_count &&
1298
39
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1299
2
            ost->forced_kf_index++;
1300
2
            forced_keyframe = 1;
1301
99456
        } else if (ost->forced_keyframes_pexpr) {
1302
            double res;
1303
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1304
            res = av_expr_eval(ost->forced_keyframes_pexpr,
1305
                               ost->forced_keyframes_expr_const_values, NULL);
1306
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1307
                    ost->forced_keyframes_expr_const_values[FKF_N],
1308
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1309
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1310
                    ost->forced_keyframes_expr_const_values[FKF_T],
1311
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1312
                    res);
1313
            if (res) {
1314
                forced_keyframe = 1;
1315
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1316
                    ost->forced_keyframes_expr_const_values[FKF_N];
1317
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1318
                    ost->forced_keyframes_expr_const_values[FKF_T];
1319
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1320
            }
1321
1322
            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1323
99456
        } else if (   ost->forced_keyframes
1324
48
                   && !strncmp(ost->forced_keyframes, "source", 6)
1325
                   && in_picture->key_frame==1
1326
                   && !i) {
1327
            forced_keyframe = 1;
1328
        }
1329
1330
99458
        if (forced_keyframe) {
1331
2
            in_picture->pict_type = AV_PICTURE_TYPE_I;
1332
2
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1333
        }
1334
1335
99458
        update_benchmark(NULL);
1336
99458
        if (debug_ts) {
1337
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1338
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1339
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1340
                   enc->time_base.num, enc->time_base.den);
1341
        }
1342
1343
99458
        ost->frames_encoded++;
1344
1345
99458
        ret = avcodec_send_frame(enc, in_picture);
1346
99458
        if (ret < 0)
1347
            goto error;
1348
        // Make sure Closed Captions will not be duplicated
1349
99458
        av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1350
1351
        while (1) {
1352
198694
            ret = avcodec_receive_packet(enc, &pkt);
1353
198694
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1354
198694
            if (ret == AVERROR(EAGAIN))
1355
99458
                break;
1356
99236
            if (ret < 0)
1357
                goto error;
1358
1359
99236
            if (debug_ts) {
1360
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1361
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1362
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1363
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1364
            }
1365
1366

99236
            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1367
                pkt.pts = ost->sync_opts;
1368
1369
99236
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1370
1371
99236
            if (debug_ts) {
1372
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1373
                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1374
                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1375
                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1376
            }
1377
1378
99236
            frame_size = pkt.size;
1379
99236
            output_packet(of, &pkt, ost, 0);
1380
1381
            /* if two pass, output log */
1382

99236
            if (ost->logfile && enc->stats_out) {
1383
                fprintf(ost->logfile, "%s", enc->stats_out);
1384
            }
1385
        }
1386
99458
        ost->sync_opts++;
1387
        /*
1388
         * For video, number of frames in == number of packets out.
1389
         * But there may be reordering, so we can't throw away frames on encoder
1390
         * flush; we need to limit them here, before they go into the encoder.
1391
         */
1392
99458
        ost->frame_number++;
1393
1394

99458
        if (vstats_filename && frame_size)
1395
            do_video_stats(ost, frame_size);
1396
    }
1397
1398
101839
    if (!ost->last_frame)
1399
4751
        ost->last_frame = av_frame_alloc();
1400
101839
    av_frame_unref(ost->last_frame);
1401

101839
    if (next_picture && ost->last_frame)
1402
99419
        av_frame_ref(ost->last_frame, next_picture);
1403
    else
1404
2420
        av_frame_free(&ost->last_frame);
1405
1406
101839
    return;
1407
error:
1408
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1409
    exit_program(1);
1410
}
1411
1412
static double psnr(double d)
1413
{
1414
    return -10.0 * log10(d);
1415
}
1416
1417
static void do_video_stats(OutputStream *ost, int frame_size)
1418
{
1419
    AVCodecContext *enc;
1420
    int frame_number;
1421
    double ti1, bitrate, avg_bitrate;
1422
1423
    /* this is executed just the first time do_video_stats is called */
1424
    if (!vstats_file) {
1425
        vstats_file = fopen(vstats_filename, "w");
1426
        if (!vstats_file) {
1427
            perror("fopen");
1428
            exit_program(1);
1429
        }
1430
    }
1431
1432
    enc = ost->enc_ctx;
1433
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1434
        frame_number = ost->st->nb_frames;
1435
        if (vstats_version <= 1) {
1436
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1437
                    ost->quality / (float)FF_QP2LAMBDA);
1438
        } else  {
1439
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1440
                    ost->quality / (float)FF_QP2LAMBDA);
1441
        }
1442
1443
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1444
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1445
1446
        fprintf(vstats_file,"f_size= %6d ", frame_size);
1447
        /* compute pts value */
1448
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1449
        if (ti1 < 0.01)
1450
            ti1 = 0.01;
1451
1452
        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1453
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1454
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1455
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1456
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1457
    }
1458
}
1459
1460
314
static void finish_output_stream(OutputStream *ost)
1461
{
1462
314
    OutputFile *of = output_files[ost->file_index];
1463
    int i;
1464
1465
314
    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1466
1467
314
    if (of->shortest) {
1468
        for (i = 0; i < of->ctx->nb_streams; i++)
1469
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1470
    }
1471
314
}
1472
1473
/**
1474
 * Get and encode new output from any of the filtergraphs, without requesting
1475
 * new input from them, i.e. without causing any filtergraph activity.
1476
 *
1477
 * @return  0 for success, <0 for severe errors
1478
 */
1479
410145
static int reap_filters(int flush)
1480
{
1481
410145
    AVFrame *filtered_frame = NULL;
1482
    int i;
1483
1484
    /* Reap all buffers present in the buffer sinks */
1485
835053
    for (i = 0; i < nb_output_streams; i++) {
1486
424908
        OutputStream *ost = output_streams[i];
1487
424908
        OutputFile    *of = output_files[ost->file_index];
1488
        AVFilterContext *filter;
1489
424908
        AVCodecContext *enc = ost->enc_ctx;
1490
424908
        int ret = 0;
1491
1492

424908
        if (!ost->filter || !ost->filter->graph->graph)
1493
48400
            continue;
1494
376508
        filter = ost->filter->filter;
1495
1496
        /*
1497
         * Unlike video, with audio the audio frame size matters.
1498
         * Currently we are fully reliant on the lavfi filter chain to
1499
         * do the buffering for us, and thus the frame size parameter
1500
         * needs to be set accordingly. Where does one get the required
1501
         * frame size? From the initialized AVCodecContext of an audio
1502
         * encoder. Thus, if we have gotten to an audio stream, initialize
1503
         * the encoder here, before receiving the first AVFrame.
1504
         */
1505
376508
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1506
260547
            init_output_stream_wrapper(ost, NULL, 1);
1507
1508

376508
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1509
            return AVERROR(ENOMEM);
1510
        }
1511
376508
        filtered_frame = ost->filtered_frame;
1512
1513
        while (1) {
1514
776850
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1515
                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
1516
776850
            if (ret < 0) {
1517

376508
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1518
                    av_log(NULL, AV_LOG_WARNING,
1519
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1520

376508
                } else if (flush && ret == AVERROR_EOF) {
1521
3586
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1522
2420
                        do_video_out(of, ost, NULL);
1523
                }
1524
376508
                break;
1525
            }
1526
400342
            if (ost->finished) {
1527
                av_frame_unref(filtered_frame);
1528
                continue;
1529
            }
1530
1531
400342
            switch (av_buffersink_get_type(filter)) {
1532
99420
            case AVMEDIA_TYPE_VIDEO:
1533
99420
                if (!ost->frame_aspect_ratio.num)
1534
99420
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1535
1536
99420
                do_video_out(of, ost, filtered_frame);
1537
99420
                break;
1538
300922
            case AVMEDIA_TYPE_AUDIO:
1539
300922
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1540
300922
                    enc->channels != filtered_frame->channels) {
1541
                    av_log(NULL, AV_LOG_ERROR,
1542
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1543
                    break;
1544
                }
1545
300922
                do_audio_out(of, ost, filtered_frame);
1546
300922
                break;
1547
            default:
1548
                // TODO support subtitle filters
1549
                av_assert0(0);
1550
            }
1551
1552
400342
            av_frame_unref(filtered_frame);
1553
        }
1554
    }
1555
1556
410145
    return 0;
1557
}
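
The loop above only collects frames the filtergraph has already produced: AV_BUFFERSINK_FLAG_NO_REQUEST tells the sink not to ask upstream for more data, and EAGAIN / EOF simply end the loop for that stream. A minimal sketch of that pull pattern in isolation, assuming the libavfilter/libavutil headers used by this file; drain_buffersink is an illustrative name.

#include "libavfilter/buffersink.h"
#include "libavutil/frame.h"

/* Drain whatever output the buffersink already holds, without asking the
 * graph to produce more. */
static int drain_buffersink(AVFilterContext *sink, AVFrame *frame)
{
    int ret;
    while ((ret = av_buffersink_get_frame_flags(sink, frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST)) >= 0) {
        /* ... consume the frame here (encode it, inspect it, ...) ... */
        av_frame_unref(frame);
    }
    /* EAGAIN: nothing buffered right now; EOF: this sink is finished. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
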
1558
1559
5976
static void print_final_stats(int64_t total_size)
1560
{
1561
5976
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1562
5976
    uint64_t subtitle_size = 0;
1563
5976
    uint64_t data_size = 0;
1564
5976
    float percent = -1.0;
1565
    int i, j;
1566
5976
    int pass1_used = 1;
1567
1568
12100
    for (i = 0; i < nb_output_streams; i++) {
1569
6124
        OutputStream *ost = output_streams[i];
1570

6124
        switch (ost->enc_ctx->codec_type) {
1571
4842
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1572
1233
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1573
38
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1574
11
            default:                 other_size += ost->data_size; break;
1575
        }
1576
6124
        extra_size += ost->enc_ctx->extradata_size;
1577
6124
        data_size  += ost->data_size;
1578
6124
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1579
            != AV_CODEC_FLAG_PASS1)
1580
6124
            pass1_used = 0;
1581
    }
1582
1583

5976
    if (data_size && total_size>0 && total_size >= data_size)
1584
4139
        percent = 100.0 * (total_size - data_size) / data_size;
1585
1586
5976
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1587
           video_size / 1024.0,
1588
           audio_size / 1024.0,
1589
           subtitle_size / 1024.0,
1590
           other_size / 1024.0,
1591
           extra_size / 1024.0);
1592
5976
    if (percent >= 0.0)
1593
4139
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1594
    else
1595
1837
        av_log(NULL, AV_LOG_INFO, "unknown");
1596
5976
    av_log(NULL, AV_LOG_INFO, "\n");
1597
1598
    /* print verbose per-stream stats */
1599
11982
    for (i = 0; i < nb_input_files; i++) {
1600
6006
        InputFile *f = input_files[i];
1601
6006
        uint64_t total_packets = 0, total_size = 0;
1602
1603
6006
        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1604
6006
               i, f->ctx->url);
1605
1606
12341
        for (j = 0; j < f->nb_streams; j++) {
1607
6335
            InputStream *ist = input_streams[f->ist_index + j];
1608
6335
            enum AVMediaType type = ist->dec_ctx->codec_type;
1609
1610
6335
            total_size    += ist->data_size;
1611
6335
            total_packets += ist->nb_packets;
1612
1613
6335
            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1614
                   i, j, media_type_string(type));
1615
6335
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1616
                   ist->nb_packets, ist->data_size);
1617
1618
6335
            if (ist->decoding_needed) {
1619
5801
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1620
                       ist->frames_decoded);
1621
5801
                if (type == AVMEDIA_TYPE_AUDIO)
1622
1124
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1623
5801
                av_log(NULL, AV_LOG_VERBOSE, "; ");
1624
            }
1625
1626
6335
            av_log(NULL, AV_LOG_VERBOSE, "\n");
1627
        }
1628
1629
6006
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1630
               total_packets, total_size);
1631
    }
1632
1633
11953
    for (i = 0; i < nb_output_files; i++) {
1634
5977
        OutputFile *of = output_files[i];
1635
5977
        uint64_t total_packets = 0, total_size = 0;
1636
1637
5977
        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1638
5977
               i, of->ctx->url);
1639
1640
12101
        for (j = 0; j < of->ctx->nb_streams; j++) {
1641
6124
            OutputStream *ost = output_streams[of->ost_index + j];
1642
6124
            enum AVMediaType type = ost->enc_ctx->codec_type;
1643
1644
6124
            total_size    += ost->data_size;
1645
6124
            total_packets += ost->packets_written;
1646
1647
6124
            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1648
                   i, j, media_type_string(type));
1649
6124
            if (ost->encoding_needed) {
1650
5825
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1651
                       ost->frames_encoded);
1652
5825
                if (type == AVMEDIA_TYPE_AUDIO)
1653
1123
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1654
5825
                av_log(NULL, AV_LOG_VERBOSE, "; ");
1655
            }
1656
1657
6124
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1658
                   ost->packets_written, ost->data_size);
1659
1660
6124
            av_log(NULL, AV_LOG_VERBOSE, "\n");
1661
        }
1662
1663
5977
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1664
               total_packets, total_size);
1665
    }
1666
5976
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1667
6
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1668
6
        if (pass1_used) {
1669
1
            av_log(NULL, AV_LOG_WARNING, "\n");
1670
        } else {
1671
5
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1672
        }
1673
    }
1674
5976
}
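
The "muxing overhead" figure above is the relative growth of the output file over the sum of the encoded payloads: for example, 1 000 000 bytes of encoded streams written into a 1 048 576-byte file is reported as roughly 4.9 %. A minimal sketch of that arithmetic; the function name is illustrative.

#include <stdint.h>

/* Container overhead relative to the raw encoded payload, in percent.
 * Returns a negative value when it cannot be computed, matching the
 * "unknown" case printed above. */
static double muxing_overhead_percent(uint64_t total_size, uint64_t data_size)
{
    if (!data_size || total_size < data_size)
        return -1.0;
    return 100.0 * (total_size - data_size) / data_size;
}
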
1675
1676
1065916
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1677
{
1678
    AVBPrint buf, buf_script;
1679
    OutputStream *ost;
1680
    AVFormatContext *oc;
1681
    int64_t total_size;
1682
    AVCodecContext *enc;
1683
    int frame_number, vid, i;
1684
    double bitrate;
1685
    double speed;
1686
1065916
    int64_t pts = INT64_MIN + 1;
1687
    static int64_t last_time = -1;
1688
    static int qp_histogram[52];
1689
    int hours, mins, secs, us;
1690
    const char *hours_sign;
1691
    int ret;
1692
    float t;
1693
1694

1065916
    if (!print_stats && !is_last_report && !progress_avio)
1695
1059907
        return;
1696
1697
16343
    if (!is_last_report) {
1698
10367
        if (last_time == -1) {
1699
32
            last_time = cur_time;
1700
32
            return;
1701
        }
1702
10335
        if ((cur_time - last_time) < 500000)
1703
10302
            return;
1704
33
        last_time = cur_time;
1705
    }
1706
1707
6009
    t = (cur_time-timer_start) / 1000000.0;
1708
1709
1710
6009
    oc = output_files[0]->ctx;
1711
1712
6009
    total_size = avio_size(oc->pb);
1713
6009
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1714
86
        total_size = avio_tell(oc->pb);
1715
1716
6009
    vid = 0;
1717
6009
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1718
6009
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1719
12166
    for (i = 0; i < nb_output_streams; i++) {
1720
6157
        float q = -1;
1721
6157
        ost = output_streams[i];
1722
6157
        enc = ost->enc_ctx;
1723
6157
        if (!ost->stream_copy)
1724
5859
            q = ost->quality / (float) FF_QP2LAMBDA;
1725
1726

6157
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1727
8
            av_bprintf(&buf, "q=%2.1f ", q);
1728
8
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1729
                       ost->file_index, ost->index, q);
1730
        }
1731

6157
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1732
            float fps;
1733
1734
4835
            frame_number = ost->frame_number;
1735
4835
            fps = t > 1 ? frame_number / t : 0;
1736
4835
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1737
                     frame_number, fps < 9.95, fps, q);
1738
4835
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
1739
4835
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
1740
4835
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1741
                       ost->file_index, ost->index, q);
1742
4835
            if (is_last_report)
1743
4834
                av_bprintf(&buf, "L");
1744
4835
            if (qp_hist) {
1745
                int j;
1746
                int qp = lrintf(q);
1747
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1748
                    qp_histogram[qp]++;
1749
                for (j = 0; j < 32; j++)
1750
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1751
            }
1752
1753

4835
            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1754
                int j;
1755
                double error, error_sum = 0;
1756
                double scale, scale_sum = 0;
1757
                double p;
1758
                char type[3] = { 'Y','U','V' };
1759
                av_bprintf(&buf, "PSNR=");
1760
                for (j = 0; j < 3; j++) {
1761
                    if (is_last_report) {
1762
                        error = enc->error[j];
1763
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1764
                    } else {
1765
                        error = ost->error[j];
1766
                        scale = enc->width * enc->height * 255.0 * 255.0;
1767
                    }
1768
                    if (j)
1769
                        scale /= 4;
1770
                    error_sum += error;
1771
                    scale_sum += scale;
1772
                    p = psnr(error / scale);
1773
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1774
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1775
                               ost->file_index, ost->index, type[j] | 32, p);
1776
                }
1777
                p = psnr(error_sum / scale_sum);
1778
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1779
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1780
                           ost->file_index, ost->index, p);
1781
            }
1782
4835
            vid = 1;
1783
        }
1784
        /* compute min output value */
1785
6157
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1786
6157
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1787
                                          ost->st->time_base, AV_TIME_BASE_Q));
1788
6157
            if (copy_ts) {
1789

3
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1790
3
                    copy_ts_first_pts = pts;
1791
3
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
1792
3
                    pts -= copy_ts_first_pts;
1793
            }
1794
        }
1795
1796
6157
        if (is_last_report)
1797
6124
            nb_frames_drop += ost->last_dropped;
1798
    }
1799
1800
6009
    secs = FFABS(pts) / AV_TIME_BASE;
1801
6009
    us = FFABS(pts) % AV_TIME_BASE;
1802
6009
    mins = secs / 60;
1803
6009
    secs %= 60;
1804
6009
    hours = mins / 60;
1805
6009
    mins %= 60;
1806
6009
    hours_sign = (pts < 0) ? "-" : "";
1807
1808

6009
    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1809
6009
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1810
1811
6009
    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1812
5925
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1813
6009
    if (pts == AV_NOPTS_VALUE) {
1814
        av_bprintf(&buf, "N/A ");
1815
    } else {
1816
6009
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1817
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1818
    }
1819
1820
6009
    if (bitrate < 0) {
1821
99
        av_bprintf(&buf, "bitrate=N/A");
1822
99
        av_bprintf(&buf_script, "bitrate=N/A\n");
1823
    }else{
1824
5910
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1825
5910
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1826
    }
1827
1828
6009
    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1829
5925
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1830
6009
    if (pts == AV_NOPTS_VALUE) {
1831
        av_bprintf(&buf_script, "out_time_us=N/A\n");
1832
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
1833
        av_bprintf(&buf_script, "out_time=N/A\n");
1834
    } else {
1835
6009
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1836
6009
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1837
6009
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1838
                   hours_sign, hours, mins, secs, us);
1839
    }
1840
1841

6009
    if (nb_frames_dup || nb_frames_drop)
1842
26
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1843
6009
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1844
6009
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1845
1846
6009
    if (speed < 0) {
1847
1
        av_bprintf(&buf, " speed=N/A");
1848
1
        av_bprintf(&buf_script, "speed=N/A\n");
1849
    } else {
1850
6008
        av_bprintf(&buf, " speed=%4.3gx", speed);
1851
6008
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1852
    }
1853
1854

6009
    if (print_stats || is_last_report) {
1855
6009
        const char end = is_last_report ? '\n' : '\r';
1856

6009
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1857
            fprintf(stderr, "%s    %c", buf.str, end);
1858
        } else
1859
6009
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);
1860
1861
6009
        fflush(stderr);
1862
    }
1863
6009
    av_bprint_finalize(&buf, NULL);
1864
1865
6009
    if (progress_avio) {
1866
        av_bprintf(&buf_script, "progress=%s\n",
1867
                   is_last_report ? "end" : "continue");
1868
        avio_write(progress_avio, buf_script.str,
1869
                   FFMIN(buf_script.len, buf_script.size - 1));
1870
        avio_flush(progress_avio);
1871
        av_bprint_finalize(&buf_script, NULL);
1872
        if (is_last_report) {
1873
            if ((ret = avio_closep(&progress_avio)) < 0)
1874
                av_log(NULL, AV_LOG_ERROR,
1875
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1876
        }
1877
    }
1878
1879
6009
    if (is_last_report)
1880
5976
        print_final_stats(total_size);
1881
}
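
Two small conversions drive the status line built above: the output position is kept in microseconds (AV_TIME_BASE units) and split into hours, minutes, seconds and a 1/100 s remainder, and the bitrate comes out directly in kbit/s because dividing bits by (microseconds / 1000) is dividing by milliseconds. A minimal standalone sketch of both, assuming a non-negative position; the names are illustrative.

#include <stdint.h>
#include <stdio.h>

static void print_progress_line(int64_t pts_us, int64_t total_size_bytes)
{
    const int64_t TB = 1000000;                  /* AV_TIME_BASE: microseconds */
    int64_t secs   = pts_us / TB;
    int hundredths = (int)(100 * (pts_us % TB) / TB);
    int hours      = (int)(secs / 3600);
    int mins       = (int)(secs / 60 % 60);
    int s          = (int)(secs % 60);

    /* bits divided by milliseconds gives kbit/s directly */
    double bitrate = pts_us > 0 ? total_size_bytes * 8 / (pts_us / 1000.0) : -1;

    printf("time=%02d:%02d:%02d.%02d bitrate=%6.1fkbits/s\n",
           hours, mins, s, hundredths, bitrate);
}
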
1882
1883
1
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1884
{
1885
    // We never got any input. Set a fake format, which will
1886
    // come from libavformat.
1887
1
    ifilter->format                 = par->format;
1888
1
    ifilter->sample_rate            = par->sample_rate;
1889
1
    ifilter->channels               = par->channels;
1890
1
    ifilter->channel_layout         = par->channel_layout;
1891
1
    ifilter->width                  = par->width;
1892
1
    ifilter->height                 = par->height;
1893
1
    ifilter->sample_aspect_ratio    = par->sample_aspect_ratio;
1894
1
}
1895
1896
5978
static void flush_encoders(void)
1897
{
1898
    int i, ret;
1899
1900
12102
    for (i = 0; i < nb_output_streams; i++) {
1901
6126
        OutputStream   *ost = output_streams[i];
1902
6126
        AVCodecContext *enc = ost->enc_ctx;
1903
6126
        OutputFile      *of = output_files[ost->file_index];
1904
1905
6126
        if (!ost->encoding_needed)
1906
299
            continue;
1907
1908
        // Try to enable encoding with no input frames.
1909
        // Maybe we should just let encoding fail instead.
1910
5827
        if (!ost->initialized) {
1911
2
            FilterGraph *fg = ost->filter->graph;
1912
1913
2
            av_log(NULL, AV_LOG_WARNING,
1914
                   "Finishing stream %d:%d without any data written to it.\n",
1915
2
                   ost->file_index, ost->st->index);
1916
1917

2
            if (ost->filter && !fg->graph) {
1918
                int x;
1919
2
                for (x = 0; x < fg->nb_inputs; x++) {
1920
                    InputFilter *ifilter = fg->inputs[x];
1921
                    if (ifilter->format < 0)
1922
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1923
                }
1924
1925
2
                if (!ifilter_has_all_input_formats(fg))
1926
                    continue;
1927
1928
2
                ret = configure_filtergraph(fg);
1929
2
                if (ret < 0) {
1930
2
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1931
2
                    exit_program(1);
1932
                }
1933
1934
                finish_output_stream(ost);
1935
            }
1936
1937
            init_output_stream_wrapper(ost, NULL, 1);
1938
        }
1939
1940

5825
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1941
31
            continue;
1942
1943
264
        for (;;) {
1944
6058
            const char *desc = NULL;
1945
            AVPacket pkt;
1946
            int pkt_size;
1947
1948
6058
            switch (enc->codec_type) {
1949
1165
            case AVMEDIA_TYPE_AUDIO:
1950
1165
                desc   = "audio";
1951
1165
                break;
1952
4893
            case AVMEDIA_TYPE_VIDEO:
1953
4893
                desc   = "video";
1954
4893
                break;
1955
            default:
1956
                av_assert0(0);
1957
            }
1958
1959
6058
            av_init_packet(&pkt);
1960
6058
            pkt.data = NULL;
1961
6058
            pkt.size = 0;
1962
1963
6058
            update_benchmark(NULL);
1964
1965
11852
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1966
5794
                ret = avcodec_send_frame(enc, NULL);
1967
5794
                if (ret < 0) {
1968
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1969
                           desc,
1970
                           av_err2str(ret));
1971
                    exit_program(1);
1972
                }
1973
            }
1974
1975
6058
            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1976

6058
            if (ret < 0 && ret != AVERROR_EOF) {
1977
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1978
                       desc,
1979
                       av_err2str(ret));
1980
                exit_program(1);
1981
            }
1982

6058
            if (ost->logfile && enc->stats_out) {
1983
                fprintf(ost->logfile, "%s", enc->stats_out);
1984
            }
1985
6058
            if (ret == AVERROR_EOF) {
1986
5794
                output_packet(of, &pkt, ost, 1);
1987
5794
                break;
1988
            }
1989
264
            if (ost->finished & MUXER_FINISHED) {
1990
                av_packet_unref(&pkt);
1991
                continue;
1992
            }
1993
264
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1994
264
            pkt_size = pkt.size;
1995
264
            output_packet(of, &pkt, ost, 0);
1996

264
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1997
                do_video_stats(ost, pkt_size);
1998
            }
1999
        }
2000
    }
2001
5976
}
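
The inner loop above is the standard libavcodec drain sequence: once there is no more input for a stream, a NULL frame is sent to the encoder and packets are received until it returns AVERROR_EOF. A minimal sketch of that sequence on its own, assuming libavcodec/avcodec.h; the function name is illustrative.

#include "libavcodec/avcodec.h"

/* Drain an encoder after the last real frame has been submitted. */
static int drain_encoder(AVCodecContext *enc, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, NULL);     /* enter draining mode */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* ... rescale timestamps and hand the packet to the muxer here ... */
        av_packet_unref(pkt);
    }
    return ret == AVERROR_EOF ? 0 : ret;         /* EOF means fully drained */
}
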
2002
2003
/*
2004
 * Check whether a packet from ist should be written into ost at this time
2005
 */
2006
412624
static int check_output_constraints(InputStream *ist, OutputStream *ost)
2007
{
2008
412624
    OutputFile *of = output_files[ost->file_index];
2009
412624
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
2010
2011
412624
    if (ost->source_index != ist_index)
2012
17092
        return 0;
2013
2014
395532
    if (ost->finished)
2015
2880
        return 0;
2016
2017

392652
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2018
5
        return 0;
2019
2020
392647
    return 1;
2021
}
2022
2023
32228
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2024
{
2025
32228
    OutputFile *of = output_files[ost->file_index];
2026
32228
    InputFile   *f = input_files [ist->file_index];
2027
32228
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2028
32228
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2029
    AVPacket opkt;
2030
2031
    // EOF: flush output bitstream filters.
2032
32228
    if (!pkt) {
2033
        av_init_packet(&opkt);
2034
        opkt.data = NULL;
2035
        opkt.size = 0;
2036
        output_packet(of, &opkt, ost, 1);
2037
20
        return;
2038
    }
2039
2040

32228
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2041
12
        !ost->copy_initial_nonkeyframes)
2042
12
        return;
2043
2044

32216
    if (!ost->frame_number && !ost->copy_prior_start) {
2045
        int64_t comp_start = start_time;
2046
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2047
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2048
        if (pkt->pts == AV_NOPTS_VALUE ?
2049
            ist->pts < comp_start :
2050
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2051
            return;
2052
    }
2053
2054
32216
    if (of->recording_time != INT64_MAX &&
2055
446
        ist->pts >= of->recording_time + start_time) {
2056
6
        close_output_stream(ost);
2057
6
        return;
2058
    }
2059
2060
32210
    if (f->recording_time != INT64_MAX) {
2061
148
        start_time = f->ctx->start_time;
2062

148
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2063
74
            start_time += f->start_time;
2064
148
        if (ist->pts >= f->recording_time + start_time) {
2065
2
            close_output_stream(ost);
2066
2
            return;
2067
        }
2068
    }
2069
2070
    /* force the input stream PTS */
2071
32208
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2072
9930
        ost->sync_opts++;
2073
2074
32208
    if (av_packet_ref(&opkt, pkt) < 0)
2075
        exit_program(1);
2076
2077
32208
    if (pkt->pts != AV_NOPTS_VALUE)
2078
26756
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2079
2080
32208
    if (pkt->dts == AV_NOPTS_VALUE) {
2081
5200
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2082
27008
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2083
21747
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2084
21747
        if(!duration)
2085
8308
            duration = ist->dec_ctx->frame_size;
2086
43494
        opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2087
21747
                                    (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2088
                                    &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2089
        /* dts will be set immediately afterwards to what pts is now */
2090
21747
        opkt.pts = opkt.dts - ost_tb_start_time;
2091
    } else
2092
5261
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2093
32208
    opkt.dts -= ost_tb_start_time;
2094
2095
32208
    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2096
2097
32208
    output_packet(of, &opkt, ost, 0);
2098
}
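
Most of the work above is moving timestamps from the demuxer's time base into the muxer's; the start-time offsets and the audio av_rescale_delta() path are refinements of one underlying operation. A minimal sketch of that core rescale, assuming in_tb and out_tb are the input-stream and mux time bases; the function name is illustrative.

/* Rescale a copied packet from the input stream time base to the output
 * (mux) time base; av_packet_rescale_ts() covers pts, dts and duration,
 * and ignores fields that are AV_NOPTS_VALUE. */
static void rescale_copied_packet(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    av_packet_rescale_ts(pkt, in_tb, out_tb);
    /* Per-field equivalent for a single timestamp (only when it is set):
     *     pkt->pts = av_rescale_q(pkt->pts, in_tb, out_tb);            */
}
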
2099
2100
1379
int guess_input_channel_layout(InputStream *ist)
2101
{
2102
1379
    AVCodecContext *dec = ist->dec_ctx;
2103
2104
1379
    if (!dec->channel_layout) {
2105
        char layout_name[256];
2106
2107
749
        if (dec->channels > ist->guess_layout_max)
2108
7
            return 0;
2109
745
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
2110
745
        if (!dec->channel_layout)
2111
3
            return 0;
2112
742
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
2113
                                     dec->channels, dec->channel_layout);
2114
742
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2115
742
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2116
    }
2117
1372
    return 1;
2118
}
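
When the decoder reports a channel count but no layout, the code above falls back to libavutil's default mapping for that count and logs the guessed name. A minimal sketch of the same lookup using the channel-layout API this file already relies on; the function name is illustrative.

#include <stdint.h>
#include "libavutil/channel_layout.h"

/* Pick the default layout for a channel count (e.g. stereo for 2) and
 * fetch a printable name for logging. Returns 0 if there is no default. */
static uint64_t default_layout_for(int channels, char *name, int name_size)
{
    uint64_t layout = av_get_default_channel_layout(channels);
    if (!layout)
        return 0;
    av_get_channel_layout_string(name, name_size, channels, layout);
    return layout;
}
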
2119
2120
736991
static void check_decode_result(InputStream *ist, int *got_output, int ret)
2121
{
2122

736991
    if (*got_output || ret<0)
2123
380558
        decode_error_stat[ret<0] ++;
2124
2125

736991
    if (ret < 0 && exit_on_error)
2126
        exit_program(1);
2127
2128

736991
    if (*got_output && ist) {
2129

379410
        if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2130
36
            av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2131
36
                   "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2132
36
            if (exit_on_error)
2133
                exit_program(1);
2134
        }
2135
    }
2136
736991
}
2137
2138
// Filters can be configured only if the formats of all inputs are known.
2139
665500
static int ifilter_has_all_input_formats(FilterGraph *fg)
2140
{
2141
    int i;
2142
671395
    for (i = 0; i < fg->nb_inputs; i++) {
2143

665542
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144
659628
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2145
659647
            return 0;
2146
    }
2147
5853
    return 1;
2148
}
2149
2150
379810
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2151
{
2152
379810
    FilterGraph *fg = ifilter->graph;
2153
    int need_reinit, ret, i;
2154
2155
    /* determine if the parameters for this input changed */
2156
379810
    need_reinit = ifilter->format != frame->format;
2157
2158
379810
    switch (ifilter->ist->st->codecpar->codec_type) {
2159
280676
    case AVMEDIA_TYPE_AUDIO:
2160
840903
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
2161

560226
                       ifilter->channels       != frame->channels ||
2162
279550
                       ifilter->channel_layout != frame->channel_layout;
2163
280676
        break;
2164
99134
    case AVMEDIA_TYPE_VIDEO:
2165
193602
        need_reinit |= ifilter->width  != frame->width ||
2166
94468
                       ifilter->height != frame->height;
2167
99134
        break;
2168
    }
2169
2170

379810
    if (!ifilter->ist->reinit_filters && fg->graph)
2171
        need_reinit = 0;
2172
2173
379810
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2174

379810
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2175
        need_reinit = 1;
2176
2177
379810
    if (need_reinit) {
2178
5805
        ret = ifilter_parameters_from_frame(ifilter, frame);
2179
5805
        if (ret < 0)
2180
            return ret;
2181
    }
2182
2183
    /* (re)init the graph if possible, otherwise buffer the frame and return */
2184

379810
    if (need_reinit || !fg->graph) {
2185
11612
        for (i = 0; i < fg->nb_inputs; i++) {
2186
5826
            if (!ifilter_has_all_input_formats(fg)) {
2187
19
                AVFrame *tmp = av_frame_clone(frame);
2188
19
                if (!tmp)
2189
                    return AVERROR(ENOMEM);
2190
19
                av_frame_unref(frame);
2191
2192
19
                if (!av_fifo_space(ifilter->frame_queue)) {
2193
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2194
                    if (ret < 0) {
2195
                        av_frame_free(&tmp);
2196
                        return ret;
2197
                    }
2198
                }
2199
19
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2200
19
                return 0;
2201
            }
2202
        }
2203
2204
5786
        ret = reap_filters(1);
2205

5786
        if (ret < 0 && ret != AVERROR_EOF) {
2206
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2207
            return ret;
2208
        }
2209
2210
5786
        ret = configure_filtergraph(fg);
2211
5786
        if (ret < 0) {
2212
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2213
            return ret;
2214
        }
2215
    }
2216
2217
379791
    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2218
379791
    if (ret < 0) {
2219
        if (ret != AVERROR_EOF)
2220
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2221
        return ret;
2222
    }
2223
2224
379791
    return 0;
2225
}
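
The push at the end of the function above is the counterpart of the buffersink pull shown earlier: once the graph is configured, each decoded frame is handed to its buffersrc. A minimal sketch of that single step, assuming the libavfilter headers used by this file; the function name is illustrative.

#include "libavfilter/buffersrc.h"

/* Push one decoded frame into a filtergraph input. Without
 * AV_BUFFERSRC_FLAG_KEEP_REF the buffersrc takes over the frame's
 * references and resets the frame on success. */
static int push_to_buffersrc(AVFilterContext *src, AVFrame *frame)
{
    int ret = av_buffersrc_add_frame_flags(src, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0 && ret != AVERROR_EOF)
        av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
    return ret;
}
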
2226
2227
5758
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2228
{
2229
    int ret;
2230
2231
5758
    ifilter->eof = 1;
2232
2233
5758
    if (ifilter->filter) {
2234
5757
        ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2235
5757
        if (ret < 0)
2236
            return ret;
2237
    } else {
2238
        // the filtergraph was never configured
2239
1
        if (ifilter->format < 0)
2240
1
            ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2241

1
        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2242
            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2243
            return AVERROR_INVALIDDATA;
2244
        }
2245
    }
2246
2247
5758
    return 0;
2248
}
2249
2250
// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2251
// There is the following difference: if you got a frame, you must call
2252
// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2253
// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2254
741471
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2255
{
2256
    int ret;
2257
2258
741471
    *got_frame = 0;
2259
2260
741471
    if (pkt) {
2261
362734
        ret = avcodec_send_packet(avctx, pkt);
2262
        // In particular, we don't expect AVERROR(EAGAIN), because we read all
2263
        // decoded frames with avcodec_receive_frame() until done.
2264

362734
        if (ret < 0 && ret != AVERROR_EOF)
2265
379
            return ret;
2266
    }
2267
2268
741092
    ret = avcodec_receive_frame(avctx, frame);
2269

741092
    if (ret < 0 && ret != AVERROR(EAGAIN))
2270
5757
        return ret;
2271
735335
    if (ret >= 0)
2272
379410
        *got_frame = 1;
2273
2274
735335
    return 0;
2275
}
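
The comment above decode() spells out the calling convention the rest of this file depends on: send the packet once, and whenever a frame comes back, keep calling with pkt == NULL until no more frames are returned. A minimal sketch of a caller honouring that convention; decode_all is an illustrative name and the frame-consuming step is left as a comment.

/* Feed one packet to decode() and pull every frame it makes available. */
static int decode_all(AVCodecContext *avctx, AVFrame *frame, AVPacket *pkt)
{
    int got_frame;
    int ret = decode(avctx, frame, &got_frame, pkt);    /* first call: with the packet */

    while (ret >= 0 && got_frame) {
        /* ... use the decoded frame here ... */
        av_frame_unref(frame);
        ret = decode(avctx, frame, &got_frame, NULL);   /* then drain with pkt == NULL */
    }
    return ret;
}
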
2276
2277
379410
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2278
{
2279
    int i, ret;
2280
    AVFrame *f;
2281
2282
    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2283
759220
    for (i = 0; i < ist->nb_filters; i++) {
2284
379810
        if (i < ist->nb_filters - 1) {
2285
400
            f = ist->filter_frame;
2286
400
            ret = av_frame_ref(f, decoded_frame);
2287
400
            if (ret < 0)
2288
                break;
2289
        } else
2290
379410
            f = decoded_frame;
2291
379810
        ret = ifilter_send_frame(ist->filters[i], f);
2292
379810
        if (ret == AVERROR_EOF)
2293
            ret = 0; /* ignore */
2294
379810
        if (ret < 0) {
2295
            av_log(NULL, AV_LOG_ERROR,
2296
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2297
            break;
2298
        }
2299
    }
2300
379410
    return ret;
2301
}
2302
2303
534453
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2304
                        int *decode_failed)
2305
{
2306
    AVFrame *decoded_frame;
2307
534453
    AVCodecContext *avctx = ist->dec_ctx;
2308
534453
    int ret, err = 0;
2309
    AVRational decoded_frame_tb;
2310
2311

534453
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2312
        return AVERROR(ENOMEM);
2313

534453
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2314
        return AVERROR(ENOMEM);
2315
534453
    decoded_frame = ist->decoded_frame;
2316
2317
534453
    update_benchmark(NULL);
2318
534453
    ret = decode(avctx, decoded_frame, got_output, pkt);
2319
534453
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2320
534453
    if (ret < 0)
2321
1133
        *decode_failed = 1;
2322
2323

534453
    if (ret >= 0 && avctx->sample_rate <= 0) {
2324
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2325
        ret = AVERROR_INVALIDDATA;
2326
    }
2327
2328
534453
    if (ret != AVERROR_EOF)
2329
533330
        check_decode_result(ist, got_output, ret);
2330
2331

534453
    if (!*got_output || ret < 0)
2332
254177
        return ret;
2333
2334
280276
    ist->samples_decoded += decoded_frame->nb_samples;
2335
280276
    ist->frames_decoded++;
2336
2337
    /* increment next_dts to use for the case where the input stream does not
2338
       have timestamps or there are multiple frames in the packet */
2339
280276
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2340
280276
                     avctx->sample_rate;
2341
280276
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2342
280276
                     avctx->sample_rate;
2343
2344
280276
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
2345
251515
        decoded_frame_tb   = ist->st->time_base;
2346

28761
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2347
3
        decoded_frame->pts = pkt->pts;
2348
3
        decoded_frame_tb   = ist->st->time_base;
2349
    }else {
2350
28758
        decoded_frame->pts = ist->dts;
2351
28758
        decoded_frame_tb   = AV_TIME_BASE_Q;
2352
    }
2353
280276
    if (decoded_frame->pts != AV_NOPTS_VALUE)
2354
280276
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2355
280276
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2356
280276
                                              (AVRational){1, avctx->sample_rate});
2357
280276
    ist->nb_samples = decoded_frame->nb_samples;
2358
280276
    err = send_frame_to_filters(ist, decoded_frame);
2359
2360
280276
    av_frame_unref(ist->filter_frame);
2361
280276
    av_frame_unref(decoded_frame);
2362
280276
    return err < 0 ? err : ret;
2363
}
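
The timestamp handling above deliberately uses av_rescale_delta() rather than a plain av_rescale_q(): rescaling through a {1, sample_rate} time base while carrying the sub-sample remainder keeps consecutive audio frames contiguous instead of accumulating rounding error. A minimal sketch of that call in isolation; the remainder variable must persist across frames (initialized to AV_NOPTS_VALUE), as ist->filter_in_rescale_delta_last does above, and the function name is illustrative.

#include "libavutil/mathematics.h"

/* Rescale an audio frame pts from in_tb to out_tb, carrying the sub-sample
 * remainder in *last_remainder so consecutive frames stay sample-accurate. */
static int64_t rescale_audio_pts(int64_t pts, AVRational in_tb, AVRational out_tb,
                                 int nb_samples, int sample_rate,
                                 int64_t *last_remainder)
{
    AVRational fs_tb = { 1, sample_rate };
    return av_rescale_delta(in_tb, pts, fs_tb, nb_samples, last_remainder, out_tb);
}
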
2364
2365
208563
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2366
                        int *decode_failed)
2367
{
2368
    AVFrame *decoded_frame;
2369
208563
    int i, ret = 0, err = 0;
2370
    int64_t best_effort_timestamp;
2371
208563
    int64_t dts = AV_NOPTS_VALUE;
2372
    AVPacket avpkt;
2373
2374
    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2375
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2376
    // skip the packet.
2377

208563
    if (!eof && pkt && pkt->size == 0)
2378
1545
        return 0;
2379
2380

207018
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2381
        return AVERROR(ENOMEM);
2382

207018
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2383
        return AVERROR(ENOMEM);
2384
207018
    decoded_frame = ist->decoded_frame;
2385
207018
    if (ist->dts != AV_NOPTS_VALUE)
2386
206607
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2387
207018
    if (pkt) {
2388
108529
        avpkt = *pkt;
2389
108529
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2390
    }
2391
2392
    // The old code used to set dts on the drain packet, which does not work
2393
    // with the new API anymore.
2394
207018
    if (eof) {
2395
5274
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2396
5274
        if (!new)
2397
            return AVERROR(ENOMEM);
2398
5274
        ist->dts_buffer = new;
2399
5274
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2400
    }
2401
2402
207018
    update_benchmark(NULL);
2403
207018
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2404
207018
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2405
207018
    if (ret < 0)
2406
5003
        *decode_failed = 1;
2407
2408
    // The following line may be required in some cases where there is no parser
2409
    // or the parser does not set has_b_frames correctly
2410
207018
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2411
1
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2412
1
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2413
        } else
2414
            av_log(ist->dec_ctx, AV_LOG_WARNING,
2415
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
2416
                   "If you want to help, upload a sample "
2417
                   "of this file to https://streams.videolan.org/upload/ "
2418
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2419
                   ist->dec_ctx->has_b_frames,
2420
                   ist->st->codecpar->video_delay);
2421
    }
2422
2423
207018
    if (ret != AVERROR_EOF)
2424
202389
        check_decode_result(ist, got_output, ret);
2425
2426

207018
    if (*got_output && ret >= 0) {
2427
99134
        if (ist->dec_ctx->width  != decoded_frame->width ||
2428
99125
            ist->dec_ctx->height != decoded_frame->height ||
2429
99123
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
2430
15
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2431
                decoded_frame->width,
2432
                decoded_frame->height,
2433
                decoded_frame->format,
2434
15
                ist->dec_ctx->width,
2435
15
                ist->dec_ctx->height,
2436
15
                ist->dec_ctx->pix_fmt);
2437
        }
2438
    }
2439
2440

207018
    if (!*got_output || ret < 0)
2441
107884
        return ret;
2442
2443
99134
    if(ist->top_field_first>=0)
2444
        decoded_frame->top_field_first = ist->top_field_first;
2445
2446
99134
    ist->frames_decoded++;
2447
2448

99134
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2449
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2450
        if (err < 0)
2451
            goto fail;
2452
    }
2453
99134
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2454
2455
99134
    best_effort_timestamp= decoded_frame->best_effort_timestamp;
2456
99134
    *duration_pts = decoded_frame->pkt_duration;
2457
2458
99134
    if (ist->framerate.num)
2459
103
        best_effort_timestamp = ist->cfr_next_pts++;
2460
2461

99134
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2462
528
        best_effort_timestamp = ist->dts_buffer[0];
2463
2464
528
        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2465
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2466
528
        ist->nb_dts_buffer--;
2467
    }
2468
2469
99134
    if(best_effort_timestamp != AV_NOPTS_VALUE) {
2470
99134
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2471
2472
99134
        if (ts != AV_NOPTS_VALUE)
2473
99134
            ist->next_pts = ist->pts = ts;
2474
    }
2475
2476
99134
    if (debug_ts) {
2477
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2478
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2479
               ist->st->index, av_ts2str(decoded_frame->pts),
2480
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2481
               best_effort_timestamp,
2482
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2483
               decoded_frame->key_frame, decoded_frame->pict_type,
2484
               ist->st->time_base.num, ist->st->time_base.den);
2485
    }
2486
2487
99134
    if (ist->st->sample_aspect_ratio.num)
2488
4947
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2489
2490
99134
    err = send_frame_to_filters(ist, decoded_frame);
2491
2492
99134
fail:
2493
99134
    av_frame_unref(ist->filter_frame);
2494
99134
    av_frame_unref(decoded_frame);
2495
99134
    return err < 0 ? err : ret;
2496
}
2497
2498
1272
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2499
                               int *decode_failed)
2500
{
2501
    AVSubtitle subtitle;
2502
1272
    int free_sub = 1;
2503
1272
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2504
                                          &subtitle, got_output, pkt);
2505
2506
1272
    check_decode_result(NULL, got_output, ret);
2507
2508

1272
    if (ret < 0 || !*got_output) {
2509
587
        *decode_failed = 1;
2510
587
        if (!pkt->size)
2511
34
            sub2video_flush(ist);
2512
587
        return ret;
2513
    }
2514
2515
685
    if (ist->fix_sub_duration) {
2516
        int end = 1;
2517
        if (ist->prev_sub.got_output) {
2518
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2519
                             1000, AV_TIME_BASE);
2520
            if (end < ist->prev_sub.subtitle.end_display_time) {
2521
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
2522
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
2523
                       ist->prev_sub.subtitle.end_display_time, end,
2524
                       end <= 0 ? ", dropping it" : "");
2525
                ist->prev_sub.subtitle.end_display_time = end;
2526
            }
2527
        }
2528
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2529
        FFSWAP(int,        ret,         ist->prev_sub.ret);
2530
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2531
        if (end <= 0)
2532
            goto out;
2533
    }
2534
2535
685
    if (!*got_output)
2536
        return ret;
2537
2538
685
    if (ist->sub2video.frame) {
2539
88
        sub2video_update(ist, INT64_MIN, &subtitle);
2540
597
    } else if (ist->nb_filters) {
2541
        if (!ist->sub2video.sub_queue)
2542
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2543
        if (!ist->sub2video.sub_queue)
2544
            exit_program(1);
2545
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
2546
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2547
            if (ret < 0)
2548
                exit_program(1);
2549
        }
2550
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2551
        free_sub = 0;
2552
    }
2553
2554
685
    if (!subtitle.num_rects)
2555
9
        goto out;
2556
2557
676
    ist->frames_decoded++;
2558
2559
1396
    for (i = 0; i < nb_output_streams; i++) {
2560
720
        OutputStream *ost = output_streams[i];
2561
2562

720
        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2563
675
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2564
89
            continue;
2565
2566
631
        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2567
    }
2568
2569
676
out:
2570
685
    if (free_sub)
2571
685
        avsubtitle_free(&subtitle);
2572
685
    return ret;
2573
}
2574
2575
5786
static int send_filter_eof(InputStream *ist)
2576
{
2577
    int i, ret;
2578
    /* TODO keep pts also in stream time base to avoid converting back */
2579
5786
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2580
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2581
2582
11544
    for (i = 0; i < ist->nb_filters; i++) {
2583
5758
        ret = ifilter_send_eof(ist->filters[i], pts);
2584
5758
        if (ret < 0)
2585
            return ret;
2586
    }
2587
5786
    return 0;
2588
}
2589
2590
/* pkt = NULL means EOF (needed to flush decoder buffers) */
2591
397867
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2592
{
2593
397867
    int ret = 0, i;
2594
397867
    int repeating = 0;
2595
397867
    int eof_reached = 0;
2596
2597
    AVPacket avpkt;
2598
397867
    if (!ist->saw_first_ts) {
2599
6129
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2600
6129
        ist->pts = 0;
2601

6129
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2602
219
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2603
219
            ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2604
        }
2605
6129
        ist->saw_first_ts = 1;
2606
    }
2607
2608
397867
    if (ist->next_dts == AV_NOPTS_VALUE)
2609
6543
        ist->next_dts = ist->dts;
2610
397867
    if (ist->next_pts == AV_NOPTS_VALUE)
2611
6129
        ist->next_pts = ist->pts;
2612
2613
397867
    if (!pkt) {
2614
        /* EOF handling */
2615
6525
        av_init_packet(&avpkt);
2616
6525
        avpkt.data = NULL;
2617
6525
        avpkt.size = 0;
2618
    } else {
2619
391342
        avpkt = *pkt;
2620
    }
2621
2622

397867
    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2623
354547
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2624

354547
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2625
279940
            ist->next_pts = ist->pts = ist->dts;
2626
    }
2627
2628
    // while we have more to decode or while the decoder did output something on EOF
2629
777289
    while (ist->decoding_needed) {
2630
744973
        int64_t duration_dts = 0;
2631
744973
        int64_t duration_pts = 0;
2632
744973
        int got_output = 0;
2633
744973
        int decode_failed = 0;
2634
2635
744973
        ist->pts = ist->next_pts;
2636
744973
        ist->dts = ist->next_dts;
2637
2638

744973
        switch (ist->dec_ctx->codec_type) {
2639
534453
        case AVMEDIA_TYPE_AUDIO:
2640
534453
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
2641
                                   &decode_failed);
2642
534453
            break;
2643
208563
        case AVMEDIA_TYPE_VIDEO:
2644
208563
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2645
                                   &decode_failed);
2646

208563
            if (!repeating || !pkt || got_output) {
2647

110074
                if (pkt && pkt->duration) {
2648
102510
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649

7564
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2650
6317
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2651
6317
                    duration_dts = ((int64_t)AV_TIME_BASE *
2652
6317
                                    ist->dec_ctx->framerate.den * ticks) /
2653
6317
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2654
                }
2655
2656

110074
                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2657
108827
                    ist->next_dts += duration_dts;
2658
                }else
2659
1247
                    ist->next_dts = AV_NOPTS_VALUE;
2660
            }
2661
2662
208563
            if (got_output) {
2663
99134
                if (duration_pts > 0) {
2664
96846
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2665
                } else {
2666
2288
                    ist->next_pts += duration_dts;
2667
                }
2668
            }
2669
208563
            break;
2670
1957
        case AVMEDIA_TYPE_SUBTITLE:
2671
1957
            if (repeating)
2672
685
                break;
2673
1272
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2674

1272
            if (!pkt && ret >= 0)
2675
34
                ret = AVERROR_EOF;
2676
1272
            break;
2677
        default:
2678
            return -1;
2679
        }
2680
2681
744973
        if (ret == AVERROR_EOF) {
2682
5786
            eof_reached = 1;
2683
365551
            break;
2684
        }
2685
2686
739187
        if (ret < 0) {
2687
463
            if (decode_failed) {
2688
926
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2689
463
                       ist->file_index, ist->st->index, av_err2str(ret));
2690
            } else {
2691
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2692
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2693
            }
2694

463
            if (!decode_failed || exit_on_error)
2695
                exit_program(1);
2696
463
            break;
2697
        }
2698
2699
738724
        if (got_output)
2700
380095
            ist->got_output = 1;
2701
2702
738724
        if (!got_output)
2703
358629
            break;
2704
2705
        // During draining, we might get multiple output frames in this loop.
2706
        // ffmpeg.c does not drain the filter chain on configuration changes,
2707
        // which means if we send multiple frames at once to the filters, and
2708
        // one of those frames changes configuration, the buffered frames will
2709
        // be lost. This can upset certain FATE tests.
2710
        // Decode only 1 frame per call on EOF to appease these FATE tests.
2711
        // The ideal solution would be to rewrite decoding to use the new
2712
        // decoding API in a better way.
2713
380095
        if (!pkt)
2714
673
            break;
2715
2716
379422
        repeating = 1;
2717
    }
2718
2719
    /* after flushing, send an EOF on all the filter inputs attached to the stream */
2720
    /* except when looping, where we need to flush but not send an EOF */
2721


397867
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2722
5786
        int ret = send_filter_eof(ist);
2723
5786
        if (ret < 0) {
2724
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2725
            exit_program(1);
2726
        }
2727
    }
2728
2729
    /* handle stream copy */
2730

397867
    if (!ist->decoding_needed && pkt) {
2731
32251
        ist->dts = ist->next_dts;
2732
32251
        switch (ist->dec_ctx->codec_type) {
2733
21776
        case AVMEDIA_TYPE_AUDIO:
2734
            av_assert1(pkt->duration >= 0);
2735
21776
            if (ist->dec_ctx->sample_rate) {
2736
21776
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2737
21776
                                  ist->dec_ctx->sample_rate;
2738
            } else {
2739
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2740
            }
2741
21776
            break;
2742
9944
        case AVMEDIA_TYPE_VIDEO:
2743
9944
            if (ist->framerate.num) {
2744
                // TODO: Remove work-around for c99-to-c89 issue 7
2745
79
                AVRational time_base_q = AV_TIME_BASE_Q;
2746
79
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2747
79
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2748
9865
            } else if (pkt->duration) {
2749
9462
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2750
403
            } else if(ist->dec_ctx->framerate.num != 0) {
2751
276
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2752
276
                ist->next_dts += ((int64_t)AV_TIME_BASE *
2753
276
                                  ist->dec_ctx->framerate.den * ticks) /
2754
276
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2755
            }
2756
9944
            break;
2757
        }
2758
32251
        ist->pts = ist->dts;
2759
32251
        ist->next_pts = ist->next_dts;
2760
    }
2761
809771
    for (i = 0; i < nb_output_streams; i++) {
2762
411904
        OutputStream *ost = output_streams[i];
2763
2764

411904
        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2765
379676
            continue;
2766
2767
32228
        do_streamcopy(ist, ost, pkt);
2768
    }
2769
2770
397867
    return !eof_reached;
2771
}
2772
2773
static void print_sdp(void)
2774
{
2775
    char sdp[16384];
2776
    int i;
2777
    int j;
2778
    AVIOContext *sdp_pb;
2779
    AVFormatContext **avc;
2780
2781
    for (i = 0; i < nb_output_files; i++) {
2782
        if (!output_files[i]->header_written)
2783
            return;
2784
    }
2785
2786
    avc = av_malloc_array(nb_output_files, sizeof(*avc));
2787
    if (!avc)
2788
        exit_program(1);
2789
    for (i = 0, j = 0; i < nb_output_files; i++) {
2790
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2791
            avc[j] = output_files[i]->ctx;
2792
            j++;
2793
        }
2794
    }
2795
2796
    if (!j)
2797
        goto fail;
2798
2799
    av_sdp_create(avc, j, sdp, sizeof(sdp));
2800
2801
    if (!sdp_filename) {
2802
        printf("SDP:\n%s\n", sdp);
2803
        fflush(stdout);
2804
    } else {
2805
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2806
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2807
        } else {
2808
            avio_print(sdp_pb, sdp);
2809
            avio_closep(&sdp_pb);
2810
            av_freep(&sdp_filename);
2811
        }
2812
    }
2813
2814
fail:
2815
    av_freep(&avc);
2816
}
2817
2818
921
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2819
{
2820
921
    InputStream *ist = s->opaque;
2821
    const enum AVPixelFormat *p;
2822
    int ret;
2823
2824
2533
    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2825
2533
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2826
2533
        const AVCodecHWConfig  *config = NULL;
2827
        int i;
2828
2829
2533
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2830
921
            break;
2831
2832
1612
        if (ist->hwaccel_id == HWACCEL_GENERIC ||
2833
1612
            ist->hwaccel_id == HWACCEL_AUTO) {
2834
            for (i = 0;; i++) {
2835
                config = avcodec_get_hw_config(s->codec, i);
2836
                if (!config)
2837
                    break;
2838
                if (!(config->methods &
2839
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2840
                    continue;
2841
                if (config->pix_fmt == *p)
2842
                    break;
2843
            }
2844
        }
2845
1612
        if (config) {
2846
            if (config->device_type != ist->hwaccel_device_type) {
2847
                // Different hwaccel offered, ignore.
2848
                continue;
2849
            }
2850
2851
            ret = hwaccel_decode_init(s);
2852
            if (ret < 0) {
2853
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
2854
                    av_log(NULL, AV_LOG_FATAL,
2855
                           "%s hwaccel requested for input stream #%d:%d, "
2856
                           "but cannot be initialized.\n",
2857
                           av_hwdevice_get_type_name(config->device_type),
2858
                           ist->file_index, ist->st->index);
2859
                    return AV_PIX_FMT_NONE;
2860
                }
2861
                continue;
2862
            }
2863
        } else {
2864
1612
            const HWAccel *hwaccel = NULL;
2865
            int i;
2866
1612
            for (i = 0; hwaccels[i].name; i++) {
2867
                if (hwaccels[i].pix_fmt == *p) {
2868
                    hwaccel = &hwaccels[i];
2869
                    break;
2870
                }
2871
            }
2872
1612
            if (!hwaccel) {
2873
                // No hwaccel supporting this pixfmt.
2874
1612
                continue;
2875
            }
2876
            if (hwaccel->id != ist->hwaccel_id) {
2877
                // Does not match requested hwaccel.
2878
                continue;
2879
            }
2880
2881
            ret = hwaccel->init(s);
2882
            if (ret < 0) {
2883
                av_log(NULL, AV_LOG_FATAL,
2884
                       "%s hwaccel requested for input stream #%d:%d, "
2885
                       "but cannot be initialized.\n", hwaccel->name,
2886
                       ist->file_index, ist->st->index);
2887
                return AV_PIX_FMT_NONE;
2888
            }
2889
        }
2890
2891
        if (ist->hw_frames_ctx) {
2892
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2893
            if (!s->hw_frames_ctx)
2894
                return AV_PIX_FMT_NONE;
2895
        }
2896
2897
        ist->hwaccel_pix_fmt = *p;
2898
        break;
2899
    }
2900
2901
921
    return *p;
2902
}
2903
2904
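The get_format() callback above arbitrates between the hardware-accelerated pixel formats offered by the decoder and the hwaccel the user requested, falling through to the first software format when nothing matches. For comparison, a software-only callback that simply skips every hwaccel format (an illustrative sketch, not part of ffmpeg.c) can be this small:

    /* Sketch: accept the first non-hwaccel pixel format the decoder offers;
     * returns AV_PIX_FMT_NONE if only hwaccel formats were offered.
     * Relies on libavutil/pixdesc.h for av_pix_fmt_desc_get(). */
    static enum AVPixelFormat sw_only_get_format(AVCodecContext *s,
                                                 const enum AVPixelFormat *pix_fmts)
    {
        const enum AVPixelFormat *p;

        for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
            if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
                break;
        }
        return *p;
    }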
354261
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2905
{
2906
354261
    InputStream *ist = s->opaque;
2907
2908

354261
    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2909
        return ist->hwaccel_get_buffer(s, frame, flags);
2910
2911
354261
    return avcodec_default_get_buffer2(s, frame, flags);
2912
}
2913
2914
6335
static int init_input_stream(int ist_index, char *error, int error_len)
2915
{
2916
    int ret;
2917
6335
    InputStream *ist = input_streams[ist_index];
2918
2919
6335
    if (ist->decoding_needed) {
2920
5801
        AVCodec *codec = ist->dec;
2921
5801
        if (!codec) {
2922
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2923
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2924
            return AVERROR(EINVAL);
2925
        }
2926
2927
5801
        ist->dec_ctx->opaque                = ist;
2928
5801
        ist->dec_ctx->get_format            = get_format;
2929
5801
        ist->dec_ctx->get_buffer2           = get_buffer;
2930
#if LIBAVCODEC_VERSION_MAJOR < 60
2931
5801
        ist->dec_ctx->thread_safe_callbacks = 1;
2932
#endif
2933
2934
5801
        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2935
5801
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2936
1
           (ist->decoding_needed & DECODING_FOR_OST)) {
2937
1
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2938
1
            if (ist->decoding_needed & DECODING_FOR_FILTER)
2939
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2940
        }
2941
2942
5801
        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2943
2944
        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2945
         * audio, and video decoders such as cuvid or mediacodec */
2946
5801
        ist->dec_ctx->pkt_timebase = ist->st->time_base;
2947
2948
5801
        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2949
54
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2950
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2951
5801
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2952
1
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2953
2954
5801
        ret = hw_device_setup_for_decode(ist);
2955
5801
        if (ret < 0) {
2956
            snprintf(error, error_len, "Device setup failed for "
2957
                     "decoder on input stream #%d:%d : %s",
2958
                     ist->file_index, ist->st->index, av_err2str(ret));
2959
            return ret;
2960
        }
2961
2962
5801
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2963
            if (ret == AVERROR_EXPERIMENTAL)
2964
                abort_codec_experimental(codec, 0);
2965
2966
            snprintf(error, error_len,
2967
                     "Error while opening decoder for input stream "
2968
                     "#%d:%d : %s",
2969
                     ist->file_index, ist->st->index, av_err2str(ret));
2970
            return ret;
2971
        }
2972
5801
        assert_avoptions(ist->decoder_opts);
2973
    }
2974
2975
6335
    ist->next_pts = AV_NOPTS_VALUE;
2976
6335
    ist->next_dts = AV_NOPTS_VALUE;
2977
2978
6335
    return 0;
2979
}
2980
2981
17742
static InputStream *get_input_stream(OutputStream *ost)
2982
{
2983
17742
    if (ost->source_index >= 0)
2984
17559
        return input_streams[ost->source_index];
2985
183
    return NULL;
2986
}
2987
2988
1
static int compare_int64(const void *a, const void *b)
2989
{
2990
1
    return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2991
}
2992
2993
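compare_int64() goes through FFDIFFSIGN instead of returning a - b because subtracting two int64_t values can overflow, and truncating the 64-bit difference to the comparator's int return type could flip the sign. The macro reduces to a branch-free sign comparison; an equivalent stand-alone comparator (sketch) would be:

    /* Sketch: same behaviour as FFDIFFSIGN(a, b) -- yields -1, 0 or +1
     * without any risk of signed overflow. */
    static int compare_int64_sketch(const void *a, const void *b)
    {
        const int64_t va = *(const int64_t *)a;
        const int64_t vb = *(const int64_t *)b;
        return (va > vb) - (va < vb);
    }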
/* open the muxer when all the streams are initialized */
2994
6125
static int check_init_output_file(OutputFile *of, int file_index)
2995
{
2996
    int ret, i;
2997
2998
12373
    for (i = 0; i < of->ctx->nb_streams; i++) {
2999
6396
        OutputStream *ost = output_streams[of->ost_index + i];
3000
6396
        if (!ost->initialized)
3001
148
            return 0;
3002
    }
3003
3004
5977
    of->ctx->interrupt_callback = int_cb;
3005
3006
5977
    ret = avformat_write_header(of->ctx, &of->opts);
3007
5977
    if (ret < 0) {
3008
        av_log(NULL, AV_LOG_ERROR,
3009
               "Could not write header for output file #%d "
3010
               "(incorrect codec parameters ?): %s\n",
3011
               file_index, av_err2str(ret));
3012
        return ret;
3013
    }
3014
    //assert_avoptions(of->opts);
3015
5977
    of->header_written = 1;
3016
3017
5977
    av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3018
3019

5977
    if (sdp_filename || want_sdp)
3020
        print_sdp();
3021
3022
    /* flush the muxing queues */
3023
12101
    for (i = 0; i < of->ctx->nb_streams; i++) {
3024
6124
        OutputStream *ost = output_streams[of->ost_index + i];
3025
3026
        /* try to improve muxing time_base (only possible if nothing has been written yet) */
3027
6124
        if (!av_fifo_size(ost->muxing_queue))
3028
6043
            ost->mux_timebase = ost->st->time_base;
3029
3030
6417
        while (av_fifo_size(ost->muxing_queue)) {
3031
            AVPacket pkt;
3032
293
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3033
293
            ost->muxing_queue_data_size -= pkt.size;
3034
293
            write_packet(of, &pkt, ost, 1);
3035
        }
3036
    }
3037
3038
5977
    return 0;
3039
}
3040
3041
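check_init_output_file() is the consumer end of the muxing queue: packets that arrived before every stream of this output file was initialized are drained here, right after avformat_write_header() succeeds. The producer end (write_packet(), elsewhere in this file) buffers packets while of->header_written is still 0; a simplified sketch of that enqueueing step, assuming the FIFO is sized in whole AVPacket units as above, looks like:

    /* Simplified sketch of the enqueueing side (hypothetical helper, not the
     * verbatim ffmpeg.c code): park one packet in the per-stream FIFO until
     * the muxer header has been written. */
    static int queue_packet_sketch(OutputStream *ost, AVPacket *pkt)
    {
        AVPacket tmp = { 0 };
        int ret;

        if (!av_fifo_space(ost->muxing_queue)) {
            /* double the FIFO when full; the real code additionally caps
             * its size via -max_muxing_queue_size */
            ret = av_fifo_realloc2(ost->muxing_queue,
                                   2 * av_fifo_size(ost->muxing_queue) + sizeof(tmp));
            if (ret < 0)
                return ret;
        }
        ret = av_packet_make_refcounted(pkt);   /* so ownership can be moved */
        if (ret < 0)
            return ret;
        av_packet_move_ref(&tmp, pkt);
        ost->muxing_queue_data_size += tmp.size;
        av_fifo_generic_write(ost->muxing_queue, &tmp, sizeof(tmp), NULL);
        return 0;
    }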
6124
static int init_output_bsfs(OutputStream *ost)
3042
{
3043
6124
    AVBSFContext *ctx = ost->bsf_ctx;
3044
    int ret;
3045
3046
6124
    if (!ctx)
3047
6039
        return 0;
3048
3049
85
    ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3050
85
    if (ret < 0)
3051
        return ret;
3052
3053
85
    ctx->time_base_in = ost->st->time_base;
3054
3055
85
    ret = av_bsf_init(ctx);
3056
85
    if (ret < 0) {
3057
        av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3058
               ctx->filter->name);
3059
        return ret;
3060
    }
3061
3062
85
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3063
85
    if (ret < 0)
3064
        return ret;
3065
85
    ost->st->time_base = ctx->time_base_out;
3066
3067
85
    return 0;
3068
}
3069
3070
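init_output_bsfs() only finishes the setup of a bitstream filter context that option parsing allocated earlier. Creating such a context from scratch follows the same pattern of copying the stream parameters, setting the input time base and calling av_bsf_init(); a self-contained sketch (the filter name is just an example) is:

    /* Sketch: allocate and initialize one bitstream filter for a stream. */
    static int alloc_bsf_sketch(AVStream *st, AVBSFContext **out)
    {
        const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
        AVBSFContext *ctx = NULL;
        int ret;

        if (!f)
            return AVERROR_BSF_NOT_FOUND;
        ret = av_bsf_alloc(f, &ctx);
        if (ret < 0)
            return ret;
        ret = avcodec_parameters_copy(ctx->par_in, st->codecpar);
        if (ret >= 0) {
            ctx->time_base_in = st->time_base;
            ret = av_bsf_init(ctx);
        }
        if (ret < 0) {
            av_bsf_free(&ctx);
            return ret;
        }
        *out = ctx;
        return 0;
    }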
298
static int init_output_stream_streamcopy(OutputStream *ost)
3071
{
3072
298
    OutputFile *of = output_files[ost->file_index];
3073
298
    InputStream *ist = get_input_stream(ost);
3074
298
    AVCodecParameters *par_dst = ost->st->codecpar;
3075
298
    AVCodecParameters *par_src = ost->ref_par;
3076
    AVRational sar;
3077
    int i, ret;
3078
298
    uint32_t codec_tag = par_dst->codec_tag;
3079
3080

298
    av_assert0(ist && !ost->filter);
3081
3082
298
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3083
298
    if (ret >= 0)
3084
298
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3085
298
    if (ret < 0) {
3086
        av_log(NULL, AV_LOG_FATAL,
3087
               "Error setting up codec context options.\n");
3088
        return ret;
3089
    }
3090
3091
298
    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3092
298
    if (ret < 0) {
3093
        av_log(NULL, AV_LOG_FATAL,
3094
               "Error getting reference codec parameters.\n");
3095
        return ret;
3096
    }
3097
3098
298
    if (!codec_tag) {
3099
        unsigned int codec_tag_tmp;
3100
292
        if (!of->ctx->oformat->codec_tag ||
3101

71
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3102
26
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3103
269
            codec_tag = par_src->codec_tag;
3104
    }
3105
3106
298
    ret = avcodec_parameters_copy(par_dst, par_src);
3107
298
    if (ret < 0)
3108
        return ret;
3109
3110
298
    par_dst->codec_tag = codec_tag;
3111
3112
298
    if (!ost->frame_rate.num)
3113
297
        ost->frame_rate = ist->framerate;
3114
298
    ost->st->avg_frame_rate = ost->frame_rate;
3115
3116
298
    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3117
298
    if (ret < 0)
3118
        return ret;
3119
3120
    // copy timebase while removing common factors
3121

298
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3122
297
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3123
3124
    // copy estimated duration as a hint to the muxer
3125

298
    if (ost->st->duration <= 0 && ist->st->duration > 0)
3126
175
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3127
3128
    // copy disposition
3129
298
    ost->st->disposition = ist->st->disposition;
3130
3131
298
    if (ist->st->nb_side_data) {
3132
46
        for (i = 0; i < ist->st->nb_side_data; i++) {
3133
23
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
3134
            uint8_t *dst_data;
3135
3136
23
            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3137
23
            if (!dst_data)
3138
                return AVERROR(ENOMEM);
3139
23
            memcpy(dst_data, sd_src->data, sd_src->size);
3140
        }
3141
    }
3142
3143
298
    if (ost->rotate_overridden) {
3144
1
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3145
                                              sizeof(int32_t) * 9);
3146
1
        if (sd)
3147
1
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3148
    }
3149
3150
298
    switch (par_dst->codec_type) {
3151
110
    case AVMEDIA_TYPE_AUDIO:
3152
110
        if (audio_volume != 256) {
3153
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3154
            exit_program(1);
3155
        }
3156


110
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3157
            par_dst->block_align= 0;
3158
110
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
3159
3
            par_dst->block_align= 0;
3160
110
        break;
3161
171
    case AVMEDIA_TYPE_VIDEO:
3162
171
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3163
            sar =
3164
                av_mul_q(ost->frame_aspect_ratio,
3165
                         (AVRational){ par_dst->height, par_dst->width });
3166
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3167
                   "with stream copy may produce invalid files\n");
3168
            }
3169
171
        else if (ist->st->sample_aspect_ratio.num)
3170
56
            sar = ist->st->sample_aspect_ratio;
3171
        else
3172
115
            sar = par_src->sample_aspect_ratio;
3173
171
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3174
171
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3175
171
        ost->st->r_frame_rate = ist->st->r_frame_rate;
3176
171
        break;
3177
    }
3178
3179
298
    ost->mux_timebase = ist->st->time_base;
3180
3181
298
    return 0;
3182
}
3183
3184
5825
static void set_encoder_id(OutputFile *of, OutputStream *ost)
3185
{
3186
    AVDictionaryEntry *e;
3187
3188
    uint8_t *encoder_string;
3189
    int encoder_string_len;
3190
5825
    int format_flags = 0;
3191
5825
    int codec_flags = ost->enc_ctx->flags;
3192
3193
5825
    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
3194
        return;
3195
3196
5825
    e = av_dict_get(of->opts, "fflags", NULL, 0);
3197
5825
    if (e) {
3198
3389
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3199
3389
        if (!o)
3200
            return;
3201
3389
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3202
    }
3203
5825
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3204
5825
    if (e) {
3205
3388
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3206
3388
        if (!o)
3207
            return;
3208
3388
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3209
    }
3210
3211
5825
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3212
5825
    encoder_string     = av_mallocz(encoder_string_len);
3213
5825
    if (!encoder_string)
3214
        exit_program(1);
3215
3216

5825
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3217
829
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3218
    else
3219
4996
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3220
5825
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3221
5825
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3222
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3223
}
3224
3225
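The practical effect of set_encoder_id() is visible in the output metadata: without bitexact flags the "encoder" tag carries the full library identification plus the encoder name (something like "Lavc58.91.100 libx264"), while bitexact runs get the stripped "Lavc libx264" form so that output files do not change from one library version to the next, which is also why the shorter branch is the hotter one in the counts above.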
1
static void parse_forced_key_frames(char *kf, OutputStream *ost,
3226
                                    AVCodecContext *avctx)
3227
{
3228
    char *p;
3229
1
    int n = 1, i, size, index = 0;
3230
    int64_t t, *pts;
3231
3232
14
    for (p = kf; *p; p++)
3233
13
        if (*p == ',')
3234
1
            n++;
3235
1
    size = n;
3236
1
    pts = av_malloc_array(size, sizeof(*pts));
3237
1
    if (!pts) {
3238
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3239
        exit_program(1);
3240
    }
3241
3242
1
    p = kf;
3243
3
    for (i = 0; i < n; i++) {
3244
2
        char *next = strchr(p, ',');
3245
3246
2
        if (next)
3247
1
            *next++ = 0;
3248
3249
2
        if (!memcmp(p, "chapters", 8)) {
3250
3251
            AVFormatContext *avf = output_files[ost->file_index]->ctx;
3252
            int j;
3253
3254
            if (avf->nb_chapters > INT_MAX - size ||
3255
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3256
                                     sizeof(*pts)))) {
3257
                av_log(NULL, AV_LOG_FATAL,
3258
                       "Could not allocate forced key frames array.\n");
3259
                exit_program(1);
3260
            }
3261
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3262
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3263
3264
            for (j = 0; j < avf->nb_chapters; j++) {
3265
                AVChapter *c = avf->chapters[j];
3266
                av_assert1(index < size);
3267
                pts[index++] = av_rescale_q(c->start, c->time_base,
3268
                                            avctx->time_base) + t;
3269
            }
3270
3271
        } else {
3272
3273
2
            t = parse_time_or_die("force_key_frames", p, 1);
3274
            av_assert1(index < size);
3275
2
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3276
3277
        }
3278
3279
2
        p = next;
3280
    }
3281
3282
1
    av_assert0(index == size);
3283
1
    qsort(pts, size, sizeof(*pts), compare_int64);
3284
1
    ost->forced_kf_count = size;
3285
1
    ost->forced_kf_pts   = pts;
3286
1
}
3287
3288
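parse_forced_key_frames() handles the comma-separated form of -force_key_frames, where a plain entry is a timestamp and an entry beginning with "chapters" expands to every chapter start plus an optional offset. A typical invocation (file names are placeholders) is:

    ffmpeg -i input.mkv -force_key_frames 0:00:05,chapters-0.1 -c:v libx264 output.mkv

which forces a keyframe at the five second mark and 0.1 seconds before each chapter, so that seeking to a chapter lands on an I-frame. The expr: form of the option is evaluated per frame elsewhere and never reaches this parser.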
5794
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3289
{
3290
5794
    InputStream *ist = get_input_stream(ost);
3291
5794
    AVCodecContext *enc_ctx = ost->enc_ctx;
3292
    AVFormatContext *oc;
3293
3294
5794
    if (ost->enc_timebase.num > 0) {
3295
        enc_ctx->time_base = ost->enc_timebase;
3296
        return;
3297
    }
3298
3299
5794
    if (ost->enc_timebase.num < 0) {
3300
        if (ist) {
3301
            enc_ctx->time_base = ist->st->time_base;
3302
            return;
3303
        }
3304
3305
        oc = output_files[ost->file_index]->ctx;
3306
        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3307
    }
3308
3309
5794
    enc_ctx->time_base = default_time_base;
3310
}
3311
3312
5825
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3313
{
3314
5825
    InputStream *ist = get_input_stream(ost);
3315
5825
    AVCodecContext *enc_ctx = ost->enc_ctx;
3316
5825
    AVCodecContext *dec_ctx = NULL;
3317
5825
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
3318
    int j, ret;
3319
3320
5825
    set_encoder_id(output_files[ost->file_index], ost);
3321
3322
    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3323
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
3324
    // which have to be filtered out to prevent leaking them to output files.
3325
5825
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3326
3327
5825
    if (ist) {
3328
5764
        ost->st->disposition          = ist->st->disposition;
3329
3330
5764
        dec_ctx = ist->dec_ctx;
3331
3332
5764
        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3333
    } else {
3334
128
        for (j = 0; j < oc->nb_streams; j++) {
3335
67
            AVStream *st = oc->streams[j];
3336

67
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3337
                break;
3338
        }
3339
61
        if (j == oc->nb_streams)
3340
61
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3341
52
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3342
61
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
3343
    }
3344
3345
5825
    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3346
4671
        if (!ost->frame_rate.num)
3347
4651
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3348

4671
        if (ist && !ost->frame_rate.num)
3349
2
            ost->frame_rate = ist->framerate;
3350

4671
        if (ist && !ost->frame_rate.num)
3351
2
            ost->frame_rate = ist->st->r_frame_rate;
3352

4671
        if (ist && !ost->frame_rate.num) {
3353
2
            ost->frame_rate = (AVRational){25, 1};
3354
2
            av_log(NULL, AV_LOG_WARNING,
3355
                   "No information "
3356
                   "about the input framerate is available. Falling "
3357
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3358
                   "if you want a different framerate.\n",
3359
                   ost->file_index, ost->index);
3360
        }
3361