FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/fftools/ffplay.c
Date: 2024-04-25 15:36:26
            Exec   Total  Coverage
Lines:         0    2206      0.0%
Functions:     0     100      0.0%
Branches:      0    1498      0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * simple media player based on the FFmpeg libraries
24 */
25
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32
33 #include "libavutil/avstring.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/bprint.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/tx.h"
48 #include "libswresample/swresample.h"
49
50 #include "libavfilter/avfilter.h"
51 #include "libavfilter/buffersink.h"
52 #include "libavfilter/buffersrc.h"
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58 #include "ffplay_renderer.h"
59 #include "opt_common.h"
60
61 const char program_name[] = "ffplay";
62 const int program_birth_year = 2003;
63
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_FRAMES 25
66 #define EXTERNAL_CLOCK_MIN_FRAMES 2
67 #define EXTERNAL_CLOCK_MAX_FRAMES 10
68
69 /* Minimum SDL audio buffer size, in samples. */
70 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
71 /* Calculate the actual buffer size, keeping in mind that it should not cause too frequent audio callbacks */
72 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
73
74 /* Step size for volume control in dB */
75 #define SDL_VOLUME_STEP (0.75)
76
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
84 /* no AV correction is done if the error is too big */
84 #define AV_NOSYNC_THRESHOLD 10.0
85
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
96
97 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99
100 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103
104 #define CURSOR_HIDE_DELAY 1000000
105
106 #define USE_ONEPASS_SUBTITLE_RENDER 1
107
108 typedef struct MyAVPacketList {
109 AVPacket *pkt;
110 int serial;
111 } MyAVPacketList;
112
113 typedef struct PacketQueue {
114 AVFifo *pkt_list;
115 int nb_packets;
116 int size;
117 int64_t duration;
118 int abort_request;
119 int serial;
120 SDL_mutex *mutex;
121 SDL_cond *cond;
122 } PacketQueue;
123
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128
129 typedef struct AudioParams {
130 int freq;
131 AVChannelLayout ch_layout;
132 enum AVSampleFormat fmt;
133 int frame_size;
134 int bytes_per_sec;
135 } AudioParams;
136
137 typedef struct Clock {
138 double pts; /* clock base */
139 double pts_drift; /* clock base minus time at which we updated the clock */
140 double last_updated;
141 double speed;
142 int serial; /* clock is based on a packet with this serial */
143 int paused;
144 int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
146
147 typedef struct FrameData {
148 int64_t pkt_pos;
149 } FrameData;
150
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
153 AVFrame *frame;
154 AVSubtitle sub;
155 int serial;
156 double pts; /* presentation timestamp for the frame */
157 double duration; /* estimated duration of the frame */
158 int64_t pos; /* byte position of the frame in the input file */
159 int width;
160 int height;
161 int format;
162 AVRational sar;
163 int uploaded;
164 int flip_v;
165 } Frame;
166
167 typedef struct FrameQueue {
168 Frame queue[FRAME_QUEUE_SIZE];
169 int rindex;
170 int windex;
171 int size;
172 int max_size;
173 int keep_last;
174 int rindex_shown;
175 SDL_mutex *mutex;
176 SDL_cond *cond;
177 PacketQueue *pktq;
178 } FrameQueue;
179
180 enum {
181 AV_SYNC_AUDIO_MASTER, /* default choice */
182 AV_SYNC_VIDEO_MASTER,
183 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185
186 typedef struct Decoder {
187 AVPacket *pkt;
188 PacketQueue *queue;
189 AVCodecContext *avctx;
190 int pkt_serial;
191 int finished;
192 int packet_pending;
193 SDL_cond *empty_queue_cond;
194 int64_t start_pts;
195 AVRational start_pts_tb;
196 int64_t next_pts;
197 AVRational next_pts_tb;
198 SDL_Thread *decoder_tid;
199 } Decoder;
200
201 typedef struct VideoState {
202 SDL_Thread *read_tid;
203 const AVInputFormat *iformat;
204 int abort_request;
205 int force_refresh;
206 int paused;
207 int last_paused;
208 int queue_attachments_req;
209 int seek_req;
210 int seek_flags;
211 int64_t seek_pos;
212 int64_t seek_rel;
213 int read_pause_return;
214 AVFormatContext *ic;
215 int realtime;
216
217 Clock audclk;
218 Clock vidclk;
219 Clock extclk;
220
221 FrameQueue pictq;
222 FrameQueue subpq;
223 FrameQueue sampq;
224
225 Decoder auddec;
226 Decoder viddec;
227 Decoder subdec;
228
229 int audio_stream;
230
231 int av_sync_type;
232
233 double audio_clock;
234 int audio_clock_serial;
235 double audio_diff_cum; /* used for AV difference average computation */
236 double audio_diff_avg_coef;
237 double audio_diff_threshold;
238 int audio_diff_avg_count;
239 AVStream *audio_st;
240 PacketQueue audioq;
241 int audio_hw_buf_size;
242 uint8_t *audio_buf;
243 uint8_t *audio_buf1;
244 unsigned int audio_buf_size; /* in bytes */
245 unsigned int audio_buf1_size;
246 int audio_buf_index; /* in bytes */
247 int audio_write_buf_size;
248 int audio_volume;
249 int muted;
250 struct AudioParams audio_src;
251 struct AudioParams audio_filter_src;
252 struct AudioParams audio_tgt;
253 struct SwrContext *swr_ctx;
254 int frame_drops_early;
255 int frame_drops_late;
256
257 enum ShowMode {
258 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
259 } show_mode;
260 int16_t sample_array[SAMPLE_ARRAY_SIZE];
261 int sample_array_index;
262 int last_i_start;
263 AVTXContext *rdft;
264 av_tx_fn rdft_fn;
265 int rdft_bits;
266 float *real_data;
267 AVComplexFloat *rdft_data;
268 int xpos;
269 double last_vis_time;
270 SDL_Texture *vis_texture;
271 SDL_Texture *sub_texture;
272 SDL_Texture *vid_texture;
273
274 int subtitle_stream;
275 AVStream *subtitle_st;
276 PacketQueue subtitleq;
277
278 double frame_timer;
279 double frame_last_returned_time;
280 double frame_last_filter_delay;
281 int video_stream;
282 AVStream *video_st;
283 PacketQueue videoq;
284 double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
285 struct SwsContext *sub_convert_ctx;
286 int eof;
287
288 char *filename;
289 int width, height, xleft, ytop;
290 int step;
291
292 int vfilter_idx;
293 AVFilterContext *in_video_filter; // the first filter in the video chain
294 AVFilterContext *out_video_filter; // the last filter in the video chain
295 AVFilterContext *in_audio_filter; // the first filter in the audio chain
296 AVFilterContext *out_audio_filter; // the last filter in the audio chain
297 AVFilterGraph *agraph; // audio filter graph
298
299 int last_video_stream, last_audio_stream, last_subtitle_stream;
300
301 SDL_cond *continue_read_thread;
302 } VideoState;
303
304 /* options specified by the user */
305 static const AVInputFormat *file_iformat;
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
325 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
326 static int64_t start_time = AV_NOPTS_VALUE;
327 static int64_t duration = AV_NOPTS_VALUE;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
343 static int64_t cursor_last_shown;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 static int enable_vulkan = 0;
352 static char *vulkan_params = NULL;
353 static const char *hwaccel = NULL;
354
355 /* current context */
356 static int is_full_screen;
357 static int64_t audio_callback_time;
358
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 static SDL_RendererInfo renderer_info = {0};
364 static SDL_AudioDeviceID audio_dev;
365
366 static VkRenderer *vk_renderer;
367
368 static const struct TextureFormatEntry {
369 enum AVPixelFormat format;
370 int texture_fmt;
371 } sdl_texture_format_map[] = {
372 { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373 { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374 { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375 { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376 { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377 { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378 { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379 { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380 { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381 { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382 { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383 { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384 { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385 { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386 { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387 { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388 { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389 { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390 { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391 { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
392 };
393
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
396 int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
397 if (ret < 0)
398 return ret;
399
400 vfilters_list[nb_vfilters - 1] = av_strdup(arg);
401 if (!vfilters_list[nb_vfilters - 1])
402 return AVERROR(ENOMEM);
403
404 return 0;
405 }
406
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409 enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411 /* If channel count == 1, planar and non-planar formats are the same */
412 if (channel_count1 == 1 && channel_count2 == 1)
413 return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
414 else
415 return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417
418 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
419 {
420 MyAVPacketList pkt1;
421 int ret;
422
423 if (q->abort_request)
424 return -1;
425
426
427 pkt1.pkt = pkt;
428 pkt1.serial = q->serial;
429
430 ret = av_fifo_write(q->pkt_list, &pkt1, 1);
431 if (ret < 0)
432 return ret;
433 q->nb_packets++;
434 q->size += pkt1.pkt->size + sizeof(pkt1);
435 q->duration += pkt1.pkt->duration;
436 /* XXX: should duplicate packet data in DV case */
437 SDL_CondSignal(q->cond);
438 return 0;
439 }
440
441 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
442 {
443 AVPacket *pkt1;
444 int ret;
445
446 pkt1 = av_packet_alloc();
447 if (!pkt1) {
448 av_packet_unref(pkt);
449 return -1;
450 }
451 av_packet_move_ref(pkt1, pkt);
452
453 SDL_LockMutex(q->mutex);
454 ret = packet_queue_put_private(q, pkt1);
455 SDL_UnlockMutex(q->mutex);
456
457 if (ret < 0)
458 av_packet_free(&pkt1);
459
460 return ret;
461 }
462
463 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
464 {
465 pkt->stream_index = stream_index;
466 return packet_queue_put(q, pkt);
467 }
468
469 /* packet queue handling */
470 static int packet_queue_init(PacketQueue *q)
471 {
472 memset(q, 0, sizeof(PacketQueue));
473 q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
474 if (!q->pkt_list)
475 return AVERROR(ENOMEM);
476 q->mutex = SDL_CreateMutex();
477 if (!q->mutex) {
478 av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
479 return AVERROR(ENOMEM);
480 }
481 q->cond = SDL_CreateCond();
482 if (!q->cond) {
483 av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
484 return AVERROR(ENOMEM);
485 }
486 q->abort_request = 1;
487 return 0;
488 }
489
490 static void packet_queue_flush(PacketQueue *q)
491 {
492 MyAVPacketList pkt1;
493
494 SDL_LockMutex(q->mutex);
495 while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
496 av_packet_free(&pkt1.pkt);
497 q->nb_packets = 0;
498 q->size = 0;
499 q->duration = 0;
500 q->serial++;
501 SDL_UnlockMutex(q->mutex);
502 }
503
504 static void packet_queue_destroy(PacketQueue *q)
505 {
506 packet_queue_flush(q);
507 av_fifo_freep2(&q->pkt_list);
508 SDL_DestroyMutex(q->mutex);
509 SDL_DestroyCond(q->cond);
510 }
511
512 static void packet_queue_abort(PacketQueue *q)
513 {
514 SDL_LockMutex(q->mutex);
515
516 q->abort_request = 1;
517
518 SDL_CondSignal(q->cond);
519
520 SDL_UnlockMutex(q->mutex);
521 }
522
523 static void packet_queue_start(PacketQueue *q)
524 {
525 SDL_LockMutex(q->mutex);
526 q->abort_request = 0;
527 q->serial++;
528 SDL_UnlockMutex(q->mutex);
529 }
530
531 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
532 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
533 {
534 MyAVPacketList pkt1;
535 int ret;
536
537 SDL_LockMutex(q->mutex);
538
539 for (;;) {
540 if (q->abort_request) {
541 ret = -1;
542 break;
543 }
544
545 if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
546 q->nb_packets--;
547 q->size -= pkt1.pkt->size + sizeof(pkt1);
548 q->duration -= pkt1.pkt->duration;
549 av_packet_move_ref(pkt, pkt1.pkt);
550 if (serial)
551 *serial = pkt1.serial;
552 av_packet_free(&pkt1.pkt);
553 ret = 1;
554 break;
555 } else if (!block) {
556 ret = 0;
557 break;
558 } else {
559 SDL_CondWait(q->cond, q->mutex);
560 }
561 }
562 SDL_UnlockMutex(q->mutex);
563 return ret;
564 }
565
566 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
567 memset(d, 0, sizeof(Decoder));
568 d->pkt = av_packet_alloc();
569 if (!d->pkt)
570 return AVERROR(ENOMEM);
571 d->avctx = avctx;
572 d->queue = queue;
573 d->empty_queue_cond = empty_queue_cond;
574 d->start_pts = AV_NOPTS_VALUE;
575 d->pkt_serial = -1;
576 return 0;
577 }
578
579 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
580 int ret = AVERROR(EAGAIN);
581
582 for (;;) {
583 if (d->queue->serial == d->pkt_serial) {
584 do {
585 if (d->queue->abort_request)
586 return -1;
587
588 switch (d->avctx->codec_type) {
589 case AVMEDIA_TYPE_VIDEO:
590 ret = avcodec_receive_frame(d->avctx, frame);
591 if (ret >= 0) {
592 if (decoder_reorder_pts == -1) {
593 frame->pts = frame->best_effort_timestamp;
594 } else if (!decoder_reorder_pts) {
595 frame->pts = frame->pkt_dts;
596 }
597 }
598 break;
599 case AVMEDIA_TYPE_AUDIO:
600 ret = avcodec_receive_frame(d->avctx, frame);
601 if (ret >= 0) {
602 AVRational tb = (AVRational){1, frame->sample_rate};
603 if (frame->pts != AV_NOPTS_VALUE)
604 frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
605 else if (d->next_pts != AV_NOPTS_VALUE)
606 frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
607 if (frame->pts != AV_NOPTS_VALUE) {
608 d->next_pts = frame->pts + frame->nb_samples;
609 d->next_pts_tb = tb;
610 }
611 }
612 break;
613 }
614 if (ret == AVERROR_EOF) {
615 d->finished = d->pkt_serial;
616 avcodec_flush_buffers(d->avctx);
617 return 0;
618 }
619 if (ret >= 0)
620 return 1;
621 } while (ret != AVERROR(EAGAIN));
622 }
623
624 do {
625 if (d->queue->nb_packets == 0)
626 SDL_CondSignal(d->empty_queue_cond);
627 if (d->packet_pending) {
628 d->packet_pending = 0;
629 } else {
630 int old_serial = d->pkt_serial;
631 if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
632 return -1;
633 if (old_serial != d->pkt_serial) {
634 avcodec_flush_buffers(d->avctx);
635 d->finished = 0;
636 d->next_pts = d->start_pts;
637 d->next_pts_tb = d->start_pts_tb;
638 }
639 }
640 if (d->queue->serial == d->pkt_serial)
641 break;
642 av_packet_unref(d->pkt);
643 } while (1);
644
645 if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
646 int got_frame = 0;
647 ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
648 if (ret < 0) {
649 ret = AVERROR(EAGAIN);
650 } else {
651 if (got_frame && !d->pkt->data) {
652 d->packet_pending = 1;
653 }
654 ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
655 }
656 av_packet_unref(d->pkt);
657 } else {
658 if (d->pkt->buf && !d->pkt->opaque_ref) {
659 FrameData *fd;
660
661 d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
662 if (!d->pkt->opaque_ref)
663 return AVERROR(ENOMEM);
664 fd = (FrameData*)d->pkt->opaque_ref->data;
665 fd->pkt_pos = d->pkt->pos;
666 }
667
668 if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
669 av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670 d->packet_pending = 1;
671 } else {
672 av_packet_unref(d->pkt);
673 }
674 }
675 }
676 }
677
678 static void decoder_destroy(Decoder *d) {
679 av_packet_free(&d->pkt);
680 avcodec_free_context(&d->avctx);
681 }
682
683 static void frame_queue_unref_item(Frame *vp)
684 {
685 av_frame_unref(vp->frame);
686 avsubtitle_free(&vp->sub);
687 }
688
689 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
690 {
691 int i;
692 memset(f, 0, sizeof(FrameQueue));
693 if (!(f->mutex = SDL_CreateMutex())) {
694 av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
695 return AVERROR(ENOMEM);
696 }
697 if (!(f->cond = SDL_CreateCond())) {
698 av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
699 return AVERROR(ENOMEM);
700 }
701 f->pktq = pktq;
702 f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
703 f->keep_last = !!keep_last;
704 for (i = 0; i < f->max_size; i++)
705 if (!(f->queue[i].frame = av_frame_alloc()))
706 return AVERROR(ENOMEM);
707 return 0;
708 }
709
710 static void frame_queue_destroy(FrameQueue *f)
711 {
712 int i;
713 for (i = 0; i < f->max_size; i++) {
714 Frame *vp = &f->queue[i];
715 frame_queue_unref_item(vp);
716 av_frame_free(&vp->frame);
717 }
718 SDL_DestroyMutex(f->mutex);
719 SDL_DestroyCond(f->cond);
720 }
721
722 static void frame_queue_signal(FrameQueue *f)
723 {
724 SDL_LockMutex(f->mutex);
725 SDL_CondSignal(f->cond);
726 SDL_UnlockMutex(f->mutex);
727 }
728
729 static Frame *frame_queue_peek(FrameQueue *f)
730 {
731 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
732 }
733
734 static Frame *frame_queue_peek_next(FrameQueue *f)
735 {
736 return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
737 }
738
739 static Frame *frame_queue_peek_last(FrameQueue *f)
740 {
741 return &f->queue[f->rindex];
742 }
743
744 static Frame *frame_queue_peek_writable(FrameQueue *f)
745 {
746 /* wait until we have space to put a new frame */
747 SDL_LockMutex(f->mutex);
748 while (f->size >= f->max_size &&
749 !f->pktq->abort_request) {
750 SDL_CondWait(f->cond, f->mutex);
751 }
752 SDL_UnlockMutex(f->mutex);
753
754 if (f->pktq->abort_request)
755 return NULL;
756
757 return &f->queue[f->windex];
758 }
759
760 static Frame *frame_queue_peek_readable(FrameQueue *f)
761 {
762 /* wait until we have a readable new frame */
763 SDL_LockMutex(f->mutex);
764 while (f->size - f->rindex_shown <= 0 &&
765 !f->pktq->abort_request) {
766 SDL_CondWait(f->cond, f->mutex);
767 }
768 SDL_UnlockMutex(f->mutex);
769
770 if (f->pktq->abort_request)
771 return NULL;
772
773 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
774 }
775
776 static void frame_queue_push(FrameQueue *f)
777 {
778 if (++f->windex == f->max_size)
779 f->windex = 0;
780 SDL_LockMutex(f->mutex);
781 f->size++;
782 SDL_CondSignal(f->cond);
783 SDL_UnlockMutex(f->mutex);
784 }
785
786 static void frame_queue_next(FrameQueue *f)
787 {
788 if (f->keep_last && !f->rindex_shown) {
789 f->rindex_shown = 1;
790 return;
791 }
792 frame_queue_unref_item(&f->queue[f->rindex]);
793 if (++f->rindex == f->max_size)
794 f->rindex = 0;
795 SDL_LockMutex(f->mutex);
796 f->size--;
797 SDL_CondSignal(f->cond);
798 SDL_UnlockMutex(f->mutex);
799 }
800
801 /* return the number of undisplayed frames in the queue */
802 static int frame_queue_nb_remaining(FrameQueue *f)
803 {
804 return f->size - f->rindex_shown;
805 }
806
807 /* return last shown position */
808 static int64_t frame_queue_last_pos(FrameQueue *f)
809 {
810 Frame *fp = &f->queue[f->rindex];
811 if (f->rindex_shown && fp->serial == f->pktq->serial)
812 return fp->pos;
813 else
814 return -1;
815 }
816
817 static void decoder_abort(Decoder *d, FrameQueue *fq)
818 {
819 packet_queue_abort(d->queue);
820 frame_queue_signal(fq);
821 SDL_WaitThread(d->decoder_tid, NULL);
822 d->decoder_tid = NULL;
823 packet_queue_flush(d->queue);
824 }
825
826 static inline void fill_rectangle(int x, int y, int w, int h)
827 {
828 SDL_Rect rect;
829 rect.x = x;
830 rect.y = y;
831 rect.w = w;
832 rect.h = h;
833 if (w && h)
834 SDL_RenderFillRect(renderer, &rect);
835 }
836
837 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
838 {
839 Uint32 format;
840 int access, w, h;
841 if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
842 void *pixels;
843 int pitch;
844 if (*texture)
845 SDL_DestroyTexture(*texture);
846 if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
847 return -1;
848 if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
849 return -1;
850 if (init_texture) {
851 if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
852 return -1;
853 memset(pixels, 0, pitch * new_height);
854 SDL_UnlockTexture(*texture);
855 }
856 av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
857 }
858 return 0;
859 }
860
861 static void calculate_display_rect(SDL_Rect *rect,
862 int scr_xleft, int scr_ytop, int scr_width, int scr_height,
863 int pic_width, int pic_height, AVRational pic_sar)
864 {
865 AVRational aspect_ratio = pic_sar;
866 int64_t width, height, x, y;
867
868 if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
869 aspect_ratio = av_make_q(1, 1);
870
871 aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
872
873 /* XXX: we suppose the screen has a 1.0 pixel ratio */
874 height = scr_height;
875 width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
876 if (width > scr_width) {
877 width = scr_width;
878 height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
879 }
880 x = (scr_width - width) / 2;
881 y = (scr_height - height) / 2;
882 rect->x = scr_xleft + x;
883 rect->y = scr_ytop + y;
884 rect->w = FFMAX((int)width, 1);
885 rect->h = FFMAX((int)height, 1);
886 }
887
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890 int i;
891 *sdl_blendmode = SDL_BLENDMODE_NONE;
892 *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893 if (format == AV_PIX_FMT_RGB32 ||
894 format == AV_PIX_FMT_RGB32_1 ||
895 format == AV_PIX_FMT_BGR32 ||
896 format == AV_PIX_FMT_BGR32_1)
897 *sdl_blendmode = SDL_BLENDMODE_BLEND;
898 for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
899 if (format == sdl_texture_format_map[i].format) {
900 *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901 return;
902 }
903 }
904 }
905
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
907 {
908 int ret = 0;
909 Uint32 sdl_pix_fmt;
910 SDL_BlendMode sdl_blendmode;
911 get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912 if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913 return -1;
914 switch (sdl_pix_fmt) {
915 case SDL_PIXELFORMAT_IYUV:
916 if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
917 ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
918 frame->data[1], frame->linesize[1],
919 frame->data[2], frame->linesize[2]);
920 } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
921 ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
922 frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
923 frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
924 } else {
925 av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
926 return -1;
927 }
928 break;
929 default:
930 if (frame->linesize[0] < 0) {
931 ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
932 } else {
933 ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
934 }
935 break;
936 }
937 return ret;
938 }
939
940 static enum AVColorSpace sdl_supported_color_spaces[] = {
941 AVCOL_SPC_BT709,
942 AVCOL_SPC_BT470BG,
943 AVCOL_SPC_SMPTE170M,
944 AVCOL_SPC_UNSPECIFIED,
945 };
946
947 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
948 {
949 #if SDL_VERSION_ATLEAST(2,0,8)
950 SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
951 if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
952 if (frame->color_range == AVCOL_RANGE_JPEG)
953 mode = SDL_YUV_CONVERSION_JPEG;
954 else if (frame->colorspace == AVCOL_SPC_BT709)
955 mode = SDL_YUV_CONVERSION_BT709;
956 else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
957 mode = SDL_YUV_CONVERSION_BT601;
958 }
959 SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
960 #endif
961 }
962
963 static void video_image_display(VideoState *is)
964 {
965 Frame *vp;
966 Frame *sp = NULL;
967 SDL_Rect rect;
968
969 vp = frame_queue_peek_last(&is->pictq);
970 if (vk_renderer) {
971 vk_renderer_display(vk_renderer, vp->frame);
972 return;
973 }
974
975 if (is->subtitle_st) {
976 if (frame_queue_nb_remaining(&is->subpq) > 0) {
977 sp = frame_queue_peek(&is->subpq);
978
979 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
980 if (!sp->uploaded) {
981 uint8_t* pixels[4];
982 int pitch[4];
983 int i;
984 if (!sp->width || !sp->height) {
985 sp->width = vp->width;
986 sp->height = vp->height;
987 }
988 if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
989 return;
990
991 for (i = 0; i < sp->sub.num_rects; i++) {
992 AVSubtitleRect *sub_rect = sp->sub.rects[i];
993
994 sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
995 sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
996 sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
997 sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
998
999 is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1000 sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1001 sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1002 0, NULL, NULL, NULL);
1003 if (!is->sub_convert_ctx) {
1004 av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1005 return;
1006 }
1007 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1008 sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1009 0, sub_rect->h, pixels, pitch);
1010 SDL_UnlockTexture(is->sub_texture);
1011 }
1012 }
1013 sp->uploaded = 1;
1014 }
1015 } else
1016 sp = NULL;
1017 }
1018 }
1019
1020 calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1021 set_sdl_yuv_conversion_mode(vp->frame);
1022
1023 if (!vp->uploaded) {
1024 if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1025 set_sdl_yuv_conversion_mode(NULL);
1026 return;
1027 }
1028 vp->uploaded = 1;
1029 vp->flip_v = vp->frame->linesize[0] < 0;
1030 }
1031
1032 SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1033 set_sdl_yuv_conversion_mode(NULL);
1034 if (sp) {
1035 #if USE_ONEPASS_SUBTITLE_RENDER
1036 SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1037 #else
1038 int i;
1039 double xratio = (double)rect.w / (double)sp->width;
1040 double yratio = (double)rect.h / (double)sp->height;
1041 for (i = 0; i < sp->sub.num_rects; i++) {
1042 SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1043 SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1044 .y = rect.y + sub_rect->y * yratio,
1045 .w = sub_rect->w * xratio,
1046 .h = sub_rect->h * yratio};
1047 SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1048 }
1049 #endif
1050 }
1051 }
1052
1053 static inline int compute_mod(int a, int b)
1054 {
1055 return a < 0 ? a%b + b : a%b;
1056 }
1057
1058 static void video_audio_display(VideoState *s)
1059 {
1060 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1061 int ch, channels, h, h2;
1062 int64_t time_diff;
1063 int rdft_bits, nb_freq;
1064
1065 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1066 ;
1067 nb_freq = 1 << (rdft_bits - 1);
1068
1069 /* compute display index : center on currently output samples */
1070 channels = s->audio_tgt.ch_layout.nb_channels;
1071 nb_display_channels = channels;
1072 if (!s->paused) {
1073 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1074 n = 2 * channels;
1075 delay = s->audio_write_buf_size;
1076 delay /= n;
1077
1078 /* to be more precise, we take into account the time spent since
1079 the last buffer computation */
1080 if (audio_callback_time) {
1081 time_diff = av_gettime_relative() - audio_callback_time;
1082 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1083 }
1084
1085 delay += 2 * data_used;
1086 if (delay < data_used)
1087 delay = data_used;
1088
1089 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1090 if (s->show_mode == SHOW_MODE_WAVES) {
1091 h = INT_MIN;
1092 for (i = 0; i < 1000; i += channels) {
1093 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1094 int a = s->sample_array[idx];
1095 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1096 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1097 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1098 int score = a - d;
1099 if (h < score && (b ^ c) < 0) {
1100 h = score;
1101 i_start = idx;
1102 }
1103 }
1104 }
1105
1106 s->last_i_start = i_start;
1107 } else {
1108 i_start = s->last_i_start;
1109 }
1110
1111 if (s->show_mode == SHOW_MODE_WAVES) {
1112 SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1113
1114 /* total height for one channel */
1115 h = s->height / nb_display_channels;
1116 /* graph height / 2 */
1117 h2 = (h * 9) / 20;
1118 for (ch = 0; ch < nb_display_channels; ch++) {
1119 i = i_start + ch;
1120 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1121 for (x = 0; x < s->width; x++) {
1122 y = (s->sample_array[i] * h2) >> 15;
1123 if (y < 0) {
1124 y = -y;
1125 ys = y1 - y;
1126 } else {
1127 ys = y1;
1128 }
1129 fill_rectangle(s->xleft + x, ys, 1, y);
1130 i += channels;
1131 if (i >= SAMPLE_ARRAY_SIZE)
1132 i -= SAMPLE_ARRAY_SIZE;
1133 }
1134 }
1135
1136 SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1137
1138 for (ch = 1; ch < nb_display_channels; ch++) {
1139 y = s->ytop + ch * h;
1140 fill_rectangle(s->xleft, y, s->width, 1);
1141 }
1142 } else {
1143 int err = 0;
1144 if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1145 return;
1146
1147 if (s->xpos >= s->width)
1148 s->xpos = 0;
1149 nb_display_channels= FFMIN(nb_display_channels, 2);
1150 if (rdft_bits != s->rdft_bits) {
1151 const float rdft_scale = 1.0;
1152 av_tx_uninit(&s->rdft);
1153 av_freep(&s->real_data);
1154 av_freep(&s->rdft_data);
1155 s->rdft_bits = rdft_bits;
1156 s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1157 s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1158 err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1159 0, 1 << rdft_bits, &rdft_scale, 0);
1160 }
1161 if (err < 0 || !s->rdft_data) {
1162 av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1163 s->show_mode = SHOW_MODE_WAVES;
1164 } else {
1165 float *data_in[2];
1166 AVComplexFloat *data[2];
1167 SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1168 uint32_t *pixels;
1169 int pitch;
1170 for (ch = 0; ch < nb_display_channels; ch++) {
1171 data_in[ch] = s->real_data + 2 * nb_freq * ch;
1172 data[ch] = s->rdft_data + nb_freq * ch;
1173 i = i_start + ch;
1174 for (x = 0; x < 2 * nb_freq; x++) {
1175 double w = (x-nb_freq) * (1.0 / nb_freq);
1176 data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1177 i += channels;
1178 if (i >= SAMPLE_ARRAY_SIZE)
1179 i -= SAMPLE_ARRAY_SIZE;
1180 }
1181 s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1182 data[ch][0].im = data[ch][nb_freq].re;
1183 data[ch][nb_freq].re = 0;
1184 }
1185 /* Least efficient way to do this, we should of course
1186 * directly access it but it is more than fast enough. */
1187 if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1188 pitch >>= 2;
1189 pixels += pitch * s->height;
1190 for (y = 0; y < s->height; y++) {
1191 double w = 1 / sqrt(nb_freq);
1192 int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1193 int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1194 : a;
1195 a = FFMIN(a, 255);
1196 b = FFMIN(b, 255);
1197 pixels -= pitch;
1198 *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1199 }
1200 SDL_UnlockTexture(s->vis_texture);
1201 }
1202 SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1203 }
1204 if (!s->paused)
1205 s->xpos++;
1206 }
1207 }
1208
1209 static void stream_component_close(VideoState *is, int stream_index)
1210 {
1211 AVFormatContext *ic = is->ic;
1212 AVCodecParameters *codecpar;
1213
1214 if (stream_index < 0 || stream_index >= ic->nb_streams)
1215 return;
1216 codecpar = ic->streams[stream_index]->codecpar;
1217
1218 switch (codecpar->codec_type) {
1219 case AVMEDIA_TYPE_AUDIO:
1220 decoder_abort(&is->auddec, &is->sampq);
1221 SDL_CloseAudioDevice(audio_dev);
1222 decoder_destroy(&is->auddec);
1223 swr_free(&is->swr_ctx);
1224 av_freep(&is->audio_buf1);
1225 is->audio_buf1_size = 0;
1226 is->audio_buf = NULL;
1227
1228 if (is->rdft) {
1229 av_tx_uninit(&is->rdft);
1230 av_freep(&is->real_data);
1231 av_freep(&is->rdft_data);
1232 is->rdft = NULL;
1233 is->rdft_bits = 0;
1234 }
1235 break;
1236 case AVMEDIA_TYPE_VIDEO:
1237 decoder_abort(&is->viddec, &is->pictq);
1238 decoder_destroy(&is->viddec);
1239 break;
1240 case AVMEDIA_TYPE_SUBTITLE:
1241 decoder_abort(&is->subdec, &is->subpq);
1242 decoder_destroy(&is->subdec);
1243 break;
1244 default:
1245 break;
1246 }
1247
1248 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1249 switch (codecpar->codec_type) {
1250 case AVMEDIA_TYPE_AUDIO:
1251 is->audio_st = NULL;
1252 is->audio_stream = -1;
1253 break;
1254 case AVMEDIA_TYPE_VIDEO:
1255 is->video_st = NULL;
1256 is->video_stream = -1;
1257 break;
1258 case AVMEDIA_TYPE_SUBTITLE:
1259 is->subtitle_st = NULL;
1260 is->subtitle_stream = -1;
1261 break;
1262 default:
1263 break;
1264 }
1265 }
1266
1267 static void stream_close(VideoState *is)
1268 {
1269 /* XXX: use a special url_shutdown call to abort parse cleanly */
1270 is->abort_request = 1;
1271 SDL_WaitThread(is->read_tid, NULL);
1272
1273 /* close each stream */
1274 if (is->audio_stream >= 0)
1275 stream_component_close(is, is->audio_stream);
1276 if (is->video_stream >= 0)
1277 stream_component_close(is, is->video_stream);
1278 if (is->subtitle_stream >= 0)
1279 stream_component_close(is, is->subtitle_stream);
1280
1281 avformat_close_input(&is->ic);
1282
1283 packet_queue_destroy(&is->videoq);
1284 packet_queue_destroy(&is->audioq);
1285 packet_queue_destroy(&is->subtitleq);
1286
1287 /* free all pictures */
1288 frame_queue_destroy(&is->pictq);
1289 frame_queue_destroy(&is->sampq);
1290 frame_queue_destroy(&is->subpq);
1291 SDL_DestroyCond(is->continue_read_thread);
1292 sws_freeContext(is->sub_convert_ctx);
1293 av_free(is->filename);
1294 if (is->vis_texture)
1295 SDL_DestroyTexture(is->vis_texture);
1296 if (is->vid_texture)
1297 SDL_DestroyTexture(is->vid_texture);
1298 if (is->sub_texture)
1299 SDL_DestroyTexture(is->sub_texture);
1300 av_free(is);
1301 }
1302
1303 static void do_exit(VideoState *is)
1304 {
1305 if (is) {
1306 stream_close(is);
1307 }
1308 if (renderer)
1309 SDL_DestroyRenderer(renderer);
1310 if (vk_renderer)
1311 vk_renderer_destroy(vk_renderer);
1312 if (window)
1313 SDL_DestroyWindow(window);
1314 uninit_opts();
1315 for (int i = 0; i < nb_vfilters; i++)
1316 av_freep(&vfilters_list[i]);
1317 av_freep(&vfilters_list);
1318 av_freep(&video_codec_name);
1319 av_freep(&audio_codec_name);
1320 av_freep(&subtitle_codec_name);
1321 av_freep(&input_filename);
1322 avformat_network_deinit();
1323 if (show_status)
1324 printf("\n");
1325 SDL_Quit();
1326 av_log(NULL, AV_LOG_QUIET, "%s", "");
1327 exit(0);
1328 }
1329
1330 static void sigterm_handler(int sig)
1331 {
1332 exit(123);
1333 }
1334
1335 static void set_default_window_size(int width, int height, AVRational sar)
1336 {
1337 SDL_Rect rect;
1338 int max_width = screen_width ? screen_width : INT_MAX;
1339 int max_height = screen_height ? screen_height : INT_MAX;
1340 if (max_width == INT_MAX && max_height == INT_MAX)
1341 max_height = height;
1342 calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1343 default_width = rect.w;
1344 default_height = rect.h;
1345 }
1346
1347 static int video_open(VideoState *is)
1348 {
1349 int w,h;
1350
1351 w = screen_width ? screen_width : default_width;
1352 h = screen_height ? screen_height : default_height;
1353
1354 if (!window_title)
1355 window_title = input_filename;
1356 SDL_SetWindowTitle(window, window_title);
1357
1358 SDL_SetWindowSize(window, w, h);
1359 SDL_SetWindowPosition(window, screen_left, screen_top);
1360 if (is_full_screen)
1361 SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1362 SDL_ShowWindow(window);
1363
1364 is->width = w;
1365 is->height = h;
1366
1367 return 0;
1368 }
1369
1370 /* display the current picture, if any */
1371 static void video_display(VideoState *is)
1372 {
1373 if (!is->width)
1374 video_open(is);
1375
1376 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1377 SDL_RenderClear(renderer);
1378 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1379 video_audio_display(is);
1380 else if (is->video_st)
1381 video_image_display(is);
1382 SDL_RenderPresent(renderer);
1383 }
1384
1385 static double get_clock(Clock *c)
1386 {
1387 if (*c->queue_serial != c->serial)
1388 return NAN;
1389 if (c->paused) {
1390 return c->pts;
1391 } else {
1392 double time = av_gettime_relative() / 1000000.0;
1393 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1394 }
1395 }
1396
1397 static void set_clock_at(Clock *c, double pts, int serial, double time)
1398 {
1399 c->pts = pts;
1400 c->last_updated = time;
1401 c->pts_drift = c->pts - time;
1402 c->serial = serial;
1403 }
1404
1405 static void set_clock(Clock *c, double pts, int serial)
1406 {
1407 double time = av_gettime_relative() / 1000000.0;
1408 set_clock_at(c, pts, serial, time);
1409 }
1410
1411 static void set_clock_speed(Clock *c, double speed)
1412 {
1413 set_clock(c, get_clock(c), c->serial);
1414 c->speed = speed;
1415 }
1416
1417 static void init_clock(Clock *c, int *queue_serial)
1418 {
1419 c->speed = 1.0;
1420 c->paused = 0;
1421 c->queue_serial = queue_serial;
1422 set_clock(c, NAN, -1);
1423 }
1424
1425 static void sync_clock_to_slave(Clock *c, Clock *slave)
1426 {
1427 double clock = get_clock(c);
1428 double slave_clock = get_clock(slave);
1429 if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1430 set_clock(c, slave_clock, slave->serial);
1431 }
1432
1433 static int get_master_sync_type(VideoState *is) {
1434 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1435 if (is->video_st)
1436 return AV_SYNC_VIDEO_MASTER;
1437 else
1438 return AV_SYNC_AUDIO_MASTER;
1439 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1440 if (is->audio_st)
1441 return AV_SYNC_AUDIO_MASTER;
1442 else
1443 return AV_SYNC_EXTERNAL_CLOCK;
1444 } else {
1445 return AV_SYNC_EXTERNAL_CLOCK;
1446 }
1447 }
1448
1449 /* get the current master clock value */
1450 static double get_master_clock(VideoState *is)
1451 {
1452 double val;
1453
1454 switch (get_master_sync_type(is)) {
1455 case AV_SYNC_VIDEO_MASTER:
1456 val = get_clock(&is->vidclk);
1457 break;
1458 case AV_SYNC_AUDIO_MASTER:
1459 val = get_clock(&is->audclk);
1460 break;
1461 default:
1462 val = get_clock(&is->extclk);
1463 break;
1464 }
1465 return val;
1466 }
1467
1468 static void check_external_clock_speed(VideoState *is) {
1469 if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1470 is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1471 set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1472 } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1473 (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1474 set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1475 } else {
1476 double speed = is->extclk.speed;
1477 if (speed != 1.0)
1478 set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1479 }
1480 }
1481
1482 /* seek in the stream */
1483 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1484 {
1485 if (!is->seek_req) {
1486 is->seek_pos = pos;
1487 is->seek_rel = rel;
1488 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1489 if (by_bytes)
1490 is->seek_flags |= AVSEEK_FLAG_BYTE;
1491 is->seek_req = 1;
1492 SDL_CondSignal(is->continue_read_thread);
1493 }
1494 }
1495
1496 /* pause or resume the video */
1497 static void stream_toggle_pause(VideoState *is)
1498 {
1499 if (is->paused) {
1500 is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1501 if (is->read_pause_return != AVERROR(ENOSYS)) {
1502 is->vidclk.paused = 0;
1503 }
1504 set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1505 }
1506 set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1507 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1508 }
1509
1510 static void toggle_pause(VideoState *is)
1511 {
1512 stream_toggle_pause(is);
1513 is->step = 0;
1514 }
1515
1516 static void toggle_mute(VideoState *is)
1517 {
1518 is->muted = !is->muted;
1519 }
1520
1521 static void update_volume(VideoState *is, int sign, double step)
1522 {
1523 double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1524 int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1525 is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1526 }
1527
1528 static void step_to_next_frame(VideoState *is)
1529 {
1530 /* if the stream is paused unpause it, then step */
1531 if (is->paused)
1532 stream_toggle_pause(is);
1533 is->step = 1;
1534 }
1535
1536 static double compute_target_delay(double delay, VideoState *is)
1537 {
1538 double sync_threshold, diff = 0;
1539
1540 /* update delay to follow master synchronisation source */
1541 if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1542 /* if video is slave, we try to correct big delays by
1543 duplicating or deleting a frame */
1544 diff = get_clock(&is->vidclk) - get_master_clock(is);
1545
1546 /* skip or repeat frame. We take into account the
1547 delay to compute the threshold. I still don't know
1548 if it is the best guess */
1549 sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1550 if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1551 if (diff <= -sync_threshold)
1552 delay = FFMAX(0, delay + diff);
1553 else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1554 delay = delay + diff;
1555 else if (diff >= sync_threshold)
1556 delay = 2 * delay;
1557 }
1558 }
1559
1560 av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1561 delay, -diff);
1562
1563 return delay;
1564 }
1565
1566 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1567 if (vp->serial == nextvp->serial) {
1568 double duration = nextvp->pts - vp->pts;
1569 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1570 return vp->duration;
1571 else
1572 return duration;
1573 } else {
1574 return 0.0;
1575 }
1576 }
1577
1578 static void update_video_pts(VideoState *is, double pts, int serial)
1579 {
1580 /* update current video pts */
1581 set_clock(&is->vidclk, pts, serial);
1582 sync_clock_to_slave(&is->extclk, &is->vidclk);
1583 }
1584
1585 /* called to display each frame */
1586 static void video_refresh(void *opaque, double *remaining_time)
1587 {
1588 VideoState *is = opaque;
1589 double time;
1590
1591 Frame *sp, *sp2;
1592
1593 if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1594 check_external_clock_speed(is);
1595
1596 if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1597 time = av_gettime_relative() / 1000000.0;
1598 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1599 video_display(is);
1600 is->last_vis_time = time;
1601 }
1602 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1603 }
1604
1605 if (is->video_st) {
1606 retry:
1607 if (frame_queue_nb_remaining(&is->pictq) == 0) {
1608 // nothing to do, no picture to display in the queue
1609 } else {
1610 double last_duration, duration, delay;
1611 Frame *vp, *lastvp;
1612
1613 /* dequeue the picture */
1614 lastvp = frame_queue_peek_last(&is->pictq);
1615 vp = frame_queue_peek(&is->pictq);
1616
1617 if (vp->serial != is->videoq.serial) {
1618 frame_queue_next(&is->pictq);
1619 goto retry;
1620 }
1621
1622 if (lastvp->serial != vp->serial)
1623 is->frame_timer = av_gettime_relative() / 1000000.0;
1624
1625 if (is->paused)
1626 goto display;
1627
1628 /* compute nominal last_duration */
1629 last_duration = vp_duration(is, lastvp, vp);
1630 delay = compute_target_delay(last_duration, is);
1631
1632 time= av_gettime_relative()/1000000.0;
1633 if (time < is->frame_timer + delay) {
1634 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1635 goto display;
1636 }
1637
1638 is->frame_timer += delay;
1639 if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1640 is->frame_timer = time;
1641
1642 SDL_LockMutex(is->pictq.mutex);
1643 if (!isnan(vp->pts))
1644 update_video_pts(is, vp->pts, vp->serial);
1645 SDL_UnlockMutex(is->pictq.mutex);
1646
1647 if (frame_queue_nb_remaining(&is->pictq) > 1) {
1648 Frame *nextvp = frame_queue_peek_next(&is->pictq);
1649 duration = vp_duration(is, vp, nextvp);
1650 if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1651 is->frame_drops_late++;
1652 frame_queue_next(&is->pictq);
1653 goto retry;
1654 }
1655 }
1656
1657 if (is->subtitle_st) {
1658 while (frame_queue_nb_remaining(&is->subpq) > 0) {
1659 sp = frame_queue_peek(&is->subpq);
1660
1661 if (frame_queue_nb_remaining(&is->subpq) > 1)
1662 sp2 = frame_queue_peek_next(&is->subpq);
1663 else
1664 sp2 = NULL;
1665
1666 if (sp->serial != is->subtitleq.serial
1667 || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1668 || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1669 {
1670 if (sp->uploaded) {
1671 int i;
1672 for (i = 0; i < sp->sub.num_rects; i++) {
1673 AVSubtitleRect *sub_rect = sp->sub.rects[i];
1674 uint8_t *pixels;
1675 int pitch, j;
1676
1677 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1678 for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1679 memset(pixels, 0, sub_rect->w << 2);
1680 SDL_UnlockTexture(is->sub_texture);
1681 }
1682 }
1683 }
1684 frame_queue_next(&is->subpq);
1685 } else {
1686 break;
1687 }
1688 }
1689 }
1690
1691 frame_queue_next(&is->pictq);
1692 is->force_refresh = 1;
1693
1694 if (is->step && !is->paused)
1695 stream_toggle_pause(is);
1696 }
1697 display:
1698 /* display picture */
1699 if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1700 video_display(is);
1701 }
1702 is->force_refresh = 0;
1703 if (show_status) {
1704 AVBPrint buf;
1705 static int64_t last_time;
1706 int64_t cur_time;
1707 int aqsize, vqsize, sqsize;
1708 double av_diff;
1709
1710 cur_time = av_gettime_relative();
1711 if (!last_time || (cur_time - last_time) >= 30000) {
1712 aqsize = 0;
1713 vqsize = 0;
1714 sqsize = 0;
1715 if (is->audio_st)
1716 aqsize = is->audioq.size;
1717 if (is->video_st)
1718 vqsize = is->videoq.size;
1719 if (is->subtitle_st)
1720 sqsize = is->subtitleq.size;
1721 av_diff = 0;
1722 if (is->audio_st && is->video_st)
1723 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1724 else if (is->video_st)
1725 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1726 else if (is->audio_st)
1727 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1728
1729 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1730 av_bprintf(&buf,
1731 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1732 get_master_clock(is),
1733 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1734 av_diff,
1735 is->frame_drops_early + is->frame_drops_late,
1736 aqsize / 1024,
1737 vqsize / 1024,
1738 sqsize);
1739
1740 if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1741 fprintf(stderr, "%s", buf.str);
1742 else
1743 av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1744
1745 fflush(stderr);
1746 av_bprint_finalize(&buf, NULL);
1747
1748 last_time = cur_time;
1749 }
1750 }
1751 }
1752
1753 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1754 {
1755 Frame *vp;
1756
1757 #if defined(DEBUG_SYNC)
1758 printf("frame_type=%c pts=%0.3f\n",
1759 av_get_picture_type_char(src_frame->pict_type), pts);
1760 #endif
1761
1762 if (!(vp = frame_queue_peek_writable(&is->pictq)))
1763 return -1;
1764
1765 vp->sar = src_frame->sample_aspect_ratio;
1766 vp->uploaded = 0;
1767
1768 vp->width = src_frame->width;
1769 vp->height = src_frame->height;
1770 vp->format = src_frame->format;
1771
1772 vp->pts = pts;
1773 vp->duration = duration;
1774 vp->pos = pos;
1775 vp->serial = serial;
1776
1777 set_default_window_size(vp->width, vp->height, vp->sar);
1778
1779 av_frame_move_ref(vp->frame, src_frame);
1780 frame_queue_push(&is->pictq);
1781 return 0;
1782 }
1783
1784 static int get_video_frame(VideoState *is, AVFrame *frame)
1785 {
1786 int got_picture;
1787
1788 if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1789 return -1;
1790
1791 if (got_picture) {
1792 double dpts = NAN;
1793
1794 if (frame->pts != AV_NOPTS_VALUE)
1795 dpts = av_q2d(is->video_st->time_base) * frame->pts;
1796
1797 frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1798
1799 if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1800 if (frame->pts != AV_NOPTS_VALUE) {
1801 double diff = dpts - get_master_clock(is);
1802 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1803 diff - is->frame_last_filter_delay < 0 &&
1804 is->viddec.pkt_serial == is->vidclk.serial &&
1805 is->videoq.nb_packets) {
1806 is->frame_drops_early++;
1807 av_frame_unref(frame);
1808 got_picture = 0;
1809 }
1810 }
1811 }
1812 }
1813
1814 return got_picture;
1815 }
1816
1817 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1818 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1819 {
1820 int ret, i;
1821 int nb_filters = graph->nb_filters;
1822 AVFilterInOut *outputs = NULL, *inputs = NULL;
1823
1824 if (filtergraph) {
1825 outputs = avfilter_inout_alloc();
1826 inputs = avfilter_inout_alloc();
1827 if (!outputs || !inputs) {
1828 ret = AVERROR(ENOMEM);
1829 goto fail;
1830 }
1831
1832 outputs->name = av_strdup("in");
1833 outputs->filter_ctx = source_ctx;
1834 outputs->pad_idx = 0;
1835 outputs->next = NULL;
1836
1837 inputs->name = av_strdup("out");
1838 inputs->filter_ctx = sink_ctx;
1839 inputs->pad_idx = 0;
1840 inputs->next = NULL;
1841
1842 if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1843 goto fail;
1844 } else {
1845 if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1846 goto fail;
1847 }
1848
1849 /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1850 for (i = 0; i < graph->nb_filters - nb_filters; i++)
1851 FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1852
1853 ret = avfilter_graph_config(graph, NULL);
1854 fail:
1855 avfilter_inout_free(&outputs);
1856 avfilter_inout_free(&inputs);
1857 return ret;
1858 }
1859
1860 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1861 {
1862 enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1863 char sws_flags_str[512] = "";
1864 char buffersrc_args[256];
1865 int ret;
1866 AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1867 AVCodecParameters *codecpar = is->video_st->codecpar;
1868 AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1869 const AVDictionaryEntry *e = NULL;
1870 int nb_pix_fmts = 0;
1871 int i, j;
1872 AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1873
1874 if (!par)
1875 return AVERROR(ENOMEM);
1876
1877 for (i = 0; i < renderer_info.num_texture_formats; i++) {
1878 for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1879 if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1880 pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1881 break;
1882 }
1883 }
1884 }
1885 pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1886
1887 while ((e = av_dict_iterate(sws_dict, e))) {
1888 if (!strcmp(e->key, "sws_flags")) {
1889 av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1890 } else
1891 av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1892 }
1893 if (strlen(sws_flags_str))
1894 sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1895
1896 graph->scale_sws_opts = av_strdup(sws_flags_str);
1897
1898 snprintf(buffersrc_args, sizeof(buffersrc_args),
1899 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
1900 "colorspace=%d:range=%d",
1901 frame->width, frame->height, frame->format,
1902 is->video_st->time_base.num, is->video_st->time_base.den,
1903 codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
1904 frame->colorspace, frame->color_range);
1905 if (fr.num && fr.den)
1906 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1907
1908 if ((ret = avfilter_graph_create_filter(&filt_src,
1909 avfilter_get_by_name("buffer"),
1910 "ffplay_buffer", buffersrc_args, NULL,
1911 graph)) < 0)
1912 goto fail;
1913 par->hw_frames_ctx = frame->hw_frames_ctx;
1914 ret = av_buffersrc_parameters_set(filt_src, par);
1915 if (ret < 0)
1916 goto fail;
1917
1918 ret = avfilter_graph_create_filter(&filt_out,
1919 avfilter_get_by_name("buffersink"),
1920 "ffplay_buffersink", NULL, NULL, graph);
1921 if (ret < 0)
1922 goto fail;
1923
1924 if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1925 goto fail;
1926 if (!vk_renderer &&
1927 (ret = av_opt_set_int_list(filt_out, "color_spaces", sdl_supported_color_spaces, AVCOL_SPC_UNSPECIFIED, AV_OPT_SEARCH_CHILDREN)) < 0)
1928 goto fail;
1929
1930 last_filter = filt_out;
1931
1932 /* Note: this macro adds a filter before the most recently added filter, so
1933 * the filters end up being processed in reverse order */
1934 #define INSERT_FILT(name, arg) do { \
1935 AVFilterContext *filt_ctx; \
1936 \
1937 ret = avfilter_graph_create_filter(&filt_ctx, \
1938 avfilter_get_by_name(name), \
1939 "ffplay_" name, arg, NULL, graph); \
1940 if (ret < 0) \
1941 goto fail; \
1942 \
1943 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1944 if (ret < 0) \
1945 goto fail; \
1946 \
1947 last_filter = filt_ctx; \
1948 } while (0)
1949
1950 if (autorotate) {
1951 double theta = 0.0;
1952 int32_t *displaymatrix = NULL;
1953 AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1954 if (sd)
1955 displaymatrix = (int32_t *)sd->data;
1956 if (!displaymatrix) {
1957 const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1958 is->video_st->codecpar->nb_coded_side_data,
1959 AV_PKT_DATA_DISPLAYMATRIX);
1960 if (psd)
1961 displaymatrix = (int32_t *)psd->data;
1962 }
1963 theta = get_rotation(displaymatrix);
1964
1965 if (fabs(theta - 90) < 1.0) {
1966 INSERT_FILT("transpose", "clock");
1967 } else if (fabs(theta - 180) < 1.0) {
1968 INSERT_FILT("hflip", NULL);
1969 INSERT_FILT("vflip", NULL);
1970 } else if (fabs(theta - 270) < 1.0) {
1971 INSERT_FILT("transpose", "cclock");
1972 } else if (fabs(theta) > 1.0) {
1973 char rotate_buf[64];
1974 snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1975 INSERT_FILT("rotate", rotate_buf);
1976 }
1977 }
1978
1979 if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1980 goto fail;
1981
1982 is->in_video_filter = filt_src;
1983 is->out_video_filter = filt_out;
1984
1985 fail:
1986 av_freep(&par);
1987 return ret;
1988 }
1989
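/* configure_audio_filters(): (re)build the audio filtergraph: an "abuffer"
 * source described by the incoming sample rate/format/channel layout, an
 * "abuffersink" forced to signed 16-bit output, and the user's -af chain in
 * between. When force_output_format is set, the sink is additionally locked
 * to the channel layout and sample rate of the opened audio device
 * (is->audio_tgt). */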
1990 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1991 {
1992 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1993 int sample_rates[2] = { 0, -1 };
1994 AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1995 char aresample_swr_opts[512] = "";
1996 const AVDictionaryEntry *e = NULL;
1997 AVBPrint bp;
1998 char asrc_args[256];
1999 int ret;
2000
2001 avfilter_graph_free(&is->agraph);
2002 if (!(is->agraph = avfilter_graph_alloc()))
2003 return AVERROR(ENOMEM);
2004 is->agraph->nb_threads = filter_nbthreads;
2005
2006 av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
2007
2008 while ((e = av_dict_iterate(swr_opts, e)))
2009 av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2010 if (strlen(aresample_swr_opts))
2011 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2012 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2013
2014 av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2015
2016 ret = snprintf(asrc_args, sizeof(asrc_args),
2017 "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2018 is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2019 1, is->audio_filter_src.freq, bp.str);
2020
2021 ret = avfilter_graph_create_filter(&filt_asrc,
2022 avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2023 asrc_args, NULL, is->agraph);
2024 if (ret < 0)
2025 goto end;
2026
2027
2028 ret = avfilter_graph_create_filter(&filt_asink,
2029 avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2030 NULL, NULL, is->agraph);
2031 if (ret < 0)
2032 goto end;
2033
2034 if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2035 goto end;
2036 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2037 goto end;
2038
2039 if (force_output_format) {
2040 av_bprint_clear(&bp);
2041 av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
2042 sample_rates [0] = is->audio_tgt.freq;
2043 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2044 goto end;
2045 if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2046 goto end;
2047 if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2048 goto end;
2049 }
2050
2051
2052 if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2053 goto end;
2054
2055 is->in_audio_filter = filt_asrc;
2056 is->out_audio_filter = filt_asink;
2057
2058 end:
2059 if (ret < 0)
2060 avfilter_graph_free(&is->agraph);
2061 av_bprint_finalize(&bp, NULL);
2062
2063 return ret;
2064 }
2065
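/* audio_thread(): decoding loop for audio. Each decoded frame is pushed
 * through the audio filtergraph (which is reconfigured whenever the sample
 * format, channel layout, sample rate or packet serial changes) and the
 * filtered frames are queued in sampq for the SDL audio callback to consume. */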
2066 static int audio_thread(void *arg)
2067 {
2068 VideoState *is = arg;
2069 AVFrame *frame = av_frame_alloc();
2070 Frame *af;
2071 int last_serial = -1;
2072 int reconfigure;
2073 int got_frame = 0;
2074 AVRational tb;
2075 int ret = 0;
2076
2077 if (!frame)
2078 return AVERROR(ENOMEM);
2079
2080 do {
2081 if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2082 goto the_end;
2083
2084 if (got_frame) {
2085 tb = (AVRational){1, frame->sample_rate};
2086
2087 reconfigure =
2088 cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2089 frame->format, frame->ch_layout.nb_channels) ||
2090 av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2091 is->audio_filter_src.freq != frame->sample_rate ||
2092 is->auddec.pkt_serial != last_serial;
2093
2094 if (reconfigure) {
2095 char buf1[1024], buf2[1024];
2096 av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2097 av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2098 av_log(NULL, AV_LOG_DEBUG,
2099 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2100 is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2101 frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2102
2103 is->audio_filter_src.fmt = frame->format;
2104 ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2105 if (ret < 0)
2106 goto the_end;
2107 is->audio_filter_src.freq = frame->sample_rate;
2108 last_serial = is->auddec.pkt_serial;
2109
2110 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2111 goto the_end;
2112 }
2113
2114 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2115 goto the_end;
2116
2117 while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2118 FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2119 tb = av_buffersink_get_time_base(is->out_audio_filter);
2120 if (!(af = frame_queue_peek_writable(&is->sampq)))
2121 goto the_end;
2122
2123 af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2124 af->pos = fd ? fd->pkt_pos : -1;
2125 af->serial = is->auddec.pkt_serial;
2126 af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2127
2128 av_frame_move_ref(af->frame, frame);
2129 frame_queue_push(&is->sampq);
2130
2131 if (is->audioq.serial != is->auddec.pkt_serial)
2132 break;
2133 }
2134 if (ret == AVERROR_EOF)
2135 is->auddec.finished = is->auddec.pkt_serial;
2136 }
2137 } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2138 the_end:
2139 avfilter_graph_free(&is->agraph);
2140 av_frame_free(&frame);
2141 return ret;
2142 }
2143
2144 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2145 {
2146 packet_queue_start(d->queue);
2147 d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2148 if (!d->decoder_tid) {
2149 av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2150 return AVERROR(ENOMEM);
2151 }
2152 return 0;
2153 }
2154
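/* video_thread(): decoding loop for video. The video filtergraph is rebuilt
 * whenever the frame size, pixel format, packet serial or selected -vf chain
 * changes; filtered frames are timestamped using the sink's time base and
 * queued with queue_picture() for the display loop. */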
2155 static int video_thread(void *arg)
2156 {
2157 VideoState *is = arg;
2158 AVFrame *frame = av_frame_alloc();
2159 double pts;
2160 double duration;
2161 int ret;
2162 AVRational tb = is->video_st->time_base;
2163 AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2164
2165 AVFilterGraph *graph = NULL;
2166 AVFilterContext *filt_out = NULL, *filt_in = NULL;
2167 int last_w = 0;
2168 int last_h = 0;
2169 enum AVPixelFormat last_format = -2;
2170 int last_serial = -1;
2171 int last_vfilter_idx = 0;
2172
2173 if (!frame)
2174 return AVERROR(ENOMEM);
2175
2176 for (;;) {
2177 ret = get_video_frame(is, frame);
2178 if (ret < 0)
2179 goto the_end;
2180 if (!ret)
2181 continue;
2182
2183 if ( last_w != frame->width
2184 || last_h != frame->height
2185 || last_format != frame->format
2186 || last_serial != is->viddec.pkt_serial
2187 || last_vfilter_idx != is->vfilter_idx) {
2188 av_log(NULL, AV_LOG_DEBUG,
2189 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2190 last_w, last_h,
2191 (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2192 frame->width, frame->height,
2193 (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2194 avfilter_graph_free(&graph);
2195 graph = avfilter_graph_alloc();
2196 if (!graph) {
2197 ret = AVERROR(ENOMEM);
2198 goto the_end;
2199 }
2200 graph->nb_threads = filter_nbthreads;
2201 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2202 SDL_Event event;
2203 event.type = FF_QUIT_EVENT;
2204 event.user.data1 = is;
2205 SDL_PushEvent(&event);
2206 goto the_end;
2207 }
2208 filt_in = is->in_video_filter;
2209 filt_out = is->out_video_filter;
2210 last_w = frame->width;
2211 last_h = frame->height;
2212 last_format = frame->format;
2213 last_serial = is->viddec.pkt_serial;
2214 last_vfilter_idx = is->vfilter_idx;
2215 frame_rate = av_buffersink_get_frame_rate(filt_out);
2216 }
2217
2218 ret = av_buffersrc_add_frame(filt_in, frame);
2219 if (ret < 0)
2220 goto the_end;
2221
2222 while (ret >= 0) {
2223 FrameData *fd;
2224
2225 is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2226
2227 ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2228 if (ret < 0) {
2229 if (ret == AVERROR_EOF)
2230 is->viddec.finished = is->viddec.pkt_serial;
2231 ret = 0;
2232 break;
2233 }
2234
2235 fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2236
2237 is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2238 if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2239 is->frame_last_filter_delay = 0;
2240 tb = av_buffersink_get_time_base(filt_out);
2241 duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2242 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2243 ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2244 av_frame_unref(frame);
2245 if (is->videoq.serial != is->viddec.pkt_serial)
2246 break;
2247 }
2248
2249 if (ret < 0)
2250 goto the_end;
2251 }
2252 the_end:
2253 avfilter_graph_free(&graph);
2254 av_frame_free(&frame);
2255 return 0;
2256 }
2257
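/* subtitle_thread(): decoding loop for subtitles. Decoded bitmap subtitles
 * (sub.format == 0) are pushed into subpq with their PTS so the video
 * refresh code can overlay them; other decoded subtitle types are freed. */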
2258 static int subtitle_thread(void *arg)
2259 {
2260 VideoState *is = arg;
2261 Frame *sp;
2262 int got_subtitle;
2263 double pts;
2264
2265 for (;;) {
2266 if (!(sp = frame_queue_peek_writable(&is->subpq)))
2267 return 0;
2268
2269 if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2270 break;
2271
2272 pts = 0;
2273
2274 if (got_subtitle && sp->sub.format == 0) {
2275 if (sp->sub.pts != AV_NOPTS_VALUE)
2276 pts = sp->sub.pts / (double)AV_TIME_BASE;
2277 sp->pts = pts;
2278 sp->serial = is->subdec.pkt_serial;
2279 sp->width = is->subdec.avctx->width;
2280 sp->height = is->subdec.avctx->height;
2281 sp->uploaded = 0;
2282
2283 /* now we can update the picture count */
2284 frame_queue_push(&is->subpq);
2285 } else if (got_subtitle) {
2286 avsubtitle_free(&sp->sub);
2287 }
2288 }
2289 return 0;
2290 }
2291
2292 /* copy samples so they can be shown in the audio (waveform/RDFT) display */
2293 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2294 {
2295 int size, len;
2296
2297 size = samples_size / sizeof(short);
2298 while (size > 0) {
2299 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2300 if (len > size)
2301 len = size;
2302 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2303 samples += len;
2304 is->sample_array_index += len;
2305 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2306 is->sample_array_index = 0;
2307 size -= len;
2308 }
2309 }
2310
2311 /* return the wanted number of samples to get better sync if sync_type is video
2312 * or external master clock */
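/* Illustrative example (numbers not from the source): with nb_samples = 1024
 * and audio_src.freq = 48000, a clock difference diff of +5 ms that passes
 * the averaged-threshold check makes the code below request about
 * 1024 + 0.005 * 48000 = 1264 samples, which av_clip() then limits to at
 * most SAMPLE_CORRECTION_PERCENT_MAX percent above the original count. */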
2313 static int synchronize_audio(VideoState *is, int nb_samples)
2314 {
2315 int wanted_nb_samples = nb_samples;
2316
2317 /* if not master, then we try to remove or add samples to correct the clock */
2318 if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2319 double diff, avg_diff;
2320 int min_nb_samples, max_nb_samples;
2321
2322 diff = get_clock(&is->audclk) - get_master_clock(is);
2323
2324 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2325 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2326 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2327 /* not enough measurements yet for a reliable estimate */
2328 is->audio_diff_avg_count++;
2329 } else {
2330 /* estimate the A-V difference */
2331 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2332
2333 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2334 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2335 min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2336 max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2337 wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2338 }
2339 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2340 diff, avg_diff, wanted_nb_samples - nb_samples,
2341 is->audio_clock, is->audio_diff_threshold);
2342 }
2343 } else {
2344 /* the difference is too large: probably initial PTS errors, so
2345 reset the A-V filter */
2346 is->audio_diff_avg_count = 0;
2347 is->audio_diff_cum = 0;
2348 }
2349 }
2350
2351 return wanted_nb_samples;
2352 }
2353
2354 /**
2355 * Decode one audio frame and return its uncompressed size.
2356 *
2357 * The processed audio frame is decoded, converted if required, and
2358 * stored in is->audio_buf, with size in bytes given by the return
2359 * value.
2360 */
2361 static int audio_decode_frame(VideoState *is)
2362 {
2363 int data_size, resampled_data_size;
2364 av_unused double audio_clock0;
2365 int wanted_nb_samples;
2366 Frame *af;
2367
2368 if (is->paused)
2369 return -1;
2370
2371 do {
2372 #if defined(_WIN32)
2373 while (frame_queue_nb_remaining(&is->sampq) == 0) {
2374 if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2375 return -1;
2376 av_usleep (1000);
2377 }
2378 #endif
2379 if (!(af = frame_queue_peek_readable(&is->sampq)))
2380 return -1;
2381 frame_queue_next(&is->sampq);
2382 } while (af->serial != is->audioq.serial);
2383
2384 data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2385 af->frame->nb_samples,
2386 af->frame->format, 1);
2387
2388 wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2389
2390 if (af->frame->format != is->audio_src.fmt ||
2391 av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2392 af->frame->sample_rate != is->audio_src.freq ||
2393 (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2394 swr_free(&is->swr_ctx);
2395 swr_alloc_set_opts2(&is->swr_ctx,
2396 &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2397 &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2398 0, NULL);
2399 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2400 av_log(NULL, AV_LOG_ERROR,
2401 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2402 af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2403 is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2404 swr_free(&is->swr_ctx);
2405 return -1;
2406 }
2407 if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2408 return -1;
2409 is->audio_src.freq = af->frame->sample_rate;
2410 is->audio_src.fmt = af->frame->format;
2411 }
2412
2413 if (is->swr_ctx) {
2414 const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2415 uint8_t **out = &is->audio_buf1;
2416 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2417 int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2418 int len2;
2419 if (out_size < 0) {
2420 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2421 return -1;
2422 }
2423 if (wanted_nb_samples != af->frame->nb_samples) {
2424 if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2425 wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2426 av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2427 return -1;
2428 }
2429 }
2430 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2431 if (!is->audio_buf1)
2432 return AVERROR(ENOMEM);
2433 len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2434 if (len2 < 0) {
2435 av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2436 return -1;
2437 }
2438 if (len2 == out_count) {
2439 av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2440 if (swr_init(is->swr_ctx) < 0)
2441 swr_free(&is->swr_ctx);
2442 }
2443 is->audio_buf = is->audio_buf1;
2444 resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2445 } else {
2446 is->audio_buf = af->frame->data[0];
2447 resampled_data_size = data_size;
2448 }
2449
2450 audio_clock0 = is->audio_clock;
2451 /* update the audio clock with the pts */
2452 if (!isnan(af->pts))
2453 is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2454 else
2455 is->audio_clock = NAN;
2456 is->audio_clock_serial = af->serial;
2457 #ifdef DEBUG
2458 {
2459 static double last_clock;
2460 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2461 is->audio_clock - last_clock,
2462 is->audio_clock, audio_clock0);
2463 last_clock = is->audio_clock;
2464 }
2465 #endif
2466 return resampled_data_size;
2467 }
2468
2469 /* prepare a new audio buffer */
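/* Called by SDL from its own audio thread whenever the device needs more
 * data. It drains decoded/resampled data from audio_buf into the SDL stream
 * (applying volume and mute), then sets the audio clock to
 *   audio_clock - (2 * audio_hw_buf_size + audio_write_buf_size) / bytes_per_sec
 * i.e. the PTS of the last decoded sample minus what is still pending in the
 * (assumed double-buffered) hardware and in our own unwritten buffer. */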
2470 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2471 {
2472 VideoState *is = opaque;
2473 int audio_size, len1;
2474
2475 audio_callback_time = av_gettime_relative();
2476
2477 while (len > 0) {
2478 if (is->audio_buf_index >= is->audio_buf_size) {
2479 audio_size = audio_decode_frame(is);
2480 if (audio_size < 0) {
2481 /* if error, just output silence */
2482 is->audio_buf = NULL;
2483 is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2484 } else {
2485 if (is->show_mode != SHOW_MODE_VIDEO)
2486 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2487 is->audio_buf_size = audio_size;
2488 }
2489 is->audio_buf_index = 0;
2490 }
2491 len1 = is->audio_buf_size - is->audio_buf_index;
2492 if (len1 > len)
2493 len1 = len;
2494 if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2495 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2496 else {
2497 memset(stream, 0, len1);
2498 if (!is->muted && is->audio_buf)
2499 SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2500 }
2501 len -= len1;
2502 stream += len1;
2503 is->audio_buf_index += len1;
2504 }
2505 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2506 /* Let's assume the audio driver that is used by SDL has two periods. */
2507 if (!isnan(is->audio_clock)) {
2508 set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2509 sync_clock_to_slave(&is->extclk, &is->audclk);
2510 }
2511 }
2512
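/* audio_open(): negotiate an SDL audio device for the wanted channel layout
 * and sample rate, falling back through smaller channel counts and alternate
 * sample rates until SDL accepts a configuration. The requested period is
 * 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC) samples, but at least
 * SDL_AUDIO_MIN_BUFFER_SIZE. On success audio_hw_params is filled in and the
 * hardware buffer size in bytes is returned. */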
2513 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2514 {
2515 SDL_AudioSpec wanted_spec, spec;
2516 const char *env;
2517 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2518 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2519 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2520 int wanted_nb_channels = wanted_channel_layout->nb_channels;
2521
2522 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2523 if (env) {
2524 wanted_nb_channels = atoi(env);
2525 av_channel_layout_uninit(wanted_channel_layout);
2526 av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2527 }
2528 if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2529 av_channel_layout_uninit(wanted_channel_layout);
2530 av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2531 }
2532 wanted_nb_channels = wanted_channel_layout->nb_channels;
2533 wanted_spec.channels = wanted_nb_channels;
2534 wanted_spec.freq = wanted_sample_rate;
2535 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2536 av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2537 return -1;
2538 }
2539 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2540 next_sample_rate_idx--;
2541 wanted_spec.format = AUDIO_S16SYS;
2542 wanted_spec.silence = 0;
2543 wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2544 wanted_spec.callback = sdl_audio_callback;
2545 wanted_spec.userdata = opaque;
2546 while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2547 av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2548 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2549 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2550 if (!wanted_spec.channels) {
2551 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2552 wanted_spec.channels = wanted_nb_channels;
2553 if (!wanted_spec.freq) {
2554 av_log(NULL, AV_LOG_ERROR,
2555 "No more combinations to try, audio open failed\n");
2556 return -1;
2557 }
2558 }
2559 av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2560 }
2561 if (spec.format != AUDIO_S16SYS) {
2562 av_log(NULL, AV_LOG_ERROR,
2563 "SDL advised audio format %d is not supported!\n", spec.format);
2564 return -1;
2565 }
2566 if (spec.channels != wanted_spec.channels) {
2567 av_channel_layout_uninit(wanted_channel_layout);
2568 av_channel_layout_default(wanted_channel_layout, spec.channels);
2569 if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2570 av_log(NULL, AV_LOG_ERROR,
2571 "SDL advised channel count %d is not supported!\n", spec.channels);
2572 return -1;
2573 }
2574 }
2575
2576 audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2577 audio_hw_params->freq = spec.freq;
2578 if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2579 return -1;
2580 audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2581 audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2582 if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2583 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2584 return -1;
2585 }
2586 return spec.size;
2587 }
2588
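/* create_hwaccel(): when -hwaccel is given, try to derive a hardware device
 * context of that type from the Vulkan renderer's device; if derivation is
 * not supported (ENOSYS), fall back to creating a standalone device context. */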
2589 static int create_hwaccel(AVBufferRef **device_ctx)
2590 {
2591 enum AVHWDeviceType type;
2592 int ret;
2593 AVBufferRef *vk_dev;
2594
2595 *device_ctx = NULL;
2596
2597 if (!hwaccel)
2598 return 0;
2599
2600 type = av_hwdevice_find_type_by_name(hwaccel);
2601 if (type == AV_HWDEVICE_TYPE_NONE)
2602 return AVERROR(ENOTSUP);
2603
2604 ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2605 if (ret < 0)
2606 return ret;
2607
2608 ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2609 if (!ret)
2610 return 0;
2611
2612 if (ret != AVERROR(ENOSYS))
2613 return ret;
2614
2615 av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2616 ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2617 return ret;
2618 }
2619
2620 /* open a given stream. Return 0 if OK */
2621 static int stream_component_open(VideoState *is, int stream_index)
2622 {
2623 AVFormatContext *ic = is->ic;
2624 AVCodecContext *avctx;
2625 const AVCodec *codec;
2626 const char *forced_codec_name = NULL;
2627 AVDictionary *opts = NULL;
2628 const AVDictionaryEntry *t = NULL;
2629 int sample_rate;
2630 AVChannelLayout ch_layout = { 0 };
2631 int ret = 0;
2632 int stream_lowres = lowres;
2633
2634 if (stream_index < 0 || stream_index >= ic->nb_streams)
2635 return -1;
2636
2637 avctx = avcodec_alloc_context3(NULL);
2638 if (!avctx)
2639 return AVERROR(ENOMEM);
2640
2641 ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2642 if (ret < 0)
2643 goto fail;
2644 avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2645
2646 codec = avcodec_find_decoder(avctx->codec_id);
2647
2648 switch(avctx->codec_type){
2649 case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2650 case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2651 case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2652 }
2653 if (forced_codec_name)
2654 codec = avcodec_find_decoder_by_name(forced_codec_name);
2655 if (!codec) {
2656 if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2657 "No codec could be found with name '%s'\n", forced_codec_name);
2658 else av_log(NULL, AV_LOG_WARNING,
2659 "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2660 ret = AVERROR(EINVAL);
2661 goto fail;
2662 }
2663
2664 avctx->codec_id = codec->id;
2665 if (stream_lowres > codec->max_lowres) {
2666 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2667 codec->max_lowres);
2668 stream_lowres = codec->max_lowres;
2669 }
2670 avctx->lowres = stream_lowres;
2671
2672 if (fast)
2673 avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2674
2675 ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2676 ic->streams[stream_index], codec, &opts);
2677 if (ret < 0)
2678 goto fail;
2679
2680 if (!av_dict_get(opts, "threads", NULL, 0))
2681 av_dict_set(&opts, "threads", "auto", 0);
2682 if (stream_lowres)
2683 av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2684
2685 av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2686
2687 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2688 ret = create_hwaccel(&avctx->hw_device_ctx);
2689 if (ret < 0)
2690 goto fail;
2691 }
2692
2693 if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2694 goto fail;
2695 }
2696 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2697 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2698 ret = AVERROR_OPTION_NOT_FOUND;
2699 goto fail;
2700 }
2701
2702 is->eof = 0;
2703 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2704 switch (avctx->codec_type) {
2705 case AVMEDIA_TYPE_AUDIO:
2706 {
2707 AVFilterContext *sink;
2708
2709 is->audio_filter_src.freq = avctx->sample_rate;
2710 ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2711 if (ret < 0)
2712 goto fail;
2713 is->audio_filter_src.fmt = avctx->sample_fmt;
2714 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2715 goto fail;
2716 sink = is->out_audio_filter;
2717 sample_rate = av_buffersink_get_sample_rate(sink);
2718 ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2719 if (ret < 0)
2720 goto fail;
2721 }
2722
2723 /* prepare audio output */
2724 if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2725 goto fail;
2726 is->audio_hw_buf_size = ret;
2727 is->audio_src = is->audio_tgt;
2728 is->audio_buf_size = 0;
2729 is->audio_buf_index = 0;
2730
2731 /* init averaging filter */
2732 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2733 is->audio_diff_avg_count = 0;
2734 /* since we do not have a precise enough audio FIFO fullness,
2735 we correct audio sync only if the error is larger than this threshold */
2736 is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2737
2738 is->audio_stream = stream_index;
2739 is->audio_st = ic->streams[stream_index];
2740
2741 if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2742 goto fail;
2743 if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2744 is->auddec.start_pts = is->audio_st->start_time;
2745 is->auddec.start_pts_tb = is->audio_st->time_base;
2746 }
2747 if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2748 goto out;
2749 SDL_PauseAudioDevice(audio_dev, 0);
2750 break;
2751 case AVMEDIA_TYPE_VIDEO:
2752 is->video_stream = stream_index;
2753 is->video_st = ic->streams[stream_index];
2754
2755 if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2756 goto fail;
2757 if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2758 goto out;
2759 is->queue_attachments_req = 1;
2760 break;
2761 case AVMEDIA_TYPE_SUBTITLE:
2762 is->subtitle_stream = stream_index;
2763 is->subtitle_st = ic->streams[stream_index];
2764
2765 if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2766 goto fail;
2767 if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2768 goto out;
2769 break;
2770 default:
2771 break;
2772 }
2773 goto out;
2774
2775 fail:
2776 avcodec_free_context(&avctx);
2777 out:
2778 av_channel_layout_uninit(&ch_layout);
2779 av_dict_free(&opts);
2780
2781 return ret;
2782 }
2783
2784 static int decode_interrupt_cb(void *ctx)
2785 {
2786 VideoState *is = ctx;
2787 return is->abort_request;
2788 }
2789
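/* A stream is considered sufficiently buffered once its queue holds more
 * than MIN_FRAMES packets and either no duration information is available or
 * the queued duration exceeds one second; aborted queues and attached
 * pictures always count as "enough". */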
2790 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2791 return stream_id < 0 ||
2792 queue->abort_request ||
2793 (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2794 queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2795 }
2796
2797 static int is_realtime(AVFormatContext *s)
2798 {
2799 if( !strcmp(s->iformat->name, "rtp")
2800 || !strcmp(s->iformat->name, "rtsp")
2801 || !strcmp(s->iformat->name, "sdp")
2802 )
2803 return 1;
2804
2805 if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2806 || !strncmp(s->url, "udp:", 4)
2807 )
2808 )
2809 return 1;
2810 return 0;
2811 }
2812
2813 /* this thread gets the stream from the disk or the network */
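/* It opens the input, selects the best audio/video/subtitle streams, opens
 * their decoders via stream_component_open(), and then loops: handling seek
 * and pause requests, throttling when the packet queues are full, and
 * demuxing packets into the per-stream queues until EOF or an abort request. */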
2814 static int read_thread(void *arg)
2815 {
2816 VideoState *is = arg;
2817 AVFormatContext *ic = NULL;
2818 int err, i, ret;
2819 int st_index[AVMEDIA_TYPE_NB];
2820 AVPacket *pkt = NULL;
2821 int64_t stream_start_time;
2822 int pkt_in_play_range = 0;
2823 const AVDictionaryEntry *t;
2824 SDL_mutex *wait_mutex = SDL_CreateMutex();
2825 int scan_all_pmts_set = 0;
2826 int64_t pkt_ts;
2827
2828 if (!wait_mutex) {
2829 av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2830 ret = AVERROR(ENOMEM);
2831 goto fail;
2832 }
2833
2834 memset(st_index, -1, sizeof(st_index));
2835 is->eof = 0;
2836
2837 pkt = av_packet_alloc();
2838 if (!pkt) {
2839 av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2840 ret = AVERROR(ENOMEM);
2841 goto fail;
2842 }
2843 ic = avformat_alloc_context();
2844 if (!ic) {
2845 av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2846 ret = AVERROR(ENOMEM);
2847 goto fail;
2848 }
2849 ic->interrupt_callback.callback = decode_interrupt_cb;
2850 ic->interrupt_callback.opaque = is;
2851 if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2852 av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2853 scan_all_pmts_set = 1;
2854 }
2855 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2856 if (err < 0) {
2857 print_error(is->filename, err);
2858 ret = -1;
2859 goto fail;
2860 }
2861 if (scan_all_pmts_set)
2862 av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2863
2864 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2865 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2866 ret = AVERROR_OPTION_NOT_FOUND;
2867 goto fail;
2868 }
2869 is->ic = ic;
2870
2871 if (genpts)
2872 ic->flags |= AVFMT_FLAG_GENPTS;
2873
2874 if (find_stream_info) {
2875 AVDictionary **opts;
2876 int orig_nb_streams = ic->nb_streams;
2877
2878 err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2879 if (err < 0) {
2880 av_log(NULL, AV_LOG_ERROR,
2881 "Error setting up avformat_find_stream_info() options\n");
2882 ret = err;
2883 goto fail;
2884 }
2885
2886 err = avformat_find_stream_info(ic, opts);
2887
2888 for (i = 0; i < orig_nb_streams; i++)
2889 av_dict_free(&opts[i]);
2890 av_freep(&opts);
2891
2892 if (err < 0) {
2893 av_log(NULL, AV_LOG_WARNING,
2894 "%s: could not find codec parameters\n", is->filename);
2895 ret = -1;
2896 goto fail;
2897 }
2898 }
2899
2900 if (ic->pb)
2901 ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2902
2903 if (seek_by_bytes < 0)
2904 seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2905 !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2906 strcmp("ogg", ic->iformat->name);
2907
2908 is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2909
2910 if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2911 window_title = av_asprintf("%s - %s", t->value, input_filename);
2912
2913 /* if seeking was requested, execute it */
2914 if (start_time != AV_NOPTS_VALUE) {
2915 int64_t timestamp;
2916
2917 timestamp = start_time;
2918 /* add the stream start time */
2919 if (ic->start_time != AV_NOPTS_VALUE)
2920 timestamp += ic->start_time;
2921 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2922 if (ret < 0) {
2923 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2924 is->filename, (double)timestamp / AV_TIME_BASE);
2925 }
2926 }
2927
2928 is->realtime = is_realtime(ic);
2929
2930 if (show_status)
2931 av_dump_format(ic, 0, is->filename, 0);
2932
2933 for (i = 0; i < ic->nb_streams; i++) {
2934 AVStream *st = ic->streams[i];
2935 enum AVMediaType type = st->codecpar->codec_type;
2936 st->discard = AVDISCARD_ALL;
2937 if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2938 if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2939 st_index[type] = i;
2940 }
2941 for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2942 if (wanted_stream_spec[i] && st_index[i] == -1) {
2943 av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2944 st_index[i] = INT_MAX;
2945 }
2946 }
2947
2948 if (!video_disable)
2949 st_index[AVMEDIA_TYPE_VIDEO] =
2950 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2951 st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2952 if (!audio_disable)
2953 st_index[AVMEDIA_TYPE_AUDIO] =
2954 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2955 st_index[AVMEDIA_TYPE_AUDIO],
2956 st_index[AVMEDIA_TYPE_VIDEO],
2957 NULL, 0);
2958 if (!video_disable && !subtitle_disable)
2959 st_index[AVMEDIA_TYPE_SUBTITLE] =
2960 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2961 st_index[AVMEDIA_TYPE_SUBTITLE],
2962 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2963 st_index[AVMEDIA_TYPE_AUDIO] :
2964 st_index[AVMEDIA_TYPE_VIDEO]),
2965 NULL, 0);
2966
2967 is->show_mode = show_mode;
2968 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2969 AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2970 AVCodecParameters *codecpar = st->codecpar;
2971 AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2972 if (codecpar->width)
2973 set_default_window_size(codecpar->width, codecpar->height, sar);
2974 }
2975
2976 /* open the streams */
2977 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2978 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2979 }
2980
2981 ret = -1;
2982 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2983 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2984 }
2985 if (is->show_mode == SHOW_MODE_NONE)
2986 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2987
2988 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2989 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2990 }
2991
2992 if (is->video_stream < 0 && is->audio_stream < 0) {
2993 av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2994 is->filename);
2995 ret = -1;
2996 goto fail;
2997 }
2998
2999 if (infinite_buffer < 0 && is->realtime)
3000 infinite_buffer = 1;
3001
3002 for (;;) {
3003 if (is->abort_request)
3004 break;
3005 if (is->paused != is->last_paused) {
3006 is->last_paused = is->paused;
3007 if (is->paused)
3008 is->read_pause_return = av_read_pause(ic);
3009 else
3010 av_read_play(ic);
3011 }
3012 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3013 if (is->paused &&
3014 (!strcmp(ic->iformat->name, "rtsp") ||
3015 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3016 /* wait 10 ms to avoid trying to get another packet */
3017 /* XXX: horrible */
3018 SDL_Delay(10);
3019 continue;
3020 }
3021 #endif
3022 if (is->seek_req) {
3023 int64_t seek_target = is->seek_pos;
3024 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3025 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3026 // FIXME the +-2 compensates for rounding not being done in the correct direction
3027 // when the seek_pos/seek_rel variables are generated
3028
3029 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3030 if (ret < 0) {
3031 av_log(NULL, AV_LOG_ERROR,
3032 "%s: error while seeking\n", is->ic->url);
3033 } else {
3034 if (is->audio_stream >= 0)
3035 packet_queue_flush(&is->audioq);
3036 if (is->subtitle_stream >= 0)
3037 packet_queue_flush(&is->subtitleq);
3038 if (is->video_stream >= 0)
3039 packet_queue_flush(&is->videoq);
3040 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3041 set_clock(&is->extclk, NAN, 0);
3042 } else {
3043 set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3044 }
3045 }
3046 is->seek_req = 0;
3047 is->queue_attachments_req = 1;
3048 is->eof = 0;
3049 if (is->paused)
3050 step_to_next_frame(is);
3051 }
3052 if (is->queue_attachments_req) {
3053 if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3054 if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3055 goto fail;
3056 packet_queue_put(&is->videoq, pkt);
3057 packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3058 }
3059 is->queue_attachments_req = 0;
3060 }
3061
3062 /* if the queues are full, no need to read more */
3063 if (infinite_buffer<1 &&
3064 (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3065 || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3066 stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3067 stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3068 /* wait 10 ms */
3069 SDL_LockMutex(wait_mutex);
3070 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3071 SDL_UnlockMutex(wait_mutex);
3072 continue;
3073 }
3074 if (!is->paused &&
3075 (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3076 (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3077 if (loop != 1 && (!loop || --loop)) {
3078 stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3079 } else if (autoexit) {
3080 ret = AVERROR_EOF;
3081 goto fail;
3082 }
3083 }
3084 ret = av_read_frame(ic, pkt);
3085 if (ret < 0) {
3086 if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3087 if (is->video_stream >= 0)
3088 packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3089 if (is->audio_stream >= 0)
3090 packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3091 if (is->subtitle_stream >= 0)
3092 packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3093 is->eof = 1;
3094 }
3095 if (ic->pb && ic->pb->error) {
3096 if (autoexit)
3097 goto fail;
3098 else
3099 break;
3100 }
3101 SDL_LockMutex(wait_mutex);
3102 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3103 SDL_UnlockMutex(wait_mutex);
3104 continue;
3105 } else {
3106 is->eof = 0;
3107 }
3108 /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
3109 stream_start_time = ic->streams[pkt->stream_index]->start_time;
3110 pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3111 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3112 (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3113 av_q2d(ic->streams[pkt->stream_index]->time_base) -
3114 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3115 <= ((double)duration / 1000000);
3116 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3117 packet_queue_put(&is->audioq, pkt);
3118 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3119 && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3120 packet_queue_put(&is->videoq, pkt);
3121 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3122 packet_queue_put(&is->subtitleq, pkt);
3123 } else {
3124 av_packet_unref(pkt);
3125 }
3126 }
3127
3128 ret = 0;
3129 fail:
3130 if (ic && !is->ic)
3131 avformat_close_input(&ic);
3132
3133 av_packet_free(&pkt);
3134 if (ret != 0) {
3135 SDL_Event event;
3136
3137 event.type = FF_QUIT_EVENT;
3138 event.user.data1 = is;
3139 SDL_PushEvent(&event);
3140 }
3141 SDL_DestroyMutex(wait_mutex);
3142 return 0;
3143 }
3144
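/* stream_open(): allocate the VideoState, initialize the frame and packet
 * queues and the audio/video/external clocks, apply the startup volume, and
 * spawn read_thread, which performs the actual demuxing and decoder setup. */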
3145 static VideoState *stream_open(const char *filename,
3146 const AVInputFormat *iformat)
3147 {
3148 VideoState *is;
3149
3150 is = av_mallocz(sizeof(VideoState));
3151 if (!is)
3152 return NULL;
3153 is->last_video_stream = is->video_stream = -1;
3154 is->last_audio_stream = is->audio_stream = -1;
3155 is->last_subtitle_stream = is->subtitle_stream = -1;
3156 is->filename = av_strdup(filename);
3157 if (!is->filename)
3158 goto fail;
3159 is->iformat = iformat;
3160 is->ytop = 0;
3161 is->xleft = 0;
3162
3163 /* start video display */
3164 if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3165 goto fail;
3166 if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3167 goto fail;
3168 if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3169 goto fail;
3170
3171 if (packet_queue_init(&is->videoq) < 0 ||
3172 packet_queue_init(&is->audioq) < 0 ||
3173 packet_queue_init(&is->subtitleq) < 0)
3174 goto fail;
3175
3176 if (!(is->continue_read_thread = SDL_CreateCond())) {
3177 av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3178 goto fail;
3179 }
3180
3181 init_clock(&is->vidclk, &is->videoq.serial);
3182 init_clock(&is->audclk, &is->audioq.serial);
3183 init_clock(&is->extclk, &is->extclk.serial);
3184 is->audio_clock_serial = -1;
3185 if (startup_volume < 0)
3186 av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3187 if (startup_volume > 100)
3188 av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3189 startup_volume = av_clip(startup_volume, 0, 100);
3190 startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3191 is->audio_volume = startup_volume;
3192 is->muted = 0;
3193 is->av_sync_type = av_sync_type;
3194 is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3195 if (!is->read_tid) {
3196 av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3197 fail:
3198 stream_close(is);
3199 return NULL;
3200 }
3201 return is;
3202 }
3203
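/* stream_cycle_channel(): switch to the next stream of the given media type,
 * wrapping around and, when the current video stream belongs to a program,
 * restricting the search to that program; subtitles may also cycle to "none". */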
3204 static void stream_cycle_channel(VideoState *is, int codec_type)
3205 {
3206 AVFormatContext *ic = is->ic;
3207 int start_index, stream_index;
3208 int old_index;
3209 AVStream *st;
3210 AVProgram *p = NULL;
3211 int nb_streams = is->ic->nb_streams;
3212
3213 if (codec_type == AVMEDIA_TYPE_VIDEO) {
3214 start_index = is->last_video_stream;
3215 old_index = is->video_stream;
3216 } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3217 start_index = is->last_audio_stream;
3218 old_index = is->audio_stream;
3219 } else {
3220 start_index = is->last_subtitle_stream;
3221 old_index = is->subtitle_stream;
3222 }
3223 stream_index = start_index;
3224
3225 if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3226 p = av_find_program_from_stream(ic, NULL, is->video_stream);
3227 if (p) {
3228 nb_streams = p->nb_stream_indexes;
3229 for (start_index = 0; start_index < nb_streams; start_index++)
3230 if (p->stream_index[start_index] == stream_index)
3231 break;
3232 if (start_index == nb_streams)
3233 start_index = -1;
3234 stream_index = start_index;
3235 }
3236 }
3237
3238 for (;;) {
3239 if (++stream_index >= nb_streams)
3240 {
3241 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3242 {
3243 stream_index = -1;
3244 is->last_subtitle_stream = -1;
3245 goto the_end;
3246 }
3247 if (start_index == -1)
3248 return;
3249 stream_index = 0;
3250 }
3251 if (stream_index == start_index)
3252 return;
3253 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3254 if (st->codecpar->codec_type == codec_type) {
3255 /* check that parameters are OK */
3256 switch (codec_type) {
3257 case AVMEDIA_TYPE_AUDIO:
3258 if (st->codecpar->sample_rate != 0 &&
3259 st->codecpar->ch_layout.nb_channels != 0)
3260 goto the_end;
3261 break;
3262 case AVMEDIA_TYPE_VIDEO:
3263 case AVMEDIA_TYPE_SUBTITLE:
3264 goto the_end;
3265 default:
3266 break;
3267 }
3268 }
3269 }
3270 the_end:
3271 if (p && stream_index != -1)
3272 stream_index = p->stream_index[stream_index];
3273 av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3274 av_get_media_type_string(codec_type),
3275 old_index,
3276 stream_index);
3277
3278 stream_component_close(is, old_index);
3279 stream_component_open(is, stream_index);
3280 }
3281
3282
3283 static void toggle_full_screen(VideoState *is)
3284 {
3285 is_full_screen = !is_full_screen;
3286 SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3287 }
3288
3289 static void toggle_audio_display(VideoState *is)
3290 {
3291 int next = is->show_mode;
3292 do {
3293 next = (next + 1) % SHOW_MODE_NB;
3294 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3295 if (is->show_mode != next) {
3296 is->force_refresh = 1;
3297 is->show_mode = next;
3298 }
3299 }
3300
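/* refresh_loop_wait_event(): alternates between pumping SDL events and
 * calling video_refresh() until an event becomes available; it also hides
 * the mouse cursor after CURSOR_HIDE_DELAY of inactivity. */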
3301 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3302 double remaining_time = 0.0;
3303 SDL_PumpEvents();
3304 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3305 if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3306 SDL_ShowCursor(0);
3307 cursor_hidden = 1;
3308 }
3309 if (remaining_time > 0.0)
3310 av_usleep((int64_t)(remaining_time * 1000000.0));
3311 remaining_time = REFRESH_RATE;
3312 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3313 video_refresh(is, &remaining_time);
3314 SDL_PumpEvents();
3315 }
3316 }
3317
3318 static void seek_chapter(VideoState *is, int incr)
3319 {
3320 int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3321 int i;
3322
3323 if (!is->ic->nb_chapters)
3324 return;
3325
3326 /* find the current chapter */
3327 for (i = 0; i < is->ic->nb_chapters; i++) {
3328 AVChapter *ch = is->ic->chapters[i];
3329 if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3330 i--;
3331 break;
3332 }
3333 }
3334
3335 i += incr;
3336 i = FFMAX(i, 0);
3337 if (i >= is->ic->nb_chapters)
3338 return;
3339
3340 av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3341 stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3342 AV_TIME_BASE_Q), 0, 0);
3343 }
3344
3345 /* handle an event sent by the GUI */
3346 static void event_loop(VideoState *cur_stream)
3347 {
3348 SDL_Event event;
3349 double incr, pos, frac;
3350
3351 for (;;) {
3352 double x;
3353 refresh_loop_wait_event(cur_stream, &event);
3354 switch (event.type) {
3355 case SDL_KEYDOWN:
3356 if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3357 do_exit(cur_stream);
3358 break;
3359 }
3360 // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3361 if (!cur_stream->width)
3362 continue;
3363 switch (event.key.keysym.sym) {
3364 case SDLK_f:
3365 toggle_full_screen(cur_stream);
3366 cur_stream->force_refresh = 1;
3367 break;
3368 case SDLK_p:
3369 case SDLK_SPACE:
3370 toggle_pause(cur_stream);
3371 break;
3372 case SDLK_m:
3373 toggle_mute(cur_stream);
3374 break;
3375 case SDLK_KP_MULTIPLY:
3376 case SDLK_0:
3377 update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3378 break;
3379 case SDLK_KP_DIVIDE:
3380 case SDLK_9:
3381 update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3382 break;
3383 case SDLK_s: // S: Step to next frame
3384 step_to_next_frame(cur_stream);
3385 break;
3386 case SDLK_a:
3387 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3388 break;
3389 case SDLK_v:
3390 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3391 break;
3392 case SDLK_c:
3393 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3394 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3395 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3396 break;
3397 case SDLK_t:
3398 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3399 break;
3400 case SDLK_w:
3401 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3402 if (++cur_stream->vfilter_idx >= nb_vfilters)
3403 cur_stream->vfilter_idx = 0;
3404 } else {
3405 cur_stream->vfilter_idx = 0;
3406 toggle_audio_display(cur_stream);
3407 }
3408 break;
3409 case SDLK_PAGEUP:
3410 if (cur_stream->ic->nb_chapters <= 1) {
3411 incr = 600.0;
3412 goto do_seek;
3413 }
3414 seek_chapter(cur_stream, 1);
3415 break;
3416 case SDLK_PAGEDOWN:
3417 if (cur_stream->ic->nb_chapters <= 1) {
3418 incr = -600.0;
3419 goto do_seek;
3420 }
3421 seek_chapter(cur_stream, -1);
3422 break;
3423 case SDLK_LEFT:
3424 incr = seek_interval ? -seek_interval : -10.0;
3425 goto do_seek;
3426 case SDLK_RIGHT:
3427 incr = seek_interval ? seek_interval : 10.0;
3428 goto do_seek;
3429 case SDLK_UP:
3430 incr = 60.0;
3431 goto do_seek;
3432 case SDLK_DOWN:
3433 incr = -60.0;
3434 do_seek:
3435 if (seek_by_bytes) {
3436 pos = -1;
3437 if (pos < 0 && cur_stream->video_stream >= 0)
3438 pos = frame_queue_last_pos(&cur_stream->pictq);
3439 if (pos < 0 && cur_stream->audio_stream >= 0)
3440 pos = frame_queue_last_pos(&cur_stream->sampq);
3441 if (pos < 0)
3442 pos = avio_tell(cur_stream->ic->pb);
3443 if (cur_stream->ic->bit_rate)
3444 incr *= cur_stream->ic->bit_rate / 8.0;
3445 else
3446 incr *= 180000.0;
3447 pos += incr;
3448 stream_seek(cur_stream, pos, incr, 1);
3449 } else {
3450 pos = get_master_clock(cur_stream);
3451 if (isnan(pos))
3452 pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3453 pos += incr;
3454 if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3455 pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3456 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3457 }
3458 break;
3459 default:
3460 break;
3461 }
3462 break;
3463 case SDL_MOUSEBUTTONDOWN:
3464 if (exit_on_mousedown) {
3465 do_exit(cur_stream);
3466 break;
3467 }
3468 if (event.button.button == SDL_BUTTON_LEFT) {
3469 static int64_t last_mouse_left_click = 0;
3470 if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3471 toggle_full_screen(cur_stream);
3472 cur_stream->force_refresh = 1;
3473 last_mouse_left_click = 0;
3474 } else {
3475 last_mouse_left_click = av_gettime_relative();
3476 }
3477 }
3478 case SDL_MOUSEMOTION:
3479 if (cursor_hidden) {
3480 SDL_ShowCursor(1);
3481 cursor_hidden = 0;
3482 }
3483 cursor_last_shown = av_gettime_relative();
3484 if (event.type == SDL_MOUSEBUTTONDOWN) {
3485 if (event.button.button != SDL_BUTTON_RIGHT)
3486 break;
3487 x = event.button.x;
3488 } else {
3489 if (!(event.motion.state & SDL_BUTTON_RMASK))
3490 break;
3491 x = event.motion.x;
3492 }
3493 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3494 uint64_t size = avio_size(cur_stream->ic->pb);
3495 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3496 } else {
3497 int64_t ts;
3498 int ns, hh, mm, ss;
3499 int tns, thh, tmm, tss;
3500 tns = cur_stream->ic->duration / 1000000LL;
3501 thh = tns / 3600;
3502 tmm = (tns % 3600) / 60;
3503 tss = (tns % 60);
3504 frac = x / cur_stream->width;
3505 ns = frac * tns;
3506 hh = ns / 3600;
3507 mm = (ns % 3600) / 60;
3508 ss = (ns % 60);
3509 av_log(NULL, AV_LOG_INFO,
3510 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3511 hh, mm, ss, thh, tmm, tss);
3512 ts = frac * cur_stream->ic->duration;
3513 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3514 ts += cur_stream->ic->start_time;
3515 stream_seek(cur_stream, ts, 0, 0);
3516 }
3517 break;
3518 case SDL_WINDOWEVENT:
3519 switch (event.window.event) {
3520 case SDL_WINDOWEVENT_SIZE_CHANGED:
3521 screen_width = cur_stream->width = event.window.data1;
3522 screen_height = cur_stream->height = event.window.data2;
3523 if (cur_stream->vis_texture) {
3524 SDL_DestroyTexture(cur_stream->vis_texture);
3525 cur_stream->vis_texture = NULL;
3526 }
3527 if (vk_renderer)
3528 vk_renderer_resize(vk_renderer, screen_width, screen_height);
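/* fall through: a size change also forces a refresh, like an expose event */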
3529 case SDL_WINDOWEVENT_EXPOSED:
3530 cur_stream->force_refresh = 1;
3531 }
3532 break;
3533 case SDL_QUIT:
3534 case FF_QUIT_EVENT:
3535 do_exit(cur_stream);
3536 break;
3537 default:
3538 break;
3539 }
3540 }
3541 }
3542
3543 static int opt_width(void *optctx, const char *opt, const char *arg)
3544 {
3545 double num;
3546 int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3547 if (ret < 0)
3548 return ret;
3549
3550 screen_width = num;
3551 return 0;
3552 }
3553
3554 static int opt_height(void *optctx, const char *opt, const char *arg)
3555 {
3556 double num;
3557 int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3558 if (ret < 0)
3559 return ret;
3560
3561 screen_height = num;
3562 return 0;
3563 }
3564
3565 static int opt_format(void *optctx, const char *opt, const char *arg)
3566 {
3567 file_iformat = av_find_input_format(arg);
3568 if (!file_iformat) {
3569 av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3570 return AVERROR(EINVAL);
3571 }
3572 return 0;
3573 }
3574
3575 static int opt_sync(void *optctx, const char *opt, const char *arg)
3576 {
3577 if (!strcmp(arg, "audio"))
3578 av_sync_type = AV_SYNC_AUDIO_MASTER;
3579 else if (!strcmp(arg, "video"))
3580 av_sync_type = AV_SYNC_VIDEO_MASTER;
3581 else if (!strcmp(arg, "ext"))
3582 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3583 else {
3584 av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3585 exit(1);
3586 }
3587 return 0;
3588 }
3589
3590 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3591 {
3592 show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3593 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3594 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
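/* If the argument is not one of the known mode names, it is parsed below
 * as a numeric mode index (0 .. SHOW_MODE_NB-1). */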
3595
3596 if (show_mode == SHOW_MODE_NONE) {
3597 double num;
3598 int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3599 if (ret < 0)
3600 return ret;
3601 show_mode = num;
3602 }
3603 return 0;
3604 }
3605
3606 static int opt_input_file(void *optctx, const char *filename)
3607 {
3608 if (input_filename) {
3609 av_log(NULL, AV_LOG_FATAL,
3610 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3611 filename, input_filename);
3612 return AVERROR(EINVAL);
3613 }
3614 if (!strcmp(filename, "-"))
3615 filename = "fd:";
3616 input_filename = av_strdup(filename);
3617 if (!input_filename)
3618 return AVERROR(ENOMEM);
3619
3620 return 0;
3621 }
3622
3623 static int opt_codec(void *optctx, const char *opt, const char *arg)
3624 {
3625 const char *spec = strchr(opt, ':');
3626 const char **name;
3627 if (!spec) {
3628 av_log(NULL, AV_LOG_ERROR,
3629 "No media specifier was specified in '%s' in option '%s'\n",
3630 arg, opt);
3631 return AVERROR(EINVAL);
3632 }
3633 spec++;
3634
3635 switch (spec[0]) {
3636 case 'a' : name = &audio_codec_name; break;
3637 case 's' : name = &subtitle_codec_name; break;
3638 case 'v' : name = &video_codec_name; break;
3639 default:
3640 av_log(NULL, AV_LOG_ERROR,
3641 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3642 return AVERROR(EINVAL);
3643 }
3644
3645 av_freep(name);
3646 *name = av_strdup(arg);
3647 return *name ? 0 : AVERROR(ENOMEM);
3648 }
3649
3650 static int dummy;
3651
3652 static const OptionDef options[] = {
3653 CMDUTILS_COMMON_OPTIONS
3654 { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3655 { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3656 { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3657 { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3658 { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3659 { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3660 { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3661 { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3662 { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3663 { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3664 { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3665 { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3666 { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3667 { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3668 { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3669 { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3670 { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3671 { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3672 { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3673 { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3674 { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3675 { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3676 { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3677 { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3678 { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3679 { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3680 { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3681 { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3682 { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3683 { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3684 { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3685 { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3686 { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3687 { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3688 { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3689 { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3690 { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3691 { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3692 { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3693 { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3694 { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3695 { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3696 { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3697 { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3698 "read and decode the streams to fill missing information with heuristics" },
3699 { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3700 { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3701 { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3702 { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3703 { NULL, },
3704 };
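/* Illustrative invocations using the options above (examples, not from the
 * source): "ffplay -x 640 -y 360 input.mp4" forces the window size, and
 * "ffplay -an -vf hflip input.mp4" disables audio and applies a video filter. */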
3705
3706 static void show_usage(void)
3707 {
3708 av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3709 av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3710 av_log(NULL, AV_LOG_INFO, "\n");
3711 }
3712
3713 void show_help_default(const char *opt, const char *arg)
3714 {
3715 av_log_set_callback(log_callback_help);
3716 show_usage();
3717 show_help_options(options, "Main options:", 0, OPT_EXPERT);
3718 show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3719 printf("\n");
3720 show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3721 show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3722 show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3723 printf("\nWhile playing:\n"
3724 "q, ESC quit\n"
3725 "f toggle full screen\n"
3726 "p, SPC pause\n"
3727 "m toggle mute\n"
3728 "9, 0 decrease and increase volume respectively\n"
3729 "/, * decrease and increase volume respectively\n"
3730 "a cycle audio channel in the current program\n"
3731 "v cycle video channel\n"
3732 "t cycle subtitle channel in the current program\n"
3733 "c cycle program\n"
3734 "w cycle video filters or show modes\n"
3735 "s activate frame-step mode\n"
3736 "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3737 "down/up seek backward/forward 1 minute\n"
3738 "page down/page up seek backward/forward 10 minutes\n"
3739 "right mouse click seek to percentage in file corresponding to fraction of width\n"
3740 "left double-click toggle full screen\n"
3741 );
3742 }
3743
3744 /* Program entry point */
3745 int main(int argc, char **argv)
3746 {
3747 int flags, ret;
3748 VideoState *is;
3749
3750 init_dynload();
3751
3752 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3753 parse_loglevel(argc, argv, options);
3754
3755 /* register all codecs, demux and protocols */
3756 #if CONFIG_AVDEVICE
3757 avdevice_register_all();
3758 #endif
3759 avformat_network_init();
3760
3761 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3762 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3763
3764 show_banner(argc, argv, options);
3765
3766 ret = parse_options(NULL, argc, argv, options, opt_input_file);
3767 if (ret < 0)
3768 exit(ret == AVERROR_EXIT ? 0 : 1);
3769
3770 if (!input_filename) {
3771 show_usage();
3772 av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3773 av_log(NULL, AV_LOG_FATAL,
3774 "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3775 exit(1);
3776 }
3777
3778 if (display_disable) {
3779 video_disable = 1;
3780 }
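/* Request all SDL subsystems, then drop audio/video from the flags
 * according to -an / -nodisp before calling SDL_Init(). */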
3781 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3782 if (audio_disable)
3783 flags &= ~SDL_INIT_AUDIO;
3784 else {
3785 /* Try to work around an occasional ALSA buffer underflow issue when the
3786 * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3787 if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3788 SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3789 }
3790 if (display_disable)
3791 flags &= ~SDL_INIT_VIDEO;
3792 if (SDL_Init (flags)) {
3793 av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3794 av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3795 exit(1);
3796 }
3797
3798 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3799 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3800
3801 if (!display_disable) {
3802 int flags = SDL_WINDOW_HIDDEN;
3803 if (alwaysontop)
3804 #if SDL_VERSION_ATLEAST(2,0,5)
3805 flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3806 #else
3807 av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3808 #endif
3809 if (borderless)
3810 flags |= SDL_WINDOW_BORDERLESS;
3811 else
3812 flags |= SDL_WINDOW_RESIZABLE;
3813
3814 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3815 SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3816 #endif
3817 if (hwaccel && !enable_vulkan) {
3818 av_log(NULL, AV_LOG_INFO, "Enabling the vulkan renderer to support hwaccel %s\n", hwaccel);
3819 enable_vulkan = 1;
3820 }
3821 if (enable_vulkan) {
3822 vk_renderer = vk_get_renderer();
3823 if (vk_renderer) {
3824 #if SDL_VERSION_ATLEAST(2, 0, 6)
3825 flags |= SDL_WINDOW_VULKAN;
3826 #endif
3827 } else {
3828 av_log(NULL, AV_LOG_WARNING, "Vulkan renderer is not available, falling back to the SDL renderer\n");
3829 enable_vulkan = 0;
3830 }
3831 }
3832 window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3833 SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3834 if (!window) {
3835 av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s\n", SDL_GetError());
3836 do_exit(NULL);
3837 }
3838
3839 if (vk_renderer) {
3840 AVDictionary *dict = NULL;
3841
3842 if (vulkan_params)
3843 av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3844 ret = vk_renderer_create(vk_renderer, window, dict);
3845 av_dict_free(&dict);
3846 if (ret < 0) {
3847 av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3848 do_exit(NULL);
3849 }
3850 } else {
3851 renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3852 if (!renderer) {
3853 av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3854 renderer = SDL_CreateRenderer(window, -1, 0);
3855 }
3856 if (renderer) {
3857 if (!SDL_GetRendererInfo(renderer, &renderer_info))
3858 av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3859 }
3860 if (!renderer || !renderer_info.num_texture_formats) {
3861 av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s\n", SDL_GetError());
3862 do_exit(NULL);
3863 }
3864 }
3865 }
3866
3867 is = stream_open(input_filename, file_iformat);
3868 if (!is) {
3869 av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3870 do_exit(NULL);
3871 }
3872
3873 event_loop(is);
3874
3875 /* never returns */
3876
3877 return 0;
3878 }
3879