ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavutil/tx.h"
52 
53 #include "libavfilter/avfilter.h"
54 #include "libavfilter/buffersink.h"
55 #include "libavfilter/buffersrc.h"
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 #include "opt_common.h"
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 typedef struct MyAVPacketList {
112  int serial;
114 
115 typedef struct PacketQueue {
118  int size;
119  int64_t duration;
121  int serial;
122  SDL_mutex *mutex;
123  SDL_cond *cond;
124 } PacketQueue;
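/*
 * How the "serial" counters are used (summary of the code below):
 * packet_queue_flush() and packet_queue_start() increment q->serial, each
 * queued packet records the serial that was current when it was added, and
 * the consumers (decoders, frame queues, clocks) compare serials so that
 * anything belonging to a pre-seek/pre-flush generation is discarded.
 */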
125 
126 #define VIDEO_PICTURE_QUEUE_SIZE 3
127 #define SUBPICTURE_QUEUE_SIZE 16
128 #define SAMPLE_QUEUE_SIZE 9
129 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
130 
131 typedef struct AudioParams {
132  int freq;
137 } AudioParams;
138 
139 typedef struct Clock {
140  double pts; /* clock base */
141  double pts_drift; /* clock base minus time at which we updated the clock */
142  double last_updated;
143  double speed;
144  int serial; /* clock is based on a packet with this serial */
145  int paused;
146  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
147 } Clock;
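/*
 * How the clock is read back (see set_clock_at()/get_clock() below):
 * set_clock_at() stores pts_drift = pts - last_updated, so at any later
 * time t the clock value is pts_drift + t, i.e. the stored pts advanced by
 * the wall-clock time elapsed since the update; get_clock() additionally
 * subtracts (t - last_updated) * (1.0 - speed), so a speed other than 1.0
 * stretches or shrinks that elapsed time.
 */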
148 
149 typedef struct FrameData {
150  int64_t pkt_pos;
151 } FrameData;
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket *pkt;
193  int finished;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
198  int64_t next_pts;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
258 
259  enum ShowMode {
261  } show_mode;
268  float *real_data;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
288  int eof;
289 
290  char *filename;
292  int step;
293 
295  AVFilterContext *in_video_filter; // the first filter in the video chain
296  AVFilterContext *out_video_filter; // the last filter in the video chain
297  AVFilterContext *in_audio_filter; // the first filter in the audio chain
298  AVFilterContext *out_audio_filter; // the last filter in the audio chain
299  AVFilterGraph *agraph; // audio filter graph
300 
302 
304 } VideoState;
305 
306 /* options specified by the user */
308 static const char *input_filename;
309 static const char *window_title;
310 static int default_width = 640;
311 static int default_height = 480;
312 static int screen_width = 0;
313 static int screen_height = 0;
314 static int screen_left = SDL_WINDOWPOS_CENTERED;
315 static int screen_top = SDL_WINDOWPOS_CENTERED;
316 static int audio_disable;
317 static int video_disable;
318 static int subtitle_disable;
319 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
320 static int seek_by_bytes = -1;
321 static float seek_interval = 10;
322 static int display_disable;
323 static int borderless;
324 static int alwaysontop;
325 static int startup_volume = 100;
326 static int show_status = -1;
328 static int64_t start_time = AV_NOPTS_VALUE;
329 static int64_t duration = AV_NOPTS_VALUE;
330 static int fast = 0;
331 static int genpts = 0;
332 static int lowres = 0;
333 static int decoder_reorder_pts = -1;
334 static int autoexit;
335 static int exit_on_keydown;
336 static int exit_on_mousedown;
337 static int loop = 1;
338 static int framedrop = -1;
339 static int infinite_buffer = -1;
340 static enum ShowMode show_mode = SHOW_MODE_NONE;
341 static const char *audio_codec_name;
342 static const char *subtitle_codec_name;
343 static const char *video_codec_name;
344 double rdftspeed = 0.02;
345 static int64_t cursor_last_shown;
346 static int cursor_hidden = 0;
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 static int autorotate = 1;
351 static int find_stream_info = 1;
352 static int filter_nbthreads = 0;
353 
354 /* current context */
355 static int is_full_screen;
356 static int64_t audio_callback_time;
357 
358 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
359 
360 static SDL_Window *window;
361 static SDL_Renderer *renderer;
362 static SDL_RendererInfo renderer_info = {0};
363 static SDL_AudioDeviceID audio_dev;
364 
365 static const struct TextureFormatEntry {
369  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
370  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
371  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
372  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
373  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
374  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
375  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
376  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
377  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
378  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
379  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
380  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
381  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
382  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
383  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
384  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
385  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
386  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
387  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
388  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
389 };
390 
391 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
392 {
394  if (ret < 0)
395  return ret;
396 
398  return 0;
399 }
400 
401 static inline
402 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
403  enum AVSampleFormat fmt2, int64_t channel_count2)
404 {
405  /* If channel count == 1, planar and non-planar formats are the same */
406  if (channel_count1 == 1 && channel_count2 == 1)
407  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
408  else
409  return channel_count1 != channel_count2 || fmt1 != fmt2;
410 }
411 
413 {
414  MyAVPacketList pkt1;
415  int ret;
416 
417  if (q->abort_request)
418  return -1;
419 
420 
421  pkt1.pkt = pkt;
422  pkt1.serial = q->serial;
423 
424  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
425  if (ret < 0)
426  return ret;
427  q->nb_packets++;
428  q->size += pkt1.pkt->size + sizeof(pkt1);
429  q->duration += pkt1.pkt->duration;
430  /* XXX: should duplicate packet data in DV case */
431  SDL_CondSignal(q->cond);
432  return 0;
433 }
434 
436 {
437  AVPacket *pkt1;
438  int ret;
439 
440  pkt1 = av_packet_alloc();
441  if (!pkt1) {
442  av_packet_unref(pkt);
443  return -1;
444  }
445  av_packet_move_ref(pkt1, pkt);
446 
447  SDL_LockMutex(q->mutex);
448  ret = packet_queue_put_private(q, pkt1);
449  SDL_UnlockMutex(q->mutex);
450 
451  if (ret < 0)
452  av_packet_free(&pkt1);
453 
454  return ret;
455 }
456 
457 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
458 {
459  pkt->stream_index = stream_index;
460  return packet_queue_put(q, pkt);
461 }
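/*
 * The null (data-less) packet queued here is what eventually puts the
 * decoder into draining mode: sending an empty packet with
 * avcodec_send_packet() tells libavcodec that no more input will follow
 * for this stream.
 */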
462 
463 /* packet queue handling */
465 {
466  memset(q, 0, sizeof(PacketQueue));
467  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
468  if (!q->pkt_list)
469  return AVERROR(ENOMEM);
470  q->mutex = SDL_CreateMutex();
471  if (!q->mutex) {
472  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
473  return AVERROR(ENOMEM);
474  }
475  q->cond = SDL_CreateCond();
476  if (!q->cond) {
477  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
478  return AVERROR(ENOMEM);
479  }
480  q->abort_request = 1;
481  return 0;
482 }
483 
485 {
486  MyAVPacketList pkt1;
487 
488  SDL_LockMutex(q->mutex);
489  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
490  av_packet_free(&pkt1.pkt);
491  q->nb_packets = 0;
492  q->size = 0;
493  q->duration = 0;
494  q->serial++;
495  SDL_UnlockMutex(q->mutex);
496 }
497 
498 static void packet_queue_destroy(PacketQueue *q)
499 {
500  packet_queue_flush(q);
501  av_fifo_freep2(&q->pkt_list);
502  SDL_DestroyMutex(q->mutex);
503  SDL_DestroyCond(q->cond);
504 }
505 
507 {
508  SDL_LockMutex(q->mutex);
509 
510  q->abort_request = 1;
511 
512  SDL_CondSignal(q->cond);
513 
514  SDL_UnlockMutex(q->mutex);
515 }
516 
518 {
519  SDL_LockMutex(q->mutex);
520  q->abort_request = 0;
521  q->serial++;
522  SDL_UnlockMutex(q->mutex);
523 }
524 
525 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
526 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
527 {
528  MyAVPacketList pkt1;
529  int ret;
530 
531  SDL_LockMutex(q->mutex);
532 
533  for (;;) {
534  if (q->abort_request) {
535  ret = -1;
536  break;
537  }
538 
539  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
540  q->nb_packets--;
541  q->size -= pkt1.pkt->size + sizeof(pkt1);
542  q->duration -= pkt1.pkt->duration;
543  av_packet_move_ref(pkt, pkt1.pkt);
544  if (serial)
545  *serial = pkt1.serial;
546  av_packet_free(&pkt1.pkt);
547  ret = 1;
548  break;
549  } else if (!block) {
550  ret = 0;
551  break;
552  } else {
553  SDL_CondWait(q->cond, q->mutex);
554  }
555  }
556  SDL_UnlockMutex(q->mutex);
557  return ret;
558 }
559 
560 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
561  memset(d, 0, sizeof(Decoder));
562  d->pkt = av_packet_alloc();
563  if (!d->pkt)
564  return AVERROR(ENOMEM);
565  d->avctx = avctx;
566  d->queue = queue;
567  d->empty_queue_cond = empty_queue_cond;
568  d->start_pts = AV_NOPTS_VALUE;
569  d->pkt_serial = -1;
570  return 0;
571 }
572 
574  int ret = AVERROR(EAGAIN);
575 
576  for (;;) {
577  if (d->queue->serial == d->pkt_serial) {
578  do {
579  if (d->queue->abort_request)
580  return -1;
581 
582  switch (d->avctx->codec_type) {
583  case AVMEDIA_TYPE_VIDEO:
584  ret = avcodec_receive_frame(d->avctx, frame);
585  if (ret >= 0) {
586  if (decoder_reorder_pts == -1) {
587  frame->pts = frame->best_effort_timestamp;
588  } else if (!decoder_reorder_pts) {
589  frame->pts = frame->pkt_dts;
590  }
591  }
592  break;
593  case AVMEDIA_TYPE_AUDIO:
594  ret = avcodec_receive_frame(d->avctx, frame);
595  if (ret >= 0) {
596  AVRational tb = (AVRational){1, frame->sample_rate};
597  if (frame->pts != AV_NOPTS_VALUE)
598  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
599  else if (d->next_pts != AV_NOPTS_VALUE)
600  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
601  if (frame->pts != AV_NOPTS_VALUE) {
602  d->next_pts = frame->pts + frame->nb_samples;
603  d->next_pts_tb = tb;
604  }
605  }
606  break;
607  }
608  if (ret == AVERROR_EOF) {
609  d->finished = d->pkt_serial;
610  avcodec_flush_buffers(d->avctx);
611  return 0;
612  }
613  if (ret >= 0)
614  return 1;
615  } while (ret != AVERROR(EAGAIN));
616  }
617 
618  do {
619  if (d->queue->nb_packets == 0)
620  SDL_CondSignal(d->empty_queue_cond);
621  if (d->packet_pending) {
622  d->packet_pending = 0;
623  } else {
624  int old_serial = d->pkt_serial;
625  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
626  return -1;
627  if (old_serial != d->pkt_serial) {
628  avcodec_flush_buffers(d->avctx);
629  d->finished = 0;
630  d->next_pts = d->start_pts;
631  d->next_pts_tb = d->start_pts_tb;
632  }
633  }
634  if (d->queue->serial == d->pkt_serial)
635  break;
636  av_packet_unref(d->pkt);
637  } while (1);
638 
639  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
640  int got_frame = 0;
641  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
642  if (ret < 0) {
643  ret = AVERROR(EAGAIN);
644  } else {
645  if (got_frame && !d->pkt->data) {
646  d->packet_pending = 1;
647  }
648  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
649  }
650  av_packet_unref(d->pkt);
651  } else {
652  if (d->pkt->buf && !d->pkt->opaque_ref) {
653  FrameData *fd;
654 
655  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
656  if (!d->pkt->opaque_ref)
657  return AVERROR(ENOMEM);
658  fd = (FrameData*)d->pkt->opaque_ref->data;
659  fd->pkt_pos = d->pkt->pos;
660  }
661 
662  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
663  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
664  d->packet_pending = 1;
665  } else {
666  av_packet_unref(d->pkt);
667  }
668  }
669  }
670 }
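/*
 * decoder_decode_frame() return convention, as implemented above:
 *  1  a decoded frame (or subtitle) was produced,
 *  0  end of stream for the current packet serial (d->finished is set),
 * -1  the packet queue was aborted.
 */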
671 
672 static void decoder_destroy(Decoder *d) {
673  av_packet_free(&d->pkt);
674  avcodec_free_context(&d->avctx);
675 }
676 
678 {
679  av_frame_unref(vp->frame);
680  avsubtitle_free(&vp->sub);
681 }
682 
683 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
684 {
685  int i;
686  memset(f, 0, sizeof(FrameQueue));
687  if (!(f->mutex = SDL_CreateMutex())) {
688  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
689  return AVERROR(ENOMEM);
690  }
691  if (!(f->cond = SDL_CreateCond())) {
692  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
693  return AVERROR(ENOMEM);
694  }
695  f->pktq = pktq;
696  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
697  f->keep_last = !!keep_last;
698  for (i = 0; i < f->max_size; i++)
699  if (!(f->queue[i].frame = av_frame_alloc()))
700  return AVERROR(ENOMEM);
701  return 0;
702 }
703 
705 {
706  int i;
707  for (i = 0; i < f->max_size; i++) {
708  Frame *vp = &f->queue[i];
710  av_frame_free(&vp->frame);
711  }
712  SDL_DestroyMutex(f->mutex);
713  SDL_DestroyCond(f->cond);
714 }
715 
717 {
718  SDL_LockMutex(f->mutex);
719  SDL_CondSignal(f->cond);
720  SDL_UnlockMutex(f->mutex);
721 }
722 
724 {
725  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
726 }
727 
729 {
730  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
731 }
732 
734 {
735  return &f->queue[f->rindex];
736 }
737 
739 {
740  /* wait until we have space to put a new frame */
741  SDL_LockMutex(f->mutex);
742  while (f->size >= f->max_size &&
743  !f->pktq->abort_request) {
744  SDL_CondWait(f->cond, f->mutex);
745  }
746  SDL_UnlockMutex(f->mutex);
747 
748  if (f->pktq->abort_request)
749  return NULL;
750 
751  return &f->queue[f->windex];
752 }
753 
755 {
756  /* wait until we have a readable new frame */
757  SDL_LockMutex(f->mutex);
758  while (f->size - f->rindex_shown <= 0 &&
759  !f->pktq->abort_request) {
760  SDL_CondWait(f->cond, f->mutex);
761  }
762  SDL_UnlockMutex(f->mutex);
763 
764  if (f->pktq->abort_request)
765  return NULL;
766 
767  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
768 }
769 
771 {
772  if (++f->windex == f->max_size)
773  f->windex = 0;
774  SDL_LockMutex(f->mutex);
775  f->size++;
776  SDL_CondSignal(f->cond);
777  SDL_UnlockMutex(f->mutex);
778 }
779 
781 {
782  if (f->keep_last && !f->rindex_shown) {
783  f->rindex_shown = 1;
784  return;
785  }
786  frame_queue_unref_item(&f->queue[f->rindex]);
787  if (++f->rindex == f->max_size)
788  f->rindex = 0;
789  SDL_LockMutex(f->mutex);
790  f->size--;
791  SDL_CondSignal(f->cond);
792  SDL_UnlockMutex(f->mutex);
793 }
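/*
 * When keep_last is set, the first call to frame_queue_next() only flags
 * rindex_shown instead of dropping a frame, so the picture currently on
 * screen stays available through frame_queue_peek_last() (useful for
 * repaints, e.g. while paused), while frame_queue_nb_remaining() counts
 * only the frames that have not been shown yet.
 */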
794 
795 /* return the number of undisplayed frames in the queue */
797 {
798  return f->size - f->rindex_shown;
799 }
800 
801 /* return last shown position */
803 {
804  Frame *fp = &f->queue[f->rindex];
805  if (f->rindex_shown && fp->serial == f->pktq->serial)
806  return fp->pos;
807  else
808  return -1;
809 }
810 
811 static void decoder_abort(Decoder *d, FrameQueue *fq)
812 {
813  packet_queue_abort(d->queue);
814  frame_queue_signal(fq);
815  SDL_WaitThread(d->decoder_tid, NULL);
816  d->decoder_tid = NULL;
817  packet_queue_flush(d->queue);
818 }
819 
820 static inline void fill_rectangle(int x, int y, int w, int h)
821 {
822  SDL_Rect rect;
823  rect.x = x;
824  rect.y = y;
825  rect.w = w;
826  rect.h = h;
827  if (w && h)
828  SDL_RenderFillRect(renderer, &rect);
829 }
830 
831 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
832 {
833  Uint32 format;
834  int access, w, h;
835  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
836  void *pixels;
837  int pitch;
838  if (*texture)
839  SDL_DestroyTexture(*texture);
840  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
841  return -1;
842  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
843  return -1;
844  if (init_texture) {
845  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
846  return -1;
847  memset(pixels, 0, pitch * new_height);
848  SDL_UnlockTexture(*texture);
849  }
850  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
851  }
852  return 0;
853 }
854 
855 static void calculate_display_rect(SDL_Rect *rect,
856  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
857  int pic_width, int pic_height, AVRational pic_sar)
858 {
859  AVRational aspect_ratio = pic_sar;
860  int64_t width, height, x, y;
861 
862  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
863  aspect_ratio = av_make_q(1, 1);
864 
865  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
866 
867  /* XXX: we suppose the screen has a 1.0 pixel ratio */
868  height = scr_height;
869  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
870  if (width > scr_width) {
871  width = scr_width;
872  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
873  }
874  x = (scr_width - width) / 2;
875  y = (scr_height - height) / 2;
876  rect->x = scr_xleft + x;
877  rect->y = scr_ytop + y;
878  rect->w = FFMAX((int)width, 1);
879  rect->h = FFMAX((int)height, 1);
880 }
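/*
 * Worked example: a 1920x1080 picture with a 1:1 sample aspect ratio shown
 * in a 640x480 area first tries height = 480, giving width = 852 (rounded
 * down to even); that exceeds 640, so it settles on 640x360 and centers
 * the rectangle at x = 0, y = 60.
 */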
881 
882 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
883 {
884  int i;
885  *sdl_blendmode = SDL_BLENDMODE_NONE;
886  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
887  if (format == AV_PIX_FMT_RGB32 ||
891  *sdl_blendmode = SDL_BLENDMODE_BLEND;
892  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
894  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
895  return;
896  }
897  }
898 }
899 
900 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
901 {
902  int ret = 0;
903  Uint32 sdl_pix_fmt;
904  SDL_BlendMode sdl_blendmode;
905  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
906  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
907  return -1;
908  switch (sdl_pix_fmt) {
909  case SDL_PIXELFORMAT_IYUV:
910  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
911  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
912  frame->data[1], frame->linesize[1],
913  frame->data[2], frame->linesize[2]);
914  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
915  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
916  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
917  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
918  } else {
919  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
920  return -1;
921  }
922  break;
923  default:
924  if (frame->linesize[0] < 0) {
925  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
926  } else {
927  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
928  }
929  break;
930  }
931  return ret;
932 }
933 
935 {
936 #if SDL_VERSION_ATLEAST(2,0,8)
937  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
938  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
939  if (frame->color_range == AVCOL_RANGE_JPEG)
940  mode = SDL_YUV_CONVERSION_JPEG;
941  else if (frame->colorspace == AVCOL_SPC_BT709)
942  mode = SDL_YUV_CONVERSION_BT709;
943  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
944  mode = SDL_YUV_CONVERSION_BT601;
945  }
946  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
947 #endif
948 }
949 
951 {
952  Frame *vp;
953  Frame *sp = NULL;
954  SDL_Rect rect;
955 
956  vp = frame_queue_peek_last(&is->pictq);
957  if (is->subtitle_st) {
958  if (frame_queue_nb_remaining(&is->subpq) > 0) {
959  sp = frame_queue_peek(&is->subpq);
960 
961  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
962  if (!sp->uploaded) {
963  uint8_t* pixels[4];
964  int pitch[4];
965  int i;
966  if (!sp->width || !sp->height) {
967  sp->width = vp->width;
968  sp->height = vp->height;
969  }
970  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
971  return;
972 
973  for (i = 0; i < sp->sub.num_rects; i++) {
974  AVSubtitleRect *sub_rect = sp->sub.rects[i];
975 
976  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
977  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
978  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
979  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
980 
981  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
982  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
983  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
984  0, NULL, NULL, NULL);
985  if (!is->sub_convert_ctx) {
986  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
987  return;
988  }
989  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
990  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
991  0, sub_rect->h, pixels, pitch);
992  SDL_UnlockTexture(is->sub_texture);
993  }
994  }
995  sp->uploaded = 1;
996  }
997  } else
998  sp = NULL;
999  }
1000  }
1001 
1002  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1004 
1005  if (!vp->uploaded) {
1006  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1008  return;
1009  }
1010  vp->uploaded = 1;
1011  vp->flip_v = vp->frame->linesize[0] < 0;
1012  }
1013 
1014  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1016  if (sp) {
1017 #if USE_ONEPASS_SUBTITLE_RENDER
1018  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1019 #else
1020  int i;
1021  double xratio = (double)rect.w / (double)sp->width;
1022  double yratio = (double)rect.h / (double)sp->height;
1023  for (i = 0; i < sp->sub.num_rects; i++) {
1024  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1025  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1026  .y = rect.y + sub_rect->y * yratio,
1027  .w = sub_rect->w * xratio,
1028  .h = sub_rect->h * yratio};
1029  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1030  }
1031 #endif
1032  }
1033 }
1034 
1035 static inline int compute_mod(int a, int b)
1036 {
1037  return a < 0 ? a%b + b : a%b;
1038 }
1039 
1041 {
1042  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1043  int ch, channels, h, h2;
1044  int64_t time_diff;
1045  int rdft_bits, nb_freq;
1046 
1047  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1048  ;
1049  nb_freq = 1 << (rdft_bits - 1);
1050 
1051  /* compute display index : center on currently output samples */
1052  channels = s->audio_tgt.ch_layout.nb_channels;
1053  nb_display_channels = channels;
1054  if (!s->paused) {
1055  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1056  n = 2 * channels;
1057  delay = s->audio_write_buf_size;
1058  delay /= n;
1059 
1060  /* to be more precise, we take into account the time spent since
1061  the last buffer computation */
1062  if (audio_callback_time) {
1063  time_diff = av_gettime_relative() - audio_callback_time;
1064  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1065  }
1066 
1067  delay += 2 * data_used;
1068  if (delay < data_used)
1069  delay = data_used;
1070 
1071  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1072  if (s->show_mode == SHOW_MODE_WAVES) {
1073  h = INT_MIN;
1074  for (i = 0; i < 1000; i += channels) {
1075  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1076  int a = s->sample_array[idx];
1077  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1078  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1079  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1080  int score = a - d;
1081  if (h < score && (b ^ c) < 0) {
1082  h = score;
1083  i_start = idx;
1084  }
1085  }
1086  }
1087 
1088  s->last_i_start = i_start;
1089  } else {
1090  i_start = s->last_i_start;
1091  }
1092 
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1095 
1096  /* total height for one channel */
1097  h = s->height / nb_display_channels;
1098  /* graph height / 2 */
1099  h2 = (h * 9) / 20;
1100  for (ch = 0; ch < nb_display_channels; ch++) {
1101  i = i_start + ch;
1102  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1103  for (x = 0; x < s->width; x++) {
1104  y = (s->sample_array[i] * h2) >> 15;
1105  if (y < 0) {
1106  y = -y;
1107  ys = y1 - y;
1108  } else {
1109  ys = y1;
1110  }
1111  fill_rectangle(s->xleft + x, ys, 1, y);
1112  i += channels;
1113  if (i >= SAMPLE_ARRAY_SIZE)
1114  i -= SAMPLE_ARRAY_SIZE;
1115  }
1116  }
1117 
1118  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1119 
1120  for (ch = 1; ch < nb_display_channels; ch++) {
1121  y = s->ytop + ch * h;
1122  fill_rectangle(s->xleft, y, s->width, 1);
1123  }
1124  } else {
1125  int err = 0;
1126  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1127  return;
1128 
1129  if (s->xpos >= s->width)
1130  s->xpos = 0;
1131  nb_display_channels= FFMIN(nb_display_channels, 2);
1132  if (rdft_bits != s->rdft_bits) {
1133  const float rdft_scale = 1.0;
1134  av_tx_uninit(&s->rdft);
1135  av_freep(&s->real_data);
1136  av_freep(&s->rdft_data);
1137  s->rdft_bits = rdft_bits;
1138  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1139  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1140  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1141  0, 1 << rdft_bits, &rdft_scale, 0);
1142  }
1143  if (err < 0 || !s->rdft_data) {
1144  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1145  s->show_mode = SHOW_MODE_WAVES;
1146  } else {
1147  float *data_in[2];
1148  AVComplexFloat *data[2];
1149  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1150  uint32_t *pixels;
1151  int pitch;
1152  for (ch = 0; ch < nb_display_channels; ch++) {
1153  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1154  data[ch] = s->rdft_data + nb_freq * ch;
1155  i = i_start + ch;
1156  for (x = 0; x < 2 * nb_freq; x++) {
1157  double w = (x-nb_freq) * (1.0 / nb_freq);
1158  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1159  i += channels;
1160  if (i >= SAMPLE_ARRAY_SIZE)
1161  i -= SAMPLE_ARRAY_SIZE;
1162  }
1163  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1164  data[ch][0].im = data[ch][nb_freq].re;
1165  data[ch][nb_freq].re = 0;
1166  }
1167  /* Least efficient way to do this; we should of course access the
1168  * texture directly, but this is more than fast enough. */
1169  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1170  pitch >>= 2;
1171  pixels += pitch * s->height;
1172  for (y = 0; y < s->height; y++) {
1173  double w = 1 / sqrt(nb_freq);
1174  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1175  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1176  : a;
1177  a = FFMIN(a, 255);
1178  b = FFMIN(b, 255);
1179  pixels -= pitch;
1180  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1181  }
1182  SDL_UnlockTexture(s->vis_texture);
1183  }
1184  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1185  }
1186  if (!s->paused)
1187  s->xpos++;
1188  }
1189 }
1190 
1191 static void stream_component_close(VideoState *is, int stream_index)
1192 {
1193  AVFormatContext *ic = is->ic;
1194  AVCodecParameters *codecpar;
1195 
1196  if (stream_index < 0 || stream_index >= ic->nb_streams)
1197  return;
1198  codecpar = ic->streams[stream_index]->codecpar;
1199 
1200  switch (codecpar->codec_type) {
1201  case AVMEDIA_TYPE_AUDIO:
1202  decoder_abort(&is->auddec, &is->sampq);
1203  SDL_CloseAudioDevice(audio_dev);
1204  decoder_destroy(&is->auddec);
1205  swr_free(&is->swr_ctx);
1206  av_freep(&is->audio_buf1);
1207  is->audio_buf1_size = 0;
1208  is->audio_buf = NULL;
1209 
1210  if (is->rdft) {
1211  av_tx_uninit(&is->rdft);
1212  av_freep(&is->real_data);
1213  av_freep(&is->rdft_data);
1214  is->rdft = NULL;
1215  is->rdft_bits = 0;
1216  }
1217  break;
1218  case AVMEDIA_TYPE_VIDEO:
1219  decoder_abort(&is->viddec, &is->pictq);
1220  decoder_destroy(&is->viddec);
1221  break;
1222  case AVMEDIA_TYPE_SUBTITLE:
1223  decoder_abort(&is->subdec, &is->subpq);
1224  decoder_destroy(&is->subdec);
1225  break;
1226  default:
1227  break;
1228  }
1229 
1230  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1231  switch (codecpar->codec_type) {
1232  case AVMEDIA_TYPE_AUDIO:
1233  is->audio_st = NULL;
1234  is->audio_stream = -1;
1235  break;
1236  case AVMEDIA_TYPE_VIDEO:
1237  is->video_st = NULL;
1238  is->video_stream = -1;
1239  break;
1240  case AVMEDIA_TYPE_SUBTITLE:
1241  is->subtitle_st = NULL;
1242  is->subtitle_stream = -1;
1243  break;
1244  default:
1245  break;
1246  }
1247 }
1248 
1250 {
1251  /* XXX: use a special url_shutdown call to abort parse cleanly */
1252  is->abort_request = 1;
1253  SDL_WaitThread(is->read_tid, NULL);
1254 
1255  /* close each stream */
1256  if (is->audio_stream >= 0)
1257  stream_component_close(is, is->audio_stream);
1258  if (is->video_stream >= 0)
1259  stream_component_close(is, is->video_stream);
1260  if (is->subtitle_stream >= 0)
1261  stream_component_close(is, is->subtitle_stream);
1262 
1263  avformat_close_input(&is->ic);
1264 
1265  packet_queue_destroy(&is->videoq);
1266  packet_queue_destroy(&is->audioq);
1267  packet_queue_destroy(&is->subtitleq);
1268 
1269  /* free all pictures */
1270  frame_queue_destroy(&is->pictq);
1271  frame_queue_destroy(&is->sampq);
1272  frame_queue_destroy(&is->subpq);
1273  SDL_DestroyCond(is->continue_read_thread);
1274  sws_freeContext(is->sub_convert_ctx);
1275  av_free(is->filename);
1276  if (is->vis_texture)
1277  SDL_DestroyTexture(is->vis_texture);
1278  if (is->vid_texture)
1279  SDL_DestroyTexture(is->vid_texture);
1280  if (is->sub_texture)
1281  SDL_DestroyTexture(is->sub_texture);
1282  av_free(is);
1283 }
1284 
1285 static void do_exit(VideoState *is)
1286 {
1287  if (is) {
1288  stream_close(is);
1289  }
1290  if (renderer)
1291  SDL_DestroyRenderer(renderer);
1292  if (window)
1293  SDL_DestroyWindow(window);
1294  uninit_opts();
1297  if (show_status)
1298  printf("\n");
1299  SDL_Quit();
1300  av_log(NULL, AV_LOG_QUIET, "%s", "");
1301  exit(0);
1302 }
1303 
1304 static void sigterm_handler(int sig)
1305 {
1306  exit(123);
1307 }
1308 
1310 {
1311  SDL_Rect rect;
1312  int max_width = screen_width ? screen_width : INT_MAX;
1313  int max_height = screen_height ? screen_height : INT_MAX;
1314  if (max_width == INT_MAX && max_height == INT_MAX)
1315  max_height = height;
1316  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1317  default_width = rect.w;
1318  default_height = rect.h;
1319 }
1320 
1322 {
1323  int w,h;
1324 
1327 
1328  if (!window_title)
1330  SDL_SetWindowTitle(window, window_title);
1331 
1332  SDL_SetWindowSize(window, w, h);
1333  SDL_SetWindowPosition(window, screen_left, screen_top);
1334  if (is_full_screen)
1335  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1336  SDL_ShowWindow(window);
1337 
1338  is->width = w;
1339  is->height = h;
1340 
1341  return 0;
1342 }
1343 
1344 /* display the current picture, if any */
1346 {
1347  if (!is->width)
1348  video_open(is);
1349 
1350  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1351  SDL_RenderClear(renderer);
1352  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1354  else if (is->video_st)
1356  SDL_RenderPresent(renderer);
1357 }
1358 
1359 static double get_clock(Clock *c)
1360 {
1361  if (*c->queue_serial != c->serial)
1362  return NAN;
1363  if (c->paused) {
1364  return c->pts;
1365  } else {
1366  double time = av_gettime_relative() / 1000000.0;
1367  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1368  }
1369 }
1370 
1371 static void set_clock_at(Clock *c, double pts, int serial, double time)
1372 {
1373  c->pts = pts;
1374  c->last_updated = time;
1375  c->pts_drift = c->pts - time;
1376  c->serial = serial;
1377 }
1378 
1379 static void set_clock(Clock *c, double pts, int serial)
1380 {
1381  double time = av_gettime_relative() / 1000000.0;
1382  set_clock_at(c, pts, serial, time);
1383 }
1384 
1385 static void set_clock_speed(Clock *c, double speed)
1386 {
1387  set_clock(c, get_clock(c), c->serial);
1388  c->speed = speed;
1389 }
1390 
1391 static void init_clock(Clock *c, int *queue_serial)
1392 {
1393  c->speed = 1.0;
1394  c->paused = 0;
1395  c->queue_serial = queue_serial;
1396  set_clock(c, NAN, -1);
1397 }
1398 
1399 static void sync_clock_to_slave(Clock *c, Clock *slave)
1400 {
1401  double clock = get_clock(c);
1402  double slave_clock = get_clock(slave);
1403  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1404  set_clock(c, slave_clock, slave->serial);
1405 }
1406 
1408  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1409  if (is->video_st)
1410  return AV_SYNC_VIDEO_MASTER;
1411  else
1412  return AV_SYNC_AUDIO_MASTER;
1413  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1414  if (is->audio_st)
1415  return AV_SYNC_AUDIO_MASTER;
1416  else
1417  return AV_SYNC_EXTERNAL_CLOCK;
1418  } else {
1419  return AV_SYNC_EXTERNAL_CLOCK;
1420  }
1421 }
1422 
1423 /* get the current master clock value */
1425 {
1426  double val;
1427 
1428  switch (get_master_sync_type(is)) {
1429  case AV_SYNC_VIDEO_MASTER:
1430  val = get_clock(&is->vidclk);
1431  break;
1432  case AV_SYNC_AUDIO_MASTER:
1433  val = get_clock(&is->audclk);
1434  break;
1435  default:
1436  val = get_clock(&is->extclk);
1437  break;
1438  }
1439  return val;
1440 }
1441 
1443  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1444  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1446  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1447  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1449  } else {
1450  double speed = is->extclk.speed;
1451  if (speed != 1.0)
1452  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1453  }
1454 }
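/*
 * Sketch of the intent (the two clamped set_clock_speed() calls are not
 * visible in this listing): when either packet queue drains to
 * EXTERNAL_CLOCK_MIN_FRAMES the external clock is slowed towards
 * EXTERNAL_CLOCK_SPEED_MIN, when both queues exceed
 * EXTERNAL_CLOCK_MAX_FRAMES it is sped up towards
 * EXTERNAL_CLOCK_SPEED_MAX, and otherwise the branch above walks the speed
 * back towards 1.0 in EXTERNAL_CLOCK_SPEED_STEP increments.
 */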
1455 
1456 /* seek in the stream */
1457 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1458 {
1459  if (!is->seek_req) {
1460  is->seek_pos = pos;
1461  is->seek_rel = rel;
1462  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1463  if (by_bytes)
1464  is->seek_flags |= AVSEEK_FLAG_BYTE;
1465  is->seek_req = 1;
1466  SDL_CondSignal(is->continue_read_thread);
1467  }
1468 }
1469 
1470 /* pause or resume the video */
1472 {
1473  if (is->paused) {
1474  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1475  if (is->read_pause_return != AVERROR(ENOSYS)) {
1476  is->vidclk.paused = 0;
1477  }
1478  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1479  }
1480  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1481  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1482 }
1483 
1485 {
1487  is->step = 0;
1488 }
1489 
1491 {
1492  is->muted = !is->muted;
1493 }
1494 
1495 static void update_volume(VideoState *is, int sign, double step)
1496 {
1497  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1498  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1499  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1500 }
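/*
 * Example: with is->audio_volume at SDL_MIX_MAXVOLUME (128, i.e. 0 dB), a
 * call with sign = -1 and step = SDL_VOLUME_STEP computes
 * 128 * 10^(-0.75/20) ~= 117, so each step changes the volume by 0.75 dB
 * rather than by a fixed linear amount.
 */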
1501 
1503 {
1504  /* if the stream is paused, unpause it, then step */
1505  if (is->paused)
1507  is->step = 1;
1508 }
1509 
1510 static double compute_target_delay(double delay, VideoState *is)
1511 {
1512  double sync_threshold, diff = 0;
1513 
1514  /* update delay to follow master synchronisation source */
1516  /* if video is slave, we try to correct big delays by
1517  duplicating or deleting a frame */
1518  diff = get_clock(&is->vidclk) - get_master_clock(is);
1519 
1520  /* skip or repeat frame. We take into account the
1521  delay to compute the threshold. I still don't know
1522  if it is the best guess */
1523  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1524  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1525  if (diff <= -sync_threshold)
1526  delay = FFMAX(0, delay + diff);
1527  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1528  delay = delay + diff;
1529  else if (diff >= sync_threshold)
1530  delay = 2 * delay;
1531  }
1532  }
1533 
1534  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1535  delay, -diff);
1536 
1537  return delay;
1538 }
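/*
 * Example with a nominal 40 ms frame delay (sync_threshold clamps to
 * 0.04 s): if the video clock is 60 ms behind the master, diff = -0.06 and
 * the returned delay collapses to FFMAX(0, 0.04 - 0.06) = 0, showing the
 * next frame immediately; if video is 60 ms ahead, the delay is doubled to
 * 0.08 s so the master clock can catch up.
 */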
1539 
1540 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1541  if (vp->serial == nextvp->serial) {
1542  double duration = nextvp->pts - vp->pts;
1543  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1544  return vp->duration;
1545  else
1546  return duration;
1547  } else {
1548  return 0.0;
1549  }
1550 }
1551 
1552 static void update_video_pts(VideoState *is, double pts, int serial)
1553 {
1554  /* update current video pts */
1555  set_clock(&is->vidclk, pts, serial);
1556  sync_clock_to_slave(&is->extclk, &is->vidclk);
1557 }
1558 
1559 /* called to display each frame */
1560 static void video_refresh(void *opaque, double *remaining_time)
1561 {
1562  VideoState *is = opaque;
1563  double time;
1564 
1565  Frame *sp, *sp2;
1566 
1567  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1569 
1570  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1571  time = av_gettime_relative() / 1000000.0;
1572  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1573  video_display(is);
1574  is->last_vis_time = time;
1575  }
1576  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1577  }
1578 
1579  if (is->video_st) {
1580 retry:
1581  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1582  // nothing to do, no picture to display in the queue
1583  } else {
1584  double last_duration, duration, delay;
1585  Frame *vp, *lastvp;
1586 
1587  /* dequeue the picture */
1588  lastvp = frame_queue_peek_last(&is->pictq);
1589  vp = frame_queue_peek(&is->pictq);
1590 
1591  if (vp->serial != is->videoq.serial) {
1592  frame_queue_next(&is->pictq);
1593  goto retry;
1594  }
1595 
1596  if (lastvp->serial != vp->serial)
1597  is->frame_timer = av_gettime_relative() / 1000000.0;
1598 
1599  if (is->paused)
1600  goto display;
1601 
1602  /* compute nominal last_duration */
1603  last_duration = vp_duration(is, lastvp, vp);
1604  delay = compute_target_delay(last_duration, is);
1605 
1606  time= av_gettime_relative()/1000000.0;
1607  if (time < is->frame_timer + delay) {
1608  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1609  goto display;
1610  }
1611 
1612  is->frame_timer += delay;
1613  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1614  is->frame_timer = time;
1615 
1616  SDL_LockMutex(is->pictq.mutex);
1617  if (!isnan(vp->pts))
1618  update_video_pts(is, vp->pts, vp->serial);
1619  SDL_UnlockMutex(is->pictq.mutex);
1620 
1621  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1622  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1623  duration = vp_duration(is, vp, nextvp);
1624  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1625  is->frame_drops_late++;
1626  frame_queue_next(&is->pictq);
1627  goto retry;
1628  }
1629  }
1630 
1631  if (is->subtitle_st) {
1632  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1633  sp = frame_queue_peek(&is->subpq);
1634 
1635  if (frame_queue_nb_remaining(&is->subpq) > 1)
1636  sp2 = frame_queue_peek_next(&is->subpq);
1637  else
1638  sp2 = NULL;
1639 
1640  if (sp->serial != is->subtitleq.serial
1641  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1642  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1643  {
1644  if (sp->uploaded) {
1645  int i;
1646  for (i = 0; i < sp->sub.num_rects; i++) {
1647  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1648  uint8_t *pixels;
1649  int pitch, j;
1650 
1651  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1652  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1653  memset(pixels, 0, sub_rect->w << 2);
1654  SDL_UnlockTexture(is->sub_texture);
1655  }
1656  }
1657  }
1658  frame_queue_next(&is->subpq);
1659  } else {
1660  break;
1661  }
1662  }
1663  }
1664 
1665  frame_queue_next(&is->pictq);
1666  is->force_refresh = 1;
1667 
1668  if (is->step && !is->paused)
1670  }
1671 display:
1672  /* display picture */
1673  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1674  video_display(is);
1675  }
1676  is->force_refresh = 0;
1677  if (show_status) {
1678  AVBPrint buf;
1679  static int64_t last_time;
1680  int64_t cur_time;
1681  int aqsize, vqsize, sqsize;
1682  double av_diff;
1683 
1684  cur_time = av_gettime_relative();
1685  if (!last_time || (cur_time - last_time) >= 30000) {
1686  aqsize = 0;
1687  vqsize = 0;
1688  sqsize = 0;
1689  if (is->audio_st)
1690  aqsize = is->audioq.size;
1691  if (is->video_st)
1692  vqsize = is->videoq.size;
1693  if (is->subtitle_st)
1694  sqsize = is->subtitleq.size;
1695  av_diff = 0;
1696  if (is->audio_st && is->video_st)
1697  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1698  else if (is->video_st)
1699  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1700  else if (is->audio_st)
1701  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1702 
1704  av_bprintf(&buf,
1705  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1707  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1708  av_diff,
1709  is->frame_drops_early + is->frame_drops_late,
1710  aqsize / 1024,
1711  vqsize / 1024,
1712  sqsize,
1713  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1714  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1715 
1716  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1717  fprintf(stderr, "%s", buf.str);
1718  else
1719  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1720 
1721  fflush(stderr);
1722  av_bprint_finalize(&buf, NULL);
1723 
1724  last_time = cur_time;
1725  }
1726  }
1727 }
1728 
1729 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1730 {
1731  Frame *vp;
1732 
1733 #if defined(DEBUG_SYNC)
1734  printf("frame_type=%c pts=%0.3f\n",
1735  av_get_picture_type_char(src_frame->pict_type), pts);
1736 #endif
1737 
1738  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1739  return -1;
1740 
1741  vp->sar = src_frame->sample_aspect_ratio;
1742  vp->uploaded = 0;
1743 
1744  vp->width = src_frame->width;
1745  vp->height = src_frame->height;
1746  vp->format = src_frame->format;
1747 
1748  vp->pts = pts;
1749  vp->duration = duration;
1750  vp->pos = pos;
1751  vp->serial = serial;
1752 
1753  set_default_window_size(vp->width, vp->height, vp->sar);
1754 
1755  av_frame_move_ref(vp->frame, src_frame);
1756  frame_queue_push(&is->pictq);
1757  return 0;
1758 }
1759 
1761 {
1762  int got_picture;
1763 
1764  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1765  return -1;
1766 
1767  if (got_picture) {
1768  double dpts = NAN;
1769 
1770  if (frame->pts != AV_NOPTS_VALUE)
1771  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1772 
1773  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1774 
1776  if (frame->pts != AV_NOPTS_VALUE) {
1777  double diff = dpts - get_master_clock(is);
1778  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1779  diff - is->frame_last_filter_delay < 0 &&
1780  is->viddec.pkt_serial == is->vidclk.serial &&
1781  is->videoq.nb_packets) {
1782  is->frame_drops_early++;
1784  got_picture = 0;
1785  }
1786  }
1787  }
1788  }
1789 
1790  return got_picture;
1791 }
1792 
1793 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1794  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1795 {
1796  int ret, i;
1797  int nb_filters = graph->nb_filters;
1799 
1800  if (filtergraph) {
1803  if (!outputs || !inputs) {
1804  ret = AVERROR(ENOMEM);
1805  goto fail;
1806  }
1807 
1808  outputs->name = av_strdup("in");
1809  outputs->filter_ctx = source_ctx;
1810  outputs->pad_idx = 0;
1811  outputs->next = NULL;
1812 
1813  inputs->name = av_strdup("out");
1814  inputs->filter_ctx = sink_ctx;
1815  inputs->pad_idx = 0;
1816  inputs->next = NULL;
1817 
1818  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1819  goto fail;
1820  } else {
1821  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1822  goto fail;
1823  }
1824 
1825  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1826  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1827  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1828 
1829  ret = avfilter_graph_config(graph, NULL);
1830 fail:
1833  return ret;
1834 }
1835 
1836 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1837 {
1839  char sws_flags_str[512] = "";
1840  char buffersrc_args[256];
1841  int ret;
1842  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1843  AVCodecParameters *codecpar = is->video_st->codecpar;
1844  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1845  const AVDictionaryEntry *e = NULL;
1846  int nb_pix_fmts = 0;
1847  int i, j;
1848 
1849  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1850  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1851  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1852  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1853  break;
1854  }
1855  }
1856  }
1857  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1858 
1859  while ((e = av_dict_iterate(sws_dict, e))) {
1860  if (!strcmp(e->key, "sws_flags")) {
1861  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1862  } else
1863  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1864  }
1865  if (strlen(sws_flags_str))
1866  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1867 
1868  graph->scale_sws_opts = av_strdup(sws_flags_str);
1869 
1870  snprintf(buffersrc_args, sizeof(buffersrc_args),
1871  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1872  frame->width, frame->height, frame->format,
1873  is->video_st->time_base.num, is->video_st->time_base.den,
1874  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1875  if (fr.num && fr.den)
1876  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1877 
1878  if ((ret = avfilter_graph_create_filter(&filt_src,
1879  avfilter_get_by_name("buffer"),
1880  "ffplay_buffer", buffersrc_args, NULL,
1881  graph)) < 0)
1882  goto fail;
1883 
1884  ret = avfilter_graph_create_filter(&filt_out,
1885  avfilter_get_by_name("buffersink"),
1886  "ffplay_buffersink", NULL, NULL, graph);
1887  if (ret < 0)
1888  goto fail;
1889 
1890  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1891  goto fail;
1892 
1893  last_filter = filt_out;
1894 
1895 /* Note: this macro adds a filter before the most recently added filter, so the
1896  * filters are processed in the reverse of their insertion order */
1897 #define INSERT_FILT(name, arg) do { \
1898  AVFilterContext *filt_ctx; \
1899  \
1900  ret = avfilter_graph_create_filter(&filt_ctx, \
1901  avfilter_get_by_name(name), \
1902  "ffplay_" name, arg, NULL, graph); \
1903  if (ret < 0) \
1904  goto fail; \
1905  \
1906  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1907  if (ret < 0) \
1908  goto fail; \
1909  \
1910  last_filter = filt_ctx; \
1911 } while (0)
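/*
 * Example of the reversed order: in the 180-degree autorotate case below,
 * INSERT_FILT("hflip") runs first and links hflip -> buffersink, then
 * INSERT_FILT("vflip") links vflip -> hflip, so the actual processing chain
 * is source -> vflip -> hflip -> sink, the reverse of the insertion order.
 */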
1912 
1913  if (autorotate) {
1914  double theta = 0.0;
1915  int32_t *displaymatrix = NULL;
1917  if (sd)
1918  displaymatrix = (int32_t *)sd->data;
1919  if (!displaymatrix)
1920  displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
1921  theta = get_rotation(displaymatrix);
1922 
1923  if (fabs(theta - 90) < 1.0) {
1924  INSERT_FILT("transpose", "clock");
1925  } else if (fabs(theta - 180) < 1.0) {
1926  INSERT_FILT("hflip", NULL);
1927  INSERT_FILT("vflip", NULL);
1928  } else if (fabs(theta - 270) < 1.0) {
1929  INSERT_FILT("transpose", "cclock");
1930  } else if (fabs(theta) > 1.0) {
1931  char rotate_buf[64];
1932  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1933  INSERT_FILT("rotate", rotate_buf);
1934  }
1935  }
1936 
1937  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1938  goto fail;
1939 
1940  is->in_video_filter = filt_src;
1941  is->out_video_filter = filt_out;
1942 
1943 fail:
1944  return ret;
1945 }
1946 
1947 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1948 {
1950  int sample_rates[2] = { 0, -1 };
1951  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1952  char aresample_swr_opts[512] = "";
1953  const AVDictionaryEntry *e = NULL;
1954  AVBPrint bp;
1955  char asrc_args[256];
1956  int ret;
1957 
1958  avfilter_graph_free(&is->agraph);
1959  if (!(is->agraph = avfilter_graph_alloc()))
1960  return AVERROR(ENOMEM);
1961  is->agraph->nb_threads = filter_nbthreads;
1962 
1964 
1965  while ((e = av_dict_iterate(swr_opts, e)))
1966  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1967  if (strlen(aresample_swr_opts))
1968  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1969  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1970 
1971  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
1972 
1973  ret = snprintf(asrc_args, sizeof(asrc_args),
1974  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
1975  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1976  1, is->audio_filter_src.freq, bp.str);
1977 
1978  ret = avfilter_graph_create_filter(&filt_asrc,
1979  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1980  asrc_args, NULL, is->agraph);
1981  if (ret < 0)
1982  goto end;
1983 
1984 
1985  ret = avfilter_graph_create_filter(&filt_asink,
1986  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1987  NULL, NULL, is->agraph);
1988  if (ret < 0)
1989  goto end;
1990 
1991  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1992  goto end;
1993  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1994  goto end;
1995 
1996  if (force_output_format) {
1997  sample_rates [0] = is->audio_tgt.freq;
1998  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1999  goto end;
2000  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2001  goto end;
2002  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2003  goto end;
2004  }
2005 
2006 
2007  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2008  goto end;
2009 
2010  is->in_audio_filter = filt_asrc;
2011  is->out_audio_filter = filt_asink;
2012 
2013 end:
2014  if (ret < 0)
2015  avfilter_graph_free(&is->agraph);
2016  av_bprint_finalize(&bp, NULL);
2017 
2018  return ret;
2019 }
2020 
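/* Audio decoder thread: pulls decoded frames, rebuilds the audio filter graph
 * whenever the source format, channel layout, sample rate or packet serial
 * changes, and queues the filtered frames for the SDL audio callback. */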
2021 static int audio_thread(void *arg)
2022 {
2023  VideoState *is = arg;
2024  AVFrame *frame = av_frame_alloc();
2025  Frame *af;
2026  int last_serial = -1;
2027  int reconfigure;
2028  int got_frame = 0;
2029  AVRational tb;
2030  int ret = 0;
2031 
2032  if (!frame)
2033  return AVERROR(ENOMEM);
2034 
2035  do {
2036  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2037  goto the_end;
2038 
2039  if (got_frame) {
2040  tb = (AVRational){1, frame->sample_rate};
2041 
2042  reconfigure =
2043  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2044  frame->format, frame->ch_layout.nb_channels) ||
2045  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2046  is->audio_filter_src.freq != frame->sample_rate ||
2047  is->auddec.pkt_serial != last_serial;
2048 
2049  if (reconfigure) {
2050  char buf1[1024], buf2[1024];
2051  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2052  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2053  av_log(NULL, AV_LOG_DEBUG,
2054  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2055  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2056  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2057 
2058  is->audio_filter_src.fmt = frame->format;
2059  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2060  if (ret < 0)
2061  goto the_end;
2062  is->audio_filter_src.freq = frame->sample_rate;
2063  last_serial = is->auddec.pkt_serial;
2064 
2065  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2066  goto the_end;
2067  }
2068 
2069  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2070  goto the_end;
2071 
2072  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2073  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2074  tb = av_buffersink_get_time_base(is->out_audio_filter);
2075  if (!(af = frame_queue_peek_writable(&is->sampq)))
2076  goto the_end;
2077 
2078  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2079  af->pos = fd ? fd->pkt_pos : -1;
2080  af->serial = is->auddec.pkt_serial;
2081  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2082 
2083  av_frame_move_ref(af->frame, frame);
2084  frame_queue_push(&is->sampq);
2085 
2086  if (is->audioq.serial != is->auddec.pkt_serial)
2087  break;
2088  }
2089  if (ret == AVERROR_EOF)
2090  is->auddec.finished = is->auddec.pkt_serial;
2091  }
2092  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2093  the_end:
2094  avfilter_graph_free(&is->agraph);
2095  av_frame_free(&frame);
2096  return ret;
2097 }
2098 
2099 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2100 {
2101  packet_queue_start(d->queue);
2102  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2103  if (!d->decoder_tid) {
2104  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2105  return AVERROR(ENOMEM);
2106  }
2107  return 0;
2108 }
2109 
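/* Video decoder thread: decodes frames, (re)configures the video filter graph
 * when the frame size, pixel format, packet serial or selected filter chain
 * changes, and queues the filtered pictures with their pts and duration. */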
2110 static int video_thread(void *arg)
2111 {
2112  VideoState *is = arg;
2113  AVFrame *frame = av_frame_alloc();
2114  double pts;
2115  double duration;
2116  int ret;
2117  AVRational tb = is->video_st->time_base;
2118  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2119 
2120  AVFilterGraph *graph = NULL;
2121  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2122  int last_w = 0;
2123  int last_h = 0;
2124  enum AVPixelFormat last_format = -2;
2125  int last_serial = -1;
2126  int last_vfilter_idx = 0;
2127 
2128  if (!frame)
2129  return AVERROR(ENOMEM);
2130 
2131  for (;;) {
2132  ret = get_video_frame(is, frame);
2133  if (ret < 0)
2134  goto the_end;
2135  if (!ret)
2136  continue;
2137 
2138  if ( last_w != frame->width
2139  || last_h != frame->height
2140  || last_format != frame->format
2141  || last_serial != is->viddec.pkt_serial
2142  || last_vfilter_idx != is->vfilter_idx) {
2143  av_log(NULL, AV_LOG_DEBUG,
2144  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2145  last_w, last_h,
2146  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2147  frame->width, frame->height,
2148  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2149  avfilter_graph_free(&graph);
2150  graph = avfilter_graph_alloc();
2151  if (!graph) {
2152  ret = AVERROR(ENOMEM);
2153  goto the_end;
2154  }
2155  graph->nb_threads = filter_nbthreads;
2156  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2157  SDL_Event event;
2158  event.type = FF_QUIT_EVENT;
2159  event.user.data1 = is;
2160  SDL_PushEvent(&event);
2161  goto the_end;
2162  }
2163  filt_in = is->in_video_filter;
2164  filt_out = is->out_video_filter;
2165  last_w = frame->width;
2166  last_h = frame->height;
2167  last_format = frame->format;
2168  last_serial = is->viddec.pkt_serial;
2169  last_vfilter_idx = is->vfilter_idx;
2170  frame_rate = av_buffersink_get_frame_rate(filt_out);
2171  }
2172 
2173  ret = av_buffersrc_add_frame(filt_in, frame);
2174  if (ret < 0)
2175  goto the_end;
2176 
2177  while (ret >= 0) {
2178  FrameData *fd;
2179 
2180  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2181 
2182  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2183  if (ret < 0) {
2184  if (ret == AVERROR_EOF)
2185  is->viddec.finished = is->viddec.pkt_serial;
2186  ret = 0;
2187  break;
2188  }
2189 
2190  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2191 
2192  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2193  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2194  is->frame_last_filter_delay = 0;
2195  tb = av_buffersink_get_time_base(filt_out);
2196  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2197  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2198  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2199  av_frame_unref(frame);
2200  if (is->videoq.serial != is->viddec.pkt_serial)
2201  break;
2202  }
2203 
2204  if (ret < 0)
2205  goto the_end;
2206  }
2207  the_end:
2208  avfilter_graph_free(&graph);
2209  av_frame_free(&frame);
2210  return 0;
2211 }
2212 
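/* Subtitle decoder thread: decodes subtitles and queues bitmap subtitles
 * (sub.format == 0) with their pts so they can be blended onto the video. */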
2213 static int subtitle_thread(void *arg)
2214 {
2215  VideoState *is = arg;
2216  Frame *sp;
2217  int got_subtitle;
2218  double pts;
2219 
2220  for (;;) {
2221  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2222  return 0;
2223 
2224  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2225  break;
2226 
2227  pts = 0;
2228 
2229  if (got_subtitle && sp->sub.format == 0) {
2230  if (sp->sub.pts != AV_NOPTS_VALUE)
2231  pts = sp->sub.pts / (double)AV_TIME_BASE;
2232  sp->pts = pts;
2233  sp->serial = is->subdec.pkt_serial;
2234  sp->width = is->subdec.avctx->width;
2235  sp->height = is->subdec.avctx->height;
2236  sp->uploaded = 0;
2237 
2238  /* now we can update the picture count */
2239  frame_queue_push(&is->subpq);
2240  } else if (got_subtitle) {
2241  avsubtitle_free(&sp->sub);
2242  }
2243  }
2244  return 0;
2245 }
2246 
2247 /* copy samples into the circular buffer used for the waveform/spectrum display */
2248 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2249 {
2250  int size, len;
2251 
2252  size = samples_size / sizeof(short);
2253  while (size > 0) {
2254  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2255  if (len > size)
2256  len = size;
2257  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2258  samples += len;
2259  is->sample_array_index += len;
2260  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2261  is->sample_array_index = 0;
2262  size -= len;
2263  }
2264 }
2265 
2266 /* return the wanted number of samples to get better sync if sync_type is video
2267  * or external master clock */
2268 static int synchronize_audio(VideoState *is, int nb_samples)
2269 {
2270  int wanted_nb_samples = nb_samples;
2271 
2272  /* if not master, then we try to remove or add samples to correct the clock */
2273  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2274  double diff, avg_diff;
2275  int min_nb_samples, max_nb_samples;
2276 
2277  diff = get_clock(&is->audclk) - get_master_clock(is);
2278 
2279  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2280  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
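/* audio_diff_cum is an exponentially weighted sum: with
 * audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), multiplying by
 * (1 - coef) below turns it into a moving average in which the last
 * AUDIO_DIFF_AVG_NB differences carry about 99% of the weight. */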
2281  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2282  /* not enough measures to have a correct estimate */
2283  is->audio_diff_avg_count++;
2284  } else {
2285  /* estimate the A-V difference */
2286  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2287 
2288  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2289  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2290  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2291  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2292  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2293  }
2294  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2295  diff, avg_diff, wanted_nb_samples - nb_samples,
2296  is->audio_clock, is->audio_diff_threshold);
2297  }
2298  } else {
2299  /* too big a difference: probably initial PTS errors, so
2300  reset the A-V filter */
2301  is->audio_diff_avg_count = 0;
2302  is->audio_diff_cum = 0;
2303  }
2304  }
2305 
2306  return wanted_nb_samples;
2307 }
2308 
2309 /**
2310  * Decode one audio frame and return its uncompressed size.
2311  *
2312  * The processed audio frame is decoded, converted if required, and
2313  * stored in is->audio_buf, with size in bytes given by the return
2314  * value.
2315  */
2316 static int audio_decode_frame(VideoState *is)
2317 {
2318  int data_size, resampled_data_size;
2319  av_unused double audio_clock0;
2320  int wanted_nb_samples;
2321  Frame *af;
2322 
2323  if (is->paused)
2324  return -1;
2325 
2326  do {
2327 #if defined(_WIN32)
2328  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2329  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2330  return -1;
2331  av_usleep (1000);
2332  }
2333 #endif
2334  if (!(af = frame_queue_peek_readable(&is->sampq)))
2335  return -1;
2336  frame_queue_next(&is->sampq);
2337  } while (af->serial != is->audioq.serial);
2338 
2339  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2340  af->frame->nb_samples,
2341  af->frame->format, 1);
2342 
2343  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2344 
2345  if (af->frame->format != is->audio_src.fmt ||
2346  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2347  af->frame->sample_rate != is->audio_src.freq ||
2348  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2349  swr_free(&is->swr_ctx);
2350  swr_alloc_set_opts2(&is->swr_ctx,
2351  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2352  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2353  0, NULL);
2354  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2355  av_log(NULL, AV_LOG_ERROR,
2356  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2357  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2358  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2359  swr_free(&is->swr_ctx);
2360  return -1;
2361  }
2362  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2363  return -1;
2364  is->audio_src.freq = af->frame->sample_rate;
2365  is->audio_src.fmt = af->frame->format;
2366  }
2367 
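/* Resample with libswresample: out_count adds a little headroom (256 samples)
 * on top of the rate-converted wanted_nb_samples, and swr_set_compensation()
 * slightly stretches or squeezes the audio when synchronize_audio() requested
 * a sample count different from what the frame actually contains. */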
2368  if (is->swr_ctx) {
2369  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2370  uint8_t **out = &is->audio_buf1;
2371  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2372  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2373  int len2;
2374  if (out_size < 0) {
2375  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2376  return -1;
2377  }
2378  if (wanted_nb_samples != af->frame->nb_samples) {
2379  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2380  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2381  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2382  return -1;
2383  }
2384  }
2385  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2386  if (!is->audio_buf1)
2387  return AVERROR(ENOMEM);
2388  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2389  if (len2 < 0) {
2390  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2391  return -1;
2392  }
2393  if (len2 == out_count) {
2394  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2395  if (swr_init(is->swr_ctx) < 0)
2396  swr_free(&is->swr_ctx);
2397  }
2398  is->audio_buf = is->audio_buf1;
2399  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2400  } else {
2401  is->audio_buf = af->frame->data[0];
2402  resampled_data_size = data_size;
2403  }
2404 
2405  audio_clock0 = is->audio_clock;
2406  /* update the audio clock with the pts */
2407  if (!isnan(af->pts))
2408  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2409  else
2410  is->audio_clock = NAN;
2411  is->audio_clock_serial = af->serial;
2412 #ifdef DEBUG
2413  {
2414  static double last_clock;
2415  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2416  is->audio_clock - last_clock,
2417  is->audio_clock, audio_clock0);
2418  last_clock = is->audio_clock;
2419  }
2420 #endif
2421  return resampled_data_size;
2422 }
2423 
2424 /* prepare a new audio buffer */
2425 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2426 {
2427  VideoState *is = opaque;
2428  int audio_size, len1;
2429 
2430  audio_callback_time = av_gettime_relative();
2431 
2432  while (len > 0) {
2433  if (is->audio_buf_index >= is->audio_buf_size) {
2434  audio_size = audio_decode_frame(is);
2435  if (audio_size < 0) {
2436  /* if error, just output silence */
2437  is->audio_buf = NULL;
2438  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2439  } else {
2440  if (is->show_mode != SHOW_MODE_VIDEO)
2441  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2442  is->audio_buf_size = audio_size;
2443  }
2444  is->audio_buf_index = 0;
2445  }
2446  len1 = is->audio_buf_size - is->audio_buf_index;
2447  if (len1 > len)
2448  len1 = len;
2449  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2450  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2451  else {
2452  memset(stream, 0, len1);
2453  if (!is->muted && is->audio_buf)
2454  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2455  }
2456  len -= len1;
2457  stream += len1;
2458  is->audio_buf_index += len1;
2459  }
2460  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2461  /* Let's assume the audio driver that is used by SDL has two periods. */
2462  if (!isnan(is->audio_clock)) {
2463  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2464  sync_clock_to_slave(&is->extclk, &is->audclk);
2465  }
2466 }
2467 
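/* Open the SDL audio device, retrying with smaller channel counts and lower
 * sample rates until SDL accepts a configuration; the parameters actually
 * granted are returned through audio_hw_params and the buffer size in bytes
 * is the return value. */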
2468 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2469 {
2470  SDL_AudioSpec wanted_spec, spec;
2471  const char *env;
2472  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2473  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2474  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2475  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2476 
2477  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2478  if (env) {
2479  wanted_nb_channels = atoi(env);
2480  av_channel_layout_uninit(wanted_channel_layout);
2481  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2482  }
2483  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2484  av_channel_layout_uninit(wanted_channel_layout);
2485  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2486  }
2487  wanted_nb_channels = wanted_channel_layout->nb_channels;
2488  wanted_spec.channels = wanted_nb_channels;
2489  wanted_spec.freq = wanted_sample_rate;
2490  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2491  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2492  return -1;
2493  }
2494  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2495  next_sample_rate_idx--;
2496  wanted_spec.format = AUDIO_S16SYS;
2497  wanted_spec.silence = 0;
2498  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2499  wanted_spec.callback = sdl_audio_callback;
2500  wanted_spec.userdata = opaque;
2501  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2502  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2503  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2504  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2505  if (!wanted_spec.channels) {
2506  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2507  wanted_spec.channels = wanted_nb_channels;
2508  if (!wanted_spec.freq) {
2509  av_log(NULL, AV_LOG_ERROR,
2510  "No more combinations to try, audio open failed\n");
2511  return -1;
2512  }
2513  }
2514  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2515  }
2516  if (spec.format != AUDIO_S16SYS) {
2517  av_log(NULL, AV_LOG_ERROR,
2518  "SDL advised audio format %d is not supported!\n", spec.format);
2519  return -1;
2520  }
2521  if (spec.channels != wanted_spec.channels) {
2522  av_channel_layout_uninit(wanted_channel_layout);
2523  av_channel_layout_default(wanted_channel_layout, spec.channels);
2524  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2525  av_log(NULL, AV_LOG_ERROR,
2526  "SDL advised channel count %d is not supported!\n", spec.channels);
2527  return -1;
2528  }
2529  }
2530 
2531  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2532  audio_hw_params->freq = spec.freq;
2533  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2534  return -1;
2535  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2536  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2537  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2538  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2539  return -1;
2540  }
2541  return spec.size;
2542 }
2543 
2544 /* open a given stream. Return 0 if OK */
2545 static int stream_component_open(VideoState *is, int stream_index)
2546 {
2547  AVFormatContext *ic = is->ic;
2548  AVCodecContext *avctx;
2549  const AVCodec *codec;
2550  const char *forced_codec_name = NULL;
2551  AVDictionary *opts = NULL;
2552  const AVDictionaryEntry *t = NULL;
2553  int sample_rate;
2554  AVChannelLayout ch_layout = { 0 };
2555  int ret = 0;
2556  int stream_lowres = lowres;
2557 
2558  if (stream_index < 0 || stream_index >= ic->nb_streams)
2559  return -1;
2560 
2561  avctx = avcodec_alloc_context3(NULL);
2562  if (!avctx)
2563  return AVERROR(ENOMEM);
2564 
2565  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2566  if (ret < 0)
2567  goto fail;
2568  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2569 
2570  codec = avcodec_find_decoder(avctx->codec_id);
2571 
2572  switch(avctx->codec_type){
2573  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2574  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2575  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2576  }
2577  if (forced_codec_name)
2578  codec = avcodec_find_decoder_by_name(forced_codec_name);
2579  if (!codec) {
2580  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2581  "No codec could be found with name '%s'\n", forced_codec_name);
2582  else av_log(NULL, AV_LOG_WARNING,
2583  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2584  ret = AVERROR(EINVAL);
2585  goto fail;
2586  }
2587 
2588  avctx->codec_id = codec->id;
2589  if (stream_lowres > codec->max_lowres) {
2590  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2591  codec->max_lowres);
2592  stream_lowres = codec->max_lowres;
2593  }
2594  avctx->lowres = stream_lowres;
2595 
2596  if (fast)
2597  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2598 
2599  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2600  ic->streams[stream_index], codec, &opts);
2601  if (ret < 0)
2602  goto fail;
2603 
2604  if (!av_dict_get(opts, "threads", NULL, 0))
2605  av_dict_set(&opts, "threads", "auto", 0);
2606  if (stream_lowres)
2607  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2608 
2609  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2610 
2611  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2612  goto fail;
2613  }
2614  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2615  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2616  ret = AVERROR_OPTION_NOT_FOUND;
2617  goto fail;
2618  }
2619 
2620  is->eof = 0;
2621  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2622  switch (avctx->codec_type) {
2623  case AVMEDIA_TYPE_AUDIO:
2624  {
2625  AVFilterContext *sink;
2626 
2627  is->audio_filter_src.freq = avctx->sample_rate;
2628  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2629  if (ret < 0)
2630  goto fail;
2631  is->audio_filter_src.fmt = avctx->sample_fmt;
2632  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2633  goto fail;
2634  sink = is->out_audio_filter;
2635  sample_rate = av_buffersink_get_sample_rate(sink);
2636  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2637  if (ret < 0)
2638  goto fail;
2639  }
2640 
2641  /* prepare audio output */
2642  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2643  goto fail;
2644  is->audio_hw_buf_size = ret;
2645  is->audio_src = is->audio_tgt;
2646  is->audio_buf_size = 0;
2647  is->audio_buf_index = 0;
2648 
2649  /* init averaging filter */
2650  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2651  is->audio_diff_avg_count = 0;
2652  /* since we do not have a precise enough measure of the audio FIFO fullness,
2653  we only correct audio sync when the error is larger than this threshold */
2654  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2655 
2656  is->audio_stream = stream_index;
2657  is->audio_st = ic->streams[stream_index];
2658 
2659  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2660  goto fail;
2661  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2662  is->auddec.start_pts = is->audio_st->start_time;
2663  is->auddec.start_pts_tb = is->audio_st->time_base;
2664  }
2665  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2666  goto out;
2667  SDL_PauseAudioDevice(audio_dev, 0);
2668  break;
2669  case AVMEDIA_TYPE_VIDEO:
2670  is->video_stream = stream_index;
2671  is->video_st = ic->streams[stream_index];
2672 
2673  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2674  goto fail;
2675  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2676  goto out;
2677  is->queue_attachments_req = 1;
2678  break;
2679  case AVMEDIA_TYPE_SUBTITLE:
2680  is->subtitle_stream = stream_index;
2681  is->subtitle_st = ic->streams[stream_index];
2682 
2683  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2684  goto fail;
2685  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2686  goto out;
2687  break;
2688  default:
2689  break;
2690  }
2691  goto out;
2692 
2693 fail:
2694  avcodec_free_context(&avctx);
2695 out:
2696  av_channel_layout_uninit(&ch_layout);
2697  av_dict_free(&opts);
2698 
2699  return ret;
2700 }
2701 
2702 static int decode_interrupt_cb(void *ctx)
2703 {
2704  VideoState *is = ctx;
2705  return is->abort_request;
2706 }
2707 
2708 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2709  return stream_id < 0 ||
2710  queue->abort_request ||
2711  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2712  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2713 }
2714 
2715 static int is_realtime(AVFormatContext *s)
2716 {
2717  if( !strcmp(s->iformat->name, "rtp")
2718  || !strcmp(s->iformat->name, "rtsp")
2719  || !strcmp(s->iformat->name, "sdp")
2720  )
2721  return 1;
2722 
2723  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2724  || !strncmp(s->url, "udp:", 4)
2725  )
2726  )
2727  return 1;
2728  return 0;
2729 }
2730 
2731 /* this thread gets the stream from the disk or the network */
2732 static int read_thread(void *arg)
2733 {
2734  VideoState *is = arg;
2735  AVFormatContext *ic = NULL;
2736  int err, i, ret;
2737  int st_index[AVMEDIA_TYPE_NB];
2738  AVPacket *pkt = NULL;
2739  int64_t stream_start_time;
2740  int pkt_in_play_range = 0;
2741  const AVDictionaryEntry *t;
2742  SDL_mutex *wait_mutex = SDL_CreateMutex();
2743  int scan_all_pmts_set = 0;
2744  int64_t pkt_ts;
2745 
2746  if (!wait_mutex) {
2747  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2748  ret = AVERROR(ENOMEM);
2749  goto fail;
2750  }
2751 
2752  memset(st_index, -1, sizeof(st_index));
2753  is->eof = 0;
2754 
2755  pkt = av_packet_alloc();
2756  if (!pkt) {
2757  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2758  ret = AVERROR(ENOMEM);
2759  goto fail;
2760  }
2761  ic = avformat_alloc_context();
2762  if (!ic) {
2763  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2764  ret = AVERROR(ENOMEM);
2765  goto fail;
2766  }
2767  ic->interrupt_callback.callback = decode_interrupt_cb;
2768  ic->interrupt_callback.opaque = is;
2769  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2770  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2771  scan_all_pmts_set = 1;
2772  }
2773  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2774  if (err < 0) {
2775  print_error(is->filename, err);
2776  ret = -1;
2777  goto fail;
2778  }
2779  if (scan_all_pmts_set)
2780  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2781 
2782  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2783  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2784  ret = AVERROR_OPTION_NOT_FOUND;
2785  goto fail;
2786  }
2787  is->ic = ic;
2788 
2789  if (genpts)
2790  ic->flags |= AVFMT_FLAG_GENPTS;
2791 
2792  av_format_inject_global_side_data(ic);
2793 
2794  if (find_stream_info) {
2795  AVDictionary **opts;
2796  int orig_nb_streams = ic->nb_streams;
2797 
2798  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2799  if (err < 0) {
2800  av_log(NULL, AV_LOG_ERROR,
2801  "Error setting up avformat_find_stream_info() options\n");
2802  ret = err;
2803  goto fail;
2804  }
2805 
2806  err = avformat_find_stream_info(ic, opts);
2807 
2808  for (i = 0; i < orig_nb_streams; i++)
2809  av_dict_free(&opts[i]);
2810  av_freep(&opts);
2811 
2812  if (err < 0) {
2813  av_log(NULL, AV_LOG_WARNING,
2814  "%s: could not find codec parameters\n", is->filename);
2815  ret = -1;
2816  goto fail;
2817  }
2818  }
2819 
2820  if (ic->pb)
2821  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2822 
2823  if (seek_by_bytes < 0)
2824  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2825  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2826  strcmp("ogg", ic->iformat->name);
2827 
2828  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2829 
2830  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2831  window_title = av_asprintf("%s - %s", t->value, input_filename);
2832 
2833  /* if seeking requested, we execute it */
2834  if (start_time != AV_NOPTS_VALUE) {
2835  int64_t timestamp;
2836 
2837  timestamp = start_time;
2838  /* add the stream start time */
2839  if (ic->start_time != AV_NOPTS_VALUE)
2840  timestamp += ic->start_time;
2841  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2842  if (ret < 0) {
2843  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2844  is->filename, (double)timestamp / AV_TIME_BASE);
2845  }
2846  }
2847 
2848  is->realtime = is_realtime(ic);
2849 
2850  if (show_status)
2851  av_dump_format(ic, 0, is->filename, 0);
2852 
2853  for (i = 0; i < ic->nb_streams; i++) {
2854  AVStream *st = ic->streams[i];
2855  enum AVMediaType type = st->codecpar->codec_type;
2856  st->discard = AVDISCARD_ALL;
2857  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2858  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2859  st_index[type] = i;
2860  }
2861  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2862  if (wanted_stream_spec[i] && st_index[i] == -1) {
2863  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2864  st_index[i] = INT_MAX;
2865  }
2866  }
2867 
2868  if (!video_disable)
2869  st_index[AVMEDIA_TYPE_VIDEO] =
2870  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2871  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2872  if (!audio_disable)
2873  st_index[AVMEDIA_TYPE_AUDIO] =
2874  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2875  st_index[AVMEDIA_TYPE_AUDIO],
2876  st_index[AVMEDIA_TYPE_VIDEO],
2877  NULL, 0);
2878  if (!video_disable && !subtitle_disable)
2879  st_index[AVMEDIA_TYPE_SUBTITLE] =
2880  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2881  st_index[AVMEDIA_TYPE_SUBTITLE],
2882  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2883  st_index[AVMEDIA_TYPE_AUDIO] :
2884  st_index[AVMEDIA_TYPE_VIDEO]),
2885  NULL, 0);
2886 
2887  is->show_mode = show_mode;
2888  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2889  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2890  AVCodecParameters *codecpar = st->codecpar;
2891  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2892  if (codecpar->width)
2893  set_default_window_size(codecpar->width, codecpar->height, sar);
2894  }
2895 
2896  /* open the streams */
2897  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2898  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2899  }
2900 
2901  ret = -1;
2902  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2903  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2904  }
2905  if (is->show_mode == SHOW_MODE_NONE)
2906  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2907 
2908  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2909  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2910  }
2911 
2912  if (is->video_stream < 0 && is->audio_stream < 0) {
2913  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2914  is->filename);
2915  ret = -1;
2916  goto fail;
2917  }
2918 
2919  if (infinite_buffer < 0 && is->realtime)
2920  infinite_buffer = 1;
2921 
2922  for (;;) {
2923  if (is->abort_request)
2924  break;
2925  if (is->paused != is->last_paused) {
2926  is->last_paused = is->paused;
2927  if (is->paused)
2928  is->read_pause_return = av_read_pause(ic);
2929  else
2930  av_read_play(ic);
2931  }
2932 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2933  if (is->paused &&
2934  (!strcmp(ic->iformat->name, "rtsp") ||
2935  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2936  /* wait 10 ms to avoid trying to get another packet */
2937  /* XXX: horrible */
2938  SDL_Delay(10);
2939  continue;
2940  }
2941 #endif
2942  if (is->seek_req) {
2943  int64_t seek_target = is->seek_pos;
2944  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2945  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2946 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2947 // of the seek_pos/seek_rel variables
2948 
2949  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2950  if (ret < 0) {
2951  av_log(NULL, AV_LOG_ERROR,
2952  "%s: error while seeking\n", is->ic->url);
2953  } else {
2954  if (is->audio_stream >= 0)
2955  packet_queue_flush(&is->audioq);
2956  if (is->subtitle_stream >= 0)
2957  packet_queue_flush(&is->subtitleq);
2958  if (is->video_stream >= 0)
2959  packet_queue_flush(&is->videoq);
2960  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2961  set_clock(&is->extclk, NAN, 0);
2962  } else {
2963  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2964  }
2965  }
2966  is->seek_req = 0;
2967  is->queue_attachments_req = 1;
2968  is->eof = 0;
2969  if (is->paused)
2970  step_to_next_frame(is);
2971  }
2972  if (is->queue_attachments_req) {
2973  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2974  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
2975  goto fail;
2976  packet_queue_put(&is->videoq, pkt);
2977  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2978  }
2979  is->queue_attachments_req = 0;
2980  }
2981 
2982  /* if the queues are full, no need to read more */
2983  if (infinite_buffer<1 &&
2984  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2985  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2986  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2987  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2988  /* wait 10 ms */
2989  SDL_LockMutex(wait_mutex);
2990  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2991  SDL_UnlockMutex(wait_mutex);
2992  continue;
2993  }
2994  if (!is->paused &&
2995  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
2996  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
2997  if (loop != 1 && (!loop || --loop)) {
2998  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2999  } else if (autoexit) {
3000  ret = AVERROR_EOF;
3001  goto fail;
3002  }
3003  }
3004  ret = av_read_frame(ic, pkt);
3005  if (ret < 0) {
3006  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3007  if (is->video_stream >= 0)
3008  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3009  if (is->audio_stream >= 0)
3010  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3011  if (is->subtitle_stream >= 0)
3012  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3013  is->eof = 1;
3014  }
3015  if (ic->pb && ic->pb->error) {
3016  if (autoexit)
3017  goto fail;
3018  else
3019  break;
3020  }
3021  SDL_LockMutex(wait_mutex);
3022  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3023  SDL_UnlockMutex(wait_mutex);
3024  continue;
3025  } else {
3026  is->eof = 0;
3027  }
3028  /* check if packet is in play range specified by user, then queue, otherwise discard */
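/* A packet is in range when no -t duration was given, or when its timestamp,
 * taken relative to the stream start time and to any -ss offset and converted
 * to seconds via the stream time base, does not exceed the requested duration. */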
3029  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3030  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3031  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3032  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3033  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3034  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3035  <= ((double)duration / 1000000);
3036  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3037  packet_queue_put(&is->audioq, pkt);
3038  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3039  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3040  packet_queue_put(&is->videoq, pkt);
3041  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3042  packet_queue_put(&is->subtitleq, pkt);
3043  } else {
3044  av_packet_unref(pkt);
3045  }
3046  }
3047 
3048  ret = 0;
3049  fail:
3050  if (ic && !is->ic)
3051  avformat_close_input(&ic);
3052 
3053  av_packet_free(&pkt);
3054  if (ret != 0) {
3055  SDL_Event event;
3056 
3057  event.type = FF_QUIT_EVENT;
3058  event.user.data1 = is;
3059  SDL_PushEvent(&event);
3060  }
3061  SDL_DestroyMutex(wait_mutex);
3062  return 0;
3063 }
3064 
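/* Allocate the VideoState, set up the packet and frame queues and the three
 * clocks, then start read_thread(), which does the demuxing and spawns the
 * decoder threads. */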
3065 static VideoState *stream_open(const char *filename,
3066  const AVInputFormat *iformat)
3067 {
3068  VideoState *is;
3069 
3070  is = av_mallocz(sizeof(VideoState));
3071  if (!is)
3072  return NULL;
3073  is->last_video_stream = is->video_stream = -1;
3074  is->last_audio_stream = is->audio_stream = -1;
3075  is->last_subtitle_stream = is->subtitle_stream = -1;
3076  is->filename = av_strdup(filename);
3077  if (!is->filename)
3078  goto fail;
3079  is->iformat = iformat;
3080  is->ytop = 0;
3081  is->xleft = 0;
3082 
3083  /* start video display */
3084  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3085  goto fail;
3086  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3087  goto fail;
3088  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3089  goto fail;
3090 
3091  if (packet_queue_init(&is->videoq) < 0 ||
3092  packet_queue_init(&is->audioq) < 0 ||
3093  packet_queue_init(&is->subtitleq) < 0)
3094  goto fail;
3095 
3096  if (!(is->continue_read_thread = SDL_CreateCond())) {
3097  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3098  goto fail;
3099  }
3100 
3101  init_clock(&is->vidclk, &is->videoq.serial);
3102  init_clock(&is->audclk, &is->audioq.serial);
3103  init_clock(&is->extclk, &is->extclk.serial);
3104  is->audio_clock_serial = -1;
3105  if (startup_volume < 0)
3106  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3107  if (startup_volume > 100)
3108  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3109  startup_volume = av_clip(startup_volume, 0, 100);
3110  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3111  is->audio_volume = startup_volume;
3112  is->muted = 0;
3113  is->av_sync_type = av_sync_type;
3114  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3115  if (!is->read_tid) {
3116  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3117 fail:
3118  stream_close(is);
3119  return NULL;
3120  }
3121  return is;
3122 }
3123 
3124 static void stream_cycle_channel(VideoState *is, int codec_type)
3125 {
3126  AVFormatContext *ic = is->ic;
3127  int start_index, stream_index;
3128  int old_index;
3129  AVStream *st;
3130  AVProgram *p = NULL;
3131  int nb_streams = is->ic->nb_streams;
3132 
3133  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3134  start_index = is->last_video_stream;
3135  old_index = is->video_stream;
3136  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3137  start_index = is->last_audio_stream;
3138  old_index = is->audio_stream;
3139  } else {
3140  start_index = is->last_subtitle_stream;
3141  old_index = is->subtitle_stream;
3142  }
3143  stream_index = start_index;
3144 
3145  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3146  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3147  if (p) {
3148  nb_streams = p->nb_stream_indexes;
3149  for (start_index = 0; start_index < nb_streams; start_index++)
3150  if (p->stream_index[start_index] == stream_index)
3151  break;
3152  if (start_index == nb_streams)
3153  start_index = -1;
3154  stream_index = start_index;
3155  }
3156  }
3157 
3158  for (;;) {
3159  if (++stream_index >= nb_streams)
3160  {
3161  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3162  {
3163  stream_index = -1;
3164  is->last_subtitle_stream = -1;
3165  goto the_end;
3166  }
3167  if (start_index == -1)
3168  return;
3169  stream_index = 0;
3170  }
3171  if (stream_index == start_index)
3172  return;
3173  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3174  if (st->codecpar->codec_type == codec_type) {
3175  /* check that parameters are OK */
3176  switch (codec_type) {
3177  case AVMEDIA_TYPE_AUDIO:
3178  if (st->codecpar->sample_rate != 0 &&
3179  st->codecpar->ch_layout.nb_channels != 0)
3180  goto the_end;
3181  break;
3182  case AVMEDIA_TYPE_VIDEO:
3183  case AVMEDIA_TYPE_SUBTITLE:
3184  goto the_end;
3185  default:
3186  break;
3187  }
3188  }
3189  }
3190  the_end:
3191  if (p && stream_index != -1)
3192  stream_index = p->stream_index[stream_index];
3193  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3194  av_get_media_type_string(codec_type),
3195  old_index,
3196  stream_index);
3197 
3198  stream_component_close(is, old_index);
3199  stream_component_open(is, stream_index);
3200 }
3201 
3202 
3203 static void toggle_full_screen(VideoState *is)
3204 {
3205  is_full_screen = !is_full_screen;
3206  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3207 }
3208 
3209 static void toggle_audio_display(VideoState *is)
3210 {
3211  int next = is->show_mode;
3212  do {
3213  next = (next + 1) % SHOW_MODE_NB;
3214  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3215  if (is->show_mode != next) {
3216  is->force_refresh = 1;
3217  is->show_mode = next;
3218  }
3219 }
3220 
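/* Pump SDL events and, while none are pending, keep calling video_refresh()
 * roughly every REFRESH_RATE seconds so the picture, subtitles and on-screen
 * status stay up to date; the cursor is hidden after CURSOR_HIDE_DELAY. */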
3221 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3222  double remaining_time = 0.0;
3223  SDL_PumpEvents();
3224  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3225  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3226  SDL_ShowCursor(0);
3227  cursor_hidden = 1;
3228  }
3229  if (remaining_time > 0.0)
3230  av_usleep((int64_t)(remaining_time * 1000000.0));
3231  remaining_time = REFRESH_RATE;
3232  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3233  video_refresh(is, &remaining_time);
3234  SDL_PumpEvents();
3235  }
3236 }
3237 
3238 static void seek_chapter(VideoState *is, int incr)
3239 {
3240  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3241  int i;
3242 
3243  if (!is->ic->nb_chapters)
3244  return;
3245 
3246  /* find the current chapter */
3247  for (i = 0; i < is->ic->nb_chapters; i++) {
3248  AVChapter *ch = is->ic->chapters[i];
3249  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3250  i--;
3251  break;
3252  }
3253  }
3254 
3255  i += incr;
3256  i = FFMAX(i, 0);
3257  if (i >= is->ic->nb_chapters)
3258  return;
3259 
3260  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3261  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3262  AV_TIME_BASE_Q), 0, 0);
3263 }
3264 
3265 /* handle an event sent by the GUI */
3266 static void event_loop(VideoState *cur_stream)
3267 {
3268  SDL_Event event;
3269  double incr, pos, frac;
3270 
3271  for (;;) {
3272  double x;
3273  refresh_loop_wait_event(cur_stream, &event);
3274  switch (event.type) {
3275  case SDL_KEYDOWN:
3276  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3277  do_exit(cur_stream);
3278  break;
3279  }
3280  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3281  if (!cur_stream->width)
3282  continue;
3283  switch (event.key.keysym.sym) {
3284  case SDLK_f:
3285  toggle_full_screen(cur_stream);
3286  cur_stream->force_refresh = 1;
3287  break;
3288  case SDLK_p:
3289  case SDLK_SPACE:
3290  toggle_pause(cur_stream);
3291  break;
3292  case SDLK_m:
3293  toggle_mute(cur_stream);
3294  break;
3295  case SDLK_KP_MULTIPLY:
3296  case SDLK_0:
3297  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3298  break;
3299  case SDLK_KP_DIVIDE:
3300  case SDLK_9:
3301  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3302  break;
3303  case SDLK_s: // S: Step to next frame
3304  step_to_next_frame(cur_stream);
3305  break;
3306  case SDLK_a:
3307  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3308  break;
3309  case SDLK_v:
3310  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3311  break;
3312  case SDLK_c:
3313  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3314  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3315  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3316  break;
3317  case SDLK_t:
3318  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3319  break;
3320  case SDLK_w:
3321  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3322  if (++cur_stream->vfilter_idx >= nb_vfilters)
3323  cur_stream->vfilter_idx = 0;
3324  } else {
3325  cur_stream->vfilter_idx = 0;
3326  toggle_audio_display(cur_stream);
3327  }
3328  break;
3329  case SDLK_PAGEUP:
3330  if (cur_stream->ic->nb_chapters <= 1) {
3331  incr = 600.0;
3332  goto do_seek;
3333  }
3334  seek_chapter(cur_stream, 1);
3335  break;
3336  case SDLK_PAGEDOWN:
3337  if (cur_stream->ic->nb_chapters <= 1) {
3338  incr = -600.0;
3339  goto do_seek;
3340  }
3341  seek_chapter(cur_stream, -1);
3342  break;
3343  case SDLK_LEFT:
3344  incr = seek_interval ? -seek_interval : -10.0;
3345  goto do_seek;
3346  case SDLK_RIGHT:
3347  incr = seek_interval ? seek_interval : 10.0;
3348  goto do_seek;
3349  case SDLK_UP:
3350  incr = 60.0;
3351  goto do_seek;
3352  case SDLK_DOWN:
3353  incr = -60.0;
3354  do_seek:
3355  if (seek_by_bytes) {
3356  pos = -1;
3357  if (pos < 0 && cur_stream->video_stream >= 0)
3358  pos = frame_queue_last_pos(&cur_stream->pictq);
3359  if (pos < 0 && cur_stream->audio_stream >= 0)
3360  pos = frame_queue_last_pos(&cur_stream->sampq);
3361  if (pos < 0)
3362  pos = avio_tell(cur_stream->ic->pb);
3363  if (cur_stream->ic->bit_rate)
3364  incr *= cur_stream->ic->bit_rate / 8.0;
3365  else
3366  incr *= 180000.0;
3367  pos += incr;
3368  stream_seek(cur_stream, pos, incr, 1);
3369  } else {
3370  pos = get_master_clock(cur_stream);
3371  if (isnan(pos))
3372  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3373  pos += incr;
3374  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3375  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3376  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3377  }
3378  break;
3379  default:
3380  break;
3381  }
3382  break;
3383  case SDL_MOUSEBUTTONDOWN:
3384  if (exit_on_mousedown) {
3385  do_exit(cur_stream);
3386  break;
3387  }
3388  if (event.button.button == SDL_BUTTON_LEFT) {
3389  static int64_t last_mouse_left_click = 0;
3390  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3391  toggle_full_screen(cur_stream);
3392  cur_stream->force_refresh = 1;
3393  last_mouse_left_click = 0;
3394  } else {
3395  last_mouse_left_click = av_gettime_relative();
3396  }
3397  }
3398  case SDL_MOUSEMOTION:
3399  if (cursor_hidden) {
3400  SDL_ShowCursor(1);
3401  cursor_hidden = 0;
3402  }
3403  cursor_last_shown = av_gettime_relative();
3404  if (event.type == SDL_MOUSEBUTTONDOWN) {
3405  if (event.button.button != SDL_BUTTON_RIGHT)
3406  break;
3407  x = event.button.x;
3408  } else {
3409  if (!(event.motion.state & SDL_BUTTON_RMASK))
3410  break;
3411  x = event.motion.x;
3412  }
3413  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3414  uint64_t size = avio_size(cur_stream->ic->pb);
3415  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3416  } else {
3417  int64_t ts;
3418  int ns, hh, mm, ss;
3419  int tns, thh, tmm, tss;
3420  tns = cur_stream->ic->duration / 1000000LL;
3421  thh = tns / 3600;
3422  tmm = (tns % 3600) / 60;
3423  tss = (tns % 60);
3424  frac = x / cur_stream->width;
3425  ns = frac * tns;
3426  hh = ns / 3600;
3427  mm = (ns % 3600) / 60;
3428  ss = (ns % 60);
3429  av_log(NULL, AV_LOG_INFO,
3430  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3431  hh, mm, ss, thh, tmm, tss);
3432  ts = frac * cur_stream->ic->duration;
3433  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3434  ts += cur_stream->ic->start_time;
3435  stream_seek(cur_stream, ts, 0, 0);
3436  }
3437  break;
3438  case SDL_WINDOWEVENT:
3439  switch (event.window.event) {
3440  case SDL_WINDOWEVENT_SIZE_CHANGED:
3441  screen_width = cur_stream->width = event.window.data1;
3442  screen_height = cur_stream->height = event.window.data2;
3443  if (cur_stream->vis_texture) {
3444  SDL_DestroyTexture(cur_stream->vis_texture);
3445  cur_stream->vis_texture = NULL;
3446  }
3447  case SDL_WINDOWEVENT_EXPOSED:
3448  cur_stream->force_refresh = 1;
3449  }
3450  break;
3451  case SDL_QUIT:
3452  case FF_QUIT_EVENT:
3453  do_exit(cur_stream);
3454  break;
3455  default:
3456  break;
3457  }
3458  }
3459 }
3460 
3461 static int opt_width(void *optctx, const char *opt, const char *arg)
3462 {
3463  double num;
3464  int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
3465  if (ret < 0)
3466  return ret;
3467 
3468  screen_width = num;
3469  return 0;
3470 }
3471 
3472 static int opt_height(void *optctx, const char *opt, const char *arg)
3473 {
3474  double num;
3475  int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
3476  if (ret < 0)
3477  return ret;
3478 
3479  screen_height = num;
3480  return 0;
3481 }
3482 
3483 static int opt_format(void *optctx, const char *opt, const char *arg)
3484 {
3485  file_iformat = av_find_input_format(arg);
3486  if (!file_iformat) {
3487  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3488  return AVERROR(EINVAL);
3489  }
3490  return 0;
3491 }
3492 
3493 static int opt_sync(void *optctx, const char *opt, const char *arg)
3494 {
3495  if (!strcmp(arg, "audio"))
3496  av_sync_type = AV_SYNC_AUDIO_MASTER;
3497  else if (!strcmp(arg, "video"))
3498  av_sync_type = AV_SYNC_VIDEO_MASTER;
3499  else if (!strcmp(arg, "ext"))
3500  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3501  else {
3502  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3503  exit(1);
3504  }
3505  return 0;
3506 }
3507 
3508 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3509 {
3510  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3511  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3512  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3513 
3514  if (show_mode == SHOW_MODE_NONE) {
3515  double num;
3516  int ret = parse_number(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1, &num);
3517  if (ret < 0)
3518  return ret;
3519  show_mode = num;
3520  }
3521  return 0;
3522 }
3523 
3524 static int opt_input_file(void *optctx, const char *filename)
3525 {
3526  if (input_filename) {
3527  av_log(NULL, AV_LOG_FATAL,
3528  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3529  filename, input_filename);
3530  return AVERROR(EINVAL);
3531  }
3532  if (!strcmp(filename, "-"))
3533  filename = "fd:";
3534  input_filename = filename;
3535 
3536  return 0;
3537 }
3538 
3539 static int opt_codec(void *optctx, const char *opt, const char *arg)
3540 {
3541  const char *spec = strchr(opt, ':');
3542  if (!spec) {
3543  av_log(NULL, AV_LOG_ERROR,
3544  "No media specifier was specified in '%s' in option '%s'\n",
3545  arg, opt);
3546  return AVERROR(EINVAL);
3547  }
3548  spec++;
3549  switch (spec[0]) {
3550  case 'a' : audio_codec_name = arg; break;
3551  case 's' : subtitle_codec_name = arg; break;
3552  case 'v' : video_codec_name = arg; break;
3553  default:
3554  av_log(NULL, AV_LOG_ERROR,
3555  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3556  return AVERROR(EINVAL);
3557  }
3558  return 0;
3559 }
3560 
3561 static int dummy;
3562 
3563 static const OptionDef options[] = {
3564  CMDUTILS_COMMON_OPTIONS
3565  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3566  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3567  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3568  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3569  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3570  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3571  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3572  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3573  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3574  { "ss", HAS_ARG | OPT_TIME, { &start_time }, "seek to a given position in seconds", "pos" },
3575  { "t", HAS_ARG | OPT_TIME, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3576  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3577  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3578  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3579  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3580  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3581  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3582  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3583  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3584  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3585  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3586  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3587  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3588  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3589  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3590  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3591  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3592  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3593  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3594  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3595  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3596  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3597  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3598  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3599  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3600  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3601  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3602  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3603  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3604  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3605  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3606  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3607  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3608  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3609  "read and decode the streams to fill missing information with heuristics" },
3610  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3611  { NULL, },
3612 };
3613 
3614 static void show_usage(void)
3615 {
3616  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3617  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3618  av_log(NULL, AV_LOG_INFO, "\n");
3619 }
3620 
3621 void show_help_default(const char *opt, const char *arg)
3622 {
3623  av_log_set_callback(log_callback_help);
3624  show_usage();
3625  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3626  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3627  printf("\n");
3628  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3629  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3630  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3631  printf("\nWhile playing:\n"
3632  "q, ESC quit\n"
3633  "f toggle full screen\n"
3634  "p, SPC pause\n"
3635  "m toggle mute\n"
3636  "9, 0 decrease and increase volume respectively\n"
3637  "/, * decrease and increase volume respectively\n"
3638  "a cycle audio channel in the current program\n"
3639  "v cycle video channel\n"
3640  "t cycle subtitle channel in the current program\n"
3641  "c cycle program\n"
3642  "w cycle video filters or show modes\n"
3643  "s activate frame-step mode\n"
3644  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3645  "down/up seek backward/forward 1 minute\n"
3646  "page down/page up seek backward/forward 10 minutes\n"
3647  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3648  "left double-click toggle full screen\n"
3649  );
3650 }
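/* [Editor's note, not part of the upstream source] The key bindings printed
 * above are dispatched by the SDL event loop (event_loop(), invoked from
 * main() below); for example 'f' is routed to toggle_full_screen() and the
 * volume keys to update_volume(), both defined earlier in this file.
 */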
3651 
3652 /* Called from the main */
3653 int main(int argc, char **argv)
3654 {
3655  int flags, ret;
3656  VideoState *is;
3657 
3658  init_dynload();
3659 
3660  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3661  parse_loglevel(argc, argv, options);
3662 
3663  /* register all codecs, demux and protocols */
3664 #if CONFIG_AVDEVICE
3665  avdevice_register_all();
3666 #endif
3667  avformat_network_init();
3668 
3669  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3670  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3671 
3672  show_banner(argc, argv, options);
3673 
3674  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3675  if (ret < 0)
3676  exit(ret == AVERROR_EXIT ? 0 : 1);
3677 
3678  if (!input_filename) {
3679  show_usage();
3680  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3681  av_log(NULL, AV_LOG_FATAL,
3682  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3683  exit(1);
3684  }
3685 
3686  if (display_disable) {
3687  video_disable = 1;
3688  }
3689  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3690  if (audio_disable)
3691  flags &= ~SDL_INIT_AUDIO;
3692  else {
3693  /* Try to work around an occasional ALSA buffer underflow issue when the
3694  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3695  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3696  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3697  }
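/* [Editor's note, not part of the upstream source] The SDL_setenv() call above
 * only takes effect when the user has not already set the variable, so the
 * same workaround can be applied (or overridden) from the shell before
 * launching the player, e.g. (the file name is made up):
 *
 *   SDL_AUDIO_ALSA_SET_BUFFER_SIZE=1 ffplay input.mkv
 */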
3698  if (display_disable)
3699  flags &= ~SDL_INIT_VIDEO;
3700  if (SDL_Init (flags)) {
3701  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3702  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3703  exit(1);
3704  }
3705 
3706  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3707  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3708 
3709  if (!display_disable) {
3710  int flags = SDL_WINDOW_HIDDEN;
3711  if (alwaysontop)
3712 #if SDL_VERSION_ATLEAST(2,0,5)
3713  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3714 #else
3715  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3716 #endif
3717  if (borderless)
3718  flags |= SDL_WINDOW_BORDERLESS;
3719  else
3720  flags |= SDL_WINDOW_RESIZABLE;
3721 
3722 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3723  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3724 #endif
3725  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3726  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3727  if (window) {
3728  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3729  if (!renderer) {
3730  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3731  renderer = SDL_CreateRenderer(window, -1, 0);
3732  }
3733  if (renderer) {
3734  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3735  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3736  }
3737  }
3738  if (!window || !renderer || !renderer_info.num_texture_formats) {
3739  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3740  do_exit(NULL);
3741  }
3742  }
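/* [Editor's note, not part of the upstream source] The window is deliberately
 * created with SDL_WINDOW_HIDDEN; ffplay resizes and shows it later (see
 * video_open() earlier in this file) once the actual picture dimensions are
 * known, so a wrongly sized window does not flash on screen first.
 */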
3743 
3744  is = stream_open(input_filename, file_iformat);
3745  if (!is) {
3746  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3747  do_exit(NULL);
3748  }
3749 
3750  event_loop(is);
3751 
3752  /* never returns */
3753 
3754  return 0;
3755 }
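/* [Editor's note, not part of the upstream source] In normal operation the
 * final "return 0" is never reached: quitting goes through do_exit(), which
 * closes the stream, destroys the SDL renderer and window and calls exit(),
 * which is why event_loop() is annotated as never returning.
 */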