ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69 
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
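/* A rough sketch of how these two constants are used later in this file when
 * the SDL audio device is opened: the requested callback buffer is a
 * power-of-two sample count on the order of freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC,
 * clamped below by SDL_AUDIO_MIN_BUFFER_SIZE, so callbacks stay infrequent
 * without adding excessive latency. */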
74 
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
79 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
81 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
83 
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91 
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB 20
94 
95 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
96 #define REFRESH_RATE 0.01
97 
98 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 
102 #define CURSOR_HIDE_DELAY 1000000
103 
104 static int64_t sws_flags = SWS_BICUBIC;
105 
106 typedef struct MyAVPacketList {
107  AVPacket pkt;
108  struct MyAVPacketList *next;
109  int serial;
110 } MyAVPacketList;
111 
112 typedef struct PacketQueue {
113  MyAVPacketList *first_pkt, *last_pkt;
114  int nb_packets;
115  int size;
116  int abort_request;
117  int serial;
118  SDL_mutex *mutex;
119  SDL_cond *cond;
120 } PacketQueue;
121 
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 16
124 #define SAMPLE_QUEUE_SIZE 9
125 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
126 
127 typedef struct AudioParams {
128  int freq;
129  int channels;
130  int64_t channel_layout;
131  enum AVSampleFormat fmt;
132  int frame_size;
133  int bytes_per_sec;
134 } AudioParams;
135 
136 typedef struct Clock {
137  double pts; /* clock base */
138  double pts_drift; /* clock base minus time at which we updated the clock */
139  double last_updated;
140  double speed;
141  int serial; /* clock is based on a packet with this serial */
142  int paused;
143  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
144 } Clock;
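/* How the clock works: set_clock_at() stores pts_drift = pts - last_updated,
 * so get_clock() can return pts_drift + now, i.e. the stored pts advanced by
 * the wall-clock time elapsed since the last update, scaled by 'speed'.
 * Example: if pts 10.0 was set 0.5 seconds ago and speed is 1.0, get_clock()
 * reports 10.5. */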
145 
146 /* Common struct for handling all types of decoded data and allocated render buffers. */
147 typedef struct Frame {
148  AVFrame *frame;
149  AVSubtitle sub;
150  int serial;
151  double pts; /* presentation timestamp for the frame */
152  double duration; /* estimated duration of the frame */
153  int64_t pos; /* byte position of the frame in the input file */
154  SDL_Overlay *bmp;
155  int allocated;
156  int reallocate;
157  int width;
158  int height;
159  AVRational sar;
160 } Frame;
161 
162 typedef struct FrameQueue {
163  Frame queue[FRAME_QUEUE_SIZE];
164  int rindex;
165  int windex;
166  int size;
167  int max_size;
168  int keep_last;
169  int rindex_shown;
170  SDL_mutex *mutex;
171  SDL_cond *cond;
172  PacketQueue *pktq;
173 } FrameQueue;
174 
175 enum {
176  AV_SYNC_AUDIO_MASTER, /* default choice */
177  AV_SYNC_VIDEO_MASTER, /* synchronize to video clock */
178  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
179 };
180 
181 typedef struct Decoder {
182  AVPacket pkt;
183  AVPacket pkt_temp;
184  PacketQueue *queue;
185  AVCodecContext *avctx;
186  int pkt_serial;
187  int finished;
188  int flushed;
189  int packet_pending;
190  SDL_cond *empty_queue_cond;
191  int64_t start_pts;
192  AVRational start_pts_tb;
193  int64_t next_pts;
194  AVRational next_pts_tb;
195 } Decoder;
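/* Note on 'serial': every PacketQueue keeps a serial that is bumped whenever
 * the queue is flushed (e.g. after a seek). Packets, decoded Frames and the
 * Clocks all carry the serial of the generation they belong to, which lets the
 * player drop data and ignore clock values left over from before the flush. */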
196 
197 typedef struct VideoState {
198  SDL_Thread *read_tid;
199  SDL_Thread *video_tid;
200  SDL_Thread *audio_tid;
205  int paused;
208  int seek_req;
210  int64_t seek_pos;
211  int64_t seek_rel;
214  int realtime;
215 
219 
223 
227 
229 
231 
232  double audio_clock;
234  double audio_diff_cum; /* used for AV difference average computation */
244  unsigned int audio_buf_size; /* in bytes */
245  unsigned int audio_buf1_size;
246  int audio_buf_index; /* in bytes */
249 #if CONFIG_AVFILTER
250  struct AudioParams audio_filter_src;
251 #endif
256 
257  enum ShowMode {
258  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
259  } show_mode;
266  int xpos;
268 
269  SDL_Thread *subtitle_tid;
273 
274  double frame_timer;
280  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
281 #if !CONFIG_AVFILTER
283 #endif
285 
286  char filename[1024];
288  int step;
289 
290 #if CONFIG_AVFILTER
291  int vfilter_idx;
292  AVFilterContext *in_video_filter; // the first filter in the video chain
293  AVFilterContext *out_video_filter; // the last filter in the video chain
294  AVFilterContext *in_audio_filter; // the first filter in the audio chain
295  AVFilterContext *out_audio_filter; // the last filter in the audio chain
296  AVFilterGraph *agraph; // audio filter graph
297 #endif
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
306 static const char *input_filename;
307 static const char *window_title;
308 static int fs_screen_width;
309 static int fs_screen_height;
310 static int default_width = 640;
311 static int default_height = 480;
312 static int screen_width = 0;
313 static int screen_height = 0;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static int wanted_stream[AVMEDIA_TYPE_NB] = {
318  [AVMEDIA_TYPE_AUDIO] = -1,
319  [AVMEDIA_TYPE_VIDEO] = -1,
320  [AVMEDIA_TYPE_SUBTITLE] = -1,
321 };
322 static int seek_by_bytes = -1;
323 static int display_disable;
324 static int show_status = 1;
325 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
326 static int64_t start_time = AV_NOPTS_VALUE;
327 static int64_t duration = AV_NOPTS_VALUE;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
343 static int64_t cursor_last_shown;
344 static int cursor_hidden = 0;
345 #if CONFIG_AVFILTER
346 static const char **vfilters_list = NULL;
347 static int nb_vfilters = 0;
348 static char *afilters = NULL;
349 #endif
350 static int autorotate = 1;
351 
352 /* current context */
353 static int is_full_screen;
354 static int64_t audio_callback_time;
355 
356 static AVPacket flush_pkt;
357 
358 #define FF_ALLOC_EVENT (SDL_USEREVENT)
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Surface *screen;
362 
363 #if CONFIG_AVFILTER
364 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
365 {
366  GROW_ARRAY(vfilters_list, nb_vfilters);
367  vfilters_list[nb_vfilters - 1] = arg;
368  return 0;
369 }
370 #endif
371 
372 static inline
373 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
374  enum AVSampleFormat fmt2, int64_t channel_count2)
375 {
376  /* If channel count == 1, planar and non-planar formats are the same */
377  if (channel_count1 == 1 && channel_count2 == 1)
378  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
379  else
380  return channel_count1 != channel_count2 || fmt1 != fmt2;
381 }
382 
383 static inline
384 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
385 {
386  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
387  return channel_layout;
388  else
389  return 0;
390 }
391 
392 static void free_picture(Frame *vp);
393 
394 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
395 {
396  MyAVPacketList *pkt1;
397 
398  if (q->abort_request)
399  return -1;
400 
401  pkt1 = av_malloc(sizeof(MyAVPacketList));
402  if (!pkt1)
403  return -1;
404  pkt1->pkt = *pkt;
405  pkt1->next = NULL;
406  if (pkt == &flush_pkt)
407  q->serial++;
408  pkt1->serial = q->serial;
409 
410  if (!q->last_pkt)
411  q->first_pkt = pkt1;
412  else
413  q->last_pkt->next = pkt1;
414  q->last_pkt = pkt1;
415  q->nb_packets++;
416  q->size += pkt1->pkt.size + sizeof(*pkt1);
417  /* XXX: should duplicate packet data in DV case */
418  SDL_CondSignal(q->cond);
419  return 0;
420 }
421 
422 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
423 {
424  int ret;
425 
426  /* duplicate the packet */
427  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
428  return -1;
429 
430  SDL_LockMutex(q->mutex);
431  ret = packet_queue_put_private(q, pkt);
432  SDL_UnlockMutex(q->mutex);
433 
434  if (pkt != &flush_pkt && ret < 0)
435  av_free_packet(pkt);
436 
437  return ret;
438 }
439 
440 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
441 {
442  AVPacket pkt1, *pkt = &pkt1;
443  av_init_packet(pkt);
444  pkt->data = NULL;
445  pkt->size = 0;
446  pkt->stream_index = stream_index;
447  return packet_queue_put(q, pkt);
448 }
449 
450 /* packet queue handling */
451 static void packet_queue_init(PacketQueue *q)
452 {
453  memset(q, 0, sizeof(PacketQueue));
454  q->mutex = SDL_CreateMutex();
455  q->cond = SDL_CreateCond();
456  q->abort_request = 1;
457 }
458 
459 static void packet_queue_flush(PacketQueue *q)
460 {
461  MyAVPacketList *pkt, *pkt1;
462 
463  SDL_LockMutex(q->mutex);
464  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
465  pkt1 = pkt->next;
466  av_free_packet(&pkt->pkt);
467  av_freep(&pkt);
468  }
469  q->last_pkt = NULL;
470  q->first_pkt = NULL;
471  q->nb_packets = 0;
472  q->size = 0;
473  SDL_UnlockMutex(q->mutex);
474 }
475 
476 static void packet_queue_destroy(PacketQueue *q)
477 {
478  packet_queue_flush(q);
479  SDL_DestroyMutex(q->mutex);
480  SDL_DestroyCond(q->cond);
481 }
482 
483 static void packet_queue_abort(PacketQueue *q)
484 {
485  SDL_LockMutex(q->mutex);
486 
487  q->abort_request = 1;
488 
489  SDL_CondSignal(q->cond);
490 
491  SDL_UnlockMutex(q->mutex);
492 }
493 
494 static void packet_queue_start(PacketQueue *q)
495 {
496  SDL_LockMutex(q->mutex);
497  q->abort_request = 0;
498  packet_queue_put_private(q, &flush_pkt);
499  SDL_UnlockMutex(q->mutex);
500 }
501 
502 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
503 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
504 {
505  MyAVPacketList *pkt1;
506  int ret;
507 
508  SDL_LockMutex(q->mutex);
509 
510  for (;;) {
511  if (q->abort_request) {
512  ret = -1;
513  break;
514  }
515 
516  pkt1 = q->first_pkt;
517  if (pkt1) {
518  q->first_pkt = pkt1->next;
519  if (!q->first_pkt)
520  q->last_pkt = NULL;
521  q->nb_packets--;
522  q->size -= pkt1->pkt.size + sizeof(*pkt1);
523  *pkt = pkt1->pkt;
524  if (serial)
525  *serial = pkt1->serial;
526  av_free(pkt1);
527  ret = 1;
528  break;
529  } else if (!block) {
530  ret = 0;
531  break;
532  } else {
533  SDL_CondWait(q->cond, q->mutex);
534  }
535  }
536  SDL_UnlockMutex(q->mutex);
537  return ret;
538 }
539 
540 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
541  memset(d, 0, sizeof(Decoder));
542  d->avctx = avctx;
543  d->queue = queue;
544  d->empty_queue_cond = empty_queue_cond;
545  d->start_pts = AV_NOPTS_VALUE;
546 }
547 
548 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
549  int got_frame = 0;
550 
551  d->flushed = 0;
552 
553  do {
554  int ret = -1;
555 
556  if (d->queue->abort_request)
557  return -1;
558 
559  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
560  AVPacket pkt;
561  do {
562  if (d->queue->nb_packets == 0)
563  SDL_CondSignal(d->empty_queue_cond);
564  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
565  return -1;
566  if (pkt.data == flush_pkt.data) {
567  avcodec_flush_buffers(d->avctx);
568  d->finished = 0;
569  d->flushed = 1;
570  d->next_pts = d->start_pts;
571  d->next_pts_tb = d->start_pts_tb;
572  }
573  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
574  av_free_packet(&d->pkt);
575  d->pkt_temp = d->pkt = pkt;
576  d->packet_pending = 1;
577  }
578 
579  switch (d->avctx->codec_type) {
580  case AVMEDIA_TYPE_VIDEO:
581  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
582  if (got_frame) {
583  if (decoder_reorder_pts == -1) {
584  frame->pts = av_frame_get_best_effort_timestamp(frame);
585  } else if (decoder_reorder_pts) {
586  frame->pts = frame->pkt_pts;
587  } else {
588  frame->pts = frame->pkt_dts;
589  }
590  }
591  break;
592  case AVMEDIA_TYPE_AUDIO:
593  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
594  if (got_frame) {
595  AVRational tb = (AVRational){1, frame->sample_rate};
596  if (frame->pts != AV_NOPTS_VALUE)
597  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
598  else if (frame->pkt_pts != AV_NOPTS_VALUE)
599  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
600  else if (d->next_pts != AV_NOPTS_VALUE)
601  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
602  if (frame->pts != AV_NOPTS_VALUE) {
603  d->next_pts = frame->pts + frame->nb_samples;
604  d->next_pts_tb = tb;
605  }
606  }
607  break;
608  case AVMEDIA_TYPE_SUBTITLE:
609  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
610  break;
611  }
612 
613  if (ret < 0) {
614  d->packet_pending = 0;
615  } else {
616  d->pkt_temp.dts =
617  d->pkt_temp.pts = AV_NOPTS_VALUE;
618  if (d->pkt_temp.data) {
619  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
620  ret = d->pkt_temp.size;
621  d->pkt_temp.data += ret;
622  d->pkt_temp.size -= ret;
623  if (d->pkt_temp.size <= 0)
624  d->packet_pending = 0;
625  } else {
626  if (!got_frame) {
627  d->packet_pending = 0;
628  d->finished = d->pkt_serial;
629  }
630  }
631  }
632  } while (!got_frame && !d->finished);
633 
634  return got_frame;
635 }
636 
637 static void decoder_destroy(Decoder *d) {
638  av_free_packet(&d->pkt);
639 }
640 
641 static void frame_queue_unref_item(Frame *vp)
642 {
643  av_frame_unref(vp->frame);
644  avsubtitle_free(&vp->sub);
645 }
646 
647 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
648 {
649  int i;
650  memset(f, 0, sizeof(FrameQueue));
651  if (!(f->mutex = SDL_CreateMutex()))
652  return AVERROR(ENOMEM);
653  if (!(f->cond = SDL_CreateCond()))
654  return AVERROR(ENOMEM);
655  f->pktq = pktq;
656  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
657  f->keep_last = !!keep_last;
658  for (i = 0; i < f->max_size; i++)
659  if (!(f->queue[i].frame = av_frame_alloc()))
660  return AVERROR(ENOMEM);
661  return 0;
662 }
663 
665 {
666  int i;
667  for (i = 0; i < f->max_size; i++) {
668  Frame *vp = &f->queue[i];
669  frame_queue_unref_item(vp);
670  av_frame_free(&vp->frame);
671  free_picture(vp);
672  }
673  SDL_DestroyMutex(f->mutex);
674  SDL_DestroyCond(f->cond);
675 }
676 
677 static void frame_queue_signal(FrameQueue *f)
678 {
679  SDL_LockMutex(f->mutex);
680  SDL_CondSignal(f->cond);
681  SDL_UnlockMutex(f->mutex);
682 }
683 
684 static Frame *frame_queue_peek(FrameQueue *f)
685 {
686  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
687 }
688 
689 static Frame *frame_queue_peek_next(FrameQueue *f)
690 {
691  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
692 }
693 
694 static Frame *frame_queue_peek_last(FrameQueue *f)
695 {
696  return &f->queue[f->rindex];
697 }
698 
699 static Frame *frame_queue_peek_writable(FrameQueue *f)
700 {
701  /* wait until we have space to put a new frame */
702  SDL_LockMutex(f->mutex);
703  while (f->size >= f->max_size &&
704  !f->pktq->abort_request) {
705  SDL_CondWait(f->cond, f->mutex);
706  }
707  SDL_UnlockMutex(f->mutex);
708 
709  if (f->pktq->abort_request)
710  return NULL;
711 
712  return &f->queue[f->windex];
713 }
714 
715 static Frame *frame_queue_peek_readable(FrameQueue *f)
716 {
717  /* wait until we have a readable new frame */
718  SDL_LockMutex(f->mutex);
719  while (f->size - f->rindex_shown <= 0 &&
720  !f->pktq->abort_request) {
721  SDL_CondWait(f->cond, f->mutex);
722  }
723  SDL_UnlockMutex(f->mutex);
724 
725  if (f->pktq->abort_request)
726  return NULL;
727 
728  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
729 }
730 
731 static void frame_queue_push(FrameQueue *f)
732 {
733  if (++f->windex == f->max_size)
734  f->windex = 0;
735  SDL_LockMutex(f->mutex);
736  f->size++;
737  SDL_CondSignal(f->cond);
738  SDL_UnlockMutex(f->mutex);
739 }
740 
741 static void frame_queue_next(FrameQueue *f)
742 {
743  if (f->keep_last && !f->rindex_shown) {
744  f->rindex_shown = 1;
745  return;
746  }
747  frame_queue_unref_item(&f->queue[f->rindex]);
748  if (++f->rindex == f->max_size)
749  f->rindex = 0;
750  SDL_LockMutex(f->mutex);
751  f->size--;
752  SDL_CondSignal(f->cond);
753  SDL_UnlockMutex(f->mutex);
754 }
755 
756 /* jump back to the previous frame if available by resetting rindex_shown */
757 static int frame_queue_prev(FrameQueue *f)
758 {
759  int ret = f->rindex_shown;
760  f->rindex_shown = 0;
761  return ret;
762 }
763 
764 /* return the number of undisplayed frames in the queue */
765 static int frame_queue_nb_remaining(FrameQueue *f)
766 {
767  return f->size - f->rindex_shown;
768 }
769 
770 /* return last shown position */
771 static int64_t frame_queue_last_pos(FrameQueue *f)
772 {
773  Frame *fp = &f->queue[f->rindex];
774  if (f->rindex_shown && fp->serial == f->pktq->serial)
775  return fp->pos;
776  else
777  return -1;
778 }
779 
780 static inline void fill_rectangle(SDL_Surface *screen,
781  int x, int y, int w, int h, int color, int update)
782 {
783  SDL_Rect rect;
784  rect.x = x;
785  rect.y = y;
786  rect.w = w;
787  rect.h = h;
788  SDL_FillRect(screen, &rect, color);
789  if (update && w > 0 && h > 0)
790  SDL_UpdateRect(screen, x, y, w, h);
791 }
792 
793 /* draw only the border of a rectangle */
794 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
795 {
796  int w1, w2, h1, h2;
797 
798  /* fill the background */
799  w1 = x;
800  if (w1 < 0)
801  w1 = 0;
802  w2 = width - (x + w);
803  if (w2 < 0)
804  w2 = 0;
805  h1 = y;
806  if (h1 < 0)
807  h1 = 0;
808  h2 = height - (y + h);
809  if (h2 < 0)
810  h2 = 0;
811  fill_rectangle(screen,
812  xleft, ytop,
813  w1, height,
814  color, update);
815  fill_rectangle(screen,
816  xleft + width - w2, ytop,
817  w2, height,
818  color, update);
819  fill_rectangle(screen,
820  xleft + w1, ytop,
821  width - w1 - w2, h1,
822  color, update);
823  fill_rectangle(screen,
824  xleft + w1, ytop + height - h2,
825  width - w1 - w2, h2,
826  color, update);
827 }
828 
829 #define ALPHA_BLEND(a, oldp, newp, s)\
830 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
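/* ALPHA_BLEND mixes newp into oldp with alpha a (0..255). The shift s is used
 * when newp is the sum of 2^s accumulated chroma samples, so dividing by
 * (255 << s) blends and averages in one step. With s = 0 and a = 128 the
 * result is roughly (oldp + newp) / 2. */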
831 
832 #define RGBA_IN(r, g, b, a, s)\
833 {\
834  unsigned int v = ((const uint32_t *)(s))[0];\
835  a = (v >> 24) & 0xff;\
836  r = (v >> 16) & 0xff;\
837  g = (v >> 8) & 0xff;\
838  b = v & 0xff;\
839 }
840 
841 #define YUVA_IN(y, u, v, a, s, pal)\
842 {\
843  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
844  a = (val >> 24) & 0xff;\
845  y = (val >> 16) & 0xff;\
846  u = (val >> 8) & 0xff;\
847  v = val & 0xff;\
848 }
849 
850 #define YUVA_OUT(d, y, u, v, a)\
851 {\
852  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
853 }
854 
855 
856 #define BPP 1
857 
858 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
859 {
860  int wrap, wrap3, width2, skip2;
861  int y, u, v, a, u1, v1, a1, w, h;
862  uint8_t *lum, *cb, *cr;
863  const uint8_t *p;
864  const uint32_t *pal;
865  int dstx, dsty, dstw, dsth;
866 
867  dstw = av_clip(rect->w, 0, imgw);
868  dsth = av_clip(rect->h, 0, imgh);
869  dstx = av_clip(rect->x, 0, imgw - dstw);
870  dsty = av_clip(rect->y, 0, imgh - dsth);
871  lum = dst->data[0] + dsty * dst->linesize[0];
872  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
873  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
874 
875  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
876  skip2 = dstx >> 1;
877  wrap = dst->linesize[0];
878  wrap3 = rect->pict.linesize[0];
879  p = rect->pict.data[0];
880  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
881 
882  if (dsty & 1) {
883  lum += dstx;
884  cb += skip2;
885  cr += skip2;
886 
887  if (dstx & 1) {
888  YUVA_IN(y, u, v, a, p, pal);
889  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
890  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
891  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
892  cb++;
893  cr++;
894  lum++;
895  p += BPP;
896  }
897  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
898  YUVA_IN(y, u, v, a, p, pal);
899  u1 = u;
900  v1 = v;
901  a1 = a;
902  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
903 
904  YUVA_IN(y, u, v, a, p + BPP, pal);
905  u1 += u;
906  v1 += v;
907  a1 += a;
908  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
909  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
910  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
911  cb++;
912  cr++;
913  p += 2 * BPP;
914  lum += 2;
915  }
916  if (w) {
917  YUVA_IN(y, u, v, a, p, pal);
918  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
919  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
920  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
921  p++;
922  lum++;
923  }
924  p += wrap3 - dstw * BPP;
925  lum += wrap - dstw - dstx;
926  cb += dst->linesize[1] - width2 - skip2;
927  cr += dst->linesize[2] - width2 - skip2;
928  }
929  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
930  lum += dstx;
931  cb += skip2;
932  cr += skip2;
933 
934  if (dstx & 1) {
935  YUVA_IN(y, u, v, a, p, pal);
936  u1 = u;
937  v1 = v;
938  a1 = a;
939  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
940  p += wrap3;
941  lum += wrap;
942  YUVA_IN(y, u, v, a, p, pal);
943  u1 += u;
944  v1 += v;
945  a1 += a;
946  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
947  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
948  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
949  cb++;
950  cr++;
951  p += -wrap3 + BPP;
952  lum += -wrap + 1;
953  }
954  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
955  YUVA_IN(y, u, v, a, p, pal);
956  u1 = u;
957  v1 = v;
958  a1 = a;
959  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
960 
961  YUVA_IN(y, u, v, a, p + BPP, pal);
962  u1 += u;
963  v1 += v;
964  a1 += a;
965  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
966  p += wrap3;
967  lum += wrap;
968 
969  YUVA_IN(y, u, v, a, p, pal);
970  u1 += u;
971  v1 += v;
972  a1 += a;
973  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
974 
975  YUVA_IN(y, u, v, a, p + BPP, pal);
976  u1 += u;
977  v1 += v;
978  a1 += a;
979  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
980 
981  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
982  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
983 
984  cb++;
985  cr++;
986  p += -wrap3 + 2 * BPP;
987  lum += -wrap + 2;
988  }
989  if (w) {
990  YUVA_IN(y, u, v, a, p, pal);
991  u1 = u;
992  v1 = v;
993  a1 = a;
994  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
995  p += wrap3;
996  lum += wrap;
997  YUVA_IN(y, u, v, a, p, pal);
998  u1 += u;
999  v1 += v;
1000  a1 += a;
1001  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1002  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
1003  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
1004  cb++;
1005  cr++;
1006  p += -wrap3 + BPP;
1007  lum += -wrap + 1;
1008  }
1009  p += wrap3 + (wrap3 - dstw * BPP);
1010  lum += wrap + (wrap - dstw - dstx);
1011  cb += dst->linesize[1] - width2 - skip2;
1012  cr += dst->linesize[2] - width2 - skip2;
1013  }
1014  /* handle odd height */
1015  if (h) {
1016  lum += dstx;
1017  cb += skip2;
1018  cr += skip2;
1019 
1020  if (dstx & 1) {
1021  YUVA_IN(y, u, v, a, p, pal);
1022  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1023  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1024  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1025  cb++;
1026  cr++;
1027  lum++;
1028  p += BPP;
1029  }
1030  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
1031  YUVA_IN(y, u, v, a, p, pal);
1032  u1 = u;
1033  v1 = v;
1034  a1 = a;
1035  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1036 
1037  YUVA_IN(y, u, v, a, p + BPP, pal);
1038  u1 += u;
1039  v1 += v;
1040  a1 += a;
1041  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
1042  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
1043  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
1044  cb++;
1045  cr++;
1046  p += 2 * BPP;
1047  lum += 2;
1048  }
1049  if (w) {
1050  YUVA_IN(y, u, v, a, p, pal);
1051  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1052  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1053  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1054  }
1055  }
1056 }
1057 
1058 static void free_picture(Frame *vp)
1059 {
1060  if (vp->bmp) {
1061  SDL_FreeYUVOverlay(vp->bmp);
1062  vp->bmp = NULL;
1063  }
1064 }
1065 
1066 static void calculate_display_rect(SDL_Rect *rect,
1067  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
1068  int pic_width, int pic_height, AVRational pic_sar)
1069 {
1070  float aspect_ratio;
1071  int width, height, x, y;
1072 
1073  if (pic_sar.num == 0)
1074  aspect_ratio = 0;
1075  else
1076  aspect_ratio = av_q2d(pic_sar);
1077 
1078  if (aspect_ratio <= 0.0)
1079  aspect_ratio = 1.0;
1080  aspect_ratio *= (float)pic_width / (float)pic_height;
1081 
1082  /* XXX: we suppose the screen has a 1.0 pixel ratio */
1083  height = scr_height;
1084  width = ((int)rint(height * aspect_ratio)) & ~1;
1085  if (width > scr_width) {
1086  width = scr_width;
1087  height = ((int)rint(width / aspect_ratio)) & ~1;
1088  }
1089  x = (scr_width - width) / 2;
1090  y = (scr_height - height) / 2;
1091  rect->x = scr_xleft + x;
1092  rect->y = scr_ytop + y;
1093  rect->w = FFMAX(width, 1);
1094  rect->h = FFMAX(height, 1);
1095 }
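/* Example: a 1280x720 frame with square pixels shown in a 640x480 area gives
 * aspect_ratio = 16/9; the first pass yields 852x480, which is too wide, so
 * the rect becomes 640x360 (rounded down to even) and is centered with a
 * vertical offset of 60 pixels. */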
1096 
1097 static void video_image_display(VideoState *is)
1098 {
1099  Frame *vp;
1100  Frame *sp;
1101  AVPicture pict;
1102  SDL_Rect rect;
1103  int i;
1104 
1105  vp = frame_queue_peek(&is->pictq);
1106  if (vp->bmp) {
1107  if (is->subtitle_st) {
1108  if (frame_queue_nb_remaining(&is->subpq) > 0) {
1109  sp = frame_queue_peek(&is->subpq);
1110 
1111  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
1112  SDL_LockYUVOverlay (vp->bmp);
1113 
1114  pict.data[0] = vp->bmp->pixels[0];
1115  pict.data[1] = vp->bmp->pixels[2];
1116  pict.data[2] = vp->bmp->pixels[1];
1117 
1118  pict.linesize[0] = vp->bmp->pitches[0];
1119  pict.linesize[1] = vp->bmp->pitches[2];
1120  pict.linesize[2] = vp->bmp->pitches[1];
1121 
1122  for (i = 0; i < sp->sub.num_rects; i++)
1123  blend_subrect(&pict, sp->sub.rects[i],
1124  vp->bmp->w, vp->bmp->h);
1125 
1126  SDL_UnlockYUVOverlay (vp->bmp);
1127  }
1128  }
1129  }
1130 
1131  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1132 
1133  SDL_DisplayYUVOverlay(vp->bmp, &rect);
1134 
1135  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
1136  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1137  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
1138  is->last_display_rect = rect;
1139  }
1140  }
1141 }
1142 
1143 static inline int compute_mod(int a, int b)
1144 {
1145  return a < 0 ? a%b + b : a%b;
1146 }
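/* compute_mod() maps a into [0, b) even for negative a: in C, -3 % 10 == -3,
 * so compute_mod(-3, 10) returns 7. It is used to index the circular
 * sample_array below. */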
1147 
1148 static void video_audio_display(VideoState *s)
1149 {
1150  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1151  int ch, channels, h, h2, bgcolor, fgcolor;
1152  int64_t time_diff;
1153  int rdft_bits, nb_freq;
1154 
1155  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1156  ;
1157  nb_freq = 1 << (rdft_bits - 1);
1158 
1159  /* compute display index : center on currently output samples */
1160  channels = s->audio_tgt.channels;
1161  nb_display_channels = channels;
1162  if (!s->paused) {
1163  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1164  n = 2 * channels;
1165  delay = s->audio_write_buf_size;
1166  delay /= n;
1167 
1168  /* to be more precise, we take into account the time spent since
1169  the last buffer computation */
1170  if (audio_callback_time) {
1171  time_diff = av_gettime_relative() - audio_callback_time;
1172  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1173  }
1174 
1175  delay += 2 * data_used;
1176  if (delay < data_used)
1177  delay = data_used;
1178 
1179  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1180  if (s->show_mode == SHOW_MODE_WAVES) {
1181  h = INT_MIN;
1182  for (i = 0; i < 1000; i += channels) {
1183  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1184  int a = s->sample_array[idx];
1185  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1186  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1187  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1188  int score = a - d;
1189  if (h < score && (b ^ c) < 0) {
1190  h = score;
1191  i_start = idx;
1192  }
1193  }
1194  }
1195 
1196  s->last_i_start = i_start;
1197  } else {
1198  i_start = s->last_i_start;
1199  }
1200 
1201  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1202  if (s->show_mode == SHOW_MODE_WAVES) {
1203  fill_rectangle(screen,
1204  s->xleft, s->ytop, s->width, s->height,
1205  bgcolor, 0);
1206 
1207  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1208 
1209  /* total height for one channel */
1210  h = s->height / nb_display_channels;
1211  /* graph height / 2 */
1212  h2 = (h * 9) / 20;
1213  for (ch = 0; ch < nb_display_channels; ch++) {
1214  i = i_start + ch;
1215  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1216  for (x = 0; x < s->width; x++) {
1217  y = (s->sample_array[i] * h2) >> 15;
1218  if (y < 0) {
1219  y = -y;
1220  ys = y1 - y;
1221  } else {
1222  ys = y1;
1223  }
1224  fill_rectangle(screen,
1225  s->xleft + x, ys, 1, y,
1226  fgcolor, 0);
1227  i += channels;
1228  if (i >= SAMPLE_ARRAY_SIZE)
1229  i -= SAMPLE_ARRAY_SIZE;
1230  }
1231  }
1232 
1233  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1234 
1235  for (ch = 1; ch < nb_display_channels; ch++) {
1236  y = s->ytop + ch * h;
1237  fill_rectangle(screen,
1238  s->xleft, y, s->width, 1,
1239  fgcolor, 0);
1240  }
1241  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1242  } else {
1243  nb_display_channels= FFMIN(nb_display_channels, 2);
1244  if (rdft_bits != s->rdft_bits) {
1245  av_rdft_end(s->rdft);
1246  av_free(s->rdft_data);
1247  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1248  s->rdft_bits = rdft_bits;
1249  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1250  }
1251  {
1252  FFTSample *data[2];
1253  for (ch = 0; ch < nb_display_channels; ch++) {
1254  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1255  i = i_start + ch;
1256  for (x = 0; x < 2 * nb_freq; x++) {
1257  double w = (x-nb_freq) * (1.0 / nb_freq);
1258  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1259  i += channels;
1260  if (i >= SAMPLE_ARRAY_SIZE)
1261  i -= SAMPLE_ARRAY_SIZE;
1262  }
1263  av_rdft_calc(s->rdft, data[ch]);
1264  }
1265  /* Least efficient way to do this, we should of course
1266  * directly access it but it is more than fast enough. */
1267  for (y = 0; y < s->height; y++) {
1268  double w = 1 / sqrt(nb_freq);
1269  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1270  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1271  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1272  a = FFMIN(a, 255);
1273  b = FFMIN(b, 255);
1274  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1275 
1276  fill_rectangle(screen,
1277  s->xpos, s->height-y, 1, 1,
1278  fgcolor, 0);
1279  }
1280  }
1281  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1282  if (!s->paused)
1283  s->xpos++;
1284  if (s->xpos >= s->width)
1285  s->xpos= s->xleft;
1286  }
1287 }
1288 
1289 static void stream_close(VideoState *is)
1290 {
1291  /* XXX: use a special url_shutdown call to abort parse cleanly */
1292  is->abort_request = 1;
1293  SDL_WaitThread(is->read_tid, NULL);
1294  packet_queue_destroy(&is->videoq);
1295  packet_queue_destroy(&is->audioq);
1296  packet_queue_destroy(&is->subtitleq);
1297 
1298  /* free all pictures */
1299  frame_queue_destory(&is->pictq);
1300  frame_queue_destory(&is->sampq);
1301  frame_queue_destory(&is->subpq);
1302  SDL_DestroyCond(is->continue_read_thread);
1303 #if !CONFIG_AVFILTER
1304  sws_freeContext(is->img_convert_ctx);
1305 #endif
1306  av_free(is);
1307 }
1308 
1309 static void do_exit(VideoState *is)
1310 {
1311  if (is) {
1312  stream_close(is);
1313  }
1314  av_lockmgr_register(NULL);
1315  uninit_opts();
1316 #if CONFIG_AVFILTER
1317  av_freep(&vfilters_list);
1318 #endif
1319  avformat_network_deinit();
1320  if (show_status)
1321  printf("\n");
1322  SDL_Quit();
1323  av_log(NULL, AV_LOG_QUIET, "%s", "");
1324  exit(0);
1325 }
1326 
1327 static void sigterm_handler(int sig)
1328 {
1329  exit(123);
1330 }
1331 
1332 static void set_default_window_size(int width, int height, AVRational sar)
1333 {
1334  SDL_Rect rect;
1335  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1336  default_width = rect.w;
1337  default_height = rect.h;
1338 }
1339 
1340 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1341 {
1342  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1343  int w,h;
1344 
1345  if (is_full_screen) flags |= SDL_FULLSCREEN;
1346  else flags |= SDL_RESIZABLE;
1347 
1348  if (vp && vp->width)
1349  set_default_window_size(vp->width, vp->height, vp->sar);
1350 
1351  if (is_full_screen && fs_screen_width) {
1352  w = fs_screen_width;
1353  h = fs_screen_height;
1354  } else if (!is_full_screen && screen_width) {
1355  w = screen_width;
1356  h = screen_height;
1357  } else {
1358  w = default_width;
1359  h = default_height;
1360  }
1361  w = FFMIN(16383, w);
1362  if (screen && is->width == screen->w && screen->w == w
1363  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1364  return 0;
1365  screen = SDL_SetVideoMode(w, h, 0, flags);
1366  if (!screen) {
1367  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1368  do_exit(is);
1369  }
1370  if (!window_title)
1371  window_title = input_filename;
1372  SDL_WM_SetCaption(window_title, window_title);
1373 
1374  is->width = screen->w;
1375  is->height = screen->h;
1376 
1377  return 0;
1378 }
1379 
1380 /* display the current picture, if any */
1381 static void video_display(VideoState *is)
1382 {
1383  if (!screen)
1384  video_open(is, 0, NULL);
1385  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1386  video_audio_display(is);
1387  else if (is->video_st)
1388  video_image_display(is);
1389 }
1390 
1391 static double get_clock(Clock *c)
1392 {
1393  if (*c->queue_serial != c->serial)
1394  return NAN;
1395  if (c->paused) {
1396  return c->pts;
1397  } else {
1398  double time = av_gettime_relative() / 1000000.0;
1399  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1400  }
1401 }
1402 
1403 static void set_clock_at(Clock *c, double pts, int serial, double time)
1404 {
1405  c->pts = pts;
1406  c->last_updated = time;
1407  c->pts_drift = c->pts - time;
1408  c->serial = serial;
1409 }
1410 
1411 static void set_clock(Clock *c, double pts, int serial)
1412 {
1413  double time = av_gettime_relative() / 1000000.0;
1414  set_clock_at(c, pts, serial, time);
1415 }
1416 
1417 static void set_clock_speed(Clock *c, double speed)
1418 {
1419  set_clock(c, get_clock(c), c->serial);
1420  c->speed = speed;
1421 }
1422 
1423 static void init_clock(Clock *c, int *queue_serial)
1424 {
1425  c->speed = 1.0;
1426  c->paused = 0;
1427  c->queue_serial = queue_serial;
1428  set_clock(c, NAN, -1);
1429 }
1430 
1431 static void sync_clock_to_slave(Clock *c, Clock *slave)
1432 {
1433  double clock = get_clock(c);
1434  double slave_clock = get_clock(slave);
1435  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1436  set_clock(c, slave_clock, slave->serial);
1437 }
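/* sync_clock_to_slave() only snaps the master clock to the slave when the
 * slave has a valid value and the two differ by more than AV_NOSYNC_THRESHOLD
 * (10 seconds); smaller differences are left to the regular sync correction. */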
1438 
1439 static int get_master_sync_type(VideoState *is) {
1440  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1441  if (is->video_st)
1442  return AV_SYNC_VIDEO_MASTER;
1443  else
1444  return AV_SYNC_AUDIO_MASTER;
1445  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1446  if (is->audio_st)
1447  return AV_SYNC_AUDIO_MASTER;
1448  else
1449  return AV_SYNC_EXTERNAL_CLOCK;
1450  } else {
1451  return AV_SYNC_EXTERNAL_CLOCK;
1452  }
1453 }
1454 
1455 /* get the current master clock value */
1456 static double get_master_clock(VideoState *is)
1457 {
1458  double val;
1459 
1460  switch (get_master_sync_type(is)) {
1461  case AV_SYNC_VIDEO_MASTER:
1462  val = get_clock(&is->vidclk);
1463  break;
1464  case AV_SYNC_AUDIO_MASTER:
1465  val = get_clock(&is->audclk);
1466  break;
1467  default:
1468  val = get_clock(&is->extclk);
1469  break;
1470  }
1471  return val;
1472 }
1473 
1474 static void check_external_clock_speed(VideoState *is) {
1475  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1476  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1477  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1478  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1479  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1480  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1481  } else {
1482  double speed = is->extclk.speed;
1483  if (speed != 1.0)
1484  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1485  }
1486 }
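/* Rationale: with a realtime source synced to the external clock the sender
 * cannot be paused, so buffer fullness is regulated by nudging the playback
 * clock instead: run slightly slower while the packet queues are nearly empty,
 * slightly faster while they are well filled, and drift back toward 1.0
 * otherwise. */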
1487 
1488 /* seek in the stream */
1489 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1490 {
1491  if (!is->seek_req) {
1492  is->seek_pos = pos;
1493  is->seek_rel = rel;
1494  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1495  if (seek_by_bytes)
1496  is->seek_flags |= AVSEEK_FLAG_BYTE;
1497  is->seek_req = 1;
1498  SDL_CondSignal(is->continue_read_thread);
1499  }
1500 }
1501 
1502 /* pause or resume the video */
1503 static void stream_toggle_pause(VideoState *is)
1504 {
1505  if (is->paused) {
1506  is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1507  if (is->read_pause_return != AVERROR(ENOSYS)) {
1508  is->vidclk.paused = 0;
1509  }
1510  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1511  }
1512  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1513  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1514 }
1515 
1516 static void toggle_pause(VideoState *is)
1517 {
1518  stream_toggle_pause(is);
1519  is->step = 0;
1520 }
1521 
1522 static void step_to_next_frame(VideoState *is)
1523 {
1524  /* if the stream is paused unpause it, then step */
1525  if (is->paused)
1526  stream_toggle_pause(is);
1527  is->step = 1;
1528 }
1529 
1530 static double compute_target_delay(double delay, VideoState *is)
1531 {
1532  double sync_threshold, diff;
1533 
1534  /* update delay to follow master synchronisation source */
1535  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1536  /* if video is slave, we try to correct big delays by
1537  duplicating or deleting a frame */
1538  diff = get_clock(&is->vidclk) - get_master_clock(is);
1539 
1540  /* skip or repeat frame. We take into account the
1541  delay to compute the threshold. I still don't know
1542  if it is the best guess */
1543  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1544  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1545  if (diff <= -sync_threshold)
1546  delay = FFMAX(0, delay + diff);
1547  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1548  delay = delay + diff;
1549  else if (diff >= sync_threshold)
1550  delay = 2 * delay;
1551  }
1552  }
1553 
1554  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1555  delay, -diff);
1556 
1557  return delay;
1558 }
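/* Worked example: at 25 fps the nominal delay is 0.040 s, so sync_threshold is
 * clamped to 0.040. If the video clock is 0.08 s ahead of the master clock,
 * diff >= sync_threshold and delay <= AV_SYNC_FRAMEDUP_THRESHOLD, so the delay
 * doubles to 0.080 s, holding the current frame for one extra period. If the
 * video were 0.08 s behind instead, the delay would shrink to 0. */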
1559 
1560 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1561  if (vp->serial == nextvp->serial) {
1562  double duration = nextvp->pts - vp->pts;
1563  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1564  return vp->duration;
1565  else
1566  return duration;
1567  } else {
1568  return 0.0;
1569  }
1570 }
1571 
1572 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1573  /* update current video pts */
1574  set_clock(&is->vidclk, pts, serial);
1575  sync_clock_to_slave(&is->extclk, &is->vidclk);
1576 }
1577 
1578 /* called to display each frame */
1579 static void video_refresh(void *opaque, double *remaining_time)
1580 {
1581  VideoState *is = opaque;
1582  double time;
1583 
1584  Frame *sp, *sp2;
1585 
1586  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1587  check_external_clock_speed(is);
1588 
1589  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1590  time = av_gettime_relative() / 1000000.0;
1591  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1592  video_display(is);
1593  is->last_vis_time = time;
1594  }
1595  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1596  }
1597 
1598  if (is->video_st) {
1599  int redisplay = 0;
1600  if (is->force_refresh)
1601  redisplay = frame_queue_prev(&is->pictq);
1602 retry:
1603  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1604  // nothing to do, no picture to display in the queue
1605  } else {
1606  double last_duration, duration, delay;
1607  Frame *vp, *lastvp;
1608 
1609  /* dequeue the picture */
1610  lastvp = frame_queue_peek_last(&is->pictq);
1611  vp = frame_queue_peek(&is->pictq);
1612 
1613  if (vp->serial != is->videoq.serial) {
1614  frame_queue_next(&is->pictq);
1615  redisplay = 0;
1616  goto retry;
1617  }
1618 
1619  if (lastvp->serial != vp->serial && !redisplay)
1620  is->frame_timer = av_gettime_relative() / 1000000.0;
1621 
1622  if (is->paused)
1623  goto display;
1624 
1625  /* compute nominal last_duration */
1626  last_duration = vp_duration(is, lastvp, vp);
1627  if (redisplay)
1628  delay = 0.0;
1629  else
1630  delay = compute_target_delay(last_duration, is);
1631 
1632  time= av_gettime_relative()/1000000.0;
1633  if (time < is->frame_timer + delay && !redisplay) {
1634  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1635  return;
1636  }
1637 
1638  is->frame_timer += delay;
1639  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1640  is->frame_timer = time;
1641 
1642  SDL_LockMutex(is->pictq.mutex);
1643  if (!redisplay && !isnan(vp->pts))
1644  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1645  SDL_UnlockMutex(is->pictq.mutex);
1646 
1647  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1648  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1649  duration = vp_duration(is, vp, nextvp);
1650  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1651  if (!redisplay)
1652  is->frame_drops_late++;
1653  frame_queue_next(&is->pictq);
1654  redisplay = 0;
1655  goto retry;
1656  }
1657  }
1658 
1659  if (is->subtitle_st) {
1660  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1661  sp = frame_queue_peek(&is->subpq);
1662 
1663  if (frame_queue_nb_remaining(&is->subpq) > 1)
1664  sp2 = frame_queue_peek_next(&is->subpq);
1665  else
1666  sp2 = NULL;
1667 
1668  if (sp->serial != is->subtitleq.serial
1669  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1670  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1671  {
1672  frame_queue_next(&is->subpq);
1673  } else {
1674  break;
1675  }
1676  }
1677  }
1678 
1679 display:
1680  /* display picture */
1681  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1682  video_display(is);
1683 
1684  frame_queue_next(&is->pictq);
1685 
1686  if (is->step && !is->paused)
1687  stream_toggle_pause(is);
1688  }
1689  }
1690  is->force_refresh = 0;
1691  if (show_status) {
1692  static int64_t last_time;
1693  int64_t cur_time;
1694  int aqsize, vqsize, sqsize;
1695  double av_diff;
1696 
1697  cur_time = av_gettime_relative();
1698  if (!last_time || (cur_time - last_time) >= 30000) {
1699  aqsize = 0;
1700  vqsize = 0;
1701  sqsize = 0;
1702  if (is->audio_st)
1703  aqsize = is->audioq.size;
1704  if (is->video_st)
1705  vqsize = is->videoq.size;
1706  if (is->subtitle_st)
1707  sqsize = is->subtitleq.size;
1708  av_diff = 0;
1709  if (is->audio_st && is->video_st)
1710  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1711  else if (is->video_st)
1712  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1713  else if (is->audio_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1715  av_log(NULL, AV_LOG_INFO,
1716  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1717  get_master_clock(is),
1718  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1719  av_diff,
1720  is->frame_drops_early + is->frame_drops_late,
1721  aqsize / 1024,
1722  vqsize / 1024,
1723  sqsize,
1724  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1725  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1726  fflush(stdout);
1727  last_time = cur_time;
1728  }
1729  }
1730 }
1731 
1732 /* allocate a picture (this needs to be done in the main thread to avoid
1733  potential locking problems) */
1734 static void alloc_picture(VideoState *is)
1735 {
1736  Frame *vp;
1737  int64_t bufferdiff;
1738 
1739  vp = &is->pictq.queue[is->pictq.windex];
1740 
1741  free_picture(vp);
1742 
1743  video_open(is, 0, vp);
1744 
1745  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1746  SDL_YV12_OVERLAY,
1747  screen);
1748  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1749  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1750  /* SDL allocates a buffer smaller than requested if the video
1751  * overlay hardware is unable to support the requested size. */
1752  av_log(NULL, AV_LOG_FATAL,
1753  "Error: the video system does not support an image\n"
1754  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1755  "to reduce the image size.\n", vp->width, vp->height );
1756  do_exit(is);
1757  }
1758 
1759  SDL_LockMutex(is->pictq.mutex);
1760  vp->allocated = 1;
1761  SDL_CondSignal(is->pictq.cond);
1762  SDL_UnlockMutex(is->pictq.mutex);
1763 }
1764 
1765 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1766  int i, width, height;
1767  Uint8 *p, *maxp;
1768  for (i = 0; i < 3; i++) {
1769  width = bmp->w;
1770  height = bmp->h;
1771  if (i > 0) {
1772  width >>= 1;
1773  height >>= 1;
1774  }
1775  if (bmp->pitches[i] > width) {
1776  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1777  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1778  *(p+1) = *p;
1779  }
1780  }
1781 }
1782 
1783 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1784 {
1785  Frame *vp;
1786 
1787 #if defined(DEBUG_SYNC) && 0
1788  printf("frame_type=%c pts=%0.3f\n",
1789  av_get_picture_type_char(src_frame->pict_type), pts);
1790 #endif
1791 
1792  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1793  return -1;
1794 
1795  vp->sar = src_frame->sample_aspect_ratio;
1796 
1797  /* alloc or resize hardware picture buffer */
1798  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1799  vp->width != src_frame->width ||
1800  vp->height != src_frame->height) {
1801  SDL_Event event;
1802 
1803  vp->allocated = 0;
1804  vp->reallocate = 0;
1805  vp->width = src_frame->width;
1806  vp->height = src_frame->height;
1807 
1808  /* the allocation must be done in the main thread to avoid
1809  locking problems. */
1810  event.type = FF_ALLOC_EVENT;
1811  event.user.data1 = is;
1812  SDL_PushEvent(&event);
1813 
1814  /* wait until the picture is allocated */
1815  SDL_LockMutex(is->pictq.mutex);
1816  while (!vp->allocated && !is->videoq.abort_request) {
1817  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1818  }
1819  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1820  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1821  while (!vp->allocated && !is->abort_request) {
1822  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1823  }
1824  }
1825  SDL_UnlockMutex(is->pictq.mutex);
1826 
1827  if (is->videoq.abort_request)
1828  return -1;
1829  }
1830 
1831  /* if the frame is not skipped, then display it */
1832  if (vp->bmp) {
1833  AVPicture pict = { { 0 } };
1834 
1835  /* get a pointer on the bitmap */
1836  SDL_LockYUVOverlay (vp->bmp);
1837 
1838  pict.data[0] = vp->bmp->pixels[0];
1839  pict.data[1] = vp->bmp->pixels[2];
1840  pict.data[2] = vp->bmp->pixels[1];
1841 
1842  pict.linesize[0] = vp->bmp->pitches[0];
1843  pict.linesize[1] = vp->bmp->pitches[2];
1844  pict.linesize[2] = vp->bmp->pitches[1];
1845 
1846 #if CONFIG_AVFILTER
1847  // FIXME use direct rendering
1848  av_picture_copy(&pict, (AVPicture *)src_frame,
1849  src_frame->format, vp->width, vp->height);
1850 #else
1851  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1852  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1853  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1854  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1855  if (!is->img_convert_ctx) {
1856  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1857  exit(1);
1858  }
1859  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1860  0, vp->height, pict.data, pict.linesize);
1861 #endif
1862  /* workaround SDL PITCH_WORKAROUND */
1863  duplicate_right_border_pixels(vp->bmp);
1864  /* update the bitmap content */
1865  SDL_UnlockYUVOverlay(vp->bmp);
1866 
1867  vp->pts = pts;
1868  vp->duration = duration;
1869  vp->pos = pos;
1870  vp->serial = serial;
1871 
1872  /* now we can update the picture count */
1873  frame_queue_push(&is->pictq);
1874  }
1875  return 0;
1876 }
1877 
1878 static int get_video_frame(VideoState *is, AVFrame *frame)
1879 {
1880  int got_picture;
1881 
1882  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1883  return -1;
1884 
1885  if (got_picture) {
1886  double dpts = NAN;
1887 
1888  if (frame->pts != AV_NOPTS_VALUE)
1889  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1890 
1891  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1892 
1893  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1894  if (frame->pts != AV_NOPTS_VALUE) {
1895  double diff = dpts - get_master_clock(is);
1896  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1897  diff - is->frame_last_filter_delay < 0 &&
1898  is->viddec.pkt_serial == is->vidclk.serial &&
1899  is->videoq.nb_packets) {
1900  is->frame_drops_early++;
1901  av_frame_unref(frame);
1902  got_picture = 0;
1903  }
1904  }
1905  }
1906  }
1907 
1908  return got_picture;
1909 }
1910 
1911 #if CONFIG_AVFILTER
1912 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1913  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1914 {
1915  int ret, i;
1916  int nb_filters = graph->nb_filters;
1917  AVFilterInOut *outputs = NULL, *inputs = NULL;
1918 
1919  if (filtergraph) {
1920  outputs = avfilter_inout_alloc();
1921  inputs = avfilter_inout_alloc();
1922  if (!outputs || !inputs) {
1923  ret = AVERROR(ENOMEM);
1924  goto fail;
1925  }
1926 
1927  outputs->name = av_strdup("in");
1928  outputs->filter_ctx = source_ctx;
1929  outputs->pad_idx = 0;
1930  outputs->next = NULL;
1931 
1932  inputs->name = av_strdup("out");
1933  inputs->filter_ctx = sink_ctx;
1934  inputs->pad_idx = 0;
1935  inputs->next = NULL;
1936 
1937  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1938  goto fail;
1939  } else {
1940  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1941  goto fail;
1942  }
1943 
1944  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1945  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1946  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1947 
1948  ret = avfilter_graph_config(graph, NULL);
1949 fail:
1950  avfilter_inout_free(&outputs);
1951  avfilter_inout_free(&inputs);
1952  return ret;
1953 }
1954 
1955 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1956 {
1957  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1958  char sws_flags_str[128];
1959  char buffersrc_args[256];
1960  int ret;
1961  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1962  AVCodecContext *codec = is->video_st->codec;
1963  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1964 
1965  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1966  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1967  graph->scale_sws_opts = av_strdup(sws_flags_str);
1968 
1969  snprintf(buffersrc_args, sizeof(buffersrc_args),
1970  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1971  frame->width, frame->height, frame->format,
1972  is->video_st->time_base.num, is->video_st->time_base.den,
1973  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1974  if (fr.num && fr.den)
1975  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1976 
1977  if ((ret = avfilter_graph_create_filter(&filt_src,
1978  avfilter_get_by_name("buffer"),
1979  "ffplay_buffer", buffersrc_args, NULL,
1980  graph)) < 0)
1981  goto fail;
1982 
1983  ret = avfilter_graph_create_filter(&filt_out,
1984  avfilter_get_by_name("buffersink"),
1985  "ffplay_buffersink", NULL, NULL, graph);
1986  if (ret < 0)
1987  goto fail;
1988 
1989  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1990  goto fail;
1991 
1992  last_filter = filt_out;
1993 
1994 /* Note: this macro adds a filter before the last added filter, so the
1995  * processing order of the filters is reversed */
1996 #define INSERT_FILT(name, arg) do { \
1997  AVFilterContext *filt_ctx; \
1998  \
1999  ret = avfilter_graph_create_filter(&filt_ctx, \
2000  avfilter_get_by_name(name), \
2001  "ffplay_" name, arg, NULL, graph); \
2002  if (ret < 0) \
2003  goto fail; \
2004  \
2005  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
2006  if (ret < 0) \
2007  goto fail; \
2008  \
2009  last_filter = filt_ctx; \
2010 } while (0)
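/* Example of the resulting chain: INSERT_FILT("crop", ...) followed by
 * INSERT_FILT("transpose", ...) links each new filter in front of the previous
 * one, so frames flow buffer -> (user vfilters) -> transpose -> crop -> buffersink;
 * the last filter inserted here is the first one to process a frame. */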
2011 
2012  /* The SDL YUV code does not handle odd width/height for some driver
2013  * combinations, therefore we crop the picture to an even width/height. */
2014  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
2015 
2016  if (autorotate) {
2017  AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
2018  if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
2019  if (!strcmp(rotate_tag->value, "90")) {
2020  INSERT_FILT("transpose", "clock");
2021  } else if (!strcmp(rotate_tag->value, "180")) {
2022  INSERT_FILT("hflip", NULL);
2023  INSERT_FILT("vflip", NULL);
2024  } else if (!strcmp(rotate_tag->value, "270")) {
2025  INSERT_FILT("transpose", "cclock");
2026  } else {
2027  char rotate_buf[64];
2028  snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
2029  INSERT_FILT("rotate", rotate_buf);
2030  }
2031  }
2032  }
2033 
2034  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2035  goto fail;
2036 
2037  is->in_video_filter = filt_src;
2038  is->out_video_filter = filt_out;
2039 
2040 fail:
2041  return ret;
2042 }
2043 
2044 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2045 {
2046  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
2047  int sample_rates[2] = { 0, -1 };
2048  int64_t channel_layouts[2] = { 0, -1 };
2049  int channels[2] = { 0, -1 };
2050  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2051  char aresample_swr_opts[512] = "";
2052  AVDictionaryEntry *e = NULL;
2053  char asrc_args[256];
2054  int ret;
2055 
2056  avfilter_graph_free(&is->agraph);
2057  if (!(is->agraph = avfilter_graph_alloc()))
2058  return AVERROR(ENOMEM);
2059 
2060  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2061  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2062  if (strlen(aresample_swr_opts))
2063  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2064  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2065 
2066  ret = snprintf(asrc_args, sizeof(asrc_args),
2067  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2068  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2069  is->audio_filter_src.channels,
2070  1, is->audio_filter_src.freq);
2071  if (is->audio_filter_src.channel_layout)
2072  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2073  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2074 
2075  ret = avfilter_graph_create_filter(&filt_asrc,
2076  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2077  asrc_args, NULL, is->agraph);
2078  if (ret < 0)
2079  goto end;
2080 
2081 
2082  ret = avfilter_graph_create_filter(&filt_asink,
2083  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2084  NULL, NULL, is->agraph);
2085  if (ret < 0)
2086  goto end;
2087 
2088  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2089  goto end;
2090  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2091  goto end;
2092 
2093  if (force_output_format) {
2094  channel_layouts[0] = is->audio_tgt.channel_layout;
2095  channels [0] = is->audio_tgt.channels;
2096  sample_rates [0] = is->audio_tgt.freq;
2097  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2098  goto end;
2099  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2100  goto end;
2101  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2102  goto end;
2103  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2104  goto end;
2105  }
2106 
2107 
2108  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2109  goto end;
2110 
2111  is->in_audio_filter = filt_asrc;
2112  is->out_audio_filter = filt_asink;
2113 
2114 end:
2115  if (ret < 0)
2116  avfilter_graph_free(&is->agraph);
2117  return ret;
2118 }
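/* Editor's sketch (not part of ffplay.c, names are hypothetical): the graph
 * assembled above is simply  abuffer -> [user afilters] -> abuffersink, i.e.
 * roughly what  ffplay -af "atempo=1.5" input  sets up, with the abuffer
 * source described by a string such as
 *     "sample_rate=48000:sample_fmt=fltp:channels=2:time_base=1/48000"
 * (the values here are made up).  A minimal standalone formatter for that
 * source string, mirroring asrc_args above, could look like this: */
static void example_abuffer_args(char *buf, size_t size, int freq,
                                 enum AVSampleFormat fmt, int channels)
{
    /* same fields as asrc_args: rate, sample format, channel count and a
     * 1/sample_rate time base */
    snprintf(buf, size, "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
             freq, av_get_sample_fmt_name(fmt), channels, 1, freq);
}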
2119 #endif /* CONFIG_AVFILTER */
2120 
2121 static int audio_thread(void *arg)
2122 {
2123  VideoState *is = arg;
2124  AVFrame *frame = av_frame_alloc();
2125  Frame *af;
2126 #if CONFIG_AVFILTER
2127  int last_serial = -1;
2128  int64_t dec_channel_layout;
2129  int reconfigure;
2130 #endif
2131  int got_frame = 0;
2132  AVRational tb;
2133  int ret = 0;
2134 
2135  if (!frame)
2136  return AVERROR(ENOMEM);
2137 
2138  do {
2139  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2140  goto the_end;
2141 
2142  if (got_frame) {
2143  tb = (AVRational){1, frame->sample_rate};
2144 
2145 #if CONFIG_AVFILTER
2146  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2147 
2148  reconfigure =
2149  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2150  frame->format, av_frame_get_channels(frame)) ||
2151  is->audio_filter_src.channel_layout != dec_channel_layout ||
2152  is->audio_filter_src.freq != frame->sample_rate ||
2153  is->auddec.pkt_serial != last_serial;
2154 
2155  if (reconfigure) {
2156  char buf1[1024], buf2[1024];
2157  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2158  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2159  av_log(NULL, AV_LOG_DEBUG,
2160  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2161  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2162  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2163 
2164  is->audio_filter_src.fmt = frame->format;
2165  is->audio_filter_src.channels = av_frame_get_channels(frame);
2166  is->audio_filter_src.channel_layout = dec_channel_layout;
2167  is->audio_filter_src.freq = frame->sample_rate;
2168  last_serial = is->auddec.pkt_serial;
2169 
2170  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2171  goto the_end;
2172  }
2173 
2174  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2175  goto the_end;
2176 
2177  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2178  tb = is->out_audio_filter->inputs[0]->time_base;
2179 #endif
2180  if (!(af = frame_queue_peek_writable(&is->sampq)))
2181  goto the_end;
2182 
2183  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2184  af->pos = av_frame_get_pkt_pos(frame);
2185  af->serial = is->auddec.pkt_serial;
2186  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2187 
2188  av_frame_move_ref(af->frame, frame);
2189  frame_queue_push(&is->sampq);
2190 
2191 #if CONFIG_AVFILTER
2192  if (is->audioq.serial != is->auddec.pkt_serial)
2193  break;
2194  }
2195  if (ret == AVERROR_EOF)
2196  is->auddec.finished = is->auddec.pkt_serial;
2197 #endif
2198  }
2199  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2200  the_end:
2201 #if CONFIG_AVFILTER
2202  avfilter_graph_free(&is->agraph);
2203 #endif
2204  av_frame_free(&frame);
2205  return ret;
2206 }
2207 
2208 static int video_thread(void *arg)
2209 {
2210  VideoState *is = arg;
2211  AVFrame *frame = av_frame_alloc();
2212  double pts;
2213  double duration;
2214  int ret;
2215  AVRational tb = is->video_st->time_base;
2216  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2217 
2218 #if CONFIG_AVFILTER
2219  AVFilterGraph *graph = avfilter_graph_alloc();
2220  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2221  int last_w = 0;
2222  int last_h = 0;
2223  enum AVPixelFormat last_format = -2;
2224  int last_serial = -1;
2225  int last_vfilter_idx = 0;
2226 #endif
2227 
2228  for (;;) {
2229  ret = get_video_frame(is, frame);
2230  if (ret < 0)
2231  goto the_end;
2232  if (!ret)
2233  continue;
2234 
2235 #if CONFIG_AVFILTER
2236  if ( last_w != frame->width
2237  || last_h != frame->height
2238  || last_format != frame->format
2239  || last_serial != is->viddec.pkt_serial
2240  || last_vfilter_idx != is->vfilter_idx) {
2241  av_log(NULL, AV_LOG_DEBUG,
2242  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2243  last_w, last_h,
2244  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2245  frame->width, frame->height,
2246  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2247  avfilter_graph_free(&graph);
2248  graph = avfilter_graph_alloc();
2249  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2250  SDL_Event event;
2251  event.type = FF_QUIT_EVENT;
2252  event.user.data1 = is;
2253  SDL_PushEvent(&event);
2254  goto the_end;
2255  }
2256  filt_in = is->in_video_filter;
2257  filt_out = is->out_video_filter;
2258  last_w = frame->width;
2259  last_h = frame->height;
2260  last_format = frame->format;
2261  last_serial = is->viddec.pkt_serial;
2262  last_vfilter_idx = is->vfilter_idx;
2263  frame_rate = filt_out->inputs[0]->frame_rate;
2264  }
2265 
2266  ret = av_buffersrc_add_frame(filt_in, frame);
2267  if (ret < 0)
2268  goto the_end;
2269 
2270  while (ret >= 0) {
2271  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2272 
2273  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2274  if (ret < 0) {
2275  if (ret == AVERROR_EOF)
2276  is->viddec.finished = is->viddec.pkt_serial;
2277  ret = 0;
2278  break;
2279  }
2280 
2281  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2282  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2283  is->frame_last_filter_delay = 0;
2284  tb = filt_out->inputs[0]->time_base;
2285 #endif
2286  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2287  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2288  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2289  av_frame_unref(frame);
2290 #if CONFIG_AVFILTER
2291  }
2292 #endif
2293 
2294  if (ret < 0)
2295  goto the_end;
2296  }
2297  the_end:
2298 #if CONFIG_AVFILTER
2299  avfilter_graph_free(&graph);
2300 #endif
2301  av_frame_free(&frame);
2302  return 0;
2303 }
2304 
2305 static int subtitle_thread(void *arg)
2306 {
2307  VideoState *is = arg;
2308  Frame *sp;
2309  int got_subtitle;
2310  double pts;
2311  int i, j;
2312  int r, g, b, y, u, v, a;
2313 
2314  for (;;) {
2315  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2316  return 0;
2317 
2318  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2319  break;
2320 
2321  pts = 0;
2322 
2323  if (got_subtitle && sp->sub.format == 0) {
2324  if (sp->sub.pts != AV_NOPTS_VALUE)
2325  pts = sp->sub.pts / (double)AV_TIME_BASE;
2326  sp->pts = pts;
2327  sp->serial = is->subdec.pkt_serial;
2328 
2329  for (i = 0; i < sp->sub.num_rects; i++)
2330  {
2331  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2332  {
2333  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2334  y = RGB_TO_Y_CCIR(r, g, b);
2335  u = RGB_TO_U_CCIR(r, g, b, 0);
2336  v = RGB_TO_V_CCIR(r, g, b, 0);
2337  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2338  }
2339  }
2340 
2341  /* now we can update the picture count */
2342  frame_queue_push(&is->subpq);
2343  } else if (got_subtitle) {
2344  avsubtitle_free(&sp->sub);
2345  }
2346  }
2347  return 0;
2348 }
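/* Worked example (editor's note, not part of ffplay.c): RGB_TO_Y_CCIR and
 * friends implement, approximately, the BT.601 limited-range conversion
 *     Y = 16  + 0.257*R + 0.504*G + 0.098*B
 *     U = 128 - 0.148*R - 0.291*G + 0.439*B
 *     V = 128 + 0.439*R - 0.368*G - 0.071*B
 * so a pure white palette entry (R=G=B=255) maps to roughly Y=235, U=V=128;
 * the alpha value is carried through unchanged, which is why bitmap subtitle
 * palettes can later be blended directly onto the YUV overlay. */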
2349 
2350 /* copy samples into the ring buffer used by the waveform/spectrum display */
2351 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2352 {
2353  int size, len;
2354 
2355  size = samples_size / sizeof(short);
2356  while (size > 0) {
2357  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2358  if (len > size)
2359  len = size;
2360  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2361  samples += len;
2362  is->sample_array_index += len;
2363  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2364  is->sample_array_index = 0;
2365  size -= len;
2366  }
2367 }
2368 
2369 /* return the wanted number of samples to get better sync if sync_type is video
2370  * or external master clock */
2371 static int synchronize_audio(VideoState *is, int nb_samples)
2372 {
2373  int wanted_nb_samples = nb_samples;
2374 
2375  /* if not master, then we try to remove or add samples to correct the clock */
2376  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2377  double diff, avg_diff;
2378  int min_nb_samples, max_nb_samples;
2379 
2380  diff = get_clock(&is->audclk) - get_master_clock(is);
2381 
2382  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2383  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2384  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2385  /* not enough measures to have a correct estimate */
2386  is->audio_diff_avg_count++;
2387  } else {
2388  /* estimate the A-V difference */
2389  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2390 
2391  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2392  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2393  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2394  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2395  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2396  }
2397  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2398  diff, avg_diff, wanted_nb_samples - nb_samples,
2399  is->audio_clock, is->audio_diff_threshold);
2400  }
2401  } else {
2402  /* the difference is too big: it may be due to initial PTS errors, so
2403  reset the A-V filter */
2404  is->audio_diff_avg_count = 0;
2405  is->audio_diff_cum = 0;
2406  }
2407  }
2408 
2409  return wanted_nb_samples;
2410 }
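/* Worked example (editor's note, not part of ffplay.c): audio_diff_avg_coef is
 * exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794, so the running sum
 *     cum = diff + 0.794 * cum
 * weights a measurement 20 callbacks old by 0.794^20 ~= 1%, and multiplying by
 * (1 - 0.794) turns the sum into an estimate of the mean A-V difference.  If
 * that estimate exceeds audio_diff_threshold, e.g. diff = +0.015 s with 1024
 * samples per frame at 48 kHz, the request becomes 1024 + 0.015*48000 = 1744
 * samples, which FFMIN/FFMAX then clamp to the +/-10% window 921..1126, so a
 * large drift is corrected gradually over several frames rather than at once. */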
2411 
2412 /**
2413  * Decode one audio frame and return its uncompressed size.
2414  *
2415  * The processed audio frame is decoded, converted if required, and
2416  * stored in is->audio_buf, with size in bytes given by the return
2417  * value.
2418  */
2419 static int audio_decode_frame(VideoState *is)
2420 {
2421  int data_size, resampled_data_size;
2422  int64_t dec_channel_layout;
2423  av_unused double audio_clock0;
2424  int wanted_nb_samples;
2425  Frame *af;
2426 
2427  if (is->paused)
2428  return -1;
2429 
2430  do {
2431  if (!(af = frame_queue_peek_readable(&is->sampq)))
2432  return -1;
2433  frame_queue_next(&is->sampq);
2434  } while (af->serial != is->audioq.serial);
2435 
2436  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2437  af->frame->nb_samples,
2438  af->frame->format, 1);
2439 
2440  dec_channel_layout =
2441  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2442  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2443  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2444 
2445  if (af->frame->format != is->audio_src.fmt ||
2446  dec_channel_layout != is->audio_src.channel_layout ||
2447  af->frame->sample_rate != is->audio_src.freq ||
2448  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2449  swr_free(&is->swr_ctx);
2450  is->swr_ctx = swr_alloc_set_opts(NULL,
2451  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2452  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2453  0, NULL);
2454  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2455  av_log(NULL, AV_LOG_ERROR,
2456  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2457  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
2458  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2459  swr_free(&is->swr_ctx);
2460  return -1;
2461  }
2462  is->audio_src.channel_layout = dec_channel_layout;
2463  is->audio_src.channels = av_frame_get_channels(af->frame);
2464  is->audio_src.freq = af->frame->sample_rate;
2465  is->audio_src.fmt = af->frame->format;
2466  }
2467 
2468  if (is->swr_ctx) {
2469  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2470  uint8_t **out = &is->audio_buf1;
2471  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2472  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2473  int len2;
2474  if (out_size < 0) {
2475  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2476  return -1;
2477  }
2478  if (wanted_nb_samples != af->frame->nb_samples) {
2479  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2480  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2481  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2482  return -1;
2483  }
2484  }
2485  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2486  if (!is->audio_buf1)
2487  return AVERROR(ENOMEM);
2488  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2489  if (len2 < 0) {
2490  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2491  return -1;
2492  }
2493  if (len2 == out_count) {
2494  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2495  if (swr_init(is->swr_ctx) < 0)
2496  swr_free(&is->swr_ctx);
2497  }
2498  is->audio_buf = is->audio_buf1;
2499  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2500  } else {
2501  is->audio_buf = af->frame->data[0];
2502  resampled_data_size = data_size;
2503  }
2504 
2505  audio_clock0 = is->audio_clock;
2506  /* update the audio clock with the pts */
2507  if (!isnan(af->pts))
2508  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2509  else
2510  is->audio_clock = NAN;
2511  is->audio_clock_serial = af->serial;
2512 #ifdef DEBUG
2513  {
2514  static double last_clock;
2515  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2516  is->audio_clock - last_clock,
2517  is->audio_clock, audio_clock0);
2518  last_clock = is->audio_clock;
2519  }
2520 #endif
2521  return resampled_data_size;
2522 }
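/* Editor's sketch (not part of ffplay.c, names are hypothetical): the
 * conversion above follows the standard libswresample pattern.  Stripped of
 * ffplay's caching and sample-count compensation, resampling one decoded frame
 * to packed stereo S16 at 48 kHz looks roughly like this; a real player keeps
 * the SwrContext across frames, as the code above does. */
static int example_resample_frame(const AVFrame *in, uint8_t **out, int max_out_samples)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 48000,   /* output layout/fmt/rate */
            in->channel_layout, in->format, in->sample_rate, /* input  layout/fmt/rate */
            0, NULL);
    int n;

    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return -1;
    }
    /* returns the number of samples written per channel, or < 0 on error */
    n = swr_convert(swr, out, max_out_samples,
                    (const uint8_t **)in->extended_data, in->nb_samples);
    swr_free(&swr);
    return n;
}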
2523 
2524 /* prepare a new audio buffer */
2525 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2526 {
2527  VideoState *is = opaque;
2528  int audio_size, len1;
2529 
2530  audio_callback_time = av_gettime_relative();
2531 
2532  while (len > 0) {
2533  if (is->audio_buf_index >= is->audio_buf_size) {
2534  audio_size = audio_decode_frame(is);
2535  if (audio_size < 0) {
2536  /* if error, just output silence */
2537  is->audio_buf = is->silence_buf;
2538  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2539  } else {
2540  if (is->show_mode != SHOW_MODE_VIDEO)
2541  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2542  is->audio_buf_size = audio_size;
2543  }
2544  is->audio_buf_index = 0;
2545  }
2546  len1 = is->audio_buf_size - is->audio_buf_index;
2547  if (len1 > len)
2548  len1 = len;
2549  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2550  len -= len1;
2551  stream += len1;
2552  is->audio_buf_index += len1;
2553  }
2554  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2555  /* Let's assume the audio driver that is used by SDL has two periods. */
2556  if (!isnan(is->audio_clock)) {
2557  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2558  sync_clock_to_slave(&is->extclk, &is->audclk);
2559  }
2560 }
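/* Worked example (editor's note, not part of ffplay.c): the set_clock_at()
 * call above assumes the SDL driver double-buffers, so the data produced but
 * not yet heard is 2 * audio_hw_buf_size plus the part of the current buffer
 * still unwritten (audio_write_buf_size).  For a 2048-sample S16 stereo device
 * at 48 kHz, audio_hw_buf_size = 8192 bytes and bytes_per_sec = 192000, so the
 * audio clock is set about 2*8192/192000 ~= 85 ms behind is->audio_clock at
 * the time the callback ran. */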
2561 
2562 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2563 {
2564  SDL_AudioSpec wanted_spec, spec;
2565  const char *env;
2566  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2567  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2568  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2569 
2570  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2571  if (env) {
2572  wanted_nb_channels = atoi(env);
2573  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2574  }
2575  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2576  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2577  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2578  }
2579  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2580  wanted_spec.channels = wanted_nb_channels;
2581  wanted_spec.freq = wanted_sample_rate;
2582  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2583  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2584  return -1;
2585  }
2586  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2587  next_sample_rate_idx--;
2588  wanted_spec.format = AUDIO_S16SYS;
2589  wanted_spec.silence = 0;
2590  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2591  wanted_spec.callback = sdl_audio_callback;
2592  wanted_spec.userdata = opaque;
2593  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2594  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2595  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2596  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2597  if (!wanted_spec.channels) {
2598  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2599  wanted_spec.channels = wanted_nb_channels;
2600  if (!wanted_spec.freq) {
2601  av_log(NULL, AV_LOG_ERROR,
2602  "No more combinations to try, audio open failed\n");
2603  return -1;
2604  }
2605  }
2606  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2607  }
2608  if (spec.format != AUDIO_S16SYS) {
2609  av_log(NULL, AV_LOG_ERROR,
2610  "SDL advised audio format %d is not supported!\n", spec.format);
2611  return -1;
2612  }
2613  if (spec.channels != wanted_spec.channels) {
2614  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2615  if (!wanted_channel_layout) {
2616  av_log(NULL, AV_LOG_ERROR,
2617  "SDL advised channel count %d is not supported!\n", spec.channels);
2618  return -1;
2619  }
2620  }
2621 
2622  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2623  audio_hw_params->freq = spec.freq;
2624  audio_hw_params->channel_layout = wanted_channel_layout;
2625  audio_hw_params->channels = spec.channels;
2626  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2627  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2628  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2629  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2630  return -1;
2631  }
2632  return spec.size;
2633 }
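/* Worked example (editor's note, not part of ffplay.c): wanted_spec.samples
 * rounds freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC up to the next power of two.
 * At 44100 Hz: 44100 / 30 = 1470 and 2 << av_log2(1470) = 2048 samples, giving
 * at most 44100/2048 ~= 21.5 callbacks per second, while the FFMAX() keeps the
 * buffer from dropping below SDL_AUDIO_MIN_BUFFER_SIZE (512) samples for very
 * low sample rates. */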
2634 
2635 /* open a given stream. Return 0 if OK */
2636 static int stream_component_open(VideoState *is, int stream_index)
2637 {
2638  AVFormatContext *ic = is->ic;
2639  AVCodecContext *avctx;
2640  AVCodec *codec;
2641  const char *forced_codec_name = NULL;
2642  AVDictionary *opts;
2643  AVDictionaryEntry *t = NULL;
2644  int sample_rate, nb_channels;
2645  int64_t channel_layout;
2646  int ret = 0;
2647  int stream_lowres = lowres;
2648 
2649  if (stream_index < 0 || stream_index >= ic->nb_streams)
2650  return -1;
2651  avctx = ic->streams[stream_index]->codec;
2652 
2653  codec = avcodec_find_decoder(avctx->codec_id);
2654 
2655  switch(avctx->codec_type){
2656  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2657  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2658  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2659  }
2660  if (forced_codec_name)
2661  codec = avcodec_find_decoder_by_name(forced_codec_name);
2662  if (!codec) {
2663  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2664  "No codec could be found with name '%s'\n", forced_codec_name);
2665  else av_log(NULL, AV_LOG_WARNING,
2666  "No codec could be found with id %d\n", avctx->codec_id);
2667  return -1;
2668  }
2669 
2670  avctx->codec_id = codec->id;
2671  if(stream_lowres > av_codec_get_max_lowres(codec)){
2672  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2673  av_codec_get_max_lowres(codec));
2674  stream_lowres = av_codec_get_max_lowres(codec);
2675  }
2676  av_codec_set_lowres(avctx, stream_lowres);
2677 
2678  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2679  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2680  if(codec->capabilities & CODEC_CAP_DR1)
2681  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2682 
2683  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2684  if (!av_dict_get(opts, "threads", NULL, 0))
2685  av_dict_set(&opts, "threads", "auto", 0);
2686  if (stream_lowres)
2687  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2688  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2689  av_dict_set(&opts, "refcounted_frames", "1", 0);
2690  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2691  goto fail;
2692  }
2693  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2694  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2695  ret = AVERROR_OPTION_NOT_FOUND;
2696  goto fail;
2697  }
2698 
2699  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2700  switch (avctx->codec_type) {
2701  case AVMEDIA_TYPE_AUDIO:
2702 #if CONFIG_AVFILTER
2703  {
2704  AVFilterLink *link;
2705 
2706  is->audio_filter_src.freq = avctx->sample_rate;
2707  is->audio_filter_src.channels = avctx->channels;
2708  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2709  is->audio_filter_src.fmt = avctx->sample_fmt;
2710  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2711  goto fail;
2712  link = is->out_audio_filter->inputs[0];
2713  sample_rate = link->sample_rate;
2714  nb_channels = link->channels;
2715  channel_layout = link->channel_layout;
2716  }
2717 #else
2718  sample_rate = avctx->sample_rate;
2719  nb_channels = avctx->channels;
2720  channel_layout = avctx->channel_layout;
2721 #endif
2722 
2723  /* prepare audio output */
2724  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2725  goto fail;
2726  is->audio_hw_buf_size = ret;
2727  is->audio_src = is->audio_tgt;
2728  is->audio_buf_size = 0;
2729  is->audio_buf_index = 0;
2730 
2731  /* init averaging filter */
2732  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2733  is->audio_diff_avg_count = 0;
2734  /* since we do not have a precise enough audio FIFO fullness,
2735  we correct audio sync only if larger than this threshold */
2736  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2737 
2738  is->audio_stream = stream_index;
2739  is->audio_st = ic->streams[stream_index];
2740 
2741  packet_queue_start(&is->audioq);
2742  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2743  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2744  is->auddec.start_pts = is->audio_st->start_time;
2745  is->auddec.start_pts_tb = is->audio_st->time_base;
2746  }
2747  is->audio_tid = SDL_CreateThread(audio_thread, is);
2748  SDL_PauseAudio(0);
2749  break;
2750  case AVMEDIA_TYPE_VIDEO:
2751  is->video_stream = stream_index;
2752  is->video_st = ic->streams[stream_index];
2753 
2754  packet_queue_start(&is->videoq);
2755  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2756  is->video_tid = SDL_CreateThread(video_thread, is);
2757  is->queue_attachments_req = 1;
2758  break;
2759  case AVMEDIA_TYPE_SUBTITLE:
2760  is->subtitle_stream = stream_index;
2761  is->subtitle_st = ic->streams[stream_index];
2762 
2763  packet_queue_start(&is->subtitleq);
2764  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2765  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2766  break;
2767  default:
2768  break;
2769  }
2770 
2771 fail:
2772  av_dict_free(&opts);
2773 
2774  return ret;
2775 }
2776 
2777 static void stream_component_close(VideoState *is, int stream_index)
2778 {
2779  AVFormatContext *ic = is->ic;
2780  AVCodecContext *avctx;
2781 
2782  if (stream_index < 0 || stream_index >= ic->nb_streams)
2783  return;
2784  avctx = ic->streams[stream_index]->codec;
2785 
2786  switch (avctx->codec_type) {
2787  case AVMEDIA_TYPE_AUDIO:
2788  packet_queue_abort(&is->audioq);
2789  frame_queue_signal(&is->sampq);
2790  SDL_CloseAudio();
2791  SDL_WaitThread(is->audio_tid, NULL);
2792 
2793  decoder_destroy(&is->auddec);
2794  packet_queue_flush(&is->audioq);
2795  swr_free(&is->swr_ctx);
2796  av_freep(&is->audio_buf1);
2797  is->audio_buf1_size = 0;
2798  is->audio_buf = NULL;
2799 
2800  if (is->rdft) {
2801  av_rdft_end(is->rdft);
2802  av_freep(&is->rdft_data);
2803  is->rdft = NULL;
2804  is->rdft_bits = 0;
2805  }
2806  break;
2807  case AVMEDIA_TYPE_VIDEO:
2808  packet_queue_abort(&is->videoq);
2809 
2810  /* note: we also signal this mutex to make sure we deblock the
2811  video thread in all cases */
2812  frame_queue_signal(&is->pictq);
2813 
2814  SDL_WaitThread(is->video_tid, NULL);
2815 
2816  decoder_destroy(&is->viddec);
2817  packet_queue_flush(&is->videoq);
2818  break;
2819  case AVMEDIA_TYPE_SUBTITLE:
2820  packet_queue_abort(&is->subtitleq);
2821 
2822  /* note: we also signal this mutex to make sure we deblock the
2823  subtitle thread in all cases */
2824  frame_queue_signal(&is->subpq);
2825 
2826  SDL_WaitThread(is->subtitle_tid, NULL);
2827 
2828  decoder_destroy(&is->subdec);
2829  packet_queue_flush(&is->subtitleq);
2830  break;
2831  default:
2832  break;
2833  }
2834 
2835  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2836  avcodec_close(avctx);
2837  switch (avctx->codec_type) {
2838  case AVMEDIA_TYPE_AUDIO:
2839  is->audio_st = NULL;
2840  is->audio_stream = -1;
2841  break;
2842  case AVMEDIA_TYPE_VIDEO:
2843  is->video_st = NULL;
2844  is->video_stream = -1;
2845  break;
2846  case AVMEDIA_TYPE_SUBTITLE:
2847  is->subtitle_st = NULL;
2848  is->subtitle_stream = -1;
2849  break;
2850  default:
2851  break;
2852  }
2853 }
2854 
2855 static int decode_interrupt_cb(void *ctx)
2856 {
2857  VideoState *is = ctx;
2858  return is->abort_request;
2859 }
2860 
2861 static int is_realtime(AVFormatContext *s)
2862 {
2863  if( !strcmp(s->iformat->name, "rtp")
2864  || !strcmp(s->iformat->name, "rtsp")
2865  || !strcmp(s->iformat->name, "sdp")
2866  )
2867  return 1;
2868 
2869  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2870  || !strncmp(s->filename, "udp:", 4)
2871  )
2872  )
2873  return 1;
2874  return 0;
2875 }
2876 
2877 /* this thread gets the stream from the disk or the network */
2878 static int read_thread(void *arg)
2879 {
2880  VideoState *is = arg;
2881  AVFormatContext *ic = NULL;
2882  int err, i, ret;
2883  int st_index[AVMEDIA_TYPE_NB];
2884  AVPacket pkt1, *pkt = &pkt1;
2885  int eof = 0;
2886  int64_t stream_start_time;
2887  int pkt_in_play_range = 0;
2888  AVDictionaryEntry *t;
2889  AVDictionary **opts;
2890  int orig_nb_streams;
2891  SDL_mutex *wait_mutex = SDL_CreateMutex();
2892  int scan_all_pmts_set = 0;
2893 
2894  memset(st_index, -1, sizeof(st_index));
2895  is->last_video_stream = is->video_stream = -1;
2896  is->last_audio_stream = is->audio_stream = -1;
2897  is->last_subtitle_stream = is->subtitle_stream = -1;
2898 
2899  ic = avformat_alloc_context();
2900  ic->interrupt_callback.callback = decode_interrupt_cb;
2901  ic->interrupt_callback.opaque = is;
2902  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2903  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2904  scan_all_pmts_set = 1;
2905  }
2906  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2907  if (err < 0) {
2908  print_error(is->filename, err);
2909  ret = -1;
2910  goto fail;
2911  }
2912  if (scan_all_pmts_set)
2913  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2914 
2915  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2916  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2917  ret = AVERROR_OPTION_NOT_FOUND;
2918  goto fail;
2919  }
2920  is->ic = ic;
2921 
2922  if (genpts)
2923  ic->flags |= AVFMT_FLAG_GENPTS;
2924 
2925  av_format_inject_global_side_data(ic);
2926 
2927  opts = setup_find_stream_info_opts(ic, codec_opts);
2928  orig_nb_streams = ic->nb_streams;
2929 
2930  err = avformat_find_stream_info(ic, opts);
2931 
2932  for (i = 0; i < orig_nb_streams; i++)
2933  av_dict_free(&opts[i]);
2934  av_freep(&opts);
2935 
2936  if (err < 0) {
2937  av_log(NULL, AV_LOG_WARNING,
2938  "%s: could not find codec parameters\n", is->filename);
2939  ret = -1;
2940  goto fail;
2941  }
2942 
2943  if (ic->pb)
2944  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2945 
2946  if (seek_by_bytes < 0)
2947  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2948 
2949  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2950 
2951  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2952  window_title = av_asprintf("%s - %s", t->value, input_filename);
2953 
2954  /* if seeking requested, we execute it */
2955  if (start_time != AV_NOPTS_VALUE) {
2956  int64_t timestamp;
2957 
2958  timestamp = start_time;
2959  /* add the stream start time */
2960  if (ic->start_time != AV_NOPTS_VALUE)
2961  timestamp += ic->start_time;
2962  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2963  if (ret < 0) {
2964  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2965  is->filename, (double)timestamp / AV_TIME_BASE);
2966  }
2967  }
2968 
2969  is->realtime = is_realtime(ic);
2970 
2971  for (i = 0; i < ic->nb_streams; i++)
2972  ic->streams[i]->discard = AVDISCARD_ALL;
2973  if (!video_disable)
2974  st_index[AVMEDIA_TYPE_VIDEO] =
2975  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2976  wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2977  if (!audio_disable)
2978  st_index[AVMEDIA_TYPE_AUDIO] =
2979  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2980  wanted_stream[AVMEDIA_TYPE_AUDIO],
2981  st_index[AVMEDIA_TYPE_VIDEO],
2982  NULL, 0);
2983  if (!video_disable && !subtitle_disable)
2984  st_index[AVMEDIA_TYPE_SUBTITLE] =
2985  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2986  wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2987  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2988  st_index[AVMEDIA_TYPE_AUDIO] :
2989  st_index[AVMEDIA_TYPE_VIDEO]),
2990  NULL, 0);
2991  if (show_status) {
2992  av_dump_format(ic, 0, is->filename, 0);
2993  }
2994 
2995  is->show_mode = show_mode;
2996  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2997  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2998  AVCodecContext *avctx = st->codec;
2999  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
3000  if (avctx->width)
3001  set_default_window_size(avctx->width, avctx->height, sar);
3002  }
3003 
3004  /* open the streams */
3005  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3006  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3007  }
3008 
3009  ret = -1;
3010  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3011  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3012  }
3013  if (is->show_mode == SHOW_MODE_NONE)
3014  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3015 
3016  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3017  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3018  }
3019 
3020  if (is->video_stream < 0 && is->audio_stream < 0) {
3021  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3022  is->filename);
3023  ret = -1;
3024  goto fail;
3025  }
3026 
3027  if (infinite_buffer < 0 && is->realtime)
3028  infinite_buffer = 1;
3029 
3030  for (;;) {
3031  if (is->abort_request)
3032  break;
3033  if (is->paused != is->last_paused) {
3034  is->last_paused = is->paused;
3035  if (is->paused)
3036  is->read_pause_return = av_read_pause(ic);
3037  else
3038  av_read_play(ic);
3039  }
3040 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3041  if (is->paused &&
3042  (!strcmp(ic->iformat->name, "rtsp") ||
3043  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3044  /* wait 10 ms to avoid trying to get another packet */
3045  /* XXX: horrible */
3046  SDL_Delay(10);
3047  continue;
3048  }
3049 #endif
3050  if (is->seek_req) {
3051  int64_t seek_target = is->seek_pos;
3052  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3053  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3054 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3055 // of the seek_pos/seek_rel variables
3056 
3057  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3058  if (ret < 0) {
3059  av_log(NULL, AV_LOG_ERROR,
3060  "%s: error while seeking\n", is->ic->filename);
3061  } else {
3062  if (is->audio_stream >= 0) {
3063  packet_queue_flush(&is->audioq);
3064  packet_queue_put(&is->audioq, &flush_pkt);
3065  }
3066  if (is->subtitle_stream >= 0) {
3067  packet_queue_flush(&is->subtitleq);
3068  packet_queue_put(&is->subtitleq, &flush_pkt);
3069  }
3070  if (is->video_stream >= 0) {
3071  packet_queue_flush(&is->videoq);
3072  packet_queue_put(&is->videoq, &flush_pkt);
3073  }
3074  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3075  set_clock(&is->extclk, NAN, 0);
3076  } else {
3077  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3078  }
3079  }
3080  is->seek_req = 0;
3081  is->queue_attachments_req = 1;
3082  eof = 0;
3083  if (is->paused)
3084  step_to_next_frame(is);
3085  }
3086  if (is->queue_attachments_req) {
3087  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3088  AVPacket copy;
3089  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3090  goto fail;
3091  packet_queue_put(&is->videoq, &copy);
3092  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3093  }
3094  is->queue_attachments_req = 0;
3095  }
3096 
3097  /* if the queues are full, no need to read more */
3098  if (infinite_buffer<1 &&
3099  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3100  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3101  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3102  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3103  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3104  /* wait 10 ms */
3105  SDL_LockMutex(wait_mutex);
3106  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3107  SDL_UnlockMutex(wait_mutex);
3108  continue;
3109  }
3110  if (!is->paused &&
3111  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3112  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3113  if (loop != 1 && (!loop || --loop)) {
3114  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3115  } else if (autoexit) {
3116  ret = AVERROR_EOF;
3117  goto fail;
3118  }
3119  }
3120  ret = av_read_frame(ic, pkt);
3121  if (ret < 0) {
3122  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !eof) {
3123  if (is->video_stream >= 0)
3124  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3125  if (is->audio_stream >= 0)
3126  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3127  if (is->subtitle_stream >= 0)
3128  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3129  eof = 1;
3130  }
3131  if (ic->pb && ic->pb->error)
3132  break;
3133  SDL_LockMutex(wait_mutex);
3134  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3135  SDL_UnlockMutex(wait_mutex);
3136  continue;
3137  } else {
3138  eof = 0;
3139  }
3140  /* check if packet is in play range specified by user, then queue, otherwise discard */
3141  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3142  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3143  (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3144  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3145  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3146  <= ((double)duration / 1000000);
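/* Worked example (editor's note, not part of ffplay.c): with "-ss 60 -t 10",
 * start_time = 60000000 and duration = 10000000 microseconds.  For a stream
 * with time_base 1/90000 and start_time 0, a packet with pts 6300000 sits at
 * 6300000/90000 = 70 s, and 70 - 60 = 10 <= 10, so it is queued; a packet one
 * second later fails the test and is released by the av_free_packet() branch
 * below. */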
3147  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3148  packet_queue_put(&is->audioq, pkt);
3149  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3150  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3151  packet_queue_put(&is->videoq, pkt);
3152  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3153  packet_queue_put(&is->subtitleq, pkt);
3154  } else {
3155  av_free_packet(pkt);
3156  }
3157  }
3158  /* wait until the end */
3159  while (!is->abort_request) {
3160  SDL_Delay(100);
3161  }
3162 
3163  ret = 0;
3164  fail:
3165  /* close each stream */
3166  if (is->audio_stream >= 0)
3167  stream_component_close(is, is->audio_stream);
3168  if (is->video_stream >= 0)
3169  stream_component_close(is, is->video_stream);
3170  if (is->subtitle_stream >= 0)
3171  stream_component_close(is, is->subtitle_stream);
3172  if (ic) {
3173  avformat_close_input(&ic);
3174  is->ic = NULL;
3175  }
3176 
3177  if (ret != 0) {
3178  SDL_Event event;
3179 
3180  event.type = FF_QUIT_EVENT;
3181  event.user.data1 = is;
3182  SDL_PushEvent(&event);
3183  }
3184  SDL_DestroyMutex(wait_mutex);
3185  return 0;
3186 }
3187 
3188 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3189 {
3190  VideoState *is;
3191 
3192  is = av_mallocz(sizeof(VideoState));
3193  if (!is)
3194  return NULL;
3195  av_strlcpy(is->filename, filename, sizeof(is->filename));
3196  is->iformat = iformat;
3197  is->ytop = 0;
3198  is->xleft = 0;
3199 
3200  /* start video display */
3201  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3202  goto fail;
3203  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3204  goto fail;
3205  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3206  goto fail;
3207 
3208  packet_queue_init(&is->videoq);
3209  packet_queue_init(&is->audioq);
3210  packet_queue_init(&is->subtitleq);
3211 
3212  is->continue_read_thread = SDL_CreateCond();
3213 
3214  init_clock(&is->vidclk, &is->videoq.serial);
3215  init_clock(&is->audclk, &is->audioq.serial);
3216  init_clock(&is->extclk, &is->extclk.serial);
3217  is->audio_clock_serial = -1;
3218  is->av_sync_type = av_sync_type;
3219  is->read_tid = SDL_CreateThread(read_thread, is);
3220  if (!is->read_tid) {
3221 fail:
3222  stream_close(is);
3223  return NULL;
3224  }
3225  return is;
3226 }
3227 
3228 static void stream_cycle_channel(VideoState *is, int codec_type)
3229 {
3230  AVFormatContext *ic = is->ic;
3231  int start_index, stream_index;
3232  int old_index;
3233  AVStream *st;
3234  AVProgram *p = NULL;
3235  int nb_streams = is->ic->nb_streams;
3236 
3237  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3238  start_index = is->last_video_stream;
3239  old_index = is->video_stream;
3240  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3241  start_index = is->last_audio_stream;
3242  old_index = is->audio_stream;
3243  } else {
3244  start_index = is->last_subtitle_stream;
3245  old_index = is->subtitle_stream;
3246  }
3247  stream_index = start_index;
3248 
3249  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3250  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3251  if (p) {
3252  nb_streams = p->nb_stream_indexes;
3253  for (start_index = 0; start_index < nb_streams; start_index++)
3254  if (p->stream_index[start_index] == stream_index)
3255  break;
3256  if (start_index == nb_streams)
3257  start_index = -1;
3258  stream_index = start_index;
3259  }
3260  }
3261 
3262  for (;;) {
3263  if (++stream_index >= nb_streams)
3264  {
3265  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3266  {
3267  stream_index = -1;
3268  is->last_subtitle_stream = -1;
3269  goto the_end;
3270  }
3271  if (start_index == -1)
3272  return;
3273  stream_index = 0;
3274  }
3275  if (stream_index == start_index)
3276  return;
3277  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3278  if (st->codec->codec_type == codec_type) {
3279  /* check that parameters are OK */
3280  switch (codec_type) {
3281  case AVMEDIA_TYPE_AUDIO:
3282  if (st->codec->sample_rate != 0 &&
3283  st->codec->channels != 0)
3284  goto the_end;
3285  break;
3286  case AVMEDIA_TYPE_VIDEO:
3287  case AVMEDIA_TYPE_SUBTITLE:
3288  goto the_end;
3289  default:
3290  break;
3291  }
3292  }
3293  }
3294  the_end:
3295  if (p && stream_index != -1)
3296  stream_index = p->stream_index[stream_index];
3297  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3298  av_get_media_type_string(codec_type),
3299  old_index,
3300  stream_index);
3301 
3302  stream_component_close(is, old_index);
3303  stream_component_open(is, stream_index);
3304 }
3305 
3306 
3307 static void toggle_full_screen(VideoState *is)
3308 {
3309 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3310  /* OS X needs to reallocate the SDL overlays */
3311  int i;
3312  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3313  is->pictq.queue[i].reallocate = 1;
3314 #endif
3315  is_full_screen = !is_full_screen;
3316  video_open(is, 1, NULL);
3317 }
3318 
3319 static void toggle_audio_display(VideoState *is)
3320 {
3321  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3322  int next = is->show_mode;
3323  do {
3324  next = (next + 1) % SHOW_MODE_NB;
3325  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3326  if (is->show_mode != next) {
3327  fill_rectangle(screen,
3328  is->xleft, is->ytop, is->width, is->height,
3329  bgcolor, 1);
3330  is->force_refresh = 1;
3331  is->show_mode = next;
3332  }
3333 }
3334 
3335 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3336  double remaining_time = 0.0;
3337  SDL_PumpEvents();
3338  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3339  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3340  SDL_ShowCursor(0);
3341  cursor_hidden = 1;
3342  }
3343  if (remaining_time > 0.0)
3344  av_usleep((int64_t)(remaining_time * 1000000.0));
3345  remaining_time = REFRESH_RATE;
3346  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3347  video_refresh(is, &remaining_time);
3348  SDL_PumpEvents();
3349  }
3350 }
3351 
3352 static void seek_chapter(VideoState *is, int incr)
3353 {
3354  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3355  int i;
3356 
3357  if (!is->ic->nb_chapters)
3358  return;
3359 
3360  /* find the current chapter */
3361  for (i = 0; i < is->ic->nb_chapters; i++) {
3362  AVChapter *ch = is->ic->chapters[i];
3363  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3364  i--;
3365  break;
3366  }
3367  }
3368 
3369  i += incr;
3370  i = FFMAX(i, 0);
3371  if (i >= is->ic->nb_chapters)
3372  return;
3373 
3374  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3375  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3376  AV_TIME_BASE_Q), 0, 0);
3377 }
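/* Worked example (editor's note, not part of ffplay.c): av_rescale_q(a, bq, cq)
 * computes a * bq / cq with 64-bit intermediates, so a chapter starting at
 * 90000 in a 1/90000 time base becomes
 *     av_rescale_q(90000, (AVRational){1, 90000}, AV_TIME_BASE_Q) = 1000000,
 * i.e. exactly one second in the microsecond units stream_seek() expects. */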
3378 
3379 /* handle an event sent by the GUI */
3380 static void event_loop(VideoState *cur_stream)
3381 {
3382  SDL_Event event;
3383  double incr, pos, frac;
3384 
3385  for (;;) {
3386  double x;
3387  refresh_loop_wait_event(cur_stream, &event);
3388  switch (event.type) {
3389  case SDL_KEYDOWN:
3390  if (exit_on_keydown) {
3391  do_exit(cur_stream);
3392  break;
3393  }
3394  switch (event.key.keysym.sym) {
3395  case SDLK_ESCAPE:
3396  case SDLK_q:
3397  do_exit(cur_stream);
3398  break;
3399  case SDLK_f:
3400  toggle_full_screen(cur_stream);
3401  cur_stream->force_refresh = 1;
3402  break;
3403  case SDLK_p:
3404  case SDLK_SPACE:
3405  toggle_pause(cur_stream);
3406  break;
3407  case SDLK_s: // S: Step to next frame
3408  step_to_next_frame(cur_stream);
3409  break;
3410  case SDLK_a:
3411  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3412  break;
3413  case SDLK_v:
3414  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3415  break;
3416  case SDLK_c:
3417  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3418  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3419  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3420  break;
3421  case SDLK_t:
3422  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3423  break;
3424  case SDLK_w:
3425 #if CONFIG_AVFILTER
3426  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3427  if (++cur_stream->vfilter_idx >= nb_vfilters)
3428  cur_stream->vfilter_idx = 0;
3429  } else {
3430  cur_stream->vfilter_idx = 0;
3431  toggle_audio_display(cur_stream);
3432  }
3433 #else
3434  toggle_audio_display(cur_stream);
3435 #endif
3436  break;
3437  case SDLK_PAGEUP:
3438  if (cur_stream->ic->nb_chapters <= 1) {
3439  incr = 600.0;
3440  goto do_seek;
3441  }
3442  seek_chapter(cur_stream, 1);
3443  break;
3444  case SDLK_PAGEDOWN:
3445  if (cur_stream->ic->nb_chapters <= 1) {
3446  incr = -600.0;
3447  goto do_seek;
3448  }
3449  seek_chapter(cur_stream, -1);
3450  break;
3451  case SDLK_LEFT:
3452  incr = -10.0;
3453  goto do_seek;
3454  case SDLK_RIGHT:
3455  incr = 10.0;
3456  goto do_seek;
3457  case SDLK_UP:
3458  incr = 60.0;
3459  goto do_seek;
3460  case SDLK_DOWN:
3461  incr = -60.0;
3462  do_seek:
3463  if (seek_by_bytes) {
3464  pos = -1;
3465  if (pos < 0 && cur_stream->video_stream >= 0)
3466  pos = frame_queue_last_pos(&cur_stream->pictq);
3467  if (pos < 0 && cur_stream->audio_stream >= 0)
3468  pos = frame_queue_last_pos(&cur_stream->sampq);
3469  if (pos < 0)
3470  pos = avio_tell(cur_stream->ic->pb);
3471  if (cur_stream->ic->bit_rate)
3472  incr *= cur_stream->ic->bit_rate / 8.0;
3473  else
3474  incr *= 180000.0;
3475  pos += incr;
3476  stream_seek(cur_stream, pos, incr, 1);
3477  } else {
3478  pos = get_master_clock(cur_stream);
3479  if (isnan(pos))
3480  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3481  pos += incr;
3482  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3483  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3484  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3485  }
3486  break;
3487  default:
3488  break;
3489  }
3490  break;
3491  case SDL_VIDEOEXPOSE:
3492  cur_stream->force_refresh = 1;
3493  break;
3494  case SDL_MOUSEBUTTONDOWN:
3495  if (exit_on_mousedown) {
3496  do_exit(cur_stream);
3497  break;
3498  }
3499  case SDL_MOUSEMOTION:
3500  if (cursor_hidden) {
3501  SDL_ShowCursor(1);
3502  cursor_hidden = 0;
3503  }
3504  cursor_last_shown = av_gettime_relative();
3505  if (event.type == SDL_MOUSEBUTTONDOWN) {
3506  x = event.button.x;
3507  } else {
3508  if (event.motion.state != SDL_PRESSED)
3509  break;
3510  x = event.motion.x;
3511  }
3512  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3513  uint64_t size = avio_size(cur_stream->ic->pb);
3514  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3515  } else {
3516  int64_t ts;
3517  int ns, hh, mm, ss;
3518  int tns, thh, tmm, tss;
3519  tns = cur_stream->ic->duration / 1000000LL;
3520  thh = tns / 3600;
3521  tmm = (tns % 3600) / 60;
3522  tss = (tns % 60);
3523  frac = x / cur_stream->width;
3524  ns = frac * tns;
3525  hh = ns / 3600;
3526  mm = (ns % 3600) / 60;
3527  ss = (ns % 60);
3528  av_log(NULL, AV_LOG_INFO,
3529  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3530  hh, mm, ss, thh, tmm, tss);
3531  ts = frac * cur_stream->ic->duration;
3532  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3533  ts += cur_stream->ic->start_time;
3534  stream_seek(cur_stream, ts, 0, 0);
3535  }
3536  break;
3537  case SDL_VIDEORESIZE:
3538  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3539  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3540  if (!screen) {
3541  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3542  do_exit(cur_stream);
3543  }
3544  screen_width = cur_stream->width = screen->w;
3545  screen_height = cur_stream->height = screen->h;
3546  cur_stream->force_refresh = 1;
3547  break;
3548  case SDL_QUIT:
3549  case FF_QUIT_EVENT:
3550  do_exit(cur_stream);
3551  break;
3552  case FF_ALLOC_EVENT:
3553  alloc_picture(event.user.data1);
3554  break;
3555  default:
3556  break;
3557  }
3558  }
3559 }
3560 
3561 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3562 {
3563  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3564  return opt_default(NULL, "video_size", arg);
3565 }
3566 
3567 static int opt_width(void *optctx, const char *opt, const char *arg)
3568 {
3569  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3570  return 0;
3571 }
3572 
3573 static int opt_height(void *optctx, const char *opt, const char *arg)
3574 {
3575  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3576  return 0;
3577 }
3578 
3579 static int opt_format(void *optctx, const char *opt, const char *arg)
3580 {
3581  file_iformat = av_find_input_format(arg);
3582  if (!file_iformat) {
3583  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3584  return AVERROR(EINVAL);
3585  }
3586  return 0;
3587 }
3588 
3589 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3590 {
3591  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3592  return opt_default(NULL, "pixel_format", arg);
3593 }
3594 
3595 static int opt_sync(void *optctx, const char *opt, const char *arg)
3596 {
3597  if (!strcmp(arg, "audio"))
3598  av_sync_type = AV_SYNC_AUDIO_MASTER;
3599  else if (!strcmp(arg, "video"))
3600  av_sync_type = AV_SYNC_VIDEO_MASTER;
3601  else if (!strcmp(arg, "ext"))
3602  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3603  else {
3604  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3605  exit(1);
3606  }
3607  return 0;
3608 }
3609 
3610 static int opt_seek(void *optctx, const char *opt, const char *arg)
3611 {
3612  start_time = parse_time_or_die(opt, arg, 1);
3613  return 0;
3614 }
3615 
3616 static int opt_duration(void *optctx, const char *opt, const char *arg)
3617 {
3618  duration = parse_time_or_die(opt, arg, 1);
3619  return 0;
3620 }
3621 
3622 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3623 {
3624  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3625  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3626  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3627  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3628  return 0;
3629 }
3630 
3631 static void opt_input_file(void *optctx, const char *filename)
3632 {
3633  if (input_filename) {
3634  av_log(NULL, AV_LOG_FATAL,
3635  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3636  filename, input_filename);
3637  exit(1);
3638  }
3639  if (!strcmp(filename, "-"))
3640  filename = "pipe:";
3641  input_filename = filename;
3642 }
3643 
3644 static int opt_codec(void *optctx, const char *opt, const char *arg)
3645 {
3646  const char *spec = strchr(opt, ':');
3647  if (!spec) {
3648  av_log(NULL, AV_LOG_ERROR,
3649  "No media specifier was specified in '%s' in option '%s'\n",
3650  arg, opt);
3651  return AVERROR(EINVAL);
3652  }
3653  spec++;
3654  switch (spec[0]) {
3655  case 'a' : audio_codec_name = arg; break;
3656  case 's' : subtitle_codec_name = arg; break;
3657  case 'v' : video_codec_name = arg; break;
3658  default:
3659  av_log(NULL, AV_LOG_ERROR,
3660  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3661  return AVERROR(EINVAL);
3662  }
3663  return 0;
3664 }
3665 
3666 static int dummy;
3667 
3668 static const OptionDef options[] = {
3669 #include "cmdutils_common_opts.h"
3670  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3671  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3672  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3673  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3674  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3675  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3676  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3677  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3678  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3679  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3680  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3681  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3682  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3683  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3684  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3685  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3686  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3687  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3688  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3689  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3690  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3691  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3692  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3693  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3694  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3695  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3696  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3697  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3698  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3699 #if CONFIG_AVFILTER
3700  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3701  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3702 #endif
3703  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3704  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3705  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3706  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3707  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3708  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3709  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3710  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3711  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3712  { NULL, },
3713 };
3714 
3715 static void show_usage(void)
3716 {
3717  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3718  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3719  av_log(NULL, AV_LOG_INFO, "\n");
3720 }
3721 
3722 void show_help_default(const char *opt, const char *arg)
3723 {
3724  av_log_set_callback(log_callback_help);
3725  show_usage();
3726  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3727  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3728  printf("\n");
3729  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3730  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3731 #if !CONFIG_AVFILTER
3732  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3733 #else
3734  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3735 #endif
3736  printf("\nWhile playing:\n"
3737  "q, ESC quit\n"
3738  "f toggle full screen\n"
3739  "p, SPC pause\n"
3740  "a cycle audio channel in the current program\n"
3741  "v cycle video channel\n"
3742  "t cycle subtitle channel in the current program\n"
3743  "c cycle program\n"
3744  "w cycle video filters or show modes\n"
3745  "s activate frame-step mode\n"
3746  "left/right seek backward/forward 10 seconds\n"
3747  "down/up seek backward/forward 1 minute\n"
3748  "page down/page up seek backward/forward 10 minutes\n"
3749  "mouse click seek to percentage in file corresponding to fraction of width\n"
3750  );
3751 }
3752 
3753 static int lockmgr(void **mtx, enum AVLockOp op)
3754 {
3755  switch(op) {
3756  case AV_LOCK_CREATE:
3757  *mtx = SDL_CreateMutex();
3758  if(!*mtx)
3759  return 1;
3760  return 0;
3761  case AV_LOCK_OBTAIN:
3762  return !!SDL_LockMutex(*mtx);
3763  case AV_LOCK_RELEASE:
3764  return !!SDL_UnlockMutex(*mtx);
3765  case AV_LOCK_DESTROY:
3766  SDL_DestroyMutex(*mtx);
3767  return 0;
3768  }
3769  return 1;
3770 }
3771 
3772 /* Called from the main */
3773 int main(int argc, char **argv)
3774 {
3775  int flags;
3776  VideoState *is;
3777  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3778 
3779  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3780  parse_loglevel(argc, argv, options);
3781 
3782  /* register all codecs, demux and protocols */
3783 #if CONFIG_AVDEVICE
3784  avdevice_register_all();
3785 #endif
3786 #if CONFIG_AVFILTER
3787  avfilter_register_all();
3788 #endif
3789  av_register_all();
3790  avformat_network_init();
3791 
3792  init_opts();
3793 
3794  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3795  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3796 
3797  show_banner(argc, argv, options);
3798 
3799  parse_options(NULL, argc, argv, options, opt_input_file);
3800 
3801  if (!input_filename) {
3802  show_usage();
3803  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3804  av_log(NULL, AV_LOG_FATAL,
3805  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3806  exit(1);
3807  }
3808 
3809  if (display_disable) {
3810  video_disable = 1;
3811  }
3812  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3813  if (audio_disable)
3814  flags &= ~SDL_INIT_AUDIO;
3815  if (display_disable)
3816  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3817 #if !defined(_WIN32) && !defined(__APPLE__)
3818  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3819 #endif
3820  if (SDL_Init (flags)) {
3821  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3822  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3823  exit(1);
3824  }
3825 
3826  if (!display_disable) {
3827  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3828  fs_screen_width = vi->current_w;
3829  fs_screen_height = vi->current_h;
3830  }
3831 
3832  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3833  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3834  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3835 
3836  if (av_lockmgr_register(lockmgr)) {
3837  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3838  do_exit(NULL);
3839  }
3840 
3841  av_init_packet(&flush_pkt);
3842  flush_pkt.data = (uint8_t *)&flush_pkt;
3843 
3844  is = stream_open(input_filename, file_iformat);
3845  if (!is) {
3846  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3847  do_exit(NULL);
3848  }
3849 
3850  event_loop(is);
3851 
3852  /* never returns */
3853 
3854  return 0;
3855 }