FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/bprint.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/tx.h"
49 
50 #include "libavfilter/avfilter.h"
51 #include "libavfilter/buffersink.h"
52 #include "libavfilter/buffersrc.h"
53 
54 #include <SDL.h>
55 #include <SDL_thread.h>
56 
57 #include "cmdutils.h"
58 #include "ffplay_renderer.h"
59 #include "opt_common.h"
60 
61 const char program_name[] = "ffplay";
62 const int program_birth_year = 2003;
63 
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_FRAMES 25
66 #define EXTERNAL_CLOCK_MIN_FRAMES 2
67 #define EXTERNAL_CLOCK_MAX_FRAMES 10
68 
69 /* Minimum SDL audio buffer size, in samples. */
70 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
71 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
72 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
73 
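/* [Editor's note, not part of the original listing] A minimal sketch of how the
 * two constants above are typically combined when the SDL audio device is
 * opened (the real computation lives in audio_open(), which is outside this
 * excerpt, so treat this as an assumption rather than the exact code): choose a
 * power-of-two sample count that is at least SDL_AUDIO_MIN_BUFFER_SIZE and
 * produces at most SDL_AUDIO_MAX_CALLBACKS_PER_SEC callbacks per second.
 *
 *     int example_buffer_samples(int freq)
 *     {
 *         return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
 *                      2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
 *     }
 *
 * For freq = 48000: 48000 / 30 = 1600 and 2 << av_log2(1600) = 2048 samples. */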
74 /* Step size for volume control in dB */
75 #define SDL_VOLUME_STEP (0.75)
76 
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
83 /* no AV correction is done if the error is too big */
84 #define AV_NOSYNC_THRESHOLD 10.0
85 
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88 
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93 
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
96 
97 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99 
100 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103 
104 #define CURSOR_HIDE_DELAY 1000000
105 
106 #define USE_ONEPASS_SUBTITLE_RENDER 1
107 
108 typedef struct MyAVPacketList {
109  AVPacket *pkt;
110  int serial;
111 } MyAVPacketList;
112 
113 typedef struct PacketQueue {
114  AVFifo *pkt_list;
115  int nb_packets;
116  int size;
117  int64_t duration;
118  int abort_request;
119  int serial;
120  SDL_mutex *mutex;
121  SDL_cond *cond;
122 } PacketQueue;
123 
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128 
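/* [Editor's note, not part of the original listing] With the values above,
 * FRAME_QUEUE_SIZE = FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE,
 * SUBPICTURE_QUEUE_SIZE)) = FFMAX(9, FFMAX(3, 16)) = 16, so each FrameQueue
 * below reserves 16 Frame slots and frame_queue_init() clamps max_size to that. */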
129 typedef struct AudioParams {
130  int freq;
131  AVChannelLayout ch_layout;
132  enum AVSampleFormat fmt;
133  int frame_size;
134  int bytes_per_sec;
135 } AudioParams;
136 
137 typedef struct Clock {
138  double pts; /* clock base */
139  double pts_drift; /* clock base minus time at which we updated the clock */
140  double last_updated;
141  double speed;
142  int serial; /* clock is based on a packet with this serial */
143  int paused;
144  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
146 
147 typedef struct FrameData {
148  int64_t pkt_pos;
149 } FrameData;
150 
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
153  AVFrame *frame;
154  AVSubtitle sub;
155  int serial;
156  double pts; /* presentation timestamp for the frame */
157  double duration; /* estimated duration of the frame */
158  int64_t pos; /* byte position of the frame in the input file */
159  int width;
160  int height;
161  int format;
162  AVRational sar;
163  int uploaded;
164  int flip_v;
165 } Frame;
166 
167 typedef struct FrameQueue {
168  Frame queue[FRAME_QUEUE_SIZE];
169  int rindex;
170  int windex;
171  int size;
172  int max_size;
173  int keep_last;
174  int rindex_shown;
175  SDL_mutex *mutex;
176  SDL_cond *cond;
177  PacketQueue *pktq;
178 } FrameQueue;
179 
180 enum {
181  AV_SYNC_AUDIO_MASTER, /* default choice */
182  AV_SYNC_VIDEO_MASTER,
183  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185 
186 typedef struct Decoder {
187  AVPacket *pkt;
188  PacketQueue *queue;
189  AVCodecContext *avctx;
190  int pkt_serial;
191  int finished;
192  int packet_pending;
193  SDL_cond *empty_queue_cond;
194  int64_t start_pts;
195  AVRational start_pts_tb;
196  int64_t next_pts;
197  AVRational next_pts_tb;
198  SDL_Thread *decoder_tid;
199 } Decoder;
200 
201 typedef struct VideoState {
202  SDL_Thread *read_tid;
206  int paused;
209  int seek_req;
215  int realtime;
216 
220 
224 
228 
230 
232 
233  double audio_clock;
235  double audio_diff_cum; /* used for AV difference average computation */
242  uint8_t *audio_buf;
243  uint8_t *audio_buf1;
244  unsigned int audio_buf_size; /* in bytes */
245  unsigned int audio_buf1_size;
246  int audio_buf_index; /* in bytes */
249  int muted;
256 
257  enum ShowMode {
259  } show_mode;
266  float *real_data;
268  int xpos;
270  SDL_Texture *vis_texture;
271  SDL_Texture *sub_texture;
272  SDL_Texture *vid_texture;
273 
277 
278  double frame_timer;
284  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
286  int eof;
287 
288  char *filename;
290  int step;
291 
293  AVFilterContext *in_video_filter; // the first filter in the video chain
294  AVFilterContext *out_video_filter; // the last filter in the video chain
295  AVFilterContext *in_audio_filter; // the first filter in the audio chain
296  AVFilterContext *out_audio_filter; // the last filter in the audio chain
297  AVFilterGraph *agraph; // audio filter graph
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 static int enable_vulkan = 0;
352 static char *vulkan_params = NULL;
353 static const char *hwaccel = NULL;
354 
355 /* current context */
356 static int is_full_screen;
357 static int64_t audio_callback_time;
358 
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 static SDL_RendererInfo renderer_info = {0};
364 static SDL_AudioDeviceID audio_dev;
365 
366 static VkRenderer *vk_renderer;
367 
368 static const struct TextureFormatEntry {
369  enum AVPixelFormat format;
370  SDL_PixelFormatEnum texture_fmt;
371 } sdl_texture_format_map[] = {
372  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
392 };
393 
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
396  int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
397  if (ret < 0)
398  return ret;
399 
400  vfilters_list[nb_vfilters - 1] = av_strdup(arg);
401  if (!vfilters_list[nb_vfilters - 1])
402  return AVERROR(ENOMEM);
403 
404  return 0;
405 }
406 
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409  enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411  /* If channel count == 1, planar and non-planar formats are the same */
412  if (channel_count1 == 1 && channel_count2 == 1)
413  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
414  else
415  return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417 
418 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
419 {
420  MyAVPacketList pkt1;
421  int ret;
422 
423  if (q->abort_request)
424  return -1;
425 
426 
427  pkt1.pkt = pkt;
428  pkt1.serial = q->serial;
429 
430  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
431  if (ret < 0)
432  return ret;
433  q->nb_packets++;
434  q->size += pkt1.pkt->size + sizeof(pkt1);
435  q->duration += pkt1.pkt->duration;
436  /* XXX: should duplicate packet data in DV case */
437  SDL_CondSignal(q->cond);
438  return 0;
439 }
440 
441 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
442 {
443  AVPacket *pkt1;
444  int ret;
445 
446  pkt1 = av_packet_alloc();
447  if (!pkt1) {
448  av_packet_unref(pkt);
449  return -1;
450  }
451  av_packet_move_ref(pkt1, pkt);
452 
453  SDL_LockMutex(q->mutex);
454  ret = packet_queue_put_private(q, pkt1);
455  SDL_UnlockMutex(q->mutex);
456 
457  if (ret < 0)
458  av_packet_free(&pkt1);
459 
460  return ret;
461 }
462 
463 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
464 {
465  pkt->stream_index = stream_index;
466  return packet_queue_put(q, pkt);
467 }
468 
469 /* packet queue handling */
470 static int packet_queue_init(PacketQueue *q)
471 {
472  memset(q, 0, sizeof(PacketQueue));
473  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
474  if (!q->pkt_list)
475  return AVERROR(ENOMEM);
476  q->mutex = SDL_CreateMutex();
477  if (!q->mutex) {
478  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
479  return AVERROR(ENOMEM);
480  }
481  q->cond = SDL_CreateCond();
482  if (!q->cond) {
483  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
484  return AVERROR(ENOMEM);
485  }
486  q->abort_request = 1;
487  return 0;
488 }
489 
490 static void packet_queue_flush(PacketQueue *q)
491 {
492  MyAVPacketList pkt1;
493 
494  SDL_LockMutex(q->mutex);
495  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
496  av_packet_free(&pkt1.pkt);
497  q->nb_packets = 0;
498  q->size = 0;
499  q->duration = 0;
500  q->serial++;
501  SDL_UnlockMutex(q->mutex);
502 }
503 
504 static void packet_queue_destroy(PacketQueue *q)
505 {
506  packet_queue_flush(q);
507  av_fifo_freep2(&q->pkt_list);
508  SDL_DestroyMutex(q->mutex);
509  SDL_DestroyCond(q->cond);
510 }
511 
512 static void packet_queue_abort(PacketQueue *q)
513 {
514  SDL_LockMutex(q->mutex);
515 
516  q->abort_request = 1;
517 
518  SDL_CondSignal(q->cond);
519 
520  SDL_UnlockMutex(q->mutex);
521 }
522 
523 static void packet_queue_start(PacketQueue *q)
524 {
525  SDL_LockMutex(q->mutex);
526  q->abort_request = 0;
527  q->serial++;
528  SDL_UnlockMutex(q->mutex);
529 }
530 
531 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
532 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
533 {
534  MyAVPacketList pkt1;
535  int ret;
536 
537  SDL_LockMutex(q->mutex);
538 
539  for (;;) {
540  if (q->abort_request) {
541  ret = -1;
542  break;
543  }
544 
545  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
546  q->nb_packets--;
547  q->size -= pkt1.pkt->size + sizeof(pkt1);
548  q->duration -= pkt1.pkt->duration;
549  av_packet_move_ref(pkt, pkt1.pkt);
550  if (serial)
551  *serial = pkt1.serial;
552  av_packet_free(&pkt1.pkt);
553  ret = 1;
554  break;
555  } else if (!block) {
556  ret = 0;
557  break;
558  } else {
559  SDL_CondWait(q->cond, q->mutex);
560  }
561  }
562  SDL_UnlockMutex(q->mutex);
563  return ret;
564 }
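/* [Editor's note, not part of the original listing] A minimal consumer sketch
 * for the queue API above (example_consume is a hypothetical helper shown for
 * illustration only; the real consumer is decoder_decode_frame() further down,
 * which also tracks packet serials). The blocking get returns > 0 per packet
 * and -1 once packet_queue_abort() has been called:
 *
 *     static void example_consume(PacketQueue *q)
 *     {
 *         AVPacket *pkt = av_packet_alloc();
 *         int serial;
 *         while (pkt && packet_queue_get(q, pkt, 1, &serial) > 0) {
 *             // use pkt->data / pkt->size here, then drop the reference
 *             av_packet_unref(pkt);
 *         }
 *         av_packet_free(&pkt);
 *     }
 */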
565 
566 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
567  memset(d, 0, sizeof(Decoder));
568  d->pkt = av_packet_alloc();
569  if (!d->pkt)
570  return AVERROR(ENOMEM);
571  d->avctx = avctx;
572  d->queue = queue;
573  d->empty_queue_cond = empty_queue_cond;
574  d->start_pts = AV_NOPTS_VALUE;
575  d->pkt_serial = -1;
576  return 0;
577 }
578 
579 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
580  int ret = AVERROR(EAGAIN);
581 
582  for (;;) {
583  if (d->queue->serial == d->pkt_serial) {
584  do {
585  if (d->queue->abort_request)
586  return -1;
587 
588  switch (d->avctx->codec_type) {
589  case AVMEDIA_TYPE_VIDEO:
590  ret = avcodec_receive_frame(d->avctx, frame);
591  if (ret >= 0) {
592  if (decoder_reorder_pts == -1) {
593  frame->pts = frame->best_effort_timestamp;
594  } else if (!decoder_reorder_pts) {
595  frame->pts = frame->pkt_dts;
596  }
597  }
598  break;
599  case AVMEDIA_TYPE_AUDIO:
600  ret = avcodec_receive_frame(d->avctx, frame);
601  if (ret >= 0) {
602  AVRational tb = (AVRational){1, frame->sample_rate};
603  if (frame->pts != AV_NOPTS_VALUE)
604  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
605  else if (d->next_pts != AV_NOPTS_VALUE)
606  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
607  if (frame->pts != AV_NOPTS_VALUE) {
608  d->next_pts = frame->pts + frame->nb_samples;
609  d->next_pts_tb = tb;
610  }
611  }
612  break;
613  }
614  if (ret == AVERROR_EOF) {
615  d->finished = d->pkt_serial;
616  avcodec_flush_buffers(d->avctx);
617  return 0;
618  }
619  if (ret >= 0)
620  return 1;
621  } while (ret != AVERROR(EAGAIN));
622  }
623 
624  do {
625  if (d->queue->nb_packets == 0)
626  SDL_CondSignal(d->empty_queue_cond);
627  if (d->packet_pending) {
628  d->packet_pending = 0;
629  } else {
630  int old_serial = d->pkt_serial;
631  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
632  return -1;
633  if (old_serial != d->pkt_serial) {
634  avcodec_flush_buffers(d->avctx);
635  d->finished = 0;
636  d->next_pts = d->start_pts;
637  d->next_pts_tb = d->start_pts_tb;
638  }
639  }
640  if (d->queue->serial == d->pkt_serial)
641  break;
642  av_packet_unref(d->pkt);
643  } while (1);
644 
645  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
646  int got_frame = 0;
647  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
648  if (ret < 0) {
649  ret = AVERROR(EAGAIN);
650  } else {
651  if (got_frame && !d->pkt->data) {
652  d->packet_pending = 1;
653  }
654  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
655  }
656  av_packet_unref(d->pkt);
657  } else {
658  if (d->pkt->buf && !d->pkt->opaque_ref) {
659  FrameData *fd;
660 
661  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
662  if (!d->pkt->opaque_ref)
663  return AVERROR(ENOMEM);
664  fd = (FrameData*)d->pkt->opaque_ref->data;
665  fd->pkt_pos = d->pkt->pos;
666  }
667 
668  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
669  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670  d->packet_pending = 1;
671  } else {
672  av_packet_unref(d->pkt);
673  }
674  }
675  }
676 }
677 
678 static void decoder_destroy(Decoder *d) {
679  av_packet_free(&d->pkt);
680  avcodec_free_context(&d->avctx);
681 }
682 
683 static void frame_queue_unref_item(Frame *vp)
684 {
685  av_frame_unref(vp->frame);
686  avsubtitle_free(&vp->sub);
687 }
688 
689 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
690 {
691  int i;
692  memset(f, 0, sizeof(FrameQueue));
693  if (!(f->mutex = SDL_CreateMutex())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  if (!(f->cond = SDL_CreateCond())) {
698  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
699  return AVERROR(ENOMEM);
700  }
701  f->pktq = pktq;
702  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
703  f->keep_last = !!keep_last;
704  for (i = 0; i < f->max_size; i++)
705  if (!(f->queue[i].frame = av_frame_alloc()))
706  return AVERROR(ENOMEM);
707  return 0;
708 }
709 
710 static void frame_queue_destroy(FrameQueue *f)
711 {
712  int i;
713  for (i = 0; i < f->max_size; i++) {
714  Frame *vp = &f->queue[i];
715  frame_queue_unref_item(vp);
716  av_frame_free(&vp->frame);
717  }
718  SDL_DestroyMutex(f->mutex);
719  SDL_DestroyCond(f->cond);
720 }
721 
722 static void frame_queue_signal(FrameQueue *f)
723 {
724  SDL_LockMutex(f->mutex);
725  SDL_CondSignal(f->cond);
726  SDL_UnlockMutex(f->mutex);
727 }
728 
729 static Frame *frame_queue_peek(FrameQueue *f)
730 {
731  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
732 }
733 
734 static Frame *frame_queue_peek_next(FrameQueue *f)
735 {
736  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
737 }
738 
739 static Frame *frame_queue_peek_last(FrameQueue *f)
740 {
741  return &f->queue[f->rindex];
742 }
743 
744 static Frame *frame_queue_peek_writable(FrameQueue *f)
745 {
746  /* wait until we have space to put a new frame */
747  SDL_LockMutex(f->mutex);
748  while (f->size >= f->max_size &&
749  !f->pktq->abort_request) {
750  SDL_CondWait(f->cond, f->mutex);
751  }
752  SDL_UnlockMutex(f->mutex);
753 
754  if (f->pktq->abort_request)
755  return NULL;
756 
757  return &f->queue[f->windex];
758 }
759 
760 static Frame *frame_queue_peek_readable(FrameQueue *f)
761 {
762  /* wait until we have a readable new frame */
763  SDL_LockMutex(f->mutex);
764  while (f->size - f->rindex_shown <= 0 &&
765  !f->pktq->abort_request) {
766  SDL_CondWait(f->cond, f->mutex);
767  }
768  SDL_UnlockMutex(f->mutex);
769 
770  if (f->pktq->abort_request)
771  return NULL;
772 
773  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
774 }
775 
776 static void frame_queue_push(FrameQueue *f)
777 {
778  if (++f->windex == f->max_size)
779  f->windex = 0;
780  SDL_LockMutex(f->mutex);
781  f->size++;
782  SDL_CondSignal(f->cond);
783  SDL_UnlockMutex(f->mutex);
784 }
785 
786 static void frame_queue_next(FrameQueue *f)
787 {
788  if (f->keep_last && !f->rindex_shown) {
789  f->rindex_shown = 1;
790  return;
791  }
792  frame_queue_unref_item(&f->queue[f->rindex]);
793  if (++f->rindex == f->max_size)
794  f->rindex = 0;
795  SDL_LockMutex(f->mutex);
796  f->size--;
797  SDL_CondSignal(f->cond);
798  SDL_UnlockMutex(f->mutex);
799 }
800 
801 /* return the number of undisplayed frames in the queue */
802 static int frame_queue_nb_remaining(FrameQueue *f)
803 {
804  return f->size - f->rindex_shown;
805 }
806 
807 /* return last shown position */
808 static int64_t frame_queue_last_pos(FrameQueue *f)
809 {
810  Frame *fp = &f->queue[f->rindex];
811  if (f->rindex_shown && fp->serial == f->pktq->serial)
812  return fp->pos;
813  else
814  return -1;
815 }
816 
817 static void decoder_abort(Decoder *d, FrameQueue *fq)
818 {
819  packet_queue_abort(d->queue);
820  frame_queue_signal(fq);
821  SDL_WaitThread(d->decoder_tid, NULL);
822  d->decoder_tid = NULL;
823  packet_queue_flush(d->queue);
824 }
825 
826 static inline void fill_rectangle(int x, int y, int w, int h)
827 {
828  SDL_Rect rect;
829  rect.x = x;
830  rect.y = y;
831  rect.w = w;
832  rect.h = h;
833  if (w && h)
834  SDL_RenderFillRect(renderer, &rect);
835 }
836 
837 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
838 {
839  Uint32 format;
840  int access, w, h;
841  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
842  void *pixels;
843  int pitch;
844  if (*texture)
845  SDL_DestroyTexture(*texture);
846  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
847  return -1;
848  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
849  return -1;
850  if (init_texture) {
851  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
852  return -1;
853  memset(pixels, 0, pitch * new_height);
854  SDL_UnlockTexture(*texture);
855  }
856  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
857  }
858  return 0;
859 }
860 
861 static void calculate_display_rect(SDL_Rect *rect,
862  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
863  int pic_width, int pic_height, AVRational pic_sar)
864 {
865  AVRational aspect_ratio = pic_sar;
866  int64_t width, height, x, y;
867 
868  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
869  aspect_ratio = av_make_q(1, 1);
870 
871  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
872 
873  /* XXX: we suppose the screen has a 1.0 pixel ratio */
874  height = scr_height;
875  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
876  if (width > scr_width) {
877  width = scr_width;
878  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
879  }
880  x = (scr_width - width) / 2;
881  y = (scr_height - height) / 2;
882  rect->x = scr_xleft + x;
883  rect->y = scr_ytop + y;
884  rect->w = FFMAX((int)width, 1);
885  rect->h = FFMAX((int)height, 1);
886 }
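/* [Editor's note, not part of the original listing] Worked example for
 * calculate_display_rect(): a 720x576 picture with sample aspect ratio 16:15
 * (PAL 4:3) shown in a 1920x1080 area gives
 *     aspect_ratio = (16/15) * (720/576) = 4/3
 *     height = 1080, width = 1080 * 4/3 = 1440 (already even, <= 1920)
 * so the rectangle is 1440x1080, centered at x = (1920 - 1440) / 2 = 240, y = 0. */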
887 
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890  int i;
891  *sdl_blendmode = SDL_BLENDMODE_NONE;
892  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893  if (format == AV_PIX_FMT_RGB32 ||
894  format == AV_PIX_FMT_RGB32_1 ||
895  format == AV_PIX_FMT_BGR32 ||
896  format == AV_PIX_FMT_BGR32_1)
897  *sdl_blendmode = SDL_BLENDMODE_BLEND;
898  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
899  if (format == sdl_texture_format_map[i].format) {
900  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901  return;
902  }
903  }
904 }
905 
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
907 {
908  int ret = 0;
909  Uint32 sdl_pix_fmt;
910  SDL_BlendMode sdl_blendmode;
911  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913  return -1;
914  switch (sdl_pix_fmt) {
915  case SDL_PIXELFORMAT_IYUV:
916  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
917  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
918  frame->data[1], frame->linesize[1],
919  frame->data[2], frame->linesize[2]);
920  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
921  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
922  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
923  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
924  } else {
925  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
926  return -1;
927  }
928  break;
929  default:
930  if (frame->linesize[0] < 0) {
931  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
932  } else {
933  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
934  }
935  break;
936  }
937  return ret;
938 }
939 
945 };
946 
947 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
948 {
949 #if SDL_VERSION_ATLEAST(2,0,8)
950  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
951  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
952  if (frame->color_range == AVCOL_RANGE_JPEG)
953  mode = SDL_YUV_CONVERSION_JPEG;
954  else if (frame->colorspace == AVCOL_SPC_BT709)
955  mode = SDL_YUV_CONVERSION_BT709;
956  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
957  mode = SDL_YUV_CONVERSION_BT601;
958  }
959  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
960 #endif
961 }
962 
963 static void video_image_display(VideoState *is)
964 {
965  Frame *vp;
966  Frame *sp = NULL;
967  SDL_Rect rect;
968 
969  vp = frame_queue_peek_last(&is->pictq);
970  if (vk_renderer) {
972  return;
973  }
974 
975  if (is->subtitle_st) {
976  if (frame_queue_nb_remaining(&is->subpq) > 0) {
977  sp = frame_queue_peek(&is->subpq);
978 
979  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
980  if (!sp->uploaded) {
981  uint8_t* pixels[4];
982  int pitch[4];
983  int i;
984  if (!sp->width || !sp->height) {
985  sp->width = vp->width;
986  sp->height = vp->height;
987  }
988  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
989  return;
990 
991  for (i = 0; i < sp->sub.num_rects; i++) {
992  AVSubtitleRect *sub_rect = sp->sub.rects[i];
993 
994  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
995  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
996  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
997  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
998 
999  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1000  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1001  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1002  0, NULL, NULL, NULL);
1003  if (!is->sub_convert_ctx) {
1004  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1005  return;
1006  }
1007  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1008  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1009  0, sub_rect->h, pixels, pitch);
1010  SDL_UnlockTexture(is->sub_texture);
1011  }
1012  }
1013  sp->uploaded = 1;
1014  }
1015  } else
1016  sp = NULL;
1017  }
1018  }
1019 
1020  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1021  set_sdl_yuv_conversion_mode(vp->frame);
1022 
1023  if (!vp->uploaded) {
1024  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1025  set_sdl_yuv_conversion_mode(NULL);
1026  return;
1027  }
1028  vp->uploaded = 1;
1029  vp->flip_v = vp->frame->linesize[0] < 0;
1030  }
1031 
1032  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1033  set_sdl_yuv_conversion_mode(NULL);
1034  if (sp) {
1035 #if USE_ONEPASS_SUBTITLE_RENDER
1036  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1037 #else
1038  int i;
1039  double xratio = (double)rect.w / (double)sp->width;
1040  double yratio = (double)rect.h / (double)sp->height;
1041  for (i = 0; i < sp->sub.num_rects; i++) {
1042  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1043  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1044  .y = rect.y + sub_rect->y * yratio,
1045  .w = sub_rect->w * xratio,
1046  .h = sub_rect->h * yratio};
1047  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1048  }
1049 #endif
1050  }
1051 }
1052 
1053 static inline int compute_mod(int a, int b)
1054 {
1055  return a < 0 ? a%b + b : a%b;
1056 }
1057 
1058 static void video_audio_display(VideoState *s)
1059 {
1060  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1061  int ch, channels, h, h2;
1062  int64_t time_diff;
1063  int rdft_bits, nb_freq;
1064 
1065  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1066  ;
1067  nb_freq = 1 << (rdft_bits - 1);
1068 
1069  /* compute display index : center on currently output samples */
1070  channels = s->audio_tgt.ch_layout.nb_channels;
1071  nb_display_channels = channels;
1072  if (!s->paused) {
1073  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1074  n = 2 * channels;
1075  delay = s->audio_write_buf_size;
1076  delay /= n;
1077 
1078  /* to be more precise, we take into account the time spent since
1079  the last buffer computation */
1080  if (audio_callback_time) {
1081  time_diff = av_gettime_relative() - audio_callback_time;
1082  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1083  }
1084 
1085  delay += 2 * data_used;
1086  if (delay < data_used)
1087  delay = data_used;
1088 
1089  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1090  if (s->show_mode == SHOW_MODE_WAVES) {
1091  h = INT_MIN;
1092  for (i = 0; i < 1000; i += channels) {
1093  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1094  int a = s->sample_array[idx];
1095  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1096  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1097  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1098  int score = a - d;
1099  if (h < score && (b ^ c) < 0) {
1100  h = score;
1101  i_start = idx;
1102  }
1103  }
1104  }
1105 
1106  s->last_i_start = i_start;
1107  } else {
1108  i_start = s->last_i_start;
1109  }
1110 
1111  if (s->show_mode == SHOW_MODE_WAVES) {
1112  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1113 
1114  /* total height for one channel */
1115  h = s->height / nb_display_channels;
1116  /* graph height / 2 */
1117  h2 = (h * 9) / 20;
1118  for (ch = 0; ch < nb_display_channels; ch++) {
1119  i = i_start + ch;
1120  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1121  for (x = 0; x < s->width; x++) {
1122  y = (s->sample_array[i] * h2) >> 15;
1123  if (y < 0) {
1124  y = -y;
1125  ys = y1 - y;
1126  } else {
1127  ys = y1;
1128  }
1129  fill_rectangle(s->xleft + x, ys, 1, y);
1130  i += channels;
1131  if (i >= SAMPLE_ARRAY_SIZE)
1132  i -= SAMPLE_ARRAY_SIZE;
1133  }
1134  }
1135 
1136  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1137 
1138  for (ch = 1; ch < nb_display_channels; ch++) {
1139  y = s->ytop + ch * h;
1140  fill_rectangle(s->xleft, y, s->width, 1);
1141  }
1142  } else {
1143  int err = 0;
1144  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1145  return;
1146 
1147  if (s->xpos >= s->width)
1148  s->xpos = 0;
1149  nb_display_channels= FFMIN(nb_display_channels, 2);
1150  if (rdft_bits != s->rdft_bits) {
1151  const float rdft_scale = 1.0;
1152  av_tx_uninit(&s->rdft);
1153  av_freep(&s->real_data);
1154  av_freep(&s->rdft_data);
1155  s->rdft_bits = rdft_bits;
1156  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1157  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1158  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1159  0, 1 << rdft_bits, &rdft_scale, 0);
1160  }
1161  if (err < 0 || !s->rdft_data) {
1162  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1163  s->show_mode = SHOW_MODE_WAVES;
1164  } else {
1165  float *data_in[2];
1166  AVComplexFloat *data[2];
1167  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1168  uint32_t *pixels;
1169  int pitch;
1170  for (ch = 0; ch < nb_display_channels; ch++) {
1171  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1172  data[ch] = s->rdft_data + nb_freq * ch;
1173  i = i_start + ch;
1174  for (x = 0; x < 2 * nb_freq; x++) {
1175  double w = (x-nb_freq) * (1.0 / nb_freq);
1176  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1177  i += channels;
1178  if (i >= SAMPLE_ARRAY_SIZE)
1179  i -= SAMPLE_ARRAY_SIZE;
1180  }
1181  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1182  data[ch][0].im = data[ch][nb_freq].re;
1183  data[ch][nb_freq].re = 0;
1184  }
1185  /* Least efficient way to do this; we should of course
1186  * access the texture directly, but this is more than fast enough. */
1187  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1188  pitch >>= 2;
1189  pixels += pitch * s->height;
1190  for (y = 0; y < s->height; y++) {
1191  double w = 1 / sqrt(nb_freq);
1192  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1193  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1194  : a;
1195  a = FFMIN(a, 255);
1196  b = FFMIN(b, 255);
1197  pixels -= pitch;
1198  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1199  }
1200  SDL_UnlockTexture(s->vis_texture);
1201  }
1202  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1203  }
1204  if (!s->paused)
1205  s->xpos++;
1206  }
1207 }
1208 
1209 static void stream_component_close(VideoState *is, int stream_index)
1210 {
1211  AVFormatContext *ic = is->ic;
1212  AVCodecParameters *codecpar;
1213 
1214  if (stream_index < 0 || stream_index >= ic->nb_streams)
1215  return;
1216  codecpar = ic->streams[stream_index]->codecpar;
1217 
1218  switch (codecpar->codec_type) {
1219  case AVMEDIA_TYPE_AUDIO:
1220  decoder_abort(&is->auddec, &is->sampq);
1221  SDL_CloseAudioDevice(audio_dev);
1222  decoder_destroy(&is->auddec);
1223  swr_free(&is->swr_ctx);
1224  av_freep(&is->audio_buf1);
1225  is->audio_buf1_size = 0;
1226  is->audio_buf = NULL;
1227 
1228  if (is->rdft) {
1229  av_tx_uninit(&is->rdft);
1230  av_freep(&is->real_data);
1231  av_freep(&is->rdft_data);
1232  is->rdft = NULL;
1233  is->rdft_bits = 0;
1234  }
1235  break;
1236  case AVMEDIA_TYPE_VIDEO:
1237  decoder_abort(&is->viddec, &is->pictq);
1238  decoder_destroy(&is->viddec);
1239  break;
1240  case AVMEDIA_TYPE_SUBTITLE:
1241  decoder_abort(&is->subdec, &is->subpq);
1242  decoder_destroy(&is->subdec);
1243  break;
1244  default:
1245  break;
1246  }
1247 
1248  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1249  switch (codecpar->codec_type) {
1250  case AVMEDIA_TYPE_AUDIO:
1251  is->audio_st = NULL;
1252  is->audio_stream = -1;
1253  break;
1254  case AVMEDIA_TYPE_VIDEO:
1255  is->video_st = NULL;
1256  is->video_stream = -1;
1257  break;
1258  case AVMEDIA_TYPE_SUBTITLE:
1259  is->subtitle_st = NULL;
1260  is->subtitle_stream = -1;
1261  break;
1262  default:
1263  break;
1264  }
1265 }
1266 
1267 static void stream_close(VideoState *is)
1268 {
1269  /* XXX: use a special url_shutdown call to abort parse cleanly */
1270  is->abort_request = 1;
1271  SDL_WaitThread(is->read_tid, NULL);
1272 
1273  /* close each stream */
1274  if (is->audio_stream >= 0)
1275  stream_component_close(is, is->audio_stream);
1276  if (is->video_stream >= 0)
1277  stream_component_close(is, is->video_stream);
1278  if (is->subtitle_stream >= 0)
1279  stream_component_close(is, is->subtitle_stream);
1280 
1281  avformat_close_input(&is->ic);
1282 
1283  packet_queue_destroy(&is->videoq);
1284  packet_queue_destroy(&is->audioq);
1285  packet_queue_destroy(&is->subtitleq);
1286 
1287  /* free all pictures */
1288  frame_queue_destroy(&is->pictq);
1289  frame_queue_destroy(&is->sampq);
1290  frame_queue_destroy(&is->subpq);
1291  SDL_DestroyCond(is->continue_read_thread);
1292  sws_freeContext(is->sub_convert_ctx);
1293  av_free(is->filename);
1294  if (is->vis_texture)
1295  SDL_DestroyTexture(is->vis_texture);
1296  if (is->vid_texture)
1297  SDL_DestroyTexture(is->vid_texture);
1298  if (is->sub_texture)
1299  SDL_DestroyTexture(is->sub_texture);
1300  av_free(is);
1301 }
1302 
1303 static void do_exit(VideoState *is)
1304 {
1305  if (is) {
1306  stream_close(is);
1307  }
1308  if (renderer)
1309  SDL_DestroyRenderer(renderer);
1310  if (vk_renderer)
1312  if (window)
1313  SDL_DestroyWindow(window);
1314  uninit_opts();
1315  for (int i = 0; i < nb_vfilters; i++)
1323  if (show_status)
1324  printf("\n");
1325  SDL_Quit();
1326  av_log(NULL, AV_LOG_QUIET, "%s", "");
1327  exit(0);
1328 }
1329 
1330 static void sigterm_handler(int sig)
1331 {
1332  exit(123);
1333 }
1334 
1335 static void set_default_window_size(int width, int height, AVRational sar)
1336 {
1337  SDL_Rect rect;
1338  int max_width = screen_width ? screen_width : INT_MAX;
1339  int max_height = screen_height ? screen_height : INT_MAX;
1340  if (max_width == INT_MAX && max_height == INT_MAX)
1341  max_height = height;
1342  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1343  default_width = rect.w;
1344  default_height = rect.h;
1345 }
1346 
1347 static int video_open(VideoState *is)
1348 {
1349  int w,h;
1350 
1351  w = screen_width ? screen_width : default_width;
1352  h = screen_height ? screen_height : default_height;
1353 
1354  if (!window_title)
1355  window_title = input_filename;
1356  SDL_SetWindowTitle(window, window_title);
1357 
1358  SDL_SetWindowSize(window, w, h);
1359  SDL_SetWindowPosition(window, screen_left, screen_top);
1360  if (is_full_screen)
1361  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1362  SDL_ShowWindow(window);
1363 
1364  is->width = w;
1365  is->height = h;
1366 
1367  return 0;
1368 }
1369 
1370 /* display the current picture, if any */
1371 static void video_display(VideoState *is)
1372 {
1373  if (!is->width)
1374  video_open(is);
1375 
1376  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1377  SDL_RenderClear(renderer);
1378  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1379  video_audio_display(is);
1380  else if (is->video_st)
1381  video_image_display(is);
1382  SDL_RenderPresent(renderer);
1383 }
1384 
1385 static double get_clock(Clock *c)
1386 {
1387  if (*c->queue_serial != c->serial)
1388  return NAN;
1389  if (c->paused) {
1390  return c->pts;
1391  } else {
1392  double time = av_gettime_relative() / 1000000.0;
1393  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1394  }
1395 }
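/* [Editor's note, not part of the original listing] Since set_clock_at() below
 * stores pts_drift = pts - last_updated, the unpaused branch above simplifies:
 *     pts_drift + time - (time - last_updated) * (1.0 - speed)
 *   = pts + (time - last_updated) * speed
 * i.e. the clock runs forward from its base pts at the configured speed. */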
1396 
1397 static void set_clock_at(Clock *c, double pts, int serial, double time)
1398 {
1399  c->pts = pts;
1400  c->last_updated = time;
1401  c->pts_drift = c->pts - time;
1402  c->serial = serial;
1403 }
1404 
1405 static void set_clock(Clock *c, double pts, int serial)
1406 {
1407  double time = av_gettime_relative() / 1000000.0;
1408  set_clock_at(c, pts, serial, time);
1409 }
1410 
1411 static void set_clock_speed(Clock *c, double speed)
1412 {
1413  set_clock(c, get_clock(c), c->serial);
1414  c->speed = speed;
1415 }
1416 
1417 static void init_clock(Clock *c, int *queue_serial)
1418 {
1419  c->speed = 1.0;
1420  c->paused = 0;
1421  c->queue_serial = queue_serial;
1422  set_clock(c, NAN, -1);
1423 }
1424 
1425 static void sync_clock_to_slave(Clock *c, Clock *slave)
1426 {
1427  double clock = get_clock(c);
1428  double slave_clock = get_clock(slave);
1429  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1430  set_clock(c, slave_clock, slave->serial);
1431 }
1432 
1433 static int get_master_sync_type(VideoState *is) {
1434  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1435  if (is->video_st)
1436  return AV_SYNC_VIDEO_MASTER;
1437  else
1438  return AV_SYNC_AUDIO_MASTER;
1439  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1440  if (is->audio_st)
1441  return AV_SYNC_AUDIO_MASTER;
1442  else
1443  return AV_SYNC_EXTERNAL_CLOCK;
1444  } else {
1445  return AV_SYNC_EXTERNAL_CLOCK;
1446  }
1447 }
1448 
1449 /* get the current master clock value */
1450 static double get_master_clock(VideoState *is)
1451 {
1452  double val;
1453 
1454  switch (get_master_sync_type(is)) {
1455  case AV_SYNC_VIDEO_MASTER:
1456  val = get_clock(&is->vidclk);
1457  break;
1458  case AV_SYNC_AUDIO_MASTER:
1459  val = get_clock(&is->audclk);
1460  break;
1461  default:
1462  val = get_clock(&is->extclk);
1463  break;
1464  }
1465  return val;
1466 }
1467 
1468 static void check_external_clock_speed(VideoState *is) {
1469  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1470  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1471  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1472  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1473  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1474  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1475  } else {
1476  double speed = is->extclk.speed;
1477  if (speed != 1.0)
1478  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1479  }
1480 }
1481 
1482 /* seek in the stream */
1483 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1484 {
1485  if (!is->seek_req) {
1486  is->seek_pos = pos;
1487  is->seek_rel = rel;
1488  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1489  if (by_bytes)
1490  is->seek_flags |= AVSEEK_FLAG_BYTE;
1491  is->seek_req = 1;
1492  SDL_CondSignal(is->continue_read_thread);
1493  }
1494 }
1495 
1496 /* pause or resume the video */
1497 static void stream_toggle_pause(VideoState *is)
1498 {
1499  if (is->paused) {
1500  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1501  if (is->read_pause_return != AVERROR(ENOSYS)) {
1502  is->vidclk.paused = 0;
1503  }
1504  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1505  }
1506  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1507  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1508 }
1509 
1510 static void toggle_pause(VideoState *is)
1511 {
1512  stream_toggle_pause(is);
1513  is->step = 0;
1514 }
1515 
1516 static void toggle_mute(VideoState *is)
1517 {
1518  is->muted = !is->muted;
1519 }
1520 
1521 static void update_volume(VideoState *is, int sign, double step)
1522 {
1523  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1524  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1525  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1526 }
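/* [Editor's note, not part of the original listing] Worked example for
 * update_volume() with SDL_VOLUME_STEP = 0.75 dB: at full volume
 * (SDL_MIX_MAXVOLUME = 128) one "volume down" press gives
 *     volume_level = 20 * log10(128 / 128) = 0 dB
 *     new_volume   = lrint(128 * 10^((0 - 0.75) / 20)) = lrint(117.4) = 117
 * so each step scales the linear volume by roughly 10^(0.75/20), about 9%. */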
1527 
1528 static void step_to_next_frame(VideoState *is)
1529 {
1530  /* if the stream is paused unpause it, then step */
1531  if (is->paused)
1532  stream_toggle_pause(is);
1533  is->step = 1;
1534 }
1535 
1536 static double compute_target_delay(double delay, VideoState *is)
1537 {
1538  double sync_threshold, diff = 0;
1539 
1540  /* update delay to follow master synchronisation source */
1541  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1542  /* if video is slave, we try to correct big delays by
1543  duplicating or deleting a frame */
1544  diff = get_clock(&is->vidclk) - get_master_clock(is);
1545 
1546  /* skip or repeat frame. We take into account the
1547  delay to compute the threshold. I still don't know
1548  if it is the best guess */
1549  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1550  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1551  if (diff <= -sync_threshold)
1552  delay = FFMAX(0, delay + diff);
1553  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1554  delay = delay + diff;
1555  else if (diff >= sync_threshold)
1556  delay = 2 * delay;
1557  }
1558  }
1559 
1560  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1561  delay, -diff);
1562 
1563  return delay;
1564 }
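/* [Editor's note, not part of the original listing] Worked example for
 * compute_target_delay() with a nominal frame delay of 0.040 s: the sync
 * threshold clamps to 0.040 s (between AV_SYNC_THRESHOLD_MIN and
 * AV_SYNC_THRESHOLD_MAX). If the video clock is 0.1 s behind the master
 * (diff = -0.1), the delay becomes FFMAX(0, 0.040 - 0.1) = 0 and the frame is
 * shown immediately; if it is 0.1 s ahead (diff = +0.1), the delay doubles to
 * 0.080 s (the delay + diff branch is skipped because 0.040 is not greater
 * than AV_SYNC_FRAMEDUP_THRESHOLD). */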
1565 
1566 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1567  if (vp->serial == nextvp->serial) {
1568  double duration = nextvp->pts - vp->pts;
1569  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1570  return vp->duration;
1571  else
1572  return duration;
1573  } else {
1574  return 0.0;
1575  }
1576 }
1577 
1578 static void update_video_pts(VideoState *is, double pts, int serial)
1579 {
1580  /* update current video pts */
1581  set_clock(&is->vidclk, pts, serial);
1582  sync_clock_to_slave(&is->extclk, &is->vidclk);
1583 }
1584 
1585 /* called to display each frame */
1586 static void video_refresh(void *opaque, double *remaining_time)
1587 {
1588  VideoState *is = opaque;
1589  double time;
1590 
1591  Frame *sp, *sp2;
1592 
1593  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1594  check_external_clock_speed(is);
1595 
1596  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1597  time = av_gettime_relative() / 1000000.0;
1598  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1599  video_display(is);
1600  is->last_vis_time = time;
1601  }
1602  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1603  }
1604 
1605  if (is->video_st) {
1606 retry:
1607  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1608  // nothing to do, no picture to display in the queue
1609  } else {
1610  double last_duration, duration, delay;
1611  Frame *vp, *lastvp;
1612 
1613  /* dequeue the picture */
1614  lastvp = frame_queue_peek_last(&is->pictq);
1615  vp = frame_queue_peek(&is->pictq);
1616 
1617  if (vp->serial != is->videoq.serial) {
1618  frame_queue_next(&is->pictq);
1619  goto retry;
1620  }
1621 
1622  if (lastvp->serial != vp->serial)
1623  is->frame_timer = av_gettime_relative() / 1000000.0;
1624 
1625  if (is->paused)
1626  goto display;
1627 
1628  /* compute nominal last_duration */
1629  last_duration = vp_duration(is, lastvp, vp);
1630  delay = compute_target_delay(last_duration, is);
1631 
1632  time= av_gettime_relative()/1000000.0;
1633  if (time < is->frame_timer + delay) {
1634  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1635  goto display;
1636  }
1637 
1638  is->frame_timer += delay;
1639  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1640  is->frame_timer = time;
1641 
1642  SDL_LockMutex(is->pictq.mutex);
1643  if (!isnan(vp->pts))
1644  update_video_pts(is, vp->pts, vp->serial);
1645  SDL_UnlockMutex(is->pictq.mutex);
1646 
1647  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1648  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1649  duration = vp_duration(is, vp, nextvp);
1650  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1651  is->frame_drops_late++;
1652  frame_queue_next(&is->pictq);
1653  goto retry;
1654  }
1655  }
1656 
1657  if (is->subtitle_st) {
1658  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1659  sp = frame_queue_peek(&is->subpq);
1660 
1661  if (frame_queue_nb_remaining(&is->subpq) > 1)
1662  sp2 = frame_queue_peek_next(&is->subpq);
1663  else
1664  sp2 = NULL;
1665 
1666  if (sp->serial != is->subtitleq.serial
1667  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1668  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1669  {
1670  if (sp->uploaded) {
1671  int i;
1672  for (i = 0; i < sp->sub.num_rects; i++) {
1673  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1674  uint8_t *pixels;
1675  int pitch, j;
1676 
1677  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1678  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1679  memset(pixels, 0, sub_rect->w << 2);
1680  SDL_UnlockTexture(is->sub_texture);
1681  }
1682  }
1683  }
1684  frame_queue_next(&is->subpq);
1685  } else {
1686  break;
1687  }
1688  }
1689  }
1690 
1691  frame_queue_next(&is->pictq);
1692  is->force_refresh = 1;
1693 
1694  if (is->step && !is->paused)
1695  stream_toggle_pause(is);
1696  }
1697 display:
1698  /* display picture */
1699  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1700  video_display(is);
1701  }
1702  is->force_refresh = 0;
1703  if (show_status) {
1704  AVBPrint buf;
1705  static int64_t last_time;
1706  int64_t cur_time;
1707  int aqsize, vqsize, sqsize;
1708  double av_diff;
1709 
1710  cur_time = av_gettime_relative();
1711  if (!last_time || (cur_time - last_time) >= 30000) {
1712  aqsize = 0;
1713  vqsize = 0;
1714  sqsize = 0;
1715  if (is->audio_st)
1716  aqsize = is->audioq.size;
1717  if (is->video_st)
1718  vqsize = is->videoq.size;
1719  if (is->subtitle_st)
1720  sqsize = is->subtitleq.size;
1721  av_diff = 0;
1722  if (is->audio_st && is->video_st)
1723  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1724  else if (is->video_st)
1725  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1726  else if (is->audio_st)
1727  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1728 
1729  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1730  av_bprintf(&buf,
1731  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1732  get_master_clock(is),
1733  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1734  av_diff,
1735  is->frame_drops_early + is->frame_drops_late,
1736  aqsize / 1024,
1737  vqsize / 1024,
1738  sqsize);
1739 
1740  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1741  fprintf(stderr, "%s", buf.str);
1742  else
1743  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1744 
1745  fflush(stderr);
1746  av_bprint_finalize(&buf, NULL);
1747 
1748  last_time = cur_time;
1749  }
1750  }
1751 }
1752 
1753 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1754 {
1755  Frame *vp;
1756 
1757 #if defined(DEBUG_SYNC)
1758  printf("frame_type=%c pts=%0.3f\n",
1759  av_get_picture_type_char(src_frame->pict_type), pts);
1760 #endif
1761 
1762  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1763  return -1;
1764 
1765  vp->sar = src_frame->sample_aspect_ratio;
1766  vp->uploaded = 0;
1767 
1768  vp->width = src_frame->width;
1769  vp->height = src_frame->height;
1770  vp->format = src_frame->format;
1771 
1772  vp->pts = pts;
1773  vp->duration = duration;
1774  vp->pos = pos;
1775  vp->serial = serial;
1776 
1777  set_default_window_size(vp->width, vp->height, vp->sar);
1778 
1779  av_frame_move_ref(vp->frame, src_frame);
1780  frame_queue_push(&is->pictq);
1781  return 0;
1782 }
1783 
1784 static int get_video_frame(VideoState *is, AVFrame *frame)
1785 {
1786  int got_picture;
1787 
1788  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1789  return -1;
1790 
1791  if (got_picture) {
1792  double dpts = NAN;
1793 
1794  if (frame->pts != AV_NOPTS_VALUE)
1795  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1796 
1797  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1798 
1799  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1800  if (frame->pts != AV_NOPTS_VALUE) {
1801  double diff = dpts - get_master_clock(is);
1802  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1803  diff - is->frame_last_filter_delay < 0 &&
1804  is->viddec.pkt_serial == is->vidclk.serial &&
1805  is->videoq.nb_packets) {
1806  is->frame_drops_early++;
1807  av_frame_unref(frame);
1808  got_picture = 0;
1809  }
1810  }
1811  }
1812  }
1813 
1814  return got_picture;
1815 }
1816 
1817 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1818  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1819 {
1820  int ret, i;
1821  int nb_filters = graph->nb_filters;
1822  AVFilterInOut *outputs = NULL, *inputs = NULL;
1823 
1824  if (filtergraph) {
1825  outputs = avfilter_inout_alloc();
1826  inputs  = avfilter_inout_alloc();
1827  if (!outputs || !inputs) {
1828  ret = AVERROR(ENOMEM);
1829  goto fail;
1830  }
1831 
1832  outputs->name = av_strdup("in");
1833  outputs->filter_ctx = source_ctx;
1834  outputs->pad_idx = 0;
1835  outputs->next = NULL;
1836 
1837  inputs->name = av_strdup("out");
1838  inputs->filter_ctx = sink_ctx;
1839  inputs->pad_idx = 0;
1840  inputs->next = NULL;
1841 
1842  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1843  goto fail;
1844  } else {
1845  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1846  goto fail;
1847  }
1848 
1849  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1850  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1851  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1852 
1853  ret = avfilter_graph_config(graph, NULL);
1854 fail:
1855  avfilter_inout_free(&outputs);
1856  avfilter_inout_free(&inputs);
1857  return ret;
1858 }
1859 
1860 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1861 {
1862  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1863  char sws_flags_str[512] = "";
1864  char buffersrc_args[256];
1865  int ret;
1866  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1867  AVCodecParameters *codecpar = is->video_st->codecpar;
1868  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1869  const AVDictionaryEntry *e = NULL;
1870  int nb_pix_fmts = 0;
1871  int i, j;
1872  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1873 
1874  if (!par)
1875  return AVERROR(ENOMEM);
1876 
1877  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1878  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1879  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1880  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1881  break;
1882  }
1883  }
1884  }
1885  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1886 
1887  while ((e = av_dict_iterate(sws_dict, e))) {
1888  if (!strcmp(e->key, "sws_flags")) {
1889  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1890  } else
1891  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1892  }
1893  if (strlen(sws_flags_str))
1894  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1895 
1896  graph->scale_sws_opts = av_strdup(sws_flags_str);
1897 
1898  snprintf(buffersrc_args, sizeof(buffersrc_args),
1899  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
1900  "colorspace=%d:range=%d",
1901  frame->width, frame->height, frame->format,
1902  is->video_st->time_base.num, is->video_st->time_base.den,
1903  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
1904  frame->colorspace, frame->color_range);
1905  if (fr.num && fr.den)
1906  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1907 
1908  if ((ret = avfilter_graph_create_filter(&filt_src,
1909  avfilter_get_by_name("buffer"),
1910  "ffplay_buffer", buffersrc_args, NULL,
1911  graph)) < 0)
1912  goto fail;
1913  par->hw_frames_ctx = frame->hw_frames_ctx;
1914  ret = av_buffersrc_parameters_set(filt_src, par);
1915  if (ret < 0)
1916  goto fail;
1917 
1918  ret = avfilter_graph_create_filter(&filt_out,
1919  avfilter_get_by_name("buffersink"),
1920  "ffplay_buffersink", NULL, NULL, graph);
1921  if (ret < 0)
1922  goto fail;
1923 
1924  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1925  goto fail;
1926  if (!vk_renderer &&
1928  goto fail;
1929 
1930  last_filter = filt_out;
1931 
1932 /* Note: this macro adds a filter before the last added filter, so the
1933  * processing order of the filters is in reverse */
1934 #define INSERT_FILT(name, arg) do { \
1935  AVFilterContext *filt_ctx; \
1936  \
1937  ret = avfilter_graph_create_filter(&filt_ctx, \
1938  avfilter_get_by_name(name), \
1939  "ffplay_" name, arg, NULL, graph); \
1940  if (ret < 0) \
1941  goto fail; \
1942  \
1943  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1944  if (ret < 0) \
1945  goto fail; \
1946  \
1947  last_filter = filt_ctx; \
1948 } while (0)
1949 
1950  if (autorotate) {
1951  double theta = 0.0;
1952  int32_t *displaymatrix = NULL;
1953  AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1954  if (sd)
1955  displaymatrix = (int32_t *)sd->data;
1956  if (!displaymatrix) {
1957  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1958  is->video_st->codecpar->nb_coded_side_data,
1959  AV_PKT_DATA_DISPLAYMATRIX);
1960  if (psd)
1961  displaymatrix = (int32_t *)psd->data;
1962  }
1963  theta = get_rotation(displaymatrix);
1964 
1965  if (fabs(theta - 90) < 1.0) {
1966  INSERT_FILT("transpose", displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1967  } else if (fabs(theta - 180) < 1.0) {
1968  if (displaymatrix[0] < 0)
1969  INSERT_FILT("hflip", NULL);
1970  if (displaymatrix[4] < 0)
1971  INSERT_FILT("vflip", NULL);
1972  } else if (fabs(theta - 270) < 1.0) {
1973  INSERT_FILT("transpose", displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1974  } else if (fabs(theta) > 1.0) {
1975  char rotate_buf[64];
1976  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1977  INSERT_FILT("rotate", rotate_buf);
1978  } else {
1979  if (displaymatrix && displaymatrix[4] < 0)
1980  INSERT_FILT("vflip", NULL);
1981  }
1982  }
1983 
1984  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1985  goto fail;
1986 
1987  is->in_video_filter = filt_src;
1988  is->out_video_filter = filt_out;
1989 
1990 fail:
1991  av_freep(&par);
1992  return ret;
1993 }
1994 
1995 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1996 {
1997  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1998  int sample_rates[2] = { 0, -1 };
1999  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2000  char aresample_swr_opts[512] = "";
2001  const AVDictionaryEntry *e = NULL;
2002  AVBPrint bp;
2003  char asrc_args[256];
2004  int ret;
2005 
2006  avfilter_graph_free(&is->agraph);
2007  if (!(is->agraph = avfilter_graph_alloc()))
2008  return AVERROR(ENOMEM);
2009  is->agraph->nb_threads = filter_nbthreads;
2010 
2011  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
2012 
2013  while ((e = av_dict_iterate(swr_opts, e)))
2014  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2015  if (strlen(aresample_swr_opts))
2016  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2017  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2018 
2019  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2020 
2021  ret = snprintf(asrc_args, sizeof(asrc_args),
2022  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2023  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2024  1, is->audio_filter_src.freq, bp.str);
2025 
2026  ret = avfilter_graph_create_filter(&filt_asrc,
2027  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2028  asrc_args, NULL, is->agraph);
2029  if (ret < 0)
2030  goto end;
2031 
2032 
2033  ret = avfilter_graph_create_filter(&filt_asink,
2034  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2035  NULL, NULL, is->agraph);
2036  if (ret < 0)
2037  goto end;
2038 
2039  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2040  goto end;
2041  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2042  goto end;
2043 
2044  if (force_output_format) {
2045  av_bprint_clear(&bp);
2046  av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
2047  sample_rates [0] = is->audio_tgt.freq;
2048  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2049  goto end;
2050  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2051  goto end;
2052  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2053  goto end;
2054  }
2055 
2056 
2057  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2058  goto end;
2059 
2060  is->in_audio_filter = filt_asrc;
2061  is->out_audio_filter = filt_asink;
2062 
2063 end:
2064  if (ret < 0)
2065  avfilter_graph_free(&is->agraph);
2066  av_bprint_finalize(&bp, NULL);
2067 
2068  return ret;
2069 }
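/* Example of the abuffer source arguments built above (illustrative values):
 *   "sample_rate=44100:sample_fmt=fltp:time_base=1/44100:channel_layout=stereo"
 * i.e. the decoder's native rate, sample format and channel layout, with a time base
 * of 1/sample_rate so that frame pts values count samples. */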
2070 
2071 static int audio_thread(void *arg)
2072 {
2073  VideoState *is = arg;
2074  AVFrame *frame = av_frame_alloc();
2075  Frame *af;
2076  int last_serial = -1;
2077  int reconfigure;
2078  int got_frame = 0;
2079  AVRational tb;
2080  int ret = 0;
2081 
2082  if (!frame)
2083  return AVERROR(ENOMEM);
2084 
2085  do {
2086  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2087  goto the_end;
2088 
2089  if (got_frame) {
2090  tb = (AVRational){1, frame->sample_rate};
2091 
2092  reconfigure =
2093  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2094  frame->format, frame->ch_layout.nb_channels) ||
2095  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2096  is->audio_filter_src.freq != frame->sample_rate ||
2097  is->auddec.pkt_serial != last_serial;
2098 
2099  if (reconfigure) {
2100  char buf1[1024], buf2[1024];
2101  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2102  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2103  av_log(NULL, AV_LOG_DEBUG,
2104  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2105  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2106  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2107 
2108  is->audio_filter_src.fmt = frame->format;
2109  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2110  if (ret < 0)
2111  goto the_end;
2112  is->audio_filter_src.freq = frame->sample_rate;
2113  last_serial = is->auddec.pkt_serial;
2114 
2115  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2116  goto the_end;
2117  }
2118 
2119  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2120  goto the_end;
2121 
2122  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2123  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2124  tb = av_buffersink_get_time_base(is->out_audio_filter);
2125  if (!(af = frame_queue_peek_writable(&is->sampq)))
2126  goto the_end;
2127 
2128  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2129  af->pos = fd ? fd->pkt_pos : -1;
2130  af->serial = is->auddec.pkt_serial;
2131  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2132 
2133  av_frame_move_ref(af->frame, frame);
2134  frame_queue_push(&is->sampq);
2135 
2136  if (is->audioq.serial != is->auddec.pkt_serial)
2137  break;
2138  }
2139  if (ret == AVERROR_EOF)
2140  is->auddec.finished = is->auddec.pkt_serial;
2141  }
2142  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2143  the_end:
2144  avfilter_graph_free(&is->agraph);
2145  av_frame_free(&frame);
2146  return ret;
2147 }
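/* Worked example of the pts/duration computation above: with the abuffersink time base
 * tb = 1/44100 and frame->pts = 88200, af->pts = 88200 * (1/44100) = 2.0 seconds;
 * af->duration for a 1024-sample frame at 44100 Hz is 1024/44100 ~= 0.0232 s. */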
2148 
2149 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2150 {
2151  packet_queue_start(d->queue);
2152  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2153  if (!d->decoder_tid) {
2154  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2155  return AVERROR(ENOMEM);
2156  }
2157  return 0;
2158 }
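/* For illustration only (not part of ffplay): a thread created with SDL_CreateThread()
 * as above is reclaimed elsewhere with SDL_WaitThread(), roughly like this
 * (the helper name is hypothetical): */
#if 0
static void join_decoder_thread_sketch(SDL_Thread *tid)
{
    int status;
    SDL_WaitThread(tid, &status); /* blocks until the thread function returns */
}
#endif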
2159 
2160 static int video_thread(void *arg)
2161 {
2162  VideoState *is = arg;
2163  AVFrame *frame = av_frame_alloc();
2164  double pts;
2165  double duration;
2166  int ret;
2167  AVRational tb = is->video_st->time_base;
2168  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2169 
2170  AVFilterGraph *graph = NULL;
2171  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2172  int last_w = 0;
2173  int last_h = 0;
2174  enum AVPixelFormat last_format = -2;
2175  int last_serial = -1;
2176  int last_vfilter_idx = 0;
2177 
2178  if (!frame)
2179  return AVERROR(ENOMEM);
2180 
2181  for (;;) {
2182  ret = get_video_frame(is, frame);
2183  if (ret < 0)
2184  goto the_end;
2185  if (!ret)
2186  continue;
2187 
2188  if ( last_w != frame->width
2189  || last_h != frame->height
2190  || last_format != frame->format
2191  || last_serial != is->viddec.pkt_serial
2192  || last_vfilter_idx != is->vfilter_idx) {
2193  av_log(NULL, AV_LOG_DEBUG,
2194  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2195  last_w, last_h,
2196  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2197  frame->width, frame->height,
2198  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2199  avfilter_graph_free(&graph);
2200  graph = avfilter_graph_alloc();
2201  if (!graph) {
2202  ret = AVERROR(ENOMEM);
2203  goto the_end;
2204  }
2205  graph->nb_threads = filter_nbthreads;
2206  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2207  SDL_Event event;
2208  event.type = FF_QUIT_EVENT;
2209  event.user.data1 = is;
2210  SDL_PushEvent(&event);
2211  goto the_end;
2212  }
2213  filt_in = is->in_video_filter;
2214  filt_out = is->out_video_filter;
2215  last_w = frame->width;
2216  last_h = frame->height;
2217  last_format = frame->format;
2218  last_serial = is->viddec.pkt_serial;
2219  last_vfilter_idx = is->vfilter_idx;
2220  frame_rate = av_buffersink_get_frame_rate(filt_out);
2221  }
2222 
2223  ret = av_buffersrc_add_frame(filt_in, frame);
2224  if (ret < 0)
2225  goto the_end;
2226 
2227  while (ret >= 0) {
2228  FrameData *fd;
2229 
2230  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2231 
2232  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2233  if (ret < 0) {
2234  if (ret == AVERROR_EOF)
2235  is->viddec.finished = is->viddec.pkt_serial;
2236  ret = 0;
2237  break;
2238  }
2239 
2240  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2241 
2242  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2243  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2244  is->frame_last_filter_delay = 0;
2245  tb = av_buffersink_get_time_base(filt_out);
2246  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2247  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2248  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2249  av_frame_unref(frame);
2250  if (is->videoq.serial != is->viddec.pkt_serial)
2251  break;
2252  }
2253 
2254  if (ret < 0)
2255  goto the_end;
2256  }
2257  the_end:
2258  avfilter_graph_free(&graph);
2259  av_frame_free(&frame);
2260  return 0;
2261 }
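/* Worked example of the duration computation above: for a 30000/1001 fps stream,
 * duration = av_q2d((AVRational){1001, 30000}) ~= 0.0334 s per frame; if the frame
 * rate is unknown (num or den zero), duration falls back to 0. */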
2262 
2263 static int subtitle_thread(void *arg)
2264 {
2265  VideoState *is = arg;
2266  Frame *sp;
2267  int got_subtitle;
2268  double pts;
2269 
2270  for (;;) {
2271  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2272  return 0;
2273 
2274  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2275  break;
2276 
2277  pts = 0;
2278 
2279  if (got_subtitle && sp->sub.format == 0) {
2280  if (sp->sub.pts != AV_NOPTS_VALUE)
2281  pts = sp->sub.pts / (double)AV_TIME_BASE;
2282  sp->pts = pts;
2283  sp->serial = is->subdec.pkt_serial;
2284  sp->width = is->subdec.avctx->width;
2285  sp->height = is->subdec.avctx->height;
2286  sp->uploaded = 0;
2287 
2288  /* now we can update the picture count */
2289  frame_queue_push(&is->subpq);
2290  } else if (got_subtitle) {
2291  avsubtitle_free(&sp->sub);
2292  }
2293  }
2294  return 0;
2295 }
2296 
2297 /* copy samples for viewing in editor window */
2298 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2299 {
2300  int size, len;
2301 
2302  size = samples_size / sizeof(short);
2303  while (size > 0) {
2304  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2305  if (len > size)
2306  len = size;
2307  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2308  samples += len;
2309  is->sample_array_index += len;
2310  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2311  is->sample_array_index = 0;
2312  size -= len;
2313  }
2314 }
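/* The sample array filled above is a fixed-size circular buffer: sample_array_index
 * wraps back to 0 once it reaches SAMPLE_ARRAY_SIZE, so the waveform/RDFT display
 * always sees the most recently played samples. */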
2315 
2316 /* return the wanted number of samples to get better sync if sync_type is video
2317  * or external master clock */
2318 static int synchronize_audio(VideoState *is, int nb_samples)
2319 {
2320  int wanted_nb_samples = nb_samples;
2321 
2322  /* if not master, then we try to remove or add samples to correct the clock */
2323  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2324  double diff, avg_diff;
2325  int min_nb_samples, max_nb_samples;
2326 
2327  diff = get_clock(&is->audclk) - get_master_clock(is);
2328 
2329  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2330  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2331  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2332  /* not enough measures to have a correct estimate */
2333  is->audio_diff_avg_count++;
2334  } else {
2335  /* estimate the A-V difference */
2336  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2337 
2338  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2339  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2340  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2341  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2342  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2343  }
2344  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2345  diff, avg_diff, wanted_nb_samples - nb_samples,
2346  is->audio_clock, is->audio_diff_threshold);
2347  }
2348  } else {
2349  /* too big a difference: may be initial PTS errors, so
2350  reset the A-V filter */
2351  is->audio_diff_avg_count = 0;
2352  is->audio_diff_cum = 0;
2353  }
2354  }
2355 
2356  return wanted_nb_samples;
2357 }
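/* Worked example (illustrative numbers): with nb_samples = 1024, audio_src.freq = 44100,
 * avg_diff already above audio_diff_threshold and diff = +0.01 s, the raw request is
 * 1024 + 0.01 * 44100 = 1465 samples, but the clip range is
 * [1024 * 90 / 100, 1024 * 110 / 100] = [921, 1126], so wanted_nb_samples becomes 1126:
 * the per-call correction is limited to SAMPLE_CORRECTION_PERCENT_MAX = 10%. */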
2358 
2359 /**
2360  * Decode one audio frame and return its uncompressed size.
2361  *
2362  * The processed audio frame is decoded, converted if required, and
2363  * stored in is->audio_buf, with size in bytes given by the return
2364  * value.
2365  */
2366 static int audio_decode_frame(VideoState *is)
2367 {
2368  int data_size, resampled_data_size;
2369  av_unused double audio_clock0;
2370  int wanted_nb_samples;
2371  Frame *af;
2372 
2373  if (is->paused)
2374  return -1;
2375 
2376  do {
2377 #if defined(_WIN32)
2378  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2379  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2380  return -1;
2381  av_usleep (1000);
2382  }
2383 #endif
2384  if (!(af = frame_queue_peek_readable(&is->sampq)))
2385  return -1;
2386  frame_queue_next(&is->sampq);
2387  } while (af->serial != is->audioq.serial);
2388 
2389  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2390  af->frame->nb_samples,
2391  af->frame->format, 1);
2392 
2393  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2394 
2395  if (af->frame->format != is->audio_src.fmt ||
2396  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2397  af->frame->sample_rate != is->audio_src.freq ||
2398  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2399  int ret;
2400  swr_free(&is->swr_ctx);
2401  ret = swr_alloc_set_opts2(&is->swr_ctx,
2402  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2403  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2404  0, NULL);
2405  if (ret < 0 || swr_init(is->swr_ctx) < 0) {
2406  av_log(NULL, AV_LOG_ERROR,
2407  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2408  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2409  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2410  swr_free(&is->swr_ctx);
2411  return -1;
2412  }
2413  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2414  return -1;
2415  is->audio_src.freq = af->frame->sample_rate;
2416  is->audio_src.fmt = af->frame->format;
2417  }
2418 
2419  if (is->swr_ctx) {
2420  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2421  uint8_t **out = &is->audio_buf1;
2422  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2423  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2424  int len2;
2425  if (out_size < 0) {
2426  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2427  return -1;
2428  }
2429  if (wanted_nb_samples != af->frame->nb_samples) {
2430  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2431  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2432  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2433  return -1;
2434  }
2435  }
2436  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2437  if (!is->audio_buf1)
2438  return AVERROR(ENOMEM);
2439  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2440  if (len2 < 0) {
2441  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2442  return -1;
2443  }
2444  if (len2 == out_count) {
2445  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2446  if (swr_init(is->swr_ctx) < 0)
2447  swr_free(&is->swr_ctx);
2448  }
2449  is->audio_buf = is->audio_buf1;
2450  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2451  } else {
2452  is->audio_buf = af->frame->data[0];
2453  resampled_data_size = data_size;
2454  }
2455 
2456  audio_clock0 = is->audio_clock;
2457  /* update the audio clock with the pts */
2458  if (!isnan(af->pts))
2459  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2460  else
2461  is->audio_clock = NAN;
2462  is->audio_clock_serial = af->serial;
2463 #ifdef DEBUG
2464  {
2465  static double last_clock;
2466  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2467  is->audio_clock - last_clock,
2468  is->audio_clock, audio_clock0);
2469  last_clock = is->audio_clock;
2470  }
2471 #endif
2472  return resampled_data_size;
2473 }
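/* Worked example of resampled_data_size above (illustrative numbers): if swr_convert()
 * returns len2 = 1152 samples for a stereo S16 target, the buffer holds
 * 1152 * 2 channels * 2 bytes = 4608 bytes, which is what the function returns. */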
2474 
2475 /* prepare a new audio buffer */
2476 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2477 {
2478  VideoState *is = opaque;
2479  int audio_size, len1;
2480 
2481  audio_callback_time = av_gettime_relative();
2482 
2483  while (len > 0) {
2484  if (is->audio_buf_index >= is->audio_buf_size) {
2485  audio_size = audio_decode_frame(is);
2486  if (audio_size < 0) {
2487  /* if error, just output silence */
2488  is->audio_buf = NULL;
2489  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2490  } else {
2491  if (is->show_mode != SHOW_MODE_VIDEO)
2492  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2493  is->audio_buf_size = audio_size;
2494  }
2495  is->audio_buf_index = 0;
2496  }
2497  len1 = is->audio_buf_size - is->audio_buf_index;
2498  if (len1 > len)
2499  len1 = len;
2500  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2501  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2502  else {
2503  memset(stream, 0, len1);
2504  if (!is->muted && is->audio_buf)
2505  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2506  }
2507  len -= len1;
2508  stream += len1;
2509  is->audio_buf_index += len1;
2510  }
2511  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2512  /* Let's assume the audio driver that is used by SDL has two periods. */
2513  if (!isnan(is->audio_clock)) {
2514  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2515  sync_clock_to_slave(&is->extclk, &is->audclk);
2516  }
2517 }
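/* Worked example of the clock update above (illustrative numbers): with an 8192-byte
 * SDL hardware buffer, 4096 bytes still unwritten and audio_tgt.bytes_per_sec = 176400
 * (44100 Hz stereo S16), the pts passed to set_clock_at() lags is->audio_clock by
 * (2 * 8192 + 4096) / 176400 ~= 0.116 s, accounting for data that is queued but not
 * yet audible. */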
2518 
2519 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2520 {
2521  SDL_AudioSpec wanted_spec, spec;
2522  const char *env;
2523  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2524  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2525  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2526  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2527 
2528  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2529  if (env) {
2530  wanted_nb_channels = atoi(env);
2531  av_channel_layout_uninit(wanted_channel_layout);
2532  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2533  }
2534  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2535  av_channel_layout_uninit(wanted_channel_layout);
2536  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2537  }
2538  wanted_nb_channels = wanted_channel_layout->nb_channels;
2539  wanted_spec.channels = wanted_nb_channels;
2540  wanted_spec.freq = wanted_sample_rate;
2541  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2542  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2543  return -1;
2544  }
2545  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2546  next_sample_rate_idx--;
2547  wanted_spec.format = AUDIO_S16SYS;
2548  wanted_spec.silence = 0;
2549  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2550  wanted_spec.callback = sdl_audio_callback;
2551  wanted_spec.userdata = opaque;
2552  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2553  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2554  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2555  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2556  if (!wanted_spec.channels) {
2557  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2558  wanted_spec.channels = wanted_nb_channels;
2559  if (!wanted_spec.freq) {
2560  av_log(NULL, AV_LOG_ERROR,
2561  "No more combinations to try, audio open failed\n");
2562  return -1;
2563  }
2564  }
2565  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2566  }
2567  if (spec.format != AUDIO_S16SYS) {
2568  av_log(NULL, AV_LOG_ERROR,
2569  "SDL advised audio format %d is not supported!\n", spec.format);
2570  return -1;
2571  }
2572  if (spec.channels != wanted_spec.channels) {
2573  av_channel_layout_uninit(wanted_channel_layout);
2574  av_channel_layout_default(wanted_channel_layout, spec.channels);
2575  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2576  av_log(NULL, AV_LOG_ERROR,
2577  "SDL advised channel count %d is not supported!\n", spec.channels);
2578  return -1;
2579  }
2580  }
2581 
2582  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2583  audio_hw_params->freq = spec.freq;
2584  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2585  return -1;
2586  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2587  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2588  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2589  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2590  return -1;
2591  }
2592  return spec.size;
2593 }
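/* Worked example of the buffer sizing above (illustrative numbers): for a 48000 Hz
 * request, 48000 / SDL_AUDIO_MAX_CALLBACKS_PER_SEC = 1600, 2 << av_log2(1600) = 2048,
 * and FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2048) = 2048 samples per callback, i.e. roughly
 * 43 ms of audio per SDL callback. */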
2594 
2595 static int create_hwaccel(AVBufferRef **device_ctx)
2596 {
2597  enum AVHWDeviceType type;
2598  int ret;
2599  AVBufferRef *vk_dev;
2600 
2601  *device_ctx = NULL;
2602 
2603  if (!hwaccel)
2604  return 0;
2605 
2606  type = av_hwdevice_find_type_by_name(hwaccel);
2607  if (type == AV_HWDEVICE_TYPE_NONE)
2608  return AVERROR(ENOTSUP);
2609 
2611  if (ret < 0)
2612  return ret;
2613 
2614  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2615  if (!ret)
2616  return 0;
2617 
2618  if (ret != AVERROR(ENOSYS))
2619  return ret;
2620 
2621  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2622  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2623  return ret;
2624 }
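/* For illustration only (not part of ffplay): when no Vulkan device is available to
 * derive from, a hardware device context of a given type can be created directly, which
 * is what the av_hwdevice_ctx_create() fallback above does. Sketch with a hypothetical
 * helper name and VAAPI chosen arbitrarily: */
#if 0
static void create_vaapi_device_sketch(void)
{
    AVBufferRef *hw_dev = NULL;
    if (av_hwdevice_ctx_create(&hw_dev, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0) < 0) {
        /* no usable hardware device; a caller would fall back to software decoding */
    }
    av_buffer_unref(&hw_dev);
}
#endif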
2625 
2626 /* open a given stream. Return 0 if OK */
2627 static int stream_component_open(VideoState *is, int stream_index)
2628 {
2629  AVFormatContext *ic = is->ic;
2630  AVCodecContext *avctx;
2631  const AVCodec *codec;
2632  const char *forced_codec_name = NULL;
2633  AVDictionary *opts = NULL;
2634  int sample_rate;
2635  AVChannelLayout ch_layout = { 0 };
2636  int ret = 0;
2637  int stream_lowres = lowres;
2638 
2639  if (stream_index < 0 || stream_index >= ic->nb_streams)
2640  return -1;
2641 
2642  avctx = avcodec_alloc_context3(NULL);
2643  if (!avctx)
2644  return AVERROR(ENOMEM);
2645 
2646  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2647  if (ret < 0)
2648  goto fail;
2649  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2650 
2651  codec = avcodec_find_decoder(avctx->codec_id);
2652 
2653  switch(avctx->codec_type){
2654  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2655  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2656  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2657  }
2658  if (forced_codec_name)
2659  codec = avcodec_find_decoder_by_name(forced_codec_name);
2660  if (!codec) {
2661  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2662  "No codec could be found with name '%s'\n", forced_codec_name);
2663  else av_log(NULL, AV_LOG_WARNING,
2664  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2665  ret = AVERROR(EINVAL);
2666  goto fail;
2667  }
2668 
2669  avctx->codec_id = codec->id;
2670  if (stream_lowres > codec->max_lowres) {
2671  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2672  codec->max_lowres);
2673  stream_lowres = codec->max_lowres;
2674  }
2675  avctx->lowres = stream_lowres;
2676 
2677  if (fast)
2678  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2679 
2680  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2681  ic->streams[stream_index], codec, &opts, NULL);
2682  if (ret < 0)
2683  goto fail;
2684 
2685  if (!av_dict_get(opts, "threads", NULL, 0))
2686  av_dict_set(&opts, "threads", "auto", 0);
2687  if (stream_lowres)
2688  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2689 
2690  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2691 
2692  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2693  ret = create_hwaccel(&avctx->hw_device_ctx);
2694  if (ret < 0)
2695  goto fail;
2696  }
2697 
2698  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2699  goto fail;
2700  }
2701  ret = check_avoptions(opts);
2702  if (ret < 0)
2703  goto fail;
2704 
2705  is->eof = 0;
2706  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2707  switch (avctx->codec_type) {
2708  case AVMEDIA_TYPE_AUDIO:
2709  {
2710  AVFilterContext *sink;
2711 
2712  is->audio_filter_src.freq = avctx->sample_rate;
2713  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2714  if (ret < 0)
2715  goto fail;
2716  is->audio_filter_src.fmt = avctx->sample_fmt;
2717  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2718  goto fail;
2719  sink = is->out_audio_filter;
2720  sample_rate = av_buffersink_get_sample_rate(sink);
2721  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2722  if (ret < 0)
2723  goto fail;
2724  }
2725 
2726  /* prepare audio output */
2727  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2728  goto fail;
2729  is->audio_hw_buf_size = ret;
2730  is->audio_src = is->audio_tgt;
2731  is->audio_buf_size = 0;
2732  is->audio_buf_index = 0;
2733 
2734  /* init averaging filter */
2735  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2736  is->audio_diff_avg_count = 0;
2737  /* since we do not have a precise enough audio FIFO fullness,
2738  we correct audio sync only if larger than this threshold */
2739  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
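 /* Worked example (illustrative numbers): an 8192-byte SDL buffer at
  * audio_tgt.bytes_per_sec = 176400 gives audio_diff_threshold ~= 0.046 s, so drift
  * smaller than one hardware buffer is left uncorrected. */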
2740 
2741  is->audio_stream = stream_index;
2742  is->audio_st = ic->streams[stream_index];
2743 
2744  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2745  goto fail;
2746  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2747  is->auddec.start_pts = is->audio_st->start_time;
2748  is->auddec.start_pts_tb = is->audio_st->time_base;
2749  }
2750  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2751  goto out;
2752  SDL_PauseAudioDevice(audio_dev, 0);
2753  break;
2754  case AVMEDIA_TYPE_VIDEO:
2755  is->video_stream = stream_index;
2756  is->video_st = ic->streams[stream_index];
2757 
2758  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2759  goto fail;
2760  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2761  goto out;
2762  is->queue_attachments_req = 1;
2763  break;
2764  case AVMEDIA_TYPE_SUBTITLE:
2765  is->subtitle_stream = stream_index;
2766  is->subtitle_st = ic->streams[stream_index];
2767 
2768  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2769  goto fail;
2770  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2771  goto out;
2772  break;
2773  default:
2774  break;
2775  }
2776  goto out;
2777 
2778 fail:
2779  avcodec_free_context(&avctx);
2780 out:
2781  av_channel_layout_uninit(&ch_layout);
2782  av_dict_free(&opts);
2783 
2784  return ret;
2785 }
2786 
2787 static int decode_interrupt_cb(void *ctx)
2788 {
2789  VideoState *is = ctx;
2790  return is->abort_request;
2791 }
2792 
2793 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2794  return stream_id < 0 ||
2795  queue->abort_request ||
2796  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2797  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2798 }
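/* Worked example: a queue counts as "enough" once it holds more than MIN_FRAMES (25)
 * packets and either carries no duration information or buffers more than one second of
 * stream time (av_q2d(st->time_base) * queue->duration > 1.0); attached pictures and
 * aborted queues are always treated as full. */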
2799 
2800 static int is_realtime(AVFormatContext *s)
2801 {
2802  if( !strcmp(s->iformat->name, "rtp")
2803  || !strcmp(s->iformat->name, "rtsp")
2804  || !strcmp(s->iformat->name, "sdp")
2805  )
2806  return 1;
2807 
2808  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2809  || !strncmp(s->url, "udp:", 4)
2810  )
2811  )
2812  return 1;
2813  return 0;
2814 }
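/* Example: an rtsp:// input is handled by the "rtsp" demuxer, so is_realtime() returns 1
 * and read_thread() below defaults infinite_buffer to 1 for it, disabling the
 * MAX_QUEUE_SIZE / stream_has_enough_packets() back-pressure. */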
2815 
2816 /* this thread gets the stream from the disk or the network */
2817 static int read_thread(void *arg)
2818 {
2819  VideoState *is = arg;
2820  AVFormatContext *ic = NULL;
2821  int err, i, ret;
2822  int st_index[AVMEDIA_TYPE_NB];
2823  AVPacket *pkt = NULL;
2824  int64_t stream_start_time;
2825  int pkt_in_play_range = 0;
2826  const AVDictionaryEntry *t;
2827  SDL_mutex *wait_mutex = SDL_CreateMutex();
2828  int scan_all_pmts_set = 0;
2829  int64_t pkt_ts;
2830 
2831  if (!wait_mutex) {
2832  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2833  ret = AVERROR(ENOMEM);
2834  goto fail;
2835  }
2836 
2837  memset(st_index, -1, sizeof(st_index));
2838  is->eof = 0;
2839 
2840  pkt = av_packet_alloc();
2841  if (!pkt) {
2842  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2843  ret = AVERROR(ENOMEM);
2844  goto fail;
2845  }
2846  ic = avformat_alloc_context();
2847  if (!ic) {
2848  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2849  ret = AVERROR(ENOMEM);
2850  goto fail;
2851  }
2852  ic->interrupt_callback.callback = decode_interrupt_cb;
2853  ic->interrupt_callback.opaque = is;
2854  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2855  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2856  scan_all_pmts_set = 1;
2857  }
2858  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2859  if (err < 0) {
2860  print_error(is->filename, err);
2861  ret = -1;
2862  goto fail;
2863  }
2864  if (scan_all_pmts_set)
2865  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2866  remove_avoptions(&format_opts, codec_opts);
2867 
2868  ret = check_avoptions(format_opts);
2869  if (ret < 0)
2870  goto fail;
2871  is->ic = ic;
2872 
2873  if (genpts)
2874  ic->flags |= AVFMT_FLAG_GENPTS;
2875 
2876  if (find_stream_info) {
2877  AVDictionary **opts;
2878  int orig_nb_streams = ic->nb_streams;
2879 
2880  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2881  if (err < 0) {
2882  av_log(NULL, AV_LOG_ERROR,
2883  "Error setting up avformat_find_stream_info() options\n");
2884  ret = err;
2885  goto fail;
2886  }
2887 
2888  err = avformat_find_stream_info(ic, opts);
2889 
2890  for (i = 0; i < orig_nb_streams; i++)
2891  av_dict_free(&opts[i]);
2892  av_freep(&opts);
2893 
2894  if (err < 0) {
2895  av_log(NULL, AV_LOG_WARNING,
2896  "%s: could not find codec parameters\n", is->filename);
2897  ret = -1;
2898  goto fail;
2899  }
2900  }
2901 
2902  if (ic->pb)
2903  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2904 
2905  if (seek_by_bytes < 0)
2906  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2907  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2908  strcmp("ogg", ic->iformat->name);
2909 
2910  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2911 
2912  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2913  window_title = av_asprintf("%s - %s", t->value, input_filename);
2914 
2915  /* if seeking requested, we execute it */
2916  if (start_time != AV_NOPTS_VALUE) {
2917  int64_t timestamp;
2918 
2919  timestamp = start_time;
2920  /* add the stream start time */
2921  if (ic->start_time != AV_NOPTS_VALUE)
2922  timestamp += ic->start_time;
2923  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2924  if (ret < 0) {
2925  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2926  is->filename, (double)timestamp / AV_TIME_BASE);
2927  }
2928  }
2929 
2930  is->realtime = is_realtime(ic);
2931 
2932  if (show_status)
2933  av_dump_format(ic, 0, is->filename, 0);
2934 
2935  for (i = 0; i < ic->nb_streams; i++) {
2936  AVStream *st = ic->streams[i];
2937  enum AVMediaType type = st->codecpar->codec_type;
2938  st->discard = AVDISCARD_ALL;
2939  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2940  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2941  st_index[type] = i;
2942  }
2943  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2944  if (wanted_stream_spec[i] && st_index[i] == -1) {
2945  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2946  st_index[i] = INT_MAX;
2947  }
2948  }
2949 
2950  if (!video_disable)
2951  st_index[AVMEDIA_TYPE_VIDEO] =
2952  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2953  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2954  if (!audio_disable)
2955  st_index[AVMEDIA_TYPE_AUDIO] =
2956  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2957  st_index[AVMEDIA_TYPE_AUDIO],
2958  st_index[AVMEDIA_TYPE_VIDEO],
2959  NULL, 0);
2960  if (!subtitle_disable)
2961  st_index[AVMEDIA_TYPE_SUBTITLE] =
2962  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2963  st_index[AVMEDIA_TYPE_SUBTITLE],
2964  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2965  st_index[AVMEDIA_TYPE_AUDIO] :
2966  st_index[AVMEDIA_TYPE_VIDEO]),
2967  NULL, 0);
2968 
2969  is->show_mode = show_mode;
2970  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2971  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2972  AVCodecParameters *codecpar = st->codecpar;
2973  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2974  if (codecpar->width)
2975  set_default_window_size(codecpar->width, codecpar->height, sar);
2976  }
2977 
2978  /* open the streams */
2979  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2980  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2981  }
2982 
2983  ret = -1;
2984  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2985  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2986  }
2987  if (is->show_mode == SHOW_MODE_NONE)
2988  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2989 
2990  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2991  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2992  }
2993 
2994  if (is->video_stream < 0 && is->audio_stream < 0) {
2995  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2996  is->filename);
2997  ret = -1;
2998  goto fail;
2999  }
3000 
3001  if (infinite_buffer < 0 && is->realtime)
3002  infinite_buffer = 1;
3003 
3004  for (;;) {
3005  if (is->abort_request)
3006  break;
3007  if (is->paused != is->last_paused) {
3008  is->last_paused = is->paused;
3009  if (is->paused)
3010  is->read_pause_return = av_read_pause(ic);
3011  else
3012  av_read_play(ic);
3013  }
3014 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3015  if (is->paused &&
3016  (!strcmp(ic->iformat->name, "rtsp") ||
3017  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3018  /* wait 10 ms to avoid trying to get another packet */
3019  /* XXX: horrible */
3020  SDL_Delay(10);
3021  continue;
3022  }
3023 #endif
3024  if (is->seek_req) {
3025  int64_t seek_target = is->seek_pos;
3026  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3027  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3028 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3029 // of the seek_pos/seek_rel variables
3030 
3031  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3032  if (ret < 0) {
3033  av_log(NULL, AV_LOG_ERROR,
3034  "%s: error while seeking\n", is->ic->url);
3035  } else {
3036  if (is->audio_stream >= 0)
3037  packet_queue_flush(&is->audioq);
3038  if (is->subtitle_stream >= 0)
3039  packet_queue_flush(&is->subtitleq);
3040  if (is->video_stream >= 0)
3041  packet_queue_flush(&is->videoq);
3042  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3043  set_clock(&is->extclk, NAN, 0);
3044  } else {
3045  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3046  }
3047  }
3048  is->seek_req = 0;
3049  is->queue_attachments_req = 1;
3050  is->eof = 0;
3051  if (is->paused)
3052  step_to_next_frame(is);
3053  }
3054  if (is->queue_attachments_req) {
3055  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3056  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3057  goto fail;
3058  packet_queue_put(&is->videoq, pkt);
3059  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3060  }
3061  is->queue_attachments_req = 0;
3062  }
3063 
3064  /* if the queues are full, no need to read more */
3065  if (infinite_buffer<1 &&
3066  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3067  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3068  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3069  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3070  /* wait 10 ms */
3071  SDL_LockMutex(wait_mutex);
3072  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3073  SDL_UnlockMutex(wait_mutex);
3074  continue;
3075  }
3076  if (!is->paused &&
3077  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3078  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3079  if (loop != 1 && (!loop || --loop)) {
3080  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3081  } else if (autoexit) {
3082  ret = AVERROR_EOF;
3083  goto fail;
3084  }
3085  }
3086  ret = av_read_frame(ic, pkt);
3087  if (ret < 0) {
3088  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3089  if (is->video_stream >= 0)
3090  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3091  if (is->audio_stream >= 0)
3092  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3093  if (is->subtitle_stream >= 0)
3094  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3095  is->eof = 1;
3096  }
3097  if (ic->pb && ic->pb->error) {
3098  if (autoexit)
3099  goto fail;
3100  else
3101  break;
3102  }
3103  SDL_LockMutex(wait_mutex);
3104  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3105  SDL_UnlockMutex(wait_mutex);
3106  continue;
3107  } else {
3108  is->eof = 0;
3109  }
3110  /* check if packet is in play range specified by user, then queue, otherwise discard */
3111  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3112  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3113  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3114  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3115  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3116  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3117  <= ((double)duration / 1000000);
3118  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3119  packet_queue_put(&is->audioq, pkt);
3120  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3121  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3122  packet_queue_put(&is->videoq, pkt);
3123  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3124  packet_queue_put(&is->subtitleq, pkt);
3125  } else {
3126  av_packet_unref(pkt);
3127  }
3128  }
3129 
3130  ret = 0;
3131  fail:
3132  if (ic && !is->ic)
3133  avformat_close_input(&ic);
3134 
3135  av_packet_free(&pkt);
3136  if (ret != 0) {
3137  SDL_Event event;
3138 
3139  event.type = FF_QUIT_EVENT;
3140  event.user.data1 = is;
3141  SDL_PushEvent(&event);
3142  }
3143  SDL_DestroyMutex(wait_mutex);
3144  return 0;
3145 }
3146 
3147 static VideoState *stream_open(const char *filename,
3148  const AVInputFormat *iformat)
3149 {
3150  VideoState *is;
3151 
3152  is = av_mallocz(sizeof(VideoState));
3153  if (!is)
3154  return NULL;
3155  is->last_video_stream = is->video_stream = -1;
3156  is->last_audio_stream = is->audio_stream = -1;
3157  is->last_subtitle_stream = is->subtitle_stream = -1;
3158  is->filename = av_strdup(filename);
3159  if (!is->filename)
3160  goto fail;
3161  is->iformat = iformat;
3162  is->ytop = 0;
3163  is->xleft = 0;
3164 
3165  /* start video display */
3166  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3167  goto fail;
3168  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3169  goto fail;
3170  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3171  goto fail;
3172 
3173  if (packet_queue_init(&is->videoq) < 0 ||
3174  packet_queue_init(&is->audioq) < 0 ||
3175  packet_queue_init(&is->subtitleq) < 0)
3176  goto fail;
3177 
3178  if (!(is->continue_read_thread = SDL_CreateCond())) {
3179  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3180  goto fail;
3181  }
3182 
3183  init_clock(&is->vidclk, &is->videoq.serial);
3184  init_clock(&is->audclk, &is->audioq.serial);
3185  init_clock(&is->extclk, &is->extclk.serial);
3186  is->audio_clock_serial = -1;
3187  if (startup_volume < 0)
3188  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3189  if (startup_volume > 100)
3190  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3191  startup_volume = av_clip(startup_volume, 0, 100);
3192  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3193  is->audio_volume = startup_volume;
3194  is->muted = 0;
3195  is->av_sync_type = av_sync_type;
3196  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3197  if (!is->read_tid) {
3198  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3199 fail:
3200  stream_close(is);
3201  return NULL;
3202  }
3203  return is;
3204 }
3205 
3206 static void stream_cycle_channel(VideoState *is, int codec_type)
3207 {
3208  AVFormatContext *ic = is->ic;
3209  int start_index, stream_index;
3210  int old_index;
3211  AVStream *st;
3212  AVProgram *p = NULL;
3213  int nb_streams = is->ic->nb_streams;
3214 
3215  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3216  start_index = is->last_video_stream;
3217  old_index = is->video_stream;
3218  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3219  start_index = is->last_audio_stream;
3220  old_index = is->audio_stream;
3221  } else {
3222  start_index = is->last_subtitle_stream;
3223  old_index = is->subtitle_stream;
3224  }
3225  stream_index = start_index;
3226 
3227  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3228  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3229  if (p) {
3230  nb_streams = p->nb_stream_indexes;
3231  for (start_index = 0; start_index < nb_streams; start_index++)
3232  if (p->stream_index[start_index] == stream_index)
3233  break;
3234  if (start_index == nb_streams)
3235  start_index = -1;
3236  stream_index = start_index;
3237  }
3238  }
3239 
3240  for (;;) {
3241  if (++stream_index >= nb_streams)
3242  {
3243  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3244  {
3245  stream_index = -1;
3246  is->last_subtitle_stream = -1;
3247  goto the_end;
3248  }
3249  if (start_index == -1)
3250  return;
3251  stream_index = 0;
3252  }
3253  if (stream_index == start_index)
3254  return;
3255  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3256  if (st->codecpar->codec_type == codec_type) {
3257  /* check that parameters are OK */
3258  switch (codec_type) {
3259  case AVMEDIA_TYPE_AUDIO:
3260  if (st->codecpar->sample_rate != 0 &&
3261  st->codecpar->ch_layout.nb_channels != 0)
3262  goto the_end;
3263  break;
3264  case AVMEDIA_TYPE_VIDEO:
3265  case AVMEDIA_TYPE_SUBTITLE:
3266  goto the_end;
3267  default:
3268  break;
3269  }
3270  }
3271  }
3272  the_end:
3273  if (p && stream_index != -1)
3274  stream_index = p->stream_index[stream_index];
3275  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3276  av_get_media_type_string(codec_type),
3277  old_index,
3278  stream_index);
3279 
3280  stream_component_close(is, old_index);
3281  stream_component_open(is, stream_index);
3282 }
3283 
3284 
3285 static void toggle_full_screen(VideoState *is)
3286 {
3287  is_full_screen = !is_full_screen;
3288  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3289 }
3290 
3291 static void toggle_audio_display(VideoState *is)
3292 {
3293  int next = is->show_mode;
3294  do {
3295  next = (next + 1) % SHOW_MODE_NB;
3296  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3297  if (is->show_mode != next) {
3298  is->force_refresh = 1;
3299  is->show_mode = next;
3300  }
3301 }
3302 
3303 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3304  double remaining_time = 0.0;
3305  SDL_PumpEvents();
3306  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3307  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3308  SDL_ShowCursor(0);
3309  cursor_hidden = 1;
3310  }
3311  if (remaining_time > 0.0)
3312  av_usleep((int64_t)(remaining_time * 1000000.0));
3313  remaining_time = REFRESH_RATE;
3314  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3315  video_refresh(is, &remaining_time);
3316  SDL_PumpEvents();
3317  }
3318 }
3319 
3320 static void seek_chapter(VideoState *is, int incr)
3321 {
3322  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3323  int i;
3324 
3325  if (!is->ic->nb_chapters)
3326  return;
3327 
3328  /* find the current chapter */
3329  for (i = 0; i < is->ic->nb_chapters; i++) {
3330  AVChapter *ch = is->ic->chapters[i];
3331  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3332  i--;
3333  break;
3334  }
3335  }
3336 
3337  i += incr;
3338  i = FFMAX(i, 0);
3339  if (i >= is->ic->nb_chapters)
3340  return;
3341 
3342  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3343  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3344  AV_TIME_BASE_Q), 0, 0);
3345 }
3346 
3347 /* handle an event sent by the GUI */
3348 static void event_loop(VideoState *cur_stream)
3349 {
3350  SDL_Event event;
3351  double incr, pos, frac;
3352 
3353  for (;;) {
3354  double x;
3355  refresh_loop_wait_event(cur_stream, &event);
3356  switch (event.type) {
3357  case SDL_KEYDOWN:
3358  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3359  do_exit(cur_stream);
3360  break;
3361  }
3362  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3363  if (!cur_stream->width)
3364  continue;
3365  switch (event.key.keysym.sym) {
3366  case SDLK_f:
3367  toggle_full_screen(cur_stream);
3368  cur_stream->force_refresh = 1;
3369  break;
3370  case SDLK_p:
3371  case SDLK_SPACE:
3372  toggle_pause(cur_stream);
3373  break;
3374  case SDLK_m:
3375  toggle_mute(cur_stream);
3376  break;
3377  case SDLK_KP_MULTIPLY:
3378  case SDLK_0:
3379  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3380  break;
3381  case SDLK_KP_DIVIDE:
3382  case SDLK_9:
3383  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3384  break;
3385  case SDLK_s: // S: Step to next frame
3386  step_to_next_frame(cur_stream);
3387  break;
3388  case SDLK_a:
3389  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3390  break;
3391  case SDLK_v:
3392  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3393  break;
3394  case SDLK_c:
3395  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3396  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3397  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3398  break;
3399  case SDLK_t:
3400  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3401  break;
3402  case SDLK_w:
3403  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3404  if (++cur_stream->vfilter_idx >= nb_vfilters)
3405  cur_stream->vfilter_idx = 0;
3406  } else {
3407  cur_stream->vfilter_idx = 0;
3408  toggle_audio_display(cur_stream);
3409  }
3410  break;
3411  case SDLK_PAGEUP:
3412  if (cur_stream->ic->nb_chapters <= 1) {
3413  incr = 600.0;
3414  goto do_seek;
3415  }
3416  seek_chapter(cur_stream, 1);
3417  break;
3418  case SDLK_PAGEDOWN:
3419  if (cur_stream->ic->nb_chapters <= 1) {
3420  incr = -600.0;
3421  goto do_seek;
3422  }
3423  seek_chapter(cur_stream, -1);
3424  break;
3425  case SDLK_LEFT:
3426  incr = seek_interval ? -seek_interval : -10.0;
3427  goto do_seek;
3428  case SDLK_RIGHT:
3429  incr = seek_interval ? seek_interval : 10.0;
3430  goto do_seek;
3431  case SDLK_UP:
3432  incr = 60.0;
3433  goto do_seek;
3434  case SDLK_DOWN:
3435  incr = -60.0;
3436  do_seek:
3437  if (seek_by_bytes) {
3438  pos = -1;
3439  if (pos < 0 && cur_stream->video_stream >= 0)
3440  pos = frame_queue_last_pos(&cur_stream->pictq);
3441  if (pos < 0 && cur_stream->audio_stream >= 0)
3442  pos = frame_queue_last_pos(&cur_stream->sampq);
3443  if (pos < 0)
3444  pos = avio_tell(cur_stream->ic->pb);
3445  if (cur_stream->ic->bit_rate)
3446  incr *= cur_stream->ic->bit_rate / 8.0;
3447  else
3448  incr *= 180000.0;
3449  pos += incr;
3450  stream_seek(cur_stream, pos, incr, 1);
3451  } else {
3452  pos = get_master_clock(cur_stream);
3453  if (isnan(pos))
3454  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3455  pos += incr;
3456  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3457  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3458  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3459  }
3460  break;
3461  default:
3462  break;
3463  }
3464  break;
3465  case SDL_MOUSEBUTTONDOWN:
3466  if (exit_on_mousedown) {
3467  do_exit(cur_stream);
3468  break;
3469  }
3470  if (event.button.button == SDL_BUTTON_LEFT) {
3471  static int64_t last_mouse_left_click = 0;
3472  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3473  toggle_full_screen(cur_stream);
3474  cur_stream->force_refresh = 1;
3475  last_mouse_left_click = 0;
3476  } else {
3477  last_mouse_left_click = av_gettime_relative();
3478  }
3479  }
3480  case SDL_MOUSEMOTION:
3481  if (cursor_hidden) {
3482  SDL_ShowCursor(1);
3483  cursor_hidden = 0;
3484  }
3486  if (event.type == SDL_MOUSEBUTTONDOWN) {
3487  if (event.button.button != SDL_BUTTON_RIGHT)
3488  break;
3489  x = event.button.x;
3490  } else {
3491  if (!(event.motion.state & SDL_BUTTON_RMASK))
3492  break;
3493  x = event.motion.x;
3494  }
3495  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3496  uint64_t size = avio_size(cur_stream->ic->pb);
3497  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3498  } else {
3499  int64_t ts;
3500  int ns, hh, mm, ss;
3501  int tns, thh, tmm, tss;
3502  tns = cur_stream->ic->duration / 1000000LL;
3503  thh = tns / 3600;
3504  tmm = (tns % 3600) / 60;
3505  tss = (tns % 60);
3506  frac = x / cur_stream->width;
3507  ns = frac * tns;
3508  hh = ns / 3600;
3509  mm = (ns % 3600) / 60;
3510  ss = (ns % 60);
3511  av_log(NULL, AV_LOG_INFO,
3512  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3513  hh, mm, ss, thh, tmm, tss);
3514  ts = frac * cur_stream->ic->duration;
3515  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3516  ts += cur_stream->ic->start_time;
3517  stream_seek(cur_stream, ts, 0, 0);
3518  }
3519  break;
3520  case SDL_WINDOWEVENT:
3521  switch (event.window.event) {
3522  case SDL_WINDOWEVENT_SIZE_CHANGED:
3523  screen_width = cur_stream->width = event.window.data1;
3524  screen_height = cur_stream->height = event.window.data2;
3525  if (cur_stream->vis_texture) {
3526  SDL_DestroyTexture(cur_stream->vis_texture);
3527  cur_stream->vis_texture = NULL;
3528  }
3529  if (vk_renderer)
3531  case SDL_WINDOWEVENT_EXPOSED:
3532  cur_stream->force_refresh = 1;
3533  }
3534  break;
3535  case SDL_QUIT:
3536  case FF_QUIT_EVENT:
3537  do_exit(cur_stream);
3538  break;
3539  default:
3540  break;
3541  }
3542  }
3543 }
3544 
3545 static int opt_width(void *optctx, const char *opt, const char *arg)
3546 {
3547  double num;
3548  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3549  if (ret < 0)
3550  return ret;
3551 
3552  screen_width = num;
3553  return 0;
3554 }
3555 
3556 static int opt_height(void *optctx, const char *opt, const char *arg)
3557 {
3558  double num;
3559  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3560  if (ret < 0)
3561  return ret;
3562 
3563  screen_height = num;
3564  return 0;
3565 }
3566 
3567 static int opt_format(void *optctx, const char *opt, const char *arg)
3568 {
3569  file_iformat = av_find_input_format(arg);
3570  if (!file_iformat) {
3571  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3572  return AVERROR(EINVAL);
3573  }
3574  return 0;
3575 }
3576 
3577 static int opt_sync(void *optctx, const char *opt, const char *arg)
3578 {
3579  if (!strcmp(arg, "audio"))
3580  av_sync_type = AV_SYNC_AUDIO_MASTER;
3581  else if (!strcmp(arg, "video"))
3582  av_sync_type = AV_SYNC_VIDEO_MASTER;
3583  else if (!strcmp(arg, "ext"))
3584  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3585  else {
3586  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3587  exit(1);
3588  }
3589  return 0;
3590 }
3591 
3592 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3593 {
3594  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3595  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3596  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3597 
3598  if (show_mode == SHOW_MODE_NONE) {
3599  double num;
3600  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3601  if (ret < 0)
3602  return ret;
3603  show_mode = num;
3604  }
3605  return 0;
3606 }
3607 
3608 static int opt_input_file(void *optctx, const char *filename)
3609 {
3610  if (input_filename) {
3611  av_log(NULL, AV_LOG_FATAL,
3612  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3613  filename, input_filename);
3614  return AVERROR(EINVAL);
3615  }
3616  if (!strcmp(filename, "-"))
3617  filename = "fd:";
3618  input_filename = av_strdup(filename);
3619  if (!input_filename)
3620  return AVERROR(ENOMEM);
3621 
3622  return 0;
3623 }
3624 
3625 static int opt_codec(void *optctx, const char *opt, const char *arg)
3626 {
3627  const char *spec = strchr(opt, ':');
3628  const char **name;
3629  if (!spec) {
3630  av_log(NULL, AV_LOG_ERROR,
3631  "No media specifier was specified in '%s' in option '%s'\n",
3632  arg, opt);
3633  return AVERROR(EINVAL);
3634  }
3635  spec++;
3636 
3637  switch (spec[0]) {
3638  case 'a' : name = &audio_codec_name; break;
3639  case 's' : name = &subtitle_codec_name; break;
3640  case 'v' : name = &video_codec_name; break;
3641  default:
3642  av_log(NULL, AV_LOG_ERROR,
3643  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3644  return AVERROR(EINVAL);
3645  }
3646 
3647  av_freep(name);
3648  *name = av_strdup(arg);
3649  return *name ? 0 : AVERROR(ENOMEM);
3650 }
3651 
3652 static int dummy;
3653 
3654 static const OptionDef options[] = {
3655  CMDUTILS_COMMON_OPTIONS
3656  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3657  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3658  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3659  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3660  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3661  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3662  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3663  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3664  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3665  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3666  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3667  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3668  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3669  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3670  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3671  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3672  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3673  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3674  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3675  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3676  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3677  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3678  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3679  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3680  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3681  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3682  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3683  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3684  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3685  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3686  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3687  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3688  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3689  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3690  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3691  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3692  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3693  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3694  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3695  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3696  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3697  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3698  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3699  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3700  "read and decode the streams to fill missing information with heuristics" },
3701  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3702  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3703  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3704  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3705  { NULL, },
3706 };
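/* Each entry above binds a command-line flag to one of the static variables or
 * option handlers declared earlier in this file; parse_options() walks this
 * table from main() and show_help_options() prints it for -h. An illustrative
 * invocation (example only, not part of the source):
 *   ffplay -autoexit -volume 50 -vf "scale=640:-1" input.mp4
 * sets autoexit and startup_volume directly and queues the video filter
 * through opt_add_vfilter(). */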
3707 
3708 static void show_usage(void)
3709 {
3710  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3711  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3712  av_log(NULL, AV_LOG_INFO, "\n");
3713 }
3714 
3715 void show_help_default(const char *opt, const char *arg)
3716 {
3717  av_log_set_callback(log_callback_help);
3718  show_usage();
3719  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3720  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3721  printf("\n");
3722  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3723  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3724  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3725  printf("\nWhile playing:\n"
3726  "q, ESC quit\n"
3727  "f toggle full screen\n"
3728  "p, SPC pause\n"
3729  "m toggle mute\n"
3730  "9, 0 decrease and increase volume respectively\n"
3731  "/, * decrease and increase volume respectively\n"
3732  "a cycle audio channel in the current program\n"
3733  "v cycle video channel\n"
3734  "t cycle subtitle channel in the current program\n"
3735  "c cycle program\n"
3736  "w cycle video filters or show modes\n"
3737  "s activate frame-step mode\n"
3738  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3739  "down/up seek backward/forward 1 minute\n"
3740  "page down/page up seek backward/forward 10 minutes\n"
3741  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3742  "left double-click toggle full screen\n"
3743  );
3744 }
3745 
3746 /* Program entry point */
3747 int main(int argc, char **argv)
3748 {
3749  int flags, ret;
3750  VideoState *is;
3751 
3752  init_dynload();
3753 
3754  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3755  parse_loglevel(argc, argv, options);
3756 
3757  /* register all codecs, demux and protocols */
3758 #if CONFIG_AVDEVICE
3759  avdevice_register_all();
3760 #endif
3761  avformat_network_init();
3762 
3763  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3764  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3765 
3766  show_banner(argc, argv, options);
3767 
3768  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3769  if (ret < 0)
3770  exit(ret == AVERROR_EXIT ? 0 : 1);
3771 
3772  if (!input_filename) {
3773  show_usage();
3774  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3776  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3777  exit(1);
3778  }
3779 
3780  if (display_disable) {
3781  video_disable = 1;
3782  }
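 /* Build the SDL_Init() subsystem mask: video, audio and timer by default,
  * minus the audio subsystem when audio is disabled and the video subsystem
  * when -nodisp was given. */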
3783  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3784  if (audio_disable)
3785  flags &= ~SDL_INIT_AUDIO;
3786  else {
3787  /* Try to work around an occasional ALSA buffer underflow issue when the
3788  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3789  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3790  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3791  }
3792  if (display_disable)
3793  flags &= ~SDL_INIT_VIDEO;
3794  if (SDL_Init (flags)) {
3795  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3796  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3797  exit(1);
3798  }
3799 
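 /* SDL_SYSWMEVENT and plain SDL_USEREVENT events are never consumed by the
  * event loop, so tell SDL to drop them instead of queueing them. */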
3800  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3801  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3802 
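 /* When display output is enabled, create the (initially hidden) window with
  * the flags requested on the command line and the renderer used to draw
  * into it. */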
3803  if (!display_disable) {
3804  int flags = SDL_WINDOW_HIDDEN;
3805  if (alwaysontop)
3806 #if SDL_VERSION_ATLEAST(2,0,5)
3807  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3808 #else
3809  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3810 #endif
3811  if (borderless)
3812  flags |= SDL_WINDOW_BORDERLESS;
3813  else
3814  flags |= SDL_WINDOW_RESIZABLE;
3815 
3816 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3817  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3818 #endif
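 /* Hardware-accelerated decoding is only wired up through the Vulkan
  * renderer here, so requesting -hwaccel implicitly enables it. */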
3819  if (hwaccel && !enable_vulkan) {
3820  av_log(NULL, AV_LOG_INFO, "Enabling vulkan renderer to support hwaccel %s\n", hwaccel);
3821  enable_vulkan = 1;
3822  }
3823  if (enable_vulkan) {
3824  vk_renderer = vk_get_renderer();
3825  if (vk_renderer) {
3826 #if SDL_VERSION_ATLEAST(2, 0, 6)
3827  flags |= SDL_WINDOW_VULKAN;
3828 #endif
3829  } else {
3830  av_log(NULL, AV_LOG_WARNING, "Vulkan renderer is not available, falling back to the SDL renderer\n");
3831  enable_vulkan = 0;
3832  }
3833  }
3834  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3835  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3836  if (!window) {
3837  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3838  do_exit(NULL);
3839  }
3840 
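 /* Pick the drawing backend: the Vulkan renderer when one is available,
  * otherwise an accelerated, vsync-enabled SDL renderer, with a plain SDL
  * renderer as the last resort. */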
3841  if (vk_renderer) {
3842  AVDictionary *dict = NULL;
3843 
3844  if (vulkan_params) {
3845  int ret = av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3846  if (ret < 0) {
3847  av_log(NULL, AV_LOG_FATAL, "Failed to parse vulkan_params: %s\n", vulkan_params);
3848  do_exit(NULL);
3849  }
3850  }
3851  ret = vk_renderer_create(vk_renderer, window, dict);
3852  av_dict_free(&dict);
3853  if (ret < 0) {
3854  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer: %s\n", av_err2str(ret));
3855  do_exit(NULL);
3856  }
3857  } else {
3858  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3859  if (!renderer) {
3860  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3861  renderer = SDL_CreateRenderer(window, -1, 0);
3862  }
3863  if (renderer) {
3864  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3865  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3866  }
3867  if (!renderer || !renderer_info.num_texture_formats) {
3868  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3869  do_exit(NULL);
3870  }
3871  }
3872  }
3873 
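 /* Open the input and start the demuxing/decoding threads, then hand control
  * to the SDL event loop for the rest of the program's lifetime. */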
3874  is = stream_open(input_filename, file_iformat);
3875  if (!is) {
3876  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3877  do_exit(NULL);
3878  }
3879 
3880  event_loop(is);
3881 
3882  /* never returns */
3883 
3884  return 0;
3885 }