utils.c
1 /*
2  * various utility functions for use within FFmpeg
3  * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "avformat.h"
23 #include "avio_internal.h"
24 #include "internal.h"
25 #include "libavcodec/internal.h"
26 #include "libavcodec/raw.h"
27 #include "libavcodec/bytestream.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/pixdesc.h"
32 #include "metadata.h"
33 #include "id3v2.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
40 #include "riff.h"
41 #include "audiointerleave.h"
42 #include "url.h"
43 #include <stdarg.h>
44 #if CONFIG_NETWORK
45 #include "network.h"
46 #endif
47 
48 #undef NDEBUG
49 #include <assert.h>
50 
51 /**
52  * @file
53  * various utility functions for use within FFmpeg
54  */
55 
56 unsigned avformat_version(void)
57 {
58  av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59  return LIBAVFORMAT_VERSION_INT;
60 }
61 
62 const char *avformat_configuration(void)
63 {
64  return FFMPEG_CONFIGURATION;
65 }
66 
67 const char *avformat_license(void)
68 {
69 #define LICENSE_PREFIX "libavformat license: "
70  return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
71 }
72 
73 #define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
74 
75 static int is_relative(int64_t ts) {
76  return ts > (RELATIVE_TS_BASE - (1LL<<48));
77 }
78 
79 /**
80  * Wrap a given time stamp, if there is an indication for an overflow
81  *
82  * @param st stream
83  * @param timestamp the time stamp to wrap
84  * @return resulting time stamp
85  */
86 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
87 {
88  if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89  st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90  if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91  timestamp < st->pts_wrap_reference)
92  return timestamp + (1ULL<<st->pts_wrap_bits);
93  else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94  timestamp >= st->pts_wrap_reference)
95  return timestamp - (1ULL<<st->pts_wrap_bits);
96  }
97  return timestamp;
98 }
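To make the wrap handling above concrete (an illustrative sketch, not part of utils.c): MPEG-TS carries 33-bit PTS/DTS, so a stream whose wrap reference sits just below 2^33 sees post-wrap packets arrive with small raw values, and AV_PTS_WRAP_ADD_OFFSET shifts them up by one full period. A self-contained sketch of the same arithmetic, with hypothetical constants standing in for the AVStream fields:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for st->pts_wrap_bits and st->pts_wrap_reference. */
#define WRAP_BITS      33                              /* MPEG-TS PTS/DTS width      */
#define WRAP_REFERENCE ((1LL << WRAP_BITS) - 900000LL) /* ~10 s before the wrap point */

/* Same arithmetic as wrap_timestamp() with AV_PTS_WRAP_ADD_OFFSET behavior:
 * timestamps that already wrapped (fell below the reference) are shifted up
 * by one full wrap period so the timeline stays monotonic. */
static int64_t wrap_add_offset(int64_t ts)
{
    if (ts < WRAP_REFERENCE)
        return ts + (1LL << WRAP_BITS);
    return ts;
}

int main(void)
{
    printf("%lld\n", (long long)wrap_add_offset((1LL << 33) - 90000)); /* pre-wrap: unchanged */
    printf("%lld\n", (long long)wrap_add_offset(90000));               /* post-wrap: + 2^33   */
    return 0;
}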
99 
100 #define MAKE_ACCESSORS(str, name, type, field) \
101  type av_##name##_get_##field(const str *s) { return s->field; } \
102  void av_##name##_set_##field(str *s, type v) { s->field = v; }
103 
104 MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
105 
106 /* an arbitrarily chosen "sane" max packet size -- 50M */
107 #define SANE_CHUNK_SIZE (50000000)
108 
109 int ffio_limit(AVIOContext *s, int size)
110 {
111  if(s->maxsize>=0){
112  int64_t remaining= s->maxsize - avio_tell(s);
113  if(remaining < size){
114  int64_t newsize= avio_size(s);
115  if(!s->maxsize || s->maxsize<newsize)
116  s->maxsize= newsize - !newsize;
117  remaining= s->maxsize - avio_tell(s);
118  remaining= FFMAX(remaining, 0);
119  }
120 
121  if(s->maxsize>=0 && remaining+1 < size){
122  av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
123  size= remaining+1;
124  }
125  }
126  return size;
127 }
128 
129 /*
130  * Read the data in sane-sized chunks and append to pkt.
131  * Return the number of bytes read or an error.
132  */
133 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
134 {
135  int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
136  int orig_size = pkt->size;
137  int ret;
138 
139  do {
140  int prev_size = pkt->size;
141  int read_size;
142 
143  /*
144  * When the caller requests a lot of data, limit it to the amount left
145  * in file or SANE_CHUNK_SIZE when it is not known
146  */
147  read_size = size;
148  if (read_size > SANE_CHUNK_SIZE/10) {
149  read_size = ffio_limit(s, read_size);
150  // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
151  if (s->maxsize < 0)
152  read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
153  }
154 
155  ret = av_grow_packet(pkt, read_size);
156  if (ret < 0)
157  break;
158 
159  ret = avio_read(s, pkt->data + prev_size, read_size);
160  if (ret != read_size) {
161  av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
162  break;
163  }
164 
165  size -= read_size;
166  } while (size > 0);
167  if (size > 0)
168  pkt->flags |= AV_PKT_FLAG_CORRUPT;
169 
170  pkt->pos = orig_pos;
171  if (!pkt->size)
172  av_free_packet(pkt);
173  return pkt->size > orig_size ? pkt->size - orig_size : ret;
174 }
175 
176 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
177 {
178  av_init_packet(pkt);
179  pkt->data = NULL;
180  pkt->size = 0;
181  pkt->pos = avio_tell(s);
182 
183  return append_packet_chunked(s, pkt, size);
184 }
185 
186 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
187 {
188  if (!pkt->size)
189  return av_get_packet(s, pkt, size);
190  return append_packet_chunked(s, pkt, size);
191 }
192 
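For context (a hedged caller sketch, not code from this file): a demuxer's read_packet callback typically hands a size taken straight from the bitstream to av_get_packet(), relying on the chunked reads and the SANE_CHUNK_SIZE cap above to survive a bogus or truncated length field. Assuming a made-up container whose packets are a 32-bit big-endian length followed by payload:

#include "avformat.h"

static int example_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int size = avio_rb32(s->pb);          /* hypothetical length field */
    int ret;

    if (size <= 0)
        return AVERROR_INVALIDDATA;

    /* av_get_packet() allocates pkt->data, reads in sane-sized chunks and
     * marks the packet AV_PKT_FLAG_CORRUPT itself if the read comes up short. */
    ret = av_get_packet(s->pb, pkt, size);
    return ret < 0 ? ret : 0;
}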
193 
194 int av_filename_number_test(const char *filename)
195 {
196  char buf[1024];
197  return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
198 }
199 
200 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
201 {
202  AVProbeData lpd = *pd;
203  AVInputFormat *fmt1 = NULL, *fmt;
204  int score, nodat = 0, score_max=0;
205  const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
206 
207  if (!lpd.buf)
208  lpd.buf = zerobuffer;
209 
210  if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
211  int id3len = ff_id3v2_tag_len(lpd.buf);
212  if (lpd.buf_size > id3len + 16) {
213  lpd.buf += id3len;
214  lpd.buf_size -= id3len;
215  }else
216  nodat = 1;
217  }
218 
219  fmt = NULL;
220  while ((fmt1 = av_iformat_next(fmt1))) {
221  if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
222  continue;
223  score = 0;
224  if (fmt1->read_probe) {
225  score = fmt1->read_probe(&lpd);
226  if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
227  score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
228  } else if (fmt1->extensions) {
229  if (av_match_ext(lpd.filename, fmt1->extensions)) {
230  score = AVPROBE_SCORE_EXTENSION;
231  }
232  }
233  if (score > score_max) {
234  score_max = score;
235  fmt = fmt1;
236  }else if (score == score_max)
237  fmt = NULL;
238  }
239  if(nodat)
240  score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
241  *score_ret= score_max;
242 
243  return fmt;
244 }
245 
246 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
247 {
248  int score_ret;
249  AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
250  if(score_ret > *score_max){
251  *score_max= score_ret;
252  return fmt;
253  }else
254  return NULL;
255 }
256 
257 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
258  int score=0;
259  return av_probe_input_format2(pd, is_opened, &score);
260 }
261 
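As an illustration of driving this probing chain directly (not part of utils.c): a caller fills an AVProbeData whose buffer has AVPROBE_PADDING_SIZE zeroed bytes appended, which is exactly what av_probe_input_buffer() below arranges before calling av_probe_input_format2(). A minimal sketch under that assumption:

#include <string.h>
#include <libavformat/avformat.h>

/* Probe an in-memory buffer; buf must have room for size + AVPROBE_PADDING_SIZE
 * bytes, and the caller is responsible for providing that extra space. */
static AVInputFormat *probe_buffer(uint8_t *buf, int size, const char *filename)
{
    AVProbeData pd = { 0 };
    int score = 0;

    memset(buf + size, 0, AVPROBE_PADDING_SIZE); /* zero the probe padding */
    pd.filename = filename ? filename : "";
    pd.buf      = buf;
    pd.buf_size = size;

    /* is_opened = 1: also consider formats that require an open file. */
    return av_probe_input_format2(&pd, 1, &score);
}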
262 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
263 {
264  static const struct {
265  const char *name; enum AVCodecID id; enum AVMediaType type;
266  } fmt_id_type[] = {
267  { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
268  { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
269  { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
270  { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
271  { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
274  { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
275  { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
276  { 0 }
277  };
278  int score;
279  AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
280 
281  if (fmt && st->request_probe <= score) {
282  int i;
283  av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
284  pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
285  for (i = 0; fmt_id_type[i].name; i++) {
286  if (!strcmp(fmt->name, fmt_id_type[i].name)) {
287  st->codec->codec_id = fmt_id_type[i].id;
288  st->codec->codec_type = fmt_id_type[i].type;
289  break;
290  }
291  }
292  }
293  return score;
294 }
295 
296 /************************************************************/
297 /* input media file */
298 
300  int err;
301 
302  if (ic->iformat->read_header) {
303  err = ic->iformat->read_header(ic);
304  if (err < 0)
305  return err;
306  }
307 
308  if (ic->pb && !ic->data_offset)
309  ic->data_offset = avio_tell(ic->pb);
310 
311  return 0;
312 }
313 
314 
315 /** size of probe buffer, for guessing file type from file contents */
316 #define PROBE_BUF_MIN 2048
317 #define PROBE_BUF_MAX (1<<20)
318 
319 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
320  const char *filename, void *logctx,
321  unsigned int offset, unsigned int max_probe_size)
322 {
323  AVProbeData pd = { filename ? filename : "", NULL, -offset };
324  unsigned char *buf = NULL;
325  uint8_t *mime_type;
326  int ret = 0, probe_size, buf_offset = 0;
327 
328  if (!max_probe_size) {
329  max_probe_size = PROBE_BUF_MAX;
330  } else if (max_probe_size > PROBE_BUF_MAX) {
331  max_probe_size = PROBE_BUF_MAX;
332  } else if (max_probe_size < PROBE_BUF_MIN) {
333  av_log(logctx, AV_LOG_ERROR,
334  "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
335  return AVERROR(EINVAL);
336  }
337 
338  if (offset >= max_probe_size) {
339  return AVERROR(EINVAL);
340  }
341 
342  if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
343  if (!av_strcasecmp(mime_type, "audio/aacp")) {
344  *fmt = av_find_input_format("aac");
345  }
346  av_freep(&mime_type);
347  }
348 
349  for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
350  probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
351  int score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
352  void *buftmp;
353 
354  if (probe_size < offset) {
355  continue;
356  }
357 
358  /* read probe data */
359  buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
360  if(!buftmp){
361  av_free(buf);
362  return AVERROR(ENOMEM);
363  }
364  buf=buftmp;
365  if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
366  /* fail if error was not end of file, otherwise, lower score */
367  if (ret != AVERROR_EOF) {
368  av_free(buf);
369  return ret;
370  }
371  score = 0;
372  ret = 0; /* error was end of file, nothing read */
373  }
374  pd.buf_size = buf_offset += ret;
375  pd.buf = &buf[offset];
376 
377  memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
378 
379  /* guess file format */
380  *fmt = av_probe_input_format2(&pd, 1, &score);
381  if(*fmt){
382  if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
383  av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
384  }else
385  av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
386  }
387  }
388 
389  if (!*fmt) {
390  av_free(buf);
391  return AVERROR_INVALIDDATA;
392  }
393 
394  /* rewind. reuse probe buffer to avoid seeking */
395  ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
396 
397  return ret;
398 }
399 
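A typical caller-side use of av_probe_input_buffer() (an illustrative sketch, assuming a plain seekable file): open the AVIOContext yourself and let the function grow the probe window from PROBE_BUF_MIN towards max_probe_size; on success it rewinds the context by stuffing the probe data back, so no explicit seek is needed afterwards.

#include <libavformat/avformat.h>

static int probe_file_format(const char *filename, AVInputFormat **fmt)
{
    AVIOContext *pb = NULL;
    int ret = avio_open(&pb, filename, AVIO_FLAG_READ);
    if (ret < 0)
        return ret;

    *fmt = NULL;
    /* offset = 0, max_probe_size = 0 selects the PROBE_BUF_MAX default. */
    ret = av_probe_input_buffer(pb, fmt, filename, NULL, 0, 0);

    avio_close(pb);
    return ret;
}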
400 /* open input file and probe the format if necessary */
401 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
402 {
403  int ret;
404  AVProbeData pd = {filename, NULL, 0};
405  int score = AVPROBE_SCORE_RETRY;
406 
407  if (s->pb) {
408  s->flags |= AVFMT_FLAG_CUSTOM_IO;
409  if (!s->iformat)
410  return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
411  else if (s->iformat->flags & AVFMT_NOFILE)
412  av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
413  "will be ignored with AVFMT_NOFILE format.\n");
414  return 0;
415  }
416 
417  if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
418  (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
419  return 0;
420 
421  if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
422  &s->interrupt_callback, options)) < 0)
423  return ret;
424  if (s->iformat)
425  return 0;
426  return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
427 }
428 
429 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
430  AVPacketList **plast_pktl){
431  AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
432  if (!pktl)
433  return NULL;
434 
435  if (*packet_buffer)
436  (*plast_pktl)->next = pktl;
437  else
438  *packet_buffer = pktl;
439 
440  /* add the packet in the buffered packet list */
441  *plast_pktl = pktl;
442  pktl->pkt= *pkt;
443  return &pktl->pkt;
444 }
445 
446 int avformat_queue_attached_pictures(AVFormatContext *s)
447 {
448  int i;
449  for (i = 0; i < s->nb_streams; i++)
450  if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
451  s->streams[i]->discard < AVDISCARD_ALL) {
452  AVPacket copy = s->streams[i]->attached_pic;
453  copy.buf = av_buffer_ref(copy.buf);
454  if (!copy.buf)
455  return AVERROR(ENOMEM);
456 
457  add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
458  }
459  return 0;
460 }
461 
462 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
463 {
464  AVFormatContext *s = *ps;
465  int ret = 0;
466  AVDictionary *tmp = NULL;
467  ID3v2ExtraMeta *id3v2_extra_meta = NULL;
468 
469  if (!s && !(s = avformat_alloc_context()))
470  return AVERROR(ENOMEM);
471  if (!s->av_class){
472  av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
473  return AVERROR(EINVAL);
474  }
475  if (fmt)
476  s->iformat = fmt;
477 
478  if (options)
479  av_dict_copy(&tmp, *options, 0);
480 
481  if ((ret = av_opt_set_dict(s, &tmp)) < 0)
482  goto fail;
483 
484  if ((ret = init_input(s, filename, &tmp)) < 0)
485  goto fail;
487 
488  /* check filename in case an image number is expected */
489  if (s->iformat->flags & AVFMT_NEEDNUMBER) {
490  if (!av_filename_number_test(filename)) {
491  ret = AVERROR(EINVAL);
492  goto fail;
493  }
494  }
495 
497  av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
498 
499  /* allocate private data */
500  if (s->iformat->priv_data_size > 0) {
501  if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
502  ret = AVERROR(ENOMEM);
503  goto fail;
504  }
505  if (s->iformat->priv_class) {
506  *(const AVClass**)s->priv_data = s->iformat->priv_class;
507  av_opt_set_defaults(s->priv_data);
508  if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
509  goto fail;
510  }
511  }
512 
513  /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
514  if (s->pb)
515  ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
516 
517  if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
518  if ((ret = s->iformat->read_header(s)) < 0)
519  goto fail;
520 
521  if (id3v2_extra_meta) {
522  if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
523  !strcmp(s->iformat->name, "tta")) {
524  if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
525  goto fail;
526  } else
527  av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
528  }
529  ff_id3v2_free_extra_meta(&id3v2_extra_meta);
530 
531  if ((ret = avformat_queue_attached_pictures(s)) < 0)
532  goto fail;
533 
534  if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
535  s->data_offset = avio_tell(s->pb);
536 
537  s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
538 
539  if (options) {
540  av_dict_free(options);
541  *options = tmp;
542  }
543  *ps = s;
544  return 0;
545 
546 fail:
547  ff_id3v2_free_extra_meta(&id3v2_extra_meta);
548  av_dict_free(&tmp);
549  if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
550  avio_close(s->pb);
551  avformat_free_context(s);
552  *ps = NULL;
553  return ret;
554 }
555 
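For reference, the usual pairing around the function above, written as a hedged caller sketch rather than anything from this file: on failure avformat_open_input() has already freed the context and reset the pointer to NULL, so the error path needs no extra cleanup.

#include <libavformat/avformat.h>

static int open_and_dump(const char *filename)
{
    AVFormatContext *ic = NULL;
    int ret;

    av_register_all();                                    /* register demuxers once */

    ret = avformat_open_input(&ic, filename, NULL, NULL); /* probe + read_header()  */
    if (ret < 0)
        return ret;                                       /* ic is NULL here        */

    ret = avformat_find_stream_info(ic, NULL);
    if (ret >= 0)
        av_dump_format(ic, 0, filename, 0);               /* is_output = 0          */

    avformat_close_input(&ic);                            /* frees ic, sets it NULL */
    return ret < 0 ? ret : 0;
}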
556 /*******************************************************/
557 
558 static void force_codec_ids(AVFormatContext *s, AVStream *st)
559 {
560  switch(st->codec->codec_type){
561  case AVMEDIA_TYPE_VIDEO:
562  if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
563  break;
564  case AVMEDIA_TYPE_AUDIO:
565  if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
566  break;
567  case AVMEDIA_TYPE_SUBTITLE:
568  if(s->subtitle_codec_id) st->codec->codec_id= s->subtitle_codec_id;
569  break;
570  }
571 }
572 
573 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
574 {
575  if(st->request_probe>0){
576  AVProbeData *pd = &st->probe_data;
577  int end;
578  av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
579  --st->probe_packets;
580 
581  if (pkt) {
582  uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
583  if(!new_buf)
584  goto no_packet;
585  pd->buf = new_buf;
586  memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
587  pd->buf_size += pkt->size;
588  memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
589  } else {
590 no_packet:
591  st->probe_packets = 0;
592  if (!pd->buf_size) {
593  av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
594  st->index);
595  }
596  }
597 
599  || st->probe_packets<=0;
600 
601  if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
602  int score= set_codec_from_probe_data(s, st, pd);
603  if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
604  || end){
605  pd->buf_size=0;
606  av_freep(&pd->buf);
607  st->request_probe= -1;
608  if(st->codec->codec_id != AV_CODEC_ID_NONE){
609  av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
610  }else
611  av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
612  }
613  force_codec_ids(s, st);
614  }
615  }
616 }
617 
618 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
619 {
620  int ret, i;
621  AVStream *st;
622 
623  for(;;){
624  AVPacketList *pktl = s->raw_packet_buffer;
625 
626  if (pktl) {
627  *pkt = pktl->pkt;
628  st = s->streams[pkt->stream_index];
630  probe_codec(s, st, NULL);
631  if(st->request_probe <= 0){
632  s->raw_packet_buffer = pktl->next;
634  av_free(pktl);
635  return 0;
636  }
637  }
638 
639  pkt->data = NULL;
640  pkt->size = 0;
641  av_init_packet(pkt);
642  ret= s->iformat->read_packet(s, pkt);
643  if (ret < 0) {
644  if (!pktl || ret == AVERROR(EAGAIN))
645  return ret;
646  for (i = 0; i < s->nb_streams; i++) {
647  st = s->streams[i];
648  if (st->probe_packets) {
649  probe_codec(s, st, NULL);
650  }
651  av_assert0(st->request_probe <= 0);
652  }
653  continue;
654  }
655 
656  if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
657  (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
658  av_log(s, AV_LOG_WARNING,
659  "Dropped corrupted packet (stream = %d)\n",
660  pkt->stream_index);
661  av_free_packet(pkt);
662  continue;
663  }
664 
665  if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
666  av_packet_merge_side_data(pkt);
667 
668  if(pkt->stream_index >= (unsigned)s->nb_streams){
669  av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
670  continue;
671  }
672 
673  st= s->streams[pkt->stream_index];
674  pkt->dts = wrap_timestamp(st, pkt->dts);
675  pkt->pts = wrap_timestamp(st, pkt->pts);
676 
677  force_codec_ids(s, st);
678 
679  /* TODO: audio: time filter; video: frame reordering (pts != dts) */
680  if (s->use_wallclock_as_timestamps)
681  pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
682 
683  if(!pktl && st->request_probe <= 0)
684  return ret;
685 
686  add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
687  s->raw_packet_buffer_remaining_size -= pkt->size;
688 
689  probe_codec(s, st, pkt);
690  }
691 }
692 
693 #if FF_API_READ_PACKET
694 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
695 {
696  return ff_read_packet(s, pkt);
697 }
698 #endif
699 
700 
701 /**********************************************************/
702 
703 static int determinable_frame_size(AVCodecContext *avctx)
704 {
705  if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
706  avctx->codec_id == AV_CODEC_ID_MP1 ||
707  avctx->codec_id == AV_CODEC_ID_MP2 ||
708  avctx->codec_id == AV_CODEC_ID_MP3/* ||
709  avctx->codec_id == AV_CODEC_ID_CELT*/)
710  return 1;
711  return 0;
712 }
713 
714 /**
715  * Get the number of samples of an audio frame. Return -1 on error.
716  */
717 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
718 {
719  int frame_size;
720 
721  /* give frame_size priority if demuxing */
722  if (!mux && enc->frame_size > 1)
723  return enc->frame_size;
724 
725  if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
726  return frame_size;
727 
728  /* Fall back on using frame_size if muxing. */
729  if (enc->frame_size > 1)
730  return enc->frame_size;
731 
732  //For WMA we currently have no other means to calculate duration thus we
733  //do it here by assuming CBR, which is true for all known cases.
734  if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
735  if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
736  return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
737  }
738 
739  return -1;
740 }
741 
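To put numbers on the CBR fallback above (illustrative values, not taken from the source): a 6144-byte WMAv2 packet at 48 kHz and 192 kbit/s lasts 6144 * 8 * 48000 / 192000 = 12288 samples, i.e. 0.256 s.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* duration in samples = size * 8 * sample_rate / bit_rate (CBR assumption) */
    int64_t size = 6144, sample_rate = 48000, bit_rate = 192000;
    printf("%lld samples\n", (long long)(size * 8 * sample_rate / bit_rate)); /* 12288 */
    return 0;
}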
742 
743 /**
744  * Compute the frame duration as the fraction *pnum / *pden, in seconds; both are set to 0 if it is not available.
745  */
746 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
747  AVCodecParserContext *pc, AVPacket *pkt)
748 {
749  int frame_size;
750 
751  *pnum = 0;
752  *pden = 0;
753  switch(st->codec->codec_type) {
754  case AVMEDIA_TYPE_VIDEO:
755  if (st->r_frame_rate.num && !pc) {
756  *pnum = st->r_frame_rate.den;
757  *pden = st->r_frame_rate.num;
758  } else if(st->time_base.num*1000LL > st->time_base.den) {
759  *pnum = st->time_base.num;
760  *pden = st->time_base.den;
761  }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
762  *pnum = st->codec->time_base.num;
763  *pden = st->codec->time_base.den;
764  if (pc && pc->repeat_pict) {
765  if (*pnum > INT_MAX / (1 + pc->repeat_pict))
766  *pden /= 1 + pc->repeat_pict;
767  else
768  *pnum *= 1 + pc->repeat_pict;
769  }
770  //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
771  //Thus if we have no parser in such case leave duration undefined.
772  if(st->codec->ticks_per_frame>1 && !pc){
773  *pnum = *pden = 0;
774  }
775  }
776  break;
777  case AVMEDIA_TYPE_AUDIO:
778  frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
779  if (frame_size <= 0 || st->codec->sample_rate <= 0)
780  break;
781  *pnum = frame_size;
782  *pden = st->codec->sample_rate;
783  break;
784  default:
785  break;
786  }
787 }
788 
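How the (*pnum, *pden) pair is consumed (a sketch mirroring the rescale in compute_pkt_fields() further down, with assumed example numbers): 25 fps video gives a frame duration of 1/25 s, which in a 1/90000 stream time base becomes 90000 / 25 = 3600 time-base units.

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int num = 1, den = 25;               /* frame duration as a fraction of a second */
    AVRational time_base = { 1, 90000 }; /* typical MPEG-TS stream time base         */

    /* Same rescale as compute_pkt_fields(): packet duration in time-base units. */
    int64_t duration = av_rescale_rnd(1, num * (int64_t)time_base.den,
                                      den * (int64_t)time_base.num, AV_ROUND_DOWN);
    printf("%lld\n", (long long)duration); /* prints 3600 */
    return 0;
}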
789 static int is_intra_only(AVCodecContext *enc){
790  const AVCodecDescriptor *desc;
791 
792  if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
793  return 1;
794 
795  desc = av_codec_get_codec_descriptor(enc);
796  if (!desc) {
797  desc = avcodec_descriptor_get(enc->codec_id);
799  }
800  if (desc)
801  return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
802  return 0;
803 }
804 
805 static int has_decode_delay_been_guessed(AVStream *st)
806 {
807  if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
808  if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
809  return 1;
810 #if CONFIG_H264_DECODER
811  if(st->codec->has_b_frames &&
813  return 1;
814 #endif
815  if(st->codec->has_b_frames<3)
816  return st->nb_decoded_frames >= 7;
817  else if(st->codec->has_b_frames<4)
818  return st->nb_decoded_frames >= 18;
819  else
820  return st->nb_decoded_frames >= 20;
821 }
822 
823 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
824 {
825  if (pktl->next)
826  return pktl->next;
827  if (pktl == s->parse_queue_end)
828  return s->packet_buffer;
829  return NULL;
830 }
831 
832 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
833 {
834  if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
835  st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
836  int i;
837 
838  // reference time stamp should be 60 s before first time stamp
839  int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
840  // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
841  int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
842  (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
843  AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
844 
845  AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);
846 
847  if (!first_program) {
848  int default_stream_index = av_find_default_stream_index(s);
849  if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
850  for (i=0; i<s->nb_streams; i++) {
851  s->streams[i]->pts_wrap_reference = pts_wrap_reference;
852  s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
853  }
854  }
855  else {
856  st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
857  st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
858  }
859  }
860  else {
861  AVProgram *program = first_program;
862  while (program) {
863  if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
864  pts_wrap_reference = program->pts_wrap_reference;
865  pts_wrap_behavior = program->pts_wrap_behavior;
866  break;
867  }
868  program = av_find_program_from_stream(s, program, stream_index);
869  }
870 
871  // update every program with differing pts_wrap_reference
872  program = first_program;
873  while(program) {
874  if (program->pts_wrap_reference != pts_wrap_reference) {
875  for (i=0; i<program->nb_stream_indexes; i++) {
876  s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
877  s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
878  }
879 
880  program->pts_wrap_reference = pts_wrap_reference;
881  program->pts_wrap_behavior = pts_wrap_behavior;
882  }
883  program = av_find_program_from_stream(s, program, stream_index);
884  }
885  }
886  return 1;
887  }
888  return 0;
889 }
890 
891 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
892  int64_t dts, int64_t pts, AVPacket *pkt)
893 {
894  AVStream *st= s->streams[stream_index];
896  int64_t pts_buffer[MAX_REORDER_DELAY+1];
897  int64_t shift;
898  int i, delay;
899 
900  if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
901  return;
902 
903  delay = st->codec->has_b_frames;
904  st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
905  st->cur_dts= dts;
906  shift = st->first_dts - RELATIVE_TS_BASE;
907 
908  for (i=0; i<MAX_REORDER_DELAY+1; i++)
909  pts_buffer[i] = AV_NOPTS_VALUE;
910 
911  if (is_relative(pts))
912  pts += shift;
913 
914  for(; pktl; pktl= get_next_pkt(s, st, pktl)){
915  if(pktl->pkt.stream_index != stream_index)
916  continue;
917  if(is_relative(pktl->pkt.pts))
918  pktl->pkt.pts += shift;
919 
920  if(is_relative(pktl->pkt.dts))
921  pktl->pkt.dts += shift;
922 
923  if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
924  st->start_time= pktl->pkt.pts;
925 
926  if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
927  pts_buffer[0]= pktl->pkt.pts;
928  for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
929  FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
930  if(pktl->pkt.dts == AV_NOPTS_VALUE)
931  pktl->pkt.dts= pts_buffer[0];
932  }
933  }
934 
935  if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
936  // correct first time stamps to negative values
937  st->first_dts = wrap_timestamp(st, st->first_dts);
938  st->cur_dts = wrap_timestamp(st, st->cur_dts);
939  pkt->dts = wrap_timestamp(st, pkt->dts);
940  pkt->pts = wrap_timestamp(st, pkt->pts);
941  pts = wrap_timestamp(st, pts);
942  }
943 
944  if (st->start_time == AV_NOPTS_VALUE)
945  st->start_time = pts;
946 }
947 
948 static void update_initial_durations(AVFormatContext *s, AVStream *st,
949  int stream_index, int duration)
950 {
952  int64_t cur_dts= RELATIVE_TS_BASE;
953 
954  if(st->first_dts != AV_NOPTS_VALUE){
955  cur_dts= st->first_dts;
956  for(; pktl; pktl= get_next_pkt(s, st, pktl)){
957  if(pktl->pkt.stream_index == stream_index){
958  if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
959  break;
960  cur_dts -= duration;
961  }
962  }
963  if(pktl && pktl->pkt.dts != st->first_dts) {
964  av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in the queue\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
965  return;
966  }
967  if(!pktl) {
968  av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
969  return;
970  }
971  pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
972  st->first_dts = cur_dts;
973  }else if(st->cur_dts != RELATIVE_TS_BASE)
974  return;
975 
976  for(; pktl; pktl= get_next_pkt(s, st, pktl)){
977  if(pktl->pkt.stream_index != stream_index)
978  continue;
979  if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
980  && !pktl->pkt.duration){
981  pktl->pkt.dts= cur_dts;
982  if(!st->codec->has_b_frames)
983  pktl->pkt.pts= cur_dts;
984 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
985  pktl->pkt.duration = duration;
986  }else
987  break;
988  cur_dts = pktl->pkt.dts + pktl->pkt.duration;
989  }
990  if(!pktl)
991  st->cur_dts= cur_dts;
992 }
993 
994 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
995  AVCodecParserContext *pc, AVPacket *pkt)
996 {
997  int num, den, presentation_delayed, delay, i;
998  int64_t offset;
999 
1000  if (s->flags & AVFMT_FLAG_NOFILLIN)
1001  return;
1002 
1003  if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1004  pkt->dts= AV_NOPTS_VALUE;
1005 
1006  if (st->codec->codec_id != AV_CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1007  //FIXME Set low_delay = 0 when has_b_frames = 1
1008  st->codec->has_b_frames = 1;
1009 
1010  /* do we have a video B-frame ? */
1011  delay= st->codec->has_b_frames;
1012  presentation_delayed = 0;
1013 
1014  /* XXX: need has_b_frame, but cannot get it if the codec is
1015  not initialized */
1016  if (delay &&
1017  pc && pc->pict_type != AV_PICTURE_TYPE_B)
1018  presentation_delayed = 1;
1019 
1020  if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && st->pts_wrap_bits<63 && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts){
1021  if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
1022  pkt->dts -= 1LL<<st->pts_wrap_bits;
1023  } else
1024  pkt->pts += 1LL<<st->pts_wrap_bits;
1025  }
1026 
1027  // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1028  // we take the conservative approach and discard both
1029  // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1030  if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1031  av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1032  if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1033  pkt->dts= AV_NOPTS_VALUE;
1034  }
1035 
1036  if (pkt->duration == 0) {
1037  ff_compute_frame_duration(&num, &den, st, pc, pkt);
1038  if (den && num) {
1039  pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1040  }
1041  }
1042  if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1043  update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1044 
1045  /* correct timestamps with byte offset if demuxers only have timestamps
1046  on packet boundaries */
1047  if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1048  /* this will estimate bitrate based on this frame's duration and size */
1049  offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1050  if(pkt->pts != AV_NOPTS_VALUE)
1051  pkt->pts += offset;
1052  if(pkt->dts != AV_NOPTS_VALUE)
1053  pkt->dts += offset;
1054  }
1055 
1056  if (pc && pc->dts_sync_point >= 0) {
1057  // we have synchronization info from the parser
1058  int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1059  if (den > 0) {
1060  int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1061  if (pkt->dts != AV_NOPTS_VALUE) {
1062  // got DTS from the stream, update reference timestamp
1063  st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1064  pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1065  } else if (st->reference_dts != AV_NOPTS_VALUE) {
1066  // compute DTS based on reference timestamp
1067  pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1068  pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1069  }
1070  if (pc->dts_sync_point > 0)
1071  st->reference_dts = pkt->dts; // new reference
1072  }
1073  }
1074 
1075  /* This may be redundant, but it should not hurt. */
1076  if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1077  presentation_delayed = 1;
1078 
1079  av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1080  presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1081  /* interpolate PTS and DTS if they are not present */
1082  //We skip H264 currently because delay and has_b_frames are not reliably set
1083  if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1084  if (presentation_delayed) {
1085  /* DTS = decompression timestamp */
1086  /* PTS = presentation timestamp */
1087  if (pkt->dts == AV_NOPTS_VALUE)
1088  pkt->dts = st->last_IP_pts;
1089  update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1090  if (pkt->dts == AV_NOPTS_VALUE)
1091  pkt->dts = st->cur_dts;
1092 
1093  /* this is tricky: the dts must be incremented by the duration
1094  of the frame we are displaying, i.e. the last I- or P-frame */
1095  if (st->last_IP_duration == 0)
1096  st->last_IP_duration = pkt->duration;
1097  if(pkt->dts != AV_NOPTS_VALUE)
1098  st->cur_dts = pkt->dts + st->last_IP_duration;
1099  st->last_IP_duration = pkt->duration;
1100  st->last_IP_pts= pkt->pts;
1101  /* cannot compute PTS if not present (we can compute it only
1102  by knowing the future */
1103  } else if (pkt->pts != AV_NOPTS_VALUE ||
1104  pkt->dts != AV_NOPTS_VALUE ||
1105  pkt->duration ) {
1106  int duration = pkt->duration;
1107 
1108  /* presentation is not delayed : PTS and DTS are the same */
1109  if (pkt->pts == AV_NOPTS_VALUE)
1110  pkt->pts = pkt->dts;
1111  update_initial_timestamps(s, pkt->stream_index, pkt->dts,
1112  pkt->pts, pkt);
1113  if (pkt->pts == AV_NOPTS_VALUE)
1114  pkt->pts = st->cur_dts;
1115  pkt->dts = pkt->pts;
1116  if (pkt->pts != AV_NOPTS_VALUE)
1117  st->cur_dts = pkt->pts + duration;
1118  }
1119  }
1120 
1121  if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1122  st->pts_buffer[0]= pkt->pts;
1123  for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1124  FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1125  if(pkt->dts == AV_NOPTS_VALUE)
1126  pkt->dts= st->pts_buffer[0];
1127  }
1128  if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1129  update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
1130  }
1131  if(pkt->dts > st->cur_dts)
1132  st->cur_dts = pkt->dts;
1133 
1134  av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1135  presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1136 
1137  /* update flags */
1138  if (is_intra_only(st->codec))
1139  pkt->flags |= AV_PKT_FLAG_KEY;
1140  if (pc)
1141  pkt->convergence_duration = pc->convergence_duration;
1142 }
1143 
1144 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1145 {
1146  while (*pkt_buf) {
1147  AVPacketList *pktl = *pkt_buf;
1148  *pkt_buf = pktl->next;
1149  av_free_packet(&pktl->pkt);
1150  av_freep(&pktl);
1151  }
1152  *pkt_buf_end = NULL;
1153 }
1154 
1155 /**
1156  * Parse a packet, add all split parts to parse_queue
1157  *
1158  * @param pkt packet to parse, NULL when flushing the parser at end of stream
1159  */
1160 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1161 {
1162  AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1163  AVStream *st = s->streams[stream_index];
1164  uint8_t *data = pkt ? pkt->data : NULL;
1165  int size = pkt ? pkt->size : 0;
1166  int ret = 0, got_output = 0;
1167 
1168  if (!pkt) {
1169  av_init_packet(&flush_pkt);
1170  pkt = &flush_pkt;
1171  got_output = 1;
1172  } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1173  // preserve 0-size sync packets
1174  compute_pkt_fields(s, st, st->parser, pkt);
1175  }
1176 
1177  while (size > 0 || (pkt == &flush_pkt && got_output)) {
1178  int len;
1179 
1180  av_init_packet(&out_pkt);
1181  len = av_parser_parse2(st->parser, st->codec,
1182  &out_pkt.data, &out_pkt.size, data, size,
1183  pkt->pts, pkt->dts, pkt->pos);
1184 
1185  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1186  pkt->pos = -1;
1187  /* increment read pointer */
1188  data += len;
1189  size -= len;
1190 
1191  got_output = !!out_pkt.size;
1192 
1193  if (!out_pkt.size)
1194  continue;
1195 
1196  if (pkt->side_data) {
1197  out_pkt.side_data = pkt->side_data;
1198  out_pkt.side_data_elems = pkt->side_data_elems;
1199  pkt->side_data = NULL;
1200  pkt->side_data_elems = 0;
1201  }
1202 
1203  /* set the duration */
1204  out_pkt.duration = 0;
1205  if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1206  if (st->codec->sample_rate > 0) {
1207  out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1208  (AVRational){ 1, st->codec->sample_rate },
1209  st->time_base,
1210  AV_ROUND_DOWN);
1211  }
1212  } else if (st->codec->time_base.num != 0 &&
1213  st->codec->time_base.den != 0) {
1214  out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1215  st->codec->time_base,
1216  st->time_base,
1217  AV_ROUND_DOWN);
1218  }
1219 
1220  out_pkt.stream_index = st->index;
1221  out_pkt.pts = st->parser->pts;
1222  out_pkt.dts = st->parser->dts;
1223  out_pkt.pos = st->parser->pos;
1224 
1224 
1225  if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1226  out_pkt.pos = st->parser->frame_offset;
1227 
1228  if (st->parser->key_frame == 1 ||
1229  (st->parser->key_frame == -1 &&
1230  st->parser->pict_type == AV_PICTURE_TYPE_I))
1231  out_pkt.flags |= AV_PKT_FLAG_KEY;
1232 
1233  if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1234  out_pkt.flags |= AV_PKT_FLAG_KEY;
1235 
1236  compute_pkt_fields(s, st, st->parser, &out_pkt);
1237 
1238  if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1239  out_pkt.buf = pkt->buf;
1240  pkt->buf = NULL;
1241 #if FF_API_DESTRUCT_PACKET
1242  out_pkt.destruct = pkt->destruct;
1243  pkt->destruct = NULL;
1244 #endif
1245  }
1246  if ((ret = av_dup_packet(&out_pkt)) < 0)
1247  goto fail;
1248 
1249  if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1250  av_free_packet(&out_pkt);
1251  ret = AVERROR(ENOMEM);
1252  goto fail;
1253  }
1254  }
1255 
1256 
1257  /* end of the stream => close and free the parser */
1258  if (pkt == &flush_pkt) {
1259  av_parser_close(st->parser);
1260  st->parser = NULL;
1261  }
1262 
1263 fail:
1264  av_free_packet(pkt);
1265  return ret;
1266 }
1267 
1268 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1269  AVPacketList **pkt_buffer_end,
1270  AVPacket *pkt)
1271 {
1272  AVPacketList *pktl;
1273  av_assert0(*pkt_buffer);
1274  pktl = *pkt_buffer;
1275  *pkt = pktl->pkt;
1276  *pkt_buffer = pktl->next;
1277  if (!pktl->next)
1278  *pkt_buffer_end = NULL;
1279  av_freep(&pktl);
1280  return 0;
1281 }
1282 
1283 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1284 {
1285  int ret = 0, i, got_packet = 0;
1286 
1287  av_init_packet(pkt);
1288 
1289  while (!got_packet && !s->parse_queue) {
1290  AVStream *st;
1291  AVPacket cur_pkt;
1292 
1293  /* read next packet */
1294  ret = ff_read_packet(s, &cur_pkt);
1295  if (ret < 0) {
1296  if (ret == AVERROR(EAGAIN))
1297  return ret;
1298  /* flush the parsers */
1299  for(i = 0; i < s->nb_streams; i++) {
1300  st = s->streams[i];
1301  if (st->parser && st->need_parsing)
1302  parse_packet(s, NULL, st->index);
1303  }
1304  /* all remaining packets are now in parse_queue =>
1305  * really terminate parsing */
1306  break;
1307  }
1308  ret = 0;
1309  st = s->streams[cur_pkt.stream_index];
1310 
1311  if (cur_pkt.pts != AV_NOPTS_VALUE &&
1312  cur_pkt.dts != AV_NOPTS_VALUE &&
1313  cur_pkt.pts < cur_pkt.dts) {
1314  av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1315  cur_pkt.stream_index,
1316  av_ts2str(cur_pkt.pts),
1317  av_ts2str(cur_pkt.dts),
1318  cur_pkt.size);
1319  }
1320  if (s->debug & FF_FDEBUG_TS)
1321  av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1322  cur_pkt.stream_index,
1323  av_ts2str(cur_pkt.pts),
1324  av_ts2str(cur_pkt.dts),
1325  cur_pkt.size,
1326  cur_pkt.duration,
1327  cur_pkt.flags);
1328 
1329  if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1330  st->parser = av_parser_init(st->codec->codec_id);
1331  if (!st->parser) {
1332  av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1333  "%s, packets or times may be invalid.\n",
1334  avcodec_get_name(st->codec->codec_id));
1335  /* no parser available: just output the raw packets */
1336  st->need_parsing = AVSTREAM_PARSE_NONE;
1337  } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1338  st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1339  } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1340  st->parser->flags |= PARSER_FLAG_ONCE;
1341  } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1342  st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1343  }
1344  }
1345 
1346  if (!st->need_parsing || !st->parser) {
1347  /* no parsing needed: we just output the packet as is */
1348  *pkt = cur_pkt;
1349  compute_pkt_fields(s, st, NULL, pkt);
1350  if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1351  (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1352  ff_reduce_index(s, st->index);
1353  av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1354  }
1355  got_packet = 1;
1356  } else if (st->discard < AVDISCARD_ALL) {
1357  if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1358  return ret;
1359  } else {
1360  /* free packet */
1361  av_free_packet(&cur_pkt);
1362  }
1363  if (pkt->flags & AV_PKT_FLAG_KEY)
1364  st->skip_to_keyframe = 0;
1365  if (st->skip_to_keyframe) {
1366  av_free_packet(&cur_pkt);
1367  if (got_packet) {
1368  *pkt = cur_pkt;
1369  }
1370  got_packet = 0;
1371  }
1372  }
1373 
1374  if (!got_packet && s->parse_queue)
1375  ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1376 
1377  if(s->debug & FF_FDEBUG_TS)
1378  av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1379  pkt->stream_index,
1380  av_ts2str(pkt->pts),
1381  av_ts2str(pkt->dts),
1382  pkt->size,
1383  pkt->duration,
1384  pkt->flags);
1385 
1386  return ret;
1387 }
1388 
1389 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1390 {
1391  const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1392  int eof = 0;
1393  int ret;
1394  AVStream *st;
1395 
1396  if (!genpts) {
1397  ret = s->packet_buffer ?
1398  read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1399  read_frame_internal(s, pkt);
1400  if (ret < 0)
1401  return ret;
1402  goto return_packet;
1403  }
1404 
1405  for (;;) {
1406  AVPacketList *pktl = s->packet_buffer;
1407 
1408  if (pktl) {
1409  AVPacket *next_pkt = &pktl->pkt;
1410 
1411  if (next_pkt->dts != AV_NOPTS_VALUE) {
1412  int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1413  // last dts seen for this stream. if any of packets following
1414  // current one had no dts, we will set this to AV_NOPTS_VALUE.
1415  int64_t last_dts = next_pkt->dts;
1416  while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1417  if (pktl->pkt.stream_index == next_pkt->stream_index &&
1418  (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1419  if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1420  next_pkt->pts = pktl->pkt.dts;
1421  }
1422  if (last_dts != AV_NOPTS_VALUE) {
1423  // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1424  last_dts = pktl->pkt.dts;
1425  }
1426  }
1427  pktl = pktl->next;
1428  }
1429  if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1430  // Fixing the last reference frame had none pts issue (For MXF etc).
1431  // We only do this when
1432  // 1. eof.
1433  // 2. we are not able to resolve a pts value for current packet.
1434  // 3. the packets for this stream at the end of the files had valid dts.
1435  next_pkt->pts = last_dts + next_pkt->duration;
1436  }
1437  pktl = s->packet_buffer;
1438  }
1439 
1440  /* read packet from packet buffer, if there is data */
1441  if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1442  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1443  ret = read_from_packet_buffer(&s->packet_buffer,
1444  &s->packet_buffer_end, pkt);
1445  goto return_packet;
1446  }
1447  }
1448 
1449  ret = read_frame_internal(s, pkt);
1450  if (ret < 0) {
1451  if (pktl && ret != AVERROR(EAGAIN)) {
1452  eof = 1;
1453  continue;
1454  } else
1455  return ret;
1456  }
1457 
1458  if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1459  &s->packet_buffer_end)) < 0)
1460  return AVERROR(ENOMEM);
1461  }
1462 
1463 return_packet:
1464 
1465  st = s->streams[pkt->stream_index];
1466  if (st->skip_samples) {
1467  uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1468  AV_WL32(p, st->skip_samples);
1469  av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1470  st->skip_samples = 0;
1471  }
1472 
1473  if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1474  ff_reduce_index(s, st->index);
1475  av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1476  }
1477 
1478  if (is_relative(pkt->dts))
1479  pkt->dts -= RELATIVE_TS_BASE;
1480  if (is_relative(pkt->pts))
1481  pkt->pts -= RELATIVE_TS_BASE;
1482 
1483  return ret;
1484 }
1485 
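The canonical consumer of av_read_frame() (an illustrative caller sketch, not part of utils.c): loop until a negative return and free every packet, since ownership of pkt passes to the caller on success.

#include <libavformat/avformat.h>

static int count_stream_packets(AVFormatContext *ic, int stream_index)
{
    AVPacket pkt;
    int count = 0;

    while (av_read_frame(ic, &pkt) >= 0) {   /* 0 on success, <0 on EOF or error */
        if (pkt.stream_index == stream_index)
            count++;
        av_free_packet(&pkt);                /* caller owns the returned packet  */
    }
    return count;
}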
1486 /* XXX: suppress the packet queue */
1487 static void flush_packet_queue(AVFormatContext *s)
1488 {
1489  free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1490  free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1491  free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1492 
1493  s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1494 }
1495 
1496 /*******************************************************/
1497 /* seek support */
1498 
1499 int av_find_default_stream_index(AVFormatContext *s)
1500 {
1501  int first_audio_index = -1;
1502  int i;
1503  AVStream *st;
1504 
1505  if (s->nb_streams <= 0)
1506  return -1;
1507  for(i = 0; i < s->nb_streams; i++) {
1508  st = s->streams[i];
1509  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1510  !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1511  return i;
1512  }
1513  if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1514  first_audio_index = i;
1515  }
1516  return first_audio_index >= 0 ? first_audio_index : 0;
1517 }
1518 
1519 /**
1520  * Flush the frame reader.
1521  */
1522 void ff_read_frame_flush(AVFormatContext *s)
1523 {
1524  AVStream *st;
1525  int i, j;
1526 
1527  flush_packet_queue(s);
1528 
1529  /* for each stream, reset read state */
1530  for(i = 0; i < s->nb_streams; i++) {
1531  st = s->streams[i];
1532 
1533  if (st->parser) {
1534  av_parser_close(st->parser);
1535  st->parser = NULL;
1536  }
1539  else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1541 
1543 
1544  for(j=0; j<MAX_REORDER_DELAY+1; j++)
1545  st->pts_buffer[j]= AV_NOPTS_VALUE;
1546  }
1547 }
1548 
1549 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1550 {
1551  int i;
1552 
1553  for(i = 0; i < s->nb_streams; i++) {
1554  AVStream *st = s->streams[i];
1555 
1556  st->cur_dts = av_rescale(timestamp,
1557  st->time_base.den * (int64_t)ref_st->time_base.num,
1558  st->time_base.num * (int64_t)ref_st->time_base.den);
1559  }
1560 }
1561 
1562 void ff_reduce_index(AVFormatContext *s, int stream_index)
1563 {
1564  AVStream *st= s->streams[stream_index];
1565  unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1566 
1567  if((unsigned)st->nb_index_entries >= max_entries){
1568  int i;
1569  for(i=0; 2*i<st->nb_index_entries; i++)
1570  st->index_entries[i]= st->index_entries[2*i];
1571  st->nb_index_entries= i;
1572  }
1573 }
1574 
1575 int ff_add_index_entry(AVIndexEntry **index_entries,
1576  int *nb_index_entries,
1577  unsigned int *index_entries_allocated_size,
1578  int64_t pos, int64_t timestamp, int size, int distance, int flags)
1579 {
1580  AVIndexEntry *entries, *ie;
1581  int index;
1582 
1583  if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1584  return -1;
1585 
1586  if(timestamp == AV_NOPTS_VALUE)
1587  return AVERROR(EINVAL);
1588 
1589  if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1590  timestamp -= RELATIVE_TS_BASE;
1591 
1592  entries = av_fast_realloc(*index_entries,
1593  index_entries_allocated_size,
1594  (*nb_index_entries + 1) *
1595  sizeof(AVIndexEntry));
1596  if(!entries)
1597  return -1;
1598 
1599  *index_entries= entries;
1600 
1601  index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1602 
1603  if(index<0){
1604  index= (*nb_index_entries)++;
1605  ie= &entries[index];
1606  av_assert0(index==0 || ie[-1].timestamp < timestamp);
1607  }else{
1608  ie= &entries[index];
1609  if(ie->timestamp != timestamp){
1610  if(ie->timestamp <= timestamp)
1611  return -1;
1612  memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1613  (*nb_index_entries)++;
1614  }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1615  distance= ie->min_distance;
1616  }
1617 
1618  ie->pos = pos;
1619  ie->timestamp = timestamp;
1620  ie->min_distance= distance;
1621  ie->size= size;
1622  ie->flags = flags;
1623 
1624  return index;
1625 }
1626 
1627 int av_add_index_entry(AVStream *st,
1628  int64_t pos, int64_t timestamp, int size, int distance, int flags)
1629 {
1630  timestamp = wrap_timestamp(st, timestamp);
1631  return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1632  &st->index_entries_allocated_size, pos,
1633  timestamp, size, distance, flags);
1634 }
1635 
1636 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1637  int64_t wanted_timestamp, int flags)
1638 {
1639  int a, b, m;
1640  int64_t timestamp;
1641 
1642  a = - 1;
1643  b = nb_entries;
1644 
1645  //optimize appending index entries at the end
1646  if(b && entries[b-1].timestamp < wanted_timestamp)
1647  a= b-1;
1648 
1649  while (b - a > 1) {
1650  m = (a + b) >> 1;
1651  timestamp = entries[m].timestamp;
1652  if(timestamp >= wanted_timestamp)
1653  b = m;
1654  if(timestamp <= wanted_timestamp)
1655  a = m;
1656  }
1657  m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1658 
1659  if(!(flags & AVSEEK_FLAG_ANY)){
1660  while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1661  m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1662  }
1663  }
1664 
1665  if(m == nb_entries)
1666  return -1;
1667  return m;
1668 }
1669 
1670 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1671  int flags)
1672 {
1673  return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1674  wanted_timestamp, flags);
1675 }
1676 
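The search above keeps the invariant entries[a].timestamp <= wanted_timestamp <= entries[b].timestamp, with a = -1 and b = nb_entries as open ends; AVSEEK_FLAG_BACKWARD then picks a, the default picks b, and without AVSEEK_FLAG_ANY the result is walked to the nearest keyframe entry. A hedged usage sketch against a stream's index (index_entries is visible in the public struct but documented as not for general use; it is touched here only for illustration):

#include <libavformat/avformat.h>

/* Byte position of the keyframe at or before ts (in st->time_base units),
 * or -1 if the stream index has no suitable entry. */
static int64_t keyframe_pos_before(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;
    return st->index_entries[idx].pos;
}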
1677 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1678  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1679 {
1680  int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1681  if (stream_index >= 0)
1682  ts = wrap_timestamp(s->streams[stream_index], ts);
1683  return ts;
1684 }
1685 
1686 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1687 {
1688  AVInputFormat *avif= s->iformat;
1689  int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1690  int64_t ts_min, ts_max, ts;
1691  int index;
1692  int64_t ret;
1693  AVStream *st;
1694 
1695  if (stream_index < 0)
1696  return -1;
1697 
1698  av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1699 
1700  ts_max=
1701  ts_min= AV_NOPTS_VALUE;
1702  pos_limit= -1; //gcc falsely says it may be uninitialized
1703 
1704  st= s->streams[stream_index];
1705  if(st->index_entries){
1706  AVIndexEntry *e;
1707 
1708  index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1709  index= FFMAX(index, 0);
1710  e= &st->index_entries[index];
1711 
1712  if(e->timestamp <= target_ts || e->pos == e->min_distance){
1713  pos_min= e->pos;
1714  ts_min= e->timestamp;
1715  av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1716  pos_min, av_ts2str(ts_min));
1717  }else{
1718  av_assert1(index==0);
1719  }
1720 
1721  index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1722  av_assert0(index < st->nb_index_entries);
1723  if(index >= 0){
1724  e= &st->index_entries[index];
1725  av_assert1(e->timestamp >= target_ts);
1726  pos_max= e->pos;
1727  ts_max= e->timestamp;
1728  pos_limit= pos_max - e->min_distance;
1729  av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1730  pos_max, pos_limit, av_ts2str(ts_max));
1731  }
1732  }
1733 
1734  pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1735  if(pos<0)
1736  return -1;
1737 
1738  /* do the seek */
1739  if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1740  return ret;
1741 
1742  ff_read_frame_flush(s);
1743  ff_update_cur_dts(s, st, ts);
1744 
1745  return 0;
1746 }
1747 
1748 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1749  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1750 {
1751  int64_t step= 1024;
1752  int64_t limit, ts_max;
1753  int64_t filesize = avio_size(s->pb);
1754  int64_t pos_max = filesize - 1;
1755  do{
1756  limit = pos_max;
1757  pos_max = FFMAX(0, (pos_max) - step);
1758  ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1759  step += step;
1760  }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1761  if (ts_max == AV_NOPTS_VALUE)
1762  return -1;
1763 
1764  for(;;){
1765  int64_t tmp_pos = pos_max + 1;
1766  int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1767  if(tmp_ts == AV_NOPTS_VALUE)
1768  break;
1769  ts_max = tmp_ts;
1770  pos_max = tmp_pos;
1771  if(tmp_pos >= filesize)
1772  break;
1773  }
1774 
1775  if (ts)
1776  *ts = ts_max;
1777  if (pos)
1778  *pos = pos_max;
1779 
1780  return 0;
1781 }
1782 
1783 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1784  int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1785  int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1786  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1787 {
1788  int64_t pos, ts;
1789  int64_t start_pos;
1790  int no_change;
1791  int ret;
1792 
1793  av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1794 
1795  if(ts_min == AV_NOPTS_VALUE){
1796  pos_min = s->data_offset;
1797  ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1798  if (ts_min == AV_NOPTS_VALUE)
1799  return -1;
1800  }
1801 
1802  if(ts_min >= target_ts){
1803  *ts_ret= ts_min;
1804  return pos_min;
1805  }
1806 
1807  if(ts_max == AV_NOPTS_VALUE){
1808  if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1809  return ret;
1810  pos_limit= pos_max;
1811  }
1812 
1813  if(ts_max <= target_ts){
1814  *ts_ret= ts_max;
1815  return pos_max;
1816  }
1817 
1818  if(ts_min > ts_max){
1819  return -1;
1820  }else if(ts_min == ts_max){
1821  pos_limit= pos_min;
1822  }
1823 
1824  no_change=0;
1825  while (pos_min < pos_limit) {
1826  av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1827  pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1828  assert(pos_limit <= pos_max);
1829 
1830  if(no_change==0){
1831  int64_t approximate_keyframe_distance= pos_max - pos_limit;
1832  // interpolate position (better than dichotomy)
1833  pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1834  + pos_min - approximate_keyframe_distance;
1835  }else if(no_change==1){
1836  // bisection, if interpolation failed to change min or max pos last time
1837  pos = (pos_min + pos_limit)>>1;
1838  }else{
1839  /* linear search if bisection failed, can only happen if there
1840  are very few or no keyframes between min/max */
1841  pos=pos_min;
1842  }
1843  if(pos <= pos_min)
1844  pos= pos_min + 1;
1845  else if(pos > pos_limit)
1846  pos= pos_limit;
1847  start_pos= pos;
1848 
1849  ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1850  if(pos == pos_max)
1851  no_change++;
1852  else
1853  no_change=0;
1854  av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1855  pos_min, pos, pos_max,
1856  av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1857  pos_limit, start_pos, no_change);
1858  if(ts == AV_NOPTS_VALUE){
1859  av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1860  return -1;
1861  }
1862  assert(ts != AV_NOPTS_VALUE);
1863  if (target_ts <= ts) {
1864  pos_limit = start_pos - 1;
1865  pos_max = pos;
1866  ts_max = ts;
1867  }
1868  if (target_ts >= ts) {
1869  pos_min = pos;
1870  ts_min = ts;
1871  }
1872  }
1873 
1874  pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1875  ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1876 #if 0
1877  pos_min = pos;
1878  ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1879  pos_min++;
1880  ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1881  av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1882  pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1883 #endif
1884  *ts_ret= ts;
1885  return pos;
1886 }
1887 
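A worked example of the first, interpolating guess in ff_gen_search() (made-up numbers): seeking to target_ts = 60 with ts_min = 0 at pos_min = 0 and ts_max = 120 at pos_max = 100 MB, with pos_limit 1 MB below pos_max, lands the first probe at 49 MB; on a roughly constant-bitrate file this converges in a few read_timestamp() calls before the bisection and linear-search fallbacks are needed.

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t target_ts = 60, ts_min = 0, ts_max = 120;            /* timestamps */
    int64_t pos_min = 0, pos_max = 100000000, pos_limit = 99000000;
    int64_t approximate_keyframe_distance = pos_max - pos_limit; /* 1 MB       */

    /* Same formula as the no_change == 0 branch above. */
    int64_t pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                  + pos_min - approximate_keyframe_distance;
    printf("first guess: %lld\n", (long long)pos);               /* 49000000   */
    return 0;
}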
1888 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1889  int64_t pos_min, pos_max;
1890 
1891  pos_min = s->data_offset;
1892  pos_max = avio_size(s->pb) - 1;
1893 
1894  if (pos < pos_min) pos= pos_min;
1895  else if(pos > pos_max) pos= pos_max;
1896 
1897  avio_seek(s->pb, pos, SEEK_SET);
1898 
1899  s->io_repositioned = 1;
1900 
1901  return 0;
1902 }
1903 
1904 static int seek_frame_generic(AVFormatContext *s,
1905  int stream_index, int64_t timestamp, int flags)
1906 {
1907  int index;
1908  int64_t ret;
1909  AVStream *st;
1910  AVIndexEntry *ie;
1911 
1912  st = s->streams[stream_index];
1913 
1914  index = av_index_search_timestamp(st, timestamp, flags);
1915 
1916  if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1917  return -1;
1918 
1919  if(index < 0 || index==st->nb_index_entries-1){
1920  AVPacket pkt;
1921  int nonkey=0;
1922 
1923  if(st->nb_index_entries){
1925  ie= &st->index_entries[st->nb_index_entries-1];
1926  if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1927  return ret;
1928  ff_update_cur_dts(s, st, ie->timestamp);
1929  }else{
1930  if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1931  return ret;
1932  }
1933  for (;;) {
1934  int read_status;
1935  do{
1936  read_status = av_read_frame(s, &pkt);
1937  } while (read_status == AVERROR(EAGAIN));
1938  if (read_status < 0)
1939  break;
1940  av_free_packet(&pkt);
1941  if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1942  if(pkt.flags & AV_PKT_FLAG_KEY)
1943  break;
1944  if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1945  av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1946  break;
1947  }
1948  }
1949  }
1950  index = av_index_search_timestamp(st, timestamp, flags);
1951  }
1952  if (index < 0)
1953  return -1;
1954 
1956  if (s->iformat->read_seek){
1957  if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1958  return 0;
1959  }
1960  ie = &st->index_entries[index];
1961  if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1962  return ret;
1963  ff_update_cur_dts(s, st, ie->timestamp);
1964 
1965  return 0;
1966 }
1967 
1968 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1969  int64_t timestamp, int flags)
1970 {
1971  int ret;
1972  AVStream *st;
1973 
1974  if (flags & AVSEEK_FLAG_BYTE) {
1975  if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1976  return -1;
1978  return seek_frame_byte(s, stream_index, timestamp, flags);
1979  }
1980 
1981  if(stream_index < 0){
1982  stream_index= av_find_default_stream_index(s);
1983  if(stream_index < 0)
1984  return -1;
1985 
1986  st= s->streams[stream_index];
1987  /* timestamp for default must be expressed in AV_TIME_BASE units */
1988  timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1989  }
1990 
1991  /* first, we try the format specific seek */
1992  if (s->iformat->read_seek) {
1994  ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1995  } else
1996  ret = -1;
1997  if (ret >= 0) {
1998  return 0;
1999  }
2000 
2001  if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2003  return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2004  } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2006  return seek_frame_generic(s, stream_index, timestamp, flags);
2007  }
2008  else
2009  return -1;
2010 }
2011 
2012 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2013 {
2014  int ret;
2015 
2016  if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2017  int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2018  if ((flags & AVSEEK_FLAG_BACKWARD))
2019  max_ts = timestamp;
2020  else
2021  min_ts = timestamp;
2022  return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2023  flags & ~AVSEEK_FLAG_BACKWARD);
2024  }
2025 
2026  ret = seek_frame_internal(s, stream_index, timestamp, flags);
2027 
2028  if (ret >= 0)
2029  ff_read_frame_flush(s);
2030 
2031  return ret;
2032 }
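/*
 * Usage sketch (not part of utils.c): seeking with av_seek_frame().  The
 * target must be expressed in the stream's time base (or in AV_TIME_BASE
 * units when stream_index is -1).  "fmt_ctx" and "seconds" are assumed to
 * come from the caller.
 */
static int example_seek_to_seconds(AVFormatContext *fmt_ctx, int stream_index,
                                   double seconds)
{
    int64_t ts;

    if (stream_index >= 0)
        ts = av_rescale_q((int64_t)(seconds * AV_TIME_BASE),
                          AV_TIME_BASE_Q,
                          fmt_ctx->streams[stream_index]->time_base);
    else
        ts = (int64_t)(seconds * AV_TIME_BASE);

    /* AVSEEK_FLAG_BACKWARD asks for the nearest seek point at or before ts */
    return av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}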
2033 
2034 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2035 {
2036  if(min_ts > ts || max_ts < ts)
2037  return -1;
2038  if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2039  return AVERROR(EINVAL);
2040 
2041  if(s->seek2any>0)
2042  flags |= AVSEEK_FLAG_ANY;
2043  flags &= ~AVSEEK_FLAG_BACKWARD;
2044 
2045  if (s->iformat->read_seek2) {
2046  int ret;
2048 
2049  if (stream_index == -1 && s->nb_streams == 1) {
2050  AVRational time_base = s->streams[0]->time_base;
2051  ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2052  min_ts = av_rescale_rnd(min_ts, time_base.den,
2053  time_base.num * (int64_t)AV_TIME_BASE,
2054  AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2055  max_ts = av_rescale_rnd(max_ts, time_base.den,
2056  time_base.num * (int64_t)AV_TIME_BASE,
2057  AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2058  }
2059 
2060  ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2061 
2062  if (ret >= 0)
2063  ff_read_frame_flush(s);
2064  return ret;
2065  }
2066 
2067  if(s->iformat->read_timestamp){
2068  //try to seek via read_timestamp()
2069  }
2070 
2071  // Fall back on old API if new is not implemented but old is.
2072  // Note the old API has somewhat different semantics.
2073  if (s->iformat->read_seek || 1) {
2074  int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2075  int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2076  if (ret<0 && ts != min_ts && max_ts != ts) {
2077  ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2078  if (ret >= 0)
2079  ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2080  }
2081  return ret;
2082  }
2083 
2084  // try some generic seek like seek_frame_generic() but with new ts semantics
2085  return -1; //unreachable
2086 }
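/*
 * Usage sketch (not part of utils.c): avformat_seek_file() with the newer
 * min/ts/max semantics.  Passing INT64_MIN as min_ts and ts as max_ts
 * expresses "seek to ts, but never past it"; the variable names are
 * illustrative.
 */
static int example_seek_file(AVFormatContext *fmt_ctx, int64_t ts_us)
{
    /* stream_index -1: ts is interpreted in AV_TIME_BASE units */
    return avformat_seek_file(fmt_ctx, -1, INT64_MIN, ts_us, ts_us, 0);
}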
2087 
2088 /*******************************************************/
2089 
2090 /**
2091  * Return TRUE if the given format context has an accurate duration in any of its streams.
2092  *
2093  * @return TRUE if at least one stream (or the context itself) has a known duration.
2094  */
2095 static int has_duration(AVFormatContext *ic)
2096 {
2097  int i;
2098  AVStream *st;
2099 
2100  for(i = 0;i < ic->nb_streams; i++) {
2101  st = ic->streams[i];
2102  if (st->duration != AV_NOPTS_VALUE)
2103  return 1;
2104  }
2105  if (ic->duration != AV_NOPTS_VALUE)
2106  return 1;
2107  return 0;
2108 }
2109 
2110 /**
2111  * Estimate the global stream timings from those of each component stream.
2112  *
2113  * Also computes the global bitrate if possible.
2114  */
2115 static void update_stream_timings(AVFormatContext *ic)
2116 {
2117  int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2118  int64_t duration, duration1, filesize;
2119  int i;
2120  AVStream *st;
2121  AVProgram *p;
2122 
2123  start_time = INT64_MAX;
2124  start_time_text = INT64_MAX;
2125  end_time = INT64_MIN;
2126  duration = INT64_MIN;
2127  for(i = 0;i < ic->nb_streams; i++) {
2128  st = ic->streams[i];
2129  if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2130  start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2131  if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2132  if (start_time1 < start_time_text)
2133  start_time_text = start_time1;
2134  } else
2135  start_time = FFMIN(start_time, start_time1);
2136  end_time1 = AV_NOPTS_VALUE;
2137  if (st->duration != AV_NOPTS_VALUE) {
2138  end_time1 = start_time1
2139  + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2140  end_time = FFMAX(end_time, end_time1);
2141  }
2142  for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2143  if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2144  p->start_time = start_time1;
2145  if(p->end_time < end_time1)
2146  p->end_time = end_time1;
2147  }
2148  }
2149  if (st->duration != AV_NOPTS_VALUE) {
2150  duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2151  duration = FFMAX(duration, duration1);
2152  }
2153  }
2154  if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2155  start_time = start_time_text;
2156  else if(start_time > start_time_text)
2157  av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2158 
2159  if (start_time != INT64_MAX) {
2160  ic->start_time = start_time;
2161  if (end_time != INT64_MIN) {
2162  if (ic->nb_programs) {
2163  for (i=0; i<ic->nb_programs; i++) {
2164  p = ic->programs[i];
2165  if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2166  duration = FFMAX(duration, p->end_time - p->start_time);
2167  }
2168  } else
2169  duration = FFMAX(duration, end_time - start_time);
2170  }
2171  }
2172  if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2173  ic->duration = duration;
2174  }
2175  if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2176  /* compute the bitrate */
2177  double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2178  (double)ic->duration;
2179  if (bitrate >= 0 && bitrate <= INT_MAX)
2180  ic->bit_rate = bitrate;
2181  }
2182 }
2183 
2184 static void fill_all_stream_timings(AVFormatContext *ic)
2185 {
2186  int i;
2187  AVStream *st;
2188 
2189  update_stream_timings(ic);
2190  for(i = 0;i < ic->nb_streams; i++) {
2191  st = ic->streams[i];
2192  if (st->start_time == AV_NOPTS_VALUE) {
2193  if(ic->start_time != AV_NOPTS_VALUE)
2194  st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2195  if(ic->duration != AV_NOPTS_VALUE)
2196  st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
2197  }
2198  }
2199 }
2200 
2201 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2202 {
2203  int64_t filesize, duration;
2204  int bit_rate, i, show_warning = 0;
2205  AVStream *st;
2206 
2207  /* if bit_rate is already set, we believe it */
2208  if (ic->bit_rate <= 0) {
2209  bit_rate = 0;
2210  for(i=0;i<ic->nb_streams;i++) {
2211  st = ic->streams[i];
2212  if (st->codec->bit_rate > 0)
2213  bit_rate += st->codec->bit_rate;
2214  }
2215  ic->bit_rate = bit_rate;
2216  }
2217 
2218  /* if duration is already set, we believe it */
2219  if (ic->duration == AV_NOPTS_VALUE &&
2220  ic->bit_rate != 0) {
2221  filesize = ic->pb ? avio_size(ic->pb) : 0;
2222  if (filesize > 0) {
2223  for(i = 0; i < ic->nb_streams; i++) {
2224  st = ic->streams[i];
2225  if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2226  && st->duration == AV_NOPTS_VALUE) {
2227  duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2228  st->duration = duration;
2229  show_warning = 1;
2230  }
2231  }
2232  }
2233  }
2234  if (show_warning)
2235  av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2236 }
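/*
 * Worked example (not part of utils.c) for the estimate above, using
 * hypothetical numbers: a 10 MB file, a total bit rate of 1 Mb/s and a
 * 1/90000 time base give
 *     8 * 10e6 bits / 1e6 bit/s = 80 s  ->  80 * 90000 = 7 200 000
 * in stream time-base units, which is exactly what the av_rescale() call
 * computes:
 *     av_rescale(8 * 10000000, 90000, 1000000 * 1) == 7200000
 */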
2237 
2238 #define DURATION_MAX_READ_SIZE 250000LL
2239 #define DURATION_MAX_RETRY 4
2240 
2241 /* only usable for MPEG-PS streams */
2242 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2243 {
2244  AVPacket pkt1, *pkt = &pkt1;
2245  AVStream *st;
2246  int read_size, i, ret;
2247  int64_t end_time;
2248  int64_t filesize, offset, duration;
2249  int retry=0;
2250 
2251  /* flush packet queue */
2252  flush_packet_queue(ic);
2253 
2254  for (i=0; i<ic->nb_streams; i++) {
2255  st = ic->streams[i];
2256  if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2257  av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2258 
2259  if (st->parser) {
2260  av_parser_close(st->parser);
2261  st->parser= NULL;
2262  }
2263  }
2264 
2265  /* estimate the end time (duration) */
2266  /* XXX: may need to support wrapping */
2267  filesize = ic->pb ? avio_size(ic->pb) : 0;
2268  end_time = AV_NOPTS_VALUE;
2269  do{
2270  offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2271  if (offset < 0)
2272  offset = 0;
2273 
2274  avio_seek(ic->pb, offset, SEEK_SET);
2275  read_size = 0;
2276  for(;;) {
2277  if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2278  break;
2279 
2280  do {
2281  ret = ff_read_packet(ic, pkt);
2282  } while(ret == AVERROR(EAGAIN));
2283  if (ret != 0)
2284  break;
2285  read_size += pkt->size;
2286  st = ic->streams[pkt->stream_index];
2287  if (pkt->pts != AV_NOPTS_VALUE &&
2288  (st->start_time != AV_NOPTS_VALUE ||
2289  st->first_dts != AV_NOPTS_VALUE)) {
2290  duration = end_time = pkt->pts;
2291  if (st->start_time != AV_NOPTS_VALUE)
2292  duration -= st->start_time;
2293  else
2294  duration -= st->first_dts;
2295  if (duration > 0) {
2296  if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2297  (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2298  st->duration = duration;
2299  st->info->last_duration = duration;
2300  }
2301  }
2302  av_free_packet(pkt);
2303  }
2304  }while( end_time==AV_NOPTS_VALUE
2305  && filesize > (DURATION_MAX_READ_SIZE<<retry)
2306  && ++retry <= DURATION_MAX_RETRY);
2307 
2308  fill_all_stream_timings(ic);
2309 
2310  avio_seek(ic->pb, old_offset, SEEK_SET);
2311  for (i=0; i<ic->nb_streams; i++) {
2312  st= ic->streams[i];
2313  st->cur_dts= st->first_dts;
2316  }
2317 }
2318 
2319 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2320 {
2321  int64_t file_size;
2322 
2323  /* get the file size, if possible */
2324  if (ic->iformat->flags & AVFMT_NOFILE) {
2325  file_size = 0;
2326  } else {
2327  file_size = avio_size(ic->pb);
2328  file_size = FFMAX(0, file_size);
2329  }
2330 
2331  if ((!strcmp(ic->iformat->name, "mpeg") ||
2332  !strcmp(ic->iformat->name, "mpegts")) &&
2333  file_size && ic->pb->seekable) {
2334  /* get accurate estimate from the PTSes */
2335  estimate_timings_from_pts(ic, old_offset);
2336  ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2337  } else if (has_duration(ic)) {
2338  /* at least one component has timings - we use them for all
2339  the components */
2340  fill_all_stream_timings(ic);
2341  ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2342  } else {
2343  /* less precise: use bitrate info */
2344  estimate_timings_from_bit_rate(ic);
2345  ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2346  }
2347  update_stream_timings(ic);
2348 
2349  {
2350  int i;
2351  AVStream av_unused *st;
2352  for(i = 0;i < ic->nb_streams; i++) {
2353  st = ic->streams[i];
2354  av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2355  (double) st->start_time / AV_TIME_BASE,
2356  (double) st->duration / AV_TIME_BASE);
2357  }
2358  av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2359  (double) ic->start_time / AV_TIME_BASE,
2360  (double) ic->duration / AV_TIME_BASE,
2361  ic->bit_rate / 1000);
2362  }
2363 }
2364 
2365 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2366 {
2367  AVCodecContext *avctx = st->codec;
2368 
2369 #define FAIL(errmsg) do { \
2370  if (errmsg_ptr) \
2371  *errmsg_ptr = errmsg; \
2372  return 0; \
2373  } while (0)
2374 
2375  switch (avctx->codec_type) {
2376  case AVMEDIA_TYPE_AUDIO:
2377  if (!avctx->frame_size && determinable_frame_size(avctx))
2378  FAIL("unspecified frame size");
2379  if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2380  FAIL("unspecified sample format");
2381  if (!avctx->sample_rate)
2382  FAIL("unspecified sample rate");
2383  if (!avctx->channels)
2384  FAIL("unspecified number of channels");
2385  if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2386  FAIL("no decodable DTS frames");
2387  break;
2388  case AVMEDIA_TYPE_VIDEO:
2389  if (!avctx->width)
2390  FAIL("unspecified size");
2391  if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2392  FAIL("unspecified pixel format");
2395  FAIL("no frame in rv30/40 and no sar");
2396  break;
2397  case AVMEDIA_TYPE_SUBTITLE:
2398  if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2399  FAIL("unspecified size");
2400  break;
2401  case AVMEDIA_TYPE_DATA:
2402  if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2403  }
2404 
2405  if (avctx->codec_id == AV_CODEC_ID_NONE)
2406  FAIL("unknown codec");
2407  return 1;
2408 }
2409 
2410 /* Returns 1 if decoded data was returned, 0 if it was not, or a negative error code. */
2411 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2412 {
2413  const AVCodec *codec;
2414  int got_picture = 1, ret = 0;
2415  AVFrame *frame = avcodec_alloc_frame();
2416  AVSubtitle subtitle;
2417  AVPacket pkt = *avpkt;
2418 
2419  if (!frame)
2420  return AVERROR(ENOMEM);
2421 
2422  if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2423  AVDictionary *thread_opt = NULL;
2424 
2425  codec = st->codec->codec ? st->codec->codec :
2426  avcodec_find_decoder(st->codec->codec_id);
2427 
2428  if (!codec) {
2429  st->info->found_decoder = -1;
2430  ret = -1;
2431  goto fail;
2432  }
2433 
2434  /* force thread count to 1 since the h264 decoder will not extract SPS
2435  * and PPS to extradata during multi-threaded decoding */
2436  av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2437  ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2438  if (!options)
2439  av_dict_free(&thread_opt);
2440  if (ret < 0) {
2441  st->info->found_decoder = -1;
2442  goto fail;
2443  }
2444  st->info->found_decoder = 1;
2445  } else if (!st->info->found_decoder)
2446  st->info->found_decoder = 1;
2447 
2448  if (st->info->found_decoder < 0) {
2449  ret = -1;
2450  goto fail;
2451  }
2452 
2453  while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2454  ret >= 0 &&
2455  (!has_codec_parameters(st, NULL) ||
2458  got_picture = 0;
2460  switch(st->codec->codec_type) {
2461  case AVMEDIA_TYPE_VIDEO:
2462  ret = avcodec_decode_video2(st->codec, frame,
2463  &got_picture, &pkt);
2464  break;
2465  case AVMEDIA_TYPE_AUDIO:
2466  ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2467  break;
2468  case AVMEDIA_TYPE_SUBTITLE:
2469  ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2470  &got_picture, &pkt);
2471  ret = pkt.size;
2472  break;
2473  default:
2474  break;
2475  }
2476  if (ret >= 0) {
2477  if (got_picture)
2478  st->nb_decoded_frames++;
2479  pkt.data += ret;
2480  pkt.size -= ret;
2481  ret = got_picture;
2482  }
2483  }
2484 
2485  if(!pkt.data && !got_picture)
2486  ret = -1;
2487 
2488 fail:
2489  avcodec_free_frame(&frame);
2490  return ret;
2491 }
2492 
2493 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2494 {
2495  while (tags->id != AV_CODEC_ID_NONE) {
2496  if (tags->id == id)
2497  return tags->tag;
2498  tags++;
2499  }
2500  return 0;
2501 }
2502 
2503 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2504 {
2505  int i;
2506  for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2507  if(tag == tags[i].tag)
2508  return tags[i].id;
2509  }
2510  for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2511  if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2512  return tags[i].id;
2513  }
2514  return AV_CODEC_ID_NONE;
2515 }
2516 
2517 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2518 {
2519  if (flt) {
2520  switch (bps) {
2521  case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2522  case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2523  default: return AV_CODEC_ID_NONE;
2524  }
2525  } else {
2526  bps += 7;
2527  bps >>= 3;
2528  if (sflags & (1 << (bps - 1))) {
2529  switch (bps) {
2530  case 1: return AV_CODEC_ID_PCM_S8;
2531  case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2532  case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2533  case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2534  default: return AV_CODEC_ID_NONE;
2535  }
2536  } else {
2537  switch (bps) {
2538  case 1: return AV_CODEC_ID_PCM_U8;
2539  case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2540  case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2541  case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2542  default: return AV_CODEC_ID_NONE;
2543  }
2544  }
2545  }
2546 }
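/*
 * Usage sketch (not part of utils.c): mapping raw PCM parameters to a codec
 * ID.  As the code above shows, sflags is a bitmask where bit
 * (bytes_per_sample - 1) marks that sample width as signed.  For example, a
 * 16-bit little-endian signed stream could be resolved as:
 *     enum AVCodecID id = ff_get_pcm_codec_id(16, 0, 0, 1 << 1);
 *     // id == AV_CODEC_ID_PCM_S16LE
 */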
2547 
2548 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2549 {
2550  unsigned int tag;
2551  if (!av_codec_get_tag2(tags, id, &tag))
2552  return 0;
2553  return tag;
2554 }
2555 
2556 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2557  unsigned int *tag)
2558 {
2559  int i;
2560  for(i=0; tags && tags[i]; i++){
2561  const AVCodecTag *codec_tags = tags[i];
2562  while (codec_tags->id != AV_CODEC_ID_NONE) {
2563  if (codec_tags->id == id) {
2564  *tag = codec_tags->tag;
2565  return 1;
2566  }
2567  codec_tags++;
2568  }
2569  }
2570  return 0;
2571 }
2572 
2573 enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2574 {
2575  int i;
2576  for(i=0; tags && tags[i]; i++){
2577  enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2578  if(id!=AV_CODEC_ID_NONE) return id;
2579  }
2580  return AV_CODEC_ID_NONE;
2581 }
2582 
2583 static void compute_chapters_end(AVFormatContext *s)
2584 {
2585  unsigned int i, j;
2586  int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2587 
2588  for (i = 0; i < s->nb_chapters; i++)
2589  if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2590  AVChapter *ch = s->chapters[i];
2591  int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2592  : INT64_MAX;
2593 
2594  for (j = 0; j < s->nb_chapters; j++) {
2595  AVChapter *ch1 = s->chapters[j];
2596  int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2597  if (j != i && next_start > ch->start && next_start < end)
2598  end = next_start;
2599  }
2600  ch->end = (end == INT64_MAX) ? ch->start : end;
2601  }
2602 }
2603 
2604 static int get_std_framerate(int i){
2605  if(i<60*12) return (i+1)*1001;
2606  else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
2607 }
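/*
 * Note (not part of utils.c): get_std_framerate() enumerates the candidate
 * frame rates used by the fps-guessing code below, expressed as numerators
 * over a common 12*1001 denominator.  For i < 60*12 it yields (i+1)*1001,
 * i.e. every multiple of 1/12 fps up to 60 fps; the last six entries add the
 * common NTSC-style rates, e.g.
 *     get_std_framerate(60*12 + 0) / (12.0*1001) == 24000.0/1001  (~23.976 fps)
 *     get_std_framerate(60*12 + 1) / (12.0*1001) == 30000.0/1001  (~29.97 fps)
 */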
2608 
2609 /*
2610  * Is the time base unreliable.
2611  * This is a heuristic to balance between quick acceptance of the values in
2612  * the headers vs. some extra checks.
2613  * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2614  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2615  * And there are "variable" fps files this needs to detect as well.
2616  */
2617 static int tb_unreliable(AVCodecContext *c){
2618  if( c->time_base.den >= 101L*c->time_base.num
2619  || c->time_base.den < 5L*c->time_base.num
2620 /* || c->codec_tag == AV_RL32("DIVX")
2621  || c->codec_tag == AV_RL32("XVID")*/
2622  || c->codec_tag == AV_RL32("mp4v")
2624  || c->codec_id == AV_CODEC_ID_H264
2625  )
2626  return 1;
2627  return 0;
2628 }
2629 
2630 #if FF_API_FORMAT_PARAMETERS
2631 int av_find_stream_info(AVFormatContext *ic)
2632 {
2633  return avformat_find_stream_info(ic, NULL);
2634 }
2635 #endif
2636 
2637 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2638 {
2639  int i, count, ret = 0, j;
2640  int64_t read_size;
2641  AVStream *st;
2642  AVPacket pkt1, *pkt;
2643  int64_t old_offset = avio_tell(ic->pb);
2644  int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2645  int flush_codecs = ic->probesize > 0;
2646 
2647  if(ic->pb)
2648  av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2649 
2650  for(i=0;i<ic->nb_streams;i++) {
2651  const AVCodec *codec;
2652  AVDictionary *thread_opt = NULL;
2653  st = ic->streams[i];
2654 
2655  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2656  st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2657 /* if(!st->time_base.num)
2658  st->time_base= */
2659  if(!st->codec->time_base.num)
2660  st->codec->time_base= st->time_base;
2661  }
2662  //only for the split stuff
2663  if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2664  st->parser = av_parser_init(st->codec->codec_id);
2665  if(st->parser){
2666  if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2667  st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2668  } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2669  st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2670  }
2671  } else if (st->need_parsing) {
2672  av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2673  "%s, packets or times may be invalid.\n",
2674  avcodec_get_name(st->codec->codec_id));
2675  }
2676  }
2677  codec = st->codec->codec ? st->codec->codec :
2678  avcodec_find_decoder(st->codec->codec_id);
2679 
2680  /* force thread count to 1 since the h264 decoder will not extract SPS
2681  * and PPS to extradata during multi-threaded decoding */
2682  av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2683 
2684  /* Ensure that subtitle_header is properly set. */
2685  if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2686  && codec && !st->codec->codec)
2687  avcodec_open2(st->codec, codec, options ? &options[i]
2688  : &thread_opt);
2689 
2690  //try to just open decoders, in case this is enough to get parameters
2691  if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2692  if (codec && !st->codec->codec)
2693  avcodec_open2(st->codec, codec, options ? &options[i]
2694  : &thread_opt);
2695  }
2696  if (!options)
2697  av_dict_free(&thread_opt);
2698  }
2699 
2700  for (i=0; i<ic->nb_streams; i++) {
2701 #if FF_API_R_FRAME_RATE
2702  ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2703 #endif
2704  ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2705  ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2706  }
2707 
2708  count = 0;
2709  read_size = 0;
2710  for(;;) {
2711  if (ff_check_interrupt(&ic->interrupt_callback)){
2712  ret= AVERROR_EXIT;
2713  av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2714  break;
2715  }
2716 
2717  /* check if one codec still needs to be handled */
2718  for(i=0;i<ic->nb_streams;i++) {
2719  int fps_analyze_framecount = 20;
2720 
2721  st = ic->streams[i];
2722  if (!has_codec_parameters(st, NULL))
2723  break;
2724  /* if the timebase is coarse (like the usual millisecond precision
2725  of mkv), we need to analyze more frames to reliably arrive at
2726  the correct fps */
2727  if (av_q2d(st->time_base) > 0.0005)
2728  fps_analyze_framecount *= 2;
2729  if (ic->fps_probe_size >= 0)
2730  fps_analyze_framecount = ic->fps_probe_size;
2731  if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2732  fps_analyze_framecount = 0;
2733  /* variable fps and no guess at the real fps */
2734  if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2735  && st->info->duration_count < fps_analyze_framecount
2736  && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2737  break;
2738  if(st->parser && st->parser->parser->split && !st->codec->extradata)
2739  break;
2740  if (st->first_dts == AV_NOPTS_VALUE &&
2741  (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2742  st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2743  break;
2744  }
2745  if (i == ic->nb_streams) {
2746  /* NOTE: if the format has no header, then we need to read
2747  some packets to get most of the streams, so we cannot
2748  stop here */
2749  if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2750  /* if we found the info for all the codecs, we can stop */
2751  ret = count;
2752  av_log(ic, AV_LOG_DEBUG, "All info found\n");
2753  flush_codecs = 0;
2754  break;
2755  }
2756  }
2757  /* we did not get all the codec info, but we read too much data */
2758  if (read_size >= ic->probesize) {
2759  ret = count;
2760  av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2761  for (i = 0; i < ic->nb_streams; i++)
2762  if (!ic->streams[i]->r_frame_rate.num &&
2763  ic->streams[i]->info->duration_count <= 1)
2764  av_log(ic, AV_LOG_WARNING,
2765  "Stream #%d: not enough frames to estimate rate; "
2766  "consider increasing probesize\n", i);
2767  break;
2768  }
2769 
2770  /* NOTE: a new stream can be added there if no header in file
2771  (AVFMTCTX_NOHEADER) */
2772  ret = read_frame_internal(ic, &pkt1);
2773  if (ret == AVERROR(EAGAIN))
2774  continue;
2775 
2776  if (ret < 0) {
2777  /* EOF or error*/
2778  break;
2779  }
2780 
2781  if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2782  pkt = &pkt1;
2783  } else {
2784  pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2785  &ic->packet_buffer_end);
2786  if ((ret = av_dup_packet(pkt)) < 0)
2787  goto find_stream_info_err;
2788  }
2789 
2790  read_size += pkt->size;
2791 
2792  st = ic->streams[pkt->stream_index];
2793  if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2794  /* check for non-increasing dts */
2795  if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2796  st->info->fps_last_dts >= pkt->dts) {
2797  av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2798  "packet %d with DTS %"PRId64", packet %d with DTS "
2799  "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2800  st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2802  }
2803  /* check for a discontinuity in dts - if the difference in dts
2804  * is more than 1000 times the average packet duration in the sequence,
2805  * we treat it as a discontinuity */
2806  if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2808  (pkt->dts - st->info->fps_last_dts) / 1000 >
2810  av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2811  "packet %d with DTS %"PRId64", packet %d with DTS "
2812  "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2813  st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2815  }
2816 
2817  /* update stored dts values */
2818  if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2819  st->info->fps_first_dts = pkt->dts;
2821  }
2822  st->info->fps_last_dts = pkt->dts;
2824  }
2825  if (st->codec_info_nb_frames>1) {
2826  int64_t t=0;
2827  if (st->time_base.den > 0)
2829  if (st->avg_frame_rate.num > 0)
2831 
2832  if ( t==0
2833  && st->codec_info_nb_frames>30
2834  && st->info->fps_first_dts != AV_NOPTS_VALUE
2835  && st->info->fps_last_dts != AV_NOPTS_VALUE)
2837 
2838  if (t >= ic->max_analyze_duration) {
2839  av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2840  break;
2841  }
2842  if (pkt->duration) {
2843  st->info->codec_info_duration += pkt->duration;
2844  st->info->codec_info_duration_fields += st->parser && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
2845  }
2846  }
2847 #if FF_API_R_FRAME_RATE
2848  {
2849  int64_t last = st->info->last_dts;
2850 
2851  if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2852  && pkt->dts - (uint64_t)last < INT64_MAX){
2853  double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2854  int64_t duration= pkt->dts - last;
2855 
2856  if (!st->info->duration_error)
2857  st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2858 
2859 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2860 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2861  for (i=0; i<MAX_STD_TIMEBASES; i++) {
2862  int framerate= get_std_framerate(i);
2863  double sdts= dts*framerate/(1001*12);
2864  for(j=0; j<2; j++){
2865  int64_t ticks= llrint(sdts+j*0.5);
2866  double error= sdts - ticks + j*0.5;
2867  st->info->duration_error[j][0][i] += error;
2868  st->info->duration_error[j][1][i] += error*error;
2869  }
2870  }
2871  st->info->duration_count++;
2872  // ignore the first 4 values, they might have some random jitter
2873  if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2874  st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2875  }
2876  if (pkt->dts != AV_NOPTS_VALUE)
2877  st->info->last_dts = pkt->dts;
2878  }
2879 #endif
2880  if(st->parser && st->parser->parser->split && !st->codec->extradata){
2881  int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2882  if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2883  st->codec->extradata_size= i;
2884  st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2885  if (!st->codec->extradata)
2886  return AVERROR(ENOMEM);
2887  memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2888  memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2889  }
2890  }
2891 
2892  /* if still no information, we try to open the codec and to
2893  decompress the frame. We try to avoid that in most cases as
2894  it takes longer and uses more memory. For MPEG-4, we need to
2895  decompress for QuickTime.
2896 
2897  If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2898  least one frame of codec data, this makes sure the codec initializes
2899  the channel configuration and does not only trust the values from the container.
2900  */
2901  try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2902 
2903  st->codec_info_nb_frames++;
2904  count++;
2905  }
2906 
2907  if (flush_codecs) {
2908  AVPacket empty_pkt = { 0 };
2909  int err = 0;
2910  av_init_packet(&empty_pkt);
2911 
2912  for(i=0;i<ic->nb_streams;i++) {
2913 
2914  st = ic->streams[i];
2915 
2916  /* flush the decoders */
2917  if (st->info->found_decoder == 1) {
2918  do {
2919  err = try_decode_frame(st, &empty_pkt,
2920  (options && i < orig_nb_streams) ?
2921  &options[i] : NULL);
2922  } while (err > 0 && !has_codec_parameters(st, NULL));
2923 
2924  if (err < 0) {
2925  av_log(ic, AV_LOG_INFO,
2926  "decoding for stream %d failed\n", st->index);
2927  }
2928  }
2929  }
2930  }
2931 
2932  // close codecs which were opened in try_decode_frame()
2933  for(i=0;i<ic->nb_streams;i++) {
2934  st = ic->streams[i];
2935  avcodec_close(st->codec);
2936  }
2937  for(i=0;i<ic->nb_streams;i++) {
2938  st = ic->streams[i];
2939  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2943  st->codec->codec_tag= tag;
2944  }
2945 
2946  /* estimate average framerate if not set by demuxer */
2947  if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
2948  int best_fps = 0;
2949  double best_error = 0.01;
2950 
2951  av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2952  st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
2953  st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
2954 
2955  /* round guessed framerate to a "standard" framerate if it's
2956  * within 1% of the original estimate*/
2957  for (j = 1; j < MAX_STD_TIMEBASES; j++) {
2958  AVRational std_fps = { get_std_framerate(j), 12*1001 };
2959  double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
2960 
2961  if (error < best_error) {
2962  best_error = error;
2963  best_fps = std_fps.num;
2964  }
2965  }
2966  if (best_fps) {
2967  av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2968  best_fps, 12*1001, INT_MAX);
2969  }
2970  }
2971  // the check for tb_unreliable() is not completely correct, since this is not about handling
2972  // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2973  // ipmovie.c produces.
2974  if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2975  av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2976  if (st->info->duration_count>1 && !st->r_frame_rate.num
2977  && tb_unreliable(st->codec)) {
2978  int num = 0;
2979  double best_error= 0.01;
2980 
2981  for (j=0; j<MAX_STD_TIMEBASES; j++) {
2982  int k;
2983 
2984  if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2985  continue;
2986  if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2987  continue;
2988  for(k=0; k<2; k++){
2989  int n= st->info->duration_count;
2990  double a= st->info->duration_error[k][0][j] / n;
2991  double error= st->info->duration_error[k][1][j]/n - a*a;
2992 
2993  if(error < best_error && best_error> 0.000000001){
2994  best_error= error;
2995  num = get_std_framerate(j);
2996  }
2997  if(error < 0.02)
2998  av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2999  }
3000  }
3001  // do not increase frame rate by more than 1 % in order to match a standard rate.
3002  if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3003  av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3004  }
3005 
3006  if (!st->r_frame_rate.num){
3007  if( st->codec->time_base.den * (int64_t)st->time_base.num
3008  <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3009  st->r_frame_rate.num = st->codec->time_base.den;
3010  st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3011  }else{
3012  st->r_frame_rate.num = st->time_base.den;
3013  st->r_frame_rate.den = st->time_base.num;
3014  }
3015  }
3016  }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3017  if(!st->codec->bits_per_coded_sample)
3018  st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3019  // set stream disposition based on audio service type
3020  switch (st->codec->audio_service_type) {
3021  case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3022  st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3023  case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3024  st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3025  case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3026  st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3027  case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3028  st->disposition = AV_DISPOSITION_COMMENT; break;
3029  case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3030  st->disposition = AV_DISPOSITION_KARAOKE; break;
3031  }
3032  }
3033  }
3034 
3035  if(ic->probesize)
3036  estimate_timings(ic, old_offset);
3037 
3038  if (ret >= 0 && ic->nb_streams)
3039  ret = -1; /* we could not have all the codec parameters before EOF */
3040  for(i=0;i<ic->nb_streams;i++) {
3041  const char *errmsg;
3042  st = ic->streams[i];
3043  if (!has_codec_parameters(st, &errmsg)) {
3044  char buf[256];
3045  avcodec_string(buf, sizeof(buf), st->codec, 0);
3046  av_log(ic, AV_LOG_WARNING,
3047  "Could not find codec parameters for stream %d (%s): %s\n"
3048  "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3049  i, buf, errmsg);
3050  } else {
3051  ret = 0;
3052  }
3053  }
3054 
3055  compute_chapters_end(ic);
3056 
3057  find_stream_info_err:
3058  for (i=0; i < ic->nb_streams; i++) {
3059  st = ic->streams[i];
3060  if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3061  ic->streams[i]->codec->thread_count = 0;
3062  if (st->info)
3063  av_freep(&st->info->duration_error);
3064  av_freep(&ic->streams[i]->info);
3065  }
3066  if(ic->pb)
3067  av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
3068  return ret;
3069 }
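/*
 * Usage sketch (not part of utils.c): the usual open/probe sequence around
 * avformat_find_stream_info().  The filename and error handling are
 * illustrative.
 */
static int example_probe(const char *filename)
{
    AVFormatContext *fmt_ctx = NULL;
    int ret;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0)
        return ret;

    /* fills in codec parameters, durations, frame rates, ... */
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        avformat_close_input(&fmt_ctx);
        return ret;
    }

    av_dump_format(fmt_ctx, 0, filename, 0);
    avformat_close_input(&fmt_ctx);
    return 0;
}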
3070 
3071 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3072 {
3073  int i, j;
3074 
3075  for (i = 0; i < ic->nb_programs; i++) {
3076  if (ic->programs[i] == last) {
3077  last = NULL;
3078  } else {
3079  if (!last)
3080  for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3081  if (ic->programs[i]->stream_index[j] == s)
3082  return ic->programs[i];
3083  }
3084  }
3085  return NULL;
3086 }
3087 
3088 int av_find_best_stream(AVFormatContext *ic,
3089  enum AVMediaType type,
3090  int wanted_stream_nb,
3091  int related_stream,
3092  AVCodec **decoder_ret,
3093  int flags)
3094 {
3095  int i, nb_streams = ic->nb_streams;
3096  int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3097  unsigned *program = NULL;
3098  AVCodec *decoder = NULL, *best_decoder = NULL;
3099 
3100  if (related_stream >= 0 && wanted_stream_nb < 0) {
3101  AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3102  if (p) {
3103  program = p->stream_index;
3104  nb_streams = p->nb_stream_indexes;
3105  }
3106  }
3107  for (i = 0; i < nb_streams; i++) {
3108  int real_stream_index = program ? program[i] : i;
3109  AVStream *st = ic->streams[real_stream_index];
3110  AVCodecContext *avctx = st->codec;
3111  if (avctx->codec_type != type)
3112  continue;
3113  if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3114  continue;
3116  continue;
3117  if (decoder_ret) {
3118  decoder = avcodec_find_decoder(st->codec->codec_id);
3119  if (!decoder) {
3120  if (ret < 0)
3121  ret = AVERROR_DECODER_NOT_FOUND;
3122  continue;
3123  }
3124  }
3125  count = st->codec_info_nb_frames;
3126  bitrate = avctx->bit_rate;
3127  multiframe = FFMIN(5, count);
3128  if ((best_multiframe > multiframe) ||
3129  (best_multiframe == multiframe && best_bitrate > bitrate) ||
3130  (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3131  continue;
3132  best_count = count;
3133  best_bitrate = bitrate;
3134  best_multiframe = multiframe;
3135  ret = real_stream_index;
3136  best_decoder = decoder;
3137  if (program && i == nb_streams - 1 && ret < 0) {
3138  program = NULL;
3139  nb_streams = ic->nb_streams;
3140  i = 0; /* no related stream found, try again with everything */
3141  }
3142  }
3143  if (decoder_ret)
3144  *decoder_ret = best_decoder;
3145  return ret;
3146 }
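/*
 * Usage sketch (not part of utils.c): picking the "best" video stream and a
 * decoder for it in one call; fmt_ctx is assumed to be an opened and probed
 * context.
 */
static int example_pick_video_stream(AVFormatContext *fmt_ctx, AVCodec **dec)
{
    /* a negative return value is AVERROR_STREAM_NOT_FOUND or
     * AVERROR_DECODER_NOT_FOUND; otherwise the stream index is returned */
    return av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO,
                               -1 /* any stream */, -1 /* no related stream */,
                               dec, 0);
}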
3147 
3148 /*******************************************************/
3149 
3150 int av_read_play(AVFormatContext *s)
3151 {
3152  if (s->iformat->read_play)
3153  return s->iformat->read_play(s);
3154  if (s->pb)
3155  return avio_pause(s->pb, 0);
3156  return AVERROR(ENOSYS);
3157 }
3158 
3159 int av_read_pause(AVFormatContext *s)
3160 {
3161  if (s->iformat->read_pause)
3162  return s->iformat->read_pause(s);
3163  if (s->pb)
3164  return avio_pause(s->pb, 1);
3165  return AVERROR(ENOSYS);
3166 }
3167 
3168 void ff_free_stream(AVFormatContext *s, AVStream *st){
3169  av_assert0(s->nb_streams>0);
3170  av_assert0(s->streams[ s->nb_streams-1 ] == st);
3171 
3172  if (st->parser) {
3173  av_parser_close(st->parser);
3174  }
3175  if (st->attached_pic.data)
3176  av_free_packet(&st->attached_pic);
3177  av_dict_free(&st->metadata);
3178  av_freep(&st->probe_data.buf);
3179  av_freep(&st->index_entries);
3180  av_freep(&st->codec->extradata);
3182  av_freep(&st->codec);
3183  av_freep(&st->priv_data);
3184  if (st->info)
3185  av_freep(&st->info->duration_error);
3186  av_freep(&st->info);
3187  av_freep(&s->streams[ --s->nb_streams ]);
3188 }
3189 
3190 void avformat_free_context(AVFormatContext *s)
3191 {
3192  int i;
3193 
3194  if (!s)
3195  return;
3196 
3197  av_opt_free(s);
3198  if (s->iformat && s->iformat->priv_class && s->priv_data)
3199  av_opt_free(s->priv_data);
3200 
3201  for(i=s->nb_streams-1; i>=0; i--) {
3202  ff_free_stream(s, s->streams[i]);
3203  }
3204  for(i=s->nb_programs-1; i>=0; i--) {
3205  av_dict_free(&s->programs[i]->metadata);
3206  av_freep(&s->programs[i]->stream_index);
3207  av_freep(&s->programs[i]);
3208  }
3209  av_freep(&s->programs);
3210  av_freep(&s->priv_data);
3211  while(s->nb_chapters--) {
3213  av_freep(&s->chapters[s->nb_chapters]);
3214  }
3215  av_freep(&s->chapters);
3216  av_dict_free(&s->metadata);
3217  av_freep(&s->streams);
3218  av_free(s);
3219 }
3220 
3221 #if FF_API_CLOSE_INPUT_FILE
3222 void av_close_input_file(AVFormatContext *s)
3223 {
3224  avformat_close_input(&s);
3225 }
3226 #endif
3227 
3228 void avformat_close_input(AVFormatContext **ps)
3229 {
3230  AVFormatContext *s = *ps;
3231  AVIOContext *pb = s->pb;
3232 
3233  if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3234  (s->flags & AVFMT_FLAG_CUSTOM_IO))
3235  pb = NULL;
3236 
3237  flush_packet_queue(s);
3238 
3239  if (s->iformat) {
3240  if (s->iformat->read_close)
3241  s->iformat->read_close(s);
3242  }
3243 
3244  avformat_free_context(s);
3245 
3246  *ps = NULL;
3247 
3248  avio_close(pb);
3249 }
3250 
3251 #if FF_API_NEW_STREAM
3252 AVStream *av_new_stream(AVFormatContext *s, int id)
3253 {
3254  AVStream *st = avformat_new_stream(s, NULL);
3255  if (st)
3256  st->id = id;
3257  return st;
3258 }
3259 #endif
3260 
3261 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3262 {
3263  AVStream *st;
3264  int i;
3265  AVStream **streams;
3266 
3267  if (s->nb_streams >= INT_MAX/sizeof(*streams))
3268  return NULL;
3269  streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
3270  if (!streams)
3271  return NULL;
3272  s->streams = streams;
3273 
3274  st = av_mallocz(sizeof(AVStream));
3275  if (!st)
3276  return NULL;
3277  if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3278  av_free(st);
3279  return NULL;
3280  }
3281  st->info->last_dts = AV_NOPTS_VALUE;
3282 
3283  st->codec = avcodec_alloc_context3(c);
3284  if (s->iformat) {
3285  /* no default bitrate if decoding */
3286  st->codec->bit_rate = 0;
3287  }
3288  st->index = s->nb_streams;
3289  st->start_time = AV_NOPTS_VALUE;
3290  st->duration = AV_NOPTS_VALUE;
3291  /* we set the current DTS to 0 so that formats without any timestamps
3292  but durations get some timestamps, formats with some unknown
3293  timestamps have their first few packets buffered and the
3294  timestamps corrected before they are returned to the user */
3295  st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3296  st->first_dts = AV_NOPTS_VALUE;
3300 
3301  /* default pts setting is MPEG-like */
3302  avpriv_set_pts_info(st, 33, 1, 90000);
3304  for(i=0; i<MAX_REORDER_DELAY+1; i++)
3305  st->pts_buffer[i]= AV_NOPTS_VALUE;
3307 
3308  st->sample_aspect_ratio = (AVRational){0,1};
3309 
3310 #if FF_API_R_FRAME_RATE
3311  st->info->last_dts = AV_NOPTS_VALUE;
3312 #endif
3315 
3316  s->streams[s->nb_streams++] = st;
3317  return st;
3318 }
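/*
 * Usage sketch (not part of utils.c): how a muxing application typically
 * creates a stream with avformat_new_stream() on an output context.  The
 * encoder choice and the stream id assignment are illustrative.
 */
static AVStream *example_add_stream(AVFormatContext *oc, enum AVCodecID codec_id)
{
    AVCodec  *enc = avcodec_find_encoder(codec_id);
    AVStream *st  = avformat_new_stream(oc, enc);

    if (!st)
        return NULL;
    st->id = oc->nb_streams - 1;   /* simple sequential stream id */
    return st;
}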
3319 
3320 AVProgram *av_new_program(AVFormatContext *ac, int id)
3321 {
3322  AVProgram *program=NULL;
3323  int i;
3324 
3325  av_dlog(ac, "new_program: id=0x%04x\n", id);
3326 
3327  for(i=0; i<ac->nb_programs; i++)
3328  if(ac->programs[i]->id == id)
3329  program = ac->programs[i];
3330 
3331  if(!program){
3332  program = av_mallocz(sizeof(AVProgram));
3333  if (!program)
3334  return NULL;
3335  dynarray_add(&ac->programs, &ac->nb_programs, program);
3336  program->discard = AVDISCARD_NONE;
3337  }
3338  program->id = id;
3341 
3342  program->start_time =
3343  program->end_time = AV_NOPTS_VALUE;
3344 
3345  return program;
3346 }
3347 
3348 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3349 {
3350  AVChapter *chapter = NULL;
3351  int i;
3352 
3353  for(i=0; i<s->nb_chapters; i++)
3354  if(s->chapters[i]->id == id)
3355  chapter = s->chapters[i];
3356 
3357  if(!chapter){
3358  chapter= av_mallocz(sizeof(AVChapter));
3359  if(!chapter)
3360  return NULL;
3361  dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3362  }
3363  av_dict_set(&chapter->metadata, "title", title, 0);
3364  chapter->id = id;
3365  chapter->time_base= time_base;
3366  chapter->start = start;
3367  chapter->end = end;
3368 
3369  return chapter;
3370 }
3371 
3372 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3373 {
3374  int i, j;
3375  AVProgram *program=NULL;
3376  void *tmp;
3377 
3378  if (idx >= ac->nb_streams) {
3379  av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3380  return;
3381  }
3382 
3383  for(i=0; i<ac->nb_programs; i++){
3384  if(ac->programs[i]->id != progid)
3385  continue;
3386  program = ac->programs[i];
3387  for(j=0; j<program->nb_stream_indexes; j++)
3388  if(program->stream_index[j] == idx)
3389  return;
3390 
3391  tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3392  if(!tmp)
3393  return;
3394  program->stream_index = tmp;
3395  program->stream_index[program->nb_stream_indexes++] = idx;
3396  return;
3397  }
3398 }
3399 
3400 static void print_fps(double d, const char *postfix){
3401  uint64_t v= lrintf(d*100);
3402  if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3403  else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3404  else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3405 }
3406 
3407 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3408 {
3409  if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3410  AVDictionaryEntry *tag=NULL;
3411 
3412  av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3413  while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3414  if(strcmp("language", tag->key)){
3415  const char *p = tag->value;
3416  av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
3417  while(*p) {
3418  char tmp[256];
3419  size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3420  av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3421  av_log(ctx, AV_LOG_INFO, "%s", tmp);
3422  p += len;
3423  if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3424  if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3425  if (*p) p++;
3426  }
3427  av_log(ctx, AV_LOG_INFO, "\n");
3428  }
3429  }
3430  }
3431 }
3432 
3433 /* "user interface" functions */
3434 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3435 {
3436  char buf[256];
3437  int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3438  AVStream *st = ic->streams[i];
3439  int g = av_gcd(st->time_base.num, st->time_base.den);
3440  AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3441  avcodec_string(buf, sizeof(buf), st->codec, is_output);
3442  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3443  /* the pid is important information, so we display it */
3444  /* XXX: add a generic system */
3445  if (flags & AVFMT_SHOW_IDS)
3446  av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3447  if (lang)
3448  av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3449  av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3450  av_log(NULL, AV_LOG_INFO, ": %s", buf);
3451  if (st->sample_aspect_ratio.num && // default
3452  av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3453  AVRational display_aspect_ratio;
3454  av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3455  st->codec->width*st->sample_aspect_ratio.num,
3456  st->codec->height*st->sample_aspect_ratio.den,
3457  1024*1024);
3458  av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3459  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3460  display_aspect_ratio.num, display_aspect_ratio.den);
3461  }
3462  if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3463  if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3464  print_fps(av_q2d(st->avg_frame_rate), "fps");
3465 #if FF_API_R_FRAME_RATE
3466  if(st->r_frame_rate.den && st->r_frame_rate.num)
3467  print_fps(av_q2d(st->r_frame_rate), "tbr");
3468 #endif
3469  if(st->time_base.den && st->time_base.num)
3470  print_fps(1/av_q2d(st->time_base), "tbn");
3471  if(st->codec->time_base.den && st->codec->time_base.num)
3472  print_fps(1/av_q2d(st->codec->time_base), "tbc");
3473  }
3474  if (st->disposition & AV_DISPOSITION_DEFAULT)
3475  av_log(NULL, AV_LOG_INFO, " (default)");
3476  if (st->disposition & AV_DISPOSITION_DUB)
3477  av_log(NULL, AV_LOG_INFO, " (dub)");
3478  if (st->disposition & AV_DISPOSITION_ORIGINAL)
3479  av_log(NULL, AV_LOG_INFO, " (original)");
3480  if (st->disposition & AV_DISPOSITION_COMMENT)
3481  av_log(NULL, AV_LOG_INFO, " (comment)");
3482  if (st->disposition & AV_DISPOSITION_LYRICS)
3483  av_log(NULL, AV_LOG_INFO, " (lyrics)");
3484  if (st->disposition & AV_DISPOSITION_KARAOKE)
3485  av_log(NULL, AV_LOG_INFO, " (karaoke)");
3486  if (st->disposition & AV_DISPOSITION_FORCED)
3487  av_log(NULL, AV_LOG_INFO, " (forced)");
3488  if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3489  av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3490  if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3491  av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3492  if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3493  av_log(NULL, AV_LOG_INFO, " (clean effects)");
3494  av_log(NULL, AV_LOG_INFO, "\n");
3495  dump_metadata(NULL, st->metadata, " ");
3496 }
3497 
3498 void av_dump_format(AVFormatContext *ic,
3499  int index,
3500  const char *url,
3501  int is_output)
3502 {
3503  int i;
3504  uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3505  if (ic->nb_streams && !printed)
3506  return;
3507 
3508  av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3509  is_output ? "Output" : "Input",
3510  index,
3511  is_output ? ic->oformat->name : ic->iformat->name,
3512  is_output ? "to" : "from", url);
3513  dump_metadata(NULL, ic->metadata, " ");
3514  if (!is_output) {
3515  av_log(NULL, AV_LOG_INFO, " Duration: ");
3516  if (ic->duration != AV_NOPTS_VALUE) {
3517  int hours, mins, secs, us;
3518  int64_t duration = ic->duration + 5000;
3519  secs = duration / AV_TIME_BASE;
3520  us = duration % AV_TIME_BASE;
3521  mins = secs / 60;
3522  secs %= 60;
3523  hours = mins / 60;
3524  mins %= 60;
3525  av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3526  (100 * us) / AV_TIME_BASE);
3527  } else {
3528  av_log(NULL, AV_LOG_INFO, "N/A");
3529  }
3530  if (ic->start_time != AV_NOPTS_VALUE) {
3531  int secs, us;
3532  av_log(NULL, AV_LOG_INFO, ", start: ");
3533  secs = ic->start_time / AV_TIME_BASE;
3534  us = abs(ic->start_time % AV_TIME_BASE);
3535  av_log(NULL, AV_LOG_INFO, "%d.%06d",
3536  secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3537  }
3538  av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3539  if (ic->bit_rate) {
3540  av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3541  } else {
3542  av_log(NULL, AV_LOG_INFO, "N/A");
3543  }
3544  av_log(NULL, AV_LOG_INFO, "\n");
3545  }
3546  for (i = 0; i < ic->nb_chapters; i++) {
3547  AVChapter *ch = ic->chapters[i];
3548  av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3549  av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3550  av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3551 
3552  dump_metadata(NULL, ch->metadata, " ");
3553  }
3554  if(ic->nb_programs) {
3555  int j, k, total = 0;
3556  for(j=0; j<ic->nb_programs; j++) {
3557  AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3558  "name", NULL, 0);
3559  av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3560  name ? name->value : "");
3561  dump_metadata(NULL, ic->programs[j]->metadata, " ");
3562  for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3563  dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3564  printed[ic->programs[j]->stream_index[k]] = 1;
3565  }
3566  total += ic->programs[j]->nb_stream_indexes;
3567  }
3568  if (total < ic->nb_streams)
3569  av_log(NULL, AV_LOG_INFO, " No Program\n");
3570  }
3571  for(i=0;i<ic->nb_streams;i++)
3572  if (!printed[i])
3573  dump_stream_format(ic, i, index, is_output);
3574 
3575  av_free(printed);
3576 }
3577 
3578 uint64_t ff_ntp_time(void)
3579 {
3580  return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3581 }
3582 
3583 int av_get_frame_filename(char *buf, int buf_size,
3584  const char *path, int number)
3585 {
3586  const char *p;
3587  char *q, buf1[20], c;
3588  int nd, len, percentd_found;
3589 
3590  q = buf;
3591  p = path;
3592  percentd_found = 0;
3593  for(;;) {
3594  c = *p++;
3595  if (c == '\0')
3596  break;
3597  if (c == '%') {
3598  do {
3599  nd = 0;
3600  while (av_isdigit(*p)) {
3601  nd = nd * 10 + *p++ - '0';
3602  }
3603  c = *p++;
3604  } while (av_isdigit(c));
3605 
3606  switch(c) {
3607  case '%':
3608  goto addchar;
3609  case 'd':
3610  if (percentd_found)
3611  goto fail;
3612  percentd_found = 1;
3613  snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3614  len = strlen(buf1);
3615  if ((q - buf + len) > buf_size - 1)
3616  goto fail;
3617  memcpy(q, buf1, len);
3618  q += len;
3619  break;
3620  default:
3621  goto fail;
3622  }
3623  } else {
3624  addchar:
3625  if ((q - buf) < buf_size - 1)
3626  *q++ = c;
3627  }
3628  }
3629  if (!percentd_found)
3630  goto fail;
3631  *q = '\0';
3632  return 0;
3633  fail:
3634  *q = '\0';
3635  return -1;
3636 }
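/*
 * Usage sketch (not part of utils.c): expanding an image-sequence pattern
 * with av_get_frame_filename().  The pattern must contain exactly one
 * "%d"-style placeholder; the names used here are illustrative.
 */
static void example_frame_filename(void)
{
    char name[1024];

    if (av_get_frame_filename(name, sizeof(name), "frame%05d.png", 42) == 0) {
        /* name now holds "frame00042.png" */
    }
}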
3637 
3638 static void hex_dump_internal(void *avcl, FILE *f, int level,
3639  const uint8_t *buf, int size)
3640 {
3641  int len, i, j, c;
3642 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3643 
3644  for(i=0;i<size;i+=16) {
3645  len = size - i;
3646  if (len > 16)
3647  len = 16;
3648  PRINT("%08x ", i);
3649  for(j=0;j<16;j++) {
3650  if (j < len)
3651  PRINT(" %02x", buf[i+j]);
3652  else
3653  PRINT(" ");
3654  }
3655  PRINT(" ");
3656  for(j=0;j<len;j++) {
3657  c = buf[i+j];
3658  if (c < ' ' || c > '~')
3659  c = '.';
3660  PRINT("%c", c);
3661  }
3662  PRINT("\n");
3663  }
3664 #undef PRINT
3665 }
3666 
3667 void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3668 {
3669  hex_dump_internal(NULL, f, 0, buf, size);
3670 }
3671 
3672 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3673 {
3674  hex_dump_internal(avcl, NULL, level, buf, size);
3675 }
3676 
3677 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3678 {
3679 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3680  PRINT("stream #%d:\n", pkt->stream_index);
3681  PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3682  PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3683  /* DTS is _always_ valid after av_read_frame() */
3684  PRINT(" dts=");
3685  if (pkt->dts == AV_NOPTS_VALUE)
3686  PRINT("N/A");
3687  else
3688  PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3689  /* PTS may not be known if B-frames are present. */
3690  PRINT(" pts=");
3691  if (pkt->pts == AV_NOPTS_VALUE)
3692  PRINT("N/A");
3693  else
3694  PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3695  PRINT("\n");
3696  PRINT(" size=%d\n", pkt->size);
3697 #undef PRINT
3698  if (dump_payload)
3699  av_hex_dump(f, pkt->data, pkt->size);
3700 }
3701 
3702 #if FF_API_PKT_DUMP
3703 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3704 {
3705  AVRational tb = { 1, AV_TIME_BASE };
3706  pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3707 }
3708 #endif
3709 
3710 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3711 {
3712  pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3713 }
3714 
3715 #if FF_API_PKT_DUMP
3716 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3717 {
3718  AVRational tb = { 1, AV_TIME_BASE };
3719  pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3720 }
3721 #endif
3722 
3723 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3724  AVStream *st)
3725 {
3726  pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3727 }
3728 
3729 void av_url_split(char *proto, int proto_size,
3730  char *authorization, int authorization_size,
3731  char *hostname, int hostname_size,
3732  int *port_ptr,
3733  char *path, int path_size,
3734  const char *url)
3735 {
3736  const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3737 
3738  if (port_ptr) *port_ptr = -1;
3739  if (proto_size > 0) proto[0] = 0;
3740  if (authorization_size > 0) authorization[0] = 0;
3741  if (hostname_size > 0) hostname[0] = 0;
3742  if (path_size > 0) path[0] = 0;
3743 
3744  /* parse protocol */
3745  if ((p = strchr(url, ':'))) {
3746  av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3747  p++; /* skip ':' */
3748  if (*p == '/') p++;
3749  if (*p == '/') p++;
3750  } else {
3751  /* no protocol means plain filename */
3752  av_strlcpy(path, url, path_size);
3753  return;
3754  }
3755 
3756  /* separate path from hostname */
3757  ls = strchr(p, '/');
3758  ls2 = strchr(p, '?');
3759  if(!ls)
3760  ls = ls2;
3761  else if (ls && ls2)
3762  ls = FFMIN(ls, ls2);
3763  if(ls)
3764  av_strlcpy(path, ls, path_size);
3765  else
3766  ls = &p[strlen(p)]; // XXX
3767 
3768  /* the rest is hostname, use that to parse auth/port */
3769  if (ls != p) {
3770  /* authorization (user[:pass]@hostname) */
3771  at2 = p;
3772  while ((at = strchr(p, '@')) && at < ls) {
3773  av_strlcpy(authorization, at2,
3774  FFMIN(authorization_size, at + 1 - at2));
3775  p = at + 1; /* skip '@' */
3776  }
3777 
3778  if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3779  /* [host]:port */
3780  av_strlcpy(hostname, p + 1,
3781  FFMIN(hostname_size, brk - p));
3782  if (brk[1] == ':' && port_ptr)
3783  *port_ptr = atoi(brk + 2);
3784  } else if ((col = strchr(p, ':')) && col < ls) {
3785  av_strlcpy(hostname, p,
3786  FFMIN(col + 1 - p, hostname_size));
3787  if (port_ptr) *port_ptr = atoi(col + 1);
3788  } else
3789  av_strlcpy(hostname, p,
3790  FFMIN(ls + 1 - p, hostname_size));
3791  }
3792 }
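/*
 * Example (editor's sketch, not part of utils.c): splitting a URL into its
 * components with av_url_split(). The buffer sizes are arbitrary; a missing
 * component leaves the corresponding buffer empty and the port set to -1.
 *
 * @code
 * char proto[16], auth[64], host[128], path[256];
 * int port;
 *
 * av_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *              host, sizeof(host), &port, path, sizeof(path),
 *              "rtsp://user:pass@example.com:8554/live.sdp?tcp");
 * // proto="rtsp", auth="user:pass", host="example.com",
 * // port=8554, path="/live.sdp?tcp"
 * @endcode
 */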
3793 
3794 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3795 {
3796  int i;
3797  static const char hex_table_uc[16] = { '0', '1', '2', '3',
3798  '4', '5', '6', '7',
3799  '8', '9', 'A', 'B',
3800  'C', 'D', 'E', 'F' };
3801  static const char hex_table_lc[16] = { '0', '1', '2', '3',
3802  '4', '5', '6', '7',
3803  '8', '9', 'a', 'b',
3804  'c', 'd', 'e', 'f' };
3805  const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3806 
3807  for(i = 0; i < s; i++) {
3808  buff[i * 2] = hex_table[src[i] >> 4];
3809  buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3810  }
3811 
3812  return buff;
3813 }
3814 
3815 int ff_hex_to_data(uint8_t *data, const char *p)
3816 {
3817  int c, len, v;
3818 
3819  len = 0;
3820  v = 1;
3821  for (;;) {
3822  p += strspn(p, SPACE_CHARS);
3823  if (*p == '\0')
3824  break;
3825  c = av_toupper((unsigned char) *p++);
3826  if (c >= '0' && c <= '9')
3827  c = c - '0';
3828  else if (c >= 'A' && c <= 'F')
3829  c = c - 'A' + 10;
3830  else
3831  break;
3832  v = (v << 4) | c;
3833  if (v & 0x100) {
3834  if (data)
3835  data[len] = v;
3836  len++;
3837  v = 1;
3838  }
3839  }
3840  return len;
3841 }
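/*
 * Example (editor's sketch, not part of utils.c): round-tripping a binary
 * buffer through the internal helpers ff_data_to_hex() and ff_hex_to_data()
 * (declared in internal.h, not installed). Note that ff_data_to_hex() does
 * not write a terminating NUL, so the caller has to add one.
 *
 * @code
 * uint8_t bin[4] = { 0xde, 0xad, 0xbe, 0xef };
 * char    hex[2 * sizeof(bin) + 1];
 * uint8_t out[4];
 *
 * ff_data_to_hex(hex, bin, sizeof(bin), 1);
 * hex[2 * sizeof(bin)] = '\0';          // "deadbeef"
 * int len = ff_hex_to_data(out, hex);   // len == 4, out == bin
 * @endcode
 */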
3842 
3843 #if FF_API_SET_PTS_INFO
3844 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3845  unsigned int pts_num, unsigned int pts_den)
3846 {
3847  avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
3848 }
3849 #endif
3850 
3851 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3852  unsigned int pts_num, unsigned int pts_den)
3853 {
3854  AVRational new_tb;
3855  if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3856  if(new_tb.num != pts_num)
3857  av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3858  }else
3859  av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3860 
3861  if(new_tb.num <= 0 || new_tb.den <= 0) {
3862  av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3863  return;
3864  }
3865  s->time_base = new_tb;
3866  av_codec_set_pkt_timebase(s->codec, new_tb);
3867  s->pts_wrap_bits = pts_wrap_bits;
3868 }
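/*
 * Example (editor's sketch, not part of utils.c): a (de)muxer typically calls
 * avpriv_set_pts_info() right after creating a stream, e.g. to use MPEG-TS
 * style 33-bit, 90 kHz timestamps.
 *
 * @code
 * AVStream *st = avformat_new_stream(s, NULL);
 * if (!st)
 *     return AVERROR(ENOMEM);
 * avpriv_set_pts_info(st, 33, 1, 90000);  // time_base = 1/90000, 33 wrap bits
 * @endcode
 */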
3869 
3870 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3871  void *context)
3872 {
3873  const char *ptr = str;
3874 
3875  /* Parse key=value pairs. */
3876  for (;;) {
3877  const char *key;
3878  char *dest = NULL, *dest_end;
3879  int key_len, dest_len = 0;
3880 
3881  /* Skip whitespace and potential commas. */
3882  while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3883  ptr++;
3884  if (!*ptr)
3885  break;
3886 
3887  key = ptr;
3888 
3889  if (!(ptr = strchr(key, '=')))
3890  break;
3891  ptr++;
3892  key_len = ptr - key;
3893 
3894  callback_get_buf(context, key, key_len, &dest, &dest_len);
3895  dest_end = dest + dest_len - 1;
3896 
3897  if (*ptr == '\"') {
3898  ptr++;
3899  while (*ptr && *ptr != '\"') {
3900  if (*ptr == '\\') {
3901  if (!ptr[1])
3902  break;
3903  if (dest && dest < dest_end)
3904  *dest++ = ptr[1];
3905  ptr += 2;
3906  } else {
3907  if (dest && dest < dest_end)
3908  *dest++ = *ptr;
3909  ptr++;
3910  }
3911  }
3912  if (*ptr == '\"')
3913  ptr++;
3914  } else {
3915  for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3916  if (dest && dest < dest_end)
3917  *dest++ = *ptr;
3918  }
3919  if (dest)
3920  *dest = 0;
3921  }
3922 }
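/*
 * Example (editor's sketch, not part of utils.c): using the internal
 * ff_parse_key_value() helper. The callback chooses a destination buffer for
 * each key it recognizes; keys it ignores get no buffer and their values are
 * skipped. DemoCtx and demo_cb are hypothetical names; the callback signature
 * follows the ff_parse_key_val_cb typedef in internal.h.
 *
 * @code
 * typedef struct { char user[64]; char realm[64]; } DemoCtx;
 *
 * static void demo_cb(void *context, const char *key, int key_len,
 *                     char **dest, int *dest_len)
 * {
 *     DemoCtx *ctx = context;
 *     if (!strncmp(key, "username=", key_len)) {
 *         *dest     = ctx->user;
 *         *dest_len = sizeof(ctx->user);
 *     } else if (!strncmp(key, "realm=", key_len)) {
 *         *dest     = ctx->realm;
 *         *dest_len = sizeof(ctx->realm);
 *     }
 * }
 *
 * DemoCtx ctx = { { 0 } };
 * ff_parse_key_value("username=\"alice\", realm=\"demo\"", demo_cb, &ctx);
 * // ctx.user == "alice", ctx.realm == "demo"
 * @endcode
 */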
3923 
3924 int ff_find_stream_index(AVFormatContext *s, int id)
3925 {
3926  int i;
3927  for (i = 0; i < s->nb_streams; i++) {
3928  if (s->streams[i]->id == id)
3929  return i;
3930  }
3931  return -1;
3932 }
3933 
3934 int64_t ff_iso8601_to_unix_time(const char *datestr)
3935 {
3936  struct tm time1 = {0}, time2 = {0};
3937  char *ret1, *ret2;
3938  ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
3939  ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
3940  if (ret2 && !ret1)
3941  return av_timegm(&time2);
3942  else
3943  return av_timegm(&time1);
3944 }
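/*
 * Example (editor's sketch, not part of utils.c): this internal helper accepts
 * both "YYYY-MM-DD hh:mm:ss" and the ISO 8601 "YYYY-MM-DDThh:mm:ss" forms and
 * interprets them as UTC.
 *
 * @code
 * int64_t t = ff_iso8601_to_unix_time("2012-07-01T00:00:00");
 * // t == 1341100800 (seconds since the Unix epoch)
 * @endcode
 */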
3945 
3946 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
3947 {
3948  if (ofmt) {
3949  if (ofmt->query_codec)
3950  return ofmt->query_codec(codec_id, std_compliance);
3951  else if (ofmt->codec_tag)
3952  return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3953  else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
3954  codec_id == ofmt->subtitle_codec)
3955  return 1;
3956  }
3957  return AVERROR_PATCHWELCOME;
3958 }
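/*
 * Example (editor's sketch, not part of utils.c): asking a muxer whether it
 * can store a given codec before adding the stream.
 *
 * @code
 * AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 * if (ofmt &&
 *     avformat_query_codec(ofmt, AV_CODEC_ID_H264, FF_COMPLIANCE_NORMAL) == 1)
 *     av_log(NULL, AV_LOG_INFO, "mp4 can contain H.264\n");
 * @endcode
 */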
3959 
3960 int avformat_network_init(void)
3961 {
3962 #if CONFIG_NETWORK
3963  int ret;
3964  ff_network_inited_globally = 1;
3965  if ((ret = ff_network_init()) < 0)
3966  return ret;
3967  ff_tls_init();
3968 #endif
3969  return 0;
3970 }
3971 
3972 int avformat_network_deinit(void)
3973 {
3974 #if CONFIG_NETWORK
3975  ff_network_close();
3976  ff_tls_deinit();
3977 #endif
3978  return 0;
3979 }
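/*
 * Example (editor's sketch, not part of utils.c): applications that use
 * network protocols are expected to bracket their libavformat usage with
 * these calls (they set up TLS libraries and, on Windows, WinSock).
 *
 * @code
 * avformat_network_init();
 * // ... open and read network streams ...
 * avformat_network_deinit();
 * @endcode
 */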
3980 
3981 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3982  uint64_t channel_layout, int32_t sample_rate,
3983  int width, int height)
3984 {
3985  uint32_t flags = 0;
3986  int size = 4;
3987  uint8_t *data;
3988  if (!pkt)
3989  return AVERROR(EINVAL);
3990  if (channels) {
3991  size += 4;
3992  flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3993  }
3994  if (channel_layout) {
3995  size += 8;
3996  flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3997  }
3998  if (sample_rate) {
3999  size += 4;
4000  flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4001  }
4002  if (width || height) {
4003  size += 8;
4004  flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4005  }
4006  data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4007  if (!data)
4008  return AVERROR(ENOMEM);
4009  bytestream_put_le32(&data, flags);
4010  if (channels)
4011  bytestream_put_le32(&data, channels);
4012  if (channel_layout)
4013  bytestream_put_le64(&data, channel_layout);
4014  if (sample_rate)
4015  bytestream_put_le32(&data, sample_rate);
4016  if (width || height) {
4017  bytestream_put_le32(&data, width);
4018  bytestream_put_le32(&data, height);
4019  }
4020  return 0;
4021 }
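/*
 * Example (editor's sketch, not part of utils.c): a demuxer that detects a
 * mid-stream audio parameter switch can attach AV_PKT_DATA_PARAM_CHANGE side
 * data to the next packet via this internal helper; passing 0 for a parameter
 * leaves it out of the side data. "pkt" is the packet being returned.
 *
 * @code
 * // announce a switch to 48 kHz stereo on this packet
 * int ret = ff_add_param_change(pkt, 2, AV_CH_LAYOUT_STEREO, 48000, 0, 0);
 * if (ret < 0)
 *     return ret;
 * @endcode
 */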
4022 
4023 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4024 {
4025  AVRational undef = {0, 1};
4026  AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4027  AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4028  AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4029 
4030  av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4031  stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4032  if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4033  stream_sample_aspect_ratio = undef;
4034 
4035  av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4036  frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4037  if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4038  frame_sample_aspect_ratio = undef;
4039 
4040  if (stream_sample_aspect_ratio.num)
4041  return stream_sample_aspect_ratio;
4042  else
4043  return frame_sample_aspect_ratio;
4044 }
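/*
 * Example (editor's sketch, not part of utils.c): computing the display
 * aspect ratio of a decoded frame, preferring the container SAR over the
 * codec/frame SAR as this function does. "ic", "st" and "frame" are assumed
 * to come from the caller's demuxing/decoding loop.
 *
 * @code
 * AVRational sar = av_guess_sample_aspect_ratio(ic, st, frame);
 * AVRational dar = { 0, 1 };
 * if (sar.num)
 *     av_reduce(&dar.num, &dar.den,
 *               frame->width  * (int64_t)sar.num,
 *               frame->height * (int64_t)sar.den, 1024 * 1024);
 * @endcode
 */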
4045 
4046 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4047 {
4048  AVRational fr = st->r_frame_rate;
4049 
4050  if (st->codec->ticks_per_frame > 1) {
4051  AVRational codec_fr = av_inv_q(st->codec->time_base);
4052  AVRational avg_fr = st->avg_frame_rate;
4053  codec_fr.den *= st->codec->ticks_per_frame;
4054  if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4055  && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4056  fr = codec_fr;
4057  }
4058 
4059  return fr;
4060 }
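/*
 * Example (editor's sketch, not part of utils.c): querying the sanitized
 * frame rate of a stream; the function falls back from r_frame_rate to the
 * codec time base when the average frame rate suggests r_frame_rate is a
 * field rate rather than a frame rate. The frame argument is unused here and
 * may be NULL.
 *
 * @code
 * AVRational fr = av_guess_frame_rate(ic, ic->streams[0], NULL);
 * av_log(NULL, AV_LOG_INFO, "guessed frame rate: %d/%d (%.3f fps)\n",
 *        fr.num, fr.den, av_q2d(fr));
 * @endcode
 */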
4061 
4062 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4063  const char *spec)
4064 {
4065  if (*spec <= '9' && *spec >= '0') /* opt:index */
4066  return strtol(spec, NULL, 0) == st->index;
4067  else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4068  *spec == 't') { /* opt:[vasdt] */
4069  enum AVMediaType type;
4070 
4071  switch (*spec++) {
4072  case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4073  case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4074  case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4075  case 'd': type = AVMEDIA_TYPE_DATA; break;
4076  case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4077  default: av_assert0(0);
4078  }
4079  if (type != st->codec->codec_type)
4080  return 0;
4081  if (*spec++ == ':') { /* possibly followed by :index */
4082  int i, index = strtol(spec, NULL, 0);
4083  for (i = 0; i < s->nb_streams; i++)
4084  if (s->streams[i]->codec->codec_type == type && index-- == 0)
4085  return i == st->index;
4086  return 0;
4087  }
4088  return 1;
4089  } else if (*spec == 'p' && *(spec + 1) == ':') {
4090  int prog_id, i, j;
4091  char *endptr;
4092  spec += 2;
4093  prog_id = strtol(spec, &endptr, 0);
4094  for (i = 0; i < s->nb_programs; i++) {
4095  if (s->programs[i]->id != prog_id)
4096  continue;
4097