FFmpeg
dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include <time.h>
24 #include "libavutil/bprint.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/time.h"
28 #include "libavutil/parseutils.h"
29 #include "internal.h"
30 #include "avio_internal.h"
31 #include "dash.h"
32 #include "demux.h"
33 #include "url.h"
34 
35 #define INITIAL_BUFFER_SIZE 32768
36 
37 struct fragment {
38  int64_t url_offset;
39  int64_t size;
40  char *url;
41 };
42 
43 /*
44  * reference to : ISO_IEC_23009-1-DASH-2012
45  * Section: 5.3.9.6.2
46  * Table: Table 17 — Semantics of SegmentTimeline element
47  * */
48 struct timeline {
49  /* starttime: Element or Attribute Name
50  * specifies the MPD start time, in @timescale units, at which the first
51  * Segment in this series starts, relative to the beginning of the Period.
52  * The value of this attribute must be equal to or greater than the sum of the previous S
53  * element earliest presentation time and the sum of the contiguous Segment durations.
54  * If the value of the attribute is greater than what is expressed by the previous S element,
55  * it expresses discontinuities in the timeline.
56  * If not present then the value shall be assumed to be zero for the first S element
57  * and for the subsequent S elements, the value shall be assumed to be the sum of
58  * the previous S element's earliest presentation time and contiguous duration
59  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
60  * */
61  int64_t starttime;
62  /* repeat: Element or Attribute Name
63  * specifies the repeat count of the number of following contiguous Segments with
64  * the same duration expressed by the value of @duration. This value is zero-based
65  * (e.g. a value of three means four Segments in the contiguous series).
66  * */
67  int64_t repeat;
68  /* duration: Element or Attribute Name
69  * specifies the Segment duration, in units of the value of the @timescale.
70  * */
71  int64_t duration;
72 };
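/*
 * For example, with @timescale = 90000 a SegmentTimeline of
 *
 *     <SegmentTimeline>
 *         <S t="0" d="180000" r="2"/>
 *         <S d="90000"/>
 *     </SegmentTimeline>
 *
 * expands to four segments: three 2-second segments starting at 0, 180000
 * and 360000, then one 1-second segment starting at 540000. In this demuxer
 * that corresponds roughly to two timeline entries,
 * { .starttime = 0, .repeat = 2, .duration = 180000 } and
 * { .starttime = 0, .repeat = 0, .duration = 90000 }.
 */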
73 
74 /*
75  * Each playlist has its own demuxer. If it is currently active,
76  * it has an opened AVIOContext too, and potentially an AVPacket
77  * containing the next packet from this stream.
78  */
79 struct representation {
80  char *url_template;
81  FFIOContext pb;
82  AVIOContext *input;
83  AVFormatContext *parent;
84  AVFormatContext *ctx;
85  int stream_index;
86 
87  char *id;
88  char *lang;
89  int bandwidth;
90  AVRational framerate;
91  AVStream *assoc_stream; /* demuxer stream associated with this representation */
92 
93  int n_fragments;
94  struct fragment **fragments; /* VOD list of fragment for profile */
95 
96  int n_timelines;
97  struct timeline **timelines;
98 
99  int64_t first_seq_no;
100  int64_t last_seq_no;
101  int64_t start_number; /* for dynamic segment lists: used to tell which segments are new */
102 
103  int64_t fragment_duration;
104  int64_t fragment_timescale;
105 
106  int64_t presentation_timeoffset;
107 
108  int64_t cur_seq_no;
109  int64_t cur_seg_offset;
110  int64_t cur_seg_size;
111  struct fragment *cur_seg;
112 
113  /* Currently active Media Initialization Section */
114  struct fragment *init_section;
115  uint8_t *init_sec_buf;
116  uint32_t init_sec_buf_size;
117  uint32_t init_sec_data_len;
118  uint32_t init_sec_buf_read_offset;
119  int64_t cur_timestamp;
120  int is_restart_needed;
121 };
122 
123 typedef struct DASHContext {
124  const AVClass *class;
125  char *base_url;
126 
127  int n_videos;
128  struct representation **videos;
129  int n_audios;
130  struct representation **audios;
131  int n_subtitles;
132  struct representation **subtitles;
133 
134  /* MediaPresentationDescription Attribute */
135  uint64_t media_presentation_duration;
136  uint64_t suggested_presentation_delay;
137  uint64_t availability_start_time;
138  uint64_t availability_end_time;
139  uint64_t publish_time;
140  uint64_t minimum_update_period;
141  uint64_t time_shift_buffer_depth;
142  uint64_t min_buffer_time;
143 
144  /* Period Attribute */
145  uint64_t period_duration;
146  uint64_t period_start;
147 
148  /* AdaptationSet Attribute */
149  char *adaptionset_lang;
150 
151  int is_live;
152  AVIOInterruptCB *interrupt_callback;
153  char *allowed_extensions;
154  AVDictionary *avio_opts;
155  int max_url_size;
156  char *cenc_decryption_key;
157 
158  /* Flags for init section*/
159  int is_init_section_common_video;
160  int is_init_section_common_audio;
161  int is_init_section_common_subtitle;
162 
163 } DASHContext;
164 
165 static int ishttp(char *url)
166 {
167  const char *proto_name = avio_find_protocol_name(url);
168  return proto_name && av_strstart(proto_name, "http", NULL);
169 }
170 
171 static int aligned(int val)
172 {
173  return ((val + 0x3F) >> 6) << 6;
174 }
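/*
 * aligned() rounds a size up to the next multiple of 64, e.g.
 * aligned(64) == 64, aligned(65) == 128, aligned(100) == 128; it is used
 * below to pad the buffers that hold constructed URLs.
 */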
175 
176 static uint64_t get_current_time_in_sec(void)
177 {
178  return av_gettime() / 1000000;
179 }
180 
181 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
182 {
183  struct tm timeinfo;
184  int year = 0;
185  int month = 0;
186  int day = 0;
187  int hour = 0;
188  int minute = 0;
189  int ret = 0;
190  float second = 0.0;
191 
192  /* ISO-8601 date parser */
193  if (!datetime)
194  return 0;
195 
196  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
197  /* year, month, day, hour, minute, second 6 arguments */
198  if (ret != 6) {
199  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec get a wrong time format\n");
200  }
201  timeinfo.tm_year = year - 1900;
202  timeinfo.tm_mon = month - 1;
203  timeinfo.tm_mday = day;
204  timeinfo.tm_hour = hour;
205  timeinfo.tm_min = minute;
206  timeinfo.tm_sec = (int)second;
207 
208  return av_timegm(&timeinfo);
209 }
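/*
 * For example, "2018-03-01T12:30:45Z" is scanned into year/month/day and
 * hour/minute/second, converted with av_timegm() and returned as Unix time
 * in seconds; fractional seconds are truncated. A string that does not
 * match the pattern only triggers a warning, so the (partly zero) fields
 * are converted anyway.
 */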
210 
211 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
212 {
213  /* ISO-8601 duration parser */
214  uint32_t days = 0;
215  uint32_t hours = 0;
216  uint32_t mins = 0;
217  uint32_t secs = 0;
218  int size = 0;
219  float value = 0;
220  char type = '\0';
221  const char *ptr = duration;
222 
223  while (*ptr) {
224  if (*ptr == 'P' || *ptr == 'T') {
225  ptr++;
226  continue;
227  }
228 
229  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
230  av_log(s, AV_LOG_WARNING, "get_duration_insec get a wrong time format\n");
231  return 0; /* parser error */
232  }
233  switch (type) {
234  case 'D':
235  days = (uint32_t)value;
236  break;
237  case 'H':
238  hours = (uint32_t)value;
239  break;
240  case 'M':
241  mins = (uint32_t)value;
242  break;
243  case 'S':
244  secs = (uint32_t)value;
245  break;
246  default:
247  // handle invalid type
248  break;
249  }
250  ptr += size;
251  }
252  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
253 }
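/*
 * For example, "P1DT2H3M4.5S" (the 'P' and 'T' designators are skipped)
 * yields days = 1, hours = 2, mins = 3, secs = 4 and therefore
 * ((1 * 24 + 2) * 60 + 3) * 60 + 4 = 93784 seconds; each component is
 * truncated to an integer, so fractional seconds are dropped.
 */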
254 
255 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
256 {
257  int64_t start_time = 0;
258  int64_t i = 0;
259  int64_t j = 0;
260  int64_t num = 0;
261 
262  if (pls->n_timelines) {
263  for (i = 0; i < pls->n_timelines; i++) {
264  if (pls->timelines[i]->starttime > 0) {
265  start_time = pls->timelines[i]->starttime;
266  }
267  if (num == cur_seq_no)
268  goto finish;
269 
270  start_time += pls->timelines[i]->duration;
271 
272  if (pls->timelines[i]->repeat == -1) {
273  start_time = pls->timelines[i]->duration * cur_seq_no;
274  goto finish;
275  }
276 
277  for (j = 0; j < pls->timelines[i]->repeat; j++) {
278  num++;
279  if (num == cur_seq_no)
280  goto finish;
281  start_time += pls->timelines[i]->duration;
282  }
283  num++;
284  }
285  }
286 finish:
287  return start_time;
288 }
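/*
 * Continuing the SegmentTimeline example above ({t=0, d=180000, r=2}
 * followed by {d=90000}), a cur_seq_no of 3 walks past the three repeated
 * 2-second segments and returns 540000, the start time of the fourth
 * segment in @timescale units.
 */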
289 
290 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
291 {
292  int64_t i = 0;
293  int64_t j = 0;
294  int64_t num = 0;
295  int64_t start_time = 0;
296 
297  for (i = 0; i < pls->n_timelines; i++) {
298  if (pls->timelines[i]->starttime > 0) {
299  start_time = pls->timelines[i]->starttime;
300  }
301  if (start_time > cur_time)
302  goto finish;
303 
304  start_time += pls->timelines[i]->duration;
305  for (j = 0; j < pls->timelines[i]->repeat; j++) {
306  num++;
307  if (start_time > cur_time)
308  goto finish;
309  start_time += pls->timelines[i]->duration;
310  }
311  num++;
312  }
313 
314  return -1;
315 
316 finish:
317  return num;
318 }
319 
320 static void free_fragment(struct fragment **seg)
321 {
322  if (!(*seg)) {
323  return;
324  }
325  av_freep(&(*seg)->url);
326  av_freep(seg);
327 }
328 
329 static void free_fragment_list(struct representation *pls)
330 {
331  int i;
332 
333  for (i = 0; i < pls->n_fragments; i++) {
334  free_fragment(&pls->fragments[i]);
335  }
336  av_freep(&pls->fragments);
337  pls->n_fragments = 0;
338 }
339 
340 static void free_timelines_list(struct representation *pls)
341 {
342  int i;
343 
344  for (i = 0; i < pls->n_timelines; i++) {
345  av_freep(&pls->timelines[i]);
346  }
347  av_freep(&pls->timelines);
348  pls->n_timelines = 0;
349 }
350 
351 static void free_representation(struct representation *pls)
352 {
353  free_fragment_list(pls);
354  free_timelines_list(pls);
355  free_fragment(&pls->cur_seg);
356  free_fragment(&pls->init_section);
357  av_freep(&pls->init_sec_buf);
358  av_freep(&pls->pb.pub.buffer);
359  ff_format_io_close(pls->parent, &pls->input);
360  if (pls->ctx) {
361  pls->ctx->pb = NULL;
362  avformat_close_input(&pls->ctx);
363  }
364 
365  av_freep(&pls->url_template);
366  av_freep(&pls->lang);
367  av_freep(&pls->id);
368  av_freep(&pls);
369 }
370 
371 static void free_video_list(DASHContext *c)
372 {
373  int i;
374  for (i = 0; i < c->n_videos; i++) {
375  struct representation *pls = c->videos[i];
376  free_representation(pls);
377  }
378  av_freep(&c->videos);
379  c->n_videos = 0;
380 }
381 
382 static void free_audio_list(DASHContext *c)
383 {
384  int i;
385  for (i = 0; i < c->n_audios; i++) {
386  struct representation *pls = c->audios[i];
387  free_representation(pls);
388  }
389  av_freep(&c->audios);
390  c->n_audios = 0;
391 }
392 
393 static void free_subtitle_list(DASHContext *c)
394 {
395  int i;
396  for (i = 0; i < c->n_subtitles; i++) {
397  struct representation *pls = c->subtitles[i];
398  free_representation(pls);
399  }
400  av_freep(&c->subtitles);
401  c->n_subtitles = 0;
402 }
403 
404 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
405  AVDictionary **opts, AVDictionary *opts2, int *is_http)
406 {
407  DASHContext *c = s->priv_data;
408  AVDictionary *tmp = NULL;
409  const char *proto_name = NULL;
410  int proto_name_len;
411  int ret;
412 
413  if (av_strstart(url, "crypto", NULL)) {
414  if (url[6] == '+' || url[6] == ':')
415  proto_name = avio_find_protocol_name(url + 7);
416  }
417 
418  if (!proto_name)
419  proto_name = avio_find_protocol_name(url);
420 
421  if (!proto_name)
422  return AVERROR_INVALIDDATA;
423 
424  proto_name_len = strlen(proto_name);
425  // only http(s) & file are allowed
426  if (av_strstart(proto_name, "file", NULL)) {
427  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
428  av_log(s, AV_LOG_ERROR,
429  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
430  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
431  url);
432  return AVERROR_INVALIDDATA;
433  }
434  } else if (av_strstart(proto_name, "http", NULL)) {
435  ;
436  } else
437  return AVERROR_INVALIDDATA;
438 
439  if (!strncmp(proto_name, url, proto_name_len) && url[proto_name_len] == ':')
440  ;
441  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, proto_name_len) && url[7 + proto_name_len] == ':')
442  ;
443  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
444  return AVERROR_INVALIDDATA;
445 
446  av_freep(pb);
447  av_dict_copy(&tmp, *opts, 0);
448  av_dict_copy(&tmp, opts2, 0);
449  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
450  if (ret >= 0) {
451  // update cookies on http response with setcookies.
452  char *new_cookies = NULL;
453 
454  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
455  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
456 
457  if (new_cookies) {
458  av_dict_set(opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
459  }
460 
461  }
462 
463  av_dict_free(&tmp);
464 
465  if (is_http)
466  *is_http = av_strstart(proto_name, "http", NULL);
467 
468  return ret;
469 }
470 
471 static char *get_content_url(xmlNodePtr *baseurl_nodes,
472  int n_baseurl_nodes,
473  int max_url_size,
474  char *rep_id_val,
475  char *rep_bandwidth_val,
476  char *val)
477 {
478  int i;
479  char *text;
480  char *url = NULL;
481  char *tmp_str = av_mallocz(max_url_size);
482 
483  if (!tmp_str)
484  return NULL;
485 
486  for (i = 0; i < n_baseurl_nodes; ++i) {
487  if (baseurl_nodes[i] &&
488  baseurl_nodes[i]->children &&
489  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
490  text = xmlNodeGetContent(baseurl_nodes[i]->children);
491  if (text) {
492  memset(tmp_str, 0, max_url_size);
493  ff_make_absolute_url(tmp_str, max_url_size, "", text);
494  xmlFree(text);
495  }
496  }
497  }
498 
499  if (val)
500  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
501 
502  if (rep_id_val) {
503  url = av_strireplace(tmp_str, "$RepresentationID$", rep_id_val);
504  if (!url) {
505  goto end;
506  }
507  av_strlcpy(tmp_str, url, max_url_size);
508  }
509  if (rep_bandwidth_val && tmp_str[0] != '\0') {
510  // free any previously assigned url before reassigning
511  av_free(url);
512  url = av_strireplace(tmp_str, "$Bandwidth$", rep_bandwidth_val);
513  if (!url) {
514  goto end;
515  }
516  }
517 end:
518  av_free(tmp_str);
519  return url;
520 }
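/*
 * Roughly, with a resolved BaseURL of "http://example.com/dash/" (an
 * illustrative address) and val = "$RepresentationID$-init.m4s" for a
 * representation whose id is "video_1", this returns
 * "http://example.com/dash/video_1-init.m4s"; "$Bandwidth$" is substituted
 * the same way from the representation's bandwidth attribute.
 */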
521 
522 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
523 {
524  int i;
525  char *val;
526 
527  for (i = 0; i < n_nodes; ++i) {
528  if (nodes[i]) {
529  val = xmlGetProp(nodes[i], attrname);
530  if (val)
531  return val;
532  }
533  }
534 
535  return NULL;
536 }
537 
538 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
539 {
540  xmlNodePtr node = rootnode;
541  if (!node) {
542  return NULL;
543  }
544 
545  node = xmlFirstElementChild(node);
546  while (node) {
547  if (!av_strcasecmp(node->name, nodename)) {
548  return node;
549  }
550  node = xmlNextElementSibling(node);
551  }
552  return NULL;
553 }
554 
555 static enum AVMediaType get_content_type(xmlNodePtr node)
556 {
557  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
558  int i = 0;
559  const char *attr;
560  char *val = NULL;
561 
562  if (node) {
563  for (i = 0; i < 2; i++) {
564  attr = i ? "mimeType" : "contentType";
565  val = xmlGetProp(node, attr);
566  if (val) {
567  if (av_stristr(val, "video")) {
568  type = AVMEDIA_TYPE_VIDEO;
569  } else if (av_stristr(val, "audio")) {
570  type = AVMEDIA_TYPE_AUDIO;
571  } else if (av_stristr(val, "text")) {
572  type = AVMEDIA_TYPE_SUBTITLE;
573  }
574  xmlFree(val);
575  }
576  }
577  }
578  return type;
579 }
580 
581 static struct fragment *get_fragment(char *range)
582 {
583  struct fragment *seg = av_mallocz(sizeof(struct fragment));
584 
585  if (!seg)
586  return NULL;
587 
588  seg->size = -1;
589  if (range) {
590  char *str_end_offset;
591  char *str_offset = av_strtok(range, "-", &str_end_offset);
592  seg->url_offset = strtoll(str_offset, NULL, 10);
593  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset + 1;
594  }
595 
596  return seg;
597 }
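/*
 * For example, a SegmentURL/Initialization byte range of "500-999" parses
 * into url_offset = 500 and size = 999 - 500 + 1 = 500; with no range the
 * size stays -1, meaning "whole resource".
 */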
598 
599 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
600  xmlNodePtr fragmenturl_node,
601  xmlNodePtr *baseurl_nodes,
602  char *rep_id_val,
603  char *rep_bandwidth_val)
604 {
605  DASHContext *c = s->priv_data;
606  char *initialization_val = NULL;
607  char *media_val = NULL;
608  char *range_val = NULL;
609  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
610  int err;
611 
612  if (!av_strcasecmp(fragmenturl_node->name, "Initialization")) {
613  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
614  range_val = xmlGetProp(fragmenturl_node, "range");
615  if (initialization_val || range_val) {
616  free_fragment(&rep->init_section);
617  rep->init_section = get_fragment(range_val);
618  xmlFree(range_val);
619  if (!rep->init_section) {
620  xmlFree(initialization_val);
621  return AVERROR(ENOMEM);
622  }
623  rep->init_section->url = get_content_url(baseurl_nodes, 4,
624  max_url_size,
625  rep_id_val,
626  rep_bandwidth_val,
627  initialization_val);
628  xmlFree(initialization_val);
629  if (!rep->init_section->url) {
630  av_freep(&rep->init_section);
631  return AVERROR(ENOMEM);
632  }
633  }
634  } else if (!av_strcasecmp(fragmenturl_node->name, "SegmentURL")) {
635  media_val = xmlGetProp(fragmenturl_node, "media");
636  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
637  if (media_val || range_val) {
638  struct fragment *seg = get_fragment(range_val);
639  xmlFree(range_val);
640  if (!seg) {
641  xmlFree(media_val);
642  return AVERROR(ENOMEM);
643  }
644  seg->url = get_content_url(baseurl_nodes, 4,
645  max_url_size,
646  rep_id_val,
647  rep_bandwidth_val,
648  media_val);
649  xmlFree(media_val);
650  if (!seg->url) {
651  av_free(seg);
652  return AVERROR(ENOMEM);
653  }
654  err = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
655  if (err < 0) {
656  free_fragment(&seg);
657  return err;
658  }
659  }
660  }
661 
662  return 0;
663 }
664 
665 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
666  xmlNodePtr fragment_timeline_node)
667 {
668  xmlAttrPtr attr = NULL;
669  char *val = NULL;
670  int err;
671 
672  if (!av_strcasecmp(fragment_timeline_node->name, "S")) {
673  struct timeline *tml = av_mallocz(sizeof(struct timeline));
674  if (!tml) {
675  return AVERROR(ENOMEM);
676  }
677  attr = fragment_timeline_node->properties;
678  while (attr) {
679  val = xmlGetProp(fragment_timeline_node, attr->name);
680 
681  if (!val) {
682  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
683  continue;
684  }
685 
686  if (!av_strcasecmp(attr->name, "t")) {
687  tml->starttime = (int64_t)strtoll(val, NULL, 10);
688  } else if (!av_strcasecmp(attr->name, "r")) {
689  tml->repeat =(int64_t) strtoll(val, NULL, 10);
690  } else if (!av_strcasecmp(attr->name, "d")) {
691  tml->duration = (int64_t)strtoll(val, NULL, 10);
692  }
693  attr = attr->next;
694  xmlFree(val);
695  }
696  err = av_dynarray_add_nofree(&rep->timelines, &rep->n_timelines, tml);
697  if (err < 0) {
698  av_free(tml);
699  return err;
700  }
701  }
702 
703  return 0;
704 }
705 
706 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
707 {
708  char *tmp_str = NULL;
709  char *path = NULL;
710  char *mpdName = NULL;
711  xmlNodePtr node = NULL;
712  char *baseurl = NULL;
713  char *root_url = NULL;
714  char *text = NULL;
715  char *tmp = NULL;
716  int isRootHttp = 0;
717  char token ='/';
718  int start = 0;
719  int rootId = 0;
720  int updated = 0;
721  int size = 0;
722  int i;
723  int tmp_max_url_size = strlen(url);
724 
725  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
726  text = xmlNodeGetContent(baseurl_nodes[i]);
727  if (!text)
728  continue;
729  tmp_max_url_size += strlen(text);
730  if (ishttp(text)) {
731  xmlFree(text);
732  break;
733  }
734  xmlFree(text);
735  }
736 
737  tmp_max_url_size = aligned(tmp_max_url_size);
738  text = av_mallocz(tmp_max_url_size);
739  if (!text) {
740  updated = AVERROR(ENOMEM);
741  goto end;
742  }
743  av_strlcpy(text, url, strlen(url)+1);
744  tmp = text;
745  while (mpdName = av_strtok(tmp, "/", &tmp)) {
746  size = strlen(mpdName);
747  }
748  av_free(text);
749 
750  path = av_mallocz(tmp_max_url_size);
751  tmp_str = av_mallocz(tmp_max_url_size);
752  if (!tmp_str || !path) {
753  updated = AVERROR(ENOMEM);
754  goto end;
755  }
756 
757  av_strlcpy (path, url, strlen(url) - size + 1);
758  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
759  if (!(node = baseurl_nodes[rootId])) {
760  continue;
761  }
762  text = xmlNodeGetContent(node);
763  if (ishttp(text)) {
764  xmlFree(text);
765  break;
766  }
767  xmlFree(text);
768  }
769 
770  node = baseurl_nodes[rootId];
771  baseurl = xmlNodeGetContent(node);
772  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
773  if (node) {
774  xmlNodeSetContent(node, root_url);
775  updated = 1;
776  }
777 
778  size = strlen(root_url);
779  isRootHttp = ishttp(root_url);
780 
781  if (size > 0 && root_url[size - 1] != token) {
782  av_strlcat(root_url, "/", size + 2);
783  size += 2;
784  }
785 
786  for (i = 0; i < n_baseurl_nodes; ++i) {
787  if (i == rootId) {
788  continue;
789  }
790  text = xmlNodeGetContent(baseurl_nodes[i]);
791  if (text && !av_strstart(text, "/", NULL)) {
792  memset(tmp_str, 0, strlen(tmp_str));
793  if (!ishttp(text) && isRootHttp) {
794  av_strlcpy(tmp_str, root_url, size + 1);
795  }
796  start = (text[0] == token);
797  if (start && av_stristr(tmp_str, text)) {
798  char *p = tmp_str;
799  if (!av_strncasecmp(tmp_str, "http://", 7)) {
800  p += 7;
801  } else if (!av_strncasecmp(tmp_str, "https://", 8)) {
802  p += 8;
803  }
804  p = strchr(p, '/');
805  memset(p + 1, 0, strlen(p));
806  }
807  av_strlcat(tmp_str, text + start, tmp_max_url_size);
808  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
809  updated = 1;
810  xmlFree(text);
811  }
812  }
813 
814 end:
815  if (tmp_max_url_size > *max_url_size) {
816  *max_url_size = tmp_max_url_size;
817  }
818  av_free(path);
819  av_free(tmp_str);
820  xmlFree(baseurl);
821  return updated;
822 
823 }
824 
825 static int parse_manifest_representation(AVFormatContext *s, const char *url,
826  xmlNodePtr node,
827  xmlNodePtr adaptionset_node,
828  xmlNodePtr mpd_baseurl_node,
829  xmlNodePtr period_baseurl_node,
830  xmlNodePtr period_segmenttemplate_node,
831  xmlNodePtr period_segmentlist_node,
832  xmlNodePtr fragment_template_node,
833  xmlNodePtr content_component_node,
834  xmlNodePtr adaptionset_baseurl_node,
835  xmlNodePtr adaptionset_segmentlist_node,
836  xmlNodePtr adaptionset_supplementalproperty_node)
837 {
838  int32_t ret = 0;
839  DASHContext *c = s->priv_data;
840  struct representation *rep = NULL;
841  struct fragment *seg = NULL;
842  xmlNodePtr representation_segmenttemplate_node = NULL;
843  xmlNodePtr representation_baseurl_node = NULL;
844  xmlNodePtr representation_segmentlist_node = NULL;
845  xmlNodePtr segmentlists_tab[3];
846  xmlNodePtr fragment_timeline_node = NULL;
847  xmlNodePtr fragment_templates_tab[5];
848  char *val = NULL;
849  xmlNodePtr baseurl_nodes[4];
850  xmlNodePtr representation_node = node;
851  char *rep_bandwidth_val;
852  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
853 
854  // try get information from representation
855  if (type == AVMEDIA_TYPE_UNKNOWN)
856  type = get_content_type(representation_node);
857  // try to get information from ContentComponent
858  if (type == AVMEDIA_TYPE_UNKNOWN)
859  type = get_content_type(content_component_node);
860  // try get information from adaption set
861  if (type == AVMEDIA_TYPE_UNKNOWN)
862  type = get_content_type(adaptionset_node);
863 
864  if (type == AVMEDIA_TYPE_UNKNOWN) {
865  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipp not supported representation type\n", url);
866  return 0;
867  }
868 
869  // convert selected representation to our internal struct
870  rep = av_mallocz(sizeof(struct representation));
871  if (!rep)
872  return AVERROR(ENOMEM);
873  if (c->adaptionset_lang) {
874  rep->lang = av_strdup(c->adaptionset_lang);
875  if (!rep->lang) {
876  av_log(s, AV_LOG_ERROR, "alloc language memory failure\n");
877  av_freep(&rep);
878  return AVERROR(ENOMEM);
879  }
880  }
881  rep->parent = s;
882  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
883  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
884  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
885  rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
886  val = xmlGetProp(representation_node, "id");
887  if (val) {
888  rep->id = av_strdup(val);
889  xmlFree(val);
890  if (!rep->id)
891  goto enomem;
892  }
893 
894  baseurl_nodes[0] = mpd_baseurl_node;
895  baseurl_nodes[1] = period_baseurl_node;
896  baseurl_nodes[2] = adaptionset_baseurl_node;
897  baseurl_nodes[3] = representation_baseurl_node;
898 
899  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
900  c->max_url_size = aligned(c->max_url_size
901  + (rep->id ? strlen(rep->id) : 0)
902  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
903  if (ret == AVERROR(ENOMEM) || ret == 0)
904  goto free;
905  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
906  fragment_timeline_node = NULL;
907  fragment_templates_tab[0] = representation_segmenttemplate_node;
908  fragment_templates_tab[1] = adaptionset_segmentlist_node;
909  fragment_templates_tab[2] = fragment_template_node;
910  fragment_templates_tab[3] = period_segmenttemplate_node;
911  fragment_templates_tab[4] = period_segmentlist_node;
912 
913  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
914  if (val) {
915  rep->init_section = av_mallocz(sizeof(struct fragment));
916  if (!rep->init_section) {
917  xmlFree(val);
918  goto enomem;
919  }
920  c->max_url_size = aligned(c->max_url_size + strlen(val));
921  rep->init_section->url = get_content_url(baseurl_nodes, 4,
922  c->max_url_size, rep->id,
923  rep_bandwidth_val, val);
924  xmlFree(val);
925  if (!rep->init_section->url)
926  goto enomem;
927  rep->init_section->size = -1;
928  }
929  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
930  if (val) {
931  c->max_url_size = aligned(c->max_url_size + strlen(val));
932  rep->url_template = get_content_url(baseurl_nodes, 4,
933  c->max_url_size, rep->id,
934  rep_bandwidth_val, val);
935  xmlFree(val);
936  }
937  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
938  if (val) {
939  rep->presentation_timeoffset = (int64_t) strtoll(val, NULL, 10);
940  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
941  xmlFree(val);
942  }
943  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
944  if (val) {
945  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
946  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
947  xmlFree(val);
948  }
949  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
950  if (val) {
951  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
952  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
953  xmlFree(val);
954  }
955  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
956  if (val) {
957  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
958  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
959  xmlFree(val);
960  }
961  if (adaptionset_supplementalproperty_node) {
962  char *scheme_id_uri = xmlGetProp(adaptionset_supplementalproperty_node, "schemeIdUri");
963  if (scheme_id_uri) {
964  int is_last_segment_number = !av_strcasecmp(scheme_id_uri, "http://dashif.org/guidelines/last-segment-number");
965  xmlFree(scheme_id_uri);
966  if (is_last_segment_number) {
967  val = xmlGetProp(adaptionset_supplementalproperty_node, "value");
968  if (!val) {
969  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
970  } else {
971  rep->last_seq_no = (int64_t)strtoll(val, NULL, 10) - 1;
972  xmlFree(val);
973  }
974  }
975  }
976  }
977 
978  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
979 
980  if (!fragment_timeline_node)
981  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
982  if (!fragment_timeline_node)
983  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
984  if (!fragment_timeline_node)
985  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
986  if (fragment_timeline_node) {
987  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
988  while (fragment_timeline_node) {
989  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
990  if (ret < 0)
991  goto free;
992  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
993  }
994  }
995  } else if (representation_baseurl_node && !representation_segmentlist_node) {
996  seg = av_mallocz(sizeof(struct fragment));
997  if (!seg)
998  goto enomem;
999  ret = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
1000  if (ret < 0) {
1001  av_free(seg);
1002  goto free;
1003  }
1004  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size,
1005  rep->id, rep_bandwidth_val, NULL);
1006  if (!seg->url)
1007  goto enomem;
1008  seg->size = -1;
1009  } else if (representation_segmentlist_node) {
1010  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
1011  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
1012  xmlNodePtr fragmenturl_node = NULL;
1013  segmentlists_tab[0] = representation_segmentlist_node;
1014  segmentlists_tab[1] = adaptionset_segmentlist_node;
1015  segmentlists_tab[2] = period_segmentlist_node;
1016 
1017  val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
1018  if (val) {
1019  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
1020  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
1021  xmlFree(val);
1022  }
1023  val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1024  if (val) {
1025  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
1026  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1027  xmlFree(val);
1028  }
1029  val = get_val_from_nodes_tab(segmentlists_tab, 3, "startNumber");
1030  if (val) {
1031  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
1032  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
1033  xmlFree(val);
1034  }
1035 
1036  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1037  while (fragmenturl_node) {
1038  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1039  baseurl_nodes, rep->id,
1040  rep_bandwidth_val);
1041  if (ret < 0)
1042  goto free;
1043  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1044  }
1045 
1046  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1047  if (!fragment_timeline_node)
1048  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1049  if (fragment_timeline_node) {
1050  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1051  while (fragment_timeline_node) {
1052  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1053  if (ret < 0)
1054  goto free;
1055  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1056  }
1057  }
1058  } else {
1059  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id '%s' \n",
1060  rep->id ? rep->id : "");
1061  goto free;
1062  }
1063 
1064  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1065  rep->fragment_timescale = 1;
1066  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1067  rep->framerate = av_make_q(0, 0);
1068  if (type == AVMEDIA_TYPE_VIDEO) {
1069  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
1070  if (rep_framerate_val) {
1071  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1072  if (ret < 0)
1073  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1074  xmlFree(rep_framerate_val);
1075  }
1076  }
1077 
1078  switch (type) {
1079  case AVMEDIA_TYPE_VIDEO:
1080  ret = av_dynarray_add_nofree(&c->videos, &c->n_videos, rep);
1081  break;
1082  case AVMEDIA_TYPE_AUDIO:
1083  ret = av_dynarray_add_nofree(&c->audios, &c->n_audios, rep);
1084  break;
1085  case AVMEDIA_TYPE_SUBTITLE:
1086  ret = av_dynarray_add_nofree(&c->subtitles, &c->n_subtitles, rep);
1087  break;
1088  }
1089  if (ret < 0)
1090  goto free;
1091 
1092 end:
1093  if (rep_bandwidth_val)
1094  xmlFree(rep_bandwidth_val);
1095 
1096  return ret;
1097 enomem:
1098  ret = AVERROR(ENOMEM);
1099 free:
1100  free_representation(rep);
1101  goto end;
1102 }
1103 
1104 static int parse_manifest_adaptationset_attr(AVFormatContext *s, xmlNodePtr adaptionset_node)
1105 {
1106  DASHContext *c = s->priv_data;
1107 
1108  if (!adaptionset_node) {
1109  av_log(s, AV_LOG_WARNING, "Cannot get AdaptionSet\n");
1110  return AVERROR(EINVAL);
1111  }
1112  c->adaptionset_lang = xmlGetProp(adaptionset_node, "lang");
1113 
1114  return 0;
1115 }
1116 
1117 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1118  xmlNodePtr adaptionset_node,
1119  xmlNodePtr mpd_baseurl_node,
1120  xmlNodePtr period_baseurl_node,
1121  xmlNodePtr period_segmenttemplate_node,
1122  xmlNodePtr period_segmentlist_node)
1123 {
1124  int ret = 0;
1125  DASHContext *c = s->priv_data;
1126  xmlNodePtr fragment_template_node = NULL;
1127  xmlNodePtr content_component_node = NULL;
1128  xmlNodePtr adaptionset_baseurl_node = NULL;
1129  xmlNodePtr adaptionset_segmentlist_node = NULL;
1130  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1131  xmlNodePtr node = NULL;
1132 
1133  ret = parse_manifest_adaptationset_attr(s, adaptionset_node);
1134  if (ret < 0)
1135  return ret;
1136 
1137  node = xmlFirstElementChild(adaptionset_node);
1138  while (node) {
1139  if (!av_strcasecmp(node->name, "SegmentTemplate")) {
1140  fragment_template_node = node;
1141  } else if (!av_strcasecmp(node->name, "ContentComponent")) {
1142  content_component_node = node;
1143  } else if (!av_strcasecmp(node->name, "BaseURL")) {
1144  adaptionset_baseurl_node = node;
1145  } else if (!av_strcasecmp(node->name, "SegmentList")) {
1146  adaptionset_segmentlist_node = node;
1147  } else if (!av_strcasecmp(node->name, "SupplementalProperty")) {
1148  adaptionset_supplementalproperty_node = node;
1149  } else if (!av_strcasecmp(node->name, "Representation")) {
1150  ret = parse_manifest_representation(s, url, node,
1151  adaptionset_node,
1152  mpd_baseurl_node,
1153  period_baseurl_node,
1154  period_segmenttemplate_node,
1155  period_segmentlist_node,
1156  fragment_template_node,
1157  content_component_node,
1158  adaptionset_baseurl_node,
1159  adaptionset_segmentlist_node,
1160  adaptionset_supplementalproperty_node);
1161  if (ret < 0)
1162  goto err;
1163  }
1164  node = xmlNextElementSibling(node);
1165  }
1166 
1167 err:
1168  xmlFree(c->adaptionset_lang);
1169  c->adaptionset_lang = NULL;
1170  return ret;
1171 }
1172 
1173 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1174 {
1175  xmlChar *val = NULL;
1176 
1177  node = xmlFirstElementChild(node);
1178  while (node) {
1179  if (!av_strcasecmp(node->name, "Title")) {
1180  val = xmlNodeGetContent(node);
1181  if (val) {
1182  av_dict_set(&s->metadata, "Title", val, 0);
1183  }
1184  } else if (!av_strcasecmp(node->name, "Source")) {
1185  val = xmlNodeGetContent(node);
1186  if (val) {
1187  av_dict_set(&s->metadata, "Source", val, 0);
1188  }
1189  } else if (!av_strcasecmp(node->name, "Copyright")) {
1190  val = xmlNodeGetContent(node);
1191  if (val) {
1192  av_dict_set(&s->metadata, "Copyright", val, 0);
1193  }
1194  }
1195  node = xmlNextElementSibling(node);
1196  xmlFree(val);
1197  val = NULL;
1198  }
1199  return 0;
1200 }
1201 
1202 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1203 {
1204  DASHContext *c = s->priv_data;
1205  int ret = 0;
1206  int close_in = 0;
1207  AVBPrint buf;
1208  AVDictionary *opts = NULL;
1209  xmlDoc *doc = NULL;
1210  xmlNodePtr root_element = NULL;
1211  xmlNodePtr node = NULL;
1212  xmlNodePtr period_node = NULL;
1213  xmlNodePtr tmp_node = NULL;
1214  xmlNodePtr mpd_baseurl_node = NULL;
1215  xmlNodePtr period_baseurl_node = NULL;
1216  xmlNodePtr period_segmenttemplate_node = NULL;
1217  xmlNodePtr period_segmentlist_node = NULL;
1218  xmlNodePtr adaptionset_node = NULL;
1219  xmlAttrPtr attr = NULL;
1220  char *val = NULL;
1221  uint32_t period_duration_sec = 0;
1222  uint32_t period_start_sec = 0;
1223 
1224  if (!in) {
1225  close_in = 1;
1226 
1227  av_dict_copy(&opts, c->avio_opts, 0);
1228  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1229  av_dict_free(&opts);
1230  if (ret < 0)
1231  return ret;
1232  }
1233 
1234  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&c->base_url) < 0)
1235  c->base_url = av_strdup(url);
1236 
1237  av_bprint_init(&buf, 0, INT_MAX); // xmlReadMemory uses integer bufsize
1238 
1239  if ((ret = avio_read_to_bprint(in, &buf, SIZE_MAX)) < 0 ||
1240  !avio_feof(in)) {
1241  av_log(s, AV_LOG_ERROR, "Unable to read to manifest '%s'\n", url);
1242  if (ret == 0)
1243  ret = AVERROR_INVALIDDATA;
1244  } else {
1245  LIBXML_TEST_VERSION
1246 
1247  doc = xmlReadMemory(buf.str, buf.len, c->base_url, NULL, 0);
1248  root_element = xmlDocGetRootElement(doc);
1249  node = root_element;
1250 
1251  if (!node) {
1252  ret = AVERROR_INVALIDDATA;
1253  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1254  goto cleanup;
1255  }
1256 
1257  if (node->type != XML_ELEMENT_NODE ||
1258  av_strcasecmp(node->name, "MPD")) {
1259  ret = AVERROR_INVALIDDATA;
1260  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1261  goto cleanup;
1262  }
1263 
1264  val = xmlGetProp(node, "type");
1265  if (!val) {
1266  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1267  ret = AVERROR_INVALIDDATA;
1268  goto cleanup;
1269  }
1270  if (!av_strcasecmp(val, "dynamic"))
1271  c->is_live = 1;
1272  xmlFree(val);
1273 
1274  attr = node->properties;
1275  while (attr) {
1276  val = xmlGetProp(node, attr->name);
1277 
1278  if (!av_strcasecmp(attr->name, "availabilityStartTime")) {
1279  c->availability_start_time = get_utc_date_time_insec(s, val);
1280  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1281  } else if (!av_strcasecmp(attr->name, "availabilityEndTime")) {
1282  c->availability_end_time = get_utc_date_time_insec(s, val);
1283  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1284  } else if (!av_strcasecmp(attr->name, "publishTime")) {
1285  c->publish_time = get_utc_date_time_insec(s, val);
1286  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1287  } else if (!av_strcasecmp(attr->name, "minimumUpdatePeriod")) {
1288  c->minimum_update_period = get_duration_insec(s, val);
1289  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1290  } else if (!av_strcasecmp(attr->name, "timeShiftBufferDepth")) {
1291  c->time_shift_buffer_depth = get_duration_insec(s, val);
1292  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1293  } else if (!av_strcasecmp(attr->name, "minBufferTime")) {
1294  c->min_buffer_time = get_duration_insec(s, val);
1295  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1296  } else if (!av_strcasecmp(attr->name, "suggestedPresentationDelay")) {
1297  c->suggested_presentation_delay = get_duration_insec(s, val);
1298  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1299  } else if (!av_strcasecmp(attr->name, "mediaPresentationDuration")) {
1300  c->media_presentation_duration = get_duration_insec(s, val);
1301  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1302  }
1303  attr = attr->next;
1304  xmlFree(val);
1305  }
1306 
1307  tmp_node = find_child_node_by_name(node, "BaseURL");
1308  if (tmp_node) {
1309  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1310  } else {
1311  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1312  }
1313 
1314  // for now we handle only one Period: the one with the longest duration
1315  node = xmlFirstElementChild(node);
1316  while (node) {
1317  if (!av_strcasecmp(node->name, "Period")) {
1318  period_duration_sec = 0;
1319  period_start_sec = 0;
1320  attr = node->properties;
1321  while (attr) {
1322  val = xmlGetProp(node, attr->name);
1323  if (!av_strcasecmp(attr->name, "duration")) {
1324  period_duration_sec = get_duration_insec(s, val);
1325  } else if (!av_strcasecmp(attr->name, "start")) {
1326  period_start_sec = get_duration_insec(s, val);
1327  }
1328  attr = attr->next;
1329  xmlFree(val);
1330  }
1331  if ((period_duration_sec) >= (c->period_duration)) {
1332  period_node = node;
1333  c->period_duration = period_duration_sec;
1334  c->period_start = period_start_sec;
1335  if (c->period_start > 0)
1336  c->media_presentation_duration = c->period_duration;
1337  }
1338  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1339  parse_programinformation(s, node);
1340  }
1341  node = xmlNextElementSibling(node);
1342  }
1343  if (!period_node) {
1344  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1345  ret = AVERROR_INVALIDDATA;
1346  goto cleanup;
1347  }
1348 
1349  adaptionset_node = xmlFirstElementChild(period_node);
1350  while (adaptionset_node) {
1351  if (!av_strcasecmp(adaptionset_node->name, "BaseURL")) {
1352  period_baseurl_node = adaptionset_node;
1353  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentTemplate")) {
1354  period_segmenttemplate_node = adaptionset_node;
1355  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentList")) {
1356  period_segmentlist_node = adaptionset_node;
1357  } else if (!av_strcasecmp(adaptionset_node->name, "AdaptationSet")) {
1358  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1359  }
1360  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1361  }
1362 cleanup:
1363  /*free the document */
1364  xmlFreeDoc(doc);
1365  xmlCleanupParser();
1366  xmlFreeNode(mpd_baseurl_node);
1367  }
1368 
1369  av_bprint_finalize(&buf, NULL);
1370  if (close_in) {
1371  avio_close(in);
1372  }
1373  return ret;
1374 }
1375 
1376 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1377 {
1378  DASHContext *c = s->priv_data;
1379  int64_t num = 0;
1380  int64_t start_time_offset = 0;
1381 
1382  if (c->is_live) {
1383  if (pls->n_fragments) {
1384  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1385  num = pls->first_seq_no;
1386  } else if (pls->n_timelines) {
1387  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1388  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1389  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1390  if (num == -1)
1391  num = pls->first_seq_no;
1392  else
1393  num += pls->first_seq_no;
1394  } else if (pls->fragment_duration){
1395  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1396  if (pls->presentation_timeoffset) {
1397  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
1398  } else if (c->publish_time > 0 && !c->availability_start_time) {
1399  if (c->min_buffer_time) {
1400  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
1401  } else {
1402  num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1403  }
1404  } else {
1405  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1406  }
1407  }
1408  } else {
1409  num = pls->first_seq_no;
1410  }
1411  return num;
1412 }
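/*
 * As a rough example of the plain @duration case: with fragment_timescale
 * = 1 and fragment_duration = 2 (2-second segments), an
 * availabilityStartTime 100 seconds in the past and
 * suggestedPresentationDelay = 10, the live edge is estimated as
 * first_seq_no + ((100 - 10) * 1) / 2 = first_seq_no + 45.
 */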
1413 
1414 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1415 {
1416  DASHContext *c = s->priv_data;
1417  int64_t num = 0;
1418 
1419  if (c->is_live && pls->fragment_duration) {
1420  av_log(s, AV_LOG_TRACE, "in live mode\n");
1421  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
1422  } else {
1423  num = pls->first_seq_no;
1424  }
1425  return num;
1426 }
1427 
1428 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1429 {
1430  int64_t num = 0;
1431 
1432  if (pls->n_fragments) {
1433  num = pls->first_seq_no + pls->n_fragments - 1;
1434  } else if (pls->n_timelines) {
1435  int i = 0;
1436  num = pls->first_seq_no + pls->n_timelines - 1;
1437  for (i = 0; i < pls->n_timelines; i++) {
1438  if (pls->timelines[i]->repeat == -1) {
1439  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1440  num = c->period_duration / length_of_each_segment;
1441  } else {
1442  num += pls->timelines[i]->repeat;
1443  }
1444  }
1445  } else if (c->is_live && pls->fragment_duration) {
1446  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time)) * pls->fragment_timescale) / pls->fragment_duration;
1447  } else if (pls->fragment_duration) {
1448  num = pls->first_seq_no + av_rescale_rnd(1, c->media_presentation_duration * pls->fragment_timescale, pls->fragment_duration, AV_ROUND_UP);
1449  }
1450 
1451  return num;
1452 }
1453 
1454 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1455 {
1456  if (rep_dest && rep_src ) {
1457  free_timelines_list(rep_dest);
1458  rep_dest->timelines = rep_src->timelines;
1459  rep_dest->n_timelines = rep_src->n_timelines;
1460  rep_dest->first_seq_no = rep_src->first_seq_no;
1461  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1462  rep_src->timelines = NULL;
1463  rep_src->n_timelines = 0;
1464  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1465  }
1466 }
1467 
1468 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1469 {
1470  if (rep_dest && rep_src ) {
1471  free_fragment_list(rep_dest);
1472  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1473  rep_dest->cur_seq_no = 0;
1474  else
1475  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1476  rep_dest->fragments = rep_src->fragments;
1477  rep_dest->n_fragments = rep_src->n_fragments;
1478  rep_dest->parent = rep_src->parent;
1479  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1480  rep_src->fragments = NULL;
1481  rep_src->n_fragments = 0;
1482  }
1483 }
1484 
1485 
1486 static int refresh_manifest(AVFormatContext *s)
1487 {
1488  int ret = 0, i;
1489  DASHContext *c = s->priv_data;
1490  // save current context
1491  int n_videos = c->n_videos;
1492  struct representation **videos = c->videos;
1493  int n_audios = c->n_audios;
1494  struct representation **audios = c->audios;
1495  int n_subtitles = c->n_subtitles;
1496  struct representation **subtitles = c->subtitles;
1497  char *base_url = c->base_url;
1498 
1499  c->base_url = NULL;
1500  c->n_videos = 0;
1501  c->videos = NULL;
1502  c->n_audios = 0;
1503  c->audios = NULL;
1504  c->n_subtitles = 0;
1505  c->subtitles = NULL;
1506  ret = parse_manifest(s, s->url, NULL);
1507  if (ret)
1508  goto finish;
1509 
1510  if (c->n_videos != n_videos) {
1511  av_log(c, AV_LOG_ERROR,
1512  "new manifest has mismatched no. of video representations, %d -> %d\n",
1513  n_videos, c->n_videos);
1514  return AVERROR_INVALIDDATA;
1515  }
1516  if (c->n_audios != n_audios) {
1517  av_log(c, AV_LOG_ERROR,
1518  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1519  n_audios, c->n_audios);
1520  return AVERROR_INVALIDDATA;
1521  }
1522  if (c->n_subtitles != n_subtitles) {
1523  av_log(c, AV_LOG_ERROR,
1524  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1525  n_subtitles, c->n_subtitles);
1526  return AVERROR_INVALIDDATA;
1527  }
1528 
1529  for (i = 0; i < n_videos; i++) {
1530  struct representation *cur_video = videos[i];
1531  struct representation *ccur_video = c->videos[i];
1532  if (cur_video->timelines) {
1533  // calc current time
1534  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1535  // update segments
1536  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1537  if (ccur_video->cur_seq_no >= 0) {
1538  move_timelines(ccur_video, cur_video, c);
1539  }
1540  }
1541  if (cur_video->fragments) {
1542  move_segments(ccur_video, cur_video, c);
1543  }
1544  }
1545  for (i = 0; i < n_audios; i++) {
1546  struct representation *cur_audio = audios[i];
1547  struct representation *ccur_audio = c->audios[i];
1548  if (cur_audio->timelines) {
1549  // calc current time
1550  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1551  // update segments
1552  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1553  if (ccur_audio->cur_seq_no >= 0) {
1554  move_timelines(ccur_audio, cur_audio, c);
1555  }
1556  }
1557  if (cur_audio->fragments) {
1558  move_segments(ccur_audio, cur_audio, c);
1559  }
1560  }
1561 
1562 finish:
1563  // restore context
1564  if (c->base_url)
1565  av_free(base_url);
1566  else
1567  c->base_url = base_url;
1568 
1569  if (c->subtitles)
1570  free_subtitle_list(c);
1571  if (c->audios)
1572  free_audio_list(c);
1573  if (c->videos)
1574  free_video_list(c);
1575 
1576  c->n_subtitles = n_subtitles;
1577  c->subtitles = subtitles;
1578  c->n_audios = n_audios;
1579  c->audios = audios;
1580  c->n_videos = n_videos;
1581  c->videos = videos;
1582  return ret;
1583 }
1584 
1585 static struct fragment *get_current_fragment(struct representation *pls)
1586 {
1587  int64_t min_seq_no = 0;
1588  int64_t max_seq_no = 0;
1589  struct fragment *seg = NULL;
1590  struct fragment *seg_ptr = NULL;
1591  DASHContext *c = pls->parent->priv_data;
1592 
1593  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1594  if (pls->cur_seq_no < pls->n_fragments) {
1595  seg_ptr = pls->fragments[pls->cur_seq_no];
1596  seg = av_mallocz(sizeof(struct fragment));
1597  if (!seg) {
1598  return NULL;
1599  }
1600  seg->url = av_strdup(seg_ptr->url);
1601  if (!seg->url) {
1602  av_free(seg);
1603  return NULL;
1604  }
1605  seg->size = seg_ptr->size;
1606  seg->url_offset = seg_ptr->url_offset;
1607  return seg;
1608  } else if (c->is_live) {
1609  refresh_manifest(pls->parent);
1610  } else {
1611  break;
1612  }
1613  }
1614  if (c->is_live) {
1615  min_seq_no = calc_min_seg_no(pls->parent, pls);
1616  max_seq_no = calc_max_seg_no(pls, c);
1617 
1618  if (pls->timelines || pls->fragments) {
1619  refresh_manifest(pls->parent);
1620  }
1621  if (pls->cur_seq_no <= min_seq_no) {
1622  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"]\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no);
1623  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1624  } else if (pls->cur_seq_no > max_seq_no) {
1625  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"]\n", min_seq_no, max_seq_no);
1626  }
1627  seg = av_mallocz(sizeof(struct fragment));
1628  if (!seg) {
1629  return NULL;
1630  }
1631  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1632  seg = av_mallocz(sizeof(struct fragment));
1633  if (!seg) {
1634  return NULL;
1635  }
1636  }
1637  if (seg) {
1638  char *tmpfilename;
1639  if (!pls->url_template) {
1640  av_log(pls->parent, AV_LOG_ERROR, "Cannot get fragment, missing template URL\n");
1641  av_free(seg);
1642  return NULL;
1643  }
1644  tmpfilename = av_mallocz(c->max_url_size);
1645  if (!tmpfilename) {
1646  av_free(seg);
1647  return NULL;
1648  }
1649  ff_dash_fill_tmpl_params(tmpfilename, c->max_url_size, pls->url_template, 0, pls->cur_seq_no, 0, get_segment_start_time_based_on_timeline(pls, pls->cur_seq_no));
1650  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1651  if (!seg->url) {
1652  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', try to use origin template\n", pls->url_template);
1653  seg->url = av_strdup(pls->url_template);
1654  if (!seg->url) {
1655  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1656  av_free(tmpfilename);
1657  av_free(seg);
1658  return NULL;
1659  }
1660  }
1661  av_free(tmpfilename);
1662  seg->size = -1;
1663  }
1664 
1665  return seg;
1666 }
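/*
 * For template-based representations, ff_dash_fill_tmpl_params() expands
 * the remaining $Number$/$Time$/$Bandwidth$ placeholders (optionally with a
 * printf-style width such as $Number%05d$). For instance, a hypothetical
 * template ending in "chunk-$Number%05d$.m4s" with cur_seq_no = 42 becomes
 * "chunk-00042.m4s"; $Time$ is filled from
 * get_segment_start_time_based_on_timeline().
 */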
1667 
1668 static int read_from_url(struct representation *pls, struct fragment *seg,
1669  uint8_t *buf, int buf_size)
1670 {
1671  int ret;
1672 
1673  /* limit read if the fragment was only a part of a file */
1674  if (seg->size >= 0)
1675  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1676 
1677  ret = avio_read(pls->input, buf, buf_size);
1678  if (ret > 0)
1679  pls->cur_seg_offset += ret;
1680 
1681  return ret;
1682 }
1683 
1684 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1685 {
1686  AVDictionary *opts = NULL;
1687  char *url = NULL;
1688  int ret = 0;
1689 
1690  url = av_mallocz(c->max_url_size);
1691  if (!url) {
1692  ret = AVERROR(ENOMEM);
1693  goto cleanup;
1694  }
1695 
1696  if (seg->size >= 0) {
1697  /* try to restrict the HTTP request to the part we want
1698  * (if this is in fact a HTTP request) */
1699  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1700  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1701  }
1702 
1703  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1704  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64"\n",
1705  url, seg->url_offset);
1706  ret = open_url(pls->parent, &pls->input, url, &c->avio_opts, opts, NULL);
1707 
1708 cleanup:
1709  av_free(url);
1710  av_dict_free(&opts);
1711  pls->cur_seg_offset = 0;
1712  pls->cur_seg_size = seg->size;
1713  return ret;
1714 }
1715 
1716 static int update_init_section(struct representation *pls)
1717 {
1718  static const int max_init_section_size = 1024 * 1024;
1719  DASHContext *c = pls->parent->priv_data;
1720  int64_t sec_size;
1721  int64_t urlsize;
1722  int ret;
1723 
1724  if (!pls->init_section || pls->init_sec_buf)
1725  return 0;
1726 
1727  ret = open_input(c, pls, pls->init_section);
1728  if (ret < 0) {
1729  av_log(pls->parent, AV_LOG_WARNING,
1730  "Failed to open an initialization section\n");
1731  return ret;
1732  }
1733 
1734  if (pls->init_section->size >= 0)
1735  sec_size = pls->init_section->size;
1736  else if ((urlsize = avio_size(pls->input)) >= 0)
1737  sec_size = urlsize;
1738  else
1739  sec_size = max_init_section_size;
1740 
1741  av_log(pls->parent, AV_LOG_DEBUG,
1742  "Downloading an initialization section of size %"PRId64"\n",
1743  sec_size);
1744 
1745  sec_size = FFMIN(sec_size, max_init_section_size);
1746 
1747  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1748 
1749  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1750  pls->init_sec_buf_size);
1751  ff_format_io_close(pls->parent, &pls->input);
1752 
1753  if (ret < 0)
1754  return ret;
1755 
1756  pls->init_sec_data_len = ret;
1757  pls->init_sec_buf_read_offset = 0;
1758 
1759  return 0;
1760 }
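/*
 * The initialization section is downloaded at most once per representation:
 * it is read into init_sec_buf (capped at 1 MiB above) and read_data()
 * later replays that buffer before the first media fragment, so the nested
 * demuxer always sees init data followed by segment data.
 */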
1761 
1762 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1763 {
1764  struct representation *v = opaque;
1765  if (v->n_fragments && !v->init_sec_data_len) {
1766  return avio_seek(v->input, offset, whence);
1767  }
1768 
1769  return AVERROR(ENOSYS);
1770 }
1771 
1772 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1773 {
1774  int ret = 0;
1775  struct representation *v = opaque;
1776  DASHContext *c = v->parent->priv_data;
1777 
1778 restart:
1779  if (!v->input) {
1780  free_fragment(&v->cur_seg);
1781  v->cur_seg = get_current_fragment(v);
1782  if (!v->cur_seg) {
1783  ret = AVERROR_EOF;
1784  goto end;
1785  }
1786 
1787  /* load/update Media Initialization Section, if any */
1788  ret = update_init_section(v);
1789  if (ret)
1790  goto end;
1791 
1792  ret = open_input(c, v, v->cur_seg);
1793  if (ret < 0) {
1794  if (ff_check_interrupt(c->interrupt_callback)) {
1795  ret = AVERROR_EXIT;
1796  goto end;
1797  }
1798  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist\n");
1799  v->cur_seq_no++;
1800  goto restart;
1801  }
1802  }
1803 
1804  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1805  /* Push init section out first before first actual fragment */
1806  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1807  memcpy(buf, v->init_sec_buf, copy_size);
1808  v->init_sec_buf_read_offset += copy_size;
1809  ret = copy_size;
1810  goto end;
1811  }
1812 
1813  /* if v->cur_seg is NULL, fetch the current fragment and check again */
1814  if (!v->cur_seg) {
1815  v->cur_seg = get_current_fragment(v);
1816  }
1817  if (!v->cur_seg) {
1818  ret = AVERROR_EOF;
1819  goto end;
1820  }
1821  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1822  if (ret > 0)
1823  goto end;
1824 
1825  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1826  if (!v->is_restart_needed)
1827  v->cur_seq_no++;
1828  v->is_restart_needed = 1;
1829  }
1830 
1831 end:
1832  return ret;
1833 }
1834 
1835 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1836  int flags, AVDictionary **opts)
1837 {
1838  av_log(s, AV_LOG_ERROR,
1839  "A DASH playlist item '%s' referred to an external file '%s'. "
1840  "Opening this file was forbidden for security reasons\n",
1841  s->url, url);
1842  return AVERROR(EPERM);
1843 }
1844 
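/* Tear down the nested demuxer of a representation and release its custom
 * AVIO buffer. */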
1845 static void close_demux_for_component(struct representation *pls)
1846 {
1847  /* note: the internal buffer could have changed */
1848  av_freep(&pls->pb.pub.buffer);
1849  memset(&pls->pb, 0x00, sizeof(pls->pb));
1850  pls->ctx->pb = NULL;
1851  avformat_close_input(&pls->ctx);
1852 }
1853 
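/* (Re)create the nested demuxer for a representation: set up the custom
 * read/seek AVIO context, probe the fragment data, open the inner input and
 * propagate frame rate information for VOD fragment lists. */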
1854 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1855 {
1856  DASHContext *c = s->priv_data;
1857  const AVInputFormat *in_fmt = NULL;
1858  AVDictionary *in_fmt_opts = NULL;
1859  uint8_t *avio_ctx_buffer = NULL;
1860  int ret = 0, i;
1861 
1862  if (pls->ctx) {
1863  close_demux_for_component(pls);
1864  }
1865 
1866  if (ff_check_interrupt(&s->interrupt_callback)) {
1867  ret = AVERROR_EXIT;
1868  goto fail;
1869  }
1870 
1871  if (!(pls->ctx = avformat_alloc_context())) {
1872  ret = AVERROR(ENOMEM);
1873  goto fail;
1874  }
1875 
1876  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1877  if (!avio_ctx_buffer) {
1878  ret = AVERROR(ENOMEM);
1879  avformat_free_context(pls->ctx);
1880  pls->ctx = NULL;
1881  goto fail;
1882  }
1883  ffio_init_context(&pls->pb, avio_ctx_buffer, INITIAL_BUFFER_SIZE, 0,
1884  pls, read_data, NULL, c->is_live ? NULL : seek_data);
1885  pls->pb.pub.seekable = 0;
1886 
1887  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1888  goto fail;
1889 
1890  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1891  pls->ctx->probesize = s->probesize > 0 ? s->probesize : 1024 * 4;
1892  pls->ctx->max_analyze_duration = s->max_analyze_duration > 0 ? s->max_analyze_duration : 4 * AV_TIME_BASE;
1893  pls->ctx->interrupt_callback = s->interrupt_callback;
1894  ret = av_probe_input_buffer(&pls->pb.pub, &in_fmt, "", NULL, 0, 0);
1895  if (ret < 0) {
1896  av_log(s, AV_LOG_ERROR, "Error when loading first fragment of playlist\n");
1897  avformat_free_context(pls->ctx);
1898  pls->ctx = NULL;
1899  goto fail;
1900  }
1901 
1902  pls->ctx->pb = &pls->pb.pub;
1903  pls->ctx->io_open = nested_io_open;
1904 
1905  if (c->cenc_decryption_key)
1906  av_dict_set(&in_fmt_opts, "decryption_key", c->cenc_decryption_key, 0);
1907 
1908  // provide additional information from mpd if available
1909  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1910  av_dict_free(&in_fmt_opts);
1911  if (ret < 0)
1912  goto fail;
1913  if (pls->n_fragments) {
1914 #if FF_API_R_FRAME_RATE
1915  if (pls->framerate.den) {
1916  for (i = 0; i < pls->ctx->nb_streams; i++)
1917  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1918  }
1919 #endif
1920  ret = avformat_find_stream_info(pls->ctx, NULL);
1921  if (ret < 0)
1922  goto fail;
1923  }
1924 
1925 fail:
1926  return ret;
1927 }
1928 
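/* First-time setup of a representation: pick the starting segment number,
 * open the nested demuxer and mirror its streams (codec parameters, time
 * base, disposition) into the outer AVFormatContext. */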
1929 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1930 {
1931  int ret = 0;
1932  int i;
1933 
1934  pls->parent = s;
1935  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1936 
1937  if (!pls->last_seq_no)
1938  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1939 
1940  ret = reopen_demux_for_component(s, pls);
1941  if (ret < 0)
1942  return ret;
1943 
1944  for (i = 0; i < pls->ctx->nb_streams; i++) {
1945  AVStream *st = avformat_new_stream(s, NULL);
1946  AVStream *ist = pls->ctx->streams[i];
1947  if (!st)
1948  return AVERROR(ENOMEM);
1949 
1950  st->id = i;
1951 
1952  ret = avcodec_parameters_copy(st->codecpar, ist->codecpar);
1953  if (ret < 0)
1954  return ret;
1955 
1956  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1957 
1958  // copy disposition
1959  st->disposition = ist->disposition;
1960  }
1961 
1962  return 0;
1963 }
1964 
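/* Return 1 if every representation in the list shares the same initialization
 * section (same URL, offset and size), so it needs to be downloaded only once. */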
1965 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1966 {
1967  struct fragment *first_init_section = pls[0]->init_section;
1968  char *url = NULL;
1969  int64_t url_offset = -1;
1970  int64_t size = -1;
1971  int i = 0;
1972 
1973  if (first_init_section == NULL || n_pls == 0)
1974  return 0;
1975 
1976  url = first_init_section->url;
1977  url_offset = first_init_section->url_offset;
1978  size = pls[0]->init_section->size;
1979  for (i = 0; i < n_pls; i++) {
1980  if (!pls[i]->init_section)
1981  continue;
1982 
1983  if (av_strcasecmp(pls[i]->init_section->url, url) ||
1984  pls[i]->init_section->url_offset != url_offset ||
1985  pls[i]->init_section->size != size) {
1986  return 0;
1987  }
1988  }
1989  return 1;
1990 }
1991 
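/* Duplicate an already downloaded initialization section from one
 * representation into another. */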
1992 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
1993 {
1994  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
1995  if (!rep_dest->init_sec_buf) {
1996  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
1997  return AVERROR(ENOMEM);
1998  }
1999  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
2000  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2001  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2002  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2003 
2004  return 0;
2005 }
2006 
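/* Transfer an MPD-provided string (e.g. Representation id or language) into
 * the stream's metadata dictionary, handing over ownership of the value. */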
2007 static void move_metadata(AVStream *st, const char *key, char **value)
2008 {
2009  if (*value) {
2010  av_dict_set(&st->metadata, key, *value, AV_DICT_DONT_STRDUP_VAL);
2011  *value = NULL;
2012  }
2013 }
2014 
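/* Demuxer read_header callback: parse the MPD manifest, open a nested demuxer
 * per selected representation, create one AVProgram, and export per-variant
 * metadata such as variant_bitrate, id and language. */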
2015 static int dash_read_header(AVFormatContext *s)
2016 {
2017  DASHContext *c = s->priv_data;
2018  struct representation *rep;
2019  AVProgram *program;
2020  int ret = 0;
2021  int stream_index = 0;
2022  int i;
2023 
2024  c->interrupt_callback = &s->interrupt_callback;
2025 
2026  if ((ret = ffio_copy_url_options(s->pb, &c->avio_opts)) < 0)
2027  return ret;
2028 
2029  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2030  return ret;
2031 
2032  /* If this isn't a live stream, fill the total duration of the
2033  * stream. */
2034  if (!c->is_live) {
2035  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2036  } else {
2037  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2038  }
2039 
2040  if (c->n_videos)
2041  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2042 
2043  /* Open the demuxer for video and audio components if available */
2044  for (i = 0; i < c->n_videos; i++) {
2045  rep = c->videos[i];
2046  if (i > 0 && c->is_init_section_common_video) {
2047  ret = copy_init_section(rep, c->videos[0]);
2048  if (ret < 0)
2049  return ret;
2050  }
2051  ret = open_demux_for_component(s, rep);
2052 
2053  if (ret)
2054  return ret;
2055  rep->stream_index = stream_index;
2056  ++stream_index;
2057  }
2058 
2059  if (c->n_audios)
2060  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2061 
2062  for (i = 0; i < c->n_audios; i++) {
2063  rep = c->audios[i];
2064  if (i > 0 && c->is_init_section_common_audio) {
2065  ret = copy_init_section(rep, c->audios[0]);
2066  if (ret < 0)
2067  return ret;
2068  }
2069  ret = open_demux_for_component(s, rep);
2070 
2071  if (ret)
2072  return ret;
2073  rep->stream_index = stream_index;
2074  ++stream_index;
2075  }
2076 
2077  if (c->n_subtitles)
2078  c->is_init_section_common_subtitle = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2079 
2080  for (i = 0; i < c->n_subtitles; i++) {
2081  rep = c->subtitles[i];
2082  if (i > 0 && c->is_init_section_common_subtitle) {
2083  ret = copy_init_section(rep, c->subtitles[0]);
2084  if (ret < 0)
2085  return ret;
2086  }
2087  ret = open_demux_for_component(s, rep);
2088 
2089  if (ret)
2090  return ret;
2091  rep->stream_index = stream_index;
2092  ++stream_index;
2093  }
2094 
2095  if (!stream_index)
2096  return AVERROR_INVALIDDATA;
2097 
2098  /* Create a program */
2099  program = av_new_program(s, 0);
2100  if (!program)
2101  return AVERROR(ENOMEM);
2102 
2103  for (i = 0; i < c->n_videos; i++) {
2104  rep = c->videos[i];
2105  av_program_add_stream_index(s, 0, rep->stream_index);
2106  rep->assoc_stream = s->streams[rep->stream_index];
2107  if (rep->bandwidth > 0)
2108  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2109  move_metadata(rep->assoc_stream, "id", &rep->id);
2110  }
2111  for (i = 0; i < c->n_audios; i++) {
2112  rep = c->audios[i];
2113  av_program_add_stream_index(s, 0, rep->stream_index);
2114  rep->assoc_stream = s->streams[rep->stream_index];
2115  if (rep->bandwidth > 0)
2116  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2117  move_metadata(rep->assoc_stream, "id", &rep->id);
2118  move_metadata(rep->assoc_stream, "language", &rep->lang);
2119  }
2120  for (i = 0; i < c->n_subtitles; i++) {
2121  rep = c->subtitles[i];
2122  av_program_add_stream_index(s, 0, rep->stream_index);
2123  rep->assoc_stream = s->streams[rep->stream_index];
2124  move_metadata(rep->assoc_stream, "id", &rep->id);
2125  move_metadata(rep->assoc_stream, "language", &rep->lang);
2126  }
2127 
2128  return 0;
2129 }
2130 
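/* Open or close nested demuxers to match the caller's discard flags, and
 * catch a newly enabled representation up to the highest segment number in
 * the group. */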
2131 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2132 {
2133  int i, j;
2134 
2135  for (i = 0; i < n; i++) {
2136  struct representation *pls = p[i];
2137  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2138 
2139  if (needed && !pls->ctx) {
2140  pls->cur_seg_offset = 0;
2141  pls->init_sec_buf_read_offset = 0;
2142  /* Catch up */
2143  for (j = 0; j < n; j++) {
2144  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2145  }
2146  reopen_demux_for_component(s, pls);
2147  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2148  } else if (!needed && pls->ctx) {
2149  close_demux_for_component(pls);
2150  ff_format_io_close(pls->parent, &pls->input);
2151  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2152  }
2153  }
2154 }
2155 
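/* Demuxer read_packet callback: pick the enabled representation with the
 * lowest current timestamp, read one packet from its nested demuxer and remap
 * the stream index; reopen the component first if a restart is pending. */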
2156 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2157 {
2158  DASHContext *c = s->priv_data;
2159  int ret = 0, i;
2160  int64_t mints = 0;
2161  struct representation *cur = NULL;
2162  struct representation *rep = NULL;
2163 
2164  recheck_discard_flags(s, c->videos, c->n_videos);
2165  recheck_discard_flags(s, c->audios, c->n_audios);
2166  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2167 
2168  for (i = 0; i < c->n_videos; i++) {
2169  rep = c->videos[i];
2170  if (!rep->ctx)
2171  continue;
2172  if (!cur || rep->cur_timestamp < mints) {
2173  cur = rep;
2174  mints = rep->cur_timestamp;
2175  }
2176  }
2177  for (i = 0; i < c->n_audios; i++) {
2178  rep = c->audios[i];
2179  if (!rep->ctx)
2180  continue;
2181  if (!cur || rep->cur_timestamp < mints) {
2182  cur = rep;
2183  mints = rep->cur_timestamp;
2184  }
2185  }
2186 
2187  for (i = 0; i < c->n_subtitles; i++) {
2188  rep = c->subtitles[i];
2189  if (!rep->ctx)
2190  continue;
2191  if (!cur || rep->cur_timestamp < mints) {
2192  cur = rep;
2193  mints = rep->cur_timestamp;
2194  }
2195  }
2196 
2197  if (!cur) {
2198  return AVERROR_INVALIDDATA;
2199  }
2200  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2201  ret = av_read_frame(cur->ctx, pkt);
2202  if (ret >= 0) {
2203  /* If we got a packet, return it */
2204  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2205  pkt->stream_index = cur->stream_index;
2206  return 0;
2207  }
2208  if (cur->is_restart_needed) {
2209  cur->cur_seg_offset = 0;
2210  cur->init_sec_buf_read_offset = 0;
2211  cur->is_restart_needed = 0;
2212  ff_format_io_close(cur->parent, &cur->input);
2213  ret = reopen_demux_for_component(s, cur);
2214  }
2215  }
2216  return AVERROR_EOF;
2217 }
2218 
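/* Demuxer read_close callback: free all representation lists and
 * manifest-level state. */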
2219 static int dash_close(AVFormatContext *s)
2220 {
2221  DASHContext *c = s->priv_data;
2222  free_audio_list(c);
2223  free_video_list(c);
2224  free_subtitle_list(c);
2225  av_dict_free(&c->avio_opts);
2226  av_freep(&c->base_url);
2227  return 0;
2228 }
2229 
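/* Seek one representation to seek_pos_msec: either seek within the single
 * fragment, or map the target time to a segment number via the
 * SegmentTimeline or the constant fragment duration and reopen the nested
 * demuxer (skipped for dry runs). */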
2230 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2231 {
2232  int ret = 0;
2233  int i = 0;
2234  int j = 0;
2235  int64_t duration = 0;
2236 
2237  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms] %s\n",
2238  seek_pos_msec, dry_run ? " (dry)" : "");
2239 
2240  // single fragment mode
2241  if (pls->n_fragments == 1) {
2242  pls->cur_timestamp = 0;
2243  pls->cur_seg_offset = 0;
2244  if (dry_run)
2245  return 0;
2246  ff_read_frame_flush(pls->ctx);
2247  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2248  }
2249 
2250  ff_format_io_close(pls->parent, &pls->input);
2251 
2252  // find the nearest fragment
2253  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2254  int64_t num = pls->first_seq_no;
2255  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2256  "last_seq_no[%"PRId64"].\n",
2257  (int)pls->n_timelines, (int64_t)pls->last_seq_no);
2258  for (i = 0; i < pls->n_timelines; i++) {
2259  if (pls->timelines[i]->starttime > 0) {
2260  duration = pls->timelines[i]->starttime;
2261  }
2262  duration += pls->timelines[i]->duration;
2263  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2264  goto set_seq_num;
2265  }
2266  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2267  duration += pls->timelines[i]->duration;
2268  num++;
2269  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2270  goto set_seq_num;
2271  }
2272  }
2273  num++;
2274  }
2275 
2276 set_seq_num:
2277  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2278  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"].\n",
2279  (int64_t)pls->cur_seq_no);
2280  } else if (pls->fragment_duration > 0) {
2281  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2282  } else {
2283  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2284  pls->cur_seq_no = pls->first_seq_no;
2285  }
2286  pls->cur_timestamp = 0;
2287  pls->cur_seg_offset = 0;
2288  pls->init_sec_buf_read_offset = 0;
2289  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2290 
2291  return ret;
2292 }
2293 
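/* Demuxer read_seek callback: convert the timestamp to milliseconds and seek
 * every representation, using a dry run for components whose nested demuxer
 * is not currently open. */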
2294 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2295 {
2296  int ret = 0, i;
2297  DASHContext *c = s->priv_data;
2298  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2299  s->streams[stream_index]->time_base.den,
2300  flags & AVSEEK_FLAG_BACKWARD ?
2301  AV_ROUND_DOWN : AV_ROUND_UP);
2302  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2303  return AVERROR(ENOSYS);
2304 
2305  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2306  for (i = 0; i < c->n_videos; i++) {
2307  if (!ret)
2308  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2309  }
2310  for (i = 0; i < c->n_audios; i++) {
2311  if (!ret)
2312  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2313  }
2314  for (i = 0; i < c->n_subtitles; i++) {
2315  if (!ret)
2316  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2317  }
2318 
2319  return ret;
2320 }
2321 
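/* Probe callback: look for an <MPD> root element and a DASH profile URN in
 * the manifest text. */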
2322 static int dash_probe(const AVProbeData *p)
2323 {
2324  if (!av_stristr(p->buf, "<MPD"))
2325  return 0;
2326 
2327  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2328  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2329  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2330  av_stristr(p->buf, "dash:profile:isoff-main:2011") ||
2331  av_stristr(p->buf, "3GPP:PSS:profile:DASH1")) {
2332  return AVPROBE_SCORE_MAX;
2333  }
2334  if (av_stristr(p->buf, "dash:profile")) {
2335  return AVPROBE_SCORE_MAX;
2336  }
2337 
2338  return 0;
2339 }
2340 
2341 #define OFFSET(x) offsetof(DASHContext, x)
2342 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2343 static const AVOption dash_options[] = {
2344  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2345  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2346  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm,ts"},
2347  INT_MIN, INT_MAX, FLAGS},
2348  { "cenc_decryption_key", "Media decryption key (hex)", OFFSET(cenc_decryption_key), AV_OPT_TYPE_STRING, {.str = NULL}, INT_MIN, INT_MAX, .flags = FLAGS },
2349  {NULL}
2350 };
2351 
2352 static const AVClass dash_class = {
2353  .class_name = "dash",
2354  .item_name = av_default_item_name,
2355  .option = dash_options,
2356  .version = LIBAVUTIL_VERSION_INT,
2357 };
2358 
2359 const FFInputFormat ff_dash_demuxer = {
2360  .p.name = "dash",
2361  .p.long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2362  .p.priv_class = &dash_class,
2363  .p.flags = AVFMT_NO_BYTE_SEEK,
2364  .priv_data_size = sizeof(DASHContext),
2365  .flags_internal = FF_INFMT_FLAG_INIT_CLEANUP,
2366  .read_probe = dash_probe,
2367  .read_header = dash_read_header,
2368  .read_packet = dash_read_packet,
2369  .read_close = dash_close,
2370  .read_seek = dash_read_seek,
2371 };
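A minimal usage sketch (not part of dashdec.c) of how an application might reach this demuxer through the public libavformat API. The function name open_dash_example, the manifest URL and the key value are placeholders; the only demuxer-private option used is cenc_decryption_key from dash_options above, which libavformat forwards from the options dictionary to the demuxer.

#include <libavformat/avformat.h>

int open_dash_example(const char *mpd_url, const char *key_hex)
{
    AVFormatContext *fmt = NULL;
    AVDictionary *opts = NULL;
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    /* key_hex is a hypothetical hex key string, only needed for CENC-encrypted content */
    if (key_hex)
        av_dict_set(&opts, "cenc_decryption_key", key_hex, 0);

    ret = avformat_open_input(&fmt, mpd_url, NULL, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        goto end;

    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto end;

    while ((ret = av_read_frame(fmt, pkt)) >= 0) {
        /* pkt->stream_index matches the stream exported for each representation */
        av_packet_unref(pkt);
    }
    if (ret == AVERROR_EOF)
        ret = 0;

end:
    avformat_close_input(&fmt);
    av_packet_free(&pkt);
    return ret;
}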