FFmpeg
dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include "libavutil/bprint.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/time.h"
26 #include "libavutil/parseutils.h"
27 #include "internal.h"
28 #include "avio_internal.h"
29 #include "dash.h"
30 #include "demux.h"
31 
32 #define INITIAL_BUFFER_SIZE 32768
33 
34 struct fragment {
35  int64_t url_offset;
36  int64_t size;
37  char *url;
38 };
39 
40 /*
41  * Reference: ISO/IEC 23009-1 (MPEG-DASH), 2012 edition
42  * Section 5.3.9.6.2
43  * Table 17 — Semantics of the SegmentTimeline element
44  */
45 struct timeline {
46  /* starttime: Element or Attribute Name
47  * specifies the MPD start time, in @timescale units, at which the first
48  * Segment in this series starts, relative to the beginning of the Period.
49  * The value of this attribute must be equal to or greater than the sum of the previous S
50  * element's earliest presentation time and the sum of the contiguous Segment durations.
51  * If the value of the attribute is greater than what is expressed by the previous S element,
52  * it expresses discontinuities in the timeline.
53  * If not present then the value shall be assumed to be zero for the first S element
54  * and for the subsequent S elements, the value shall be assumed to be the sum of
55  * the previous S element's earliest presentation time and contiguous duration
56  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
57  * */
58  int64_t starttime;
59  /* repeat: Element or Attribute Name
60  * specifies the repeat count of the number of following contiguous Segments with
61  * the same duration expressed by the value of @duration. This value is zero-based
62  * (e.g. a value of three means four Segments in the contiguous series).
63  * */
64  int64_t repeat;
65  /* duration: Element or Attribute Name
66  * specifies the Segment duration, in units of the value of the @timescale.
67  * */
68  int64_t duration;
69 };
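/*
 * Illustrative example (hypothetical values, not from the spec text): an S
 * element with t="0", d="90000" and r="2" at @timescale="90000" expands to
 * three contiguous one-second segments whose start times are 0, 90000 and
 * 180000, i.e. presentation times 0 s, 1 s and 2 s.
 */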
70 
71 /*
72  * Each playlist has its own demuxer. If it is currently active,
73  * it has an opened AVIOContext too, and potentially an AVPacket
74  * containing the next packet from this stream.
75  */
76 struct representation {
77  char *url_template;
78  FFIOContext pb;
79  AVIOContext *input;
80  AVFormatContext *parent;
81  AVFormatContext *ctx;
82  int stream_index;
83 
84  char *id;
85  char *lang;
86  int bandwidth;
87  AVRational framerate;
88  AVStream *assoc_stream; /* demuxer stream associated with this representation */
89 
90  int n_fragments;
91  struct fragment **fragments; /* VOD list of fragments for this profile */
92 
93  int n_timelines;
94  struct timeline **timelines;
95 
96  int64_t first_seq_no;
97  int64_t last_seq_no;
98  int64_t start_number; /* used with dynamic segment lists to tell which segments are new */
99 
100  int64_t fragment_duration;
101  int64_t fragment_timescale;
102 
103  int64_t presentation_timeoffset;
104 
105  int64_t cur_seq_no;
106  int64_t cur_seg_offset;
107  int64_t cur_seg_size;
108  struct fragment *cur_seg;
109 
110  /* Currently active Media Initialization Section */
111  struct fragment *init_section;
112  uint8_t *init_sec_buf;
113  uint32_t init_sec_buf_size;
114  uint32_t init_sec_data_len;
115  uint32_t init_sec_buf_read_offset;
116  int64_t cur_timestamp;
117  int is_restart_needed;
118 };
119 
120 typedef struct DASHContext {
121  const AVClass *class;
122  char *base_url;
123 
124  int n_videos;
125  struct representation **videos;
126  int n_audios;
127  struct representation **audios;
128  int n_subtitles;
129  struct representation **subtitles;
130 
131  /* MediaPresentationDescription Attribute */
132  uint64_t media_presentation_duration;
133  uint64_t suggested_presentation_delay;
134  uint64_t availability_start_time;
135  uint64_t availability_end_time;
136  uint64_t publish_time;
137  uint64_t minimum_update_period;
138  uint64_t time_shift_buffer_depth;
139  uint64_t min_buffer_time;
140 
141  /* Period Attribute */
142  uint64_t period_duration;
143  uint64_t period_start;
144 
145  /* AdaptationSet Attribute */
146  char *adaptionset_lang;
147 
148  int is_live;
149  AVIOInterruptCB *interrupt_callback;
150  char *allowed_extensions;
151  AVDictionary *avio_opts;
152  int max_url_size;
153  char *cenc_decryption_key;
154 
155  /* Flags for init section */
156  int is_init_section_common_video;
157  int is_init_section_common_audio;
158  int is_init_section_common_subtitle;
159 
160 } DASHContext;
161 
162 static int ishttp(char *url)
163 {
164  const char *proto_name = avio_find_protocol_name(url);
165  return proto_name && av_strstart(proto_name, "http", NULL);
166 }
167 
168 static int aligned(int val)
169 {
170  return ((val + 0x3F) >> 6) << 6;
171 }
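/*
 * Example: aligned(100) == 128 and aligned(128) == 128; the helper rounds a
 * size up to the next multiple of 64 and is used below to grow the computed
 * URL buffer sizes in 64-byte steps.
 */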
172 
173 static uint64_t get_current_time_in_sec(void)
174 {
175  return av_gettime() / 1000000;
176 }
177 
178 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
179 {
180  struct tm timeinfo;
181  int year = 0;
182  int month = 0;
183  int day = 0;
184  int hour = 0;
185  int minute = 0;
186  int ret = 0;
187  float second = 0.0;
188 
189  /* ISO-8601 date parser */
190  if (!datetime)
191  return 0;
192 
193  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
194  /* year, month, day, hour, minute, second 6 arguments */
195  if (ret != 6) {
196  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec get a wrong time format\n");
197  }
198  timeinfo.tm_year = year - 1900;
199  timeinfo.tm_mon = month - 1;
200  timeinfo.tm_mday = day;
201  timeinfo.tm_hour = hour;
202  timeinfo.tm_min = minute;
203  timeinfo.tm_sec = (int)second;
204 
205  return av_timegm(&timeinfo);
206 }
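/*
 * Example (hypothetical input): "2024-01-02T03:04:05Z" is parsed field by
 * field and converted to seconds since the Unix epoch via av_timegm();
 * fractional seconds are truncated by the (int) cast above.
 */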
207 
208 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
209 {
210  /* ISO-8601 duration parser */
211  uint32_t days = 0;
212  uint32_t hours = 0;
213  uint32_t mins = 0;
214  uint32_t secs = 0;
215  int size = 0;
216  float value = 0;
217  char type = '\0';
218  const char *ptr = duration;
219 
220  while (*ptr) {
221  if (*ptr == 'P' || *ptr == 'T') {
222  ptr++;
223  continue;
224  }
225 
226  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
227  av_log(s, AV_LOG_WARNING, "get_duration_insec get a wrong time format\n");
228  return 0; /* parser error */
229  }
230  switch (type) {
231  case 'D':
232  days = (uint32_t)value;
233  break;
234  case 'H':
235  hours = (uint32_t)value;
236  break;
237  case 'M':
238  mins = (uint32_t)value;
239  break;
240  case 'S':
241  secs = (uint32_t)value;
242  break;
243  default:
244  // handle invalid type
245  break;
246  }
247  ptr += size;
248  }
249  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
250 }
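/*
 * Example (hypothetical input): "P1DT2H3M4S" yields days=1, hours=2, mins=3,
 * secs=4, i.e. ((1*24 + 2)*60 + 3)*60 + 4 = 93784 seconds. Fractional values
 * such as "PT0.5S" are truncated by the uint32_t casts.
 */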
251 
252 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
253 {
254  int64_t start_time = 0;
255  int64_t i = 0;
256  int64_t j = 0;
257  int64_t num = 0;
258 
259  if (pls->n_timelines) {
260  for (i = 0; i < pls->n_timelines; i++) {
261  if (pls->timelines[i]->starttime > 0) {
262  start_time = pls->timelines[i]->starttime;
263  }
264  if (num == cur_seq_no)
265  goto finish;
266 
267  start_time += pls->timelines[i]->duration;
268 
269  if (pls->timelines[i]->repeat == -1) {
270  start_time = pls->timelines[i]->duration * cur_seq_no;
271  goto finish;
272  }
273 
274  for (j = 0; j < pls->timelines[i]->repeat; j++) {
275  num++;
276  if (num == cur_seq_no)
277  goto finish;
278  start_time += pls->timelines[i]->duration;
279  }
280  num++;
281  }
282  }
283 finish:
284  return start_time;
285 }
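/*
 * Example: with a single timeline entry {starttime=1000, duration=500,
 * repeat=2}, cur_seq_no 0, 1 and 2 resolve to start times 1000, 1500 and
 * 2000 (in @timescale units); a repeat of -1 instead extrapolates
 * duration * cur_seq_no.
 */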
286 
287 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
288 {
289  int64_t i = 0;
290  int64_t j = 0;
291  int64_t num = 0;
292  int64_t start_time = 0;
293 
294  for (i = 0; i < pls->n_timelines; i++) {
295  if (pls->timelines[i]->starttime > 0) {
296  start_time = pls->timelines[i]->starttime;
297  }
298  if (start_time > cur_time)
299  goto finish;
300 
301  start_time += pls->timelines[i]->duration;
302  for (j = 0; j < pls->timelines[i]->repeat; j++) {
303  num++;
304  if (start_time > cur_time)
305  goto finish;
306  start_time += pls->timelines[i]->duration;
307  }
308  num++;
309  }
310 
311  return -1;
312 
313 finish:
314  return num;
315 }
316 
317 static void free_fragment(struct fragment **seg)
318 {
319  if (!(*seg)) {
320  return;
321  }
322  av_freep(&(*seg)->url);
323  av_freep(seg);
324 }
325 
326 static void free_fragment_list(struct representation *pls)
327 {
328  int i;
329 
330  for (i = 0; i < pls->n_fragments; i++) {
331  free_fragment(&pls->fragments[i]);
332  }
333  av_freep(&pls->fragments);
334  pls->n_fragments = 0;
335 }
336 
337 static void free_timelines_list(struct representation *pls)
338 {
339  int i;
340 
341  for (i = 0; i < pls->n_timelines; i++) {
342  av_freep(&pls->timelines[i]);
343  }
344  av_freep(&pls->timelines);
345  pls->n_timelines = 0;
346 }
347 
348 static void free_representation(struct representation *pls)
349 {
350  free_fragment_list(pls);
351  free_timelines_list(pls);
352  free_fragment(&pls->cur_seg);
353  free_fragment(&pls->init_section);
354  av_freep(&pls->init_sec_buf);
355  av_freep(&pls->pb.pub.buffer);
356  ff_format_io_close(pls->parent, &pls->input);
357  if (pls->ctx) {
358  pls->ctx->pb = NULL;
359  avformat_close_input(&pls->ctx);
360  }
361 
362  av_freep(&pls->url_template);
363  av_freep(&pls->lang);
364  av_freep(&pls->id);
365  av_freep(&pls);
366 }
367 
368 static void free_video_list(DASHContext *c)
369 {
370  int i;
371  for (i = 0; i < c->n_videos; i++) {
372  struct representation *pls = c->videos[i];
373  free_representation(pls);
374  }
375  av_freep(&c->videos);
376  c->n_videos = 0;
377 }
378 
379 static void free_audio_list(DASHContext *c)
380 {
381  int i;
382  for (i = 0; i < c->n_audios; i++) {
383  struct representation *pls = c->audios[i];
384  free_representation(pls);
385  }
386  av_freep(&c->audios);
387  c->n_audios = 0;
388 }
389 
390 static void free_subtitle_list(DASHContext *c)
391 {
392  int i;
393  for (i = 0; i < c->n_subtitles; i++) {
394  struct representation *pls = c->subtitles[i];
395  free_representation(pls);
396  }
397  av_freep(&c->subtitles);
398  c->n_subtitles = 0;
399 }
400 
401 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
402  AVDictionary **opts, AVDictionary *opts2, int *is_http)
403 {
404  DASHContext *c = s->priv_data;
405  AVDictionary *tmp = NULL;
406  const char *proto_name = NULL;
407  int proto_name_len;
408  int ret;
409 
410  if (av_strstart(url, "crypto", NULL)) {
411  if (url[6] == '+' || url[6] == ':')
412  proto_name = avio_find_protocol_name(url + 7);
413  }
414 
415  if (!proto_name)
416  proto_name = avio_find_protocol_name(url);
417 
418  if (!proto_name)
419  return AVERROR_INVALIDDATA;
420 
421  proto_name_len = strlen(proto_name);
422  // only http(s) & file are allowed
423  if (av_strstart(proto_name, "file", NULL)) {
424  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
425  av_log(s, AV_LOG_ERROR,
426  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
427  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
428  url);
429  return AVERROR_INVALIDDATA;
430  }
431  } else if (av_strstart(proto_name, "http", NULL)) {
432  ;
433  } else
434  return AVERROR_INVALIDDATA;
435 
436  if (!strncmp(proto_name, url, proto_name_len) && url[proto_name_len] == ':')
437  ;
438  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, proto_name_len) && url[7 + proto_name_len] == ':')
439  ;
440  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
441  return AVERROR_INVALIDDATA;
442 
443  av_freep(pb);
444  av_dict_copy(&tmp, *opts, 0);
445  av_dict_copy(&tmp, opts2, 0);
446  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
447  if (ret >= 0) {
448  // update cookies on http response with setcookies.
449  char *new_cookies = NULL;
450 
451  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
452  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
453 
454  if (new_cookies) {
455  av_dict_set(opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
456  }
457 
458  }
459 
460  av_dict_free(&tmp);
461 
462  if (is_http)
463  *is_http = av_strstart(proto_name, "http", NULL);
464 
465  return ret;
466 }
467 
468 static char *get_content_url(xmlNodePtr *baseurl_nodes,
469  int n_baseurl_nodes,
470  int max_url_size,
471  char *rep_id_val,
472  char *rep_bandwidth_val,
473  char *val)
474 {
475  int i;
476  char *text;
477  char *url = NULL;
478  char *tmp_str = av_mallocz(max_url_size);
479 
480  if (!tmp_str)
481  return NULL;
482 
483  for (i = 0; i < n_baseurl_nodes; ++i) {
484  if (baseurl_nodes[i] &&
485  baseurl_nodes[i]->children &&
486  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
487  text = xmlNodeGetContent(baseurl_nodes[i]->children);
488  if (text) {
489  memset(tmp_str, 0, max_url_size);
490  ff_make_absolute_url(tmp_str, max_url_size, "", text);
491  xmlFree(text);
492  }
493  }
494  }
495 
496  if (val)
497  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
498 
499  if (rep_id_val) {
500  url = av_strireplace(tmp_str, "$RepresentationID$", rep_id_val);
501  if (!url) {
502  goto end;
503  }
504  av_strlcpy(tmp_str, url, max_url_size);
505  }
506  if (rep_bandwidth_val && tmp_str[0] != '\0') {
507  // free any previously assigned url before reassigning
508  av_free(url);
509  url = av_strireplace(tmp_str, "$Bandwidth$", rep_bandwidth_val);
510  if (!url) {
511  goto end;
512  }
513  }
514 end:
515  av_free(tmp_str);
516  return url;
517 }
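/*
 * Example (hypothetical values): with a BaseURL of "http://example.com/" and
 * val "seg-$RepresentationID$-$Bandwidth$.m4s", rep_id_val "video1" and
 * rep_bandwidth_val "800000" resolve to
 * "http://example.com/seg-video1-800000.m4s".
 */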
518 
519 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
520 {
521  int i;
522  char *val;
523 
524  for (i = 0; i < n_nodes; ++i) {
525  if (nodes[i]) {
526  val = xmlGetProp(nodes[i], attrname);
527  if (val)
528  return val;
529  }
530  }
531 
532  return NULL;
533 }
534 
535 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
536 {
537  xmlNodePtr node = rootnode;
538  if (!node) {
539  return NULL;
540  }
541 
542  node = xmlFirstElementChild(node);
543  while (node) {
544  if (!av_strcasecmp(node->name, nodename)) {
545  return node;
546  }
547  node = xmlNextElementSibling(node);
548  }
549  return NULL;
550 }
551 
552 static enum AVMediaType get_content_type(xmlNodePtr node)
553 {
554  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
555  int i = 0;
556  const char *attr;
557  char *val = NULL;
558 
559  if (node) {
560  for (i = 0; i < 2; i++) {
561  attr = i ? "mimeType" : "contentType";
562  val = xmlGetProp(node, attr);
563  if (val) {
564  if (av_stristr(val, "video")) {
565  type = AVMEDIA_TYPE_VIDEO;
566  } else if (av_stristr(val, "audio")) {
567  type = AVMEDIA_TYPE_AUDIO;
568  } else if (av_stristr(val, "text")) {
569  type = AVMEDIA_TYPE_SUBTITLE;
570  }
571  xmlFree(val);
572  }
573  }
574  }
575  return type;
576 }
577 
578 static struct fragment * get_Fragment(char *range)
579 {
580  struct fragment * seg = av_mallocz(sizeof(struct fragment));
581 
582  if (!seg)
583  return NULL;
584 
585  seg->size = -1;
586  if (range) {
587  char *str_end_offset;
588  char *str_offset = av_strtok(range, "-", &str_end_offset);
589  seg->url_offset = strtoll(str_offset, NULL, 10);
590  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset + 1;
591  }
592 
593  return seg;
594 }
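/*
 * Example: a byte-range string "100-199" gives url_offset = 100 and
 * size = 100 (ranges are inclusive); without a range the size stays -1,
 * meaning "read until the end of the resource".
 */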
595 
596 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
597  xmlNodePtr fragmenturl_node,
598  xmlNodePtr *baseurl_nodes,
599  char *rep_id_val,
600  char *rep_bandwidth_val)
601 {
602  DASHContext *c = s->priv_data;
603  char *initialization_val = NULL;
604  char *media_val = NULL;
605  char *range_val = NULL;
606  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
607  int err;
608 
609  if (!av_strcasecmp(fragmenturl_node->name, "Initialization")) {
610  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
611  range_val = xmlGetProp(fragmenturl_node, "range");
612  if (initialization_val || range_val) {
613  free_fragment(&rep->init_section);
614  rep->init_section = get_Fragment(range_val);
615  xmlFree(range_val);
616  if (!rep->init_section) {
617  xmlFree(initialization_val);
618  return AVERROR(ENOMEM);
619  }
620  rep->init_section->url = get_content_url(baseurl_nodes, 4,
621  max_url_size,
622  rep_id_val,
623  rep_bandwidth_val,
624  initialization_val);
625  xmlFree(initialization_val);
626  if (!rep->init_section->url) {
627  av_freep(&rep->init_section);
628  return AVERROR(ENOMEM);
629  }
630  }
631  } else if (!av_strcasecmp(fragmenturl_node->name, "SegmentURL")) {
632  media_val = xmlGetProp(fragmenturl_node, "media");
633  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
634  if (media_val || range_val) {
635  struct fragment *seg = get_Fragment(range_val);
636  xmlFree(range_val);
637  if (!seg) {
638  xmlFree(media_val);
639  return AVERROR(ENOMEM);
640  }
641  seg->url = get_content_url(baseurl_nodes, 4,
642  max_url_size,
643  rep_id_val,
644  rep_bandwidth_val,
645  media_val);
646  xmlFree(media_val);
647  if (!seg->url) {
648  av_free(seg);
649  return AVERROR(ENOMEM);
650  }
651  err = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
652  if (err < 0) {
653  free_fragment(&seg);
654  return err;
655  }
656  }
657  }
658 
659  return 0;
660 }
661 
662 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
663  xmlNodePtr fragment_timeline_node)
664 {
665  xmlAttrPtr attr = NULL;
666  char *val = NULL;
667  int err;
668 
669  if (!av_strcasecmp(fragment_timeline_node->name, "S")) {
670  struct timeline *tml = av_mallocz(sizeof(struct timeline));
671  if (!tml) {
672  return AVERROR(ENOMEM);
673  }
674  attr = fragment_timeline_node->properties;
675  while (attr) {
676  val = xmlGetProp(fragment_timeline_node, attr->name);
677 
678  if (!val) {
679  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
680  continue;
681  }
682 
683  if (!av_strcasecmp(attr->name, "t")) {
684  tml->starttime = (int64_t)strtoll(val, NULL, 10);
685  } else if (!av_strcasecmp(attr->name, "r")) {
686  tml->repeat =(int64_t) strtoll(val, NULL, 10);
687  } else if (!av_strcasecmp(attr->name, "d")) {
688  tml->duration = (int64_t)strtoll(val, NULL, 10);
689  }
690  attr = attr->next;
691  xmlFree(val);
692  }
693  err = av_dynarray_add_nofree(&rep->timelines, &rep->n_timelines, tml);
694  if (err < 0) {
695  av_free(tml);
696  return err;
697  }
698  }
699 
700  return 0;
701 }
702 
703 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
704 {
705  char *tmp_str = NULL;
706  char *path = NULL;
707  char *mpdName = NULL;
708  xmlNodePtr node = NULL;
709  char *baseurl = NULL;
710  char *root_url = NULL;
711  char *text = NULL;
712  char *tmp = NULL;
713  int isRootHttp = 0;
714  char token ='/';
715  int start = 0;
716  int rootId = 0;
717  int updated = 0;
718  int size = 0;
719  int i;
720  int tmp_max_url_size = strlen(url);
721 
722  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
723  text = xmlNodeGetContent(baseurl_nodes[i]);
724  if (!text)
725  continue;
726  tmp_max_url_size += strlen(text);
727  if (ishttp(text)) {
728  xmlFree(text);
729  break;
730  }
731  xmlFree(text);
732  }
733 
734  tmp_max_url_size = aligned(tmp_max_url_size);
735  text = av_mallocz(tmp_max_url_size);
736  if (!text) {
737  updated = AVERROR(ENOMEM);
738  goto end;
739  }
740  av_strlcpy(text, url, strlen(url)+1);
741  tmp = text;
742  while (mpdName = av_strtok(tmp, "/", &tmp)) {
743  size = strlen(mpdName);
744  }
745  av_free(text);
746 
747  path = av_mallocz(tmp_max_url_size);
748  tmp_str = av_mallocz(tmp_max_url_size);
749  if (!tmp_str || !path) {
750  updated = AVERROR(ENOMEM);
751  goto end;
752  }
753 
754  av_strlcpy (path, url, strlen(url) - size + 1);
755  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
756  if (!(node = baseurl_nodes[rootId])) {
757  continue;
758  }
759  text = xmlNodeGetContent(node);
760  if (ishttp(text)) {
761  xmlFree(text);
762  break;
763  }
764  xmlFree(text);
765  }
766 
767  node = baseurl_nodes[rootId];
768  baseurl = xmlNodeGetContent(node);
769  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
770  if (node) {
771  xmlNodeSetContent(node, root_url);
772  updated = 1;
773  }
774 
775  size = strlen(root_url);
776  isRootHttp = ishttp(root_url);
777 
778  if (size > 0 && root_url[size - 1] != token) {
779  av_strlcat(root_url, "/", size + 2);
780  size += 2;
781  }
782 
783  for (i = 0; i < n_baseurl_nodes; ++i) {
784  if (i == rootId) {
785  continue;
786  }
787  text = xmlNodeGetContent(baseurl_nodes[i]);
788  if (text && !av_strstart(text, "/", NULL)) {
789  memset(tmp_str, 0, strlen(tmp_str));
790  if (!ishttp(text) && isRootHttp) {
791  av_strlcpy(tmp_str, root_url, size + 1);
792  }
793  start = (text[0] == token);
794  if (start && av_stristr(tmp_str, text)) {
795  char *p = tmp_str;
796  if (!av_strncasecmp(tmp_str, "http://", 7)) {
797  p += 7;
798  } else if (!av_strncasecmp(tmp_str, "https://", 8)) {
799  p += 8;
800  }
801  p = strchr(p, '/');
802  memset(p + 1, 0, strlen(p));
803  }
804  av_strlcat(tmp_str, text + start, tmp_max_url_size);
805  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
806  updated = 1;
807  xmlFree(text);
808  }
809  }
810 
811 end:
812  if (tmp_max_url_size > *max_url_size) {
813  *max_url_size = tmp_max_url_size;
814  }
815  av_free(path);
816  av_free(tmp_str);
817  xmlFree(baseurl);
818  return updated;
819 
820 }
821 
822 static int parse_manifest_representation(AVFormatContext *s, const char *url,
823  xmlNodePtr node,
824  xmlNodePtr adaptionset_node,
825  xmlNodePtr mpd_baseurl_node,
826  xmlNodePtr period_baseurl_node,
827  xmlNodePtr period_segmenttemplate_node,
828  xmlNodePtr period_segmentlist_node,
829  xmlNodePtr fragment_template_node,
830  xmlNodePtr content_component_node,
831  xmlNodePtr adaptionset_baseurl_node,
832  xmlNodePtr adaptionset_segmentlist_node,
833  xmlNodePtr adaptionset_supplementalproperty_node)
834 {
835  int32_t ret = 0;
836  DASHContext *c = s->priv_data;
837  struct representation *rep = NULL;
838  struct fragment *seg = NULL;
839  xmlNodePtr representation_segmenttemplate_node = NULL;
840  xmlNodePtr representation_baseurl_node = NULL;
841  xmlNodePtr representation_segmentlist_node = NULL;
842  xmlNodePtr segmentlists_tab[3];
843  xmlNodePtr fragment_timeline_node = NULL;
844  xmlNodePtr fragment_templates_tab[5];
845  char *val = NULL;
846  xmlNodePtr baseurl_nodes[4];
847  xmlNodePtr representation_node = node;
848  char *rep_bandwidth_val;
849  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
850 
851  // try to get the type from the Representation
852  if (type == AVMEDIA_TYPE_UNKNOWN)
853  type = get_content_type(representation_node);
854  // try to get the type from the ContentComponent
855  if (type == AVMEDIA_TYPE_UNKNOWN)
856  type = get_content_type(content_component_node);
857  // try to get the type from the AdaptationSet
858  if (type == AVMEDIA_TYPE_UNKNOWN)
859  type = get_content_type(adaptionset_node);
860 
861  if (type == AVMEDIA_TYPE_UNKNOWN) {
862  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipping unsupported representation type\n", url);
863  return 0;
864  }
865 
866  // convert selected representation to our internal struct
867  rep = av_mallocz(sizeof(struct representation));
868  if (!rep)
869  return AVERROR(ENOMEM);
870  if (c->adaptionset_lang) {
871  rep->lang = av_strdup(c->adaptionset_lang);
872  if (!rep->lang) {
873  av_log(s, AV_LOG_ERROR, "alloc language memory failure\n");
874  av_freep(&rep);
875  return AVERROR(ENOMEM);
876  }
877  }
878  rep->parent = s;
879  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
880  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
881  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
882  rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
883  val = xmlGetProp(representation_node, "id");
884  if (val) {
885  rep->id = av_strdup(val);
886  xmlFree(val);
887  if (!rep->id)
888  goto enomem;
889  }
890 
891  baseurl_nodes[0] = mpd_baseurl_node;
892  baseurl_nodes[1] = period_baseurl_node;
893  baseurl_nodes[2] = adaptionset_baseurl_node;
894  baseurl_nodes[3] = representation_baseurl_node;
895 
896  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
897  c->max_url_size = aligned(c->max_url_size
898  + (rep->id ? strlen(rep->id) : 0)
899  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
900  if (ret == AVERROR(ENOMEM) || ret == 0)
901  goto free;
902  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
903  fragment_timeline_node = NULL;
904  fragment_templates_tab[0] = representation_segmenttemplate_node;
905  fragment_templates_tab[1] = adaptionset_segmentlist_node;
906  fragment_templates_tab[2] = fragment_template_node;
907  fragment_templates_tab[3] = period_segmenttemplate_node;
908  fragment_templates_tab[4] = period_segmentlist_node;
909 
910  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
911  if (val) {
912  rep->init_section = av_mallocz(sizeof(struct fragment));
913  if (!rep->init_section) {
914  xmlFree(val);
915  goto enomem;
916  }
917  c->max_url_size = aligned(c->max_url_size + strlen(val));
918  rep->init_section->url = get_content_url(baseurl_nodes, 4,
919  c->max_url_size, rep->id,
920  rep_bandwidth_val, val);
921  xmlFree(val);
922  if (!rep->init_section->url)
923  goto enomem;
924  rep->init_section->size = -1;
925  }
926  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
927  if (val) {
928  c->max_url_size = aligned(c->max_url_size + strlen(val));
929  rep->url_template = get_content_url(baseurl_nodes, 4,
930  c->max_url_size, rep->id,
931  rep_bandwidth_val, val);
932  xmlFree(val);
933  }
934  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
935  if (val) {
936  rep->presentation_timeoffset = (int64_t) strtoll(val, NULL, 10);
937  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
938  xmlFree(val);
939  }
940  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
941  if (val) {
942  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
943  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
944  xmlFree(val);
945  }
946  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
947  if (val) {
948  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
949  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
950  xmlFree(val);
951  }
952  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
953  if (val) {
954  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
955  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
956  xmlFree(val);
957  }
958  if (adaptionset_supplementalproperty_node) {
959  if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
960  val = xmlGetProp(adaptionset_supplementalproperty_node,"value");
961  if (!val) {
962  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
963  } else {
964  rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
965  xmlFree(val);
966  }
967  }
968  }
969 
970  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
971 
972  if (!fragment_timeline_node)
973  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
974  if (!fragment_timeline_node)
975  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
976  if (!fragment_timeline_node)
977  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
978  if (fragment_timeline_node) {
979  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
980  while (fragment_timeline_node) {
981  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
982  if (ret < 0)
983  goto free;
984  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
985  }
986  }
987  } else if (representation_baseurl_node && !representation_segmentlist_node) {
988  seg = av_mallocz(sizeof(struct fragment));
989  if (!seg)
990  goto enomem;
991  ret = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
992  if (ret < 0) {
993  av_free(seg);
994  goto free;
995  }
996  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size,
997  rep->id, rep_bandwidth_val, NULL);
998  if (!seg->url)
999  goto enomem;
1000  seg->size = -1;
1001  } else if (representation_segmentlist_node) {
1002  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
1003  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
1004  xmlNodePtr fragmenturl_node = NULL;
1005  segmentlists_tab[0] = representation_segmentlist_node;
1006  segmentlists_tab[1] = adaptionset_segmentlist_node;
1007  segmentlists_tab[2] = period_segmentlist_node;
1008 
1009  val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
1010  if (val) {
1011  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
1012  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
1013  xmlFree(val);
1014  }
1015  val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1016  if (val) {
1017  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
1018  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1019  xmlFree(val);
1020  }
1021  val = get_val_from_nodes_tab(segmentlists_tab, 3, "startNumber");
1022  if (val) {
1023  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
1024  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
1025  xmlFree(val);
1026  }
1027 
1028  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1029  while (fragmenturl_node) {
1030  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1031  baseurl_nodes, rep->id,
1032  rep_bandwidth_val);
1033  if (ret < 0)
1034  goto free;
1035  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1036  }
1037 
1038  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1039  if (!fragment_timeline_node)
1040  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1041  if (fragment_timeline_node) {
1042  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1043  while (fragment_timeline_node) {
1044  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1045  if (ret < 0)
1046  goto free;
1047  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1048  }
1049  }
1050  } else {
1051  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id '%s' \n",
1052  rep->id ? rep->id : "");
1053  goto free;
1054  }
1055 
1056  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1057  rep->fragment_timescale = 1;
1058  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1059  rep->framerate = av_make_q(0, 0);
1060  if (type == AVMEDIA_TYPE_VIDEO) {
1061  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
1062  if (rep_framerate_val) {
1063  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1064  if (ret < 0)
1065  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1066  xmlFree(rep_framerate_val);
1067  }
1068  }
1069 
1070  switch (type) {
1071  case AVMEDIA_TYPE_VIDEO:
1072  ret = av_dynarray_add_nofree(&c->videos, &c->n_videos, rep);
1073  break;
1074  case AVMEDIA_TYPE_AUDIO:
1075  ret = av_dynarray_add_nofree(&c->audios, &c->n_audios, rep);
1076  break;
1077  case AVMEDIA_TYPE_SUBTITLE:
1078  ret = av_dynarray_add_nofree(&c->subtitles, &c->n_subtitles, rep);
1079  break;
1080  }
1081  if (ret < 0)
1082  goto free;
1083 
1084 end:
1085  if (rep_bandwidth_val)
1086  xmlFree(rep_bandwidth_val);
1087 
1088  return ret;
1089 enomem:
1090  ret = AVERROR(ENOMEM);
1091 free:
1092  free_representation(rep);
1093  goto end;
1094 }
1095 
1096 static int parse_manifest_adaptationset_attr(AVFormatContext *s, xmlNodePtr adaptionset_node)
1097 {
1098  DASHContext *c = s->priv_data;
1099 
1100  if (!adaptionset_node) {
1101  av_log(s, AV_LOG_WARNING, "Cannot get AdaptionSet\n");
1102  return AVERROR(EINVAL);
1103  }
1104  c->adaptionset_lang = xmlGetProp(adaptionset_node, "lang");
1105 
1106  return 0;
1107 }
1108 
1109 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1110  xmlNodePtr adaptionset_node,
1111  xmlNodePtr mpd_baseurl_node,
1112  xmlNodePtr period_baseurl_node,
1113  xmlNodePtr period_segmenttemplate_node,
1114  xmlNodePtr period_segmentlist_node)
1115 {
1116  int ret = 0;
1117  DASHContext *c = s->priv_data;
1118  xmlNodePtr fragment_template_node = NULL;
1119  xmlNodePtr content_component_node = NULL;
1120  xmlNodePtr adaptionset_baseurl_node = NULL;
1121  xmlNodePtr adaptionset_segmentlist_node = NULL;
1122  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1123  xmlNodePtr node = NULL;
1124 
1125  ret = parse_manifest_adaptationset_attr(s, adaptionset_node);
1126  if (ret < 0)
1127  return ret;
1128 
1129  node = xmlFirstElementChild(adaptionset_node);
1130  while (node) {
1131  if (!av_strcasecmp(node->name, "SegmentTemplate")) {
1132  fragment_template_node = node;
1133  } else if (!av_strcasecmp(node->name, "ContentComponent")) {
1134  content_component_node = node;
1135  } else if (!av_strcasecmp(node->name, "BaseURL")) {
1136  adaptionset_baseurl_node = node;
1137  } else if (!av_strcasecmp(node->name, "SegmentList")) {
1138  adaptionset_segmentlist_node = node;
1139  } else if (!av_strcasecmp(node->name, "SupplementalProperty")) {
1140  adaptionset_supplementalproperty_node = node;
1141  } else if (!av_strcasecmp(node->name, "Representation")) {
1142  ret = parse_manifest_representation(s, url, node,
1143  adaptionset_node,
1144  mpd_baseurl_node,
1145  period_baseurl_node,
1146  period_segmenttemplate_node,
1147  period_segmentlist_node,
1148  fragment_template_node,
1149  content_component_node,
1150  adaptionset_baseurl_node,
1151  adaptionset_segmentlist_node,
1152  adaptionset_supplementalproperty_node);
1153  if (ret < 0)
1154  goto err;
1155  }
1156  node = xmlNextElementSibling(node);
1157  }
1158 
1159 err:
1160  xmlFree(c->adaptionset_lang);
1161  c->adaptionset_lang = NULL;
1162  return ret;
1163 }
1164 
1165 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1166 {
1167  xmlChar *val = NULL;
1168 
1169  node = xmlFirstElementChild(node);
1170  while (node) {
1171  if (!av_strcasecmp(node->name, "Title")) {
1172  val = xmlNodeGetContent(node);
1173  if (val) {
1174  av_dict_set(&s->metadata, "Title", val, 0);
1175  }
1176  } else if (!av_strcasecmp(node->name, "Source")) {
1177  val = xmlNodeGetContent(node);
1178  if (val) {
1179  av_dict_set(&s->metadata, "Source", val, 0);
1180  }
1181  } else if (!av_strcasecmp(node->name, "Copyright")) {
1182  val = xmlNodeGetContent(node);
1183  if (val) {
1184  av_dict_set(&s->metadata, "Copyright", val, 0);
1185  }
1186  }
1187  node = xmlNextElementSibling(node);
1188  xmlFree(val);
1189  val = NULL;
1190  }
1191  return 0;
1192 }
1193 
1194 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1195 {
1196  DASHContext *c = s->priv_data;
1197  int ret = 0;
1198  int close_in = 0;
1199  AVBPrint buf;
1200  AVDictionary *opts = NULL;
1201  xmlDoc *doc = NULL;
1202  xmlNodePtr root_element = NULL;
1203  xmlNodePtr node = NULL;
1204  xmlNodePtr period_node = NULL;
1205  xmlNodePtr tmp_node = NULL;
1206  xmlNodePtr mpd_baseurl_node = NULL;
1207  xmlNodePtr period_baseurl_node = NULL;
1208  xmlNodePtr period_segmenttemplate_node = NULL;
1209  xmlNodePtr period_segmentlist_node = NULL;
1210  xmlNodePtr adaptionset_node = NULL;
1211  xmlAttrPtr attr = NULL;
1212  char *val = NULL;
1213  uint32_t period_duration_sec = 0;
1214  uint32_t period_start_sec = 0;
1215 
1216  if (!in) {
1217  close_in = 1;
1218 
1219  av_dict_copy(&opts, c->avio_opts, 0);
1220  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1221  av_dict_free(&opts);
1222  if (ret < 0)
1223  return ret;
1224  }
1225 
1226  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&c->base_url) < 0)
1227  c->base_url = av_strdup(url);
1228 
1229  av_bprint_init(&buf, 0, INT_MAX); // xmlReadMemory uses integer bufsize
1230 
1231  if ((ret = avio_read_to_bprint(in, &buf, SIZE_MAX)) < 0 ||
1232  !avio_feof(in)) {
1233  av_log(s, AV_LOG_ERROR, "Unable to read to manifest '%s'\n", url);
1234  if (ret == 0)
1236  } else {
1237  LIBXML_TEST_VERSION
1238 
1239  doc = xmlReadMemory(buf.str, buf.len, c->base_url, NULL, 0);
1240  root_element = xmlDocGetRootElement(doc);
1241  node = root_element;
1242 
1243  if (!node) {
1244  ret = AVERROR_INVALIDDATA;
1245  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1246  goto cleanup;
1247  }
1248 
1249  if (node->type != XML_ELEMENT_NODE ||
1250  av_strcasecmp(node->name, "MPD")) {
1251  ret = AVERROR_INVALIDDATA;
1252  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1253  goto cleanup;
1254  }
1255 
1256  val = xmlGetProp(node, "type");
1257  if (!val) {
1258  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1259  ret = AVERROR_INVALIDDATA;
1260  goto cleanup;
1261  }
1262  if (!av_strcasecmp(val, "dynamic"))
1263  c->is_live = 1;
1264  xmlFree(val);
1265 
1266  attr = node->properties;
1267  while (attr) {
1268  val = xmlGetProp(node, attr->name);
1269 
1270  if (!av_strcasecmp(attr->name, "availabilityStartTime")) {
1271  c->availability_start_time = get_utc_date_time_insec(s, val);
1272  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1273  } else if (!av_strcasecmp(attr->name, "availabilityEndTime")) {
1274  c->availability_end_time = get_utc_date_time_insec(s, val);
1275  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1276  } else if (!av_strcasecmp(attr->name, "publishTime")) {
1277  c->publish_time = get_utc_date_time_insec(s, val);
1278  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1279  } else if (!av_strcasecmp(attr->name, "minimumUpdatePeriod")) {
1280  c->minimum_update_period = get_duration_insec(s, val);
1281  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1282  } else if (!av_strcasecmp(attr->name, "timeShiftBufferDepth")) {
1283  c->time_shift_buffer_depth = get_duration_insec(s, val);
1284  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1285  } else if (!av_strcasecmp(attr->name, "minBufferTime")) {
1286  c->min_buffer_time = get_duration_insec(s, val);
1287  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1288  } else if (!av_strcasecmp(attr->name, "suggestedPresentationDelay")) {
1289  c->suggested_presentation_delay = get_duration_insec(s, val);
1290  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1291  } else if (!av_strcasecmp(attr->name, "mediaPresentationDuration")) {
1292  c->media_presentation_duration = get_duration_insec(s, val);
1293  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1294  }
1295  attr = attr->next;
1296  xmlFree(val);
1297  }
1298 
1299  tmp_node = find_child_node_by_name(node, "BaseURL");
1300  if (tmp_node) {
1301  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1302  } else {
1303  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1304  }
1305 
1306  // for now we handle only one Period: the one with the longest duration
1307  node = xmlFirstElementChild(node);
1308  while (node) {
1309  if (!av_strcasecmp(node->name, "Period")) {
1310  period_duration_sec = 0;
1311  period_start_sec = 0;
1312  attr = node->properties;
1313  while (attr) {
1314  val = xmlGetProp(node, attr->name);
1315  if (!av_strcasecmp(attr->name, "duration")) {
1316  period_duration_sec = get_duration_insec(s, val);
1317  } else if (!av_strcasecmp(attr->name, "start")) {
1318  period_start_sec = get_duration_insec(s, val);
1319  }
1320  attr = attr->next;
1321  xmlFree(val);
1322  }
1323  if ((period_duration_sec) >= (c->period_duration)) {
1324  period_node = node;
1325  c->period_duration = period_duration_sec;
1326  c->period_start = period_start_sec;
1327  if (c->period_start > 0)
1328  c->media_presentation_duration = c->period_duration;
1329  }
1330  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1331  parse_programinformation(s, node);
1332  }
1333  node = xmlNextElementSibling(node);
1334  }
1335  if (!period_node) {
1336  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1337  ret = AVERROR_INVALIDDATA;
1338  goto cleanup;
1339  }
1340 
1341  adaptionset_node = xmlFirstElementChild(period_node);
1342  while (adaptionset_node) {
1343  if (!av_strcasecmp(adaptionset_node->name, "BaseURL")) {
1344  period_baseurl_node = adaptionset_node;
1345  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentTemplate")) {
1346  period_segmenttemplate_node = adaptionset_node;
1347  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentList")) {
1348  period_segmentlist_node = adaptionset_node;
1349  } else if (!av_strcasecmp(adaptionset_node->name, "AdaptationSet")) {
1350  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1351  }
1352  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1353  }
1354 cleanup:
1355  /*free the document */
1356  xmlFreeDoc(doc);
1357  xmlCleanupParser();
1358  xmlFreeNode(mpd_baseurl_node);
1359  }
1360 
1361  av_bprint_finalize(&buf, NULL);
1362  if (close_in) {
1363  avio_close(in);
1364  }
1365  return ret;
1366 }
1367 
1368 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1369 {
1370  DASHContext *c = s->priv_data;
1371  int64_t num = 0;
1372  int64_t start_time_offset = 0;
1373 
1374  if (c->is_live) {
1375  if (pls->n_fragments) {
1376  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1377  num = pls->first_seq_no;
1378  } else if (pls->n_timelines) {
1379  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1380  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1381  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1382  if (num == -1)
1383  num = pls->first_seq_no;
1384  else
1385  num += pls->first_seq_no;
1386  } else if (pls->fragment_duration){
1387  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1388  if (pls->presentation_timeoffset) {
1389  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
1390  } else if (c->publish_time > 0 && !c->availability_start_time) {
1391  if (c->min_buffer_time) {
1392  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
1393  } else {
1394  num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1395  }
1396  } else {
1397  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1398  }
1399  }
1400  } else {
1401  num = pls->first_seq_no;
1402  }
1403  return num;
1404 }
1405 
1406 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1407 {
1408  DASHContext *c = s->priv_data;
1409  int64_t num = 0;
1410 
1411  if (c->is_live && pls->fragment_duration) {
1412  av_log(s, AV_LOG_TRACE, "in live mode\n");
1413  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
1414  } else {
1415  num = pls->first_seq_no;
1416  }
1417  return num;
1418 }
1419 
1420 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1421 {
1422  int64_t num = 0;
1423 
1424  if (pls->n_fragments) {
1425  num = pls->first_seq_no + pls->n_fragments - 1;
1426  } else if (pls->n_timelines) {
1427  int i = 0;
1428  num = pls->first_seq_no + pls->n_timelines - 1;
1429  for (i = 0; i < pls->n_timelines; i++) {
1430  if (pls->timelines[i]->repeat == -1) {
1431  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1432  num = c->period_duration / length_of_each_segment;
1433  } else {
1434  num += pls->timelines[i]->repeat;
1435  }
1436  }
1437  } else if (c->is_live && pls->fragment_duration) {
1438  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time)) * pls->fragment_timescale) / pls->fragment_duration;
1439  } else if (pls->fragment_duration) {
1440  num = pls->first_seq_no + av_rescale_rnd(1, c->media_presentation_duration * pls->fragment_timescale, pls->fragment_duration, AV_ROUND_UP);
1441  }
1442 
1443  return num;
1444 }
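/*
 * Example: a VOD representation listing 10 fragments with first_seq_no = 1
 * has a maximum sequence number of 10; for live streams the value is derived
 * from the wall clock or from the SegmentTimeline entries instead.
 */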
1445 
1446 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1447 {
1448  if (rep_dest && rep_src ) {
1449  free_timelines_list(rep_dest);
1450  rep_dest->timelines = rep_src->timelines;
1451  rep_dest->n_timelines = rep_src->n_timelines;
1452  rep_dest->first_seq_no = rep_src->first_seq_no;
1453  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1454  rep_src->timelines = NULL;
1455  rep_src->n_timelines = 0;
1456  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1457  }
1458 }
1459 
1460 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1461 {
1462  if (rep_dest && rep_src ) {
1463  free_fragment_list(rep_dest);
1464  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1465  rep_dest->cur_seq_no = 0;
1466  else
1467  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1468  rep_dest->fragments = rep_src->fragments;
1469  rep_dest->n_fragments = rep_src->n_fragments;
1470  rep_dest->parent = rep_src->parent;
1471  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1472  rep_src->fragments = NULL;
1473  rep_src->n_fragments = 0;
1474  }
1475 }
1476 
1477 
1478 static int refresh_manifest(AVFormatContext *s)
1479 {
1480  int ret = 0, i;
1481  DASHContext *c = s->priv_data;
1482  // save current context
1483  int n_videos = c->n_videos;
1484  struct representation **videos = c->videos;
1485  int n_audios = c->n_audios;
1486  struct representation **audios = c->audios;
1487  int n_subtitles = c->n_subtitles;
1488  struct representation **subtitles = c->subtitles;
1489  char *base_url = c->base_url;
1490 
1491  c->base_url = NULL;
1492  c->n_videos = 0;
1493  c->videos = NULL;
1494  c->n_audios = 0;
1495  c->audios = NULL;
1496  c->n_subtitles = 0;
1497  c->subtitles = NULL;
1498  ret = parse_manifest(s, s->url, NULL);
1499  if (ret)
1500  goto finish;
1501 
1502  if (c->n_videos != n_videos) {
1504  "new manifest has mismatched no. of video representations, %d -> %d\n",
1505  n_videos, c->n_videos);
1506  return AVERROR_INVALIDDATA;
1507  }
1508  if (c->n_audios != n_audios) {
1510  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1511  n_audios, c->n_audios);
1512  return AVERROR_INVALIDDATA;
1513  }
1514  if (c->n_subtitles != n_subtitles) {
1516  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1517  n_subtitles, c->n_subtitles);
1518  return AVERROR_INVALIDDATA;
1519  }
1520 
1521  for (i = 0; i < n_videos; i++) {
1522  struct representation *cur_video = videos[i];
1523  struct representation *ccur_video = c->videos[i];
1524  if (cur_video->timelines) {
1525  // calc current time
1526  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1527  // update segments
1528  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1529  if (ccur_video->cur_seq_no >= 0) {
1530  move_timelines(ccur_video, cur_video, c);
1531  }
1532  }
1533  if (cur_video->fragments) {
1534  move_segments(ccur_video, cur_video, c);
1535  }
1536  }
1537  for (i = 0; i < n_audios; i++) {
1538  struct representation *cur_audio = audios[i];
1539  struct representation *ccur_audio = c->audios[i];
1540  if (cur_audio->timelines) {
1541  // calc current time
1542  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1543  // update segments
1544  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1545  if (ccur_audio->cur_seq_no >= 0) {
1546  move_timelines(ccur_audio, cur_audio, c);
1547  }
1548  }
1549  if (cur_audio->fragments) {
1550  move_segments(ccur_audio, cur_audio, c);
1551  }
1552  }
1553 
1554 finish:
1555  // restore context
1556  if (c->base_url)
1557  av_free(base_url);
1558  else
1559  c->base_url = base_url;
1560 
1561  if (c->subtitles)
1562  free_subtitle_list(c);
1563  if (c->audios)
1564  free_audio_list(c);
1565  if (c->videos)
1566  free_video_list(c);
1567 
1568  c->n_subtitles = n_subtitles;
1569  c->subtitles = subtitles;
1570  c->n_audios = n_audios;
1571  c->audios = audios;
1572  c->n_videos = n_videos;
1573  c->videos = videos;
1574  return ret;
1575 }
1576 
1577 static struct fragment *get_current_fragment(struct representation *pls)
1578 {
1579  int64_t min_seq_no = 0;
1580  int64_t max_seq_no = 0;
1581  struct fragment *seg = NULL;
1582  struct fragment *seg_ptr = NULL;
1583  DASHContext *c = pls->parent->priv_data;
1584 
1585  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1586  if (pls->cur_seq_no < pls->n_fragments) {
1587  seg_ptr = pls->fragments[pls->cur_seq_no];
1588  seg = av_mallocz(sizeof(struct fragment));
1589  if (!seg) {
1590  return NULL;
1591  }
1592  seg->url = av_strdup(seg_ptr->url);
1593  if (!seg->url) {
1594  av_free(seg);
1595  return NULL;
1596  }
1597  seg->size = seg_ptr->size;
1598  seg->url_offset = seg_ptr->url_offset;
1599  return seg;
1600  } else if (c->is_live) {
1601  refresh_manifest(pls->parent);
1602  } else {
1603  break;
1604  }
1605  }
1606  if (c->is_live) {
1607  min_seq_no = calc_min_seg_no(pls->parent, pls);
1608  max_seq_no = calc_max_seg_no(pls, c);
1609 
1610  if (pls->timelines || pls->fragments) {
1611  refresh_manifest(pls->parent);
1612  }
1613  if (pls->cur_seq_no <= min_seq_no) {
1614  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"]\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no);
1615  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1616  } else if (pls->cur_seq_no > max_seq_no) {
1617  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"]\n", min_seq_no, max_seq_no);
1618  }
1619  seg = av_mallocz(sizeof(struct fragment));
1620  if (!seg) {
1621  return NULL;
1622  }
1623  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1624  seg = av_mallocz(sizeof(struct fragment));
1625  if (!seg) {
1626  return NULL;
1627  }
1628  }
1629  if (seg) {
1630  char *tmpfilename;
1631  if (!pls->url_template) {
1632  av_log(pls->parent, AV_LOG_ERROR, "Cannot get fragment, missing template URL\n");
1633  av_free(seg);
1634  return NULL;
1635  }
1636  tmpfilename = av_mallocz(c->max_url_size);
1637  if (!tmpfilename) {
1638  av_free(seg);
1639  return NULL;
1640  }
1641  ff_dash_fill_tmpl_params(tmpfilename, c->max_url_size, pls->url_template, 0, pls->cur_seq_no, 0, get_segment_start_time_based_on_timeline(pls, pls->cur_seq_no));
1642  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1643  if (!seg->url) {
1644  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', try to use origin template\n", pls->url_template);
1645  seg->url = av_strdup(pls->url_template);
1646  if (!seg->url) {
1647  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1648  av_free(tmpfilename);
1649  av_free(seg);
1650  return NULL;
1651  }
1652  }
1653  av_free(tmpfilename);
1654  seg->size = -1;
1655  }
1656 
1657  return seg;
1658 }
1659 
1660 static int read_from_url(struct representation *pls, struct fragment *seg,
1661  uint8_t *buf, int buf_size)
1662 {
1663  int ret;
1664 
1665  /* limit read if the fragment was only a part of a file */
1666  if (seg->size >= 0)
1667  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1668 
1669  ret = avio_read(pls->input, buf, buf_size);
1670  if (ret > 0)
1671  pls->cur_seg_offset += ret;
1672 
1673  return ret;
1674 }
1675 
1676 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1677 {
1678  AVDictionary *opts = NULL;
1679  char *url = NULL;
1680  int ret = 0;
1681 
1682  url = av_mallocz(c->max_url_size);
1683  if (!url) {
1684  ret = AVERROR(ENOMEM);
1685  goto cleanup;
1686  }
1687 
1688  if (seg->size >= 0) {
1689  /* try to restrict the HTTP request to the part we want
1690  * (if this is in fact a HTTP request) */
1691  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1692  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1693  }
1694 
1695  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1696  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64"\n",
1697  url, seg->url_offset);
1698  ret = open_url(pls->parent, &pls->input, url, &c->avio_opts, opts, NULL);
1699 
1700 cleanup:
1701  av_free(url);
1702  av_dict_free(&opts);
1703  pls->cur_seg_offset = 0;
1704  pls->cur_seg_size = seg->size;
1705  return ret;
1706 }
1707 
1708 static int update_init_section(struct representation *pls)
1709 {
1710  static const int max_init_section_size = 1024 * 1024;
1711  DASHContext *c = pls->parent->priv_data;
1712  int64_t sec_size;
1713  int64_t urlsize;
1714  int ret;
1715 
1716  if (!pls->init_section || pls->init_sec_buf)
1717  return 0;
1718 
1719  ret = open_input(c, pls, pls->init_section);
1720  if (ret < 0) {
1722  "Failed to open an initialization section\n");
1723  return ret;
1724  }
1725 
1726  if (pls->init_section->size >= 0)
1727  sec_size = pls->init_section->size;
1728  else if ((urlsize = avio_size(pls->input)) >= 0)
1729  sec_size = urlsize;
1730  else
1731  sec_size = max_init_section_size;
1732 
1733  av_log(pls->parent, AV_LOG_DEBUG,
1734  "Downloading an initialization section of size %"PRId64"\n",
1735  sec_size);
1736 
1737  sec_size = FFMIN(sec_size, max_init_section_size);
1738 
1739  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1740 
1741  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1742  pls->init_sec_buf_size);
1743  ff_format_io_close(pls->parent, &pls->input);
1744 
1745  if (ret < 0)
1746  return ret;
1747 
1748  pls->init_sec_data_len = ret;
1749  pls->init_sec_buf_read_offset = 0;
1750 
1751  return 0;
1752 }
1753 
1754 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1755 {
1756  struct representation *v = opaque;
1757  if (v->n_fragments && !v->init_sec_data_len) {
1758  return avio_seek(v->input, offset, whence);
1759  }
1760 
1761  return AVERROR(ENOSYS);
1762 }
1763 
1764 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1765 {
1766  int ret = 0;
1767  struct representation *v = opaque;
1768  DASHContext *c = v->parent->priv_data;
1769 
1770 restart:
1771  if (!v->input) {
1772  free_fragment(&v->cur_seg);
1773  v->cur_seg = get_current_fragment(v);
1774  if (!v->cur_seg) {
1775  ret = AVERROR_EOF;
1776  goto end;
1777  }
1778 
1779  /* load/update Media Initialization Section, if any */
1780  ret = update_init_section(v);
1781  if (ret)
1782  goto end;
1783 
1784  ret = open_input(c, v, v->cur_seg);
1785  if (ret < 0) {
1786  if (ff_check_interrupt(c->interrupt_callback)) {
1787  ret = AVERROR_EXIT;
1788  goto end;
1789  }
1790  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist\n");
1791  v->cur_seq_no++;
1792  goto restart;
1793  }
1794  }
1795 
1796  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1797  /* Push init section out first before first actual fragment */
1798  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1799  memcpy(buf, v->init_sec_buf + v->init_sec_buf_read_offset, copy_size);
1800  v->init_sec_buf_read_offset += copy_size;
1801  ret = copy_size;
1802  goto end;
1803  }
1804 
1805  /* if v->cur_seg is still NULL, fetch the current fragment and check the result again */
1806  if (!v->cur_seg) {
1807  v->cur_seg = get_current_fragment(v);
1808  }
1809  if (!v->cur_seg) {
1810  ret = AVERROR_EOF;
1811  goto end;
1812  }
1813  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1814  if (ret > 0)
1815  goto end;
1816 
1817  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1818  if (!v->is_restart_needed)
1819  v->cur_seq_no++;
1820  v->is_restart_needed = 1;
1821  }
1822 
1823 end:
1824  return ret;
1825 }
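read_data() and seek_data() above are the callbacks behind the per-representation AVIOContext that reopen_demux_for_component() sets up further down with ffio_init_context(). Outside libavformat the same pattern uses the public avio_alloc_context(); a minimal sketch, assuming a caller-supplied my_read() callback with the same signature as read_data() (open_from_callback is a hypothetical name):

#include <libavformat/avformat.h>
#include <libavutil/mem.h>

static AVFormatContext *open_from_callback(void *opaque,
                                           int (*my_read)(void *opaque, uint8_t *buf, int buf_size))
{
    const int buf_size = 32768;                 /* mirrors INITIAL_BUFFER_SIZE */
    uint8_t *buf = av_malloc(buf_size);
    AVIOContext *pb = NULL;
    AVFormatContext *fmt = avformat_alloc_context();

    if (!buf || !fmt)
        return NULL;
    /* my_read() must return the number of bytes read, or AVERROR_EOF when no
     * data is left, just like read_data() above */
    pb = avio_alloc_context(buf, buf_size, 0, opaque, my_read, NULL, NULL);
    if (!pb)
        return NULL;
    fmt->pb     = pb;
    fmt->flags |= AVFMT_FLAG_CUSTOM_IO;         /* caller keeps ownership of pb */
    if (avformat_open_input(&fmt, "", NULL, NULL) < 0)
        return NULL;
    return fmt;
}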
1826 
1827 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1828  int flags, AVDictionary **opts)
1829 {
1830  av_log(s, AV_LOG_ERROR,
1831  "A DASH playlist item '%s' referred to an external file '%s'. "
1832  "Opening this file was forbidden for security reasons\n",
1833  s->url, url);
1834  return AVERROR(EPERM);
1835 }
1836 
1837 static void close_demux_for_component(struct representation *pls)
1838 {
1839  /* note: the internal buffer could have changed */
1840  av_freep(&pls->pb.pub.buffer);
1841  memset(&pls->pb, 0x00, sizeof(pls->pb));
1842  pls->ctx->pb = NULL;
1843  avformat_close_input(&pls->ctx);
1844 }
1845 
1846 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1847 {
1848  DASHContext *c = s->priv_data;
1849  const AVInputFormat *in_fmt = NULL;
1850  AVDictionary *in_fmt_opts = NULL;
1851  uint8_t *avio_ctx_buffer = NULL;
1852  int ret = 0, i;
1853 
1854  if (pls->ctx) {
1855  close_demux_for_component(pls);
1856  }
1857 
1858  if (ff_check_interrupt(&s->interrupt_callback)) {
1859  ret = AVERROR_EXIT;
1860  goto fail;
1861  }
1862 
1863  if (!(pls->ctx = avformat_alloc_context())) {
1864  ret = AVERROR(ENOMEM);
1865  goto fail;
1866  }
1867 
1868  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1869  if (!avio_ctx_buffer ) {
1870  ret = AVERROR(ENOMEM);
1871  avformat_free_context(pls->ctx);
1872  pls->ctx = NULL;
1873  goto fail;
1874  }
1875  ffio_init_context(&pls->pb, avio_ctx_buffer, INITIAL_BUFFER_SIZE, 0,
1876  pls, read_data, NULL, c->is_live ? NULL : seek_data);
1877  pls->pb.pub.seekable = 0;
1878 
1879  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1880  goto fail;
1881 
1882  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1883  pls->ctx->probesize = s->probesize > 0 ? s->probesize : 1024 * 4;
1884  pls->ctx->max_analyze_duration = s->max_analyze_duration > 0 ? s->max_analyze_duration : 4 * AV_TIME_BASE;
1885  pls->ctx->interrupt_callback = s->interrupt_callback;
1886  ret = av_probe_input_buffer(&pls->pb.pub, &in_fmt, "", NULL, 0, 0);
1887  if (ret < 0) {
1888  av_log(s, AV_LOG_ERROR, "Error when loading first fragment of playlist\n");
1889  avformat_free_context(pls->ctx);
1890  pls->ctx = NULL;
1891  goto fail;
1892  }
1893 
1894  pls->ctx->pb = &pls->pb.pub;
1895  pls->ctx->io_open = nested_io_open;
1896 
1897  if (c->cenc_decryption_key)
1898  av_dict_set(&in_fmt_opts, "decryption_key", c->cenc_decryption_key, 0);
1899 
1900  // provide additional information from mpd if available
1901  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1902  av_dict_free(&in_fmt_opts);
1903  if (ret < 0)
1904  goto fail;
1905  if (pls->n_fragments) {
1906 #if FF_API_R_FRAME_RATE
1907  if (pls->framerate.den) {
1908  for (i = 0; i < pls->ctx->nb_streams; i++)
1909  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1910  }
1911 #endif
1912  ret = avformat_find_stream_info(pls->ctx, NULL);
1913  if (ret < 0)
1914  goto fail;
1915  }
1916 
1917 fail:
1918  return ret;
1919 }
1920 
1921 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1922 {
1923  int ret = 0;
1924  int i;
1925 
1926  pls->parent = s;
1927  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1928 
1929  if (!pls->last_seq_no) {
1930  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1931  }
1932 
1933  ret = reopen_demux_for_component(s, pls);
1934  if (ret < 0) {
1935  goto fail;
1936  }
1937  for (i = 0; i < pls->ctx->nb_streams; i++) {
1938  AVStream *st = avformat_new_stream(s, NULL);
1939  AVStream *ist = pls->ctx->streams[i];
1940  if (!st) {
1941  ret = AVERROR(ENOMEM);
1942  goto fail;
1943  }
1944  st->id = i;
1945  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1946  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1947 
1948  // copy disposition
1949  st->disposition = ist->disposition;
1950 
1951  // copy side data
1952  for (int i = 0; i < ist->nb_side_data; i++) {
1953  const AVPacketSideData *sd_src = &ist->side_data[i];
1954  uint8_t *dst_data;
1955 
1956  dst_data = av_stream_new_side_data(st, sd_src->type, sd_src->size);
1957  if (!dst_data)
1958  return AVERROR(ENOMEM);
1959  memcpy(dst_data, sd_src->data, sd_src->size);
1960  }
1961  }
1962 
1963  return 0;
1964 fail:
1965  return ret;
1966 }
1967 
1968 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1969 {
1970  struct fragment *first_init_section = pls[0]->init_section;
1971  char *url =NULL;
1972  int64_t url_offset = -1;
1973  int64_t size = -1;
1974  int i = 0;
1975 
1976  if (first_init_section == NULL || n_pls == 0)
1977  return 0;
1978 
1979  url = first_init_section->url;
1980  url_offset = first_init_section->url_offset;
1981  size = pls[0]->init_section->size;
1982  for (i=0;i<n_pls;i++) {
1983  if (!pls[i]->init_section)
1984  continue;
1985 
1986  if (av_strcasecmp(pls[i]->init_section->url, url) ||
1987  pls[i]->init_section->url_offset != url_offset ||
1988  pls[i]->init_section->size != size) {
1989  return 0;
1990  }
1991  }
1992  return 1;
1993 }
1994 
1995 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
1996 {
1997  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
1998  if (!rep_dest->init_sec_buf) {
1999  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
2000  return AVERROR(ENOMEM);
2001  }
2002  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
2003  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2004  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2005  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2006 
2007  return 0;
2008 }
2009 
2010 static void move_metadata(AVStream *st, const char *key, char **value)
2011 {
2012  if (*value) {
2013  av_dict_set(&st->metadata, key, *value, AV_DICT_DONT_STRDUP_VAL);
2014  *value = NULL;
2015  }
2016 }
2017 
2018 static int dash_read_header(AVFormatContext *s)
2019 {
2020  DASHContext *c = s->priv_data;
2021  struct representation *rep;
2022  AVProgram *program;
2023  int ret = 0;
2024  int stream_index = 0;
2025  int i;
2026 
2027  c->interrupt_callback = &s->interrupt_callback;
2028 
2029  if ((ret = ffio_copy_url_options(s->pb, &c->avio_opts)) < 0)
2030  return ret;
2031 
2032  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2033  return ret;
2034 
2035  /* If this isn't a live stream, fill the total duration of the
2036  * stream. */
2037  if (!c->is_live) {
2038  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2039  } else {
2040  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2041  }
2042 
2043  if(c->n_videos)
2044  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2045 
2046  /* Open the demuxer for video and audio components if available */
2047  for (i = 0; i < c->n_videos; i++) {
2048  rep = c->videos[i];
2049  if (i > 0 && c->is_init_section_common_video) {
2050  ret = copy_init_section(rep, c->videos[0]);
2051  if (ret < 0)
2052  return ret;
2053  }
2054  ret = open_demux_for_component(s, rep);
2055 
2056  if (ret)
2057  return ret;
2058  rep->stream_index = stream_index;
2059  ++stream_index;
2060  }
2061 
2062  if(c->n_audios)
2063  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2064 
2065  for (i = 0; i < c->n_audios; i++) {
2066  rep = c->audios[i];
2067  if (i > 0 && c->is_init_section_common_audio) {
2068  ret = copy_init_section(rep, c->audios[0]);
2069  if (ret < 0)
2070  return ret;
2071  }
2072  ret = open_demux_for_component(s, rep);
2073 
2074  if (ret)
2075  return ret;
2076  rep->stream_index = stream_index;
2077  ++stream_index;
2078  }
2079 
2080  if (c->n_subtitles)
2081  c->is_init_section_common_subtitle = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2082 
2083  for (i = 0; i < c->n_subtitles; i++) {
2084  rep = c->subtitles[i];
2085  if (i > 0 && c->is_init_section_common_subtitle) {
2086  ret = copy_init_section(rep, c->subtitles[0]);
2087  if (ret < 0)
2088  return ret;
2089  }
2090  ret = open_demux_for_component(s, rep);
2091 
2092  if (ret)
2093  return ret;
2094  rep->stream_index = stream_index;
2095  ++stream_index;
2096  }
2097 
2098  if (!stream_index)
2099  return AVERROR_INVALIDDATA;
2100 
2101  /* Create a program */
2102  program = av_new_program(s, 0);
2103  if (!program)
2104  return AVERROR(ENOMEM);
2105 
2106  for (i = 0; i < c->n_videos; i++) {
2107  rep = c->videos[i];
2108  av_program_add_stream_index(s, 0, rep->stream_index);
2109  rep->assoc_stream = s->streams[rep->stream_index];
2110  if (rep->bandwidth > 0)
2111  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2112  move_metadata(rep->assoc_stream, "id", &rep->id);
2113  }
2114  for (i = 0; i < c->n_audios; i++) {
2115  rep = c->audios[i];
2116  av_program_add_stream_index(s, 0, rep->stream_index);
2117  rep->assoc_stream = s->streams[rep->stream_index];
2118  if (rep->bandwidth > 0)
2119  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2120  move_metadata(rep->assoc_stream, "id", &rep->id);
2121  move_metadata(rep->assoc_stream, "language", &rep->lang);
2122  }
2123  for (i = 0; i < c->n_subtitles; i++) {
2124  rep = c->subtitles[i];
2125  av_program_add_stream_index(s, 0, rep->stream_index);
2126  rep->assoc_stream = s->streams[rep->stream_index];
2127  move_metadata(rep->assoc_stream, "id", &rep->id);
2128  move_metadata(rep->assoc_stream, "language", &rep->lang);
2129  }
2130 
2131  return 0;
2132 }
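Everything above is reached through the ordinary libavformat API: dash_read_header() runs inside avformat_open_input() and dash_read_packet() inside av_read_frame(). A minimal usage sketch, assuming a caller-chosen mpd_url (read_dash is a hypothetical name):

#include <libavformat/avformat.h>

static int read_dash(const char *mpd_url)
{
    AVFormatContext *ic = NULL;
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);
    ret = avformat_open_input(&ic, mpd_url, NULL, NULL);   /* dash_read_header() */
    if (ret < 0)
        goto end;
    ret = avformat_find_stream_info(ic, NULL);
    if (ret < 0)
        goto end;
    while ((ret = av_read_frame(ic, pkt)) >= 0) {           /* dash_read_packet() */
        /* pkt->stream_index matches the representation's assoc_stream */
        av_packet_unref(pkt);
    }
end:
    avformat_close_input(&ic);
    av_packet_free(&pkt);
    return ret == AVERROR_EOF ? 0 : ret;
}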
2133 
2134 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2135 {
2136  int i, j;
2137 
2138  for (i = 0; i < n; i++) {
2139  struct representation *pls = p[i];
2140  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2141 
2142  if (needed && !pls->ctx) {
2143  pls->cur_seg_offset = 0;
2144  pls->init_sec_buf_read_offset = 0;
2145  /* Catch up */
2146  for (j = 0; j < n; j++) {
2147  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2148  }
2149  reopen_demux_for_component(s, pls);
2150  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2151  } else if (!needed && pls->ctx) {
2152  close_demux_for_component(pls);
2153  ff_format_io_close(pls->parent, &pls->input);
2154  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2155  }
2156  }
2157 }
2158 
2159 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2160 {
2161  DASHContext *c = s->priv_data;
2162  int ret = 0, i;
2163  int64_t mints = 0;
2164  struct representation *cur = NULL;
2165  struct representation *rep = NULL;
2166 
2167  recheck_discard_flags(s, c->videos, c->n_videos);
2168  recheck_discard_flags(s, c->audios, c->n_audios);
2169  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2170 
2171  for (i = 0; i < c->n_videos; i++) {
2172  rep = c->videos[i];
2173  if (!rep->ctx)
2174  continue;
2175  if (!cur || rep->cur_timestamp < mints) {
2176  cur = rep;
2177  mints = rep->cur_timestamp;
2178  }
2179  }
2180  for (i = 0; i < c->n_audios; i++) {
2181  rep = c->audios[i];
2182  if (!rep->ctx)
2183  continue;
2184  if (!cur || rep->cur_timestamp < mints) {
2185  cur = rep;
2186  mints = rep->cur_timestamp;
2187  }
2188  }
2189 
2190  for (i = 0; i < c->n_subtitles; i++) {
2191  rep = c->subtitles[i];
2192  if (!rep->ctx)
2193  continue;
2194  if (!cur || rep->cur_timestamp < mints) {
2195  cur = rep;
2196  mints = rep->cur_timestamp;
2197  }
2198  }
2199 
2200  if (!cur) {
2201  return AVERROR_INVALIDDATA;
2202  }
2203  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2204  ret = av_read_frame(cur->ctx, pkt);
2205  if (ret >= 0) {
2206  /* If we got a packet, return it */
2207  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2208  pkt->stream_index = cur->stream_index;
2209  return 0;
2210  }
2211  if (cur->is_restart_needed) {
2212  cur->cur_seg_offset = 0;
2213  cur->init_sec_buf_read_offset = 0;
2214  ff_format_io_close(cur->parent, &cur->input);
2215  ret = reopen_demux_for_component(s, cur);
2216  cur->is_restart_needed = 0;
2217  }
2218  }
2219  return AVERROR_EOF;
2220 }
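The cur_timestamp bookkeeping above rescales pkt->pts from the stream time base onto a 90 kHz clock so that video, audio and subtitle representations can be compared on one scale when choosing the next playlist to read from. The same conversion in isolation (to_90khz is a hypothetical name); with time_base = 1/48000 and pts = 96000 (two seconds) it returns 180000:

#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

static int64_t to_90khz(int64_t pts, AVRational time_base)
{
    /* same arithmetic as the cur_timestamp update in dash_read_packet() */
    return av_rescale(pts, (int64_t)time_base.num * 90000, time_base.den);
}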
2221 
2222 static int dash_close(AVFormatContext *s)
2223 {
2224  DASHContext *c = s->priv_data;
2225  free_audio_list(c);
2226  free_video_list(c);
2227  free_subtitle_list(c);
2228  av_dict_free(&c->avio_opts);
2229  av_freep(&c->base_url);
2230  return 0;
2231 }
2232 
2233 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2234 {
2235  int ret = 0;
2236  int i = 0;
2237  int j = 0;
2238  int64_t duration = 0;
2239 
2240  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms] %s\n",
2241  seek_pos_msec, dry_run ? " (dry)" : "");
2242 
2243  // single fragment mode
2244  if (pls->n_fragments == 1) {
2245  pls->cur_timestamp = 0;
2246  pls->cur_seg_offset = 0;
2247  if (dry_run)
2248  return 0;
2249  ff_read_frame_flush(pls->ctx);
2250  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2251  }
2252 
2253  ff_format_io_close(pls->parent, &pls->input);
2254 
2255  // find the nearest fragment
2256  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2257  int64_t num = pls->first_seq_no;
2258  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2259  "last_seq_no[%"PRId64"].\n",
2260  (int)pls->n_timelines, (int64_t)pls->last_seq_no);
2261  for (i = 0; i < pls->n_timelines; i++) {
2262  if (pls->timelines[i]->starttime > 0) {
2263  duration = pls->timelines[i]->starttime;
2264  }
2265  duration += pls->timelines[i]->duration;
2266  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2267  goto set_seq_num;
2268  }
2269  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2270  duration += pls->timelines[i]->duration;
2271  num++;
2272  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2273  goto set_seq_num;
2274  }
2275  }
2276  num++;
2277  }
2278 
2279 set_seq_num:
2280  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2281  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"].\n",
2282  (int64_t)pls->cur_seq_no);
2283  } else if (pls->fragment_duration > 0) {
2284  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2285  } else {
2286  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2287  pls->cur_seq_no = pls->first_seq_no;
2288  }
2289  pls->cur_timestamp = 0;
2290  pls->cur_seg_offset = 0;
2291  pls->init_sec_buf_read_offset = 0;
2292  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2293 
2294  return ret;
2295 }
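When no SegmentTimeline is available, the fragment_duration branch above maps the requested millisecond position directly to a segment index. The same arithmetic as a standalone sketch (seg_no_for_msec is a hypothetical name); with fragment_timescale = 1000 and fragment_duration = 4000 (four-second segments), a seek to 30500 ms yields first_seq_no + 7:

#include <stdint.h>

static int64_t seg_no_for_msec(int64_t first_seq_no, int64_t seek_pos_msec,
                               int64_t fragment_timescale, int64_t fragment_duration)
{
    /* mirrors: first_seq_no + ((seek_pos_msec * timescale) / duration) / 1000 */
    return first_seq_no +
           ((seek_pos_msec * fragment_timescale) / fragment_duration) / 1000;
}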
2296 
2297 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2298 {
2299  int ret = 0, i;
2300  DASHContext *c = s->priv_data;
2301  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2302  s->streams[stream_index]->time_base.den,
2303  flags & AVSEEK_FLAG_BACKWARD ?
2304  AV_ROUND_DOWN : AV_ROUND_UP);
2305  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2306  return AVERROR(ENOSYS);
2307 
2308  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2309  for (i = 0; i < c->n_videos; i++) {
2310  if (!ret)
2311  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2312  }
2313  for (i = 0; i < c->n_audios; i++) {
2314  if (!ret)
2315  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2316  }
2317  for (i = 0; i < c->n_subtitles; i++) {
2318  if (!ret)
2319  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2320  }
2321 
2322  return ret;
2323 }
2324 
2325 static int dash_probe(const AVProbeData *p)
2326 {
2327  if (!av_stristr(p->buf, "<MPD"))
2328  return 0;
2329 
2330  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2331  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2332  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2333  av_stristr(p->buf, "dash:profile:isoff-main:2011") ||
2334  av_stristr(p->buf, "3GPP:PSS:profile:DASH1")) {
2335  return AVPROBE_SCORE_MAX;
2336  }
2337  if (av_stristr(p->buf, "dash:profile")) {
2338  return AVPROBE_SCORE_MAX;
2339  }
2340 
2341  return 0;
2342 }
2343 
2344 #define OFFSET(x) offsetof(DASHContext, x)
2345 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2346 static const AVOption dash_options[] = {
2347  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2348  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2349  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm,ts"},
2350  INT_MIN, INT_MAX, FLAGS},
2351  { "cenc_decryption_key", "Media decryption key (hex)", OFFSET(cenc_decryption_key), AV_OPT_TYPE_STRING, {.str = NULL}, INT_MIN, INT_MAX, .flags = FLAGS },
2352  {NULL}
2353 };
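Both entries above are decoding parameters, so callers can pass them through the options dictionary of avformat_open_input() instead of touching the demuxer context directly. A minimal sketch; the key and extension list are placeholder values and open_with_dash_options is a hypothetical name:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_with_dash_options(AVFormatContext **ic, const char *mpd_url)
{
    AVDictionary *dash_opts = NULL;
    int ret;

    av_dict_set(&dash_opts, "cenc_decryption_key",
                "00112233445566778899aabbccddeeff", 0);  /* hex key, placeholder */
    av_dict_set(&dash_opts, "allowed_extensions", "mp4,m4s,m4a,m4v", 0);
    ret = avformat_open_input(ic, mpd_url, NULL, &dash_opts);
    av_dict_free(&dash_opts);  /* entries not consumed by the demuxer remain here */
    return ret;
}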
2354 
2355 static const AVClass dash_class = {
2356  .class_name = "dash",
2357  .item_name = av_default_item_name,
2358  .option = dash_options,
2359  .version = LIBAVUTIL_VERSION_INT,
2360 };
2361 
2362 const AVInputFormat ff_dash_demuxer = {
2363  .name = "dash",
2364  .long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2365  .priv_class = &dash_class,
2366  .priv_data_size = sizeof(DASHContext),
2367  .flags_internal = FF_FMT_INIT_CLEANUP,
2368  .read_probe = dash_probe,
2369  .read_header = dash_read_header,
2370  .read_packet = dash_read_packet,
2371  .read_close = dash_close,
2372  .read_seek = dash_read_seek,
2373  .flags = AVFMT_NO_BYTE_SEEK,
2374 };