def test_segments_number_time(self):
    """A dynamic $Number$-addressed template yields the init segment, then timed media segments."""
    with xml("dash/test_1.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        segment_iter = mpd.periods[0].adaptationSets[0].representations[0].segments()
        init_segment = next(segment_iter)
        self.assertEqual(init_segment.url, "http://test.se/tracks-v3/init-1526842800.g_m4v")
        # suggested delay is 11 seconds, each segment is 5 seconds long - so there should be 3
        media_urls = [segment.url for segment in itertools.islice(segment_iter, 5)]
        self.assertSequenceEqual(
            media_urls,
            ['http://test.se/tracks-v3/dvr-1526842800-698.g_m4v?t=3403000',
             'http://test.se/tracks-v3/dvr-1526842800-699.g_m4v?t=3408000',
             'http://test.se/tracks-v3/dvr-1526842800-700.g_m4v?t=3413000'])
def test_segments_list(self):
    """A SegmentList manifest yields the init segment, then the explicitly listed media segments."""
    with xml("dash/test_7.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        segment_iter = mpd.periods[0].adaptationSets[0].representations[0].segments()
        self.assertEqual(next(segment_iter).url,
                         "http://test.se/chunk_ctvideo_ridp0va0br4332748_cinit_mpd.m4s")
        media_urls = list(map(attrgetter("url"), itertools.islice(segment_iter, 3)))
        self.assertSequenceEqual(media_urls, [
            'http://test.se/chunk_ctvideo_ridp0va0br4332748_cn1_mpd.m4s',
            'http://test.se/chunk_ctvideo_ridp0va0br4332748_cn2_mpd.m4s',
            'http://test.se/chunk_ctvideo_ridp0va0br4332748_cn3_mpd.m4s',
        ])
def test_segments_static_no_publish_time(self):
    """A static manifest without a publishTime still enumerates its segments."""
    with xml("dash/test_5.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        segment_iter = mpd.periods[0].adaptationSets[1].representations[0].segments()
        self.assertEqual(next(segment_iter).url,
                         "http://test.se/dash/150633-video_eng=194000.dash")
        media_urls = list(map(attrgetter("url"), itertools.islice(segment_iter, 3)))
        self.assertSequenceEqual(media_urls, [
            'http://test.se/dash/150633-video_eng=194000-0.dash',
            'http://test.se/dash/150633-video_eng=194000-2000.dash',
            'http://test.se/dash/150633-video_eng=194000-4000.dash',
        ])
def parse_manifest(cls, session, url, **args):
    """
    Attempt to parse a DASH manifest file and return its streams

    :param session: Streamlink session instance
    :param url: URL of the manifest file
    :return: a dict of name -> DASHStream instances
    """
    res = session.http.get(url, **args)
    url = res.url  # use the final URL so relative segment URLs resolve after redirects

    urlp = list(urlparse(url))
    urlp[2], _ = urlp[2].rsplit("/", 1)  # drop the manifest filename, keep its directory as base_url
    mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)

    # Search for suitable video and audio representations
    video = []
    audio = []
    for aset in mpd.periods[0].adaptationSets:
        if aset.contentProtection:
            raise PluginError("{} is protected by DRM".format(url))
        for rep in aset.representations:
            if rep.mimeType.startswith("video"):
                video.append(rep)
            elif rep.mimeType.startswith("audio"):
                audio.append(rep)

    # placeholder None entries keep itertools.product producing pairs
    video = video or [None]
    audio = audio or [None]

    ret = {}
    for vid, aud in itertools.product(video, audio):
        stream = DASHStream(session, mpd, vid, aud, **args)
        name_parts = []
        if vid:
            name_parts.append("{:0.0f}{}".format(vid.height or vid.bandwidth,
                                                 "p" if vid.height else "k"))
        # only tag the audio bitrate when there is a real choice between tracks
        if audio and len(audio) > 1:
            name_parts.append("a{:0.0f}k".format(aud.bandwidth))
        ret['+'.join(name_parts)] = stream
    return ret
def test_segments_static_number(self):
    """A static $Number$-addressed template enumerates every segment exactly once."""
    with xml("dash/test_2.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        segment_iter = mpd.periods[0].adaptationSets[3].representations[0].segments()
        self.assertEqual(next(segment_iter).url, "http://test.se/video/250kbit/init.mp4")
        # a huge islice bound proves the iterator terminates on its own
        media_urls = [segment.url for segment in itertools.islice(segment_iter, 100000)]
        self.assertEqual(len(media_urls), 444)
        self.assertSequenceEqual(media_urls[:5], [
            'http://test.se/video/250kbit/segment_1.m4s',
            'http://test.se/video/250kbit/segment_2.m4s',
            'http://test.se/video/250kbit/segment_3.m4s',
            'http://test.se/video/250kbit/segment_4.m4s',
            'http://test.se/video/250kbit/segment_5.m4s'])
def test_tsegment_t_is_none_1895(self):
    """
    Verify the fix for https://github.com/streamlink/streamlink/issues/1895
    """
    with xml("dash/test_8.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        segment_iter = mpd.periods[0].adaptationSets[0].representations[0].segments()
        self.assertEqual(next(segment_iter).url,
                         "http://test.se/video-2799000-0.mp4?z32=CENSORED_SESSION")
        media_urls = list(map(attrgetter("url"), itertools.islice(segment_iter, 3)))
        self.assertSequenceEqual(media_urls, [
            'http://test.se/video-time=0-2799000-0.m4s?z32=CENSORED_SESSION',
            'http://test.se/video-time=4000-2799000-0.m4s?z32=CENSORED_SESSION',
            'http://test.se/video-time=8000-2799000-0.m4s?z32=CENSORED_SESSION',
        ])
def test_duplicated_resolutions(self):
    """
    Verify the fix for https://github.com/streamlink/streamlink/issues/3365
    """
    with xml("dash/test_10.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        # two representations share the same height but differ in bandwidth
        first, second = mpd.periods[0].adaptationSets[0].representations[0:2]
        self.assertEqual(first.height, 804)
        self.assertEqual(first.bandwidth, 10000.0)
        self.assertEqual(second.height, 804)
        self.assertEqual(second.bandwidth, 8000.0)
def test_segments_dynamic_time(self):
    """A dynamic $Time$-addressed template starts at the live edge minus the suggested delay."""
    with xml("dash/test_3.mpd") as mpd_xml:
        mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
        segment_iter = mpd.periods[0].adaptationSets[0].representations[0].segments()
        self.assertEqual(next(segment_iter).url, "http://test.se/video-2800000-0.mp4?z32=")
        # default suggested delay is 3 seconds, each segment is 4 seconds long - so there should be 1 segment
        media_urls = [segment.url for segment in itertools.islice(segment_iter, 3)]
        self.assertSequenceEqual(
            media_urls,
            ['http://test.se/video-time=1525450872000-2800000-0.m4s?z32='])
def reload(self):
    """Re-fetch the manifest; adopt it only when its publishTime has advanced.

    Returns True when a newer manifest was installed, False when unchanged,
    and None when the stream is already closed.
    """
    if self.closed:
        return

    # block until the output buffer has room before hitting the network again
    self.reader.buffer.wait_free()
    log.debug("Reloading manifest ({0})".format(self.reader.representation_id))

    res = self.session.http.get(self.mpd.url, exception=StreamError)
    manifest = MPD(
        self.session.http.xml(res, ignore_ns=True),
        base_url=self.mpd.base_url,
        url=self.mpd.url,
        timelines=self.mpd.timelines,
    )

    if manifest.publishTime <= self.mpd.publishTime:
        return False
    self.mpd = manifest
    return True
def reload(self):
    """Re-fetch the manifest; adopt it only when our representation gained new segments.

    Returns True when a newer manifest was installed, False when unchanged,
    and None when the stream is already closed.
    """
    if self.closed:
        return

    # block until the output buffer has room before hitting the network again
    self.reader.buffer.wait_free()
    log.debug("Reloading manifest ({0}:{1})".format(self.reader.representation_id,
                                                    self.reader.mime_type))

    res = self.session.http.get(self.mpd.url, exception=StreamError, **self.stream.args)
    manifest = MPD(
        self.session.http.xml(res, ignore_ns=True),
        base_url=self.mpd.base_url,
        url=self.mpd.url,
        timelines=self.mpd.timelines,
    )
    representation = self.get_representation(manifest,
                                             self.reader.representation_id,
                                             self.reader.mime_type)

    # peek at one segment without advancing the shared timeline state
    with freeze_timeline(manifest):
        changed = bool(list(itertools.islice(representation.segments(), 1)))

    if changed:
        self.mpd = manifest
    return changed
def test_segments_dynamic_number(self):
    """With the clock frozen, segment availability times advance by one segment duration each."""
    with freeze_time(FakeDatetime(2018, 5, 22, 13, 37, 0, tzinfo=utc)):
        with xml("dash/test_4.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segment_iter = mpd.periods[0].adaptationSets[0].representations[0].segments()
            self.assertEqual(next(segment_iter).url, "http://test.se/hd-5-init.mp4")
            actual = [(segment.url, segment.available_at)
                      for segment in itertools.islice(segment_iter, 3)]
            self.assertSequenceEqual(actual, [
                ('http://test.se/hd-5_000311235.mp4',
                 datetime.datetime(2018, 5, 22, 13, 37, 0, tzinfo=utc)),
                ('http://test.se/hd-5_000311236.mp4',
                 datetime.datetime(2018, 5, 22, 13, 37, 5, tzinfo=utc)),
                ('http://test.se/hd-5_000311237.mp4',
                 datetime.datetime(2018, 5, 22, 13, 37, 10, tzinfo=utc)),
            ])
def parse_manifest(cls, session, url_or_manifest, **args):
    """
    Attempt to parse a DASH manifest file and return its streams

    :param session: Streamlink session instance
    :param url_or_manifest: URL of the manifest file or an XML manifest string
    :return: a dict of name -> DASHStream instances
    """
    if url_or_manifest.startswith('<?xml'):
        # FIX: `url` was left undefined on this branch, so the DRM check below
        # raised NameError instead of PluginError for XML-string manifests
        url = None
        mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True))
    else:
        res = session.http.get(url_or_manifest, **args)
        url = res.url  # use the final URL so relative segment URLs resolve after redirects

        urlp = list(urlparse(url))
        urlp[2], _ = urlp[2].rsplit("/", 1)  # drop the manifest filename, keep its directory
        mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)

    video, audio = [], []

    # Search for suitable video and audio representations
    for aset in mpd.periods[0].adaptationSets:
        if aset.contentProtection:
            raise PluginError("{} is protected by DRM".format(url or "Manifest"))
        for rep in aset.representations:
            if rep.mimeType.startswith("video"):
                video.append(rep)
            elif rep.mimeType.startswith("audio"):
                audio.append(rep)

    # placeholder None entries keep itertools.product producing pairs
    if not video:
        video = [None]
    if not audio:
        audio = [None]

    locale = session.localization
    locale_lang = locale.language
    lang = None
    available_languages = set()

    # if the locale is explicitly set, prefer that language over others
    for aud in audio:
        if aud and aud.lang:
            available_languages.add(aud.lang)
            try:
                # Language.get raises LookupError on unknown language tags
                if locale.explicit and Language.get(aud.lang) == locale_lang:
                    lang = aud.lang
            except LookupError:
                continue

    if not lang:
        # filter by the first language that appears
        lang = audio[0] and audio[0].lang

    log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(
        ", ".join(available_languages) or "NONE", lang or "n/a"))

    # if the language is given by the stream, filter out other languages that do not match
    if len(available_languages) > 1:
        audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio))

    ret = []
    for vid, aud in itertools.product(video, audio):
        stream = DASHStream(session, mpd, vid, aud, **args)
        stream_name = []

        if vid:
            stream_name.append("{:0.0f}{}".format(vid.height or vid.bandwidth_rounded,
                                                  "p" if vid.height else "k"))
        # only tag the audio bitrate when there is a real choice between tracks
        if audio and len(audio) > 1:
            stream_name.append("a{:0.0f}k".format(aud.bandwidth))
        ret.append(('+'.join(stream_name), stream))

    # rename duplicate streams: second occurrence gets "_alt", later ones "_altN"
    dict_value_list = defaultdict(list)
    for name, stream in ret:
        dict_value_list[name].append(stream)

    ret_new = {}
    for name, streams in dict_value_list.items():
        for index, stream in enumerate(streams):
            if index == 0:
                ret_new[name] = stream
            elif index == 1:
                ret_new[f'{name}_alt'] = stream
            else:
                ret_new[f'{name}_alt{index}'] = stream
    return ret_new