def _get_streams(self):
    """Resolve a Pluto TV live channel, series episode or movie to its
    stitched HLS URL and yield remuxed variant streams.

    Sets ``self.title`` from the matched media item. Yields nothing when no
    media matches or the item has no ``stitched`` URLs.
    """
    data = None
    m = self.match.groupdict()
    if m['slug_live']:
        res = self.session.http.get('https://api.pluto.tv/v2/channels')
        data = self.session.http.json(res, schema=self._schema_media(m['slug_live']))
    elif m['slug_series'] and m['slug_episode']:
        # FIX: request over HTTPS - every other api.pluto.tv call in this
        # plugin already uses HTTPS; plain http was an inconsistency
        res = self.session.http.get(f'https://api.pluto.tv/v3/vod/slugs/{m["slug_series"]}')
        data = self.session.http.json(
            res,
            schema=validate.Schema(
                {'seasons': validate.all(
                    [{'episodes': self._schema_media(m['slug_episode'])}],
                    validate.filter(lambda k: k['episodes'] is not None))},
                validate.get('seasons'),
                validate.get(0),
                validate.any(None, validate.get('episodes'))
            ),
        )
    elif m['slug_movies']:
        res = self.session.http.get(
            'https://api.pluto.tv/v3/vod/categories',
            params={'includeItems': 'true', 'deviceType': 'web'})
        data = self.session.http.json(
            res,
            schema=validate.Schema(
                {'categories': validate.all(
                    [{'items': self._schema_media(m['slug_movies'])}],
                    validate.filter(lambda k: k['items'] is not None))},
                validate.get('categories'),
                validate.get(0),
                validate.any(None, validate.get('items')),
            ),
        )

    log.trace(f'{data!r}')
    if data is None or not data.get('stitched'):
        return

    self.title = data['name']
    stream_url_no_sid = data['stitched']['urls'][0]['url']
    # Pluto requires a device/session ID in the query string; one random UUID
    # serves as both deviceId and sid
    device_id = str(uuid4())
    stream_url = update_qsd(stream_url_no_sid, {
        'deviceId': device_id,
        'sid': device_id,
        'deviceType': 'web',
        'deviceMake': 'Firefox',
        'deviceModel': 'Firefox',
        'appName': 'web',
    })

    # Remux to MPEG-TS so the stitched stream plays back cleanly
    self.session.set_option('ffmpeg-fout', 'mpegts')
    for q, s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
        yield q, MuxedStream(self.session, s)
def _get_streams(self):
    """Scrape the player-manager config from the page, query the player URL
    with it, and yield the HLS variant streams from the returned playlist."""
    # The player-manager data is embedded as JSON inside a script tag
    params = self.session.http.get(
        self.url,
        schema=validate.Schema(
            validate.transform(self._re_player_manager.search),
            validate.any(
                None,
                validate.all(
                    validate.get("json"),
                    validate.parse_json(),
                    {
                        "contentId": validate.any(str, int),
                        validate.optional("streamId"): str,
                        validate.optional("idec"): str,
                        validate.optional("token"): str
                    }))))
    if not params:
        log.error("Could not find player manager data")
        return

    # Prefer the (urlencoded) token over the plain stream ID
    params.update({
        "video": (unquote(params.pop("token"))
                  if params.get("token") is not None
                  else params.pop("streamId")),
        "noflash": "yes",
        "embedded": "0",
    })

    url_parsed = urlparse(self.url)
    # m4sport.hu live pages ("/elo") also list VOD playlists - skip those
    skip_vods = url_parsed.netloc.endswith("m4sport.hu") and url_parsed.path.startswith("/elo")

    self.session.http.headers.update({"Referer": self.url})
    playlists = self.session.http.get(
        self.PLAYER_URL,
        params=params,
        schema=validate.Schema(
            validate.transform(self._re_player_json.search),
            validate.any(
                None,
                validate.all(
                    validate.get("json"),
                    validate.parse_json(),
                    {"playlist": [{
                        "file": validate.url(),
                        "type": str
                    }]},
                    validate.get("playlist"),
                    validate.filter(lambda p: p["type"] == "hls"),
                    validate.filter(
                        lambda p: not skip_vods or "vod" not in p["file"]),
                    validate.map(
                        lambda p: update_scheme("https://", p["file"])))))))

    for url in playlists or []:
        yield from HLSStream.parse_variant_playlist(self.session, url).items()
def get_live(self, live_slug):
    """Look up a live channel by its URL slug via the channels API and return
    its HLS variant streams, or None when the channel is missing or offline.

    Also populates the plugin's id/author/title/category metadata.
    """
    schema = validate.Schema(
        validate.parse_json(),
        [{
            "id": int,
            "url": str,
            "title": str,
            "has_live": bool,
            "has_vod": bool,
            "streams": {
                "hls": validate.url(),
            },
        }],
        validate.filter(lambda k: k["url"] == live_slug),
    )
    matches = self.session.http.get(
        "https://aloula.faulio.com/api/v1/channels",
        schema=schema,
    )
    if not matches:
        return

    channel = matches[0]
    log.trace(f"{channel!r}")
    if not channel["has_live"]:
        log.error("Stream is not live")
        return

    # expose metadata on the plugin instance
    self.id = channel["id"]
    self.author = "SBA"
    self.title = channel["title"]
    self.category = "Live"
    return HLSStream.parse_variant_playlist(self.session, channel["streams"]["hls"])
def _get_vod(self, root):
    """Extract a VideoObject's contentUrl from the page's JSON-LD metadata and
    return either its HLS variant streams or a plain HTTP "vod" stream.

    Returns None when no valid JSON-LD VideoObject is found.
    """
    schema_vod = validate.Schema(
        validate.xml_xpath_string(
            ".//script[@type='application/ld+json'][contains(text(),'VideoObject')][1]/text()"
        ),
        str,
        # the JSON-LD blob may contain literal line breaks - strip them first
        validate.transform(
            lambda jsonlike: re.sub(r"[\r\n]+", "", jsonlike)),
        validate.parse_json(),
        validate.any(
            # either a @graph list of objects (pick the VideoObject) ...
            validate.all(
                {"@graph": [dict]},
                validate.get("@graph"),
                validate.filter(lambda obj: obj["@type"] == "VideoObject"),
                validate.get(0)),
            # ... or the VideoObject dict directly
            dict),
        {"contentUrl": validate.url()},
        validate.get("contentUrl"),
        validate.transform(
            lambda content_url: update_scheme("https://", content_url)))
    try:
        vod = schema_vod.validate(root)
    except PluginError:
        return
    if urlparse(vod).path.endswith(".m3u8"):
        return HLSStream.parse_variant_playlist(self.session, vod)
    return {"vod": HTTPStream(self.session, vod)}
class Welt(Plugin):
    """Plugin for welt.de; resolves the page's JSON player sources to an HLS URL."""

    _re_url = re.compile(r"""https?://(\w+\.)?welt\.de/?""", re.IGNORECASE)
    # VOD pages live under the "mediathek" section of the site
    _re_url_vod = re.compile(r"""mediathek""", re.IGNORECASE)
    _url_vod = "https://www.welt.de/onward/video/play/{0}"
    # Picks the first m3u8 source's src out of the page's JSON config
    _schema = validate.Schema(
        validate.transform(get_json),
        validate.transform(parse_json),
        validate.get("sources"),
        validate.filter(lambda obj: obj["extension"] == "m3u8"),
        validate.map(lambda obj: obj["src"]),
        validate.get(0))

    @classmethod
    def can_handle_url(cls, url):
        return cls._re_url.match(url) is not None

    def __init__(self, url):
        Plugin.__init__(self, url)
        self.url = url
        self.isVod = self._re_url_vod.search(url) is not None

    def _get_streams(self):
        headers = {"User-Agent": useragents.CHROME}
        hls_url = self.session.http.get(self.url, headers=headers, schema=self._schema)
        headers["Referer"] = self.url
        if self.isVod:
            # VOD URLs are resolved via a redirect through the play endpoint
            url = self._url_vod.format(quote(hls_url, safe=""))
            hls_url = self.session.http.get(url, headers=headers).url
        return HLSStream.parse_variant_playlist(self.session, hls_url, headers=headers)
def _get_streams(self):
    """Parse the inline player JSON from the page, take the first video's HLS
    source, set self.title, and return the HLS variant streams."""
    # NOTE(review): if the script lookup or regex fails, the schema yields
    # None and the tuple unpack below raises TypeError - presumably handled
    # by the caller; confirm
    self.title, hls_url = self.session.http.get(
        self.url,
        schema=validate.Schema(
            validate.parse_html(),
            validate.xml_xpath_string(".//script[contains(text(), 'HLS')]/text()"),
            validate.any(None, validate.all(
                validate.transform(self._re_content.search),
                validate.any(None, validate.all(
                    validate.get(1),
                    validate.parse_json(),
                    {str: {"children": {"top": {"model": {"videos": [{
                        "title": str,
                        "sources": validate.all(
                            [{"url": str, "type": str}],
                            validate.filter(lambda p: p["type"].lower() == "hls"),
                            validate.get((0, "url")))
                    }]}}}}},
                    # the outer dict has a single dynamic key - take its value
                    validate.transform(lambda k: next(iter(k.values()))),
                    validate.get(("children", "top", "model", "videos", 0)),
                    validate.union_get("title", "sources")
                ))
            ))
        )
    )
    return HLSStream.parse_variant_playlist(self.session, urljoin(self.url, hls_url))
def _get_streams(self):
    """Find the channel ID on the page, query the live JSON endpoint, and
    yield HLS variant streams for every HLS source of the clip."""
    page = self.session.http.get(self.url)
    match = self._re_channel_id.search(page.text)
    if match is None:
        return

    api_res = self.session.http.get(
        "https://www.rtvs.sk/json/live5f.json",
        params={
            "c": match.group(1),
            "b": "mozilla",
            "p": "win",
            "f": "0",
            "d": "1",
        },
    )
    schema = validate.Schema(
        {
            "clip": {
                "sources": [{
                    "src": validate.url(),
                    "type": str,
                }],
            },
        },
        validate.get(("clip", "sources")),
        # keep only the HLS sources
        validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
    )
    sources = parse_json(api_res.text, schema=schema)
    for source in sources:
        yield from HLSStream.parse_variant_playlist(self.session, source["src"]).items()
def _get_streams_api(self, video_id):
    """Query the regional API for the given video ID and yield HLS streams
    for every available version of the video."""
    log.debug("Found video ID: {0}".format(video_id))
    tld = self.match.group("tld")
    try:
        data = self.session.http.get(
            self._api.get(tld, "lt"),  # fall back to the "lt" API endpoint
            params=dict(video_id=video_id),
            schema=validate.Schema(
                validate.parse_json(),
                {
                    "success": True,
                    "data": {
                        "versions": {
                            validate.text: validate.all(
                                [{
                                    "type": validate.text,
                                    "src": validate.text,
                                }],
                                # keep only HLS sources per version
                                validate.filter(lambda item: item["type"] == "application/x-mpegurl")
                            )
                        }
                    }
                },
                validate.get(("data", "versions"))
            )
        )
    except PluginError:
        log.error("Failed to get streams from API")
        return

    # flatten the per-version stream lists into one iterable
    for stream in itertools.chain(*data.values()):
        src = update_scheme("https://", stream["src"], force=False)
        for s in HLSStream.parse_variant_playlist(self.session, src).items():
            yield s
class Looch(Plugin):
    """Plugin for looch.tv live channels and recorded videos."""

    url_re = re.compile(r"https?://(?:www\.)?looch\.tv/channel/(?P<name>[^/]+)(/videos/(?P<video_id>\d+))?")
    api_base = "https://api.looch.tv"
    channel_api = api_base + "/channels/{name}"
    video_api = api_base + "/videos/{id}"

    # A single playback entry: priority weight plus stream URI
    playback_schema = validate.Schema({"weight": int, "uri": validate.url()})
    data_schema = validate.Schema({
        "type": validate.text,
        "attributes": {
            validate.optional("playback"): [playback_schema],
            validate.optional("resolution"): {"width": int, "height": int}
        }})
    # Channel response: collect playback entries of all "active_streams" items
    channel_schema = validate.Schema(
        validate.transform(parse_json),
        {"included": validate.all(
            [data_schema],
            validate.filter(lambda x: x["type"] == "active_streams"),
            validate.map(lambda x: x["attributes"].get("playback")),
            validate.transform(lambda x: list(itertools.chain(*x)))
        ),
        },
        validate.get("included"))
    # Video response: just the single item's attributes
    video_schema = validate.Schema(
        validate.transform(parse_json),
        {"data": data_schema},
        validate.get("data"),
        validate.get("attributes"))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def _get_live_stream(self, channel):
        """Yield HLS variant streams for a live channel."""
        url = self.channel_api.format(name=channel)
        self.logger.debug("Channel API call: {0}", url)
        data = http.get(url, schema=self.channel_schema)
        self.logger.debug("Got {0} channel playback items", len(data))
        for playback in data:
            for s in HLSStream.parse_variant_playlist(self.session, playback["uri"]).items():
                yield s

    def _get_video_stream(self, video_id):
        """Yield progressive HTTP streams (labelled by height) for a video."""
        url = self.video_api.format(id=video_id)
        self.logger.debug("Video API call: {0}", url)
        data = http.get(url, schema=self.video_schema)
        self.logger.debug("Got video {0} playback items", len(data["playback"]))
        res = data["resolution"]["height"]
        for playback in data["playback"]:
            yield "{0}p".format(res), HTTPStream(self.session, playback["uri"])

    def _get_streams(self):
        # Dispatch to VOD or live handling based on which URL groups matched
        match = self.url_re.match(self.url)
        self.logger.debug("Matched URL: name={name}, video_id={video_id}", **match.groupdict())
        if match.group("video_id"):
            return self._get_video_stream(match.group("video_id"))
        elif match.group("name"):
            return self._get_live_stream(match.group("name"))
def parse_token(tokenstr):
    """Parse an access-token JSON string and return the channel's restricted
    bitrate names, excluding the archives/live/chunked pseudo-qualities."""
    schema = validate.Schema(
        {"chansub": {"restricted_bitrates": validate.all(
            [str],
            validate.filter(lambda n: not re.match(r"(.+_)?archives|live|chunked", n)),
        )}},
        validate.get(("chansub", "restricted_bitrates")),
    )
    return parse_json(tokenstr, schema=schema)
def _get_params_cid(self, channel):
    """Resolve a channel's display alias to its Zattoo channel ID (cid).

    Returns {'cid': ...} on success, or False when the channels API call
    failed (after forcing a session reset). Falls back to using the given
    name as the cid directly when no alias matches.
    """
    log.debug('get channel ID for {0}'.format(channel))
    try:
        res = self.session.http.get(
            f'{self.base_url}/zapi/v2/cached/channels/{self._session_attributes.get("power_guide_hash")}',
            headers=self.headers,
            params={'details': 'False'}
        )
    except Exception:
        # an API failure usually means a stale session - reset and bail out
        log.debug('Force session reset for _get_params_cid')
        self.reset_session()
        return False

    data = self.session.http.json(
        res,
        schema=validate.Schema({
            'success': validate.transform(bool),
            'channel_groups': [{
                'channels': [
                    {
                        'display_alias': str,
                        'cid': str,
                        'qualities': [{
                            'title': str,
                            'stream_types': validate.all(
                                [str],
                                # DRM-protected stream types are unplayable here
                                validate.filter(lambda n: not re.match(r"(.+_(?:fairplay|playready|widevine))", n))
                            ),
                            'level': str,
                            'availability': str,
                        }],
                    },
                ],
            }]},
            validate.get('channel_groups'),
        )
    )

    # flatten all channel groups into one list
    c_list = []
    for d in data:
        for c in d['channels']:
            c_list.append(c)

    cid = None
    zattoo_list = []
    for c in c_list:
        zattoo_list.append(c['display_alias'])
        if c['display_alias'] == channel:
            cid = c['cid']
            log.debug(f'{c!r}')

    log.trace('Available zattoo channels in this country: {0}'.format(
        ', '.join(sorted(zattoo_list))))

    if not cid:
        # no alias matched - treat the given name as a cid directly
        cid = channel

    log.debug('CHANNEL ID: {0}'.format(cid))
    return {'cid': cid}
def _get_streams(self):
    """Resolve a Showroom room from the page, verify it is live, and return
    the combined ("hls_all") HLS variant playlist.

    Returns None when the room cannot be found, the stream is offline, or
    the playlist is restricted.
    """
    re_room_id = re.compile(
        r"share_url:\"https:[^?]+?\?room_id=(?P<room_id>\d+)\"")
    # The room ID is only available inside an inline script on the page
    room_id = self.session.http.get(
        self.url,
        schema=validate.Schema(
            validate.parse_html(),
            validate.xml_xpath_string(
                ".//script[contains(text(),'share_url:\"https:')][1]/text()"
            ),
            validate.any(
                None,
                validate.all(validate.transform(re_room_id.search),
                             validate.any(None, validate.get("room_id"))))))
    if not room_id:
        return

    live_status, self.title = self.session.http.get(
        "https://www.showroom-live.com/api/live/live_info",
        params={"room_id": room_id},
        schema=validate.Schema(
            validate.parse_json(),
            {
                "live_status": int,
                "room_name": str,
            },
            validate.union_get(
                "live_status",
                "room_name",
            )))
    if live_status != self.LIVE_STATUS:
        log.info("This stream is currently offline")
        return

    url = self.session.http.get(
        "https://www.showroom-live.com/api/live/streaming_url",
        params={
            "room_id": room_id,
            "abr_available": 1,
        },
        schema=validate.Schema(
            validate.parse_json(),
            {
                "streaming_url_list": [{
                    "type": str,
                    "url": validate.url(),
                }]
            },
            validate.get("streaming_url_list"),
            validate.filter(lambda p: p["type"] == "hls_all"),
            validate.get((0, "url"))),
    )

    res = self.session.http.get(url, acceptable_status=(200, 403, 404))
    # FIX: use headers.get() - a 403/404 error response may omit the
    # Content-Type header entirely, which previously raised a KeyError here
    if res.headers.get("Content-Type") != "application/x-mpegURL":
        log.error("This stream is restricted")
        return

    return HLSStream.parse_variant_playlist(self.session, url)
class Welt(Plugin):
    """Plugin for welt.de; extracts the HLS URL from the page's inline
    "funkotron" player configuration."""

    _url_vod = "https://www.welt.de/onward/video/token/{0}"
    _re_url = re.compile(r"""https?://(\w+\.)?welt\.de/?""", re.IGNORECASE)
    # VOD pages live under the "mediathek" section of the site
    _re_url_vod = re.compile(r"""mediathek""", re.IGNORECASE)
    # Matches the inline <script> defining "var funkotron = { config: {...} }"
    _re_json = re.compile(
        r"""
            <script>\s*
            var\s+funkotron\s*=\s*
            \{\s*
                config\s*:\s*(?P<json>\{.+?\})\s*
            \}\s*;?\s*
            </script>
        """,
        re.VERBOSE | re.DOTALL | re.IGNORECASE)
    # Walks the config down to the first media item's acceptable source URL
    _schema = validate.Schema(validate.transform(_re_json.search),
                              validate.get("json"),
                              validate.transform(parse_json),
                              validate.get("page"),
                              validate.get("content"),
                              validate.get("media"),
                              validate.get(0),
                              validate.get("sources"),
                              validate.map(lambda obj: obj["file"]),
                              validate.filter(_filter_url),
                              validate.get(0))
    _schema_url = validate.Schema(
        validate.url(scheme="https", path=validate.endswith(".m3u8")))
    # Token endpoint response: {"urlWithToken": "<https .m3u8 URL>"}
    _schema_vod = validate.Schema(validate.transform(parse_json),
                                  validate.get("urlWithToken"),
                                  _schema_url)

    @classmethod
    def can_handle_url(cls, url):
        return cls._re_url.match(url) is not None

    def __init__(self, url):
        Plugin.__init__(self, url)
        self.url = url
        self.isVod = self._re_url_vod.search(url) is not None

    def _get_streams(self):
        headers = {"User-Agent": useragents.CHROME}
        hls_url = self.session.http.get(self.url, headers=headers, schema=self._schema)
        headers["Referer"] = self.url
        if self.isVod:
            # VOD HLS URLs must be exchanged for a tokenized URL first
            url = self._url_vod.format(quote(hls_url, safe=""))
            hls_url = self.session.http.get(url, headers=headers, schema=self._schema_vod)
        return HLSStream.parse_variant_playlist(self.session, hls_url, headers=headers)
def _get_streams(self):
    """Read the page's live-data JSON, verify the room is live, and return
    the combined ("hls_all") Showroom HLS streams."""
    data = self.session.http.get(
        self.url,
        schema=validate.Schema(
            validate.parse_html(),
            validate.xml_xpath_string(
                ".//script[@id='js-live-data'][@data-json]/@data-json"),
            validate.any(
                None,
                validate.all(
                    validate.parse_json(),
                    {
                        "is_live": int,
                        "room_id": int,
                        validate.optional("room"): {
                            "content_region_permission": int,
                            "is_free": int
                        }
                    },
                ))))
    if not data:  # URL without livestream
        return

    log.debug(f"{data!r}")
    if data["is_live"] != 1:
        log.info("This stream is currently offline")
        return

    url = self.session.http.get(
        "https://www.showroom-live.com/api/live/streaming_url",
        params={
            "room_id": data["room_id"],
            "abr_available": 1
        },
        schema=validate.Schema(
            validate.parse_json(),
            {
                "streaming_url_list": [{
                    "type": str,
                    "url": validate.url(),
                }]
            },
            validate.get("streaming_url_list"),
            validate.filter(lambda p: p["type"] == "hls_all"),
            validate.get((0, "url"))),
    )

    res = self.session.http.get(url, acceptable_status=(200, 403, 404))
    # NOTE(review): a 403/404 response without a Content-Type header would
    # raise a KeyError here - confirm whether that can happen in practice
    if res.headers["Content-Type"] != "application/x-mpegURL":
        log.error("This stream is restricted")
        return

    return ShowroomHLSStream.parse_variant_playlist(self.session, url)
def __init__(self, url: str):
    """Set up the validation schemas used to walk from the main page to the
    stream sources.

    The chain is: main page iframe -> embed page ':embed-data' attribute ->
    inner player iframe -> teliaPlayer() JS config -> sources API JSON.
    """
    super().__init__(url)
    self._json_data_re = re.compile(r'teliaPlayer\((\{.*?\})\);', re.DOTALL)

    # Locates the ltv.lsm.lv embed iframe on the main page
    self.main_page_schema = validate.Schema(
        validate.parse_html(),
        validate.xml_xpath_string(
            ".//iframe[contains(@src, 'ltv.lsm.lv/embed')][1]/@src"),
        validate.url())

    # Extracts the inner player iframe URL from the ':embed-data' JSON blob
    self.embed_code_schema = validate.Schema(
        validate.parse_html(),
        validate.xml_xpath_string(".//live[1]/@*[name()=':embed-data']"),
        str,
        validate.parse_json(),
        {"source": {
            "embed_code": str
        }},
        validate.get(("source", "embed_code")),
        validate.parse_html(),
        validate.xml_xpath_string(".//iframe[@src][1]/@src"),
    )

    # Pulls the channel name out of the teliaPlayer({...}) call; the JS
    # object uses single quotes and trailing commas, so normalize to JSON
    self.player_apicall_schema = validate.Schema(
        validate.transform(self._json_data_re.search),
        validate.any(
            None,
            validate.all(
                validate.get(1),
                validate.transform(lambda s: s.replace("'", '"')),
                validate.transform(
                    lambda s: re.sub(r",\s*\}", "}", s, flags=re.DOTALL)),
                validate.parse_json(),
                {"channel": str},
                validate.get("channel"))))

    # Filters the sources API response down to HLS playlist URLs
    self.sources_schema = validate.Schema(
        validate.parse_json(),
        {
            "source": {
                "sources": validate.all([{
                    "type": str,
                    "src": validate.url()
                }],
                    validate.filter(lambda src: src["type"] == "application/x-mpegURL"),
                    validate.map(lambda src: src.get("src"))),
            }
        },
        validate.get(("source", "sources")))
def _schema_media(self, slug):
    """Build a schema that selects, from a list of media dicts, the single
    item whose slug matches *slug* case-insensitively."""
    wanted = slug.lower()
    item = {
        'name': str,
        'slug': str,
        validate.optional('stitched'): {
            'urls': [{
                'type': str,
                'url': validate.url(),
            }]
        }
    }
    return validate.Schema(
        [item],
        validate.filter(lambda k: k['slug'].lower() == wanted),
        validate.get(0),
    )
class ard_live(Plugin):
    """Plugin for live streams on daserste.de (legacy Flash-era player)."""

    swf_url = "http://live.daserste.de/lib/br-player/swf/main.swf"
    _url_re = re.compile(r"https?://(www.)?daserste.de/", re.I)
    _player_re = re.compile(r'''dataURL\s*:\s*(?P<q>['"])(?P<url>.*?)(?P=q)''')
    # Extracts the player's dataURL from the page HTML
    _player_url_schema = validate.Schema(
        validate.transform(_player_re.search),
        validate.any(None, validate.all(validate.get("url"), validate.text)))
    # Picks the first non-subtitle <assets> group; collects url/bitrate pairs
    _livestream_schema = validate.Schema(
        validate.xml_findall(".//assets"),
        validate.filter(lambda x: x.attrib.get("type") != "subtitles"),
        validate.get(0),
        validate.xml_findall(".//asset"),
        [
            validate.union({
                "url": validate.xml_findtext("./fileName"),
                "bitrate": validate.xml_findtext("./bitrateVideo")
            })
        ])

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _get_streams(self):
        data_url = self.session.http.get(self.url, schema=self._player_url_schema)
        if data_url:
            res = self.session.http.get(urljoin(self.url, data_url))
            stream_info = self.session.http.xml(res, schema=self._livestream_schema)
            for stream in stream_info:
                url = stream["url"]
                try:
                    # Dispatch on URL type: HLS, HDS or progressive MP4
                    if ".m3u8" in url:
                        for s in HLSStream.parse_variant_playlist(
                                self.session, url, name_key="bitrate").items():
                            yield s
                    elif ".f4m" in url:
                        for s in HDSStream.parse_manifest(
                                self.session, url, pvswf=self.swf_url,
                                is_akamai=True).items():
                            yield s
                    elif ".mp4" in url:
                        yield "{0}k".format(stream["bitrate"]), HTTPStream(
                            self.session, url)
                except IOError as err:
                    self.logger.warning("Error parsing stream: {0}", err)
def _schema_consent(data):
    """Validate the YouTube consent-page HTML; return (form action, hidden inputs)."""
    # old consent form endpoint
    old_form = validate.xml_find(".//form[@action='https://consent.youtube.com/s']")
    # newer endpoint - only accept the form carrying the set_ytc hidden input
    new_form = validate.all(
        validate.xml_xpath(".//form[@action='https://consent.youtube.com/save']"),
        validate.filter(lambda elem: elem.xpath(".//input[@type='hidden'][@name='set_ytc'][@value='true']")),
        validate.get(0),
    )
    return validate.Schema(
        validate.parse_html(),
        validate.any(old_form, new_form),
        validate.union((
            validate.get("action"),
            validate.xml_xpath(".//input[@type='hidden']"),
        )),
    ).validate(data)
def _get_streams(self):
    """Extract the Brightcove player attributes from the page, fetch the
    player's policy key, and return the variant streams of the first HLS
    source from the playback API."""
    try:
        data = self.session.http.get(
            self.url,
            schema=validate.Schema(
                validate.parse_html(),
                validate.xml_find(
                    ".//video[@id='brightcove_video_player']"),
                validate.union_get("data-video-id", "data-account",
                                   "data-ad-config-id", "data-player")))
    except PluginError:
        return
    data_video_id, data_account, data_ad_config_id, data_player = data

    # The policy key is embedded in the player's JS
    url = self._PLAYER_URL.format(data_account=data_account,
                                  data_player=data_player)
    policy_key = self.session.http.get(
        url,
        schema=validate.Schema(
            validate.transform(self._policy_key_re.search),
            validate.any(None, validate.get(1))))
    if not policy_key:
        return

    url = self._API_URL.format(data_account=data_account,
                               data_video_id=data_video_id)
    if data_ad_config_id is not None:
        url = update_qsd(url, dict(ad_config_id=data_ad_config_id))

    streams = self.session.http.get(
        url,
        headers={"Accept": f"application/json;pk={policy_key}"},
        schema=validate.Schema(
            validate.parse_json(),
            {
                "sources": [{
                    validate.optional("type"): str,
                    "src": validate.url(),
                }],
            },
            validate.get("sources"),
            validate.filter(lambda source: source.get("type") == "application/x-mpegURL")))

    # only the first HLS source is used
    for stream in streams:
        return HLSStream.parse_variant_playlist(self.session, stream["src"])
class Welt(Plugin):
    """Plugin for welt.de live and VOD (mediathek) content."""

    _url_vod = "https://www.welt.de/onward/video/play/{0}"
    # Picks the first m3u8 source's src out of the page's VideoPlayer.Config JSON
    _schema = validate.Schema(
        validate.parse_html(),
        validate.xml_findtext(".//script[@type='application/json'][@data-content='VideoPlayer.Config']"),
        validate.parse_json(),
        validate.get("sources"),
        validate.filter(lambda obj: obj["extension"] == "m3u8"),
        validate.get((0, "src"))
    )

    def _get_streams(self):
        headers = {"Referer": self.url}
        hls_url = self.session.http.get(self.url, schema=self._schema)
        is_vod = "mediathek" in self.url.lower()
        if is_vod:
            # VOD URLs need to be resolved through the play endpoint first;
            # the final HLS URL is wherever the redirect lands
            token_url = self._url_vod.format(quote(hls_url, safe=""))
            hls_url = self.session.http.get(token_url, headers=headers).url
        return HLSStream.parse_variant_playlist(self.session, hls_url, headers=headers)
def _get_streams(self):
    """Locate the ott.streann.com iframe (when embedded on a third-party
    page), decrypt the config carried in the iframe's query string, and
    return the HLS variant streams."""
    if not self.matches[0]:
        # URL is a third-party page embedding streann - find the iframe
        self._domain = urlparse(self.url).netloc
        iframes = self.session.http.get(
            self.url,
            schema=validate.Schema(
                validate.parse_html(),
                validate.xml_findall(".//iframe[@src]"),
                validate.filter(lambda elem: urlparse(
                    elem.attrib.get("src")).netloc == "ott.streann.com")))
        if not iframes:
            log.error("Could not find 'ott.streann.com' iframe")
            return
        self.url = iframes[0].attrib.get("src")

    if not self._domain and self.get_option("url"):
        self._domain = urlparse(self.get_option("url")).netloc

    if self._domain is None:
        log.error("Missing source URL, use --streann-url")
        return

    self.session.http.headers.update({"Referer": self.url})
    # Get the query string
    encrypted_data = urlparse(self.url).query
    data = base64.b64decode(encrypted_data)
    # and decrypt it
    passphrase = self.passphrase()
    if passphrase:
        log.debug("Found passphrase")
        params = decrypt_openssl(data, passphrase)
        config = parse_qsd(params.decode("utf8"))
        log.trace("config: {0!r}".format(config))
        token = self.get_token(**config)
        if not token:
            return
        hls_url = self.stream_url.format(time=self.time,
                                         deviceId=self.device_id,
                                         token=token,
                                         **config)
        log.debug("URL={0}".format(hls_url))
        # error statuses are accepted so HLS-level errors surface later
        return HLSStream.parse_variant_playlist(
            self.session, hls_url, acceptable_status=(200, 403, 404, 500))
def _streams_brightcove_js(self, root):
    """Find the site's main JS bundle, scrape the Brightcove account and
    video IDs out of it, and delegate to the generic Brightcove resolver."""
    re_js_src = re.compile(r"^[\w/]+/main\.\w+\.js$")
    re_js_brightcove_video = re.compile(
        r'i\?\([A-Z]="[^"]+",y="(?P<video_id>[0-9]+).*"data-account"\s*:\s*"(?P<account_id>[0-9]+)',
    )
    # First: locate the main.*.js script referenced by the page
    schema_brightcove_js = validate.Schema(
        validate.xml_findall(r".//script[@src]"),
        validate.filter(
            lambda elem: re_js_src.search(elem.attrib.get("src"))),
        validate.get(0),
        str,
        validate.transform(lambda src: urljoin(self.url, src)))
    # Second: pull the account/video IDs out of the (minified) bundle
    schema_brightcove_js2 = validate.Schema(
        validate.transform(re_js_brightcove_video.search),
        validate.union_get("account_id", "video_id"))
    try:
        js_url = schema_brightcove_js.validate(root)
        log.debug(f"JS URL: {js_url}")
        account_id, video_id = self.session.http.get(
            js_url, schema=schema_brightcove_js2)
    except (PluginError, TypeError):
        # TypeError covers the regex not matching (search() -> None)
        return
    return self._brightcove(account_id, video_id)
viafree ) \. (?: dk|ee|lt|lv|no|se|com ) (/.+?/|/embed\?id=) (?P<stream_id>\d+) """, re.VERBOSE) _stream_schema = validate.Schema( { "streams": validate.all( {validate.text: validate.any(validate.text, int, None)}, validate.filter(lambda k, v: isinstance(v, validate.text))) }, validate.get("streams")) class Viasat(Plugin): @classmethod def can_handle_url(cls, url): return _url_re.match(url) def _get_swf_url(self): res = http.get(self.url) match = _swf_url_re.search(res.text) if not match: raise PluginError("Unable to find SWF URL in the HTML") return match.group(1)
def test_filter(self):
    """filter() keeps only the items for which the predicate holds."""
    result = validate(filter(lambda i: i > 5), [10, 5, 4, 6, 7])
    assert result == [10, 6, 7]
class Mixer(Plugin):
    """Plugin for mixer.com live channels and VOD recordings."""

    api_url = "https://mixer.com/api/v1/{type}/{id}"
    # Accepts only AVAILABLE VODs; keeps raw/hls entries as url/format/height
    _vod_schema = validate.Schema(
        {
            "state": "AVAILABLE",
            "vods": [{
                "baseUrl": validate.url(),
                "data": validate.any(None, {"Height": int}),
                "format": validate.text
            }]
        },
        validate.get("vods"),
        validate.filter(lambda x: x["format"] in ("raw", "hls")),
        [
            validate.union({
                "url": validate.get("baseUrl"),
                "format": validate.get("format"),
                "height": validate.all(validate.get("data"), validate.get("Height"))
            })
        ])

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_api_res(self, api_type, api_id):
        """GET an API resource; raises NoStreamsError on any HTTP failure."""
        try:
            res = self.session.http.get(
                self.api_url.format(type=api_type, id=api_id))
            return res
        except Exception as e:
            # crude status detection - the error text contains the HTTP code
            if "404" in str(e):
                self.logger.error("invalid {0} - {1}".format(api_type, api_id))
            elif "429" in str(e):
                self.logger.error(
                    "Too Many Requests, API rate limit exceeded.")
            raise NoStreamsError(self.url)

    def _get_vod_stream(self, vod_id):
        """Yield HLS streams (labelled by height) for an HLS-format VOD."""
        res = self._get_api_res("recordings", vod_id)
        for sdata in self.session.http.json(res, schema=self._vod_schema):
            if sdata["format"] == "hls":
                hls_url = urljoin(sdata["url"], "manifest.m3u8")
                yield "{0}p".format(sdata["height"]), HLSStream(
                    self.session, hls_url)

    def _get_live_stream(self, channel):
        """Yield HLS variant streams for an online channel."""
        res = self._get_api_res("channels", channel)
        channel_info = self.session.http.json(res)
        if not channel_info["online"]:
            return
        user_id = channel_info["id"]
        hls_url = self.api_url.format(
            type="channels", id="{0}/manifest.m3u8".format(user_id))
        for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
            yield s

    def _get_streams(self):
        # Dispatch to VOD (?vod=... query param) or live channel handling
        params = dict(parse_qsl(urlparse(self.url).query))
        vod_id = params.get("vod")
        match = _url_re.match(self.url)
        channel = match.group("channel")
        if vod_id:
            self.logger.debug("Looking for VOD {0} from channel: {1}",
                              vod_id, channel)
            return self._get_vod_stream(vod_id)
        else:
            self.logger.debug("Looking for channel: {0}", channel)
            return self._get_live_stream(channel)
from streamlink.plugin import Plugin from streamlink.plugin.api import http, validate from streamlink.stream import HDSStream API_URL = "http://api.sh.nhk.fivecool.tv/api/cdn/?publicId=3bz2huey&playerId=7Dy" _url_re = re.compile("http(s)?://(\w+\.)?nhk.or.jp/nhkworld") _schema = validate.Schema({ "live-streams": [{ "streams": validate.all( [{ "protocol": validate.text, "streamUrl": validate.text }], validate.filter(lambda s: s["protocol"] in ("http-flash", "http-hds")) ) }] }) class NHKWorld(Plugin): @classmethod def can_handle_url(cls, url): return _url_re.match(url) def _get_streams(self): res = http.get(API_URL) data = http.json(res, schema=_schema) streams = {}
class BBCiPlayer(Plugin):
    """
    Allows streaming of live channels from bbc.co.uk/iplayer/live/* and of
    iPlayer programmes from bbc.co.uk/iplayer/episode/*
    """
    url_re = re.compile(
        r"""https?://(?:www\.)?bbc.co.uk/iplayer/
        (
            episode/(?P<episode_id>\w+)|
            live/(?P<channel_name>\w+)
        )
        """, re.VERBOSE)
    mediator_re = re.compile(
        r'window\.mediatorDefer\s*=\s*page\([^,]*,\s*({.*?})\);', re.DOTALL)
    tvip_re = re.compile(r'channel"\s*:\s*{\s*"id"\s*:\s*"(\w+?)"')
    tvip_master_re = re.compile(r'event_master_brand=(\w+?)&')
    account_locals_re = re.compile(r'window.bbcAccount.locals\s*=\s*({.*?});')
    swf_url = "http://emp.bbci.co.uk/emp/SMPf/1.18.3/StandardMediaPlayerChromelessFlash.swf"
    # Salt prepended to the vpid before hashing for the mediaselector "atk" token.
    hash = base64.b64decode(
        b"N2RmZjc2NzFkMGM2OTdmZWRiMWQ5MDVkOWExMjE3MTk5MzhiOTJiZg==")
    api_url = (
        "http://open.live.bbc.co.uk/mediaselector/6/select/"
        "version/2.0/mediaset/{platform}/vpid/{vpid}/format/json/atk/{vpid_hash}/asn/1/"
    )
    platforms = ("pc", "iptv-all")
    session_url = "https://session.bbc.com/session"
    auth_url = "https://account.bbc.com/signin"

    # Extract the first version id of an episode from the mediator JSON blob.
    mediator_schema = validate.Schema(
        {"episode": {
            "versions": [{
                "id": validate.text
            }]
        }},
        validate.get("episode"), validate.get("versions"), validate.get(0),
        validate.get("id"))
    # Parse the mediaselector response and keep only video media entries.
    mediaselector_schema = validate.Schema(
        validate.transform(parse_json), {
            "media": [{
                "connection": [{
                    validate.optional("href"): validate.url(),
                    validate.optional("transferFormat"): validate.text
                }],
                "kind": validate.text
            }]
        }, validate.get("media"),
        validate.filter(lambda x: x["kind"] == "video"))
    arguments = PluginArguments(
        PluginArgument("username",
                       requires=["password"],
                       metavar="USERNAME",
                       help="The username used to register with bbc.co.uk."),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help=
            "A bbc.co.uk account password to use with --bbciplayer-username.",
            prompt="Enter bbc.co.uk account password"),
        PluginArgument(
            "hd",
            action="store_true",
            help=
            "Prefer HD streams over local SD streams, some live programmes may not be broadcast in HD."
        ),
    )

    @classmethod
    def can_handle_url(cls, url):
        """ Confirm plugin can handle URL """
        return cls.url_re.match(url) is not None

    @classmethod
    def _hash_vpid(cls, vpid):
        """Return the salted SHA1 hex digest used as the API "atk" token."""
        return sha1(cls.hash + str(vpid).encode("utf8")).hexdigest()

    @classmethod
    def _extract_nonce(cls, http_result):
        """
        Given an HTTP response from the session endpoint, extract the nonce,
        so we can "sign" requests with it.
        We don't really sign the requests in the traditional sense of a nonce,
        we just include them in the auth requests.

        :param http_result: HTTP response from the bbc session endpoint.
        :type http_result: requests.Response
        :return: nonce to "sign" url requests with
        :rtype: string
        """
        # Extract the redirect URL from the last call
        last_redirect_url = urlparse(http_result.history[-1].request.url)
        last_redirect_query = dict(parse_qsl(last_redirect_url.query))
        # Extract the nonce from the query string in the redirect URL
        final_url = urlparse(last_redirect_query['goto'])
        goto_url = dict(parse_qsl(final_url.query))
        goto_url_query = parse_json(goto_url['state'])

        # Return the nonce we can use for future queries
        return goto_url_query['nonce']

    def find_vpid(self, url, res=None):
        """
        Find the Video Packet ID in the HTML for the provided URL

        :param url: URL to download, if res is not provided.
        :param res: Provide a cached version of the HTTP response to search
        :type url: string
        :type res: requests.Response
        :return: Video Packet ID for a Programme in iPlayer
        :rtype: string
        """
        self.logger.debug("Looking for vpid on {0}", url)
        # Use pre-fetched page if available
        res = res or http.get(url)
        m = self.mediator_re.search(res.text)
        vpid = m and parse_json(m.group(1), schema=self.mediator_schema)
        return vpid

    def find_tvip(self, url, master=False):
        """Scrape the TV IP (channel brand id) from a live page; None if absent."""
        self.logger.debug("Looking for {0} tvip on {1}",
                          "master" if master else "", url)
        res = http.get(url)
        if master:
            m = self.tvip_master_re.search(res.text)
        else:
            m = self.tvip_re.search(res.text)
        return m and m.group(1)

    def mediaselector(self, vpid):
        """
        Yield (quality, Stream) pairs for the given vpid.

        Queries the mediaselector API once per platform, de-duplicates the
        connection URLs grouped by transfer format, then builds HDS/HLS/DASH
        streams from each group.
        """
        urls = defaultdict(set)
        for platform in self.platforms:
            url = self.api_url.format(vpid=vpid,
                                      vpid_hash=self._hash_vpid(vpid),
                                      platform=platform)
            self.logger.debug("Info API request: {0}", url)
            medias = http.get(url, schema=self.mediaselector_schema)
            for media in medias:
                for connection in media["connection"]:
                    urls[connection.get("transferFormat")].add(
                        connection["href"])

        # NOTE: renamed the inner loop variable (was `urls`, shadowing the
        # dict being iterated) for clarity.
        for stream_type, url_set in urls.items():
            self.logger.debug("{0} {1} streams", len(url_set), stream_type)
            for url in list(url_set):
                self.logger.debug(" {0}", url)
                if stream_type == "hds":
                    for s in HDSStream.parse_manifest(self.session,
                                                      url).items():
                        yield s
                if stream_type == "hls":
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url).items():
                        yield s
                # BUG FIX: this branch previously tested the stale
                # `connection` variable left over from the collection loop
                # above and parsed `connection["href"]`, so DASH manifests
                # came from an arbitrary connection (or raised NameError
                # when no media was returned). Match on the grouped
                # stream_type/url like the hds/hls branches.
                if stream_type == "dash":
                    for s in DASHStream.parse_manifest(self.session,
                                                       url).items():
                        yield s

    def login(self, ptrt_url):
        """
        Create session using BBC ID. See https://www.bbc.co.uk/usingthebbc/account/

        :param ptrt_url: The snapback URL to redirect to after successful authentication
        :type ptrt_url: string
        :return: Whether authentication was successful
        :rtype: bool
        """
        session_res = http.get(self.session_url, params=dict(ptrt=ptrt_url))
        http_nonce = self._extract_nonce(session_res)
        res = http.post(self.auth_url,
                        params=dict(ptrt=ptrt_url, nonce=http_nonce),
                        data=dict(jsEnabled=True,
                                  username=self.get_option("username"),
                                  password=self.get_option("password"),
                                  attempts=0),
                        headers={"Referer": self.url})
        # A successful login redirects back to the snapback URL.
        return len(res.history) != 0

    def _get_streams(self):
        if not self.get_option("username"):
            self.logger.error(
                "BBC iPlayer requires an account you must login using "
                "--bbciplayer-username and --bbciplayer-password")
            return
        self.logger.info(
            "A TV License is required to watch BBC iPlayer streams, see the BBC website for more "
            "information: https://www.bbc.co.uk/iplayer/help/tvlicence")
        if not self.login(self.url):
            self.logger.error(
                "Could not authenticate, check your username and password")
            return

        m = self.url_re.match(self.url)
        episode_id = m.group("episode_id")
        channel_name = m.group("channel_name")

        if episode_id:
            self.logger.debug("Loading streams for episode: {0}", episode_id)
            vpid = self.find_vpid(self.url)
            if vpid:
                self.logger.debug("Found VPID: {0}", vpid)
                for s in self.mediaselector(vpid):
                    yield s
            else:
                self.logger.error("Could not find VPID for episode {0}",
                                  episode_id)
        elif channel_name:
            self.logger.debug("Loading stream for live channel: {0}",
                              channel_name)
            if self.get_option("hd"):
                # BUG FIX: find_tvip() may return None; the old code appended
                # "_hd" before checking, raising TypeError. Guard first.
                master_brand = self.find_tvip(self.url, master=True)
                if master_brand:
                    tvip = master_brand + "_hd"
                    self.logger.debug("Trying HD stream {0}...", tvip)
                    try:
                        for s in self.mediaselector(tvip):
                            yield s
                    except PluginError:
                        self.logger.error(
                            "Failed to get HD streams, falling back to SD")
                    else:
                        return
            tvip = self.find_tvip(self.url)
            if tvip:
                self.logger.debug("Found TVIP: {0}", tvip)
                for s in self.mediaselector(tvip):
                    yield s
validate.transform(int), validate.transform(bool) ), "media_id": validate.text }], }, validate.get("livestream"), validate.length(1), validate.get(0) ) _player_schema = validate.Schema( { "clip": { "baseUrl": validate.any(None, validate.text), "bitrates": validate.all( validate.filter(lambda b: b.get("url") and b.get("label")), [{ "label": validate.text, "url": validate.text, }], ) }, validate.optional("playlist"): [{ validate.optional("connectionProvider"): validate.text, validate.optional("netConnectionUrl"): validate.text, validate.optional("bitrates"): [{ "label": validate.text, "url": validate.text, "provider": validate.text }] }],
{ "token": validate.text, "sig": validate.text }, validate.union(( validate.get("sig"), validate.get("token") )) ) _token_schema = validate.Schema( { "chansub": { "restricted_bitrates": validate.all( [validate.text], validate.filter( lambda n: not re.match(r"(.+_)?archives|live|chunked", n) ) ) } }, validate.get("chansub") ) _user_schema = validate.Schema( { validate.optional("display_name"): validate.text }, validate.get("display_name") ) _video_schema = validate.Schema( { "chunks": {
/(?P<channel_name>[A-Za-z0-9-_]+) ) """, re.VERBOSE) username_re = re.compile(r'''data-username\s*=\s*"(.*?)"''') chromecast_re = re.compile(r'''stream_chromecast_url"\s*:\s*(?P<url>".*?")''') _media_inner_schema = validate.Schema([{ "layerList": [{ "name": validate.text, validate.optional("sequenceList"): [{ "layerList": validate.all( [{ "name": validate.text, validate.optional("param"): dict }], validate.filter(lambda l: l["name"] in ("video", "reporting")) ) }] }] }]) _media_schema = validate.Schema( validate.any( _media_inner_schema, validate.all( {"sequence": _media_inner_schema}, validate.get("sequence") ) ) ) _vod_playlist_schema = validate.Schema({ "duration": float,
http(s)?://(\w+\.)? dailymotion.com (/embed)?/(video|live) /(?P<media_id>[^_?/]+) """, re.VERBOSE) _media_inner_schema = validate.Schema([{ "layerList": [{ "name": validate.text, validate.optional("sequenceList"): [{ "layerList": validate.all( [{ "name": validate.text, validate.optional("param"): dict }], validate.filter(lambda l: l["name"] in ("video", "reporting")) ) }] }] }]) _media_schema = validate.Schema( validate.any( _media_inner_schema, validate.all( {"sequence": _media_inner_schema}, validate.get("sequence") ) ) ) _vod_playlist_schema = validate.Schema({ "duration": float,
"Streams": validate.all( [ validate.all( {"Stream": validate.text}, validate.get("Stream") ) ], validate.get(0) ) }, validate.get("Streams") ) ], "Server": validate.text }], validate.filter(lambda s: s["LinkType"] in STREAMING_TYPES) ) }] }, validate.get("Data", {}) ) _video_schema = validate.Schema( { "Data": [{ "Assets": validate.all( [{ validate.optional("Links"): validate.all( [{ "Target": validate.text, "Uri": validate.text }], validate.filter(lambda l: l["Target"] in STREAMING_TYPES)
class BBCiPlayer(Plugin):
    """Stream BBC iPlayer episodes and live channels.

    Older variant of the plugin: authenticates via the idcta config endpoint
    and a nonce scraped from the signin page, then resolves streams through
    the open.live mediaselector API.
    """
    url_re = re.compile(
        r"""https?://(?:www\.)?bbc.co.uk/iplayer/
        (
            episode/(?P<episode_id>\w+)|
            live/(?P<channel_name>\w+)
        )
        """, re.VERBOSE)
    # mediator JSON blob embedded in episode pages
    mediator_re = re.compile(
        r'window\.mediatorDefer\s*=\s*page\([^,]*,\s*(\{.*?})\);', re.DOTALL)
    # live channel brand id embedded in the page query strings
    tvip_re = re.compile(r'event_master_brand=(\w+?)&')
    # account bootstrap data on the signin page (holds the auth nonce)
    account_locals_re = re.compile(r'window.bbcAccount.locals\s*=\s*(\{.*?});')
    swf_url = "http://emp.bbci.co.uk/emp/SMPf/1.18.3/StandardMediaPlayerChromelessFlash.swf"
    # salt prepended to the vpid before hashing for the API "atk" token
    hash = base64.b64decode(
        b"N2RmZjc2NzFkMGM2OTdmZWRiMWQ5MDVkOWExMjE3MTk5MzhiOTJiZg==")
    api_url = (
        "http://open.live.bbc.co.uk/mediaselector/6/select/"
        "version/2.0/mediaset/{platform}/vpid/{vpid}/format/json/atk/{vpid_hash}/asn/1/"
    )
    platforms = ("pc", "iptv-all")
    config_url = "http://www.bbc.co.uk/idcta/config"
    auth_url = "https://account.bbc.com/signin"

    # idcta config: provides the signin URL and identity cookie names
    config_schema = validate.Schema(
        validate.transform(parse_json), {
            "signin_url": validate.url(),
            "identity": {
                "cookieAgeDays": int,
                "accessTokenCookieName": validate.text,
                "idSignedInCookieName": validate.text
            }
        })
    # mediator blob -> first version id of the episode
    mediator_schema = validate.Schema(
        {"episode": {
            "versions": [{
                "id": validate.text
            }]
        }},
        validate.get("episode"), validate.get("versions"), validate.get(0),
        validate.get("id"))
    # mediaselector response -> video media entries with their connections
    mediaselector_schema = validate.Schema(
        validate.transform(parse_json), {
            "media": [{
                "connection": [{
                    validate.optional("href"): validate.url(),
                    validate.optional("transferFormat"): validate.text
                }],
                "kind": validate.text
            }]
        }, validate.get("media"),
        validate.filter(lambda x: x["kind"] == "video"))
    options = PluginOptions({"password": None, "username": None})

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    @classmethod
    def _hash_vpid(cls, vpid):
        # salted SHA1 hex digest used as the mediaselector "atk" token
        return sha1(cls.hash + str(vpid).encode("utf8")).hexdigest()

    def find_vpid(self, url, res=None):
        """Find the Video Packet ID in the page HTML; None if not present."""
        self.logger.debug("Looking for vpid on {0}", url)
        # Use pre-fetched page if available
        res = res or http.get(url)
        m = self.mediator_re.search(res.text)
        vpid = m and parse_json(m.group(1), schema=self.mediator_schema)
        return vpid

    def find_tvip(self, url):
        """Scrape the live channel brand id from the page; None if absent."""
        self.logger.debug("Looking for tvip on {0}", url)
        res = http.get(url)
        m = self.tvip_re.search(res.text)
        return m and m.group(1)

    def mediaselector(self, vpid):
        """Yield (quality, Stream) pairs for a vpid, one API query per platform."""
        for platform in self.platforms:
            url = self.api_url.format(vpid=vpid,
                                      vpid_hash=self._hash_vpid(vpid),
                                      platform=platform)
            self.logger.debug("Info API request: {0}", url)
            stream_urls = http.get(url, schema=self.mediaselector_schema)
            for media in stream_urls:
                for connection in media["connection"]:
                    if connection.get("transferFormat") == "hds":
                        for s in HDSStream.parse_manifest(
                                self.session, connection["href"]).items():
                            yield s
                    if connection.get("transferFormat") == "hls":
                        for s in HLSStream.parse_variant_playlist(
                                self.session, connection["href"]).items():
                            yield s

    def login(self, ptrt_url, context="tvandiplayer"):
        """Authenticate with a BBC account.

        Returns the post-login page response (which is the ptrt_url page)
        on success, or None on failure.
        """
        # get the site config, to find the signin url
        config = http.get(self.config_url,
                          params=dict(ptrt=ptrt_url),
                          schema=self.config_schema)
        res = http.get(config["signin_url"],
                       params=dict(userOrigin=context, context=context),
                       headers={"Referer": self.url})
        m = self.account_locals_re.search(res.text)
        if m:
            auth_data = parse_json(m.group(1))
            res = http.post(self.auth_url,
                            params=dict(context=auth_data["userOrigin"],
                                        ptrt=auth_data["ptrt"]["value"],
                                        userOrigin=auth_data["userOrigin"],
                                        nonce=auth_data["nonce"]),
                            data=dict(jsEnabled="false",
                                      attempts=0,
                                      username=self.get_option("username"),
                                      password=self.get_option("password")))
            # redirects to ptrt_url on successful login
            if res.url == ptrt_url:
                return res
        else:
            self.logger.error(
                "Could not authenticate, could not find the authentication nonce"
            )

    def _get_streams(self):
        if not self.get_option("username"):
            self.logger.error(
                "BBC iPlayer requires an account you must login using "
                "--bbciplayer-username and --bbciplayer-password")
            return
        self.logger.info(
            "A TV License is required to watch BBC iPlayer streams, see the BBC website for more "
            "information: https://www.bbc.co.uk/iplayer/help/tvlicence")
        # login() returns the target page on success, reused below as the
        # pre-fetched response for find_vpid()
        page_res = self.login(self.url)
        if not page_res:
            self.logger.error(
                "Could not authenticate, check your username and password")
            return

        m = self.url_re.match(self.url)
        episode_id = m.group("episode_id")
        channel_name = m.group("channel_name")

        if episode_id:
            self.logger.debug("Loading streams for episode: {0}", episode_id)
            vpid = self.find_vpid(self.url, res=page_res)
            if vpid:
                self.logger.debug("Found VPID: {0}", vpid)
                for s in self.mediaselector(vpid):
                    yield s
            else:
                self.logger.error("Could not find VPID for episode {0}",
                                  episode_id)
        elif channel_name:
            self.logger.debug("Loading stream for live channel: {0}",
                              channel_name)
            tvip = self.find_tvip(self.url)
            if tvip:
                self.logger.debug("Found TVIP: {0}", tvip)
                for s in self.mediaselector(tvip):
                    yield s
STREAM_FORMATS = ("mp4") INFO_URL = "http://www.vgtv.no/data/actions/videostatus/" _url_re = re.compile(r"https?://(www\.)?(vgtv|vg).no") _content_id_re = re.compile(r"(?:data-videoid=\"|videostatus/\?id=)(\d+)") _url_id_re = re.compile(( r"https?://(?:www\.)?vgtv.no/" r"(?:(?:#!/)?video/|(?:#!|\?)id=)(\d+)" )) _video_schema = validate.Schema({ "status": 200, "formats": validate.all( dict, validate.filter(lambda k, v: k in STREAM_TYPES), { validate.text: validate.all( dict, validate.filter(lambda k, v: k in STREAM_FORMATS), { validate.text: [{ "bitrate": int, "paths": [{ "address": validate.text, "port": int, "path": validate.text, "filename": validate.text, "application": validate.text, }], }]
from streamlink.stream import HDSStream, HLSStream, RTMPStream from streamlink.utils import rtmpparse STREAM_API_URL = "https://playapi.mtgx.tv/v3/videos/stream/{0}" _swf_url_re = re.compile(r"data-flashplayer-url=\"([^\"]+)\"") _player_data_re = re.compile(r"window.fluxData\s*=\s*JSON.parse\(\"(.+)\"\);") _stream_schema = validate.Schema( validate.any( None, validate.all({"msg": validate.text}), validate.all({ "streams": validate.all( {validate.text: validate.any(validate.text, int, None)}, validate.filter(lambda k, v: isinstance(v, validate.text)) ) }, validate.get("streams")) ) ) class Viasat(Plugin): """Streamlink Plugin for Viasat""" _iframe_re = re.compile(r"""<iframe.+src=["'](?P<url>[^"']+)["'].+allowfullscreen""") _image_re = re.compile(r"""<meta\sproperty=["']og:image["']\scontent=".+/(?P<stream_id>\d+)/[^/]+\.jpg""") _url_re = re.compile(r"""https?://(?:www\.)? (?: juicyplay\.dk
class AdultSwim(Plugin):
    """Stream adultswim.com live streams, show streams, and episode videos.

    Resolves a video id from the page's Next.js JSON payload, then fetches
    the media URLs; token-protected ("bulkaes") URLs are signed via the
    ngtv.io token service.
    """
    token_url = 'https://token.ngtv.io/token/token_spe'
    video_data_url = 'https://www.adultswim.com/api/shows/v1/media/{0}/desktop'

    # bundle script whose source contains the CDN token app id
    app_id_js_url_re = re.compile(
        r'''<script src="([^"]*asvp\..*?\.bundle\.js)">'''
    )
    app_id_re = re.compile(
        r'''CDN_TOKEN_APP_ID="(.*?)"'''
    )
    # Next.js state blob embedded in every page
    json_data_re = re.compile(
        r'''<script id="__NEXT_DATA__" type="application/json">({.*})</script>'''
    )
    # drops the last path component (episode slug) from the URL
    truncate_url_re = re.compile(r'''(.*)/\w+/?''')

    # media API response -> {'unprotected': {...}} and/or {'bulkaes': {...}}
    _api_schema = validate.Schema({
        'media': {
            'desktop': {
                validate.text: {
                    'url': validate.url()
                }
            }
        }},
        validate.get('media'),
        validate.get('desktop'),
        validate.filter(lambda k, v: k in ['unprotected', 'bulkaes'])
    )
    # Next.js blob -> list of {'id', 'stream'} entries for live/show streams
    _stream_data_schema = validate.Schema({
        'props': {'__REDUX_STATE__': {'streams': [{
            'id': validate.text,
            'stream': validate.text,
        }]}}},
        validate.get('props'),
        validate.get('__REDUX_STATE__'),
        validate.get('streams'),
    )
    # token service response: either a token or an error message
    _token_schema = validate.Schema(
        validate.any(
            {'auth': {'token': validate.text}},
            {'auth': {'error': {'message': validate.text}}},
        ),
        validate.get('auth'),
    )
    # Next.js blob -> Apollo cache entries keyed 'Video:<...>'
    _video_data_schema = validate.Schema({
        'props': {'pageProps': {'__APOLLO_STATE__': {
            validate.text: {
                validate.optional('id'): validate.text,
                validate.optional('slug'): validate.text,
            }
        }}}},
        validate.get('props'),
        validate.get('pageProps'),
        validate.get('__APOLLO_STATE__'),
        validate.filter(lambda k, v: k.startswith('Video:')),
    )

    # NOTE(review): parameter shadows the builtin id(); kept as-is to avoid
    # breaking any keyword callers.
    def _get_stream_data(self, id):
        """Return the stream id matching `id` from the page's stream list.

        Returns None when no entry matches.
        Raises PluginError if the __NEXT_DATA__ blob cannot be found.
        """
        res = self.session.http.get(self.url)
        m = self.json_data_re.search(res.text)
        if m and m.group(1):
            streams = parse_json(m.group(1), schema=self._stream_data_schema)
        else:
            raise PluginError("Failed to get json_data")

        for stream in streams:
            if 'id' in stream:
                if id == stream['id'] and 'stream' in stream:
                    return stream['stream']

    def _get_video_data(self, slug):
        """Return the video id for an episode `slug`.

        Fetches the show page (the URL with its last path component removed)
        and searches its Apollo cache. Returns None when no entry matches.
        Raises PluginError on a malformed URL or missing __NEXT_DATA__ blob.
        """
        m = self.truncate_url_re.search(self.url)
        if m and m.group(1):
            log.debug("Truncated URL={0}".format(m.group(1)))
        else:
            raise PluginError("Failed to truncate URL")

        res = self.session.http.get(m.group(1))
        m = self.json_data_re.search(res.text)
        if m and m.group(1):
            videos = parse_json(m.group(1), schema=self._video_data_schema)
        else:
            raise PluginError("Failed to get json_data")

        for video in videos:
            if 'slug' in videos[video]:
                if slug == videos[video]['slug'] and 'id' in videos[video]:
                    return videos[video]['id']

    def _get_token(self, path):
        """Request a CDN auth token for the given stream `path`.

        Scrapes the app id out of the site's asvp bundle script, then asks
        the ngtv.io token service to sign the path.
        Raises PluginError at each step that fails.
        """
        res = self.session.http.get(self.url)
        m = self.app_id_js_url_re.search(res.text)
        app_id_js_url = m and m.group(1)
        if not app_id_js_url:
            raise PluginError("Could not determine app_id_js_url")
        log.debug("app_id_js_url={0}".format(app_id_js_url))

        res = self.session.http.get(app_id_js_url)
        m = self.app_id_re.search(res.text)
        app_id = m and m.group(1)
        if not app_id:
            raise PluginError("Could not determine app_id")
        log.debug("app_id={0}".format(app_id))

        res = self.session.http.get(self.token_url, params=dict(
            format='json',
            appId=app_id,
            path=path,
        ))
        token_data = self.session.http.json(res, schema=self._token_schema)
        if 'error' in token_data:
            raise PluginError(token_data['error']['message'])
        return token_data['token']

    def _get_streams(self):
        # URL pattern yields (url_type, show_name, episode_name); a bare
        # /streams URL means the live stream.
        url_type, show_name, episode_name = self.match.groups()
        if url_type == 'streams' and not show_name:
            url_type = 'live-stream'
        elif not show_name:
            raise PluginError("Missing show_name for url_type: {0}".format(
                url_type,
            ))
        log.debug("URL type={0}".format(url_type))

        if url_type == 'live-stream':
            video_id = self._get_stream_data(url_type)
        elif url_type == 'streams':
            video_id = self._get_stream_data(show_name)
        elif url_type == 'videos':
            if show_name is None or episode_name is None:
                raise PluginError(
                    "Missing show_name or episode_name for url_type: {0}".format(
                        url_type,
                    )
                )
            video_id = self._get_video_data(episode_name)
        else:
            raise PluginError("Unrecognised url_type: {0}".format(url_type))

        if video_id is None:
            raise PluginError("Could not find video_id")
        log.debug("Video ID={0}".format(video_id))

        res = self.session.http.get(self.video_data_url.format(video_id))
        url_data = self.session.http.json(res, schema=self._api_schema)

        # Prefer the unprotected URL; otherwise sign the bulkaes URL by
        # appending an hdnts token as the query string.
        if 'unprotected' in url_data:
            url = url_data['unprotected']['url']
        elif 'bulkaes' in url_data:
            url_parsed = urlparse(url_data['bulkaes']['url'])
            token = self._get_token(url_parsed.path)
            url = urlunparse((
                url_parsed.scheme,
                url_parsed.netloc,
                url_parsed.path,
                url_parsed.params,
                "{0}={1}".format('hdnts', token),
                url_parsed.fragment,
            ))
        else:
            raise PluginError("Could not find a usable URL in url_data")
        log.debug("URL={0}".format(url))
        return HLSStream.parse_variant_playlist(self.session, url)
/ (?P<clip_name>[\w]+) )? """, re.VERBOSE) _access_token_schema = validate.Schema( { "token": validate.text, "sig": validate.text }, validate.union((validate.get("sig"), validate.get("token")))) _token_schema = validate.Schema( { "chansub": { "restricted_bitrates": validate.all([validate.text], validate.filter(lambda n: not re.match( r"(.+_)?archives|live|chunked", n))) } }, validate.get("chansub")) _user_schema = validate.Schema( {validate.optional("display_name"): validate.text}, validate.get("display_name")) _video_schema = validate.Schema({ "chunks": { validate.text: [{ "length": int, "url": validate.any(None, validate.url(scheme="http")), "upkeep": validate.any("pass", "fail", None) }] }, "restrictions": { validate.text: validate.text
STREAM_INFO_URL = "http://live.daserste.de/{0}/livestream.xml" SWF_URL = "http://live.daserste.de/lib/br-player/swf/main.swf" STREAMING_TYPES = { "streamingUrlLive": ( "HDS", partial(HDSStream.parse_manifest, pvswf=SWF_URL) ), "streamingUrlIPhone": ( "HLS", HLSStream.parse_variant_playlist ) } _url_re = re.compile("http(s)?://live.daserste.de/(?P<channel>[^/?]+)?") _livestream_schema = validate.Schema( validate.xml_findall("video/*"), validate.filter(lambda e: e.tag in STREAMING_TYPES), validate.map(lambda e: (STREAMING_TYPES.get(e.tag), e.text)), validate.transform(dict), ) class ard_live(Plugin): @classmethod def can_handle_url(cls, url): return _url_re.match(url) def _get_streams(self): match = _url_re.match(self.url) channel = match.group("channel") res = http.get(STREAM_INFO_URL.format(channel)) urls = http.xml(res, schema=_livestream_schema)