class Rtve(Plugin):
    """Plugin for rtve.es (Spanish public broadcaster RTVE) live and VOD streams."""

    # Key used by ZTNRClient to talk to RTVE's CDN-list endpoint (decoded at import time).
    secret_key = base64.b64decode("eWVMJmRhRDM=")
    url_re = re.compile(r"""
        https?://(?:www\.)?rtve\.es/(?:directo|infantil|noticias|television|deportes|alacarta|drmn)/.*?/?
    """, re.VERBOSE)
    # Parses the CDN XML: one entry per <preset>, exposing its quality label
    # ("type" attribute) and the list of <url> texts beneath it.
    cdn_schema = validate.Schema(
        validate.transform(partial(parse_xml, invalid_char_entities=True)),
        validate.xml_findall(".//preset"),
        [
            validate.union({
                "quality": validate.all(validate.getattr("attrib"),
                                        validate.get("type")),
                "urls": validate.all(
                    validate.xml_findall(".//url"),
                    [validate.getattr("text")]
                )
            })
        ]
    )
    subtitles_api = "http://www.rtve.es/api/videos/{id}/subtitulos.json"
    # Reduces the subtitles JSON to the list of {"src", "lang"} items.
    subtitles_schema = validate.Schema({
        "page": {
            "items": [{
                "src": validate.url(),
                "lang": validate.text
            }]
        }
    }, validate.get("page"), validate.get("items"))
    video_api = "http://www.rtve.es/api/videos/{id}.json"
    # Reduces the video JSON to the first item's quality presets (preset name + height).
    video_schema = validate.Schema({
        "page": {
            "items": [{
                "qualities": [{
                    "preset": validate.text,
                    "height": int
                }]
            }]
        }
    }, validate.get("page"), validate.get("items"), validate.get(0))
    arguments = PluginArguments(
        PluginArgument(
            "mux-subtitles",
            action="store_true",
            help="""
    Automatically mux available subtitles in to the output stream.
    """
        )
    )

    @classmethod
    def can_handle_url(cls, url):
        # True when the URL matches one of the supported rtve.es sections.
        return cls.url_re.match(url) is not None

    def __init__(self, url):
        Plugin.__init__(self, url)
        # RTVE serves different markup to unknown user agents; pretend to be Safari.
        self.session.http.headers = {"User-Agent": useragents.SAFARI_8}
        self.zclient = ZTNRClient(self.secret_key, self.session)

    def _get_content_id(self):
        """Scrape the page for the first <div> carrying a data-id attribute.

        Returns the id as int, or None (after logging an error) if absent.
        """
        res = self.session.http.get(self.url)
        for div in itertags(res.text, "div"):
            if div.attributes.get("data-id"):
                return int(div.attributes.get("data-id"))
        else:
            # for/else: runs only when the loop finished without returning,
            # i.e. no div on the page had a data-id attribute.
            log.error("Failed to get content_id")

    def _get_subtitles(self, content_id):
        """Fetch the subtitle track list for the given content id."""
        res = self.session.http.get(self.subtitles_api.format(id=content_id))
        return self.session.http.json(res, schema=self.subtitles_schema)

    def _get_quality_map(self, content_id):
        """Map RTVE preset labels (Spanish names) to "<height>p" quality names."""
        res = self.session.http.get(self.video_api.format(id=content_id))
        data = self.session.http.json(res, schema=self.video_schema)
        qmap = {}
        for item in data["qualities"]:
            # Translate API preset codes to the labels used in the CDN list.
            qname = {"MED": "Media", "HIGH": "Alta", "ORIGINAL": "Original"}.get(item["preset"], item["preset"])
            qmap[qname] = u"{0}p".format(item["height"])
        return qmap

    def _get_streams(self):
        """Yield (quality, stream) pairs; muxes subtitles in when requested."""
        streams = []
        content_id = self._get_content_id()
        if content_id:
            log.debug(f"Found content with id: {content_id}")
            stream_data = self.zclient.get_cdn_list(content_id, schema=self.cdn_schema)
            quality_map = None
            for stream in stream_data:
                for url in stream["urls"]:
                    if ".m3u8" in url:
                        try:
                            streams.extend(HLSStream.parse_variant_playlist(self.session, url).items())
                        except (IOError, OSError) as err:
                            # A broken playlist shouldn't abort the other CDN entries.
                            log.error(str(err))
                    elif ((url.endswith("mp4") or url.endswith("mov") or url.endswith("avi"))
                          # HEAD probe: only keep progressive URLs that actually resolve.
                          and self.session.http.head(url, raise_for_status=False).status_code == 200):
                        if quality_map is None:
                            # only make the request when it is necessary
                            quality_map = self._get_quality_map(content_id)
                        # rename the HTTP sources to match the HLS sources
                        quality = quality_map.get(stream["quality"], stream["quality"])
                        streams.append((quality, HTTPStream(self.session, url)))

            subtitles = None
            if self.get_option("mux_subtitles"):
                subtitles = self._get_subtitles(content_id)
            if subtitles:
                # One substream per language, muxed into every video stream.
                substreams = {}
                for i, subtitle in enumerate(subtitles):
                    substreams[subtitle["lang"]] = HTTPStream(self.session, subtitle["src"])
                for q, s in streams:
                    yield q, MuxedStream(self.session, s, subtitles=substreams)
            else:
                for s in streams:
                    yield s
class BBCiPlayer(Plugin):
    """Plugin for BBC iPlayer episodes and live channels, with BBC account login."""

    url_re = re.compile(r"""https?://(?:www\.)?bbc.co.uk/iplayer/
        (
            episode/(?P<episode_id>\w+)|
            live/(?P<channel_name>\w+)
        )
    """, re.VERBOSE)
    # Extracts the media "vpid" embedded in the episode page JSON.
    vpid_re = re.compile(r'"ident_id"\s*:\s*"(\w+)"')
    # Extracts the live channel's TVIP from an analytics query string.
    tvip_re = re.compile(r'event_master_brand=(\w+?)&')
    # Grabs the inline JSON blob holding the login nonce and redirect info.
    account_locals_re = re.compile(r'window.bbcAccount.locals\s*=\s*(\{.*?});')
    swf_url = "http://emp.bbci.co.uk/emp/SMPf/1.18.3/StandardMediaPlayerChromelessFlash.swf"
    # Secret salt for the mediaselector "atk" token (decoded at import time).
    hash = base64.b64decode(
        b"N2RmZjc2NzFkMGM2OTdmZWRiMWQ5MDVkOWExMjE3MTk5MzhiOTJiZg==")
    api_url = (
        "http://open.live.bbc.co.uk/mediaselector/5/select/"
        "version/2.0/mediaset/{platform}/vpid/{vpid}/atk/{vpid_hash}/asn/1/")
    # Mediaselector platforms queried in order; each can return different formats.
    platforms = ("pc", "iptv-all")
    config_url = "http://www.bbc.co.uk/idcta/config"
    auth_url = "https://account.bbc.com/signin"
    # Validates the idcta config JSON and keeps the signin URL + identity cookie names.
    config_schema = validate.Schema(
        validate.transform(parse_json),
        {
            "signin_url": validate.url(),
            "identity": {
                "cookieAgeDays": int,
                "accessTokenCookieName": validate.text,
                "idSignedInCookieName": validate.text
            }
        })
    # Parses the mediaselector XML into {"hds": [urls], "hls": [urls]}, de-duplicated.
    mediaselector_schema = validate.Schema(
        validate.transform(partial(parse_xml, ignore_ns=True)),
        validate.union({
            "hds": validate.xml_findall(
                ".//media[@kind='video']//connection[@transferFormat='hds']"),
            "hls": validate.xml_findall(
                ".//media[@kind='video']//connection[@transferFormat='hls']")
        }),
        {
            validate.text: validate.all(
                [
                    validate.all(validate.getattr("attrib"),
                                 validate.get("href"))
                ],
                validate.transform(lambda x: list(set(x)))  # unique
            )
        })
    options = PluginOptions({"password": None, "username": None})

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    @classmethod
    def _hash_vpid(cls, vpid):
        # atk token: sha1 of the secret salt concatenated with the vpid.
        return sha1(cls.hash + str(vpid).encode("utf8")).hexdigest()

    def find_vpid(self, url, res=None):
        """Find the media vpid on the given page; returns None when absent."""
        self.logger.debug("Looking for vpid on {0}", url)
        # Use pre-fetched page if available
        res = res or http.get(url)
        m = self.vpid_re.search(res.text)
        return m and m.group(1)

    def find_tvip(self, url):
        """Find the live channel's TVIP on the given page; returns None when absent."""
        self.logger.debug("Looking for tvip on {0}", url)
        res = http.get(url)
        m = self.tvip_re.search(res.text)
        return m and m.group(1)

    def mediaselector(self, vpid):
        """Yield (name, stream) pairs from the mediaselector API for each platform."""
        for platform in self.platforms:
            url = self.api_url.format(vpid=vpid, vpid_hash=self._hash_vpid(vpid),
                                      platform=platform)
            stream_urls = http.get(url, schema=self.mediaselector_schema)
            for surl in stream_urls.get("hls"):
                for s in HLSStream.parse_variant_playlist(self.session, surl).items():
                    yield s
            for surl in stream_urls.get("hds"):
                for s in HDSStream.parse_manifest(self.session, surl).items():
                    yield s

    def login(self, ptrt_url, context="tvandiplayer"):
        """Authenticate against account.bbc.com.

        Returns the final response (the ptrt_url page) on success, else None.
        """
        # get the site config, to find the signin url
        config = http.get(self.config_url, params=dict(ptrt=ptrt_url),
                          schema=self.config_schema)
        res = http.get(config["signin_url"],
                       params=dict(userOrigin=context, context=context),
                       headers={"Referer": self.url})
        m = self.account_locals_re.search(res.text)
        if m:
            auth_data = parse_json(m.group(1))
            res = http.post(self.auth_url,
                            params=dict(context=auth_data["userOrigin"],
                                        ptrt=auth_data["ptrt"]["value"],
                                        userOrigin=auth_data["userOrigin"],
                                        nonce=auth_data["nonce"]),
                            data=dict(jsEnabled="false",
                                      attempts=0,
                                      username=self.get_option("username"),
                                      password=self.get_option("password")))
            # redirects to ptrt_url on successful login
            if res.url == ptrt_url:
                return res
        else:
            self.logger.error(
                "Could not authenticate, could not find the authentication nonce"
            )

    def _get_streams(self):
        """Yield streams for an episode (via vpid) or live channel (via tvip)."""
        self.logger.info(
            "A TV License is required to watch BBC iPlayer streams, see the BBC website for more "
            "information: https://www.bbc.co.uk/iplayer/help/tvlicence")
        page_res = None
        if self.get_option("username"):
            # Log in first; the returned page is reused for the vpid search below.
            page_res = self.login(self.url)
            if not page_res:
                self.logger.error(
                    "Could not authenticate, check your username and password")
                return
        m = self.url_re.match(self.url)
        episode_id = m.group("episode_id")
        channel_name = m.group("channel_name")
        if episode_id:
            self.logger.debug("Loading streams for episode: {0}", episode_id)
            vpid = self.find_vpid(self.url, res=page_res)
            if vpid:
                self.logger.debug("Found VPID: {0}", vpid)
                for s in self.mediaselector(vpid):
                    yield s
            else:
                self.logger.error("Could not find VPID for episode {0}", episode_id)
        elif channel_name:
            self.logger.debug("Loading stream for live channel: {0}", channel_name)
            tvip = self.find_tvip(self.url)
            if tvip:
                self.logger.debug("Found TVIP: {0}", tvip)
                for s in self.mediaselector(tvip):
                    yield s
class DeutscheWelle(Plugin):
    """Plugin for dw.com (Deutsche Welle) live channels and VOD content."""

    # Channel id used when the page/URL doesn't select one explicitly.
    default_channel = "1"
    url_re = re.compile(r"https?://(?:www\.)?dw\.com/")
    # Finds the currently selected sub-channel id in the page's channel list.
    channel_re = re.compile(r'''<a.*?data-id="(\d+)".*?class="ici"''')
    # Captures (channel-id, stream-url) pairs from each live mediaItem div.
    live_stream_div = re.compile(
        r'''
        <div\s+class="mediaItem"\s+data-channel-id="(\d+)".*?>.*?
        <input\s+type="hidden"\s+name="file_name"\s+value="(.*?)"\s*>.*?<div
        ''',
        re.DOTALL | re.VERBOSE)
    smil_api_url = "http://www.dw.com/smil/{}"
    html5_api_url = "http://www.dw.com/html5Resource/{}"
    vod_player_type_re = re.compile(
        r'<input type="hidden" name="player_type" value="(?P<stream_type>.+?)">'
    )
    # Captures the embedded VOD stream URL and its media id.
    stream_vod_data_re = re.compile(
        r'<input\s+type="hidden"\s+name="file_name"\s+value="(?P<stream_url>.+?)">.*?'
        r'<input\s+type="hidden"\s+name="media_id"\s+value="(?P<stream_id>\d+)">',
        re.DOTALL)
    # Parses the SMIL document into its base URL plus per-stream src/bitrate/width.
    smil_schema = validate.Schema(
        validate.union({
            "base": validate.all(validate.xml_find(".//meta"),
                                 validate.xml_element(attrib={"base": validate.text}),
                                 validate.get("base")),
            "streams": validate.all(validate.xml_findall(".//switch/*"), [
                validate.all(
                    validate.getattr("attrib"),
                    {
                        "src": validate.text,
                        "system-bitrate": validate.all(
                            validate.text,
                            validate.transform(int),
                        ),
                        validate.optional("width"): validate.all(validate.text,
                                                                 validate.transform(int))
                    })
            ])
        }))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def _create_stream(self, url, quality=None):
        """Build the right stream object for a URL.

        Returns a (quality, stream) tuple for RTMP/HTTP URLs, or the
        parse_variant_playlist items for an HLS URL.
        """
        if url.startswith('rtmp://'):
            return (quality, RTMPStream(self.session, {'rtmp': url}))
        if url.endswith('.m3u8'):
            return HLSStream.parse_variant_playlist(self.session, url).items()

        return (quality, HTTPStream(self.session, url))

    def _get_live_streams(self, page):
        """Resolve the live stream for the selected (or default) sub-channel."""
        # check if a different language has been selected
        qs = dict(parse_qsl(urlparse(self.url).query))
        channel = qs.get("channel")

        if not channel:
            m = self.channel_re.search(page.text)
            channel = m and m.group(1)

        self.logger.debug("Using sub-channel ID: {0}", channel)

        # extract the streams from the page, mapping between channel-id and stream url
        media_items = self.live_stream_div.finditer(page.text)
        stream_map = dict(mi.groups() for mi in media_items)

        # BUGFIX: previously `str(channel) or self.default_channel` — str(None)
        # is the truthy string "None", so the default channel was never used.
        # Apply the fallback before stringifying.
        stream_url = stream_map.get(str(channel or self.default_channel))
        if stream_url:
            return self._create_stream(stream_url)

    def _get_vod_streams(self, stream_type, page):
        """Yield VOD streams: the one embedded in the page plus SMIL API variants."""
        m = self.stream_vod_data_re.search(page.text)
        if m is None:
            return
        stream_url, stream_id = m.groups()

        if stream_type == "video":
            stream_api_id = "v-{}".format(stream_id)
            default_quality = "vod"
        elif stream_type == "audio":
            stream_api_id = "a-{}".format(stream_id)
            default_quality = "audio"
        else:
            return

        # Retrieve stream embedded in web page
        yield self._create_stream(stream_url, default_quality)

        # Retrieve streams using API
        res = self.session.http.get(self.smil_api_url.format(stream_api_id))
        videos = self.session.http.xml(res, schema=self.smil_schema)

        for video in videos['streams']:
            url = videos["base"] + video["src"]
            # Skip the stream we already yielded from the page itself.
            if url == stream_url or url.replace("_dwdownload.", ".") == stream_url:
                continue

            if video["system-bitrate"] > 0:
                # If width is available, use it to select the best stream
                # amongst those with same bitrate
                quality = "{}k".format((video["system-bitrate"] + video.get("width", 0)) // 1000)
            else:
                quality = default_quality
            yield self._create_stream(url, quality)

    def _get_streams(self):
        """Dispatch to live or VOD handling based on the page's player_type field."""
        res = self.session.http.get(self.url)
        m = self.vod_player_type_re.search(res.text)
        if m is None:
            return

        stream_type = m.group("stream_type")
        if stream_type == "dwlivestream":
            return self._get_live_streams(res)

        return self._get_vod_streams(stream_type, res)
class Rtve(Plugin):
    """Plugin for rtve.es streams, locating content via the embedded idAsset JSON."""

    # Extracts the asset id from inline page JSON: "idAsset":"<digits>".
    _re_idAsset = re.compile(r"\"idAsset\":\"(\d+)\"")
    # Key used by ZTNRClient for the CDN-list request (decoded at import time).
    secret_key = base64.b64decode("eWVMJmRhRDM=")
    # CDN XML -> list of {"quality": preset type, "urls": [url texts]}.
    cdn_schema = validate.Schema(
        validate.transform(partial(parse_xml, invalid_char_entities=True)),
        validate.xml_findall(".//preset"),
        [
            validate.union({
                "quality": validate.all(validate.getattr("attrib"),
                                        validate.get("type")),
                "urls": validate.all(validate.xml_findall(".//url"),
                                     [validate.getattr("text")])
            })
        ])
    subtitles_api = "https://www.rtve.es/api/videos/{id}/subtitulos.json"
    # Subtitles JSON -> list of {"src", "lang"} items.
    subtitles_schema = validate.Schema(
        {"page": {
            "items": [{
                "src": validate.url(),
                "lang": validate.text
            }]
        }},
        validate.get("page"),
        validate.get("items"))
    video_api = "https://www.rtve.es/api/videos/{id}.json"
    # Video JSON -> first item's quality presets (preset name + height).
    video_schema = validate.Schema(
        {
            "page": {
                "items": [{
                    "qualities": [{
                        "preset": validate.text,
                        "height": int
                    }]
                }]
            }
        },
        validate.get("page"),
        validate.get("items"),
        validate.get(0))
    arguments = PluginArguments(PluginArgument("mux-subtitles", is_global=True))

    def __init__(self, url):
        super().__init__(url)
        self.zclient = ZTNRClient(self.secret_key, self.session)

    def _get_subtitles(self, content_id):
        """Fetch the subtitle track list for the given content id."""
        res = self.session.http.get(self.subtitles_api.format(id=content_id))
        return self.session.http.json(res, schema=self.subtitles_schema)

    def _get_quality_map(self, content_id):
        """Map RTVE preset labels (Spanish names) to "<height>p" quality names."""
        res = self.session.http.get(self.video_api.format(id=content_id))
        data = self.session.http.json(res, schema=self.video_schema)
        qmap = {}
        for item in data["qualities"]:
            # Translate API preset codes to the labels used in the CDN list.
            qname = {
                "MED": "Media",
                "HIGH": "Alta",
                "ORIGINAL": "Original"
            }.get(item["preset"], item["preset"])
            qmap[qname] = f"{item['height']}p"
        return qmap

    def _get_streams(self):
        """Yield streams for the page's asset, at most one HLS + one MP4 per CDN entry."""
        res = self.session.http.get(self.url)
        m = self._re_idAsset.search(res.text)
        if m:
            content_id = m.group(1)
            log.debug(f"Found content with id: {content_id}")
            stream_data = self.zclient.get_cdn_list(content_id,
                                                    schema=self.cdn_schema)
            quality_map = None
            streams = []
            for stream in stream_data:
                # only use one stream
                _one_m3u8 = False
                _one_mp4 = False
                for url in stream["urls"]:
                    # Match on the URL path so query strings don't defeat the
                    # extension check.
                    p_url = urlparse(url)
                    if p_url.path.endswith(".m3u8"):
                        if _one_m3u8:
                            continue
                        try:
                            streams.extend(
                                HLSStream.parse_variant_playlist(
                                    self.session, url).items())
                            _one_m3u8 = True
                        except OSError as err:
                            log.error(str(err))
                    elif p_url.path.endswith(".mp4"):
                        if _one_mp4:
                            continue
                        if quality_map is None:
                            # only make the request when it is necessary
                            quality_map = self._get_quality_map(content_id)
                        # rename the HTTP sources to match the HLS sources
                        quality = quality_map.get(stream["quality"],
                                                  stream["quality"])
                        streams.append((quality, HTTPStream(self.session, url)))
                        _one_mp4 = True

            subtitles = None
            if self.get_option("mux_subtitles"):
                subtitles = self._get_subtitles(content_id)
            if subtitles:
                # One substream per language, muxed into every video stream.
                substreams = {}
                for i, subtitle in enumerate(subtitles):
                    substreams[subtitle["lang"]] = HTTPStream(
                        self.session, subtitle["src"])
                for q, s in streams:
                    yield q, MuxedStream(self.session, s, subtitles=substreams)
            else:
                for s in streams:
                    yield s
def test_getattr(self):
    """getattr() reads an existing attribute and falls back to the default."""
    element = Element("foo")
    # Existing attribute: returns its value.
    existing = validate(getattr("tag"), element)
    assert existing == "foo"
    # Missing attribute: returns the supplied default instead.
    fallback = validate(getattr("invalid", "default"), element)
    assert fallback == "default"
validate.transform(_clientlibs_re.search), validate.get(2), validate.text ) }) ) _language_schema = validate.Schema( validate.xml_findtext("./country_code") ) _xml_to_srt_schema = validate.Schema( validate.xml_findall(".//body/div"), [ validate.union([validate.all( validate.getattr("attrib"), validate.get("{http://www.w3.org/XML/1998/namespace}lang") ), validate.all( validate.xml_findall("./p"), validate.transform(lambda x: list(enumerate(x, 1))), [ validate.all( validate.union({ "i": validate.get(0), "begin": validate.all( validate.get(1), validate.getattr("attrib"), validate.get("begin"), validate.transform(lambda s: s.replace(".", ",")) ),
class WWENetwork(Plugin):
    """Plugin for network.wwe.com; requires a WWE Network account login."""

    url_re = re.compile(r"https?://network.wwe.com")
    # Extracts the numeric content id embedded in the page.
    content_id_re = re.compile(r'''"content_id" : "(\d+)"''')
    playback_scenario = "HTTP_CLOUD_WIRED"
    login_url = "https://secure.net.wwe.com/workflow.do"
    login_page_url = "https://secure.net.wwe.com/enterworkflow.do?flowId=account.login&forwardUrl=http%3A%2F%2Fnetwork.wwe.com"
    api_url = "https://ws.media.net.wwe.com/ws/media/mf/op-findUserVerifiedEvent/v-2.3"
    # Parses the media-info XML: status code/message, stream URLs, optional
    # fingerprint/session key, and the session-attribute name/value pairs.
    _info_schema = validate.Schema(
        validate.union({
            "status": validate.union({
                "code": validate.all(validate.xml_findtext(".//status-code"),
                                     validate.transform(int)),
                "message": validate.xml_findtext(".//status-message"),
            }),
            "urls": validate.all(validate.xml_findall(".//url"),
                                 [validate.getattr("text")]),
            validate.optional("fingerprint"): validate.xml_findtext(".//updated-fingerprint"),
            validate.optional("session_key"): validate.xml_findtext(".//session-key"),
            "session_attributes": validate.all(
                validate.xml_findall(".//session-attribute"),
                [
                    validate.getattr("attrib"),
                    validate.union({
                        "name": validate.get("name"),
                        "value": validate.get("value")
                    })
                ])
        }))
    arguments = PluginArguments(
        PluginArgument("email",
                       required=True,
                       metavar="EMAIL",
                       requires=["password"],
                       help="""
    The email associated with your WWE Network account, required to access any WWE Network stream.
    """),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
    A WWE Network account password to use with --wwenetwork-email.
    """))

    def __init__(self, url):
        super(WWENetwork, self).__init__(url)
        http.headers.update({"User-Agent": useragents.CHROME})
        # Persisted session attributes (ipid/fprt cookies) shared across runs.
        self._session_attributes = Cache(filename="plugin-cache.json",
                                         key_prefix="wwenetwork:attributes")
        self._session_key = self.cache.get("session_key")
        # Considered authenticated only when both cached cookies are present.
        self._authed = self._session_attributes.get(
            "ipid") and self._session_attributes.get("fprt")

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def login(self, email, password):
        """Log in to WWE Network; caches the ipid/fprt cookies on success."""
        self.logger.debug("Attempting login as {0}", email)
        # sets some required cookies to login
        http.get(self.login_page_url)
        # login
        res = http.post(self.login_url,
                        data=dict(registrationAction='identify',
                                  emailAddress=email,
                                  password=password,
                                  submitButton=""),
                        headers={"Referer": self.login_page_url},
                        allow_redirects=False)
        # Success is detected by the absence of the error marker in the response.
        self._authed = "Authentication Error" not in res.text
        if self._authed:
            self._session_attributes.set("ipid", res.cookies.get("ipid"),
                                         expires=3600 * 1.5)
            self._session_attributes.set("fprt", res.cookies.get("fprt"),
                                         expires=3600 * 1.5)
        return self._authed

    def _update_session_attribute(self, key, value):
        """Cache a session attribute and mirror it into the cookie jar."""
        if value:
            self._session_attributes.set(key, value, expires=3600 * 1.5)  # 1h30m expiry
            http.cookies.set(key, value)

    @property
    def session_key(self):
        return self._session_key

    @session_key.setter
    def session_key(self, value):
        # Keep the persisted cache and the in-memory value in sync.
        self.cache.set("session_key", value)
        self._session_key = value

    def _get_media_info(self, content_id):
        """
        Get the info about the content, based on the ID
        :param content_id:
        :return:
        """
        params = {
            "identityPointId": self._session_attributes.get("ipid"),
            "fingerprint": self._session_attributes.get("fprt"),
            "contentId": content_id,
            "playbackScenario": self.playback_scenario,
            "platform": "WEB_MEDIAPLAYER_5",
            "subject": "LIVE_EVENT_COVERAGE",
            "frameworkURL": "https://ws.media.net.wwe.com",
            "_": int(time.time())  # cache-buster
        }
        if self.session_key:
            params["sessionKey"] = self.session_key
        # NOTE(review): api_url contains no "{id}" placeholder, so this format()
        # call is a no-op — the content id is passed via params instead. Confirm
        # whether the template was meant to include the id.
        url = self.api_url.format(id=content_id)
        res = http.get(url, params=params)
        return http.xml(res, ignore_ns=True, schema=self._info_schema)

    def _get_content_id(self):
        # check the page to find the contentId
        res = http.get(self.url)
        m = self.content_id_re.search(res.text)
        if m:
            return m.group(1)

    def _get_streams(self):
        """Yield HLS streams after logging in and verifying the media session."""
        email = self.get_option("email")
        password = self.get_option("password")
        if not self._authed and (not email and not password):
            self.logger.error(
                "A login for WWE Network is required, use --wwenetwork-email/"
                "--wwenetwork-password to set them")
            return

        if not self._authed:
            if not self.login(email, password):
                self.logger.error(
                    "Failed to login, check your username/password")
                return

        content_id = self._get_content_id()
        if content_id:
            self.logger.debug("Found content ID: {0}", content_id)
            info = self._get_media_info(content_id)
            if info["status"]["code"] == 1:
                # update the session attributes
                self._update_session_attribute("fprt", info.get("fingerprint"))
                for attr in info["session_attributes"]:
                    self._update_session_attribute(attr["name"], attr["value"])

                if info.get("session_key"):
                    self.session_key = info.get("session_key")

                for url in info["urls"]:
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url,
                            name_fmt="{pixels}_{bitrate}").items():
                        yield s
            else:
                raise PluginError(
                    "Could not load streams: {message} ({code})".format(
                        **info["status"]))
class Rtve(Plugin):
    """Plugin for rtve.es, locating content via a data-id attribute on the page."""

    # Key used by ZTNRClient for the CDN-list request (decoded at import time).
    secret_key = base64.b64decode("eWVMJmRhRDM=")
    # Extracts the numeric content id from a data-id="<digits>" attribute.
    content_id_re = re.compile(r'data-id\s*=\s*"(\d+)"')
    url_re = re.compile(
        r"""
        https?://(?:www\.)?rtve\.es/(?:directo|noticias|television|deportes|alacarta|drmn)/.*?/?
    """, re.VERBOSE)
    # CDN XML -> list of {"quality": preset type, "urls": [url texts]}.
    cdn_schema = validate.Schema(
        validate.transform(parse_xml),
        validate.xml_findall(".//preset"),
        [
            validate.union({
                "quality": validate.all(validate.getattr("attrib"),
                                        validate.get("type")),
                "urls": validate.all(validate.xml_findall(".//url"),
                                     [validate.getattr("text")])
            })
        ])
    subtitles_api = "http://www.rtve.es/api/videos/{id}/subtitulos.json"
    # Subtitles JSON -> list of {"src", "lang"} items.
    subtitles_schema = validate.Schema(
        {"page": {
            "items": [{
                "src": validate.url(),
                "lang": validate.text
            }]
        }},
        validate.get("page"),
        validate.get("items"))
    video_api = "http://www.rtve.es/api/videos/{id}.json"
    # Video JSON -> first item's quality presets (preset name + height).
    video_schema = validate.Schema(
        {
            "page": {
                "items": [{
                    "qualities": [{
                        "preset": validate.text,
                        "height": int
                    }]
                }]
            }
        },
        validate.get("page"),
        validate.get("items"),
        validate.get(0))
    options = PluginOptions({"mux_subtitles": False})

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def __init__(self, url):
        Plugin.__init__(self, url)
        self.zclient = ZTNRClient(self.secret_key)
        # RTVE serves different markup to unknown user agents; pretend to be Safari.
        http.headers = {"User-Agent": useragents.SAFARI_8}

    def _get_content_id(self):
        """Scrape the page for its data-id; returns the id as int, or None."""
        res = http.get(self.url)
        m = self.content_id_re.search(res.text)
        return m and int(m.group(1))

    def _get_subtitles(self, content_id):
        """Fetch the subtitle track list for the given content id."""
        res = http.get(self.subtitles_api.format(id=content_id))
        return http.json(res, schema=self.subtitles_schema)

    def _get_quality_map(self, content_id):
        """Map RTVE preset labels (Spanish names) to "<height>p" quality names."""
        res = http.get(self.video_api.format(id=content_id))
        data = http.json(res, schema=self.video_schema)
        qmap = {}
        for item in data["qualities"]:
            # Translate API preset codes to the labels used in the CDN list.
            qname = {
                "MED": "Media",
                "HIGH": "Alta",
                "ORIGINAL": "Original"
            }.get(item["preset"], item["preset"])
            qmap[qname] = u"{0}p".format(item["height"])
        return qmap

    def _get_streams(self):
        """Yield (quality, stream) pairs; muxes subtitles in when requested."""
        streams = []
        content_id = self._get_content_id()
        if content_id:
            self.logger.debug("Found content with id: {0}", content_id)
            stream_data = self.zclient.get_cdn_list(content_id,
                                                    schema=self.cdn_schema)
            quality_map = None
            for stream in stream_data:
                for url in stream["urls"]:
                    if url.endswith("m3u8"):
                        try:
                            streams.extend(
                                HLSStream.parse_variant_playlist(
                                    self.session, url).items())
                        except (IOError, OSError):
                            # A broken playlist shouldn't abort the other entries.
                            self.logger.debug("Failed to load m3u8 url: {0}", url)
                    elif ((url.endswith("mp4") or url.endswith("mov")
                           or url.endswith("avi"))
                          # HEAD probe: only keep progressive URLs that resolve.
                          and http.head(
                              url, raise_for_status=False).status_code == 200):
                        if quality_map is None:
                            # only make the request when it is necessary
                            quality_map = self._get_quality_map(content_id)
                        # rename the HTTP sources to match the HLS sources
                        quality = quality_map.get(stream["quality"],
                                                  stream["quality"])
                        streams.append((quality, HTTPStream(self.session, url)))

            subtitles = None
            if self.get_option("mux_subtitles"):
                subtitles = self._get_subtitles(content_id)
            if subtitles:
                # One substream per language, muxed into every video stream.
                substreams = {}
                for i, subtitle in enumerate(subtitles):
                    substreams[subtitle["lang"]] = HTTPStream(
                        self.session, subtitle["src"])
                for q, s in streams:
                    yield q, MuxedStream(self.session, s, subtitles=substreams)
            else:
                for s in streams:
                    yield s
def _get_streams(self):
    """Find the stream URL in the page's server-side-rendered JSON blobs.

    Scrapes every <script type="application/json" data-ssr-name=...> tag,
    dispatches on the data-ssr-name value (two live-page variants plus the
    VOD pages listed in self.vod_keys), sets self.title, and returns the
    HLS variant playlist for the first usable stream URL found.
    """
    try:
        # (name, parsed JSON) pair for every SSR script tag on the page.
        scripts = self.session.http.get(
            self.url,
            schema=validate.Schema(
                validate.parse_html(),
                validate.xml_findall(
                    ".//script[@type='application/json'][@data-ssr-name]"),
                [
                    validate.union((validate.get("data-ssr-name"),
                                    validate.all(validate.getattr("text"),
                                                 validate.parse_json())))
                ]))
    except PluginError:
        log.error("Could not find any stream data")
        return

    for _data_ssr_name, _data_json in scripts:
        video_url = None
        log.trace(f"Found _data_ssr_name={_data_ssr_name}")

        if _data_ssr_name == "pages/Broadcasts/Broadcasts":
            # Live variant A: stream data nested under "currentLivestream".
            self.title, video_url, is_live = validate.Schema(
                {
                    "currentLivestream": {
                        "is_live": bool,
                        "title": str,
                        "stream": validate.url(),
                    }
                },
                validate.get("currentLivestream"),
                validate.union_get("title", "stream",
                                   "is_live")).validate(_data_json)
            if not is_live:
                log.error(self._msg_live_offline)
                continue

        elif _data_ssr_name == "pages/Livestream/Livestream":
            # Live variant B: stream data at the top level.
            self.title, video_url, is_live = validate.Schema(
                {
                    "streamIsLive": bool,
                    "title": str,
                    "stream": validate.url(),
                },
                validate.union_get("title", "stream",
                                   "streamIsLive")).validate(_data_json)
            if not is_live:
                log.error(self._msg_live_offline)
                continue

        elif _data_ssr_name in self.vod_keys.keys():
            # VOD pages: the JSON key holding the data depends on the page type.
            _key = self.vod_keys[_data_ssr_name]
            # Keep only the unencrypted HLS profile and take its first URL.
            self.title, video_url = validate.Schema(
                {
                    _key: {
                        "title": str,
                        "aspect_ratios": {
                            "profiles": validate.all([{
                                "name": str,
                                "url": validate.url(),
                            }], validate.filter(lambda p: p[
                                "name"] == "hls_unencrypted"))
                        }
                    }
                },
                validate.get(_key),
                validate.union_get(
                    "title",
                    ("aspect_ratios", "profiles", 0, "url"))).validate(_data_json)

        if video_url is not None:
            return HLSStream.parse_variant_playlist(self.session, video_url)
class BBCiPlayer(Plugin):
    """Plugin for BBC iPlayer episodes and live channels (no account login)."""

    url_re = re.compile(r"""https?://(?:www\.)?bbc.co.uk/iplayer/
        (
            episode/(?P<episode_id>\w+)|
            live/(?P<channel_name>\w+)
        )
    """, re.VERBOSE)
    # Extracts the media "vpid" embedded in the episode page JSON.
    vpid_re = re.compile(r'"vpid"\s*:\s*"(\w+)"')
    # Extracts the live channel's TVIP from an analytics query string.
    tvip_re = re.compile(r'event_master_brand=(\w+?)&')
    swf_url = "http://emp.bbci.co.uk/emp/SMPf/1.18.3/StandardMediaPlayerChromelessFlash.swf"
    # Secret salt for the mediaselector "atk" token (decoded at import time).
    hash = base64.b64decode(
        b"N2RmZjc2NzFkMGM2OTdmZWRiMWQ5MDVkOWExMjE3MTk5MzhiOTJiZg==")
    api_url = (
        "http://open.live.bbc.co.uk/mediaselector/5/select/"
        "version/2.0/mediaset/{platform}/vpid/{vpid}/atk/{vpid_hash}/asn/1/")
    # Mediaselector platforms queried in order; each can return different formats.
    platforms = ("pc", "iptv-all")
    # Parses the mediaselector XML into {"hds": [urls], "hls": [urls]}, de-duplicated.
    mediaselector_schema = validate.Schema(
        validate.transform(partial(parse_xml, ignore_ns=True)),
        validate.union({
            "hds": validate.xml_findall(
                ".//media[@kind='video']//connection[@transferFormat='hds']"),
            "hls": validate.xml_findall(
                ".//media[@kind='video']//connection[@transferFormat='hls']")
        }),
        {
            validate.text: validate.all(
                [
                    validate.all(validate.getattr("attrib"),
                                 validate.get("href"))
                ],
                validate.transform(lambda x: list(set(x)))  # unique
            )
        })

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    @classmethod
    def _hash_vpid(cls, vpid):
        # atk token: sha1 of the secret salt concatenated with the vpid.
        return sha1(cls.hash + str(vpid).encode("utf8")).hexdigest()

    def find_vpid(self, url):
        """Find the media vpid on the given page; returns None when absent."""
        self.logger.debug("Looking for vpid on {0}", url)
        res = http.get(url)
        m = self.vpid_re.search(res.text)
        return m and m.group(1)

    def find_tvip(self, url):
        """Find the live channel's TVIP on the given page; returns None when absent."""
        self.logger.debug("Looking for tvip on {0}", url)
        res = http.get(url)
        m = self.tvip_re.search(res.text)
        return m and m.group(1)

    def mediaselector(self, vpid):
        """Yield (name, stream) pairs from the mediaselector API for each platform."""
        for platform in self.platforms:
            url = self.api_url.format(vpid=vpid, vpid_hash=self._hash_vpid(vpid),
                                      platform=platform)
            stream_urls = http.get(url, schema=self.mediaselector_schema)
            for surl in stream_urls.get("hls"):
                for s in HLSStream.parse_variant_playlist(self.session, surl).items():
                    yield s
            for surl in stream_urls.get("hds"):
                for s in HDSStream.parse_manifest(self.session, surl).items():
                    yield s

    def _get_streams(self):
        """Yield streams for an episode (via vpid) or live channel (via tvip)."""
        m = self.url_re.match(self.url)
        episode_id = m.group("episode_id")
        channel_name = m.group("channel_name")

        if episode_id:
            self.logger.debug("Loading streams for episode: {0}", episode_id)
            vpid = self.find_vpid(self.url)
            if vpid:
                self.logger.debug("Found VPID: {0}", vpid)
                for s in self.mediaselector(vpid):
                    yield s
            else:
                self.logger.error("Could not find VPID for episode {0}", episode_id)
        elif channel_name:
            self.logger.debug("Loading stream for live channel: {0}", channel_name)
            tvip = self.find_tvip(self.url)
            if tvip:
                self.logger.debug("Found TVIP: {0}", tvip)
                for s in self.mediaselector(tvip):
                    yield s
def test_no_default(self, subject):
    """Without a default, getattr() yields None for any missing attribute."""
    missing_attr = validate.validate(validate.getattr("bar"), subject)
    assert missing_attr is None
    # Even on None itself the validator degrades to None rather than raising.
    attr_on_none = validate.validate(validate.getattr("baz"), None)
    assert attr_on_none is None
def test_default(self, subject):
    """A supplied default is returned when the attribute is missing."""
    fallback = validate.validate(validate.getattr("bar", 2), subject)
    assert fallback == 2
def test_simple(self, subject):
    """An existing attribute's value is returned as-is."""
    attr_value = validate.validate(validate.getattr("foo"), subject)
    assert attr_value == 1