class NPO(Plugin):
    """Plugin for npo.nl, zapp.nl and zappelin.nl (Dutch public broadcasting)."""

    api_url = "http://ida.omroep.nl/app.php/{endpoint}"
    url_re = re.compile(r"https?://(\w+\.)?(npo\.nl|zapp\.nl|zappelin\.nl)/")
    # <npo-player media-id="..."> tag on newer pages
    media_id_re = re.compile(
        r'''<npo-player\smedia-id=["'](?P<media_id>[^"']+)["']''')
    # data-prid / data-alt-prid attributes; the "alt" variant carries subtitles
    prid_re = re.compile(
        r'''(?:data(-alt)?-)?prid\s*[=:]\s*(?P<q>["'])(\w+)(?P=q)''')
    react_re = re.compile(
        r'''data-react-props\s*=\s*(?P<q>["'])(?P<data>.*?)(?P=q)''')
    auth_schema = validate.Schema({"token": validate.text},
                                  validate.get("token"))
    streams_schema = validate.Schema(
        {
            "items": [[{
                "label": validate.text,
                "contentType": validate.text,
                "url": validate.url(),
                "format": validate.text
            }]]
        },
        validate.get("items"),
        validate.get(0))
    # the stream-info endpoint returns either a bare URL or an envelope object
    stream_info_schema = validate.Schema(
        validate.any(
            validate.url(),
            validate.all({
                "errorcode": 0,
                "url": validate.url()
            }, validate.get("url"))))
    arguments = PluginArguments(
        PluginArgument("subtitles",
                       action="store_true",
                       help="""
    Include subtitles for the deaf or hard of hearing, if available.
    """))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def __init__(self, url):
        super(NPO, self).__init__(url)
        self._token = None
        http.headers.update({"User-Agent": useragents.CHROME})

    def api_call(self, endpoint, schema=None, params=None):
        """GET an API endpoint and parse the JSON reply with ``schema``."""
        url = self.api_url.format(endpoint=endpoint)
        res = http.get(url, params=params)
        return http.json(res, schema=schema)

    @property
    def token(self):
        # lazily fetched and cached API auth token
        if not self._token:
            self._token = self.api_call("auth", schema=self.auth_schema)
        return self._token

    def _get_prid(self, subtitles=False):
        """Locate the asset id (prid) for the content on the page.

        :param subtitles: prefer the data-alt-prid variant (with subtitles)
        :return: the asset id, or None if not found
        """
        res = http.get(self.url)
        bprid = None

        # Locate the asset id for the content on the page
        for alt, _, prid in self.prid_re.findall(res.text):
            if alt and subtitles:
                bprid = prid
            elif bprid is None:
                bprid = prid

        if bprid is None:
            m = self.react_re.search(res.text)
            if m:
                # fix: the data-react-props attribute value is HTML-escaped,
                # so decode &quot; entities before parsing it as JSON (the
                # previous replacement string was corrupted)
                data = parse_json(m.group("data").replace("&quot;", '"'))
                bprid = data.get("mid")

        if bprid is None:
            m = self.media_id_re.search(res.text)
            if m:
                bprid = m.group('media_id')

        return bprid

    def _get_streams(self):
        asset_id = self._get_prid(self.get_option("subtitles"))

        if asset_id:
            self.logger.debug("Found asset id: {0}", asset_id)
            streams = self.api_call(asset_id,
                                    params=dict(adaptive="yes",
                                                token=self.token),
                                    schema=self.streams_schema)

            for stream in streams:
                if stream["format"] in ("adaptive", "hls", "mp4"):
                    if stream["contentType"] == "url":
                        stream_url = stream["url"]
                    else:
                        # using type=json removes the javascript function wrapper
                        info_url = stream["url"].replace("type=jsonp",
                                                         "type=json")

                        # find the actual stream URL
                        stream_url = http.json(http.get(info_url),
                                               schema=self.stream_info_schema)

                    if stream["format"] in ("adaptive", "hls"):
                        for s in HLSStream.parse_variant_playlist(
                                self.session, stream_url).items():
                            yield s
                    elif stream["format"] in ("mp3", "mp4"):
                        yield "vod", HTTPStream(self.session, stream_url)
class AfreecaTV(Plugin):
    """Plugin for afreecatv.com live channels."""

    login_url = "https://member.afreecatv.com:8111/login/LoginAction.php"

    arguments = PluginArguments(
        PluginArgument(
            "username",
            requires=["password"],
            metavar="USERNAME",
            help="The username used to register with afreecatv.com."),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help=
            "A afreecatv.com account password to use with --afreeca-username.")
    )

    @classmethod
    def can_handle_url(self, url):
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, key):
        # prefer the plugin-specific quality ordering when it is known
        custom_weight = QUALITY_WEIGHTS.get(key)
        if custom_weight:
            return custom_weight, "afreeca"
        return Plugin.stream_weight(key)

    def _get_channel_info(self, username):
        """Query the channel API for broadcast metadata."""
        response = http.post(CHANNEL_API_URL,
                             data={
                                 "bid": username,
                                 "mode": "landing",
                                 "player_type": "html5"
                             })
        return http.json(response, schema=_channel_schema)

    def _get_hls_key(self, broadcast, username, quality):
        """Request the AID key needed to play the HLS stream."""
        response = http.post(CHANNEL_API_URL,
                             data={
                                 "bid": username,
                                 "bno": broadcast,
                                 "pwd": "",
                                 "quality": quality,
                                 "type": "pwd"
                             },
                             headers={"Referer": self.url})
        return http.json(response, schema=_channel_schema)

    def _get_stream_info(self, broadcast, quality, cdn, rmd):
        """Resolve the playlist info for a broadcast/quality pair."""
        query = {
            "return_type": cdn,
            "broad_key": "{broadcast}-flash-{quality}-hls".format(**locals())
        }
        response = http.get(STREAM_INFO_URLS.format(rmd=rmd), params=query)
        return http.json(response, schema=_stream_schema)

    def _get_hls_stream(self, broadcast, username, quality, cdn, rmd):
        """Return an HLSStream for the broadcast, or None on failure."""
        key_response = self._get_hls_key(broadcast, username, quality)
        if key_response["RESULT"] != CHANNEL_RESULT_OK:
            return

        aid = key_response["AID"]
        info = self._get_stream_info(broadcast, quality, cdn, rmd)
        if "view_url" not in info:
            return

        return HLSStream(self.session, info["view_url"], params=dict(aid=aid))

    def _login(self, username, password):
        """Authenticate with afreecatv.com; return True on success."""
        response = http.post(self.login_url,
                             data={
                                 "szWork": "login",
                                 "szType": "json",
                                 "szUid": username,
                                 "szPassword": password,
                                 "isSaveId": "true",
                                 "isSavePw": "false",
                                 "isSaveJoin": "false"
                             })
        return http.json(response)["RESULT"] == 1

    def _get_streams(self):
        # skip the ad/preview segments unless the user overrode the option
        if not self.session.get_option("hls-segment-ignore-names"):
            self.session.set_option("hls-segment-ignore-names",
                                    ["_0", "_1", "_2"])

        login_username = self.get_option("username")
        login_password = self.get_option("password")
        if login_username and login_password:
            self.logger.debug("Attempting login as {0}".format(login_username))
            if self._login(login_username, login_password):
                self.logger.info(
                    "Successfully logged in as {0}".format(login_username))
            else:
                self.logger.info(
                    "Failed to login as {0}".format(login_username))

        username = _url_re.match(self.url).group("username")
        channel = self._get_channel_info(username)

        if channel.get("BPWD") == "Y":
            self.logger.error("Stream is Password-Protected")
            return
        if channel.get("RESULT") == -6:
            self.logger.error("Login required")
            return
        if channel.get("RESULT") != CHANNEL_RESULT_OK:
            return

        broadcast = channel["BNO"]
        rmd = channel["RMD"]
        cdn = channel["CDN"]
        if not (broadcast and rmd and cdn):
            return

        for qkey in QUALITYS:
            hls_stream = self._get_hls_stream(broadcast, username, qkey, cdn,
                                              rmd)
            if hls_stream:
                yield qkey, hls_stream
class Crunchyroll(Plugin):
    """Plugin for crunchyroll.com media pages."""

    arguments = PluginArguments(
        PluginArgument(
            "username",
            metavar="USERNAME",
            requires=["password"],
            help="A Crunchyroll username to allow access to restricted streams."
        ),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       nargs="?",
                       const=None,
                       default=None,
                       help="""
    A Crunchyroll password for use with --crunchyroll-username.

    If left blank you will be prompted.
    """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
    Purge cached Crunchyroll credentials to initiate a new session
    and reauthenticate.
    """),
        PluginArgument("session-id",
                       sensitive=True,
                       metavar="SESSION_ID",
                       help="""
    Set a specific session ID for crunchyroll, can be used to bypass
    region restrictions. If using an authenticated session ID, it is
    recommended that the authentication parameters be omitted as the
    session ID is account specific.

    Note: The session ID will be overwritten if authentication is used
    and the session ID does not match the account.
    """),
        # Deprecated, uses the general locale setting
        PluginArgument("locale", metavar="LOCALE", help=argparse.SUPPRESS))

    @classmethod
    def can_handle_url(self, url):
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, key):
        weight = STREAM_WEIGHTS.get(key)
        if weight:
            return weight, "crunchyroll"
        return Plugin.stream_weight(key)

    def _get_streams(self):
        api = self._create_api()
        match = _url_re.match(self.url)
        media_id = int(match.group("media_id"))

        try:
            # the media.stream_data field is required, no stream data is
            # returned otherwise
            info = api.get_info(media_id,
                                fields=["media.stream_data"],
                                schema=_media_schema)
        except CrunchyrollAPIError as err:
            raise PluginError(u"Media lookup error: {0}".format(err.msg))

        if not info:
            return

        streams = {}

        # The adaptive quality stream is sometimes a subset of all the other
        # streams listed; "ultra" is not included
        has_adaptive = any(
            [s[u"quality"] == u"adaptive" for s in info[u"streams"]])
        if has_adaptive:
            log.debug(u"Loading streams from adaptive playlist")
            for stream in filter(lambda x: x[u"quality"] == u"adaptive",
                                 info[u"streams"]):
                for q, s in HLSStream.parse_variant_playlist(
                        self.session, stream[u"url"]).items():
                    # rename the bitrates to low, mid, or high. ultra doesn't
                    # seem to appear in the adaptive streams
                    name = STREAM_NAMES.get(q, q)
                    streams[name] = s

        # If there is no adaptive quality stream then parse each individual
        # result
        for stream in info[u"streams"]:
            if stream[u"quality"] != u"adaptive":
                # the video_encode_id indicates that the stream is not a
                # variant playlist
                if u"video_encode_id" in stream:
                    streams[stream[u"quality"]] = HLSStream(
                        self.session, stream[u"url"])
                else:
                    # otherwise the stream url is actually a list of stream
                    # qualities
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, stream[u"url"]).items():
                        # rename the bitrates to low, mid, or high
                        name = STREAM_NAMES.get(q, q)
                        streams[name] = s

        return streams

    def _create_api(self):
        """Creates a new CrunchyrollAPI object, initiates its session and
        tries to authenticate it either by using saved credentials or the
        user's username and password.
        """
        if self.options.get("purge_credentials"):
            # fix: "session_id" was previously purged twice; once is enough
            self.cache.set("session_id", None, 0)
            self.cache.set("auth", None, 0)

        # use the crunchyroll locale as an override, for backwards
        # compatibility
        locale = self.get_option(
            "locale") or self.session.localization.language_code

        api = CrunchyrollAPI(self.cache,
                             self.session,
                             session_id=self.get_option("session_id"),
                             locale=locale)

        if not self.get_option("session_id"):
            log.debug("Creating session with locale: {0}", locale)
            api.start_session()

            if api.auth:
                log.debug("Using saved credentials")
                login = api.authenticate()
                if login:
                    log.info("Successfully logged in as '{0}'",
                             login["user"]["username"]
                             or login["user"]["email"])

            if not api.auth and self.options.get("username"):
                try:
                    log.debug("Attempting to login using username and password")
                    api.login(self.options.get("username"),
                              self.options.get("password"))
                    login = api.authenticate()
                    log.info("Logged in as '{0}'",
                             login["user"]["username"]
                             or login["user"]["email"])
                except CrunchyrollAPIError as err:
                    raise PluginError(
                        u"Authentication error: {0}".format(err.msg))

            if not api.auth:
                log.warning(
                    "No authentication provided, you won't be able to access "
                    "premium restricted content")

        return api
class Zattoo(Plugin):
    """Plugin for Zattoo and its white-label TV platforms (live, VOD, recordings)."""

    API_CHANNELS = '{0}/zapi/v2/cached/channels/{1}?details=False'
    API_HELLO = '{0}/zapi/session/hello'
    API_HELLO_V2 = '{0}/zapi/v2/session/hello'
    API_HELLO_V3 = '{0}/zapi/v3/session/hello'
    API_LOGIN = '******'
    API_LOGIN_V3 = '{0}/zapi/v3/account/login'
    API_SESSION = '{0}/zapi/v2/session'
    API_WATCH = '{0}/zapi/watch'
    API_WATCH_REC = '{0}/zapi/watch/recording/{1}'
    API_WATCH_VOD = '{0}/zapi/avod/videos/{1}/watch'

    STREAMS_ZATTOO = ['dash', 'hls', 'hls5']

    TIME_CONTROL = 60 * 60 * 2        # revalidate the session every 2 hours
    TIME_SESSION = 60 * 60 * 24 * 30  # cached session lifetime: 30 days

    _url_re = re.compile(r'''(?x)
        https?://
        (?P<base_url>
            (?:(?:
                iptv\.glattvision|www\.(?:myvisiontv|saktv|vtxtv)
            )\.ch
            )|(?:(?:
                mobiltv\.quickline|www\.quantum-tv|zattoo
            )\.com
            )|(?:(?:
                tvonline\.ewe|nettv\.netcologne|tvplus\.m-net
            )\.de
            )|(?:(?:
                player\.waly|www\.(?:1und1|netplus)
            )\.tv)
            |www\.bbv-tv\.net
            |www\.meinewelt\.cc
        )/
        (?:
            (?:
                recording(?:s\?recording=|/)
                |
                (?:ondemand/)?(?:watch/(?:[^/\s]+)(?:/[^/]+/))
            )(?P<recording_id>\d+)
            |
            (?:
                (?:live/|watch/)|(?:channels(?:/\w+)?|guide)\?channel=
            )(?P<channel>[^/\s]+)
            |
            ondemand(?:\?video=|/watch/)(?P<vod_id>[^-]+)
        )
        ''')

    _app_token_re = re.compile(r"""window\.appToken\s+=\s+'([^']+)'""")

    _channels_schema = validate.Schema(
        {
            'success': bool,
            'channel_groups': [{
                'channels': [
                    {
                        'display_alias': validate.text,
                        'cid': validate.text
                    },
                ]
            }]
        },
        validate.get('channel_groups'),
    )

    _session_schema = validate.Schema(
        {
            'success': bool,
            'session': {
                'loggedin': bool
            }
        }, validate.get('session'))

    arguments = PluginArguments(
        PluginArgument("email",
                       requires=["password"],
                       metavar="EMAIL",
                       help="""
    The email associated with your zattoo account, required to access any
    zattoo stream.
    """),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
    A zattoo account password to use with --zattoo-email.
    """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
    Purge cached zattoo credentials to initiate a new session and
    reauthenticate.
    """),
        PluginArgument('stream-types',
                       metavar='TYPES',
                       type=comma_list_filter(STREAMS_ZATTOO),
                       default=['hls'],
                       help='''
    A comma-delimited list of stream types which should be used,
    the following types are allowed:

    - {0}

    Default is "hls".
    '''.format('\n    - '.join(STREAMS_ZATTOO))))

    def __init__(self, url):
        super(Zattoo, self).__init__(url)
        self.domain = self._url_re.match(url).group('base_url')
        # per-domain attribute cache for uuid / power_guide_hash / session age
        self._session_attributes = Cache(
            filename='plugin-cache.json',
            key_prefix='zattoo:attributes:{0}'.format(self.domain))
        self._uuid = self._session_attributes.get('uuid')
        # consider ourselves authenticated only if all session artifacts exist
        self._authed = (self._session_attributes.get('power_guide_hash')
                        and self._uuid
                        and self.session.http.cookies.get(
                            'pzuid', domain=self.domain)
                        and self.session.http.cookies.get(
                            'beaker.session.id', domain=self.domain))
        self._session_control = self._session_attributes.get(
            'session_control', False)
        self.base_url = 'https://{0}'.format(self.domain)
        self.headers = {
            'User-Agent': useragents.CHROME,
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.base_url
        }

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _hello(self):
        """Start a fresh API session ("hello" handshake) for this provider."""
        log.debug('_hello ...')

        # a new session is required for the app_token
        self.session.http.cookies = cookiejar_from_dict({})
        if self.base_url == 'https://zattoo.com':
            app_token_url = 'https://zattoo.com/client/token-2fb69f883fea03d06c68c6e5f21ddaea.json'
        elif self.base_url == 'https://www.quantum-tv.com':
            app_token_url = 'https://www.quantum-tv.com/token-4d0d61d4ce0bf8d9982171f349d19f34.json'
        else:
            app_token_url = self.base_url

        res = self.session.http.get(app_token_url)
        if self.base_url == 'https://www.quantum-tv.com':
            app_token = self.session.http.json(res)["session_token"]
            hello_url = self.API_HELLO_V3.format(self.base_url)
        elif self.base_url == 'https://zattoo.com':
            app_token = self.session.http.json(res)['app_tid']
            hello_url = self.API_HELLO_V2.format(self.base_url)
        else:
            # token is embedded in the page as window.appToken = '...'
            match = self._app_token_re.search(res.text)
            app_token = match.group(1)
            hello_url = self.API_HELLO.format(self.base_url)

        if self._uuid:
            __uuid = self._uuid
        else:
            __uuid = str(uuid.uuid4())
            self._session_attributes.set('uuid', __uuid,
                                         expires=self.TIME_SESSION)

        if self.base_url == 'https://zattoo.com':
            params = {
                'uuid': __uuid,
                'app_tid': app_token,
                'app_version': '1.0.0'
            }
        else:
            params = {
                'client_app_token': app_token,
                'uuid': __uuid,
            }
            if self.base_url == 'https://www.quantum-tv.com':
                params['app_version'] = '3.2028.3'
            else:
                params['lang'] = 'en'
                params['format'] = 'json'

        res = self.session.http.post(hello_url,
                                     headers=self.headers,
                                     data=params)

    def _login(self, email, password):
        """Log in and cache the session attributes on success.

        :raises PluginError: on a 400 response (bad credentials)
        """
        log.debug('_login ... Attempting login as {0}'.format(email))

        params = {'login': email, 'password': password, 'remember': 'true'}

        # fix: this previously compared against 'https://quantum-tv.com',
        # which can never match because _url_re only accepts the www. host
        # and base_url is built from that match -- Quantum TV logins were
        # sent to the wrong (non-v3) endpoint
        if self.base_url == 'https://www.quantum-tv.com':
            login_url = self.API_LOGIN_V3.format(self.base_url)
        else:
            login_url = self.API_LOGIN.format(self.base_url)

        try:
            res = self.session.http.post(login_url,
                                         headers=self.headers,
                                         data=params)
        except Exception as e:
            if '400 Client Error' in str(e):
                raise PluginError(
                    'Failed to login, check your username/password')
            raise e

        data = self.session.http.json(res)
        self._authed = data['success']
        log.debug('New Session Data')
        self.save_cookies(default_expires=self.TIME_SESSION)
        self._session_attributes.set('power_guide_hash',
                                     data['session']['power_guide_hash'],
                                     expires=self.TIME_SESSION)
        self._session_attributes.set('session_control',
                                     True,
                                     expires=self.TIME_CONTROL)

    def _watch(self):
        """Yield streams for the channel / VOD / recording in the URL."""
        log.debug('_watch ...')
        match = self._url_re.match(self.url)
        if not match:
            log.debug('_watch ... no match')
            return

        channel = match.group('channel')
        vod_id = match.group('vod_id')
        recording_id = match.group('recording_id')
        params = {'https_watch_urls': True}

        if channel:
            watch_url = self.API_WATCH.format(self.base_url)
            params_cid = self._get_params_cid(channel)
            if not params_cid:
                return
            params.update(params_cid)
        elif vod_id:
            log.debug('Found vod_id: {0}'.format(vod_id))
            watch_url = self.API_WATCH_VOD.format(self.base_url, vod_id)
        elif recording_id:
            log.debug('Found recording_id: {0}'.format(recording_id))
            watch_url = self.API_WATCH_REC.format(self.base_url, recording_id)
        else:
            log.debug('Missing watch_url')
            return

        zattoo_stream_types = self.get_option('stream-types') or ['hls']
        for stream_type in zattoo_stream_types:
            params_stream_type = {'stream_type': stream_type}
            params.update(params_stream_type)

            try:
                res = self.session.http.post(watch_url,
                                             headers=self.headers,
                                             data=params)
            except Exception as e:
                if '404 Client Error' in str(e):
                    log.error('Unfortunately streaming is not permitted in '
                              'this country or this channel does not exist.')
                elif '402 Client Error: Payment Required' in str(e):
                    log.error('Paid subscription required for this channel.')
                    log.info('If paid subscription exist, use --zattoo-purge'
                             '-credentials to start a new session.')
                elif '403 Client Error' in str(e):
                    log.debug('Force session reset for watch_url')
                    self.reset_session()
                else:
                    log.error(str(e))
                return

            data = self.session.http.json(res)
            log.debug('Found data for {0}'.format(stream_type))
            if data['success'] and stream_type in ['hls', 'hls5']:
                for url in data['stream']['watch_urls']:
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url['url']).items():
                        yield s
            elif data['success'] and stream_type == 'dash':
                for url in data['stream']['watch_urls']:
                    for s in DASHStream.parse_manifest(self.session,
                                                       url['url']).items():
                        yield s

    def _get_params_cid(self, channel):
        """Map a channel display alias to its cid; False on API failure."""
        log.debug('get channel ID for {0}'.format(channel))
        channels_url = self.API_CHANNELS.format(
            self.base_url,
            self._session_attributes.get('power_guide_hash'))
        try:
            res = self.session.http.get(channels_url, headers=self.headers)
        except Exception:
            log.debug('Force session reset for _get_params_cid')
            self.reset_session()
            return False

        data = self.session.http.json(res, schema=self._channels_schema)

        c_list = []
        for d in data:
            for c in d['channels']:
                c_list.append(c)

        cid = []
        zattoo_list = []
        for c in c_list:
            zattoo_list.append(c['display_alias'])
            if c['display_alias'] == channel:
                cid = c['cid']

        log.debug('Available zattoo channels in this country: {0}'.format(
            ', '.join(sorted(zattoo_list))))

        # fall back to the raw channel name if no alias matched
        if not cid:
            cid = channel
        log.debug('CHANNEL ID: {0}'.format(cid))
        return {'cid': cid}

    def reset_session(self):
        """Drop all cached session state and cookies."""
        self._session_attributes.set('power_guide_hash', None, expires=0)
        self._session_attributes.set('uuid', None, expires=0)
        self.clear_cookies()
        self._authed = False

    def _get_streams(self):
        email = self.get_option('email')
        password = self.get_option('password')

        if self.options.get('purge_credentials'):
            self.reset_session()
            log.info('All credentials were successfully removed.')
        elif (self._authed and not self._session_control):
            # check every two hours, if the session is actually valid
            log.debug('Session control for {0}'.format(self.domain))
            res = self.session.http.get(self.API_SESSION.format(self.base_url))
            res = self.session.http.json(res, schema=self._session_schema)
            if res['loggedin']:
                self._session_attributes.set('session_control',
                                             True,
                                             expires=self.TIME_CONTROL)
            else:
                log.debug('User is not logged in')
                self._authed = False

        if not self._authed and (not email and not password):
            log.error(
                'A login for Zattoo is required, use --zattoo-email EMAIL'
                ' --zattoo-password PASSWORD to set them')
            return

        if not self._authed:
            self._hello()
            self._login(email, password)

        return self._watch()
class UStreamTV(Plugin):
    # Matches ustream.tv channel, embed and recorded-video URLs; both named
    # groups are optional so the plugin can fall back to page scraping.
    url_re = re.compile(
        r"""
    https?://(www\.)?ustream\.tv
        (?:
            (/embed/|/channel/id/)(?P<channel_id>\d+)
        )?
        (?:
            (/embed)?/recorded/(?P<video_id>\d+)
        )?
    """, re.VERBOSE)
    # <meta name="ustream:channel_id" content="..."> fallback on the page
    media_id_re = re.compile(r'"ustream:channel_id"\s+content\s*=\s*"(\d+)"')
    arguments = PluginArguments(
        PluginArgument("password",
                       argument_name="ustream-password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
    A password to access password protected UStream.tv channels.
    """))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def _api_get_streams(self,
                         media_id,
                         application,
                         cluster="live",
                         referrer=None,
                         retries=3):
        """Connect to the UHS API and yield streams from polled moduleInfo.

        Recurses (via handle_reject) with ``retries - 1`` when the API
        redirects us to another cluster/referrer; gives up at retries == 0.
        """
        if retries > 0:
            app_id = 11
            app_ver = 2
            referrer = referrer or self.url
            self.api = UHSClient(self.session,
                                 media_id,
                                 application,
                                 referrer=referrer,
                                 cluster=cluster,
                                 app_id=app_id,
                                 app_version=app_ver,
                                 password=self.get_option("password"))
            self.logger.debug(
                "Connecting to UStream API: media_id={0}, application={1}, referrer={2}, cluster={3}, "
                "app_id={4}, app_ver={5}", media_id, application, referrer,
                cluster, app_id, app_ver)
            if self.api.connect():
                for i in range(
                        5):  # make at most five requests to get the moduleInfo
                    try:
                        for s in self._do_poll(media_id, application, cluster,
                                               referrer, retries):
                            yield s
                    except ModuleInfoNoStreams:
                        self.logger.debug("Retrying moduleInfo request")
                        time.sleep(1)
                    else:
                        break

    def _do_poll(self,
                 media_id,
                 application,
                 cluster="live",
                 referrer=None,
                 retries=3):
        """Poll the API once and dispatch each result to its handler."""
        res = self.api.poll()
        if res:
            for result in res:
                if result["cmd"] == "moduleInfo":
                    for s in self.handle_module_info(result["args"], media_id,
                                                     application, cluster,
                                                     referrer, retries):
                        yield s
                elif result["cmd"] == "reject":
                    for s in self.handle_reject(result["args"], media_id,
                                                application, cluster, referrer,
                                                retries):
                        yield s
                else:
                    self.logger.debug("Unknown command: {0}({1})",
                                      result["cmd"], result["args"])

    def handle_module_info(self,
                           args,
                           media_id,
                           application,
                           cluster="live",
                           referrer=None,
                           retries=3):
        """Translate a moduleInfo reply into (name, stream) pairs.

        :raises ModuleInfoNoStreams: when the reply carried no usable entries,
            so the caller's poll loop retries.
        """
        has_results = False
        for streams in UHSClient.module_info_schema.validate(args):
            has_results = True
            if isinstance(streams, list):
                # HLS variant playlists, wrapped in UStreamHLSStream so the
                # API session is kept alive while playing
                for stream in streams:
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, stream["url"]).items():
                        yield q, UStreamHLSStream(self.session, s.url,
                                                  self.api)
            elif isinstance(streams, dict):
                # plain HTTP streams, named by bitrate
                for stream in streams.get("streams", []):
                    name = "{0}k".format(stream["bitrate"])
                    for surl in stream["streamName"]:
                        yield name, HTTPStream(self.session, surl)
            elif streams == "offline":
                self.logger.warning("This stream is currently offline")

        if not has_results:
            raise ModuleInfoNoStreams

    def handle_reject(self,
                      args,
                      media_id,
                      application,
                      cluster="live",
                      referrer=None,
                      retries=3):
        """Handle a reject reply: adopt the new cluster/referrer and
        reconnect with one retry consumed."""
        for arg in args:
            if "cluster" in arg:
                self.logger.debug("Switching cluster to {0}",
                                  arg["cluster"]["name"])
                cluster = arg["cluster"]["name"]
            if "referrerLock" in arg:
                referrer = arg["referrerLock"]["redirectUrl"]

        return self._api_get_streams(media_id,
                                     application,
                                     cluster=cluster,
                                     referrer=referrer,
                                     retries=retries - 1)

    def _get_streams(self):
        # establish a mobile non-websockets api connection
        umatch = self.url_re.match(self.url)
        application = "channel"
        channel_id = umatch.group("channel_id")
        video_id = umatch.group("video_id")
        if channel_id:
            application = "channel"
            media_id = channel_id
        elif video_id:
            application = "recorded"
            media_id = video_id
        else:
            # neither id in the URL: scrape it from the page metadata
            media_id = self._find_media_id()

        if media_id:
            for s in self._api_get_streams(media_id, application):
                yield s
        else:
            self.logger.error("Cannot find a media_id on this page")

    def _find_media_id(self):
        """Scrape the channel id out of the page's meta tags; None if absent."""
        self.logger.debug("Searching for media ID on the page")
        res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
        m = self.media_id_re.search(res.text)
        return m and m.group(1)
class Schoolism(Plugin):
    url_re = re.compile(r"https?://(?:www\.)?schoolism\.com/watchLesson.php")
    login_url = "https://www.schoolism.com/index.php"
    key_time_url = "https://www.schoolism.com/video-html/key-time.php"
    # the lesson playlist is embedded as a JS array literal on the page
    playlist_re = re.compile(r"var allVideos=(\[\{.*\}]);", re.DOTALL)
    # Quote bare JS object keys so the playlist parses as JSON.
    # NOTE(review): '(?!<")' looks like a typo'd negative lookbehind
    # ('(?<!")'); as written it is a lookahead that is nearly always true.
    # It works in practice because the playlist keys are unquoted --
    # confirm against live page data before changing.
    js_to_json = partial(re.compile(r'(?!<")(\w+):(?!/)').sub, r'"\1":')
    playlist_schema = validate.Schema(
        validate.transform(playlist_re.search),
        validate.any(
            None,
            validate.all(
                validate.get(1),
                validate.transform(js_to_json),
                validate.transform(
                    lambda x: x.replace(",}", "}")),  # remove invalid ,
                validate.transform(parse_json),
                [{
                    "sources": validate.all(
                        [{
                            "playlistTitle": validate.text,
                            "title": validate.text,
                            "src": validate.text,
                            "type": validate.text,
                        }],
                        # only include HLS streams
                        validate.filter(
                            lambda s: s["type"] == "application/x-mpegurl"))
                }])))
    arguments = PluginArguments(
        PluginArgument("email",
                       required=True,
                       requires=["password"],
                       metavar="EMAIL",
                       help="""
    The email associated with your Schoolism account, required to access any
    Schoolism stream.
    """),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A Schoolism account password to use with --schoolism-email."
        ),
        PluginArgument("part",
                       type=int,
                       default=1,
                       metavar="PART",
                       help="""
    Play part number PART of the lesson.

    Defaults is 1.
    """))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def login(self, email, password):
        """
        Login to the schoolism account and return the users account
        :param email: (str) email for account
        :param password: (str) password for account
        :return: (str) users email
        """
        if self.options.get("email") and self.options.get("password"):
            res = self.session.http.post(self.login_url,
                                         data={
                                             "email": email,
                                             "password": password,
                                             "redirect": None,
                                             "submit": "Login"
                                         })

            # a successful login reflects the credentials back as cookies
            if res.cookies.get("password") and res.cookies.get("email"):
                return res.cookies.get("email")
            else:
                self.logger.error(
                    "Failed to login to Schoolism, incorrect email/password combination"
                )
        else:
            self.logger.error(
                "An email and password are required to access Schoolism streams"
            )

    def _get_streams(self):
        user = self.login(self.options.get("email"),
                          self.options.get("password"))
        if user:
            self.logger.debug("Logged in to Schoolism as {0}", user)
            res = self.session.http.get(
                self.url, headers={"User-Agent": useragents.SAFARI_8})
            lesson_playlist = self.playlist_schema.validate(res.text)

            part = self.options.get("part")

            self.logger.info("Attempting to play lesson Part {0}", part)
            found = False

            # make request to key-time api, to get key specific headers
            res = self.session.http.get(
                self.key_time_url,
                headers={"User-Agent": useragents.SAFARI_8})

            # the playlist is 1-indexed by lesson part
            for i, video in enumerate(lesson_playlist, 1):
                if video["sources"] and i == part:
                    found = True
                    for source in video["sources"]:
                        for s in HLSStream.parse_variant_playlist(
                                self.session,
                                source["src"],
                                headers={
                                    "User-Agent": useragents.SAFARI_8,
                                    "Referer": self.url
                                }).items():
                            yield s

            if not found:
                self.logger.error("Could not find lesson Part {0}", part)
class YouTube(Plugin):
    _oembed_url = "https://www.youtube.com/oembed"
    _video_info_url = "https://youtube.com/get_video_info"

    _oembed_schema = validate.Schema({
        "author_name":
        validate.all(validate.text, validate.transform(maybe_decode)),
        "title":
        validate.all(validate.text, validate.transform(maybe_decode))
    })

    # Maps adaptive (DASH) video itags to quality names.
    adp_video = {
        137: "1080p",
        303: "1080p60",  # HFR
        299: "1080p60",  # HFR
        264: "1440p",
        308: "1440p60",  # HFR
        266: "2160p",
        315: "2160p60",  # HFR
        138: "2160p",
        302: "720p60",  # HFR
    }
    # Maps adaptive audio itags to bitrates, used to pick the best audio.
    adp_audio = {
        140: 128,
        141: 256,
        171: 128,
        249: 48,
        250: 64,
        251: 160,
        256: 256,
        258: 258,
    }

    arguments = PluginArguments(
        PluginArgument(
            "api-key",
            sensitive=True,
            help=argparse.SUPPRESS  # no longer used
        ))

    def __init__(self, url):
        super(YouTube, self).__init__(url)
        parsed = urlparse(self.url)
        # rewrite gaming.youtube.com URLs to the main site
        if parsed.netloc == 'gaming.youtube.com':
            self.url = urlunparse(parsed._replace(netloc='www.youtube.com'))

        self.author = None
        self.title = None
        self.video_id = None
        self.session.http.headers.update({'User-Agent': useragents.CHROME})

    def get_author(self):
        # get_oembed is a property; accessing it populates self.author
        if self.author is None:
            self.get_oembed
        return self.author

    def get_title(self):
        # get_oembed is a property; accessing it populates self.title
        if self.title is None:
            self.get_oembed
        return self.title

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        """Rank 3D and high-frame-rate variants relative to normal streams."""
        match_3d = re.match(r"(\w+)_3d", stream)
        match_hfr = re.match(r"(\d+p)(\d+)", stream)
        if match_3d:
            weight, group = Plugin.stream_weight(match_3d.group(1))
            weight -= 1
            group = "youtube_3d"
        elif match_hfr:
            weight, group = Plugin.stream_weight(match_hfr.group(1))
            weight += 1
            group = "high_frame_rate"
        else:
            weight, group = Plugin.stream_weight(stream)

        return weight, group

    @property
    def get_oembed(self):
        """Fetch author/title metadata via the oEmbed endpoint (side effect
        only: sets self.author and self.title)."""
        if self.video_id is None:
            self.video_id = self._find_video_id(self.url)

        params = {
            "url": "https://www.youtube.com/watch?v={0}".format(self.video_id),
            "format": "json"
        }
        res = self.session.http.get(self._oembed_url, params=params)
        data = self.session.http.json(res, schema=self._oembed_schema)
        self.author = data["author_name"]
        self.title = data["title"]

    def _create_adaptive_streams(self, info, streams, protected):
        """Add adaptive-format streams (audio-only plus muxed high-quality
        video) to ``streams``; returns (streams, protected)."""
        adaptive_streams = {}
        best_audio_itag = None

        # Extract audio streams from the DASH format list
        for stream_info in info.get("adaptive_fmts", []):
            if stream_info.get("s"):
                # signature-scrambled stream: unsupported, flag as protected
                protected = True
                continue

            stream_params = dict(parse_qsl(stream_info["url"]))
            if "itag" not in stream_params:
                continue
            itag = int(stream_params["itag"])
            # extract any high quality streams only available in adaptive formats
            adaptive_streams[itag] = stream_info["url"]

            stream_type, stream_format = stream_info["type"]
            if stream_type == "audio":
                stream = HTTPStream(self.session, stream_info["url"])
                name = "audio_{0}".format(stream_format)

                streams[name] = stream

                # find the best quality audio stream m4a, opus or vorbis
                if best_audio_itag is None or self.adp_audio[
                        itag] > self.adp_audio[best_audio_itag]:
                    best_audio_itag = itag

        if best_audio_itag and adaptive_streams and MuxedStream.is_usable(
                self.session):
            aurl = adaptive_streams[best_audio_itag]
            for itag, name in self.adp_video.items():
                if itag in adaptive_streams:
                    vurl = adaptive_streams[itag]
                    log.debug(
                        "MuxedStream: v {video} a {audio} = {name}".format(
                            audio=best_audio_itag,
                            name=name,
                            video=itag,
                        ))
                    streams[name] = MuxedStream(
                        self.session, HTTPStream(self.session, vurl),
                        HTTPStream(self.session, aurl))

        return streams, protected

    def _find_video_id(self, url):
        """Resolve a video id from the URL, embedded page data, or the
        canonical link of an /embed/live_stream page.

        :raises PluginError: when no video id can be found.
        """
        m = _url_re.match(url)
        if m.group("video_id"):
            log.debug("Video ID from URL")
            return m.group("video_id")

        res = self.session.http.get(url)
        datam = _ytdata_re.search(res.text)
        if datam:
            data = parse_json(datam.group(1))
            # find the videoRenderer object, where there is a LVE NOW badge
            for vid_ep in search_dict(data, 'currentVideoEndpoint'):
                video_id = vid_ep.get("watchEndpoint", {}).get("videoId")
                if video_id:
                    log.debug("Video ID from currentVideoEndpoint")
                    return video_id
            for x in search_dict(data, 'videoRenderer'):
                for bstyle in search_dict(x.get("badges", {}), "style"):
                    if bstyle == "BADGE_STYLE_TYPE_LIVE_NOW":
                        if x.get("videoId"):
                            log.debug("Video ID from videoRenderer (live)")
                            return x["videoId"]

        if "/embed/live_stream" in url:
            for link in itertags(res.text, "link"):
                if link.attributes.get("rel") == "canonical":
                    canon_link = link.attributes.get("href")
                    if canon_link != url:
                        log.debug("Re-directing to canonical URL: {0}".format(
                            canon_link))
                        return self._find_video_id(canon_link)

        raise PluginError("Could not find a video on this page")

    def _get_stream_info(self, video_id):
        """Query get_video_info, trying several parameter sets in turn
        (normal, age-restricted, embed-restricted)."""
        # normal
        _params_1 = {"el": "detailpage"}
        # age restricted
        _params_2 = {"el": "embedded"}
        # embedded restricted
        _params_3 = {
            "eurl": "https://youtube.googleapis.com/v/{0}".format(video_id)
        }

        count = 0
        info_parsed = None
        for _params in (_params_1, _params_2, _params_3):
            count += 1
            params = {"video_id": video_id}
            params.update(_params)

            res = self.session.http.get(self._video_info_url, params=params)
            info_parsed = parse_query(res.content if is_py2 else res.text,
                                      name="config",
                                      schema=_config_schema)
            if info_parsed.get("status") == "fail":
                log.debug("get_video_info - {0}: {1}".format(
                    count, info_parsed.get("reason")))
                continue
            self.author = info_parsed.get("author")
            self.title = info_parsed.get("title")
            log.debug("get_video_info - {0}: Found data".format(count))
            break

        return info_parsed

    def _get_streams(self):
        is_live = False

        self.video_id = self._find_video_id(self.url)
        log.debug("Using video ID: {0}", self.video_id)

        info = self._get_stream_info(self.video_id)
        if info and info.get("status") == "fail":
            log.error("Could not get video info: {0}".format(
                info.get("reason")))
            return
        elif not info:
            log.error("Could not get video info")
            return

        if info.get("livestream") == '1' or info.get("live_playback") == '1' \
                or info.get("player_response", {}).get("videoDetails", {}).get("isLive") == True:
            log.debug("This video is live.")
            is_live = True

        formats = info.get("fmt_list")
        streams = {}
        protected = False
        # plain (non-adaptive) progressive streams
        for stream_info in info.get("url_encoded_fmt_stream_map", []):
            if stream_info.get("s"):
                protected = True
                continue

            stream = HTTPStream(self.session, stream_info["url"])
            name = formats.get(stream_info["itag"]) or stream_info["quality"]

            if stream_info.get("stereo3d"):
                name += "_3d"

            streams[name] = stream

        if not is_live:
            streams, protected = self._create_adaptive_streams(
                info, streams, protected)

        hls_playlist = info.get("hlsvp") or info.get(
            "player_response", {}).get("streamingData",
                                       {}).get("hlsManifestUrl")
        if hls_playlist:
            try:
                hls_streams = HLSStream.parse_variant_playlist(
                    self.session, hls_playlist, namekey="pixels")
                streams.update(hls_streams)
            except IOError as err:
                log.warning("Failed to extract HLS streams: {0}", err)

        if not streams and protected:
            raise PluginError("This plugin does not support protected videos, "
                              "try youtube-dl instead")

        return streams
class LiveEdu(Plugin):
    """Plugin for liveedu.tv (formerly livecoding.tv) streams and videos."""

    login_url = "https://www.liveedu.tv/accounts/login/"
    url_re = re.compile(r"https?://(?:\w+\.)?(?:livecoding|liveedu)\.tv/")
    # matches "config.<key> = '<value>';" assignments in the page scripts
    config_re = re.compile(
        r"""\Wconfig.(?P<key>\w+)\s*=\s*(?P<q>['"])(?P<value>.*?)(?P=q);""")
    csrf_re = re.compile(r'''"csrfToken"\s*:\s*"(\w+)"''')
    # schema for the viewing-urls API response
    api_schema = validate.Schema({
        "viewing_urls": {
            validate.optional("error"): validate.text,
            validate.optional("urls"): [{
                "src": validate.url(),
                "type": validate.text,
                validate.optional("res"): int,
                validate.optional("label"): validate.text,
            }]
        }
    })
    # schema for the config values scraped from the page
    config_schema = validate.Schema({
        "selectedVideoHID": validate.text,
        "livestreamURL": validate.text,
        "videosURL": validate.text
    })
    arguments = PluginArguments(
        PluginArgument(
            "email",
            requires=["password"],
            metavar="EMAIL",
            help="The email address used to register with liveedu.tv."),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A LiveEdu account password to use with --liveedu-email."))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def login(self):
        """
        Attempt a login to LiveEdu.tv
        """
        email = self.get_option("email")
        password = self.get_option("password")
        if email and password:
            # the login form requires the CSRF token from the login page
            res = http.get(self.login_url)
            csrf_match = self.csrf_re.search(res.text)
            token = csrf_match and csrf_match.group(1)
            self.logger.debug("Attempting login as {0} (token={1})",
                              email, token)
            res = http.post(self.login_url,
                            data=dict(login=email,
                                      password=password,
                                      csrfmiddlewaretoken=token),
                            allow_redirects=False,
                            raise_for_status=False,
                            headers={"Referer": self.login_url})
            # a successful login answers with a 302 redirect
            if res.status_code != 302:
                self.logger.error("Failed to login to LiveEdu account: {0}",
                                  email)

    def _get_streams(self):
        """
        Get the config object from the page source and call the API
        to get the list of streams
        :return:
        """
        # attempt a login
        self.login()
        res = http.get(self.url)
        # decode the config for the page
        matches = self.config_re.finditer(res.text)
        try:
            config = self.config_schema.validate(
                dict([m.group("key", "value") for m in matches]))
        except PluginError:
            return

        # a selected video takes precedence over the live stream
        if config["selectedVideoHID"]:
            self.logger.debug("Found video hash ID: {0}",
                              config["selectedVideoHID"])
            api_url = urljoin(
                self.url,
                urljoin(config["videosURL"], config["selectedVideoHID"]))
        elif config["livestreamURL"]:
            self.logger.debug("Found live stream URL: {0}",
                              config["livestreamURL"])
            api_url = urljoin(self.url, config["livestreamURL"])
        else:
            return

        ares = http.get(api_url)
        data = http.json(ares, schema=self.api_schema)
        viewing_urls = data["viewing_urls"]

        if "error" in viewing_urls:
            self.logger.error("Failed to load streams: {0}",
                              viewing_urls["error"])
        else:
            for url in viewing_urls["urls"]:
                # prefer the vertical resolution, fall back to the label,
                # and finally to a generic "live" name
                try:
                    label = "{0}p".format(url.get("res", url["label"]))
                except KeyError:
                    label = "live"

                if url["type"] == "rtmp/mp4" and RTMPStream.is_usable(
                        self.session):
                    params = {
                        "rtmp": url["src"],
                        "pageUrl": self.url,
                        "live": True,
                    }
                    yield label, RTMPStream(self.session, params)
                elif url["type"] == "application/x-mpegURL":
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url["src"]).items():
                        yield s
class Resolve(Plugin):
    """Generic plugin that resolves playlists and iframes from any website.

    Fixes in this revision:
      * repair_url: replaced identity comparisons (``is`` / ``is not``) on
        string literals with proper equality checks - the old code only
        worked because CPython interns one-character strings.
      * repair_url / _playlist_re: restored the HTML-entity forms
        (``http&#58;//`` and ``&quot;``) that had been mangled into plain
        ``http://`` / ``"`` - with the mangled literal the slice
        ``new_url[9:]`` chopped two characters off every http URL.
      * priority: reuse the existing match object instead of re-matching.
    """

    _url_re = re.compile(r'''(resolve://)?(?P<url>.+)''')

    # regex for iframes
    _iframe_re = re.compile(
        r'''
        <ifr(?:["']\s?\+\s?["'])?ame
        (?!\sname=["']g_iFrame).*?src=
        ["'](?P<url>[^"'\s<>]+)["']
        .*?(?:/>|>(?:[^<>]+)?
        </ifr(?:["']\s?\+\s?["'])?ame(?:\s+)?>)
        ''', re.VERBOSE | re.IGNORECASE | re.DOTALL)
    # regex for playlists; &quot; covers URLs inside HTML-escaped JSON blobs
    _playlist_re = re.compile(
        r'''
        (?:["']|=|&quot;)(?P<url>
        (?<!title=["'])
        (?<!["']title["']:["'])
        [^"'<>\s\;{}]+\.(?:m3u8|f4m|mp3|mp4|mpd)
        (?:\?[^"'<>\s\\{}]+)?)
        (?:["']|(?<!;)\s|>|\\&quot;)
        ''', re.DOTALL | re.VERBOSE)
    # regex for mp3 and mp4 files
    _httpstream_bitrate_re = re.compile(
        r'''
        (?:_|\.)
        (?:
        (?P<bitrate>\d{1,4})
        |
        (?P<resolution>\d{1,4}p)
        )
        \.mp(?:3|4)
        ''', re.VERBOSE)
    # Regex for: javascript redirection
    _window_location_re = re.compile(
        r'''
        <script[^<]+window\.location\.href\s?=\s?["']
        (?P<url>[^"']+)["'];[^<>]+
        ''', re.DOTALL | re.VERBOSE)
    _unescape_iframe_re = re.compile(
        r'''
        unescape\050["']
        (?P<data>%3C(?:
        iframe|%69%66%72%61%6d%65
        )%20[^"']+)["']
        ''', re.IGNORECASE | re.VERBOSE)
    # Regex for obviously ad paths
    _ads_path_re = re.compile(
        r'''
        (?:/(?:static|\d+))?
        /ads?/?(?:\w+)?
        (?:\d+x\d+)?
        (?:_\w+)?\.(?:html?|php)
        ''', re.VERBOSE)

    # START - _make_url_list
    # Not allowed at the end of the parsed url path
    blacklist_endswith = (
        '.gif',
        '.jpg',
        '.png',
        '.svg',
        '.vtt',
        '/chat.html',
        '/chat',
        '/novideo.mp4',
        '/vidthumb.mp4',
    )
    # Not allowed at the end of the parsed url netloc
    blacklist_netloc = (
        '127.0.0.1',
        'about:blank',
        'abv.bg',
        'adfox.ru',
        'googletagmanager.com',
        'javascript:false',
    )
    # END - _make_url_list

    arguments = PluginArguments(
        PluginArgument('playlist-max',
                       metavar='NUMBER',
                       type=num(int, min=0, max=25),
                       default=5,
                       help='''
        Number of how many playlist URLs of the same type
        are allowed to be resolved with this plugin.

        Default is 5
        '''),
        PluginArgument('playlist-referer',
                       metavar='URL',
                       help='''
        Set a custom referer URL for the playlist URLs.

        This only affects playlist URLs of this plugin.

        Default URL of the last website.
        '''),
        PluginArgument('blacklist-netloc',
                       metavar='NETLOC',
                       type=comma_list,
                       help='''
        Blacklist domains that should not be used,
        by using a comma-separated list:

          'example.com,localhost,google.com'

        Useful for websites with a lot of iframes.
        '''),
        PluginArgument('blacklist-path',
                       metavar='PATH',
                       type=comma_list,
                       help='''
        Blacklist the path of a domain that should not be used,
        by using a comma-separated list:

          'example.com/mypath,localhost/example,google.com/folder'

        Useful for websites with different iframes of the same domain.
        '''),
        PluginArgument('whitelist-netloc',
                       metavar='NETLOC',
                       type=comma_list,
                       help='''
        Whitelist domains that should only be searched for iframes,
        by using a comma-separated list:

          'example.com,localhost,google.com'

        Useful for websites with lots of iframes,
        where the main iframe always has the same hosting domain.
        '''),
        PluginArgument('whitelist-path',
                       metavar='PATH',
                       type=comma_list,
                       help='''
        Whitelist the path of a domain that should only be searched
        for iframes, by using a comma-separated list:

          'example.com/mypath,localhost/example,google.com/folder'

        Useful for websites with different iframes of the same domain,
        where the main iframe always has the same path.
        '''),
    )

    def __init__(self, url):
        '''
        generates default options
            and caches them into ResolveCache class
        '''
        super(Resolve, self).__init__(url)
        # START - cache every used url and set a referer
        if hasattr(ResolveCache, 'cache_url_list'):
            ResolveCache.cache_url_list += [self.url]
            # set the last url as a referer
            self.referer = ResolveCache.cache_url_list[-2]
        else:
            ResolveCache.cache_url_list = [self.url]
            self.referer = self.url
        http.headers.update({'Referer': self.referer})
        # END

        # START - how often _get_streams already run
        self._run = len(ResolveCache.cache_url_list)
        # END

    @classmethod
    def priority(cls, url):
        '''
        Returns
        - NO priority if the URL is not prefixed
        - HIGH priority if the URL is prefixed
        :param url: the URL to find the plugin priority for
        :return: plugin priority for the given URL
        '''
        m = cls._url_re.match(url)
        if m:
            # reuse the match object instead of matching a second time
            prefix, url = m.groups()
            if prefix is not None:
                return HIGH_PRIORITY
        return NO_PRIORITY

    @classmethod
    def can_handle_url(cls, url):
        m = cls._url_re.match(url)
        if m:
            return m.group('url') is not None

    def compare_url_path(self, parsed_url, check_list):
        '''compare a parsed url, if it matches an item from a list

        Args:
            parsed_url: an URL that was used with urlparse
            check_list: a list of URLs as a tuple
                        [('foo.bar', '/path/'), ('foo2.bar', '/path/')]

        Returns:
            True
                if parsed_url in check_list
            False
                if parsed_url not in check_list
        '''
        status = False
        for netloc, path in check_list:
            if (parsed_url.netloc.endswith(netloc)
                    and parsed_url.path.startswith(path)):
                status = True
        return status

    def merge_path_list(self, static, user):
        '''merge the static list, with an user list

        Args:
            static (list): static list from this plugin
            user (list): list from an user command

        Returns:
            A new valid list
        '''
        for _path_url in user:
            if not _path_url.startswith(('http', '//')):
                _path_url = update_scheme('http://', _path_url)
            _parsed_path_url = urlparse(_path_url)
            if _parsed_path_url.netloc and _parsed_path_url.path:
                static += [(_parsed_path_url.netloc, _parsed_path_url.path)]
        return static

    def repair_url(self, url, base_url, stream_base=''):
        '''repair a broken url'''
        # remove \
        new_url = url.replace('\\', '')
        # repairs broken scheme: pages frequently embed HTML-escaped URLs,
        # so the colon of the scheme may appear as the "&#58;" entity
        if new_url.startswith('http&#58;//'):
            new_url = 'http:' + new_url[9:]
        elif new_url.startswith('https&#58;//'):
            new_url = 'https:' + new_url[10:]
        # creates a valid url from path only urls
        # and adds missing scheme for // urls
        # (use equality, not identity, for the string comparisons)
        if stream_base and new_url[1] != '/':
            if new_url[0] == '/':
                new_url = new_url[1:]
            new_url = urljoin(stream_base, new_url)
        else:
            new_url = urljoin(base_url, new_url)
        return new_url

    def _make_url_list(self, old_list, base_url, url_type=''):
        '''removes unwanted URLs and creates a list of valid URLs

        Args:
            old_list: list of URLs
            base_url: URL that will get used for scheme and netloc repairs
            url_type: can be ... and is used for ...
                - iframe
                    --resolve-whitelist-netloc
                - playlist
                    Not used

        Returns:
            (list) A new valid list of urls.
        '''
        # START - List for not allowed URL Paths
        # --resolve-blacklist-path
        if not hasattr(ResolveCache, 'blacklist_path'):
            # static list
            blacklist_path = [
                ('bigo.tv', '/show.mp4'),
                ('expressen.se', '/_livetvpreview/'),
                ('facebook.com', '/connect'),
                ('facebook.com', '/plugins'),
                ('haber7.com', '/radyohome/station-widget/'),
                ('static.tvr.by', '/upload/video/atn/promo'),
                ('twitter.com', '/widgets'),
                ('vesti.ru', '/native_widget.html'),
            ]
            # merge user and static list
            blacklist_path_user = self.get_option('blacklist_path')
            if blacklist_path_user is not None:
                blacklist_path = self.merge_path_list(blacklist_path,
                                                      blacklist_path_user)
            ResolveCache.blacklist_path = blacklist_path
        # END

        # START - List of only allowed URL Paths for Iframes
        # --resolve-whitelist-path
        if not hasattr(ResolveCache, 'whitelist_path'):
            whitelist_path = []
            whitelist_path_user = self.get_option('whitelist_path')
            if whitelist_path_user is not None:
                whitelist_path = self.merge_path_list([], whitelist_path_user)
            ResolveCache.whitelist_path = whitelist_path
        # END

        # sorted after the way streamlink will try to remove an url
        status_remove = [
            'SAME-URL',
            'SCHEME',
            'WL-netloc',
            'WL-path',
            'BL-static',
            'BL-netloc',
            'BL-path',
            'BL-ew',
            'ADS',
        ]

        new_list = []
        for url in old_list:
            new_url = self.repair_url(url, base_url)
            # parse the url
            parse_new_url = urlparse(new_url)

            # START - removal of unwanted urls
            REMOVE = False
            count = 0
            # status_remove must be updated on changes
            for url_status in (
                    # Removes an already used iframe url
                    (new_url in ResolveCache.cache_url_list),
                    # Allow only an url with a valid scheme
                    (not parse_new_url.scheme.startswith(('http'))),
                    # Allow only whitelisted domains for iFrames
                    # --resolve-whitelist-netloc
                    (url_type == 'iframe'
                     and self.get_option('whitelist_netloc')
                     and parse_new_url.netloc.endswith(
                         tuple(self.get_option('whitelist_netloc'))) is False),
                    # Allow only whitelisted paths from a domain for iFrames
                    # --resolve-whitelist-path
                    (url_type == 'iframe'
                     and ResolveCache.whitelist_path
                     and self.compare_url_path(
                         parse_new_url, ResolveCache.whitelist_path) is False),
                    # Removes blacklisted domains from a static list
                    # self.blacklist_netloc
                    (parse_new_url.netloc.endswith(self.blacklist_netloc)),
                    # Removes blacklisted domains
                    # --resolve-blacklist-netloc
                    (self.get_option('blacklist_netloc')
                     and parse_new_url.netloc.endswith(
                         tuple(self.get_option('blacklist_netloc')))),
                    # Removes blacklisted paths from a domain
                    # --resolve-blacklist-path
                    (self.compare_url_path(parse_new_url,
                                           ResolveCache.blacklist_path) is True),
                    # Removes unwanted endswith images and chatrooms
                    (parse_new_url.path.endswith(self.blacklist_endswith)),
                    # Removes obviously AD URL
                    (self._ads_path_re.match(parse_new_url.path)),
            ):
                count += 1
                if url_status:
                    REMOVE = True
                    break

            if REMOVE is True:
                log.debug('{0} - Removed: {1}'.format(
                    status_remove[count - 1], new_url))
                continue
            # END - removal of unwanted urls

            # Add repaired url
            new_list += [new_url]

        # Remove duplicates
        log.debug('List length: {0} (with duplicates)'.format(len(new_list)))
        new_list = sorted(list(set(new_list)))
        return new_list

    def _iframe_unescape(self, res_text):
        '''search for unescaped iframes

        Args:
            res_text: Content from self._res_text

        Returns:
            (list) A list of iframe urls
              or
            False
                if no iframe was found
        '''
        unescape_iframe = self._unescape_iframe_re.findall(res_text)
        if unescape_iframe:
            unescape_text = []
            for data in unescape_iframe:
                unescape_text += [unquote(data)]
            unescape_text = ','.join(unescape_text)
            unescape_iframe = self._iframe_re.findall(unescape_text)
            if unescape_iframe:
                log.debug('Found unescape_iframe: {0}'.format(
                    len(unescape_iframe)))
                return unescape_iframe
        log.debug('No unescape_iframe')
        return False

    def _window_location(self, res_text):
        '''Try to find a script with window.location.href

        Args:
            res_text: Content from self._res_text

        Returns:
            (str) url
              or
            False
                if no url was found.
        '''
        match = self._window_location_re.search(res_text)
        if match:
            temp_url = urljoin(self.url, match.group('url'))
            log.debug('Found window_location: {0}'.format(temp_url))
            return temp_url

        log.debug('No window_location')
        return False

    def _resolve_playlist(self, playlist_all):
        ''' create streams

        Args:
            playlist_all: List of stream urls

        Returns:
            all streams
        '''
        playlist_referer = self.get_option('playlist_referer') or self.url
        http.headers.update({'Referer': playlist_referer})

        playlist_max = self.get_option('playlist_max') or 5
        count_playlist = {
            'dash': 0,
            'hds': 0,
            'hls': 0,
            'http': 0,
        }

        for url in playlist_all:
            parsed_url = urlparse(url)
            if (parsed_url.path.endswith(('.m3u8'))
                    or parsed_url.query.endswith(('.m3u8'))):
                if count_playlist['hls'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    streams = HLSStream.parse_variant_playlist(
                        self.session, url).items()
                    if not streams:
                        yield 'live', HLSStream(self.session, url)
                    for s in streams:
                        yield s
                    log.debug('HLS URL - {0}'.format(url))
                    count_playlist['hls'] += 1
                except Exception as e:
                    log.error('Skip HLS with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.f4m'))
                    or parsed_url.query.endswith(('.f4m'))):
                if count_playlist['hds'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    for s in HDSStream.parse_manifest(self.session,
                                                      url).items():
                        yield s
                    log.debug('HDS URL - {0}'.format(url))
                    count_playlist['hds'] += 1
                except Exception as e:
                    log.error('Skip HDS with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.mp3', '.mp4'))
                    or parsed_url.query.endswith(('.mp3', '.mp4'))):
                if count_playlist['http'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    name = 'vod'
                    m = self._httpstream_bitrate_re.search(url)
                    if m:
                        bitrate = m.group('bitrate')
                        resolution = m.group('resolution')
                        if bitrate:
                            name = '{0}k'.format(m.group('bitrate'))
                        elif resolution:
                            name = resolution
                    yield name, HTTPStream(self.session, url)
                    log.debug('HTTP URL - {0}'.format(url))
                    count_playlist['http'] += 1
                except Exception as e:
                    log.error('Skip HTTP with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.mpd'))
                    or parsed_url.query.endswith(('.mpd'))):
                if count_playlist['dash'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    for s in DASHStream.parse_manifest(self.session,
                                                       url).items():
                        yield s
                    log.debug('DASH URL - {0}'.format(url))
                    count_playlist['dash'] += 1
                except Exception as e:
                    log.error('Skip DASH with error {0}'.format(str(e)))
            else:
                log.error('parsed URL - {0}'.format(url))

    def _res_text(self, url):
        '''Content of a website

        Args:
            url: URL with an embedded Video Player.

        Returns:
            Content of the response
        '''
        try:
            res = http.get(url, allow_redirects=True)
        except Exception as e:
            if 'Received response with content-encoding: gzip' in str(e):
                headers = {
                    'User-Agent': useragents.FIREFOX,
                    'Accept-Encoding': 'deflate'
                }
                res = http.get(url, headers=headers, allow_redirects=True)
            elif '403 Client Error' in str(e):
                log.error(
                    'Website Access Denied/Forbidden, you might be geo-blocked or other params are missing.'
                )
                raise NoStreamsError(self.url)
            elif '404 Client Error' in str(e):
                log.error('Website was not found, the link is broken or dead.')
                raise NoStreamsError(self.url)
            else:
                raise e

        if res.history:
            for resp in res.history:
                log.debug('Redirect: {0} - {1}'.format(resp.status_code,
                                                       resp.url))
            log.debug('URL: {0}'.format(res.url))
        return res.text

    def settings_url(self):
        '''store custom settings for URLs'''
        o = urlparse(self.url)

        # User-Agent
        _android = []
        _chrome = []
        _ipad = []
        _iphone = [
            'bigo.tv',
        ]

        if http.headers['User-Agent'].startswith('python-requests'):
            if o.netloc.endswith(tuple(_android)):
                http.headers.update({'User-Agent': useragents.ANDROID})
            elif o.netloc.endswith(tuple(_chrome)):
                http.headers.update({'User-Agent': useragents.CHROME})
            elif o.netloc.endswith(tuple(_ipad)):
                http.headers.update({'User-Agent': useragents.IPAD})
            elif o.netloc.endswith(tuple(_iphone)):
                http.headers.update({'User-Agent': useragents.IPHONE_6})
            else:
                # default User-Agent
                http.headers.update({'User-Agent': useragents.FIREFOX})

        # SSL Verification - http.verify
        http_verify = [
            # https://github.com/streamlink/streamlink/issues/1494
            '.cdn.bg',
            'sportal.bg',
        ]
        if (o.netloc.endswith(tuple(http_verify)) and http.verify):
            http.verify = False
            log.warning('SSL Verification disabled.')

    def _get_streams(self):
        self.url = self.url.replace('resolve://', '')
        self.url = update_scheme('http://', self.url)
        self.settings_url()

        if self._run <= 1:
            log.debug('Version 2018-07-01')
            log.info('This is a custom plugin. '
                     'For support visit https://github.com/back-to/plugins')
        log.debug('User-Agent: {0}'.format(http.headers['User-Agent']))

        new_session_url = False

        log.info(' {0}. URL={1}'.format(self._run, self.url))

        # GET website content
        res_text = self._res_text(self.url)

        # Playlist URL
        playlist_all = self._playlist_re.findall(res_text)
        if playlist_all:
            log.debug('Found Playlists: {0}'.format(len(playlist_all)))
            playlist_list = self._make_url_list(
                playlist_all,
                self.url,
                url_type='playlist',
            )
            if playlist_list:
                log.info('Found Playlists: {0} (valid)'.format(
                    len(playlist_list)))
                return self._resolve_playlist(playlist_list)
        else:
            log.debug('No Playlists')

        # iFrame URL
        iframe_list = []
        for _iframe_list in (self._iframe_re.findall(res_text),
                             self._iframe_unescape(res_text)):
            if not _iframe_list:
                continue
            iframe_list += _iframe_list

        if iframe_list:
            log.debug('Found Iframes: {0}'.format(len(iframe_list)))
            # repair and filter iframe url list
            new_iframe_list = self._make_url_list(iframe_list,
                                                  self.url,
                                                  url_type='iframe')
            if new_iframe_list:
                log.info('Found Iframes: {0} (valid)'.format(
                    len(new_iframe_list)))
                # only the first valid iframe is followed
                for i_url in new_iframe_list:
                    if i_url == new_iframe_list[0]:
                        new_session_url = i_url
                        log.info('IFRAME URL - {0}'.format(i_url))
                    else:
                        log.info('Skip - {0}'.format(i_url))
        else:
            log.debug('No Iframes')

        if not new_session_url:
            # search for window.location.href
            new_session_url = self._window_location(res_text)

        if new_session_url:
            # the Dailymotion Plugin does not work with this Referer
            if 'dailymotion.com' in new_session_url:
                del http.headers['Referer']

            return self.session.streams(new_session_url)

        raise NoPluginError
class TVPlayer(Plugin):
    """Plugin for tvplayer.com live TV streams.

    Fixes in this revision:
      * url_re: ``(:?`` was a typo for the non-capturing group ``(?:``,
        and the dots in the hostname were unescaped.
      * DRM error message: "DRM can cannot be played" -> "DRM and cannot
        be played".
      * can_handle_url: use ``cls`` instead of the hard-coded class name.
    """

    context_url = "http://tvplayer.com/watch/context"
    api_url = "http://api.tvplayer.com/api/v2/stream/live"
    login_url = "https://tvplayer.com/account/login"
    update_url = "https://tvplayer.com/account/update-detail"
    dummy_postcode = "SE1 9LT"  # location of ITV HQ in London

    url_re = re.compile(
        r"https?://(?:www\.)?tvplayer\.com/(?:watch/?|watch/(.+)?)")
    # data-resource / data-token / data-channel-id attributes on the player
    stream_attrs_re = re.compile(
        r'data-(resource|token|channel-id)\s*=\s*"(.*?)"', re.S)
    data_id_re = re.compile(r'data-id\s*=\s*"(.*?)"', re.S)
    login_token_re = re.compile(r'input.*?name="token".*?value="(\w+)"')
    stream_schema = validate.Schema(
        {
            "tvplayer": validate.Schema({
                "status": u'200 OK',
                "response": validate.Schema({
                    "stream": validate.url(
                        scheme=validate.any("http", "https")),
                    validate.optional("drmToken"):
                        validate.any(None, validate.text)
                })
            })
        },
        validate.get("tvplayer"),
        validate.get("response"))
    context_schema = validate.Schema({
        "validate": validate.text,
        validate.optional("token"): validate.text,
        "platform": {
            "key": validate.text
        }
    })
    arguments = PluginArguments(
        PluginArgument(
            "email",
            help="The email address used to register with tvplayer.com.",
            metavar="EMAIL",
            requires=["password"]),
        PluginArgument(
            "password",
            sensitive=True,
            help="The password for your tvplayer.com account.",
            metavar="PASSWORD"))

    @classmethod
    def can_handle_url(cls, url):
        match = cls.url_re.match(url)
        return match is not None

    def __init__(self, url):
        super(TVPlayer, self).__init__(url)
        http.headers.update({"User-Agent": useragents.CHROME})

    def authenticate(self, username, password):
        """Log in to tvplayer.com.

        :return: True when the login succeeded (the site answers a
                 successful login with a 302 redirect)
        """
        res = http.get(self.login_url)
        match = self.login_token_re.search(res.text)
        token = match and match.group(1)
        res2 = http.post(self.login_url,
                         data=dict(email=username,
                                   password=password,
                                   token=token),
                         allow_redirects=False)
        # there is a 302 redirect on a successful login
        return res2.status_code == 302

    def _get_stream_data(self, resource, channel_id, token, service=1):
        """Request the stream URL for a channel from the API.

        :param resource: channel resource name from the page
        :param channel_id: numeric channel id from the page
        :param token: page token used to obtain the context
        :param service: API service number (default 1)
        :return: validated stream data dict
        """
        # Get the context info (validation token and platform)
        self.logger.debug(
            "Getting stream information for resource={0}".format(resource))
        context_res = http.get(self.context_url,
                               params={
                                   "resource": resource,
                                   "gen": token
                               })
        context_data = http.json(context_res, schema=self.context_schema)
        self.logger.debug("Context data: {0}", str(context_data))

        # get the stream urls
        res = http.post(self.api_url,
                        data=dict(
                            service=service,
                            id=channel_id,
                            validate=context_data["validate"],
                            token=context_data.get("token"),
                            platform=context_data["platform"]["key"]),
                        raise_for_status=False)
        return http.json(res, schema=self.stream_schema)

    def _get_stream_attrs(self, page):
        """Scrape resource/token/channel_id attributes from the watch page.

        :param page: response object for the watch page
        :return: dict of attributes, or {} when any required one is missing
        """
        stream_attrs = dict(
            (k.replace("-", "_"), v.strip('"'))
            for k, v in self.stream_attrs_re.findall(page.text))

        # fall back to data-id when data-channel-id is absent
        if not stream_attrs.get("channel_id"):
            m = self.data_id_re.search(page.text)
            stream_attrs["channel_id"] = m and m.group(1)

        self.logger.debug("Got stream attributes: {0}", str(stream_attrs))
        valid = True
        for a in ("channel_id", "resource", "token"):
            if a not in stream_attrs:
                self.logger.debug("Missing '{0}' from stream attributes", a)
                valid = False

        return stream_attrs if valid else {}

    def _get_streams(self):
        if self.get_option("email") and self.get_option("password"):
            self.logger.debug("Logging in as {0}".format(
                self.get_option("email")))
            if not self.authenticate(self.get_option("email"),
                                     self.get_option("password")):
                self.logger.warning("Failed to login as {0}".format(
                    self.get_option("email")))

        # find the list of channels from the html in the page
        self.url = self.url.replace("https", "http")  # https redirects to http
        res = http.get(self.url)

        # a postcode is required before streams are served; set a dummy one
        if "enter your postcode" in res.text:
            self.logger.info(
                "Setting your postcode to: {0}. "
                "This can be changed in the settings on tvplayer.com",
                self.dummy_postcode)
            res = http.post(self.update_url,
                            data=dict(postcode=self.dummy_postcode),
                            params=dict(return_url=self.url))

        stream_attrs = self._get_stream_attrs(res)
        if stream_attrs:
            stream_data = self._get_stream_data(**stream_attrs)
            if stream_data:
                if stream_data.get("drmToken"):
                    self.logger.error(
                        "This stream is protected by DRM and cannot be played")
                    return
                else:
                    return HLSStream.parse_variant_playlist(
                        self.session, stream_data["stream"])
        else:
            if "need to login" in res.text:
                self.logger.error(
                    "You need to login using --tvplayer-email/--tvplayer-password to view this stream"
                )
class AfreecaTV(Plugin):
    """Plugin for play.afreecatv.com live broadcasts (HLS with AID auth).

    Fix in this revision: the broadcast-number group in ``_re_url``
    contained a stray colon (``(?P<bno>:\\d+)``) and could never match a
    real URL path like ``/username/12345678``.
    """

    _re_bno = re.compile(r"var nBroadNo = (?P<bno>\d+);")
    _re_url = re.compile(r"https?://play\.afreecatv\.com/(?P<username>\w+)(?:/(?P<bno>\d+))?")

    CHANNEL_API_URL = "http://live.afreecatv.com/afreeca/player_live_api.php"
    CHANNEL_RESULT_OK = 1
    QUALITYS = ["original", "hd", "sd"]
    QUALITY_WEIGHTS = {
        "original": 1080,
        "hd": 720,
        "sd": 480,
    }

    # player_live_api.php responses: {"CHANNEL": {...}} -> inner dict
    _schema_channel = validate.Schema(
        {
            "CHANNEL": {
                "RESULT": validate.transform(int),
                validate.optional("BPWD"): str,
                validate.optional("BNO"): str,
                validate.optional("RMD"): str,
                validate.optional("AID"): str,
                validate.optional("CDN"): str,
            }
        },
        validate.get("CHANNEL")
    )
    _schema_stream = validate.Schema(
        {
            validate.optional("view_url"): validate.url(
                scheme=validate.any("rtmp", "http")
            ),
            "stream_status": str,
        }
    )

    arguments = PluginArguments(
        PluginArgument(
            "username",
            sensitive=True,
            requires=["password"],
            metavar="USERNAME",
            help="The username used to register with afreecatv.com."
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A afreecatv.com account password to use with --afreeca-username."
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached AfreecaTV credentials to initiate a new session
            and reauthenticate.
            """),
    )

    def __init__(self, url):
        super().__init__(url)
        # consider the session authenticated only when every cookie from a
        # previous login is still cached
        self._authed = (
            self.session.http.cookies.get("PdboxBbs")
            and self.session.http.cookies.get("PdboxSaveTicket")
            and self.session.http.cookies.get("PdboxTicket")
            and self.session.http.cookies.get("PdboxUser")
            and self.session.http.cookies.get("RDB")
        )

    @classmethod
    def can_handle_url(cls, url):
        return cls._re_url.match(url) is not None

    @classmethod
    def stream_weight(cls, key):
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, "afreeca"
        return Plugin.stream_weight(key)

    def _get_channel_info(self, broadcast, username):
        """Query the channel landing info (BNO, CDN, RMD, password flag)."""
        data = {
            "bid": username,
            "bno": broadcast,
            "mode": "landing",
            "player_type": "html5",
            "type": "live",
        }
        res = self.session.http.post(self.CHANNEL_API_URL, data=data)
        return self.session.http.json(res, schema=self._schema_channel)

    def _get_hls_key(self, broadcast, username, quality):
        """Request the AID key needed to play the HLS stream."""
        data = {
            "bid": username,
            "bno": broadcast,
            "pwd": "",
            "quality": quality,
            "type": "pwd"
        }
        res = self.session.http.post(self.CHANNEL_API_URL, data=data)
        return self.session.http.json(res, schema=self._schema_channel)

    def _get_stream_info(self, broadcast, quality, cdn, rmd):
        """Resolve the actual view_url for a broadcast from the CDN."""
        params = {
            "return_type": cdn,
            "broad_key": f"{broadcast}-flash-{quality}-hls",
        }
        res = self.session.http.get(f"{rmd}/broad_stream_assign.html",
                                    params=params)
        return self.session.http.json(res, schema=self._schema_stream)

    def _get_hls_stream(self, broadcast, username, quality, cdn, rmd):
        """Build an AfreecaHLSStream for one quality, or None on failure."""
        keyjson = self._get_hls_key(broadcast, username, quality)
        if keyjson["RESULT"] != self.CHANNEL_RESULT_OK:
            return
        key = keyjson["AID"]

        info = self._get_stream_info(broadcast, quality, cdn, rmd)
        if "view_url" in info:
            return AfreecaHLSStream(self.session,
                                    info["view_url"],
                                    params={"aid": key})

    def _login(self, username, password):
        """Log in and cache the session cookies; returns True on success."""
        data = {
            "szWork": "login",
            "szType": "json",
            "szUid": username,
            "szPassword": password,
            "isSaveId": "true",
            "isSavePw": "false",
            "isSaveJoin": "false",
            "isLoginRetain": "Y",
        }
        res = self.session.http.post("https://login.afreecatv.com/app/LoginAction.php", data=data)
        data = self.session.http.json(res)
        log.trace(f"{data!r}")
        if data["RESULT"] == self.CHANNEL_RESULT_OK:
            self.save_cookies()
            return True
        else:
            return False

    def _get_streams(self):
        login_username = self.get_option("username")
        login_password = self.get_option("password")

        self.session.http.headers.update({"Referer": self.url})

        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif login_username and login_password:
            log.debug("Attempting to login using username and password")
            if self._login(login_username, login_password):
                log.info("Login was successful")
            else:
                log.error("Failed to login")

        m = self._re_url.match(self.url).groupdict()
        username = m["username"]
        bno = m["bno"]
        if bno is None:
            # no broadcast number in the URL - scrape it from the page
            res = self.session.http.get(self.url)
            m = self._re_bno.search(res.text)
            if not m:
                log.error("Could not find broadcast number.")
                return
            bno = m.group("bno")

        channel = self._get_channel_info(bno, username)
        log.trace(f"{channel!r}")
        if channel.get("BPWD") == "Y":
            log.error("Stream is Password-Protected")
            return
        elif channel.get("RESULT") == -6:
            log.error("Login required")
            return
        elif channel.get("RESULT") != self.CHANNEL_RESULT_OK:
            return

        (broadcast, rmd, cdn) = (channel["BNO"], channel["RMD"], channel["CDN"])
        if not (broadcast and rmd and cdn):
            return

        for qkey in self.QUALITYS:
            hls_stream = self._get_hls_stream(broadcast, username, qkey, cdn, rmd)
            if hls_stream:
                yield qkey, hls_stream
class YuppTV(Plugin):
    """Plugin for yupptv.com live TV streams (cookie-based login)."""

    _url_re = re.compile(r'https?://(?:www\.)?yupptv\.com')
    _m3u8_re = re.compile(r'''['"](http.+\.m3u8.*?)['"]''')
    # the login cookies are kept for a year
    _cookie_expiry = 3600 * 24 * 365

    arguments = PluginArguments(
        PluginArgument("boxid",
                       requires=["yuppflixtoken"],
                       sensitive=True,
                       metavar="BOXID",
                       help="""
        The yupptv.com boxid that's used in the BoxId cookie.
        Can be used instead of the username/password login process.
        """),
        PluginArgument("yuppflixtoken",
                       sensitive=True,
                       metavar="YUPPFLIXTOKEN",
                       help="""
        The yupptv.com yuppflixtoken that's used in the YuppflixToken cookie.
        Can be used instead of the username/password login process.
        """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached YuppTV credentials to initiate a new session
        and reauthenticate.
        """),
    )

    def __init__(self, url):
        super(YuppTV, self).__init__(url)
        # authenticated when both login cookies survived from a previous run
        cookies = self.session.http.cookies
        self._authed = cookies.get("BoxId") and cookies.get("YuppflixToken")

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _login_using_box_id_and_yuppflix_token(self, box_id, yuppflix_token):
        """Authenticate by planting the two session cookies directly."""
        expires = time.time() + self._cookie_expiry
        for name, value in (('BoxId', box_id),
                            ('YuppflixToken', yuppflix_token)):
            self.session.http.cookies.set(
                name,
                value,
                domain='www.yupptv.com',
                path='/',
                expires=expires,
            )
        self.save_cookies()
        log.info("Successfully set BoxId and YuppflixToken")

    def _get_streams(self):
        """Locate the HLS playlist on the page, handling cookie login and
        the various signup/subscription error states."""
        self.session.http.headers.update({"User-Agent": useragents.CHROME})

        box_id = self.get_option("boxid")
        yuppflix_token = self.get_option("yuppflixtoken")

        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif box_id and yuppflix_token:
            self._login_using_box_id_and_yuppflix_token(
                box_id,
                yuppflix_token,
            )
            self._authed = True

        res = self.session.http.get(self.url)
        needs_signup = "btnsignup" in res.text

        if self._authed and needs_signup:
            log.error("This device requires renewed credentials to log in")
            return

        match = self._m3u8_re.search(res.text)
        if match is None:
            if needs_signup:
                log.error("This stream requires you to login")
            elif "btnsubscribe" in res.text:
                log.error("This stream requires a subscription")
            return

        playlist_url = match.group(1)
        if "preview/" in playlist_url:
            if needs_signup:
                log.error("This stream requires you to login")
            else:
                log.error("This stream requires a subscription")
            return

        return HLSStream.parse_variant_playlist(self.session, playlist_url)
class NicoLive(Plugin):
    """Streamlink plugin for live broadcasts on Niconico (live.nicovideo.jp).

    Stream URLs are negotiated over Niconico's websocket API: the plugin
    extracts the wss URL from the video page, requests a permit, and keeps
    the session alive by periodically sending a "watching" command.
    """

    arguments = PluginArguments(
        PluginArgument(
            "email",
            argument_name="niconico-email",
            sensitive=True,
            metavar="EMAIL",
            help="The email or phone number associated with your "
                 "Niconico account"),
        PluginArgument(
            "password",
            argument_name="niconico-password",
            sensitive=True,
            metavar="PASSWORD",
            help="The password of your Niconico account"),
        PluginArgument(
            "user-session",
            argument_name="niconico-user-session",
            sensitive=True,
            metavar="VALUE",
            help="Value of the user-session token \n(can be used in "
                 "case you do not want to put your password here)"))

    # Set once the wss api grants a stream (a "currentstream" message).
    is_stream_ready = False
    # Set when the api reports the broadcast ended (a "disconnect" message).
    is_stream_ended = False
    # Seconds between "watching" keep-alives; the api may override this.
    watching_interval = 30
    watching_interval_worker_thread = None
    stream_reader = None
    _ws = None

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url) is not None

    def _get_streams(self):
        # Strip the query string: the bare page URL identifies the broadcast.
        self.url = self.url.split("?")[0]
        self.session.http.headers.update({
            "User-Agent": useragents.CHROME,
        })

        if not self.get_wss_api_url():
            # Fixed typo in log message ("Coundn't").
            _log.debug("Couldn't extract wss_api_url. Attempting login...")
            if not self.niconico_web_login():
                return None
            if not self.get_wss_api_url():
                _log.error("Failed to get wss_api_url.")
                _log.error(
                    "Please check if the URL is correct, "
                    "and make sure your account has access to the video.")
                return None

        self.api_connect(self.wss_api_url)

        # Poll every 0.1s (up to 60s) until the api grants the stream.
        i = 0
        while not self.is_stream_ready:
            if i % 10 == 0:
                _log.debug("Waiting for permit...")
            if i == 600:
                _log.error("Waiting for permit timed out.")
                return None
            if self.is_stream_ended:
                return None
            time.sleep(0.1)
            i += 1

        streams = HLSStream.parse_variant_playlist(self.session,
                                                   self.hls_stream_url)
        nico_streams = {}
        for s in streams:
            nico_stream = NicoHLSStream(streams[s], self)
            nico_streams[s] = nico_stream

        return nico_streams

    def get_wss_api_url(self):
        """Fetch the video page and extract the websocket API URL and the
        broadcast id.

        Returns True when a usable wss:// URL was found.
        """
        _log.debug("Getting video page: {0}".format(self.url))
        resp = self.session.http.get(self.url)

        try:
            # The values live inside an HTML-escaped JSON attribute, so the
            # delimiters are literal "&quot;" entities in the page source.
            # (The previous revision had the entities decoded into raw
            # quotes, which broke the string literals.)
            self.wss_api_url = extract_text(
                resp.text, "&quot;webSocketUrl&quot;:&quot;", "&quot;")
        except Exception as e:
            _log.debug(e)
            _log.debug("Failed to extract wss api url")
            return False

        try:
            self.broadcast_id = extract_text(
                resp.text, "&quot;broadcastId&quot;:&quot;", "&quot;")
        except Exception as e:
            _log.debug(e)
            _log.warning("Failed to extract broadcast id")

        _log.debug("Video page response code: {0}".format(resp.status_code))
        _log.trace(u"Video page response body: {0}".format(resp.text))
        _log.debug("Got wss_api_url: {0}".format(self.wss_api_url))
        _log.debug("Got broadcast_id: {0}".format(self.broadcast_id))

        return self.wss_api_url.startswith("wss://")

    def api_on_open(self):
        self.send_playerversion()
        # Only ask for a brand-new stream on the first connect; reconnects
        # reuse the stream that is already playing.
        require_new_stream = not self.is_stream_ready
        self.send_getpermit(require_new_stream=require_new_stream)

    def api_on_error(self, ws, error=None):
        if error:
            _log.warning(error)
        _log.warning("wss api disconnected.")
        _log.warning("Attempting to reconnect in 5 secs...")
        time.sleep(5)
        self.api_connect(self.wss_api_url)

    def api_connect(self, url):
        """Open the websocket API connection in a daemon thread."""
        # Proxy support adapted from the UStreamTV plugin (ustreamtv.py)
        proxy_url = self.session.get_option("https-proxy")
        if proxy_url is None:
            proxy_url = self.session.get_option("http-proxy")
        proxy_options = parse_proxy_url(proxy_url)
        if proxy_options.get('http_proxy_host'):
            _log.debug("Using proxy ({0}://{1}:{2})".format(
                proxy_options.get('proxy_type') or "http",
                proxy_options.get('http_proxy_host'),
                proxy_options.get('http_proxy_port') or 80))

        _log.debug("Connecting: {0}".format(url))
        self._ws = websocket.WebSocketApp(
            url,
            header=["User-Agent: {0}".format(useragents.CHROME)],
            on_open=self.api_on_open,
            on_message=self.handle_api_message,
            on_error=self.api_on_error)
        # BUG FIX: the proxy options are *keyword* arguments of
        # WebSocketApp.run_forever(); passing the dict via args= would
        # unpack its keys as positional arguments instead.
        self.ws_worker_thread = threading.Thread(
            target=self._ws.run_forever,
            kwargs=proxy_options)
        self.ws_worker_thread.daemon = True
        self.ws_worker_thread.start()

    def send_message(self, type_, body):
        """Serialize and send one {"type": ..., "body": ...} api message."""
        msg = {"type": type_, "body": body}
        msg_json = json.dumps(msg)
        _log.debug(u"Sending: {0}".format(msg_json))
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_playerversion(self):
        body = {"command": "playerversion", "params": ["leo"]}
        self.send_message("watch", body)

    def send_getpermit(self, require_new_stream=True):
        # Request a low-latency HLS stream permit for this broadcast.
        body = {
            "command": "getpermit",
            "requirement": {
                "broadcastId": self.broadcast_id,
                "route": "",
                "stream": {
                    "protocol": "hls",
                    "requireNewStream": require_new_stream,
                    "priorStreamQuality": "abr",
                    "isLowLatency": True,
                    "isChasePlay": False
                },
                "room": {
                    "isCommentable": True,
                    "protocol": "webSocket"
                }
            }
        }
        self.send_message("watch", body)

    def send_watching(self):
        # Keep-alive: tell the api we are still watching this broadcast.
        body = {
            "command": "watching",
            "params": [self.broadcast_id, "-1", "0"]
        }
        self.send_message("watch", body)

    def send_pong(self):
        self.send_message("pong", {})

    def handle_api_message(self, message):
        """Dispatch a single message received from the wss api."""
        _log.debug(u"Received: {0}".format(message))
        message_parsed = json.loads(message)

        if message_parsed["type"] == "watch":
            body = message_parsed["body"]
            command = body["command"]

            if command == "currentstream":
                # The permit was granted: remember the HLS URL.
                current_stream = body["currentStream"]
                self.hls_stream_url = current_stream["uri"]
                self.is_stream_ready = True

            elif command == "watchinginterval":
                self.watching_interval = int(body["params"][0])
                _log.debug("Got watching_interval: {0}".format(
                    self.watching_interval))

                if self.watching_interval_worker_thread is None:
                    _log.debug("send_watching_scheduler starting.")
                    self.watching_interval_worker_thread = threading.Thread(
                        target=self.send_watching_scheduler)
                    self.watching_interval_worker_thread.daemon = True
                    self.watching_interval_worker_thread.start()
                else:
                    _log.debug("send_watching_scheduler already running.")

            elif command == "disconnect":
                _log.info("Websocket API closed.")
                _log.info("Stream ended.")
                self.is_stream_ended = True
                if self.stream_reader is not None:
                    self.stream_reader.close()
                    _log.info("Stream reader closed.")

        elif message_parsed["type"] == "ping":
            self.send_pong()

    def send_watching_scheduler(self):
        """
        Periodically send "watching" command to the API.
        This is necessary to keep the session alive.
        """
        while not self.is_stream_ended:
            self.send_watching()
            time.sleep(self.watching_interval)

    def niconico_web_login(self):
        """Log in to Niconico, preferring an explicit user-session token
        over an email/password login.

        Returns True when the session is (now) authenticated.
        """
        user_session = self.get_option("user-session")
        email = self.get_option("email")
        password = self.get_option("password")

        if user_session is not None:
            _log.info("User session cookie is provided. Using it.")
            self.session.http.cookies.set(
                "user_session",
                user_session,
                path="/",
                domain="nicovideo.jp")
            self.save_cookies()
            return True
        elif email is not None and password is not None:
            # Fixed typo in log message ("Attemping").
            _log.info("Email and password are provided. Attempting login.")
            payload = {"mail_tel": email, "password": password}
            resp = self.session.http.post(_login_url,
                                          data=payload,
                                          params=_login_url_params)

            _log.debug("Login response code: {0}".format(resp.status_code))
            _log.trace(u"Login response body: {0}".format(resp.text))
            _log.debug("Cookies: {0}".format(
                self.session.http.cookies.get_dict()))

            if self.session.http.cookies.get("user_session") is None:
                # No session cookie: login failed; try to surface the
                # human-readable reason from the response page.
                try:
                    msg = extract_text(
                        resp.text, '<p class="notice__text">', "</p>")
                except Exception as e:
                    _log.debug(e)
                    msg = "unknown reason"
                _log.warning("Login failed. {0}".format(msg))
                return False
            else:
                _log.info("Logged in.")
                self.save_cookies()
                return True
        else:
            _log.warning(
                "Neither a email and password combination nor a user session "
                "token is provided. Cannot attempt login.")
            return False
class Twitch(Plugin):
    """Streamlink plugin for Twitch live channels, VODs and clips
    (Kraken v5 API era)."""

    arguments = PluginArguments(
        PluginArgument(
            "oauth-token",
            sensitive=True,
            metavar="TOKEN",
            help="""
            An OAuth token to use for Twitch authentication.
            Use --twitch-oauth-authenticate to create a token.
            """),
        PluginArgument(
            "cookie",
            sensitive=True,
            metavar="COOKIES",
            help="""
            Twitch cookies to authenticate to allow access to subscription channels.

            Example:

              "_twitch_session_id=xxxxxx; persistent=xxxxx"

            Note: This method is the old and clunky way of authenticating with
            Twitch, using --twitch-oauth-authenticate is the recommended and
            simpler way of doing it now.
            """),
        PluginArgument(
            "disable-hosting",
            action="store_true",
            help="""
            Do not open the stream if the target channel is hosting another channel.
            """))

    @classmethod
    def stream_weight(cls, key):
        # Prefer Twitch-specific quality ordering, fall back to the default.
        weight = QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, "twitch"

        return Plugin.stream_weight(key)

    @classmethod
    def get_urls(cls):
        # BUG FIX: a classmethod receives the class as its first argument;
        # it was previously misnamed "self".
        return ["https://www.twitch.tv/", "https://www.twitch.tv/videos/"]

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def __init__(self, url):
        Plugin.__init__(self, url)
        self._hosted_chain = []
        match = _url_re.match(url).groupdict()
        parsed = urlparse(url)
        self.params = parse_query(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self.video_type = None
        self._channel_id = None
        self._channel = None
        self.clip_name = None

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                try:
                    # The "video" param is e.g. "v123": type letter + id.
                    self.video_type = self.params["video"][0]
                    self.video_id = self.params["video"][1:]
                except IndexError:
                    self.logger.debug("Invalid video param: {0}",
                                      self.params["video"])
            self._channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            self._channel = match.get("channel") and match.get("channel").lower()
            self.video_type = match.get("video_type")
            if match.get("videos_id"):
                self.video_type = "v"
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(beta=self.subdomain == "beta",
                             session=self.session,
                             version=5)
        self.usher = UsherService(session=self.session)

    @property
    def channel(self):
        # Lazily resolve the channel name (and id) from the video id.
        if not self._channel:
            if self.video_id:
                cdata = self._channel_from_video_id(self.video_id)
                self._channel = cdata["name"].lower()
                self._channel_id = cdata["_id"]

        return self._channel

    @channel.setter
    def channel(self, channel):
        self._channel = channel
        # channel id becomes unknown
        self._channel_id = None

    @property
    def channel_id(self):
        if not self._channel_id:
            # If the channel name is set, use that to look up the ID
            if self._channel:
                cdata = self._channel_from_login(self._channel)
                self._channel_id = cdata["_id"]

            # If the channel name is not set but the video ID is,
            # use that to look up both ID and name
            elif self.video_id:
                cdata = self._channel_from_video_id(self.video_id)
                self._channel = cdata["name"].lower()
                self._channel_id = cdata["_id"]

        return self._channel_id

    def _channel_from_video_id(self, video_id):
        """Return the channel record that owns the given video."""
        vdata = self.api.videos(video_id)
        if "channel" not in vdata:
            raise PluginError("Unable to find video: {0}".format(video_id))
        return vdata["channel"]

    def _channel_from_login(self, channel):
        """Return the channel record for the given login name."""
        cdata = self.api.users(login=channel)
        if len(cdata["users"]):
            return cdata["users"][0]
        else:
            raise PluginError("Unable to find channel: {0}".format(channel))

    def _authenticate(self):
        """Authenticate the API session via OAuth token or cookies, once."""
        if self.api.oauth_token:
            return

        oauth_token = self.options.get("oauth_token")
        cookies = self.options.get("cookie")

        if oauth_token:
            self.logger.info("Attempting to authenticate using OAuth token")
            self.api.oauth_token = oauth_token
            user = self.api.user(schema=_user_schema)

            if user:
                self.logger.info("Successfully logged in as {0}", user)
            else:
                self.logger.error("Failed to authenticate, the access token "
                                  "is invalid or missing required scope")
        elif cookies:
            self.logger.info("Attempting to authenticate using cookies")
            self.api.add_cookies(cookies)
            self.api.oauth_token = self.api.token(schema=_viewer_token_schema)
            login = self.api.viewer_info(schema=_viewer_info_schema)

            if login:
                self.logger.info("Successfully logged in as {0}", login)
            else:
                self.logger.error("Failed to authenticate, your cookies "
                                  "may have expired")

    def _create_playlist_streams(self, videos):
        """Build one FLV/HTTP stream per quality from a video's chunk lists."""
        start_offset = int(videos.get("start_offset", 0))
        stop_offset = int(videos.get("end_offset", 0))
        streams = {}

        for quality, chunks in videos.get("chunks").items():
            if not chunks:
                if videos.get("restrictions", {}).get(quality) == "chansub":
                    self.logger.warning("The quality '{0}' is not available "
                                        "since it requires a subscription.",
                                        quality)
                continue

            # Rename 'live' to 'source'
            if quality == "live":
                quality = "source"

            # Drop chunks without a URL (muted/removed segments).
            chunks_filtered = list(filter(lambda c: c["url"], chunks))
            if len(chunks) != len(chunks_filtered):
                self.logger.warning("The video '{0}' contains invalid chunks. "
                                    "There will be missing data.", quality)
                chunks = chunks_filtered

            chunks_duration = sum(c.get("length") for c in chunks)

            # If it's a full broadcast we just use all the chunks
            if start_offset == 0 and chunks_duration == stop_offset:
                # No need to use the FLV concat if it's just one chunk
                if len(chunks) == 1:
                    url = chunks[0].get("url")
                    stream = HTTPStream(self.session, url)
                else:
                    chunks = [HTTPStream(self.session, c.get("url"))
                              for c in chunks]
                    stream = FLVPlaylist(self.session, chunks,
                                         duration=chunks_duration)
            else:
                try:
                    stream = self._create_video_clip(chunks,
                                                     start_offset,
                                                     stop_offset)
                except StreamError as err:
                    self.logger.error("Error while creating video '{0}': {1}",
                                      quality, err)
                    continue

            streams[quality] = stream

        return streams

    def _create_video_clip(self, chunks, start_offset, stop_offset):
        """Build an FLV playlist covering [start_offset, stop_offset),
        seeking into the first chunk via its keyframe index."""
        playlist_duration = stop_offset - start_offset
        playlist_offset = 0
        playlist_streams = []
        playlist_tags = []

        for chunk in chunks:
            chunk_url = chunk["url"]
            chunk_length = chunk["length"]
            chunk_start = playlist_offset
            chunk_stop = chunk_start + chunk_length
            chunk_stream = HTTPStream(self.session, chunk_url)

            if chunk_start <= start_offset <= chunk_stop:
                # First chunk: parse the FLV header to find a keyframe
                # at or before the requested start offset.
                try:
                    headers = extract_flv_header_tags(chunk_stream)
                except IOError as err:
                    raise StreamError("Error while parsing FLV: {0}", err)

                if not headers.metadata:
                    raise StreamError(
                        "Missing metadata tag in the first chunk")

                metadata = headers.metadata.data.value
                keyframes = metadata.get("keyframes")

                if not keyframes:
                    if chunk["upkeep"] == "fail":
                        raise StreamError(
                            "Unable to seek into muted chunk, try another timestamp")
                    else:
                        raise StreamError(
                            "Missing keyframes info in the first chunk")

                keyframe_offset = None
                keyframe_offsets = keyframes.get("filepositions")
                keyframe_times = [playlist_offset + t
                                  for t in keyframes.get("times")]
                # Pick the last keyframe at or before start_offset.
                # (Loop vars renamed from time/offset to avoid shadowing
                # the time module.)
                for kf_time, kf_offset in zip(keyframe_times,
                                              keyframe_offsets):
                    if kf_time > start_offset:
                        break
                    keyframe_offset = kf_offset

                if keyframe_offset is None:
                    raise StreamError("Unable to find a keyframe to seek to "
                                      "in the first chunk")

                # Request the chunk starting at the chosen keyframe byte.
                chunk_headers = dict(
                    Range="bytes={0}-".format(int(keyframe_offset)))
                chunk_stream = HTTPStream(self.session, chunk_url,
                                          headers=chunk_headers)
                playlist_streams.append(chunk_stream)
                for tag in headers:
                    playlist_tags.append(tag)
            elif start_offset <= chunk_start < stop_offset:
                playlist_streams.append(chunk_stream)

            playlist_offset += chunk_length

        return FLVPlaylist(self.session, playlist_streams,
                           tags=playlist_tags, duration=playlist_duration)

    def _get_video_streams(self):
        # Fixed typo in log message ("steams").
        self.logger.debug("Getting video streams for {0} (type={1})".format(
            self.video_id, self.video_type))
        self._authenticate()

        if self.video_type == "b":
            self.video_type = "a"

        try:
            videos = self.api.videos(self.video_type + self.video_id,
                                     schema=_video_schema)
        except PluginError as err:
            if "HTTP/1.1 0 ERROR" in str(err):
                raise NoStreamsError(self.url)
            else:
                raise

        # Parse the "t" query parameter on broadcasts and adjust
        # start offset if needed.
        time_offset = self.params.get("t")
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

            videos["start_offset"] += time_offset

        return self._create_playlist_streams(videos)

    def _access_token(self, type="live"):
        """Fetch the usher access token (sig, token) for a channel or VOD."""
        try:
            if type == "live":
                endpoint = "channels"
                value = self.channel
            elif type == "video":
                endpoint = "vods"
                value = self.video_id

            sig, token = self.api.access_token(endpoint, value,
                                               schema=_access_token_schema)
        except PluginError as err:
            if "404 Client Error" in str(err):
                raise NoStreamsError(self.url)
            else:
                raise

        return sig, token

    def _check_for_host(self):
        """Return the hosted channel's login, or None when not hosting."""
        host_info = self.api.hosted_channel(
            include_logins=1, host=self.channel_id).json()["hosts"][0]
        if ("target_login" in host_info
                and host_info["target_login"].lower() != self.channel.lower()):
            self.logger.info("{0} is hosting {1}".format(
                self.channel, host_info["target_login"]))
            return host_info["target_login"]

    def _get_hls_streams(self, stream_type="live"):
        self.logger.debug("Getting {0} HLS streams for {1}".format(
            stream_type, self.channel))
        self._authenticate()
        self._hosted_chain.append(self.channel)

        if stream_type == "live":
            hosted_channel = self._check_for_host()
            if hosted_channel and self.options.get("disable_hosting"):
                self.logger.info("hosting was disabled by command line option")
            elif hosted_channel:
                self.logger.info("switching to {0}", hosted_channel)
                if hosted_channel in self._hosted_chain:
                    # Guard against A hosts B hosts A cycles.
                    self.logger.error(
                        u"A loop of hosted channels has been detected, "
                        "cannot find a playable stream. ({0})".format(
                            u" -> ".join(self._hosted_chain
                                         + [hosted_channel])))
                    return {}
                self.channel = hosted_channel
                return self._get_hls_streams(stream_type)

            # only get the token once the channel has been resolved
            sig, token = self._access_token(stream_type)
            url = self.usher.channel(self.channel, sig=sig, token=token,
                                     fast_bread=True)
        elif stream_type == "video":
            sig, token = self._access_token(stream_type)
            url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)
        else:
            self.logger.debug("Unknown HLS stream type: {0}".format(
                stream_type))
            return {}

        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

        try:
            # If the stream is a VOD that is still being recorded the stream
            # should start at the beginning of the recording
            streams = HLSStream.parse_variant_playlist(
                self.session, url,
                start_offset=time_offset,
                force_restart=not stream_type == "live")
        except IOError as err:
            err = str(err)
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                return
            else:
                raise PluginError(err)

        try:
            # Warn about qualities the token says are subscriber-only.
            token = parse_json(token, schema=_token_schema)
            for name in token["restricted_bitrates"]:
                if name not in streams:
                    self.logger.warning("The quality '{0}' is not available "
                                        "since it requires a subscription.",
                                        name)
        except PluginError:
            pass

        return streams

    def _get_clips(self):
        """Return one HTTP stream per available clip quality."""
        quality_options = self.api.clip_status(
            self.channel, self.clip_name, schema=_quality_options_schema)
        streams = {}
        for quality_option in quality_options:
            streams[quality_option["quality"]] = HTTPStream(
                self.session, quality_option["source"])
        return streams

    def _get_streams(self):
        # Dispatch on what the URL identified: video, clip, or live channel.
        if self.video_id:
            if self.video_type == "v":
                return self._get_hls_streams("video")
            else:
                return self._get_video_streams()
        elif self.clip_name:
            return self._get_clips()
        elif self._channel:
            return self._get_hls_streams("live")
class USTVNow(Plugin):
    """Streamlink plugin for live channels on ustvnow.com."""

    _url_re = re.compile(r"https?://(?:watch\.)?ustvnow\.com(?:/(?:watch|guide)/(?P<scode>\w+))?")
    # Matches the API token embedded as a javascript variable on the page.
    _token_re = re.compile(r'''var\s+token\s*=\s*"(.*?)";''')
    _login_url = "https://watch.ustvnow.com/account/login"
    _signin_url = "https://watch.ustvnow.com/account/signin"
    _guide_url = "http://m.ustvnow.com/gtv/1/live/channelguidehtml"
    _stream_url = "http://m.ustvnow.com/stream/1/live/view"

    arguments = PluginArguments(
        PluginArgument(
            "username",
            metavar="USERNAME",
            required=True,
            help="Your USTV Now account username"
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            required=True,
            help="Your USTV Now account password",
            prompt="Enter USTV Now account password"
        ),
        PluginArgument(
            "station-code",
            metavar="CODE",
            help="USTV Now station code"
        ),
    )

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def login(self, username, password):
        """Log in and return the API token scraped from the page, or None."""
        r = http.get(self._signin_url)
        csrf = None

        # The sign-in form carries a CSRF token in a hidden input field.
        for input in itertags(r.text, "input"):
            if input.attributes['name'] == "csrf_ustvnow":
                csrf = input.attributes['value']

        log.debug("CSRF: {0}", csrf)

        r = http.post(self._login_url, data={'csrf_ustvnow': csrf,
                                             'signin_email': username,
                                             'signin_password': password,
                                             'signin_remember': '1'})
        m = self._token_re.search(r.text)
        return m and m.group(1)

    def _get_streams(self):
        """
        Finds the streams from ustvnow.com.
        """
        token = self.login(self.get_option("username"),
                           self.get_option("password"))
        m = self._url_re.match(self.url)
        # Station code from the URL takes precedence over the CLI option.
        scode = m and m.group("scode") or self.get_option("station_code")

        res = http.get(self._guide_url, params=dict(token=token))

        channels = OrderedDict()
        # Build a station-code -> channel-title map from the guide page.
        for t in itertags(res.text, "a"):
            if t.attributes.get('cs'):
                channels[t.attributes.get('cs').lower()] = t.attributes.get('title').replace("Watch ", "").strip()

        if not scode:
            log.error("Station code not provided, use --ustvnow-station-code.")
            log.info("Available stations are: \n{0} ".format('\n'.join('    {0} ({1})'.format(c, n) for c, n in channels.items())))
            return

        if scode in channels:
            log.debug("Finding streams for: {0}", channels.get(scode))

            r = http.get(self._stream_url, params={"scode": scode,
                                                   "token": token,
                                                   "br_n": "Firefox",
                                                   "br_v": "52",
                                                   "br_d": "desktop"},
                         headers={"User-Agent": useragents.FIREFOX})

            data = http.json(r)
            return HLSStream.parse_variant_playlist(self.session, data["stream"])
        else:
            log.error("Invalid station-code: {0}", scode)
class TwitCasting(Plugin):
    """Plugin for twitcasting.tv — resolves a single fMP4 websocket stream."""

    arguments = PluginArguments(
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="Password for private Twitcasting streams."))

    _STREAM_INFO_URL = "https://twitcasting.tv/streamserver.php?target={channel}&mode=client"
    _STREAM_REAL_URL = "{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}"
    _STREAM_INFO_SCHEMA = validate.Schema({
        "movie": {
            "id": int,
            "live": bool
        },
        "fmp4": {
            "host": validate.text,
            "proto": validate.text,
            "source": bool,
            "mobilesource": bool
        }
    })

    def __init__(self, url):
        super().__init__(url)
        self.channel = self.match.group("channel")
        self.session.http.headers.update({'User-Agent': useragents.CHROME})

    def _get_stream_info(self):
        """Query the stream server for the channel's current stream state."""
        info_url = self._STREAM_INFO_URL.format(channel=self.channel)
        response = self.session.http.get(info_url)
        return self.session.http.json(response,
                                      schema=self._STREAM_INFO_SCHEMA)

    def _get_streams(self):
        """Build the websocket stream URL and return it as a single stream."""
        stream_info = self._get_stream_info()
        log.debug("Live stream info: {}".format(stream_info))

        if not stream_info["movie"]["live"]:
            raise PluginError("The live stream is offline")

        # All keys below are guaranteed present by _STREAM_INFO_SCHEMA.
        fmp4 = stream_info["fmp4"]
        movie_id = stream_info["movie"]["id"]

        # Pick the best variant the server offers: main > mobilesource > base.
        if fmp4["source"]:
            mode = "main"
        elif fmp4["mobilesource"]:
            mode = "mobilesource"
        else:
            mode = "base"

        if (fmp4["proto"] == '') or (fmp4["host"] == '') or (not movie_id):
            raise PluginError("No stream available for user {}".format(
                self.channel))

        real_stream_url = self._STREAM_REAL_URL.format(
            proto=fmp4["proto"],
            host=fmp4["host"],
            movie_id=movie_id,
            mode=mode)

        password = self.options.get("password")
        if password is not None:
            # Private streams expect the MD5 of the password as "word".
            password_hash = hashlib.md5(password.encode()).hexdigest()
            real_stream_url = update_qsd(real_stream_url,
                                         {"word": password_hash})

        log.debug("Real stream url: {}".format(real_stream_url))

        return {
            mode: TwitCastingStream(session=self.session,
                                    url=real_stream_url)
        }
class FunimationNow(Plugin):
    """Streamlink plugin for videos on funimation.com."""

    arguments = PluginArguments(
        PluginArgument(
            "email",
            argument_name="funimation-email",
            requires=["password"],
            help="Email address for your Funimation account."),
        PluginArgument(
            "password",
            argument_name="funimation-password",
            sensitive=True,
            help="Password for your Funimation account."),
        PluginArgument(
            "language",
            argument_name="funimation-language",
            choices=["en", "ja", "english", "japanese"],
            default="english",
            help="""
            The audio language to use for the stream; japanese or english.

            Default is "english".
            """),
        PluginArgument("mux-subtitles", is_global=True))

    experience_id_re = re.compile(r"/player/(\d+)")
    # Quality label used for bare MP4 sources (no variant playlist).
    mp4_quality = "480p"

    def _get_streams(self):
        self.session.http.headers = {"User-Agent": useragents.CHROME}
        res = self.session.http.get(self.url)

        # remap en to english, and ja to japanese
        rlanguage = {
            "en": "english",
            "ja": "japanese"
        }.get(
            self.get_option("language").lower(),
            self.get_option("language").lower())

        if "_Incapsula_Resource" in res.text:
            log.error("This page is protected by Incapsula, please see "
                      "https://github.com/streamlink/streamlink/issues/2088"
                      " for a workaround.")
            return

        if "Out of Territory" in res.text:
            log.error(
                "The content requested is not available in your territory.")
            return

        id_m = self.experience_id_re.search(res.text)
        experience_id = id_m and int(id_m.group(1))
        if experience_id:
            log.debug(f"Found experience ID: {experience_id}")
            exp = Experience(self.session, experience_id)

            if self.get_option("email") and self.get_option("password"):
                if exp.login(self.get_option("email"),
                             self.get_option("password")):
                    log.info(
                        f"Logged in to Funimation as {self.get_option('email')}"
                    )
                else:
                    log.warning("Failed to login")

            if exp.episode_info:
                log.debug(f"Found episode: {exp.episode_info['episodeTitle']}")
                log.debug(
                    f" has languages: {', '.join(exp.episode_info['languages'].keys())}"
                )
                log.debug(f" requested language: {rlanguage}")
                log.debug(f" current language: {exp.language}")
                if rlanguage != exp.language:
                    log.debug(f"switching language to: {rlanguage}")
                    exp.set_language(rlanguage)
                    if exp.language != rlanguage:
                        log.warning(
                            f"Requested language {rlanguage} is not available, continuing with {exp.language}"
                        )
                    else:
                        log.debug(f"New experience ID: {exp.experience_id}")

            subtitles = None
            stream_metadata = {}
            disposition = {}
            for subtitle in exp.subtitles():
                log.debug(f"Subtitles: {subtitle['src']}")
                if subtitle["src"].endswith(
                        ".vtt") or subtitle["src"].endswith(".srt"):
                    sub_lang = Localization.get_language(
                        subtitle["language"]).alpha3
                    # pick the first suitable subtitle stream
                    subtitles = subtitles or HTTPStream(
                        self.session, subtitle["src"])
                    # NOTE(review): these assignments are placed inside the
                    # subtitle branch (sub_lang is only bound here) — confirm
                    # against upstream if in doubt.
                    stream_metadata["s:s:0"] = [
                        "language={0}".format(sub_lang)
                    ]
                    stream_metadata["s:a:0"] = [
                        "language={0}".format(exp.language_code)
                    ]

            sources = exp.sources()
            if 'errors' in sources:
                for error in sources['errors']:
                    log.error("{0} : {1}".format(error['title'],
                                                 error['detail']))
                return

            for item in sources["items"]:
                url = item["src"]
                if ".m3u8" in url:
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, url).items():
                        if self.get_option("mux_subtitles") and subtitles:
                            yield q, MuxedStream(self.session, s, subtitles,
                                                 metadata=stream_metadata,
                                                 disposition=disposition)
                        else:
                            yield q, s
                elif ".mp4" in url:
                    # TODO: fix quality
                    s = HTTPStream(self.session, url)
                    if self.get_option("mux_subtitles") and subtitles:
                        yield self.mp4_quality, MuxedStream(
                            self.session, s, subtitles,
                            metadata=stream_metadata,
                            disposition=disposition)
                    else:
                        yield self.mp4_quality, s
        else:
            log.error("Could not find experience ID?!")
class SVTPlay(Plugin):
    """Streamlink plugin for SVT Play (Swedish public TV) live channels
    and VODs, served as DASH with optional WebVTT subtitles."""

    api_url = 'https://api.svt.se/videoplayer-api/video/{0}'

    # Link to the latest episode on a program page (no id in the URL).
    latest_episode_url_re = re.compile(r'''
        data-rt="top-area-play-button"\s+href="(?P<url>[^"]+)"
    ''', re.VERBOSE)

    # Last path segment (before any query string) is the live channel id.
    live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')

    _video_schema = validate.Schema({
        validate.optional('programTitle'): validate.text,
        validate.optional('episodeTitle'): validate.text,
        'videoReferences': [{
            'url': validate.url(),
            'format': validate.text,
        }],
        validate.optional('subtitleReferences'): [{
            'url': validate.url(),
            'format': validate.text,
        }],
    })

    arguments = PluginArguments(PluginArgument("mux-subtitles", is_global=True))

    def _set_metadata(self, data, category):
        # Expose program/episode info as the plugin's stream metadata.
        if 'programTitle' in data:
            self.author = data['programTitle']

        self.category = category

        if 'episodeTitle' in data:
            self.title = data['episodeTitle']

    def _get_live(self, path):
        """Yield DASH streams for a live channel path."""
        match = self.live_id_re.search(path)
        if match is None:
            return

        live_id = "ch-{0}".format(match.group('live_id'))
        log.debug("Live ID={0}".format(live_id))
        res = self.session.http.get(self.api_url.format(live_id))
        api_data = self.session.http.json(res, schema=self._video_schema)

        self._set_metadata(api_data, 'Live')

        for playlist in api_data['videoReferences']:
            if playlist['format'] == 'dashhbbtv':
                yield from DASHStream.parse_manifest(self.session, playlist['url']).items()

    def _get_vod(self):
        """Yield DASH streams (optionally muxed with subtitles) for a VOD."""
        vod_id = self._get_vod_id(self.url)

        if vod_id is None:
            # No id in the URL: fall back to the latest episode linked
            # from the program page.
            res = self.session.http.get(self.url)
            match = self.latest_episode_url_re.search(res.text)
            if match is None:
                return
            vod_id = self._get_vod_id(match.group("url"))

        if vod_id is None:
            return

        log.debug("VOD ID={0}".format(vod_id))

        res = self.session.http.get(self.api_url.format(vod_id))
        api_data = self.session.http.json(res, schema=self._video_schema)

        self._set_metadata(api_data, 'VOD')

        substreams = {}
        if 'subtitleReferences' in api_data:
            for subtitle in api_data['subtitleReferences']:
                if subtitle['format'] == 'webvtt':
                    log.debug("Subtitle={0}".format(subtitle['url']))
                    substreams[subtitle['format']] = HTTPStream(
                        self.session,
                        subtitle['url'],
                    )

        for manifest in api_data['videoReferences']:
            if manifest['format'] == 'dashhbbtv':
                for q, s in DASHStream.parse_manifest(self.session, manifest['url']).items():
                    if self.get_option('mux_subtitles') and substreams:
                        yield q, MuxedStream(self.session, s, subtitles=substreams)
                    else:
                        yield q, s

    def _get_vod_id(self, url):
        # The VOD id lives in the "id" query-string parameter.
        qs = dict(parse_qsl(urlparse(url).query))
        return qs.get("id")

    def _get_streams(self):
        path, live = self.match.groups()
        log.debug("Path={0}".format(path))

        if live:
            return self._get_live(path)
        else:
            return self._get_vod()
class Twitch(Plugin):
    """Plugin for Twitch.tv live channels, videos (VODs) and clips."""

    arguments = PluginArguments(
        PluginArgument("disable-hosting",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is hosting another channel.
        """),
        PluginArgument("disable-ads",
                       action="store_true",
                       help="""
        Skip embedded advertisement segments at the beginning or during a stream.
        Will cause these segments to be missing from the stream.
        """),
        PluginArgument("disable-reruns",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is currently broadcasting a rerun.
        """),
        PluginArgument("low-latency",
                       action="store_true",
                       help=f"""
        Enables low latency streaming by prefetching HLS segments.
        Sets --hls-live-edge to {LOW_LATENCY_MAX_LIVE_EDGE}, if it is higher.
        Reducing it to 1 will result in the lowest latency possible, but will most likely cause buffering.

        In order to achieve true low latency streaming during playback, the player's caching/buffering settings will
        need to be adjusted and reduced to a value as low as possible, but still high enough to not cause any buffering.
        This depends on the stream's bitrate and the quality of the connection to Twitch's servers. Please refer to the
        player's own documentation for the required configuration. Player parameters can be set via --player-args.

        Note: Low latency streams have to be enabled by the broadcasters on Twitch themselves.
        Regular streams can cause buffering issues with this option enabled due to the reduced --hls-live-edge value.
        """))

    def __init__(self, url):
        super().__init__(url)
        match = self.match.groupdict()
        parsed = urlparse(url)
        self.params = parse_qsd(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self.channel = None
        self.clip_name = None

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                self.video_id = self.params["video"]
            self.channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            # Channel names are case-insensitive; normalize to lowercase.
            self.channel = match.get("channel") and match.get("channel").lower()
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(session=self.session)
        self.usher = UsherService(session=self.session)

    def get_title(self):
        # Metadata is fetched lazily on first access.
        if self.title is None:
            self._get_metadata()
        return self.title

    def get_author(self):
        if self.author is None:
            self._get_metadata()
        return self.author

    def get_category(self):
        if self.category is None:
            self._get_metadata()
        return self.category

    def _get_metadata(self):
        """Best-effort metadata lookup; failures leave the fields as None."""
        try:
            if self.video_id:
                (self.author, self.title, self.category) = self.api.metadata_video(self.video_id)
            elif self.clip_name:
                self._get_clips()
            elif self.channel:
                (self.author, self.title, self.category) = self.api.metadata_channel(self.channel)
        except (PluginError, TypeError):
            pass

    def _access_token(self, is_live, channel_or_vod):
        """Return (sig, token, restricted_bitrates) or raise NoStreamsError."""
        try:
            sig, token = self.api.access_token(is_live, channel_or_vod)
        except (PluginError, TypeError):
            raise NoStreamsError(self.url)
        try:
            restricted_bitrates = self.api.parse_token(token)
        except PluginError:
            # A token that can't be parsed just means no restricted qualities.
            restricted_bitrates = []
        return sig, token, restricted_bitrates

    def _switch_to_hosted_channel(self):
        """Follow host redirects; return True if streaming should be aborted.

        Keeps a chain of visited channels to detect hosting loops.
        """
        disabled = self.options.get("disable_hosting")
        hosted_chain = [self.channel]
        while True:
            try:
                login, display_name = self.api.hosted_channel(self.channel)
            except PluginError:
                # Not hosting: proceed with the current channel.
                return False
            log.info("{0} is hosting {1}".format(self.channel, login))
            if disabled:
                log.info("hosting was disabled by command line option")
                return True
            if login in hosted_chain:
                loop = " -> ".join(hosted_chain + [login])
                log.error(
                    "A loop of hosted channels has been detected, cannot find a playable stream. ({0})"
                    .format(loop))
                return True
            hosted_chain.append(login)
            log.info("switching to {0}".format(login))
            self.channel = login
            self.author = display_name

    def _check_for_rerun(self):
        """Return True if reruns are disabled and the channel is broadcasting one."""
        if not self.options.get("disable_reruns"):
            return False
        try:
            stream = self.api.stream_metadata(self.channel)
            if stream["type"] != "live":
                log.info("Reruns were disabled by command line option")
                return True
        except (PluginError, TypeError):
            pass
        return False

    def _get_hls_streams_live(self):
        if self._switch_to_hosted_channel():
            return
        if self._check_for_rerun():
            return
        # only get the token once the channel has been resolved
        log.debug(f"Getting live HLS streams for {self.channel}")
        self.session.http.headers.update({
            "referer": "https://player.twitch.tv",
            "origin": "https://player.twitch.tv",
        })
        sig, token, restricted_bitrates = self._access_token(True, self.channel)
        url = self.usher.channel(self.channel, sig=sig, token=token, fast_bread=True)
        return self._get_hls_streams(url, restricted_bitrates)

    def _get_hls_streams_video(self):
        log.debug(f"Getting HLS streams for video ID {self.video_id}")
        sig, token, restricted_bitrates = self._access_token(False, self.video_id)
        url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)

        # If the stream is a VOD that is still being recorded, the stream should start at the beginning of the recording
        return self._get_hls_streams(url, restricted_bitrates, force_restart=True)

    def _get_hls_streams(self, url, restricted_bitrates, **extra_params):
        """Parse the variant playlist at *url*; honor a ?t= time offset."""
        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0
        try:
            streams = TwitchHLSStream.parse_variant_playlist(
                self.session, url, start_offset=time_offset, **extra_params)
        except OSError as err:
            err = str(err)
            # 404 / unparsable playlist means the stream is simply unavailable.
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                return
            else:
                raise PluginError(err)

        for name in restricted_bitrates:
            if name not in streams:
                log.warning(
                    "The quality '{0}' is not available since it requires a subscription."
                    .format(name))

        return streams

    def _get_clips(self):
        # api.clips also yields metadata, which is assigned as a side effect here.
        try:
            (((sig, token), streams), (self.author, self.category), self.title) = self.api.clips(self.clip_name)
        except (PluginError, TypeError):
            return
        for quality, stream in streams:
            yield quality, HTTPStream(
                self.session,
                update_qsd(stream, {
                    "sig": sig,
                    "token": token
                }))

    def _get_streams(self):
        if self.video_id:
            return self._get_hls_streams_video()
        elif self.clip_name:
            return self._get_clips()
        elif self.channel:
            return self._get_hls_streams_live()
class Twitch(Plugin):
    """Plugin for Twitch.tv (variant resolving channel/channel_id lazily via properties)."""

    arguments = PluginArguments(
        PluginArgument("oauth-token",
                       sensitive=True,
                       metavar="TOKEN",
                       help="""
        An OAuth token to use for Twitch authentication.
        """),
        PluginArgument("disable-hosting",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is hosting another channel.
        """),
        PluginArgument("disable-ads",
                       action="store_true",
                       help="""
        Skip embedded advertisement segments at the beginning or during a stream.
        Will cause these segments to be missing from the stream.
        """),
        PluginArgument("disable-reruns",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is currently broadcasting a rerun.
        """),
        PluginArgument("low-latency",
                       action="store_true",
                       help="""
        Enables low latency streaming by prefetching HLS segments.
        Sets --hls-segment-stream-data to true and --hls-live-edge to {live_edge}, if it is higher.
        Reducing --hls-live-edge to 1 will result in the lowest latency possible.

        Low latency streams have to be enabled by the broadcasters on Twitch themselves.
        Regular streams can cause buffering issues with this option enabled.

        Note: The caching/buffering settings of the chosen player may need to be adjusted as well.
        Please refer to the player's own documentation for the required parameters and its configuration.
        Player parameters can be set via Streamlink's --player or --player-args parameters.
        """.format(live_edge=LOW_LATENCY_MAX_LIVE_EDGE)))

    # Matches channel pages, VOD pages ("videos/<id>" and "<channel>/video/<id>")
    # and clip share URLs.
    _re_url = re.compile(
        r"""
        https?://(?:(?P<subdomain>[\w\-]+)\.)?twitch\.tv/
        (?:
            videos/(?P<videos_id>\d+)
            |
            (?P<channel>[^/]+)
            (?:
                /video/(?P<video_id>\d+)
                |
                /clip/(?P<clip_name>[\w]+)
            )?
        )
        """, re.VERBOSE)

    @classmethod
    def can_handle_url(cls, url):
        return cls._re_url.match(url)

    def __init__(self, url):
        super().__init__(url)
        match = self._re_url.match(url).groupdict()
        parsed = urlparse(url)
        self.params = parse_query(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self._channel_id = None
        self._channel = None
        self.clip_name = None
        self.title = None
        self.author = None
        self.category = None

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                self.video_id = self.params["video"]
            self._channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            # Channel names are case-insensitive; normalize to lowercase.
            self._channel = match.get("channel") and match.get("channel").lower()
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(session=self.session)
        self.usher = UsherService(session=self.session)

    def get_title(self):
        # Metadata is fetched lazily on first access.
        if self.title is None:
            self._get_metadata()
        return self.title

    def get_author(self):
        if self.author is None:
            self._get_metadata()
        return self.author

    def get_category(self):
        if self.category is None:
            self._get_metadata()
        return self.category

    def _get_metadata(self):
        if self.video_id:
            (self.author, self.title, self.category) = self.api.metadata_video(self.video_id)
        elif self.clip_name:
            self._get_clips()
        elif self._channel:
            (self.author, self.title, self.category) = self.api.metadata_channel(self.channel_id)

    @property
    def channel(self):
        """Channel login name, resolved from the video ID if necessary."""
        if not self._channel:
            if self.video_id:
                self._channel_from_video_id(self.video_id)
        return self._channel

    @property
    def channel_id(self):
        """Numeric channel ID, resolved from the login name or video ID."""
        if not self._channel_id:
            if self._channel:
                self._channel_from_login(self._channel)
            elif self.video_id:
                self._channel_from_video_id(self.video_id)
        return self._channel_id

    def _channel_from_video_id(self, video_id):
        # Resolves both the channel ID and login name in one API call.
        try:
            self._channel_id, self._channel = self.api.channel_from_video_id(video_id)
        except PluginError:
            raise PluginError("Unable to find video: {0}".format(video_id))

    def _channel_from_login(self, channel):
        try:
            self._channel_id = self.api.channel_from_login(channel)
        except PluginError:
            raise PluginError("Unable to find channel: {0}".format(channel))

    def _access_token(self, is_live, channel_or_vod):
        """Return (sig, token, restricted_bitrates); 404 means no streams."""
        try:
            sig, token = self.api.access_token(is_live, channel_or_vod)
        except PluginError as err:
            if "404 Client Error" in str(err):
                raise NoStreamsError(self.url)
            else:
                raise
        try:
            restricted_bitrates = self.api.parse_token(token)
        except PluginError:
            restricted_bitrates = []
        return sig, token, restricted_bitrates

    def _switch_to_hosted_channel(self):
        """Follow host redirects; return True if streaming should be aborted."""
        disabled = self.options.get("disable_hosting")
        hosted_chain = [self.channel]
        while True:
            try:
                host_id, target_id, login, display_name = self.api.hosted_channel(self.channel_id)
            except PluginError:
                # Not hosting: proceed with the current channel.
                return False
            log.info("{0} is hosting {1}".format(self.channel, login))
            if disabled:
                log.info("hosting was disabled by command line option")
                return True
            if login in hosted_chain:
                loop = " -> ".join(hosted_chain + [login])
                log.error(
                    "A loop of hosted channels has been detected, cannot find a playable stream. ({0})"
                    .format(loop))
                return True
            hosted_chain.append(login)
            log.info("switching to {0}".format(login))
            self._channel_id = target_id
            self._channel = login
            self.author = display_name

    def _check_for_rerun(self):
        """Return True if reruns are disabled and the channel is broadcasting one."""
        if not self.options.get("disable_reruns"):
            return False
        try:
            stream = self.api.stream_metadata(self.channel)
            if stream["type"] != "live":
                log.info("Reruns were disabled by command line option")
                return True
        except (PluginError, TypeError):
            pass
        return False

    def _get_hls_streams_live(self):
        if self._switch_to_hosted_channel():
            return
        if self._check_for_rerun():
            return
        # only get the token once the channel has been resolved
        log.debug("Getting live HLS streams for {0}".format(self.channel))
        self.session.http.headers.update({
            "referer": "https://player.twitch.tv",
            "origin": "https://player.twitch.tv",
        })
        sig, token, restricted_bitrates = self._access_token(True, self.channel)
        url = self.usher.channel(self.channel, sig=sig, token=token, fast_bread=True)
        return self._get_hls_streams(url, restricted_bitrates)

    def _get_hls_streams_video(self):
        log.debug("Getting video HLS streams for {0}".format(self.channel))
        sig, token, restricted_bitrates = self._access_token(False, self.video_id)
        url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)

        # If the stream is a VOD that is still being recorded, the stream should start at the beginning of the recording
        return self._get_hls_streams(url, restricted_bitrates, force_restart=True)

    def _get_hls_streams(self, url, restricted_bitrates, **extra_params):
        """Parse the variant playlist at *url*; honor a ?t= time offset."""
        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0
        try:
            streams = TwitchHLSStream.parse_variant_playlist(
                self.session, url, start_offset=time_offset, **extra_params)
        except OSError as err:
            err = str(err)
            # 404 / unparsable playlist means the stream is simply unavailable.
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                return
            else:
                raise PluginError(err)

        for name in restricted_bitrates:
            if name not in streams:
                log.warning(
                    "The quality '{0}' is not available since it requires a subscription."
                    .format(name))

        return streams

    def _get_clips(self):
        # api.clips also yields metadata, which is assigned as a side effect here.
        try:
            (self.author, self.title, self.category, streams) = self.api.clips(self.clip_name)
        except (PluginError, TypeError):
            return
        for quality, stream in streams:
            yield quality, HTTPStream(self.session, stream)

    def _get_streams(self):
        if self.video_id:
            return self._get_hls_streams_video()
        elif self.clip_name:
            return self._get_clips()
        elif self._channel:
            return self._get_hls_streams_live()
class UStreamTV(Plugin):
    """Plugin for UStream.tv live channels and recorded videos via the UHS API."""

    url_re = re.compile(r"""(?x)
        https?://(www\.)?ustream\.tv
        (?:
            (/embed/|/channel/id/)(?P<channel_id>\d+)
        )?
        (?:
            (/embed)?/recorded/(?P<video_id>\d+)
        )?
    """)
    # Fallback: channel ID embedded in the page's meta tags.
    media_id_re = re.compile(r'"ustream:channel_id"\s+content\s*=\s*"(\d+)"')
    arguments = PluginArguments(
        PluginArgument("password",
                       argument_name="ustream-password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
        A password to access password protected UStream.tv channels.
        """))
    # "original" quality always sorts above any resolution-named quality.
    STREAM_WEIGHTS = {
        "original": 65535,
    }

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    @classmethod
    def stream_weight(cls, stream):
        if stream in cls.STREAM_WEIGHTS:
            return cls.STREAM_WEIGHTS[stream], "ustreamtv"
        return Plugin.stream_weight(stream)

    def handle_module_info(self, args):
        """Extract the CDN base URL and segmented-FLV stream list from moduleInfo args.

        :raises PluginError: for stream formats other than flv/segmented.
        :raises ModuleInfoNoStreams: when the content is reported offline.
        """
        res = {}
        for arg in args:
            if "cdnConfig" in arg:
                parts = [
                    # scheme
                    arg["cdnConfig"]["protocol"],
                    # netloc
                    arg["cdnConfig"]["data"][0]["data"][0]["sites"][0]["host"],
                    # path
                    arg["cdnConfig"]["data"][0]["data"][0]["sites"][0]["path"],
                    "", "", "",  # params, query, fragment
                ]
                # Example:
                # LIVE: http://uhs-akamai.ustream.tv/
                # VOD: http://vod-cdn.ustream.tv/
                res["cdn_url"] = urlunparse(parts)
            if "stream" in arg and bool(arg["stream"].get("streamFormats")):
                data = arg["stream"]
                if data["streamFormats"].get("flv/segmented"):
                    flv_segmented = data["streamFormats"]["flv/segmented"]
                    path = flv_segmented["contentAccess"]["accessList"][0]["data"]["path"]
                    res["streams"] = []
                    for stream in flv_segmented["streams"]:
                        res["streams"] += [
                            dict(
                                stream_name="{0}p".format(stream["videoCodec"]["height"]),
                                # "%" in the segment URL is a placeholder for the chunk ID.
                                path=urljoin(path, stream["segmentUrl"].replace("%", "%s")),
                                hashes=flv_segmented["hashes"],
                                first_chunk=flv_segmented["chunkId"],
                                chunk_time=flv_segmented["chunkTime"],
                            )
                        ]
                elif bool(data["streamFormats"]):
                    # supported formats:
                    #   - flv/segmented
                    # unsupported formats:
                    #   - flv
                    #   - mp4
                    #   - mp4/segmented
                    raise PluginError(
                        "Stream format is not supported: {0}".format(", ".join(
                            data["streamFormats"].keys())))
            elif "stream" in arg and arg["stream"]["contentAvailable"] is False:
                log.error("This stream is currently offline")
                raise ModuleInfoNoStreams
        return res

    def handle_reject(self, api, args):
        """Handle a "reject" command: reconnect hints or terminal errors."""
        for arg in args:
            if "cluster" in arg:
                api.cluster = arg["cluster"]["name"]
            if "referrerLock" in arg:
                api.referrer = arg["referrerLock"]["redirectUrl"]
            if "nonexistent" in arg:
                log.error("This channel does not exist")
                raise ModuleInfoNoStreams
            if "geoLock" in arg:
                log.error("This content is not available in your area")
                raise ModuleInfoNoStreams

    def _get_streams(self):
        media_id, application = self._get_media_app()
        if media_id:
            api = UHSClient(media_id,
                            application,
                            referrer=self.url,
                            cluster="live",
                            password=self.get_option("password"))
            log.debug(
                "Connecting to UStream API: media_id={0}, application={1}, referrer={2}, cluster={3}",
                media_id, application, self.url, "live")
            api.connect()
            streams_data = {}
            for _ in range(5):
                # do not use to many tries, it might take longer for a timeout
                # when streamFormats is {} and contentAvailable is True
                data = api.recv()
                try:
                    if data["cmd"] == "moduleInfo":
                        r = self.handle_module_info(data["args"])
                        if r:
                            streams_data.update(r)
                    elif data["cmd"] == "reject":
                        self.handle_reject(api, data["args"])
                    else:
                        log.debug("Unexpected `{0}` command".format(data["cmd"]))
                        log.trace("{0!r}".format(data))
                except ModuleInfoNoStreams:
                    break
                if streams_data.get("streams") and streams_data.get("cdn_url"):
                    for s in sorted(streams_data["streams"],
                                    key=lambda k: (k["stream_name"], k["path"])):
                        yield s["stream_name"], UHSStream(
                            session=self.session,
                            api=api,
                            first_chunk_data=ChunkData(
                                s["first_chunk"], s["chunk_time"],
                                s["hashes"], datetime.datetime.now(tz=utc)),
                            template_url=urljoin(streams_data["cdn_url"], s["path"]),
                        )
                    break

    def _get_media_app(self):
        """Return (media_id, application) from the URL or, failing that, the page."""
        umatch = self.url_re.match(self.url)
        application = "channel"
        channel_id = umatch.group("channel_id")
        video_id = umatch.group("video_id")
        if channel_id:
            application = "channel"
            media_id = channel_id
        elif video_id:
            application = "recorded"
            media_id = video_id
        else:
            # No ID in the URL: scrape the channel ID from the page's meta tags.
            res = self.session.http.get(
                self.url, headers={"User-Agent": useragents.CHROME})
            m = self.media_id_re.search(res.text)
            media_id = m and m.group(1)
        return media_id, application
class BBCiPlayer(Plugin):
    """
    Allows streaming of live channels from bbc.co.uk/iplayer/live/* and of
    iPlayer programmes from bbc.co.uk/iplayer/episode/*
    """
    url_re = re.compile(r"""https?://(?:www\.)?bbc.co.uk/iplayer/
        (
            episode/(?P<episode_id>\w+)|
            live/(?P<channel_name>\w+)
        )
    """, re.VERBOSE)
    # Redux state blob embedded in the page; non-greedy variant for vpid lookup.
    mediator_re = re.compile(r'window\.__IPLAYER_REDUX_STATE__\s*=\s*({.*?});',
                             re.DOTALL)
    state_re = re.compile(r'window.__IPLAYER_REDUX_STATE__\s*=\s*({.*});')
    account_locals_re = re.compile(r'window.bbcAccount.locals\s*=\s*({.*?});')
    # Secret used to salt the vpid hash for mediaselector API calls.
    hash = base64.b64decode(
        b"N2RmZjc2NzFkMGM2OTdmZWRiMWQ5MDVkOWExMjE3MTk5MzhiOTJiZg==")
    api_url = "https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/" \
              "{platform}/vpid/{vpid}/format/json/atk/{vpid_hash}/asn/1/"
    platforms = ("pc", "iptv-all")
    session_url = "https://session.bbc.com/session"
    auth_url = "https://account.bbc.com/signin"

    mediator_schema = validate.Schema({"versions": [{
        "id": validate.text
    }]}, validate.get("versions"), validate.get(0), validate.get("id"))
    mediaselector_schema = validate.Schema(
        validate.transform(parse_json),
        {
            "media": [{
                "connection": validate.all(
                    [{
                        validate.optional("href"): validate.url(),
                        validate.optional("transferFormat"): validate.text
                    }],
                    validate.filter(lambda c: c.get("href"))),
                "kind": validate.text
            }]
        },
        validate.get("media"),
        validate.filter(lambda x: x["kind"] == "video"))

    arguments = PluginArguments(
        PluginArgument("username",
                       requires=["password"],
                       metavar="USERNAME",
                       help="The username used to register with bbc.co.uk."),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A bbc.co.uk account password to use with --bbciplayer-username.",
            prompt="Enter bbc.co.uk account password"),
        PluginArgument("hd",
                       action="store_true",
                       help="""
        Prefer HD streams over local SD streams, some live programmes may not be broadcast in HD.
        """),
    )

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    @classmethod
    def _hash_vpid(cls, vpid):
        """Return the salted SHA-1 hex digest required by the mediaselector API."""
        return sha1(cls.hash + str(vpid).encode("utf8")).hexdigest()

    def find_vpid(self, url, res=None):
        """
        Find the Video Packet ID in the HTML for the provided URL

        :param url: URL to download, if res is not provided.
        :param res: Provide a cached version of the HTTP response to search
        :type url: string
        :type res: requests.Response
        :return: Video Packet ID for a Programme in iPlayer
        :rtype: string
        """
        log.debug("Looking for vpid on {0}".format(url))
        # Use pre-fetched page if available
        res = res or self.session.http.get(url)
        m = self.mediator_re.search(res.text)
        vpid = m and parse_json(m.group(1), schema=self.mediator_schema)
        return vpid

    def find_tvip(self, url, master=False):
        """Return the channel's TVIP (or master brand), or None if not found."""
        log.debug("Looking for {0} tvip on {1}".format(
            "master" if master else "", url))
        res = self.session.http.get(url)
        m = self.state_re.search(res.text)
        data = m and parse_json(m.group(1))
        if data:
            channel = data.get("channel")
            if master:
                return channel.get("masterBrand")
            return channel.get("id")

    def mediaselector(self, vpid):
        """Yield (quality, stream) pairs for *vpid* across all configured platforms."""
        urls = defaultdict(set)
        for platform in self.platforms:
            url = self.api_url.format(vpid=vpid,
                                      vpid_hash=self._hash_vpid(vpid),
                                      platform=platform)
            log.debug("Info API request: {0}".format(url))
            medias = self.session.http.get(url, schema=self.mediaselector_schema)
            for media in medias:
                for connection in media["connection"]:
                    urls[connection.get("transferFormat")].add(connection["href"])

        # NOTE: renamed the loop variable so it no longer shadows the `urls` dict.
        for stream_type, stream_urls in urls.items():
            log.debug("{0} {1} streams".format(len(stream_urls), stream_type))
            for url in list(stream_urls):
                try:
                    if stream_type == "hds":
                        for s in HDSStream.parse_manifest(self.session, url).items():
                            yield s
                    if stream_type == "hls":
                        for s in HLSStream.parse_variant_playlist(self.session, url).items():
                            yield s
                    if stream_type == "dash":
                        for s in DASHStream.parse_manifest(self.session, url).items():
                            yield s
                    log.debug("  OK:   {0}".format(url))
                except Exception:
                    log.debug("  FAIL: {0}".format(url))

    def login(self, ptrt_url):
        """
        Create session using BBC ID. See https://www.bbc.co.uk/usingthebbc/account/

        :param ptrt_url: The snapback URL to redirect to after successful authentication
        :type ptrt_url: string
        :return: Whether authentication was successful
        :rtype: bool
        """
        def auth_check(res):
            return ptrt_url in ([h.url for h in res.history] + [res.url])

        # make the session request to get the correct cookies
        session_res = self.session.http.get(self.session_url,
                                            params=dict(ptrt=ptrt_url))
        if auth_check(session_res):
            log.debug("Already authenticated, skipping authentication")
            return True

        res = self.session.http.post(self.auth_url,
                                     params=urlparse(session_res.url).query,
                                     data=dict(
                                         jsEnabled=True,
                                         username=self.get_option("username"),
                                         password=self.get_option('password'),
                                         attempts=0),
                                     headers={"Referer": self.url})
        return auth_check(res)

    def _get_streams(self):
        if not self.get_option("username"):
            log.error("BBC iPlayer requires an account you must login using "
                      "--bbciplayer-username and --bbciplayer-password")
            return
        log.info(
            "A TV License is required to watch BBC iPlayer streams, see the BBC website for more "
            "information: https://www.bbc.co.uk/iplayer/help/tvlicence")
        if not self.login(self.url):
            log.error("Could not authenticate, check your username and password")
            return

        m = self.url_re.match(self.url)
        episode_id = m.group("episode_id")
        channel_name = m.group("channel_name")

        if episode_id:
            log.debug("Loading streams for episode: {0}".format(episode_id))
            vpid = self.find_vpid(self.url)
            if vpid:
                log.debug("Found VPID: {0}".format(vpid))
                for s in self.mediaselector(vpid):
                    yield s
            else:
                log.error("Could not find VPID for episode {0}".format(episode_id))
        elif channel_name:
            log.debug("Loading stream for live channel: {0}".format(channel_name))
            if self.get_option("hd"):
                # FIX: find_tvip() can return None (no masterBrand in the page
                # state); the original unconditionally appended "_hd" and raised
                # TypeError. Guard first, then fall through to the SD lookup.
                master_brand = self.find_tvip(self.url, master=True)
                if master_brand:
                    tvip = master_brand + "_hd"
                    log.debug("Trying HD stream {0}...".format(tvip))
                    try:
                        for s in self.mediaselector(tvip):
                            yield s
                    except PluginError:
                        log.error("Failed to get HD streams, falling back to SD")
                    else:
                        # HD worked; don't also yield the SD streams.
                        return
            tvip = self.find_tvip(self.url)
            if tvip:
                log.debug("Found TVIP: {0}".format(tvip))
                for s in self.mediaselector(tvip):
                    yield s
class Pixiv(Plugin):
    """Plugin for https://sketch.pixiv.net/lives"""
    _url_re = re.compile(r"https?://sketch\.pixiv\.net/@?(?P<user>[^/]+)")
    # CSRF post_key hidden input on the login page.
    _post_key_re = re.compile(
        r"""name=["']post_key["']\svalue=["'](?P<data>[^"']+)["']""")

    # Schema for a single broadcaster entry; hls_movie is absent for users
    # that are listed but not yet streaming.
    _user_dict_schema = validate.Schema(
        {
            "user": {
                "unique_name": validate.text,
                "name": validate.all(validate.text,
                                     validate.transform(maybe_decode))
            },
            validate.optional("hls_movie"): {
                "url": validate.text
            }
        }
    )

    # NOTE(review): performers are validated as "dict or None", so entries in
    # the performers list may be None — confirm downstream handling.
    _user_schema = validate.Schema(
        {
            "owner": _user_dict_schema,
            "performers": [
                validate.any(_user_dict_schema, None)
            ]
        }
    )

    _data_lives_schema = validate.Schema(
        {
            "data": {
                "lives": [_user_schema]
            }
        },
        validate.get("data"),
        validate.get("lives")
    )

    api_lives = "https://sketch.pixiv.net/api/lives.json"
    login_url_get = "https://accounts.pixiv.net/login"
    login_url_post = "https://accounts.pixiv.net/api/login"

    arguments = PluginArguments(
        # username/password login is no longer supported; the options are kept
        # hidden for backwards compatibility.
        PluginArgument("username", help=argparse.SUPPRESS),
        PluginArgument("password", help=argparse.SUPPRESS),
        PluginArgument(
            "sessionid",
            requires=["devicetoken"],
            sensitive=True,
            metavar="SESSIONID",
            help="""
            The pixiv.net sessionid that's used in pixivs PHPSESSID cookie.
            can be used instead of the username/password login process.
            """
        ),
        PluginArgument(
            "devicetoken",
            sensitive=True,
            metavar="DEVICETOKEN",
            help="""
            The pixiv.net device token that's used in pixivs device_token cookie.
            can be used instead of the username/password login process.
            """
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached Pixiv credentials to initiate a new session
            and reauthenticate.
            """),
        PluginArgument(
            "performer",
            metavar="USER",
            help="""
            Select a co-host stream instead of the owner stream.
            """)
    )

    def __init__(self, url):
        super(Pixiv, self).__init__(url)
        # Consider the session authenticated if both auth cookies are cached.
        self._authed = (self.session.http.cookies.get("PHPSESSID")
                        and self.session.http.cookies.get("device_token"))
        self.session.http.headers.update({
            "User-Agent": useragents.FIREFOX,
            "Referer": self.url
        })

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _login_using_session_id_and_device_token(self, session_id, device_token):
        """Authenticate by injecting user-supplied session cookies and persist them."""
        self.session.http.get(self.login_url_get)

        self.session.http.cookies.set('PHPSESSID',
                                      session_id,
                                      domain='.pixiv.net',
                                      path='/')
        self.session.http.cookies.set('device_token',
                                      device_token,
                                      domain='.pixiv.net',
                                      path='/')

        self.save_cookies()
        log.info("Successfully set sessionId and deviceToken")

    def hls_stream(self, hls_url):
        """Yield variant HLS streams for *hls_url*."""
        log.debug("URL={0}".format(hls_url))
        for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
            yield s

    def get_streamer_data(self):
        """Return the live entry owned by the user from the URL.

        :raises NoStreamsError: if the user is not in the live list.
        """
        res = self.session.http.get(self.api_lives)
        data = self.session.http.json(res, schema=self._data_lives_schema)
        log.debug("Found {0} streams".format(len(data)))

        m = self._url_re.match(self.url)
        for item in data:
            if item["owner"]["user"]["unique_name"] == m.group("user"):
                return item

        raise NoStreamsError(self.url)

    def _get_streams(self):
        login_session_id = self.get_option("sessionid")
        login_device_token = self.get_option("devicetoken")

        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed.")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif not self._authed and login_session_id and login_device_token:
            self._login_using_session_id_and_device_token(login_session_id,
                                                          login_device_token)

        streamer_data = self.get_streamer_data()
        performers = streamer_data.get("performers")
        log.trace("{0!r}".format(streamer_data))
        if performers:
            co_hosts = []
            # create a list of all available performers
            for p in performers:
                co_hosts += [(p["user"]["unique_name"], p["user"]["name"])]

            log.info("Available hosts: {0}".format(", ".join(
                ["{0} ({1})".format(k, v) for k, v in co_hosts])))

            # control if the host from --pixiv-performer is valid,
            # if not let the User select a different host
            if (self.get_option("performer")
                    and not self.get_option("performer") in [v[0] for v in co_hosts]):

                # print the owner as 0
                log.info("0 - {0} ({1})".format(
                    streamer_data["owner"]["user"]["unique_name"],
                    streamer_data["owner"]["user"]["name"]))
                # print all other performer
                for i, item in enumerate(co_hosts, start=1):
                    log.info("{0} - {1} ({2})".format(i, item[0], item[1]))

                try:
                    number = int(self.input_ask(
                        "Enter the number you'd like to watch").split(" ")[0])
                    if number == 0:
                        # default stream
                        self.set_option("performer", None)
                    else:
                        # other co-hosts
                        self.set_option("performer", co_hosts[number - 1][0])
                except FatalPluginError:
                    raise PluginError("Selected performer is invalid.")
                except (IndexError, ValueError, TypeError):
                    raise PluginError("Input is invalid")

        # ignore the owner stream, if a performer is selected
        # or use it when there are no other performers
        if not self.get_option("performer") or not performers:
            return self.hls_stream(streamer_data["owner"]["hls_movie"]["url"])

        # play a co-host stream
        if performers and self.get_option("performer"):
            for p in performers:
                if p["user"]["unique_name"] == self.get_option("performer"):
                    # if someone goes online at the same time as Streamlink
                    # was used, the hls URL might not be in the JSON data
                    hls_movie = p.get("hls_movie")
                    if hls_movie:
                        return self.hls_stream(hls_movie["url"])
class Bilibili(Plugin):
    """Plugin for Bilibili live rooms, optionally routing through a proxy API host."""

    arguments = PluginArguments(
        PluginArgument(
            "apihost",
            metavar="APIHOST",
            default=API_HOST,
            help="Use custom api host url to bypass bilibili's cloud blocking"
        ),
        PluginArgument("lowquality",
                       metavar="LOWQN",
                       default=False,
                       help="Use low quality"),
    )

    @classmethod
    def can_handle_url(cls, url):
        # FIX: first parameter of a @classmethod is the class, so name it cls.
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        if stream in STREAM_WEIGHTS:
            return STREAM_WEIGHTS[stream], "Bilibili"
        return Plugin.stream_weight(stream)

    def update_playlist(self):
        """Yield a playable stream URL for the current room, trying CDN rewrites.

        Requires self.room_id to be set (done by _get_streams).
        """
        params = {
            'cid': self.room_id,
            # NOTE(review): "apihost" has a default (API_HOST), so this condition
            # appears to always pick '0' — confirm intended qn selection.
            'qn': '20000' if not self.options.get("apihost") else '0',
            'quality': 10000,
            'platform': 'h5',
        }
        res = self.session.http.get(self.options.get("apihost") + API_URL,
                                    params=params)
        log.debug(res.json())
        room = self.session.http.json(res, schema=_room_stream_list_schema)
        if not room:
            return
        _url = None
        onlyQiniu = False
        urls = list(room["durl"])
        urls.sort(key=lambda x: x['url'], reverse=True)  # make gotcha01 in the front
        for stream_list in urls:
            onlyQiniu = False
            _url = stream_list["url"]
            if 'd1--cn-gotcha01.bilivideo.com' in _url:
                # Resolve the real stream location through the proxy endpoint.
                newurl = _url.replace(
                    "d1--cn-gotcha01.bilivideo.com/",
                    "a5mh2a1mc3ghf.cfc-execute.gz.baidubce.com/101/")
                newurl = re.sub(r'src=[0-9]+?&', 'src=9&', newurl)
                log.debug(newurl)
                r = self.session.http.get(newurl,
                                          retries=0,
                                          timeout=15,
                                          acceptable_status=(301, 302),
                                          allow_redirects=False)
                if r.status_code == 302:
                    _url = r.headers.get("Location", _url)
                else:
                    new_url = r.text.splitlines()[-1]
                    if new_url.startswith("http"):
                        _url = new_url
            url = _url
            if "d1--cn-gotcha104.bilivideo.com" in url:
                url = url.replace(
                    "d1--cn-gotcha104.bilivideo.com/",
                    "a5mh2a1mc3ghf.cfc-execute.gz.baidubce.com/104/")
            if "d1--cn-gotcha105.bilivideo.com" in url:
                # FIX: the original replaced the gotcha104 host here (copy-paste
                # error), making this branch a no-op. Replace the gotcha105 host.
                # NOTE(review): the proxy path is kept as /104/ as originally
                # written — confirm whether a /105/ endpoint is intended.
                url = url.replace(
                    "d1--cn-gotcha105.bilivideo.com/",
                    "a5mh2a1mc3ghf.cfc-execute.gz.baidubce.com/104/")
            if ("d1--cn-gotcha103.bilivideo.com" in url
                    or "d1--cn-gotcha108.bilivideo.com" in url):
                # Qiniu-backed CDNs are not probed; remember and try other entries.
                onlyQiniu = True
                continue
            # check if the URL is available
            log.trace('URL={0}'.format(url))
            r = self.session.http.get(url,
                                      retries=0,
                                      timeout=15,
                                      stream=True,
                                      acceptable_status=(200, 403, 404, 405))
            p = urlparse(url)
            if r.status_code != 200:
                log.error('Netloc: {0} with error {1}'.format(
                    p.netloc, r.status_code))
                continue
            log.debug('Netloc: {0}'.format(p.netloc))
            yield _url.replace("https://", "http://")
            break
        if onlyQiniu and _url is not None:
            # Only Qiniu CDNs were offered; nothing playable was verified.
            return

    def _get_streams(self):
        self.session.http.headers.update({
            'User-Agent': useragents.FIREFOX,
            'Referer': self.url
        })
        match = _url_re.match(self.url)
        channel = match.group("channel")
        res_room_id = self.session.http.get(
            self.options.get("apihost") + ROOM_API.format(channel))
        log.debug(res_room_id.json())
        _room_id_json = res_room_id.json()
        try:
            room_id_json = self.session.http.json(res_room_id,
                                                  schema=_room_id_schema)
        except Exception:
            # FIX: was a bare `except:`; narrowed to Exception (still re-raised).
            log.info("Error during processing json: %s", _room_id_json)
            raise
        self.room_id = room_id_json['room_id']
        if room_id_json['live_status'] != SHOW_STATUS_ONLINE:
            log.error("This video is not a live. (abort)")
            raise PluginError("This video is not a live. (abort)")

        name = "source"
        for url in self.update_playlist():
            stream = BilibiliHLSStream(self.session, url)
            # The stream keeps a back-reference to the plugin for token refreshes.
            stream.plugin = self
            yield name, stream
class Rtve(Plugin):
    # --mux-subtitles is a global option shared with other plugins; when set,
    # available subtitle tracks are muxed into each returned stream.
    arguments = PluginArguments(
        PluginArgument("mux-subtitles", is_global=True),
    )

    # ZTNR endpoint that hides the stream URL list inside a PNG "thumbnail";
    # the payload is decoded below via ZTNR.translate.
    URL_VIDEOS = "https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2"
    URL_SUBTITLES = "https://www.rtve.es/api/videos/{id}/subtitulos.json"

    def _get_streams(self):
        """Resolve the asset id from the page, then yield HLS (or MP4 VOD) streams."""
        # The player config is embedded in a data-setup='{...}' attribute;
        # idAsset may arrive as an int or a numeric string.
        self.id = self.session.http.get(
            self.url,
            schema=validate.Schema(
                validate.transform(
                    re.compile(r"\bdata-setup='({.+?})'", re.DOTALL).search),
                validate.any(
                    None,
                    validate.all(
                        validate.get(1),
                        validate.parse_json(),
                        {
                            "idAsset":
                            validate.any(
                                int,
                                validate.all(str, validate.transform(int))),
                        },
                        validate.get("idAsset"))),
            ))
        if not self.id:
            return

        # Decode the (name, url) pairs hidden in the PNG payload.
        urls = self.session.http.get(
            self.URL_VIDEOS.format(id=self.id),
            schema=validate.Schema(
                validate.transform(ZTNR.translate),
                validate.transform(list),
                [(str, validate.url())],
            ),
        )

        # Prefer an HLS playlist; otherwise fall back to a progressive MP4 VOD.
        url = next(
            (url for _, url in urls if urlparse(url).path.endswith(".m3u8")),
            None)
        if not url:
            url = next(
                (url for _, url in urls if urlparse(url).path.endswith(".mp4")),
                None)
            if url:
                yield "vod", HTTPStream(self.session, url)
            return

        streams = HLSStream.parse_variant_playlist(self.session, url).items()

        if self.options.get("mux-subtitles"):
            subs = self.session.http.get(
                self.URL_SUBTITLES.format(id=self.id),
                schema=validate.Schema(
                    validate.parse_json(),
                    {
                        "page": {
                            "items": [{
                                "lang": str,
                                "src": validate.url(),
                            }]
                        }
                    },
                    validate.get(("page", "items")),
                ),
            )
            if subs:
                # Subtitle URLs may come with a non-https scheme; force https.
                subtitles = {
                    s["lang"]: HTTPStream(
                        self.session,
                        update_scheme("https://", s["src"], force=True))
                    for s in subs
                }
                for quality, stream in streams:
                    yield quality, MuxedStream(self.session,
                                               stream,
                                               subtitles=subtitles)
                return
        yield from streams
class ClubbingTV(Plugin):
    """Plugin for Clubbing TV live and VOD pages (account required)."""

    _login_url = "https://www.clubbingtv.com/user/login"
    _url_re = re.compile(r"https://(www\.)?clubbingtv\.com/")
    _live_re = re.compile(
        r'playerInstance\.setup\({\s*"file"\s*:\s*"(?P<stream_url>.+?)"',
        re.DOTALL,
    )
    _vod_re = re.compile(r'<iframe src="(?P<stream_url>.+?)"')

    arguments = PluginArguments(
        PluginArgument(
            "username",
            required=True,
            requires=["password"],
            help="The username used to register with Clubbing TV.",
        ),
        PluginArgument(
            "password",
            required=True,
            sensitive=True,
            help=
            "A Clubbing TV account password to use with --clubbingtv-username.",
        ),
    )

    @classmethod
    def can_handle_url(cls, url):
        # Any clubbingtv.com URL is handled by this plugin.
        return cls._url_re.match(url) is not None

    def login(self):
        """POST the credentials and report whether the login succeeded."""
        credentials = {
            "val[login]": self.get_option("username"),
            "val[password]": self.get_option("password"),
        }
        response = self.session.http.post(self._login_url, data=credentials)

        if "Invalid Email/User Name" in response.text:
            log.error(
                "Failed to login to Clubbing TV, incorrect email/password combination"
            )
            return False

        log.info("Successfully logged in")
        return True

    def _get_live_streams(self, content):
        """Extract the HLS playlist from a live page and yield its variants."""
        found = self._live_re.search(content)
        if not found:
            return
        yield from HLSStream.parse_variant_playlist(
            self.session, found.group("stream_url")).items()

    def _get_vod_streams(self, content):
        """Hand the embedded player iframe URL off to another plugin."""
        found = self._vod_re.search(content)
        if not found:
            return
        external_url = found.group("stream_url")
        log.info("Fetching external stream from URL {0}".format(external_url))
        return self.session.streams(external_url)

    def _get_streams(self):
        if not self.login():
            return

        self.session.http.headers.update({"Referer": self.url})
        page = self.session.http.get(self.url)

        if "clubbingtv.com/live" in self.url:
            log.debug("Live stream detected")
            return self._get_live_streams(page.text)

        log.debug("VOD stream detected")
        return self._get_vod_streams(page.text)
class WWENetwork(Plugin):
    # URL pattern and API endpoints of the WWE Network (IMG Gaming backend).
    url_re = re.compile(r"https?://watch.wwe.com/(channel)?")
    site_config_re = re.compile(r'''">window.__data = (\{.*?\})</script>''')
    stream_url = "https://dce-frontoffice.imggaming.com/api/v2/stream/{id}"
    live_url = "https://dce-frontoffice.imggaming.com/api/v2/event/live"
    login_url = "https://dce-frontoffice.imggaming.com/api/v2/login"
    page_config_url = "https://cdn.watch.wwe.com/api/page"
    # Public frontend API key, sent as the x-api-key header on every request.
    API_KEY = "cca51ea0-7837-40df-a055-75eb6347b2e7"
    customer_id = 16

    arguments = PluginArguments(
        PluginArgument(
            "email",
            required=True,
            metavar="EMAIL",
            requires=["password"],
            help="""
            The email associated with your WWE Network account,
            required to access any WWE Network stream.
            """
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="""
            A WWE Network account password to use with --wwenetwork-email.
            """
        )
    )

    def __init__(self, url):
        super(WWENetwork, self).__init__(url)
        self.session.http.headers.update({"User-Agent": useragents.CHROME})
        # Bearer token obtained by login(); attached to later API requests.
        self.auth_token = None

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def get_title(self):
        return self.item_config['title']

    def request(self, method, url, **kwargs):
        """Perform an API request with the required auth/identity headers.

        Non-200 API statuses are only logged (raise_for_status is disabled);
        the parsed JSON body is always returned.
        """
        headers = kwargs.pop("headers", {})
        headers.update({"x-api-key": self.API_KEY,
                        "Origin": "https://watch.wwe.com",
                        "Referer": "https://watch.wwe.com/signin",
                        "Accept": "application/json",
                        "Realm": "dce.wwe"})
        if self.auth_token:
            headers["Authorization"] = "Bearer {0}".format(self.auth_token)

        kwargs["raise_for_status"] = False
        log.debug("API request: {0} {1}".format(method, url))
        res = self.session.http.request(method, url, headers=headers, **kwargs)
        data = self.session.http.json(res)

        if "status" in data and data["status"] != 200:
            log.debug("API request failed: {0}:{1} ({2})".format(
                data["status"], data.get("code"),
                "; ".join(data.get("messages", []))))
        return data

    def login(self, email, password):
        """Log in and store the bearer token; returns it, or None on failure."""
        self.logger.debug("Attempting login as {0}", email)
        # sets some required cookies to login
        data = self.request('POST', self.login_url,
                            data=json.dumps({"id": email, "secret": password}),
                            headers={"Content-Type": "application/json"})
        if "authorisationToken" in data:
            self.auth_token = data["authorisationToken"]

        return self.auth_token

    @property
    @memoize
    def item_config(self):
        """Page config for the current URL (memoized: one request per plugin)."""
        log.debug("Loading page config")
        p = urlparse(self.url)
        res = self.session.http.get(self.page_config_url,
                                    params=dict(device="web_browser",
                                                ff="idp,ldp",
                                                item_detail_expand="all",
                                                lang="en-US",
                                                list_page_size="1",
                                                max_list_prefetch="1",
                                                path=p.path,
                                                segments="es",
                                                sub="Registered",
                                                text_entry_format="html"))
        data = self.session.http.json(res)
        return data["item"]

    def _get_media_info(self, content_id):
        """
        Get the info about the content, based on the ID
        :param content_id: contentId for the video
        :return:
        """
        info = self.request('GET', self.stream_url.format(id=content_id))
        return self.request('GET', info.get("playerUrlCallback"))

    def _get_video_id(self):
        # check the page to find the contentId
        log.debug("Searching for content ID")
        try:
            # channels resolve via the live-event endpoint; everything else
            # is a VOD identified by its DiceVideoId custom field
            if self.item_config['type'] == "channel":
                return self._get_live_id()
            else:
                return "vod/{id}".format(id=self.item_config['customFields']['DiceVideoId'])
        except KeyError:
            log.error("Could not find video ID")
            return

    def _get_live_id(self):
        """Return the id string of the first live event, if any."""
        log.debug("Loading live event")
        res = self.request('GET', self.live_url)
        for event in res.get('events', []):
            # only the first listed event is used
            return "event/{sportId}/{propertyId}/{tournamentId}/{id}".format(**event)

    def _get_streams(self):
        if not self.login(self.get_option("email"), self.get_option("password")):
            raise PluginError("Login failed")

        try:
            # honour a ?startPoint=<seconds> query parameter for VODs
            start_point = int(float(dict(parse_qsl(urlparse(self.url).query)).get("startPoint", 0.0)))
            if start_point > 0:
                log.info("Stream will start at {0}".format(seconds_to_hhmmss(start_point)))
        except ValueError:
            start_point = 0

        content_id = self._get_video_id()

        if content_id:
            self.logger.debug("Found content ID: {0}", content_id)
            info = self._get_media_info(content_id)
            if info.get("hlsUrl"):
                for s in HLSStream.parse_variant_playlist(self.session, info["hlsUrl"], start_offset=start_point).items():
                    yield s
            else:
                log.error("Could not find the HLS URL")
class Twitch(Plugin):
    arguments = PluginArguments(
        # oauth-token and cookie are deprecated/hidden options
        PluginArgument("oauth-token", sensitive=True, help=argparse.SUPPRESS),
        PluginArgument("cookie", sensitive=True, help=argparse.SUPPRESS),
        PluginArgument("disable-hosting",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is hosting another channel.
        """),
        PluginArgument("disable-ads",
                       action="store_true",
                       help="""
        Skip embedded advertisement segments at the beginning or during a stream.
        Will cause these segments to be missing from the stream.
        """),
        PluginArgument("disable-reruns",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is currently broadcasting a rerun.
        """),
        PluginArgument("low-latency",
                       action="store_true",
                       help="""
        Enables low latency streaming by prefetching HLS segments.
        Sets --hls-segment-stream-data to true and --hls-live-edge to {live_edge}, if it is higher.
        Reducing --hls-live-edge to 1 will result in the lowest latency possible.

        Low latency streams have to be enabled by the broadcasters on Twitch themselves.
        Regular streams can cause buffering issues with this option enabled.

        Note: The caching/buffering settings of the chosen player may need to be adjusted as well.
        Please refer to the player's own documentation for the required parameters and its configuration.
        Player parameters can be set via Streamlink's --player or --player-args parameters.
        """.format(live_edge=LOW_LATENCY_MAX_LIVE_EDGE)))

    @classmethod
    def stream_weight(cls, key):
        # Twitch-specific quality names take precedence over the defaults.
        weight = QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, "twitch"

        return Plugin.stream_weight(key)

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_metadata(self):
        """Populate title/author/category from the API, depending on URL type."""
        if self.video_id:
            api_res = self.api.videos(self.video_id)
            self.title = api_res["title"]
            self.author = api_res["channel"]["display_name"]
            self.category = api_res["game"]
        elif self.clip_name:
            self._get_clips()
        elif self._channel:
            api_res = self.api.streams(self.channel_id)["stream"]["channel"]
            self.title = api_res["status"]
            self.author = api_res["display_name"]
            self.category = api_res["game"]

    def get_title(self):
        # lazily resolved; _get_metadata fills all three fields at once
        if self.title is None:
            self._get_metadata()
        return self.title

    def get_author(self):
        if self.author is None:
            self._get_metadata()
        return self.author

    def get_category(self):
        if self.category is None:
            self._get_metadata()
        return self.category

    def __init__(self, url):
        Plugin.__init__(self, url)
        # chain of hosted channels already visited (loop detection)
        self._hosted_chain = []
        match = _url_re.match(url).groupdict()
        parsed = urlparse(url)
        self.params = parse_query(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self.video_type = None
        self._channel_id = None
        self._channel = None
        self.clip_name = None
        self.title = None
        self.author = None
        self.category = None

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                try:
                    # "video" is e.g. "v12345": a type letter + numeric id
                    self.video_type = self.params["video"][0]
                    self.video_id = self.params["video"][1:]
                except IndexError:
                    log.debug("Invalid video param: {0}".format(
                        self.params["video"]))
            self._channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            self._channel = match.get("channel") and match.get(
                "channel").lower()
            self.video_type = match.get("video_type")
            if match.get("videos_id"):
                # /videos/<id> URLs are always type "v"
                self.video_type = "v"
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(beta=self.subdomain == "beta",
                             session=self.session,
                             version=5)
        self.usher = UsherService(session=self.session)

    @property
    def channel(self):
        # resolved lazily from the video id when not given in the URL
        if not self._channel:
            if self.video_id:
                cdata = self._channel_from_video_id(self.video_id)
                self._channel = cdata["name"].lower()
                self._channel_id = cdata["_id"]
        return self._channel

    @channel.setter
    def channel(self, channel):
        self._channel = channel
        # channel id becomes unknown
        self._channel_id = None

    @property
    def channel_id(self):
        if not self._channel_id:
            # If the channel name is set, use that to look up the ID
            if self._channel:
                cdata = self._channel_from_login(self._channel)
                self._channel_id = cdata["_id"]
            # If the channel name is not set but the video ID is,
            # use that to look up both ID and name
            elif self.video_id:
                cdata = self._channel_from_video_id(self.video_id)
                self._channel = cdata["name"].lower()
                self._channel_id = cdata["_id"]
        return self._channel_id

    def _channel_from_video_id(self, video_id):
        """Return the channel dict that owns the given video, or raise."""
        vdata = self.api.videos(video_id)
        if "channel" not in vdata:
            raise PluginError("Unable to find video: {0}".format(video_id))
        return vdata["channel"]

    def _channel_from_login(self, channel):
        """Return the user dict for a channel login name, or raise."""
        cdata = self.api.users(login=channel)
        if len(cdata["users"]):
            return cdata["users"][0]
        else:
            raise PluginError("Unable to find channel: {0}".format(channel))

    def _create_playlist_streams(self, videos):
        """Build {quality: stream} from a legacy chunked-video API response."""
        start_offset = int(videos.get("start_offset", 0))
        stop_offset = int(videos.get("end_offset", 0))
        streams = {}

        for quality, chunks in videos.get("chunks").items():
            if not chunks:
                # empty chunk lists mean the quality is subscriber-only
                if videos.get("restrictions", {}).get(quality) == "chansub":
                    log.warning(
                        "The quality '{0}' is not available since it requires a subscription."
                        .format(quality))
                continue

            # Rename 'live' to 'source'
            if quality == "live":
                quality = "source"

            chunks_filtered = list(filter(lambda c: c["url"], chunks))
            if len(chunks) != len(chunks_filtered):
                log.warning(
                    "The video '{0}' contains invalid chunks. There will be missing data."
                    .format(quality))
                chunks = chunks_filtered

            chunks_duration = sum(c.get("length") for c in chunks)

            # If it's a full broadcast we just use all the chunks
            if start_offset == 0 and chunks_duration == stop_offset:
                # No need to use the FLV concat if it's just one chunk
                if len(chunks) == 1:
                    url = chunks[0].get("url")
                    stream = HTTPStream(self.session, url)
                else:
                    chunks = [
                        HTTPStream(self.session, c.get("url")) for c in chunks
                    ]
                    stream = FLVPlaylist(self.session,
                                         chunks,
                                         duration=chunks_duration)
            else:
                # partial playback: seek into the chunk list
                try:
                    stream = self._create_video_clip(chunks, start_offset,
                                                     stop_offset)
                except StreamError as err:
                    log.error("Error while creating video '{0}': {1}".format(
                        quality, err))
                    continue

            streams[quality] = stream
        return streams

    def _create_video_clip(self, chunks, start_offset, stop_offset):
        """Build an FLVPlaylist spanning [start_offset, stop_offset] seconds.

        Seeks within the first relevant chunk using its FLV keyframe index
        and an HTTP Range request; raises StreamError when seeking fails.
        """
        playlist_duration = stop_offset - start_offset
        playlist_offset = 0
        playlist_streams = []
        playlist_tags = []

        for chunk in chunks:
            chunk_url = chunk["url"]
            chunk_length = chunk["length"]
            chunk_start = playlist_offset
            chunk_stop = chunk_start + chunk_length
            chunk_stream = HTTPStream(self.session, chunk_url)

            if chunk_start <= start_offset <= chunk_stop:
                # this chunk contains the start point: parse its FLV header
                # to find the byte offset of the last keyframe before it
                try:
                    headers = extract_flv_header_tags(chunk_stream)
                except IOError as err:
                    raise StreamError("Error while parsing FLV: {0}", err)

                if not headers.metadata:
                    raise StreamError(
                        "Missing metadata tag in the first chunk")

                metadata = headers.metadata.data.value
                keyframes = metadata.get("keyframes")
                if not keyframes:
                    if chunk["upkeep"] == "fail":
                        raise StreamError(
                            "Unable to seek into muted chunk, try another timestamp"
                        )
                    else:
                        raise StreamError(
                            "Missing keyframes info in the first chunk")
                keyframe_offset = None
                keyframe_offsets = keyframes.get("filepositions")
                keyframe_times = [
                    playlist_offset + t for t in keyframes.get("times")
                ]
                for time, offset in zip(keyframe_times, keyframe_offsets):
                    if time > start_offset:
                        break
                    keyframe_offset = offset

                if keyframe_offset is None:
                    raise StreamError("Unable to find a keyframe to seek to "
                                      "in the first chunk")

                # re-request the chunk starting at the keyframe byte offset
                chunk_headers = dict(
                    Range="bytes={0}-".format(int(keyframe_offset)))
                chunk_stream = HTTPStream(self.session,
                                          chunk_url,
                                          headers=chunk_headers)
                playlist_streams.append(chunk_stream)
                for tag in headers:
                    playlist_tags.append(tag)
            elif start_offset <= chunk_start < stop_offset:
                # fully inside the requested window: take the whole chunk
                playlist_streams.append(chunk_stream)

            playlist_offset += chunk_length

        return FLVPlaylist(self.session,
                           playlist_streams,
                           tags=playlist_tags,
                           duration=playlist_duration)

    def _get_video_streams(self):
        log.debug("Getting video steams for {0} (type={1})".format(
            self.video_id, self.video_type))
        # legacy "b" (broadcast) videos are fetched as "a" (archive)
        if self.video_type == "b":
            self.video_type = "a"

        try:
            videos = self.api.videos(self.video_type + self.video_id,
                                     schema=_video_schema)
        except PluginError as err:
            if "HTTP/1.1 0 ERROR" in str(err):
                raise NoStreamsError(self.url)
            else:
                raise

        # Parse the "t" query parameter on broadcasts and adjust
        # start offset if needed.
        time_offset = self.params.get("t")
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

            videos["start_offset"] += time_offset

        return self._create_playlist_streams(videos)

    def _access_token(self, type="live"):
        """Fetch the (sig, token) pair needed by the usher service."""
        try:
            if type == "live":
                endpoint = "channels"
                value = self.channel
            elif type == "video":
                endpoint = "vods"
                value = self.video_id

            sig, token = self.api.access_token(endpoint,
                                               value,
                                               schema=_access_token_schema)
        except PluginError as err:
            # a 404 means the channel/vod does not exist: no streams
            if "404 Client Error" in str(err):
                raise NoStreamsError(self.url)
            else:
                raise

        return sig, token

    def _check_for_host(self):
        """Return the login of the channel being hosted, if any."""
        host_info = self.api.hosted_channel(
            include_logins=1, host=self.channel_id).json()["hosts"][0]
        if "target_login" in host_info and host_info["target_login"].lower(
        ) != self.channel.lower():
            log.info("{0} is hosting {1}".format(self.channel,
                                                 host_info["target_login"]))
            return host_info["target_login"]

    def _check_for_rerun(self):
        """True when the current broadcast is flagged as a rerun."""
        stream = self.api.streams(self.channel_id, schema=_stream_schema)
        return stream and (
            stream["stream_type"] != "live"
            or stream["broadcast_platform"] == "rerun"
            or stream["channel"] and stream["channel"]["broadcaster_software"]
            == "watch_party_rerun")

    def _get_hls_streams(self, stream_type="live"):
        """Resolve HLS streams for a live channel or a VOD.

        Handles --disable-reruns and hosted-channel redirection (with loop
        detection via self._hosted_chain) before querying the usher service.
        """
        log.debug("Getting {0} HLS streams for {1}".format(
            stream_type, self.channel))
        self._hosted_chain.append(self.channel)

        if stream_type == "live":
            if self.options.get("disable_reruns") and self._check_for_rerun():
                log.info("Reruns were disabled by command line option")
                return {}

            hosted_channel = self._check_for_host()
            if hosted_channel and self.options.get("disable_hosting"):
                log.info("hosting was disabled by command line option")
            elif hosted_channel:
                log.info("switching to {0}".format(hosted_channel))
                if hosted_channel in self._hosted_chain:
                    log.error(u"A loop of hosted channels has been detected, "
                              "cannot find a playable stream. ({0})".format(
                                  u" -> ".join(self._hosted_chain
                                               + [hosted_channel])))
                    return {}
                self.channel = hosted_channel
                # recurse with the hosted channel as the new target
                return self._get_hls_streams(stream_type)

            # only get the token once the channel has been resolved
            sig, token = self._access_token(stream_type)
            url = self.usher.channel(self.channel,
                                     sig=sig,
                                     token=token,
                                     fast_bread=True)
        elif stream_type == "video":
            sig, token = self._access_token(stream_type)
            url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)
        else:
            log.debug("Unknown HLS stream type: {0}".format(stream_type))
            return {}

        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

        try:
            # If the stream is a VOD that is still being recorded the stream should start at the
            # beginning of the recording
            streams = TwitchHLSStream.parse_variant_playlist(
                self.session,
                url,
                start_offset=time_offset,
                force_restart=not stream_type == "live")
        except IOError as err:
            err = str(err)
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                return
            else:
                raise PluginError(err)

        try:
            # warn about qualities listed in the token but absent from the playlist
            token = parse_json(token, schema=_token_schema)
            for name in token["restricted_bitrates"]:
                if name not in streams:
                    log.warning(
                        "The quality '{0}' is not available since it requires a subscription."
                        .format(name))
        except PluginError:
            pass

        return streams

    def _get_clips(self):
        """Fetch clip metadata + qualities via the GQL API; sets title/author."""
        data = json.dumps({
            'query': '''{{
                clip(slug: "{0}") {{
                    broadcaster {{
                        displayName
                    }}
                    title
                    videoQualities {{
                        quality
                        sourceURL
                    }}
                }}
            }}'''.format(self.clip_name)
        })
        clip_data = self.session.http.post(
            'https://gql.twitch.tv/gql',
            data=data,
            headers={
                'Client-ID': TWITCH_CLIENT_ID_PRIVATE
            },
        ).json()['data']['clip']
        log.trace('{0!r}'.format(clip_data))
        if not clip_data:
            return

        self.author = clip_data['broadcaster']['displayName']
        self.title = clip_data['title']

        streams = {}
        for quality_option in clip_data['videoQualities']:
            streams['{0}p'.format(quality_option['quality'])] = HTTPStream(
                self.session, quality_option['sourceURL'])
        return streams

    def _get_streams(self):
        # dispatch on what the URL identified: video, clip or live channel
        if self.video_id:
            if self.video_type == "v":
                return self._get_hls_streams("video")
            else:
                return self._get_video_streams()
        elif self.clip_name:
            return self._get_clips()
        elif self._channel:
            return self._get_hls_streams("live")
class Zattoo(Plugin):
    STREAMS_ZATTOO = ['dash', 'hls5']

    # how often to re-validate an existing session (2 hours)
    TIME_CONTROL = 60 * 60 * 2
    # lifetime of cached credentials/uuid (30 days)
    TIME_SESSION = 60 * 60 * 24 * 30

    arguments = PluginArguments(
        PluginArgument("email",
                       requires=["password"],
                       metavar="EMAIL",
                       help="""
        The email associated with your zattoo account,
        required to access any zattoo stream.
        """),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
        A zattoo account password to use with --zattoo-email.
        """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached zattoo credentials to initiate a new session
        and reauthenticate.
        """),
        PluginArgument('stream-types',
                       metavar='TYPES',
                       type=comma_list_filter(STREAMS_ZATTOO),
                       default=['dash'],
                       help='''
        A comma-delimited list of stream types which should be used,
        the following types are allowed:

        - {0}

        Default is "dash".
        '''.format('\n        - '.join(STREAMS_ZATTOO))))

    def __init__(self, url):
        super().__init__(url)
        self.domain = self.match.group('base_url')
        # per-domain persistent cache for uuid/power_guide_hash/session flag
        self._session_attributes = Cache(
            filename='plugin-cache.json',
            key_prefix='zattoo:attributes:{0}'.format(self.domain))
        self._uuid = self._session_attributes.get('uuid')
        # considered authenticated only when both cached attributes and both
        # session cookies are still present
        self._authed = (self._session_attributes.get('power_guide_hash')
                        and self._uuid
                        and self.session.http.cookies.get(
                            'pzuid', domain=self.domain)
                        and self.session.http.cookies.get(
                            'beaker.session.id', domain=self.domain))
        self._session_control = self._session_attributes.get(
            'session_control', False)
        self.base_url = 'https://{0}'.format(self.domain)
        self.headers = {
            'User-Agent': useragents.CHROME,
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.base_url
        }

    def _hello(self):
        """Start an anonymous API session (required before login)."""
        log.debug('_hello ...')
        app_token = self.session.http.get(f'{self.base_url}/token.json',
                                          schema=validate.Schema(
                                              validate.transform(parse_json),
                                              {
                                                  'success': bool,
                                                  'session_token': str,
                                              },
                                              validate.get('session_token')))
        if self._uuid:
            __uuid = self._uuid
        else:
            # first run: generate and cache a client uuid
            __uuid = str(uuid.uuid4())
            self._session_attributes.set('uuid',
                                         __uuid,
                                         expires=self.TIME_SESSION)

        params = {
            'app_version': '3.2120.1',
            'client_app_token': app_token,
            'format': 'json',
            'lang': 'en',
            'uuid': __uuid,
        }
        res = self.session.http.post(f'{self.base_url}/zapi/v3/session/hello',
                                     headers=self.headers,
                                     data=params,
                                     schema=validate.Schema(
                                         validate.transform(parse_json),
                                         validate.any({'active': bool},
                                                      {'success': bool})))
        if res.get('active') or res.get('success'):
            log.debug('Hello was successful.')
        else:
            log.debug('Hello failed.')

    def _login(self, email, password):
        """Authenticate and cache power_guide_hash + session cookies."""
        log.debug('_login ...')
        data = self.session.http.post(
            f'{self.base_url}/zapi/v3/account/login',
            headers=self.headers,
            data={
                'login': email,
                'password': password,
                'remember': 'true',
                'format': 'json',
            },
            acceptable_status=(200, 400),
            schema=validate.Schema(
                validate.transform(parse_json),
                validate.any({
                    'active': bool,
                    'power_guide_hash': str,
                }, {
                    'success': bool,
                })),
        )

        if data.get('active'):
            log.debug('Login was successful.')
        else:
            log.debug('Login failed.')
            return

        self._authed = data['active']
        self.save_cookies(default_expires=self.TIME_SESSION)
        self._session_attributes.set('power_guide_hash',
                                     data['power_guide_hash'],
                                     expires=self.TIME_SESSION)
        self._session_attributes.set('session_control',
                                     True,
                                     expires=self.TIME_CONTROL)

    def _watch(self):
        """Request watch URLs for a channel/VOD/recording and yield streams."""
        log.debug('_watch ...')
        channel = self.match.group('channel')
        vod_id = self.match.group('vod_id')
        recording_id = self.match.group('recording_id')

        params = {'https_watch_urls': True}
        if channel:
            watch_url = f'{self.base_url}/zapi/watch'
            params_cid = self._get_params_cid(channel)
            if not params_cid:
                return
            params.update(params_cid)
        elif vod_id:
            log.debug('Found vod_id: {0}'.format(vod_id))
            watch_url = f'{self.base_url}/zapi/avod/videos/{vod_id}/watch'
        elif recording_id:
            log.debug('Found recording_id: {0}'.format(recording_id))
            watch_url = f'{self.base_url}/zapi/watch/recording/{recording_id}'
        else:
            log.debug('Missing watch_url')
            return

        zattoo_stream_types = self.get_option('stream-types')
        for stream_type in zattoo_stream_types:
            params_stream_type = {'stream_type': stream_type}
            params.update(params_stream_type)
            try:
                res = self.session.http.post(watch_url,
                                             headers=self.headers,
                                             data=params)
            except Exception as e:
                # map well-known HTTP failures to user-facing messages;
                # a 403 invalidates the cached session
                if '404 Client Error' in str(e):
                    log.error('Unfortunately streaming is not permitted in '
                              'this country or this channel does not exist.')
                elif '402 Client Error: Payment Required' in str(e):
                    log.error('Paid subscription required for this channel.')
                    log.info('If paid subscription exist, use --zattoo-purge'
                             '-credentials to start a new session.')
                elif '403 Client Error' in str(e):
                    log.debug('Force session reset for watch_url')
                    self.reset_session()
                else:
                    log.error(str(e))
                return

            data = self.session.http.json(res)
            log.debug('Found data for {0}'.format(stream_type))
            if data['success'] and stream_type == 'hls5':
                for url in data['stream']['watch_urls']:
                    yield from HLSStream.parse_variant_playlist(
                        self.session, url['url']).items()
            elif data['success'] and stream_type == 'dash':
                for url in data['stream']['watch_urls']:
                    yield from DASHStream.parse_manifest(
                        self.session, url['url']).items()

    def _get_params_cid(self, channel):
        """Map a channel display alias to its cid; False on session failure."""
        log.debug('get channel ID for {0}'.format(channel))
        try:
            res = self.session.http.get(
                f'{self.base_url}/zapi/v2/cached/channels/{self._session_attributes.get("power_guide_hash")}',
                headers=self.headers,
                params={'details': 'False'})
        except Exception:
            log.debug('Force session reset for _get_params_cid')
            self.reset_session()
            return False

        data = self.session.http.json(
            res,
            schema=validate.Schema(
                {
                    'success': bool,
                    'channel_groups': [{
                        'channels': [
                            {
                                'display_alias': validate.text,
                                'cid': validate.text
                            },
                        ]
                    }]
                },
                validate.get('channel_groups'),
            ))
        c_list = []
        for d in data:
            for c in d['channels']:
                c_list.append(c)

        cid = []
        zattoo_list = []
        for c in c_list:
            zattoo_list.append(c['display_alias'])
            if c['display_alias'] == channel:
                cid = c['cid']

        log.trace('Available zattoo channels in this country: {0}'.format(
            ', '.join(sorted(zattoo_list))))

        if not cid:
            # fall back to using the raw channel name as cid
            cid = channel

        log.debug('CHANNEL ID: {0}'.format(cid))
        return {'cid': cid}

    def reset_session(self):
        """Drop all cached session state and cookies."""
        self._session_attributes.set('power_guide_hash', None, expires=0)
        self._session_attributes.set('uuid', None, expires=0)
        self.clear_cookies()
        self._authed = False

    def _get_streams(self):
        email = self.get_option('email')
        password = self.get_option('password')

        if self.options.get('purge_credentials'):
            self.reset_session()
            log.info('All credentials were successfully removed.')
        elif (self._authed and not self._session_control):
            # check every two hours, if the session is actually valid
            log.debug('Session control for {0}'.format(self.domain))
            active = self.session.http.get(f'{self.base_url}/zapi/v3/session',
                                           schema=validate.Schema(
                                               validate.transform(parse_json),
                                               {'active': bool},
                                               validate.get('active')))
            if active:
                self._session_attributes.set('session_control',
                                             True,
                                             expires=self.TIME_CONTROL)
                log.debug('User is logged in')
            else:
                log.debug('User is not logged in')
                self._authed = False

        if not self._authed and (not email and not password):
            log.error(
                'A login for Zattoo is required, use --zattoo-email EMAIL'
                ' --zattoo-password PASSWORD to set them')
            return

        if not self._authed:
            self._hello()
            self._login(email, password)

        if self._authed:
            return self._watch()
class UStreamTV(Plugin):
    """Plugin for UStream.tv channels and recorded videos (websocket API)."""

    arguments = PluginArguments(
        PluginArgument(
            "password",
            argument_name="ustream-password",
            sensitive=True,
            metavar="PASSWORD",
            help="A password to access password protected UStream.tv channels."
        ))

    # seconds to wait for the websocket client to deliver stream data
    STREAM_READY_TIMEOUT = 15

    def _get_media_app(self):
        """Return the (media id, application) pair for the matched URL."""
        recorded_id = self.match.group("video_id")
        if recorded_id:
            return recorded_id, "recorded"

        live_id = self.match.group("channel_id")
        if not live_id:
            # channel pages without an explicit id carry it in a meta tag
            live_id = self.session.http.get(
                self.url,
                headers={"User-Agent": useragents.CHROME},
                schema=validate.Schema(
                    validate.parse_html(),
                    validate.xml_xpath_string(
                        ".//meta[@name='ustream:channel_id'][@content][1]/@content"
                    )))
        return live_id, "channel"

    def _get_streams(self):
        # separate audio/video tracks need muxing support to be usable
        if not MuxedStream.is_usable(self.session):
            return

        media_id, application = self._get_media_app()
        if not media_id:
            return

        wsclient = UStreamTVWsClient(self.session,
                                     media_id,
                                     application,
                                     referrer=self.url,
                                     cluster="live",
                                     password=self.get_option("password"))
        log.debug(f"Connecting to UStream API:"
                  f" media_id={media_id},"
                  f" application={application},"
                  f" referrer={self.url},"
                  f" cluster=live")
        wsclient.start()

        log.debug(
            f"Waiting for stream data (for at most {self.STREAM_READY_TIMEOUT} seconds)..."
        )
        ready_in_time = wsclient.ready.wait(self.STREAM_READY_TIMEOUT)
        if not ready_in_time or not wsclient.is_alive() or wsclient.stream_error:
            log.error(wsclient.stream_error
                      or "Waiting for stream data timed out.")
            wsclient.close()
            return

        if wsclient.stream_formats_audio:
            # pair every video format with every audio format
            for video in wsclient.stream_formats_video:
                for audio in wsclient.stream_formats_audio:
                    yield f"{video.height}p+a{audio.bitrate}k", MuxedStream(
                        self.session,
                        UStreamTVStream(self.session, "video", wsclient,
                                        video),
                        UStreamTVStream(self.session, "audio", wsclient,
                                        audio))
        else:
            for video in wsclient.stream_formats_video:
                yield f"{video.height}p", UStreamTVStream(
                    self.session, "video", wsclient, video)