class UStreamTV(Plugin):
    """Plugin for UStream.tv live channels and recorded videos.

    Streams are obtained through the UHS polling API wrapped by
    ``UHSClient``; ``moduleInfo`` replies describe the streams and
    ``reject`` replies redirect us to another cluster/referrer.
    """

    # Matches channel pages (/embed/<id>, /channel/id/<id>) and
    # recorded videos (/recorded/<id>, /embed/recorded/<id>).
    url_re = re.compile(r"""
    https?://(www\.)?ustream\.tv
        (?:
            (/embed/|/channel/id/)(?P<channel_id>\d+)
        )?
        (?:
            (/embed)?/recorded/(?P<video_id>\d+)
        )?
    """, re.VERBOSE)

    # Channel ID embedded in the page markup, e.g.
    # name="ustream:channel_id" content="12345"
    media_id_re = re.compile(r'"ustream:channel_id"\s+content\s*=\s*"(\d+)"')

    arguments = PluginArguments(
        PluginArgument("password",
                       argument_name="ustream-password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
    A password to access password protected UStream.tv channels.
    """))

    @classmethod
    def can_handle_url(cls, url):
        # Supported when the channel/video URL regex matches.
        return cls.url_re.match(url) is not None

    def _api_get_streams(self, media_id, application, cluster="live",
                         referrer=None, retries=3):
        """Connect to the UHS API and yield (quality, stream) pairs.

        ``retries`` is decremented by :meth:`handle_reject` each time the
        API redirects us; once it reaches 0 nothing more is yielded.
        Side effect: (re)binds ``self.api`` to a fresh ``UHSClient``.
        """
        if retries > 0:
            app_id = 11
            app_ver = 2
            referrer = referrer or self.url
            self.api = UHSClient(self.session,
                                 media_id,
                                 application,
                                 referrer=referrer,
                                 cluster=cluster,
                                 app_id=app_id,
                                 app_version=app_ver,
                                 password=self.get_option("password"))
            self.logger.debug(
                "Connecting to UStream API: media_id={0}, application={1}, referrer={2}, cluster={3}, "
                "app_id={4}, app_ver={5}",
                media_id, application, referrer, cluster, app_id, app_ver)
            if self.api.connect():
                for i in range(5):  # make at most five requests to get the moduleInfo
                    try:
                        for s in self._do_poll(media_id, application,
                                               cluster, referrer, retries):
                            yield s
                    except ModuleInfoNoStreams:
                        # moduleInfo contained no usable streams - poll again
                        self.logger.debug("Retrying moduleInfo request")
                        time.sleep(1)
                    else:
                        break

    def _do_poll(self, media_id, application, cluster="live",
                 referrer=None, retries=3):
        """Poll the API once and dispatch each command to its handler,
        yielding any streams the handlers produce."""
        res = self.api.poll()
        if res:
            for result in res:
                if result["cmd"] == "moduleInfo":
                    for s in self.handle_module_info(result["args"], media_id,
                                                     application, cluster,
                                                     referrer, retries):
                        yield s
                elif result["cmd"] == "reject":
                    for s in self.handle_reject(result["args"], media_id,
                                                application, cluster,
                                                referrer, retries):
                        yield s
                else:
                    self.logger.debug("Unknown command: {0}({1})",
                                      result["cmd"], result["args"])

    def handle_module_info(self, args, media_id, application, cluster="live",
                           referrer=None, retries=3):
        """Yield streams described in a ``moduleInfo`` reply.

        A list entry describes HLS variant playlists; a dict entry
        describes plain HTTP streams named by bitrate; the literal
        string "offline" means the channel is not live.

        :raises ModuleInfoNoStreams: when the reply validated to nothing.
        """
        has_results = False
        for streams in UHSClient.module_info_schema.validate(args):
            has_results = True
            if isinstance(streams, list):
                for stream in streams:
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, stream["url"]).items():
                        yield q, UStreamHLSStream(self.session, s.url, self.api)
            elif isinstance(streams, dict):
                for stream in streams.get("streams", []):
                    name = "{0}k".format(stream["bitrate"])
                    for surl in stream["streamName"]:
                        yield name, HTTPStream(self.session, surl)
            elif streams == "offline":
                self.logger.warning("This stream is currently offline")

        if not has_results:
            raise ModuleInfoNoStreams

    def handle_reject(self, args, media_id, application, cluster="live",
                      referrer=None, retries=3):
        """Handle a ``reject`` reply: pick up the new cluster and/or
        referrer and reconnect with one retry used up.

        Returns the generator produced by :meth:`_api_get_streams`.
        """
        for arg in args:
            if "cluster" in arg:
                self.logger.debug("Switching cluster to {0}",
                                  arg["cluster"]["name"])
                cluster = arg["cluster"]["name"]
            if "referrerLock" in arg:
                referrer = arg["referrerLock"]["redirectUrl"]

        return self._api_get_streams(media_id, application,
                                     cluster=cluster,
                                     referrer=referrer,
                                     retries=retries - 1)

    def _get_streams(self):
        # establish a mobile non-websockets api connection
        umatch = self.url_re.match(self.url)
        application = "channel"

        channel_id = umatch.group("channel_id")
        video_id = umatch.group("video_id")
        if channel_id:
            application = "channel"
            media_id = channel_id
        elif video_id:
            application = "recorded"
            media_id = video_id
        else:
            # URL matched but carried no ID - scrape it from the page
            media_id = self._find_media_id()

        if media_id:
            for s in self._api_get_streams(media_id, application):
                yield s
        else:
            self.logger.error("Cannot find a media_id on this page")

    def _find_media_id(self):
        """Scrape the channel ID out of the page HTML; returns the ID
        string or a falsy value when not found."""
        self.logger.debug("Searching for media ID on the page")
        res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
        m = self.media_id_re.search(res.text)
        return m and m.group(1)
class Generic(Plugin):
    """Generic plugin: scrapes a website for playlist URLs
    (m3u8 / f4m / mp3 / mp4 / mpd) and iframes and resolves them into
    streams, recursing into iframes via ``self.session.streams``."""

    # optional generic:// or resolve:// prefix forces this plugin
    pattern_re = re.compile(r'((?:generic|resolve)://)?(?P<url>.+)')

    # iframes
    _iframe_re = re.compile(r'''(?isx)
        <ifr(?:["']\s?\+\s?["'])?ame
        (?!\sname=["']g_iFrame).*?src=
        ["'](?P<url>[^"'\s<>]+)["']
        [^<>]*?>
    ''')
    # playlists
    # NOTE: the '&quot;' alternatives match HTML-entity encoded quotes in
    # scraped pages; they had been entity-decoded to plain '"' (already
    # covered by ["']) which also broke the trailing '\\&quot;' branch.
    _playlist_re = re.compile(r'''(?sx)
        (?:["']|=|&quot;)(?P<url>
            (?<!title=["'])
            (?<!["']title["']:["'])
            [^"'<>\s\;{}]+\.(?:m3u8|f4m|mp3|mp4|mpd)
            (?:\?[^"'<>\s\\{}]+)?)/?
        (?:\\?["']|(?<!;)\s|>|\\&quot;)
    ''')
    # mp3 and mp4 files
    _httpstream_bitrate_re = re.compile(r'''(?x)
        (?:_|\.|/|-)
        (?:
            (?P<bitrate>\d{1,4})(?:k)?
            |
            (?P<resolution>\d{1,4}p)
        )
        \.mp(?:3|4)
    ''')
    _httpstream_common_resolution_list = [
        '2160', '1440', '1080', '720', '576', '480', '360', '240',
    ]
    # javascript redirection
    _window_location_re = re.compile(r'''(?sx)
        <script[^<]+window\.location\.href\s?=\s?["']
        (?P<url>[^"']+)["'];[^<>]+
    ''')
    # obviously ad paths
    _ads_path_re = re.compile(r'''(?x)
        (?:/(?:static|\d+))?
        /ads?/?(?:\w+)?
        (?:\d+x\d+)?
        (?:_\w+)?\.(?:html?|php)
    ''')

    # START - _make_url_list
    # Not allowed at the end of the parsed url path
    blacklist_endswith = (
        '.gif',
        '.jpg',
        '.png',
        '.svg',
        '.vtt',
        '/chat.html',
        '/chat',
        '/novideo.mp4',
        '/vidthumb.mp4',
    )
    # Not allowed at the end of the parsed url netloc
    blacklist_netloc = (
        '127.0.0.1',
        'about:blank',
        'abv.bg',
        'adfox.ru',
        'cbox.ws',
        'googletagmanager.com',
        'javascript:false',
    )
    # END - _make_url_list

    arguments = PluginArguments(
        PluginArgument(
            'playlist-max',
            metavar='NUMBER',
            type=num(int, min=0, max=25),
            default=5,
            help='''
            Number of how many playlist URLs of the same type
            are allowed to be resolved with this plugin.

            Default is 5
            '''
        ),
        PluginArgument(
            'playlist-referer',
            metavar='URL',
            help='''
            Set a custom referer URL for the playlist URLs.

            This only affects playlist URLs of this plugin.

            Default is the URL of the last website.
            '''
        ),
        PluginArgument(
            'blacklist-netloc',
            metavar='NETLOC',
            type=comma_list,
            help='''
            Blacklist domains that should not be used,
            by using a comma-separated list:

              'example.com,localhost,google.com'

            Useful for websites with a lot of iframes.
            '''
        ),
        PluginArgument(
            'blacklist-path',
            metavar='PATH',
            type=comma_list,
            help='''
            Blacklist the path of a domain that should not be used,
            by using a comma-separated list:

              'example.com/mypath,localhost/example,google.com/folder'

            Useful for websites with different iframes of the same domain.
            '''
        ),
        PluginArgument(
            'blacklist-filepath',
            metavar='FILEPATH',
            type=comma_list,
            help='''
            Blacklist file names for iframes and playlists
            by using a comma-separated list:

              'index.html,ignore.m3u8,/ad/master.m3u8'

            Sometimes there are invalid URLs in the result list,
            this can be used to remove them.
            '''
        ),
        PluginArgument(
            'whitelist-netloc',
            metavar='NETLOC',
            type=comma_list,
            help='''
            Whitelist domains that should only be searched for iframes,
            by using a comma-separated list:

              'example.com,localhost,google.com'

            Useful for websites with lots of iframes,
            where the main iframe always has the same hosting domain.
            '''
        ),
        PluginArgument(
            'whitelist-path',
            metavar='PATH',
            type=comma_list,
            help='''
            Whitelist the path of a domain that should only be searched
            for iframes, by using a comma-separated list:

              'example.com/mypath,localhost/example,google.com/folder'

            Useful for websites with different iframes of the same domain,
            where the main iframe always has the same path.
            '''
        ),
    )

    def __init__(self, url):
        super(Generic, self).__init__(url)
        # strip the generic:// / resolve:// prefix and force a scheme
        self.url = update_scheme('http://',
                                 self.pattern_re.match(self.url).group('url'))
        self.html_text = ''
        self.title = None

        # START - cache every used url and set a referer
        if hasattr(GenericCache, 'cache_url_list'):
            GenericCache.cache_url_list += [self.url]
            # set the last url as a referer
            self.referer = GenericCache.cache_url_list[-2]
        else:
            GenericCache.cache_url_list = [self.url]
            self.referer = self.url
        self.session.http.headers.update({'Referer': self.referer})
        # END

        # START - how often _get_streams already run
        self._run = len(GenericCache.cache_url_list)
        # END

    @classmethod
    def priority(cls, url):
        """HIGH_PRIORITY when the generic://|resolve:// prefix is used,
        NO_PRIORITY otherwise (other plugins get first pick)."""
        m = cls.pattern_re.match(url)
        if m:
            prefix, url = cls.pattern_re.match(url).groups()
            if prefix is not None:
                return HIGH_PRIORITY
        return NO_PRIORITY

    @classmethod
    def can_handle_url(cls, url):
        m = cls.pattern_re.match(url)
        if m:
            return m.group('url') is not None

    def compare_url_path(self, parsed_url, check_list, path_status='startswith'):
        """Return True when ``parsed_url`` matches a (netloc, path) entry
        of ``check_list``; ``path_status`` selects exact or prefix match."""
        status = False
        for netloc, path in check_list:
            if path_status == '==':
                if (parsed_url.netloc.endswith(netloc)
                        and parsed_url.path == path):
                    status = True
                    break
            elif path_status == 'startswith':
                if (parsed_url.netloc.endswith(netloc)
                        and parsed_url.path.startswith(path)):
                    status = True
                    break
        return status

    def merge_path_list(self, static, user):
        """Append user-provided URLs to ``static`` as (netloc, path) pairs."""
        for _path_url in user:
            if not _path_url.startswith(('http', '//')):
                _path_url = update_scheme('http://', _path_url)
            _parsed_path_url = urlparse(_path_url)
            if _parsed_path_url.netloc and _parsed_path_url.path:
                static += [(_parsed_path_url.netloc, _parsed_path_url.path)]
        return static

    def repair_url(self, url, base_url, stream_base=''):
        """Repair a scraped URL candidate so it is usable:
        strip backslash escapes, decode entity-encoded schemes and
        resolve relative URLs against ``stream_base``/``base_url``."""
        # remove \
        new_url = url.replace('\\', '')
        # repairs broken scheme
        # FIX: these markers are the HTML-entity encoded schemes; they
        # had been mangled to plain 'http://' / 'https://', which made
        # the slicing below corrupt every absolute http(s) URL
        # ('http://example.com'[9:] -> 'ample.com').
        if new_url.startswith('http&#58;//'):
            new_url = 'http:' + new_url[9:]
        elif new_url.startswith('https&#58;//'):
            new_url = 'https:' + new_url[10:]

        # creates a valid url from path only urls
        # and adds missing scheme for // urls
        # FIX: use equality instead of 'is'/'is not' on str literals
        # (identity comparison with literals is a SyntaxWarning and only
        # worked through CPython interning)
        if stream_base and new_url[1] != '/':
            if new_url[0] == '/':
                new_url = new_url[1:]
            new_url = urljoin(stream_base, new_url)
        else:
            new_url = urljoin(base_url, new_url)
        return new_url

    def _make_url_list(self, old_list, base_url, url_type=''):
        """Repair, filter (black/whitelists, ads, duplicates) and
        deduplicate scraped URL candidates.

        :param old_list: scraped URL candidates
        :param base_url: URL of the page they were found on
        :param url_type: 'iframe' enables the iframe whitelists
        :return: sorted list of valid URLs
        """
        # START - List for not allowed URL Paths
        # --generic-blacklist-path
        if not hasattr(GenericCache, 'blacklist_path'):
            # static list
            blacklist_path = [
                ('bigo.tv', '/show.mp4'),
                ('expressen.se', '/_livetvpreview/'),
                ('facebook.com', '/connect'),
                ('facebook.com', '/plugins'),
                ('google.com', '/recaptcha/'),
                ('haber7.com', '/radyohome/station-widget/'),
                ('static.tvr.by', '/upload/video/atn/promo'),
                ('twitter.com', '/widgets'),
                ('vesti.ru', '/native_widget.html'),
                ('www.blogger.com', '/static'),
                ('youtube.com', '/['),
            ]

            # merge user and static list
            blacklist_path_user = self.get_option('blacklist_path')
            if blacklist_path_user is not None:
                blacklist_path = self.merge_path_list(
                    blacklist_path, blacklist_path_user)

            GenericCache.blacklist_path = blacklist_path
        # END

        blacklist_path_same = [
            ('player.vimeo.com', '/video/'),
            ('youtube.com', '/embed/'),
        ]

        # START - List of only allowed URL Paths for Iframes
        # --generic-whitelist-path
        if not hasattr(GenericCache, 'whitelist_path'):
            whitelist_path = []
            whitelist_path_user = self.get_option('whitelist_path')
            if whitelist_path_user is not None:
                whitelist_path = self.merge_path_list(
                    [], whitelist_path_user)
            GenericCache.whitelist_path = whitelist_path
        # END

        status_hls_session_reload = (
            self.session.get_option('hls-session-reload-time')
            or self.session.get_option('hls-session-reload-segment'))

        new_list = []
        for url in old_list:
            new_url = self.repair_url(url, base_url)
            # parse the url
            parse_new_url = urlparse(new_url)

            # START
            REMOVE = False
            if (new_url in GenericCache.cache_url_list
                    and status_hls_session_reload is None):
                # Removes an already used url
                # ignored if --hls-session-reload is used
                REMOVE = 'SAME-URL'
            elif (not parse_new_url.scheme.startswith(('http'))):
                # Allow only an url with a valid scheme
                REMOVE = 'SCHEME'
            elif (url_type == 'iframe'
                    and self.get_option('whitelist_netloc')
                    and parse_new_url.netloc.endswith(
                        tuple(self.get_option('whitelist_netloc'))) is False):
                # Allow only whitelisted domains for iFrames
                # --generic-whitelist-netloc
                REMOVE = 'WL-netloc'
            elif (url_type == 'iframe'
                    and GenericCache.whitelist_path
                    and self.compare_url_path(
                        parse_new_url, GenericCache.whitelist_path) is False):
                # Allow only whitelisted paths from a domain for iFrames
                # --generic-whitelist-path
                REMOVE = 'WL-path'
            elif (parse_new_url.netloc.endswith(self.blacklist_netloc)):
                # Removes blacklisted domains from a static list
                # self.blacklist_netloc
                REMOVE = 'BL-static'
            elif (self.get_option('blacklist_netloc')
                    and parse_new_url.netloc.endswith(
                        tuple(self.get_option('blacklist_netloc')))):
                # Removes blacklisted domains
                # --generic-blacklist-netloc
                REMOVE = 'BL-netloc'
            elif (self.compare_url_path(parse_new_url,
                                        GenericCache.blacklist_path) is True):
                # Removes blacklisted paths from a domain
                # --generic-blacklist-path
                REMOVE = 'BL-path'
            elif (parse_new_url.path.endswith(self.blacklist_endswith)):
                # Removes unwanted endswith images and chatrooms
                REMOVE = 'BL-ew'
            elif (self.get_option('blacklist_filepath')
                    and parse_new_url.path.endswith(
                        tuple(self.get_option('blacklist_filepath')))):
                # Removes blacklisted file paths
                # --generic-blacklist-filepath
                REMOVE = 'BL-filepath'
            elif (self._ads_path_re.match(parse_new_url.path)):
                # Removes obviously AD URL
                REMOVE = 'ADS'
            elif (self.compare_url_path(parse_new_url, blacklist_path_same,
                                        path_status='==') is True):
                # Removes blacklisted same paths from a domain
                REMOVE = 'BL-path-same'
            elif (parse_new_url.netloc == 'cdn.embedly.com'
                    and parse_new_url.path == '/widgets/media.html'):
                # do not use the direct URL for 'cdn.embedly.com',
                # search the query for a new URL
                params = dict(parse_qsl(parse_new_url.query))
                embedly_new_url = params.get('url') or params.get('src')
                if embedly_new_url:
                    new_list += [embedly_new_url]
                else:
                    log.error('Missing params URL or SRC for {0}'.format(new_url))
                continue
            else:
                # valid URL
                new_list += [new_url]
                continue

            log.debug('{0} - Removed: {1}'.format(REMOVE, new_url))
            # END

        # Remove duplicates
        log.debug('List length: {0} (with duplicates)'.format(len(new_list)))
        new_list = sorted(list(set(new_list)))
        return new_list

    def _window_location(self):
        """Return a javascript window.location.href redirect target,
        or False when the page has none."""
        match = self._window_location_re.search(self.html_text)
        if match:
            temp_url = urljoin(self.url, match.group('url'))
            log.debug('Found window_location: {0}'.format(temp_url))
            return temp_url

        log.trace('No window_location')
        return False

    def _resolve_playlist(self, playlist_all):
        """Yield streams for every valid playlist URL, limited to
        --generic-playlist-max per playlist type."""
        playlist_referer = self.get_option('playlist_referer') or self.url
        self.session.http.headers.update({'Referer': playlist_referer})

        playlist_max = self.get_option('playlist_max') or 5
        count_playlist = {
            'dash': 0,
            'hds': 0,
            'hls': 0,
            'http': 0,
        }

        o = urlparse(self.url)
        origin_tuple = (
            '.cloudfront.net',
            '.metube.id',
        )

        for url in playlist_all:
            parsed_url = urlparse(url)
            if parsed_url.netloc.endswith(origin_tuple):
                # these CDNs require an Origin header
                self.session.http.headers.update({
                    'Origin': '{0}://{1}'.format(o.scheme, o.netloc),
                })

            if (parsed_url.path.endswith(('.m3u8'))
                    or parsed_url.query.endswith(('.m3u8'))):
                if count_playlist['hls'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    streams = HLSStream.parse_variant_playlist(
                        self.session, url).items()
                    if not streams:
                        # playlist without variants
                        yield 'live', HLSStream(self.session, url)
                    for s in streams:
                        yield s
                    log.debug('HLS URL - {0}'.format(url))
                    count_playlist['hls'] += 1
                except Exception as e:
                    log.error('Skip HLS with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.f4m'))
                    or parsed_url.query.endswith(('.f4m'))):
                if count_playlist['hds'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    for s in HDSStream.parse_manifest(self.session,
                                                      url).items():
                        yield s
                    log.debug('HDS URL - {0}'.format(url))
                    count_playlist['hds'] += 1
                except Exception as e:
                    log.error('Skip HDS with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.mp3', '.mp4'))
                    or parsed_url.query.endswith(('.mp3', '.mp4'))):
                if count_playlist['http'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    name = 'vod'
                    m = self._httpstream_bitrate_re.search(url)
                    if m:
                        bitrate = m.group('bitrate')
                        resolution = m.group('resolution')
                        if bitrate:
                            if bitrate in self._httpstream_common_resolution_list:
                                # a common resolution digit without 'p'
                                name = '{0}p'.format(m.group('bitrate'))
                            else:
                                name = '{0}k'.format(m.group('bitrate'))
                        elif resolution:
                            name = resolution
                    yield name, HTTPStream(self.session, url)
                    log.debug('HTTP URL - {0}'.format(url))
                    count_playlist['http'] += 1
                except Exception as e:
                    log.error('Skip HTTP with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.mpd'))
                    or parsed_url.query.endswith(('.mpd'))):
                if count_playlist['dash'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    for s in DASHStream.parse_manifest(self.session,
                                                       url).items():
                        yield s
                    log.debug('DASH URL - {0}'.format(url))
                    count_playlist['dash'] += 1
                except Exception as e:
                    log.error('Skip DASH with error {0}'.format(str(e)))
            else:
                log.error('parsed URL - {0}'.format(url))

    def _res_text(self, url):
        """Download ``url`` and return the response body as text, working
        around gzip decoding errors and reporting 403/404 clearly.

        :raises NoStreamsError: on 403/404 responses
        """
        try:
            res = self.session.http.get(url, allow_redirects=True)
        except Exception as e:
            if 'Received response with content-encoding: gzip' in str(e):
                # server lied about the encoding - retry without gzip
                headers = {
                    'User-Agent': useragents.FIREFOX,
                    'Accept-Encoding': 'deflate'
                }
                res = self.session.http.get(url,
                                            headers=headers,
                                            allow_redirects=True)
            elif '403 Client Error' in str(e):
                log.error('Website Access Denied/Forbidden, you might be geo-'
                          'blocked or other params are missing.')
                raise NoStreamsError(self.url)
            elif '404 Client Error' in str(e):
                log.error('Website was not found, the link is broken or dead.')
                raise NoStreamsError(self.url)
            else:
                raise e

        if res.history:
            for resp in res.history:
                log.debug('Redirect: {0} - {1}'.format(resp.status_code,
                                                       resp.url))
            log.debug('URL: {0}'.format(res.url))
        return res.text

    def settings_url(self):
        """Apply per-site session tweaks: a site specific User-Agent and
        disabled SSL verification for known broken hosts."""
        o = urlparse(self.url)

        # User-Agent
        _android = []
        _chrome = []
        _ipad = []
        _iphone = [
            'bigo.tv',
        ]

        # only override the default User-Agent
        if self.session.http.headers['User-Agent'].startswith(
                'python-requests'):
            if o.netloc.endswith(tuple(_android)):
                self.session.http.headers.update(
                    {'User-Agent': useragents.ANDROID})
            elif o.netloc.endswith(tuple(_chrome)):
                self.session.http.headers.update(
                    {'User-Agent': useragents.CHROME})
            elif o.netloc.endswith(tuple(_ipad)):
                self.session.http.headers.update(
                    {'User-Agent': useragents.IPAD})
            elif o.netloc.endswith(tuple(_iphone)):
                self.session.http.headers.update(
                    {'User-Agent': useragents.IPHONE_6})
            else:
                self.session.http.headers.update(
                    {'User-Agent': useragents.FIREFOX})

        # SSL Verification - http.verify
        http_verify = [
            '.cdn.bg',
            'sportal.bg',
        ]
        if (o.netloc.endswith(tuple(http_verify))
                and self.session.http.verify):
            self.session.http.verify = False
            log.warning('SSL Verification disabled.')

    def get_title(self):
        """Return the page <title>, falling back to the URL."""
        if self.title is None:
            if not self.html_text:
                self.html_text = self._res_text(self.url)
            _title_re = re.compile(r'<title>(?P<title>[^<>]+)</title>')
            m = _title_re.search(self.html_text)
            if m:
                self.title = m.group('title')
            if self.title is None:
                # fallback if there is no <title>
                self.title = self.url
        return self.title

    def _get_streams(self):
        self.settings_url()

        if self._run <= 1:
            log.info('Version {0} - https://github.com/back-to/generic'.format(
                GENERIC_VERSION))
            log.debug('User-Agent: {0}'.format(
                self.session.http.headers['User-Agent']))

        new_url = False
        log.info(' {0}. URL={1}'.format(self._run, self.url))

        # GET website content
        self.html_text = self._res_text(self.url)
        # unpack common javascript codes
        self.html_text = unpack(self.html_text)

        # Playlist URL
        playlist_all = self._playlist_re.findall(self.html_text)
        if playlist_all:
            log.debug('Found Playlists: {0}'.format(len(playlist_all)))
            playlist_list = self._make_url_list(
                playlist_all,
                self.url,
                url_type='playlist',
            )
            if playlist_list:
                log.info('Found Playlists: {0} (valid)'.format(
                    len(playlist_list)))
                return self._resolve_playlist(playlist_list)
        else:
            log.trace('No Playlists')

        # iFrame URL
        iframe_list = self._iframe_re.findall(self.html_text)
        if iframe_list:
            log.debug('Found Iframes: {0}'.format(len(iframe_list)))
            # repair and filter iframe url list
            new_iframe_list = self._make_url_list(iframe_list,
                                                  self.url,
                                                  url_type='iframe')
            if new_iframe_list:
                number_iframes = len(new_iframe_list)
                if number_iframes == 1:
                    new_url = new_iframe_list[0]
                else:
                    log.info('--- IFRAMES ---')
                    for i, item in enumerate(new_iframe_list, start=1):
                        log.info('{0} - {1}'.format(i, item))
                    log.info('--- IFRAMES ---')

                    try:
                        number = int(self.input_ask(
                            'Choose an iframe number from above'
                        ).split(' ')[0])
                        new_url = new_iframe_list[number - 1]
                    except FatalPluginError:
                        new_url = new_iframe_list[0]
                    except ValueError:
                        log.error('invalid input answer')
                    except (IndexError, TypeError):
                        log.error('invalid input number')

                    if not new_url:
                        new_url = new_iframe_list[0]
        else:
            log.trace('No iframes')

        if not new_url:
            # search for window.location.href
            new_url = self._window_location()

        if new_url:
            # the Dailymotion Plugin does not work with this Referer
            if 'dailymotion.com' in new_url:
                del self.session.http.headers['Referer']
            return self.session.streams(new_url)

        raise NoPluginError
class Pixiv(Plugin):
    """Plugin for live streams on sketch.pixiv.net."""

    # <input name="post_key" value="..."> on the pixiv login form
    _post_key_re = re.compile(
        r"""name=["']post_key["']\svalue=["'](?P<data>[^"']+)["']""")

    # schema for a single streamer entry (owner or performer);
    # "hls_movie" is absent while the user is not actually live
    _user_dict_schema = validate.Schema({
        "user": {
            "unique_name": validate.text,
            "name": validate.all(validate.text,
                                 validate.transform(maybe_decode))
        },
        validate.optional("hls_movie"): {
            "url": validate.text
        }
    })

    _user_schema = validate.Schema({
        "owner": _user_dict_schema,
        "performers": [validate.any(_user_dict_schema, None)]
    })

    # unwraps {"data": {"lives": [...]}} down to the list of lives
    _data_lives_schema = validate.Schema({"data": {
        "lives": [_user_schema]
    }}, validate.get("data"), validate.get("lives"))

    api_lives = "https://sketch.pixiv.net/api/lives.json"
    login_url_get = "https://accounts.pixiv.net/login"
    login_url_post = "https://accounts.pixiv.net/api/login"

    arguments = PluginArguments(
        PluginArgument("sessionid",
                       requires=["devicetoken"],
                       sensitive=True,
                       metavar="SESSIONID",
                       help="""
        The pixiv.net sessionid that's used in pixivs PHPSESSID cookie.
        can be used instead of the username/password login process.
        """),
        PluginArgument("devicetoken",
                       sensitive=True,
                       metavar="DEVICETOKEN",
                       help="""
        The pixiv.net device token that's used in pixivs device_token cookie.
        can be used instead of the username/password login process.
        """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached Pixiv credentials to initiate a new session
        and reauthenticate.
        """),
        PluginArgument("performer",
                       metavar="USER",
                       help="""
        Select a co-host stream instead of the owner stream.
        """))

    def __init__(self, url):
        super(Pixiv, self).__init__(url)
        # treat the session as authenticated when both cookies are cached
        self._authed = (self.session.http.cookies.get("PHPSESSID")
                        and self.session.http.cookies.get("device_token"))
        self.session.http.headers.update({"Referer": self.url})

    def _login_using_session_id_and_device_token(self, session_id,
                                                 device_token):
        """Authenticate by injecting the user-supplied cookies and
        persisting them with save_cookies()."""
        # initial GET sets any additional cookies the site requires
        self.session.http.get(self.login_url_get)
        self.session.http.cookies.set('PHPSESSID',
                                      session_id,
                                      domain='.pixiv.net',
                                      path='/')
        self.session.http.cookies.set('device_token',
                                      device_token,
                                      domain='.pixiv.net',
                                      path='/')
        self.save_cookies()
        log.info("Successfully set sessionId and deviceToken")

    def hls_stream(self, hls_url):
        """Yield the HLS variant streams for the given playlist URL."""
        log.debug("URL={0}".format(hls_url))
        for s in HLSStream.parse_variant_playlist(self.session,
                                                  hls_url).items():
            yield s

    def get_streamer_data(self):
        """Return the lives.json entry matching the user in the URL.

        :raises NoStreamsError: when that user has no live entry
        """
        headers = {
            "X-Requested-With": "https://sketch.pixiv.net/lives",
        }
        res = self.session.http.get(self.api_lives, headers=headers)
        data = self.session.http.json(res, schema=self._data_lives_schema)
        log.debug("Found {0} streams".format(len(data)))

        for item in data:
            # NOTE(review): self.match presumably comes from the plugin
            # base class URL matcher with a "user" group - not visible here
            if item["owner"]["user"]["unique_name"] == self.match.group(
                    "user"):
                return item

        raise NoStreamsError(self.url)

    def _get_streams(self):
        login_session_id = self.get_option("sessionid")
        login_device_token = self.get_option("devicetoken")

        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed.")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif not self._authed and login_session_id and login_device_token:
            self._login_using_session_id_and_device_token(
                login_session_id, login_device_token)

        streamer_data = self.get_streamer_data()
        performers = streamer_data.get("performers")
        log.trace("{0!r}".format(streamer_data))
        if performers:
            co_hosts = []
            # create a list of all available performers
            for p in performers:
                co_hosts += [(p["user"]["unique_name"], p["user"]["name"])]

            log.info("Available hosts: {0}".format(", ".join(
                ["{0} ({1})".format(k, v) for k, v in co_hosts])))

            # control if the host from --pixiv-performer is valid,
            # if not let the User select a different host
            if (self.get_option("performer")
                    and not self.get_option("performer") in [
                        v[0] for v in co_hosts]):

                # print the owner as 0
                log.info("0 - {0} ({1})".format(
                    streamer_data["owner"]["user"]["unique_name"],
                    streamer_data["owner"]["user"]["name"]))
                # print all other performer
                for i, item in enumerate(co_hosts, start=1):
                    log.info("{0} - {1} ({2})".format(i, item[0], item[1]))

                try:
                    number = int(self.input_ask(
                        "Enter the number you'd like to watch").split(" ")[0])
                    if number == 0:
                        # default stream
                        self.set_option("performer", None)
                    else:
                        # other co-hosts
                        self.set_option("performer", co_hosts[number - 1][0])
                except FatalPluginError:
                    raise PluginError("Selected performer is invalid.")
                except (IndexError, ValueError, TypeError):
                    raise PluginError("Input is invalid")

        # ignore the owner stream, if a performer is selected
        # or use it when there are no other performers
        if not self.get_option("performer") or not performers:
            return self.hls_stream(streamer_data["owner"]["hls_movie"]["url"])

        # play a co-host stream
        if performers and self.get_option("performer"):
            for p in performers:
                if p["user"]["unique_name"] == self.get_option("performer"):
                    # if someone goes online at the same time as Streamlink
                    # was used, the hls URL might not be in the JSON data
                    hls_movie = p.get("hls_movie")
                    if hls_movie:
                        return self.hls_stream(hls_movie["url"])
class WWENetwork(Plugin):
    """Plugin for WWE Network (watch.wwe.com), which is backed by the
    Dice/IMG Gaming front office API."""

    site_config_re = re.compile(r'''">window.__data = (\{.*?\})</script>''')
    stream_url = "https://dce-frontoffice.imggaming.com/api/v2/stream/{id}"
    live_url = "https://dce-frontoffice.imggaming.com/api/v2/event/live"
    login_url = "https://dce-frontoffice.imggaming.com/api/v2/login"
    page_config_url = "https://cdn.watch.wwe.com/api/page"
    API_KEY = "cca51ea0-7837-40df-a055-75eb6347b2e7"

    customer_id = 16
    arguments = PluginArguments(
        PluginArgument(
            "email",
            required=True,
            metavar="EMAIL",
            requires=["password"],
            help="""
            The email associated with your WWE Network account,
            required to access any WWE Network stream.
            """),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="""
            A WWE Network account password to use with --wwenetwork-email.
            """))

    def __init__(self, url):
        super(WWENetwork, self).__init__(url)
        self.session.http.headers.update({"User-Agent": useragents.CHROME})
        self.auth_token = None
        # per-instance cache for the page config (see item_config)
        self._item_config = None

    def get_title(self):
        return self.item_config['title']

    def request(self, method, url, **kwargs):
        """Issue an API request with the required headers (API key,
        realm, auth token when logged in) and return the parsed JSON.

        Failures are not raised (raise_for_status=False); a non-200 API
        status is only logged and the payload returned as-is.
        """
        headers = kwargs.pop("headers", {})
        headers.update({"x-api-key": self.API_KEY,
                        "Origin": "https://watch.wwe.com",
                        "Referer": "https://watch.wwe.com/signin",
                        "Accept": "application/json",
                        "Realm": "dce.wwe"})
        if self.auth_token:
            headers["Authorization"] = "Bearer {0}".format(self.auth_token)

        kwargs["raise_for_status"] = False
        log.debug("API request: {0} {1}".format(method, url))
        res = self.session.http.request(method, url, headers=headers,
                                        **kwargs)
        data = self.session.http.json(res)

        if "status" in data and data["status"] != 200:
            log.debug("API request failed: {0}:{1} ({2})".format(
                data["status"], data.get("code"),
                "; ".join(data.get("messages", []))))
        return data

    def login(self, email, password):
        """Log in and store the auth token; returns the token or None."""
        log.debug("Attempting login as {0}".format(email))
        # sets some required cookies to login
        data = self.request('POST', self.login_url,
                            data=json.dumps({"id": email,
                                             "secret": password}),
                            headers={"Content-Type": "application/json"})
        if "authorisationToken" in data:
            self.auth_token = data["authorisationToken"]

        return self.auth_token

    @property
    def item_config(self):
        """Page config for the current URL, fetched once per instance.

        FIX: this was ``@property`` stacked on ``@lru_cache(maxsize=128)``,
        which keys the global LRU cache on ``self`` and therefore keeps
        every plugin instance alive for the cache's lifetime
        (flake8-bugbear B019). A plain instance attribute preserves the
        fetch-once behaviour without the leak.
        """
        if self._item_config is None:
            log.debug("Loading page config")
            p = urlparse(self.url)
            res = self.session.http.get(self.page_config_url,
                                        params=dict(
                                            device="web_browser",
                                            ff="idp,ldp",
                                            item_detail_expand="all",
                                            lang="en-US",
                                            list_page_size="1",
                                            max_list_prefetch="1",
                                            path=p.path,
                                            segments="es",
                                            sub="Registered",
                                            text_entry_format="html"))
            data = self.session.http.json(res)
            self._item_config = data["item"]
        return self._item_config

    def _get_media_info(self, content_id):
        """
        Get the info about the content, based on the ID

        :param content_id: contentId for the video
        :return: media info JSON from the playerUrlCallback endpoint
        """
        info = self.request('GET', self.stream_url.format(id=content_id))
        return self.request('GET', info.get("playerUrlCallback"))

    def _get_video_id(self):
        #  check the page to find the contentId
        log.debug("Searching for content ID")
        try:
            if self.item_config['type'] == "channel":
                return self._get_live_id()
            else:
                return "vod/{id}".format(
                    id=self.item_config['customFields']['DiceVideoId'])
        except KeyError:
            log.error("Could not find video ID")
            return

    def _get_live_id(self):
        """Return the content ID of the first currently live event."""
        log.debug("Loading live event")
        res = self.request('GET', self.live_url)
        for event in res.get('events', []):
            return "event/{sportId}/{propertyId}/{tournamentId}/{id}".format(
                **event)

    def _get_streams(self):
        if not self.login(self.get_option("email"),
                          self.get_option("password")):
            raise PluginError("Login failed")

        try:
            # optional ?startPoint=<seconds> in the page URL
            start_point = int(float(
                dict(parse_qsl(urlparse(self.url).query)).get(
                    "startPoint", 0.0)))
            if start_point > 0:
                log.info("Stream will start at {0}".format(
                    seconds_to_hhmmss(start_point)))
        except ValueError:
            start_point = 0

        content_id = self._get_video_id()

        if content_id:
            log.debug("Found content ID: {0}".format(content_id))
            info = self._get_media_info(content_id)
            if info.get("hlsUrl"):
                for s in HLSStream.parse_variant_playlist(
                        self.session, info["hlsUrl"],
                        start_offset=start_point).items():
                    yield s
            else:
                log.error("Could not find the HLS URL")
class BBCiPlayer(Plugin):
    """
    Allows streaming of live channels from bbc.co.uk/iplayer/live/* and of
    iPlayer programmes from bbc.co.uk/iplayer/episode/*
    """
    url_re = re.compile(r"""https?://(?:www\.)?bbc.co.uk/iplayer/
        (
            episode/(?P<episode_id>\w+)|
            live/(?P<channel_name>\w+)
        )
    """, re.VERBOSE)
    mediator_re = re.compile(
        r'window\.mediatorDefer\s*=\s*page\([^,]*,\s*({.*?})\);', re.DOTALL)
    tvip_re = re.compile(r'channel"\s*:\s*{\s*"id"\s*:\s*"(\w+?)"')
    tvip_master_re = re.compile(r'event_master_brand=(\w+?)&')
    account_locals_re = re.compile(r'window.bbcAccount.locals\s*=\s*({.*?});')
    swf_url = "http://emp.bbci.co.uk/emp/SMPf/1.18.3/StandardMediaPlayerChromelessFlash.swf"
    # Secret salt used together with the vpid to compute the mediaselector
    # access token (atk) -- see _hash_vpid().
    hash = base64.b64decode(
        b"N2RmZjc2NzFkMGM2OTdmZWRiMWQ5MDVkOWExMjE3MTk5MzhiOTJiZg==")
    api_url = ("http://open.live.bbc.co.uk/mediaselector/6/select/"
               "version/2.0/mediaset/{platform}/vpid/{vpid}/format/json/atk/{vpid_hash}/asn/1/")
    platforms = ("pc", "iptv-all")
    session_url = "https://session.bbc.com/session"
    auth_url = "https://account.bbc.com/signin"

    mediator_schema = validate.Schema(
        {
            "episode": {
                "versions": [{"id": validate.text}]
            }
        },
        validate.get("episode"), validate.get("versions"), validate.get(0),
        validate.get("id")
    )

    mediaselector_schema = validate.Schema(
        validate.transform(parse_json),
        {"media": [
            {"connection": [{
                validate.optional("href"): validate.url(),
                validate.optional("transferFormat"): validate.text
            }],
                "kind": validate.text}
        ]},
        validate.get("media"),
        validate.filter(lambda x: x["kind"] == "video")
    )

    arguments = PluginArguments(
        PluginArgument(
            "username",
            requires=["password"],
            metavar="USERNAME",
            help="The username used to register with bbc.co.uk."
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A bbc.co.uk account password to use with --bbciplayer-username.",
            prompt="Enter bbc.co.uk account password"
        ),
        PluginArgument(
            "hd",
            action="store_true",
            help="""
            Prefer HD streams over local SD streams, some live programmes may
            not be broadcast in HD.
            """
        ),
    )

    @classmethod
    def can_handle_url(cls, url):
        """ Confirm plugin can handle URL """
        return cls.url_re.match(url) is not None

    @classmethod
    def _hash_vpid(cls, vpid):
        """Compute the mediaselector access token (atk) for a vpid."""
        return sha1(cls.hash + str(vpid).encode("utf8")).hexdigest()

    @classmethod
    def _extract_nonce(cls, http_result):
        """
        Given an HTTP response from the session endpoint, extract the nonce, so we can "sign" requests with it.
        We don't really sign the requests in the traditional sense of a nonce, we just include them in the auth requests.

        :param http_result: HTTP response from the bbc session endpoint.
        :type http_result: requests.Response
        :return: nonce to "sign" url requests with
        :rtype: string
        """
        # Extract the redirect URL from the last call
        last_redirect_url = urlparse(http_result.history[-1].request.url)
        last_redirect_query = dict(parse_qsl(last_redirect_url.query))
        # Extract the nonce from the query string in the redirect URL
        final_url = urlparse(last_redirect_query['goto'])
        goto_url = dict(parse_qsl(final_url.query))
        goto_url_query = parse_json(goto_url['state'])

        # Return the nonce we can use for future queries
        return goto_url_query['nonce']

    def find_vpid(self, url, res=None):
        """
        Find the Video Packet ID in the HTML for the provided URL

        :param url: URL to download, if res is not provided.
        :param res: Provide a cached version of the HTTP response to search
        :type url: string
        :type res: requests.Response
        :return: Video Packet ID for a Programme in iPlayer, or None
        :rtype: string
        """
        log.debug("Looking for vpid on {0}", url)
        # Use pre-fetched page if available
        res = res or http.get(url)
        m = self.mediator_re.search(res.text)
        vpid = m and parse_json(m.group(1), schema=self.mediator_schema)
        return vpid

    def find_tvip(self, url, master=False):
        """Find the TV brand id for a live channel page (None if not found)."""
        log.debug("Looking for {0} tvip on {1}", "master" if master else "", url)
        res = http.get(url)
        if master:
            m = self.tvip_master_re.search(res.text)
        else:
            m = self.tvip_re.search(res.text)
        return m and m.group(1)

    def mediaselector(self, vpid):
        """
        Query the mediaselector API for every supported platform and yield
        (quality, stream) pairs for all playable connection URLs.
        """
        urls = defaultdict(set)
        for platform in self.platforms:
            url = self.api_url.format(vpid=vpid, vpid_hash=self._hash_vpid(vpid),
                                      platform=platform)
            log.debug("Info API request: {0}", url)
            medias = http.get(url, schema=self.mediaselector_schema)
            for media in medias:
                for connection in media["connection"]:
                    urls[connection.get("transferFormat")].add(connection["href"])

        # Fix: use a distinct name for the per-type URL set instead of
        # shadowing the `urls` mapping while iterating it.
        for stream_type, stream_urls in urls.items():
            log.debug("{0} {1} streams", len(stream_urls), stream_type)
            for url in list(stream_urls):
                try:
                    if stream_type == "hds":
                        for s in HDSStream.parse_manifest(self.session,
                                                          url).items():
                            yield s
                    if stream_type == "hls":
                        for s in HLSStream.parse_variant_playlist(self.session,
                                                                  url).items():
                            yield s
                    if stream_type == "dash":
                        for s in DASHStream.parse_manifest(self.session,
                                                           url).items():
                            yield s
                    log.debug(" OK: {0}", url)
                # Fix: narrowed from a bare `except:` -- a failing CDN or
                # manifest must not abort the remaining candidate URLs, but
                # SystemExit/KeyboardInterrupt should still propagate.
                except Exception:
                    log.debug(" FAIL: {0}", url)

    def login(self, ptrt_url):
        """
        Create session using BBC ID. See https://www.bbc.co.uk/usingthebbc/account/

        :param ptrt_url: The snapback URL to redirect to after successful authentication
        :type ptrt_url: string
        :return: Whether authentication was successful
        :rtype: bool
        """
        session_res = http.get(
            self.session_url,
            params=dict(ptrt=ptrt_url)
        )

        http_nonce = self._extract_nonce(session_res)

        res = http.post(
            self.auth_url,
            params=dict(
                ptrt=ptrt_url,
                nonce=http_nonce
            ),
            data=dict(
                jsEnabled=True,
                username=self.get_option("username"),
                password=self.get_option('password'),
                attempts=0
            ),
            headers={"Referer": self.url})

        # A redirect in the history means the signin was accepted
        return len(res.history) != 0

    def _get_streams(self):
        """Authenticate, then resolve either an episode vpid or a live tvip."""
        if not self.get_option("username"):
            log.error(
                "BBC iPlayer requires an account you must login using "
                "--bbciplayer-username and --bbciplayer-password")
            return
        log.info(
            "A TV License is required to watch BBC iPlayer streams, see the BBC website for more "
            "information: https://www.bbc.co.uk/iplayer/help/tvlicence")
        if not self.login(self.url):
            log.error(
                "Could not authenticate, check your username and password")
            return

        m = self.url_re.match(self.url)
        episode_id = m.group("episode_id")
        channel_name = m.group("channel_name")

        if episode_id:
            log.debug("Loading streams for episode: {0}", episode_id)
            vpid = self.find_vpid(self.url)
            if vpid:
                log.debug("Found VPID: {0}", vpid)
                for s in self.mediaselector(vpid):
                    yield s
            else:
                log.error("Could not find VPID for episode {0}", episode_id)
        elif channel_name:
            log.debug("Loading stream for live channel: {0}", channel_name)
            if self.get_option("hd"):
                # Fix: find_tvip() can return None, and the original
                # unconditionally concatenated "_hd", raising TypeError.
                # Only try the HD brand when a master tvip was found,
                # otherwise fall through to the SD lookup below.
                master_tvip = self.find_tvip(self.url, master=True)
                if master_tvip:
                    tvip = master_tvip + "_hd"
                    log.debug("Trying HD stream {0}...", tvip)
                    try:
                        for s in self.mediaselector(tvip):
                            yield s
                    except PluginError:
                        log.error(
                            "Failed to get HD streams, falling back to SD")
                    else:
                        return
            tvip = self.find_tvip(self.url)
            if tvip:
                log.debug("Found TVIP: {0}", tvip)
                for s in self.mediaselector(tvip):
                    yield s
class Twitch(Plugin):
    """Plugin for Twitch.tv live channels, VODs and clips."""

    arguments = PluginArguments(
        PluginArgument("disable-hosting",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is hosting another channel.
        """),
        PluginArgument("disable-ads",
                       action="store_true",
                       help="""
        Skip embedded advertisement segments at the beginning or during a stream.
        Will cause these segments to be missing from the stream.
        """),
        PluginArgument("disable-reruns",
                       action="store_true",
                       help="""
        Do not open the stream if the target channel is currently broadcasting a rerun.
        """),
        PluginArgument("low-latency",
                       action="store_true",
                       help=f"""
        Enables low latency streaming by prefetching HLS segments.
        Sets --hls-segment-stream-data to true and --hls-live-edge to {LOW_LATENCY_MAX_LIVE_EDGE}, if it is higher.
        Reducing --hls-live-edge to 1 will result in the lowest latency possible, but will most likely cause buffering.

        In order to achieve true low latency streaming during playback, the player's caching/buffering settings will
        need to be adjusted and reduced to a value as low as possible, but still high enough to not cause any buffering.
        This depends on the stream's bitrate and the quality of the connection to Twitch's servers. Please refer to the
        player's own documentation for the required configuration. Player parameters can be set via --player-args.

        Note: Low latency streams have to be enabled by the broadcasters on Twitch themselves.
        Regular streams can cause buffering issues with this option enabled due to the reduced --hls-live-edge value.
        """),
        PluginArgument("api-header",
                       metavar="KEY=VALUE",
                       type=keyvalue,
                       action="append",
                       help="""
        A header to add to each Twitch API HTTP request.
        Can be repeated to add multiple headers.
        """))

    def __init__(self, url):
        """Parse the URL into channel / video_id / clip_name and set up API helpers."""
        super().__init__(url)
        match = self.match.groupdict()
        parsed = urlparse(url)
        self.params = parse_qsd(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self.channel = None
        self.clip_name = None
        # guards the lazy, one-shot metadata lookup wired up below
        self._checked_metadata = False

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                self.video_id = self.params["video"]
            self.channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            self.channel = match.get("channel") and match.get("channel").lower()
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(session=self.session)
        self.usher = UsherService(session=self.session)

        def method_factory(parent_method):
            # Wrap a parent getter so that the first call triggers a single
            # metadata fetch before delegating to the parent implementation.
            def inner():
                if not self._checked_metadata:
                    self._checked_metadata = True
                    self._get_metadata()
                return parent_method()

            return inner

        parent = super()
        # Override get_id/get_author/get_category/get_title on this instance
        # with lazily-resolving wrappers.
        for metadata in "id", "author", "category", "title":
            method = f"get_{metadata}"
            setattr(self, method, method_factory(getattr(parent, method)))

    def _get_metadata(self):
        """Populate id/author/category/title from the API; errors are ignored."""
        try:
            if self.video_id:
                data = self.api.metadata_video(self.video_id)
            elif self.clip_name:
                data = self.api.metadata_clips(self.clip_name)
            elif self.channel:
                data = self.api.metadata_channel(self.channel)
            else:  # pragma: no cover
                return
            self.id, self.author, self.category, self.title = data
        except (PluginError, TypeError):
            # best-effort: missing metadata must not break stream resolution
            pass

    def _access_token(self, is_live, channel_or_vod):
        """
        Fetch the access token for a channel or VOD.

        Returns (sig, token, restricted_bitrates); raises NoStreamsError when
        no token could be acquired.
        """
        try:
            sig, token = self.api.access_token(is_live, channel_or_vod)
        except (PluginError, TypeError):
            raise NoStreamsError(self.url)

        try:
            restricted_bitrates = self.api.parse_token(token)
        except PluginError:
            restricted_bitrates = []

        return sig, token, restricted_bitrates

    def _switch_to_hosted_channel(self):
        """
        Follow the hosting chain, updating self.channel along the way.

        Returns True when playback should be aborted (hosting disabled by
        option, or a hosting loop was detected), False otherwise.
        """
        disabled = self.options.get("disable_hosting")
        hosted_chain = [self.channel]
        while True:
            try:
                login, display_name = self.api.hosted_channel(self.channel)
            except PluginError:
                # no (further) hosting information: stay on current channel
                return False
            log.info(f"{self.channel} is hosting {login}")
            if disabled:
                log.info("hosting was disabled by command line option")
                return True

            if login in hosted_chain:
                loop = " -> ".join(hosted_chain + [login])
                log.error(
                    f"A loop of hosted channels has been detected, cannot find a playable stream. ({loop})"
                )
                return True

            hosted_chain.append(login)
            log.info(f"switching to {login}")
            self.channel = login
            self.author = display_name

    def _check_for_rerun(self):
        """Return True when reruns are disabled and the channel is not live."""
        if not self.options.get("disable_reruns"):
            return False

        try:
            stream = self.api.stream_metadata(self.channel)
            if stream["type"] != "live":
                log.info("Reruns were disabled by command line option")
                return True
        except (PluginError, TypeError):
            # metadata lookup failed: assume it is not a rerun
            pass

        return False

    def _get_hls_streams_live(self):
        """Resolve live HLS streams for self.channel (after hosting/rerun checks)."""
        if self._switch_to_hosted_channel():
            return
        if self._check_for_rerun():
            return

        # only get the token once the channel has been resolved
        log.debug(f"Getting live HLS streams for {self.channel}")
        self.session.http.headers.update({
            "referer": "https://player.twitch.tv",
            "origin": "https://player.twitch.tv",
        })
        sig, token, restricted_bitrates = self._access_token(True, self.channel)
        url = self.usher.channel(self.channel, sig=sig, token=token, fast_bread=True)

        return self._get_hls_streams(url, restricted_bitrates)

    def _get_hls_streams_video(self):
        """Resolve HLS streams for a VOD by self.video_id."""
        log.debug(f"Getting HLS streams for video ID {self.video_id}")
        sig, token, restricted_bitrates = self._access_token(False, self.video_id)
        url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)

        # If the stream is a VOD that is still being recorded, the stream should start at the beginning of the recording
        return self._get_hls_streams(url, restricted_bitrates, force_restart=True)

    def _get_hls_streams(self, url, restricted_bitrates, **extra_params):
        """
        Parse the variant playlist at `url`, honouring a ?t= start offset and
        warning about subscriber-only qualities listed in restricted_bitrates.
        """
        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

        try:
            streams = TwitchHLSStream.parse_variant_playlist(
                self.session,
                url,
                start_offset=time_offset,
                **extra_params)
        except OSError as err:
            err = str(err)
            # A 404 / unparsable playlist simply means "no streams"
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                return
            else:
                raise PluginError(err)

        for name in restricted_bitrates:
            if name not in streams:
                log.warning(
                    f"The quality '{name}' is not available since it requires a subscription."
                )

        return streams

    def _get_clips(self):
        """Yield (quality, HTTPStream) pairs for a clip, signed with sig/token."""
        try:
            sig, token, streams = self.api.clips(self.clip_name)
        except (PluginError, TypeError):
            return

        for quality, stream in streams:
            yield quality, HTTPStream(
                self.session,
                update_qsd(stream, {
                    "sig": sig,
                    "token": token
                }))

    def _get_streams(self):
        """Dispatch to VOD, clip or live resolution based on the parsed URL."""
        if self.video_id:
            return self._get_hls_streams_video()
        elif self.clip_name:
            return self._get_clips()
        elif self.channel:
            return self._get_hls_streams_live()
class Zattoo(Plugin):
    """Plugin for the Zattoo web TV platform (and whitelabel domains)."""

    # stream types supported by the zapi watch endpoint
    STREAMS_ZATTOO = ['dash', 'hls7']

    # re-validate the session every 2 hours; cache credentials for 30 days
    TIME_CONTROL = 60 * 60 * 2
    TIME_SESSION = 60 * 60 * 24 * 30

    arguments = PluginArguments(
        PluginArgument("email",
                       requires=["password"],
                       metavar="EMAIL",
                       help="""
        The email associated with your zattoo account, required to access any zattoo stream.
        """),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
        A zattoo account password to use with --zattoo-email.
        """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached zattoo credentials to initiate a new session and reauthenticate.
        """),
        PluginArgument('stream-types',
                       metavar='TYPES',
                       type=comma_list_filter(STREAMS_ZATTOO),
                       default=['dash'],
                       help='''
        A comma-delimited list of stream types which should be used, the following types are allowed:

        - {0}

        Default is "dash".
        '''.format('\n - '.join(STREAMS_ZATTOO))))

    def __init__(self, url):
        """Load cached session attributes for this domain and derive auth state."""
        super().__init__(url)
        self.domain = self.match.group('base_url')
        self._session_attributes = Cache(
            filename='plugin-cache.json',
            key_prefix='zattoo:attributes:{0}'.format(self.domain))
        self._uuid = self._session_attributes.get('uuid')
        # authenticated only when both cached attributes and both session
        # cookies for this domain are present
        self._authed = (self._session_attributes.get('power_guide_hash')
                        and self._uuid
                        and self.session.http.cookies.get('pzuid', domain=self.domain)
                        and self.session.http.cookies.get('beaker.session.id', domain=self.domain))
        self._session_control = self._session_attributes.get('session_control', False)
        self.base_url = 'https://{0}'.format(self.domain)
        self.headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.base_url
        }

    def _hello(self):
        """Fetch an app token and open a zapi session (hello handshake)."""
        log.debug('_hello ...')

        app_token = self.session.http.get(
            f'{self.base_url}/token.json',
            schema=validate.Schema(
                validate.parse_json(),
                {
                    'success': bool,
                    'session_token': str,
                },
                validate.get('session_token')))

        if self._uuid:
            __uuid = self._uuid
        else:
            # first run for this domain: generate and cache a client uuid
            __uuid = str(uuid.uuid4())
            self._session_attributes.set('uuid', __uuid, expires=self.TIME_SESSION)

        params = {
            'app_version': '3.2120.1',
            'client_app_token': app_token,
            'format': 'json',
            'lang': 'en',
            'uuid': __uuid,
        }
        res = self.session.http.post(
            f'{self.base_url}/zapi/v3/session/hello',
            headers=self.headers,
            data=params,
            schema=validate.Schema(
                validate.parse_json(),
                validate.any({'active': bool}, {'success': bool})))

        if res.get('active') or res.get('success'):
            log.debug('Hello was successful.')
        else:
            log.debug('Hello failed.')

    def _login(self, email, password):
        """Log in with email/password; on success cache cookies and hash."""
        log.debug('_login ...')

        data = self.session.http.post(
            f'{self.base_url}/zapi/v3/account/login',
            headers=self.headers,
            data={
                'login': email,
                'password': password,
                'remember': 'true',
                'format': 'json',
            },
            acceptable_status=(200, 400),
            schema=validate.Schema(
                validate.parse_json(),
                validate.any(
                    {
                        'active': bool,
                        'power_guide_hash': str
                    },
                    {'success': bool},
                )),
        )

        if data.get('active'):
            log.debug('Login was successful.')
        else:
            log.debug('Login failed.')
            return

        self._authed = data['active']
        self.save_cookies(default_expires=self.TIME_SESSION)
        self._session_attributes.set('power_guide_hash',
                                     data['power_guide_hash'],
                                     expires=self.TIME_SESSION)
        self._session_attributes.set('session_control',
                                     True,
                                     expires=self.TIME_CONTROL)

    def _watch(self):
        """Request watch URLs for a channel, VOD or recording and yield streams."""
        log.debug('_watch ...')

        channel = self.match.group('channel')
        vod_id = self.match.group('vod_id')
        recording_id = self.match.group('recording_id')

        params = {'https_watch_urls': True}
        if channel:
            watch_url = f'{self.base_url}/zapi/watch'
            params_cid = self._get_params_cid(channel)
            if not params_cid:
                return
            params.update(params_cid)
        elif vod_id:
            log.debug('Found vod_id: {0}'.format(vod_id))
            watch_url = f'{self.base_url}/zapi/avod/videos/{vod_id}/watch'
        elif recording_id:
            log.debug('Found recording_id: {0}'.format(recording_id))
            watch_url = f'{self.base_url}/zapi/watch/recording/{recording_id}'
        else:
            log.debug('Missing watch_url')
            return

        zattoo_stream_types = self.get_option('stream-types')
        for stream_type in zattoo_stream_types:
            params_stream_type = {'stream_type': stream_type}
            params.update(params_stream_type)

            data = self.session.http.post(
                watch_url,
                headers=self.headers,
                data=params,
                acceptable_status=(200, 402, 403, 404),
                schema=validate.Schema(
                    validate.parse_json(),
                    validate.any(
                        {
                            'success': validate.transform(bool),
                            'stream': {
                                'watch_urls': [{
                                    'url': validate.url(),
                                    validate.optional('maxrate'): int,
                                    validate.optional('audio_channel'): str,
                                }],
                                validate.optional('quality'): str,
                            },
                        },
                        {
                            'success': validate.transform(bool),
                            'internal_code': int,
                            validate.optional('http_status'): int,
                        })),
            )

            if not data['success']:
                # map known zapi error codes to user-facing messages;
                # unknown errors force a session reset and retry with the
                # next stream type
                if data['internal_code'] == 401:
                    log.error(f'invalid stream_type {stream_type}')
                elif data['internal_code'] == 421:
                    log.error(
                        'Unfortunately streaming is not permitted in this country or this channel does not exist.'
                    )
                elif data['internal_code'] == 422:
                    log.error('Paid subscription required for this channel.')
                    log.info(
                        'If paid subscription exist, use --zattoo-purge-credentials to start a new session.'
                    )
                else:
                    log.debug(f'unknown error {data!r}')
                    log.debug('Force session reset for watch_url')
                    self.reset_session()
                continue

            log.debug(f'Found data for {stream_type}')
            if stream_type == 'hls7':
                for url in data['stream']['watch_urls']:
                    yield from HLSStream.parse_variant_playlist(
                        self.session, url['url']).items()
            elif stream_type == 'dash':
                for url in data['stream']['watch_urls']:
                    yield from DASHStream.parse_manifest(
                        self.session, url['url']).items()

    def _get_params_cid(self, channel):
        """
        Resolve a channel alias to its cid via the cached channel list.

        Returns {'cid': ...} on success or False when the channel list
        request failed (which also resets the session).
        """
        log.debug('get channel ID for {0}'.format(channel))
        try:
            res = self.session.http.get(
                f'{self.base_url}/zapi/v2/cached/channels/{self._session_attributes.get("power_guide_hash")}',
                headers=self.headers,
                params={'details': 'False'})
        except Exception:
            log.debug('Force session reset for _get_params_cid')
            self.reset_session()
            return False

        data = self.session.http.json(
            res,
            schema=validate.Schema(
                {
                    'success': validate.transform(bool),
                    'channel_groups': [{
                        'channels': [
                            {
                                'display_alias': str,
                                'cid': str,
                                'qualities': [{
                                    'title': str,
                                    # drop DRM-only stream types
                                    'stream_types': validate.all(
                                        [str],
                                        validate.filter(lambda n: not re.match(
                                            r"(.+_(?:fairplay|playready|widevine))", n))),
                                    'level': str,
                                    'availability': str,
                                }],
                            },
                        ],
                    }]
                },
                validate.get('channel_groups'),
            ))

        c_list = []
        for d in data:
            for c in d['channels']:
                c_list.append(c)

        cid = None
        zattoo_list = []
        for c in c_list:
            zattoo_list.append(c['display_alias'])
            if c['display_alias'] == channel:
                cid = c['cid']
                log.debug(f'{c!r}')

        log.trace('Available zattoo channels in this country: {0}'.format(
            ', '.join(sorted(zattoo_list))))

        if not cid:
            # fall back to the raw channel name when no alias matched
            cid = channel

        log.debug('CHANNEL ID: {0}'.format(cid))
        return {'cid': cid}

    def reset_session(self):
        """Drop all cached session attributes and cookies."""
        self._session_attributes.set('power_guide_hash', None, expires=0)
        self._session_attributes.set('uuid', None, expires=0)
        self.clear_cookies()
        self._authed = False

    def _get_streams(self):
        """Validate/establish the session, then delegate to _watch()."""
        email = self.get_option('email')
        password = self.get_option('password')

        if self.options.get('purge_credentials'):
            self.reset_session()
            log.info('All credentials were successfully removed.')
        elif (self._authed and not self._session_control):
            # check every two hours, if the session is actually valid
            log.debug('Session control for {0}'.format(self.domain))
            active = self.session.http.get(
                f'{self.base_url}/zapi/v3/session',
                schema=validate.Schema(
                    validate.parse_json(),
                    {'active': bool},
                    validate.get('active')))
            if active:
                self._session_attributes.set('session_control',
                                             True,
                                             expires=self.TIME_CONTROL)
                log.debug('User is logged in')
            else:
                log.debug('User is not logged in')
                self._authed = False

        if not self._authed and (not email and not password):
            log.error(
                'A login for Zattoo is required, use --zattoo-email EMAIL'
                ' --zattoo-password PASSWORD to set them')
            return

        if not self._authed:
            self._hello()
            self._login(email, password)

        if self._authed:
            return self._watch()
class Crunchyroll(Plugin):
    """Plugin for Crunchyroll media pages, with optional account login."""

    arguments = PluginArguments(
        PluginArgument(
            "username",
            metavar="USERNAME",
            requires=["password"],
            help="A Crunchyroll username to allow access to restricted streams."
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            nargs="?",
            const=None,
            default=None,
            help="""
            A Crunchyroll password for use with --crunchyroll-username.

            If left blank you will be prompted.
            """),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached Crunchyroll credentials to initiate a new session
            and reauthenticate.
            """),
        PluginArgument(
            "session-id",
            sensitive=True,
            metavar="SESSION_ID",
            help="""
            Set a specific session ID for crunchyroll, can be used to bypass
            region restrictions. If using an authenticated session ID, it is
            recommended that the authentication parameters be omitted as the
            session ID is account specific.

            Note: The session ID will be overwritten if authentication is used
            and the session ID does not match the account.
            """))

    @classmethod
    def stream_weight(cls, key):
        """Weight Crunchyroll-specific quality names; defer to Plugin otherwise."""
        weight = STREAM_WEIGHTS.get(key)
        if weight:
            return weight, "crunchyroll"
        return Plugin.stream_weight(key)

    def _get_streams(self):
        """
        Look up the media info for the URL's media_id and build the quality ->
        stream mapping from both the adaptive playlist and the individual
        quality entries.

        :raises PluginError: on API lookup or media errors
        """
        api = self._create_api()
        media_id = int(self.match.group("media_id"))

        try:
            # the media.stream_data field is required, no stream data is returned otherwise
            info = api.get_info(media_id, fields=[
                "media.name", "media.series_name",
                "media.media_type", "media.stream_data"
            ], schema=_media_schema)
        except CrunchyrollAPIError as err:
            raise PluginError(f"Media lookup error: {err.msg}")

        if not info:
            return

        streams = {}

        self.title = info.get("name")
        self.author = info.get("series_name")
        self.category = info.get("media_type")

        info = info["stream_data"]

        # The adaptive quality stream is sometimes a subset of all the other
        # streams listed; ultra is not included.
        has_adaptive = any(
            [s["quality"] == "adaptive" for s in info["streams"]])
        if has_adaptive:
            log.debug("Loading streams from adaptive playlist")
            for stream in filter(lambda x: x["quality"] == "adaptive",
                                 info["streams"]):
                for q, s in HLSStream.parse_variant_playlist(
                        self.session, stream["url"]).items():
                    # rename the bitrates to low, mid, or high. ultra doesn't
                    # seem to appear in the adaptive streams
                    name = STREAM_NAMES.get(q, q)
                    streams[name] = s

        # Always parse the individual non-adaptive results too, so qualities
        # missing from the adaptive playlist (e.g. ultra) are still picked up.
        for stream in info["streams"]:
            if stream["quality"] != "adaptive":
                # the video_encode_id indicates that the stream is not a variant playlist
                if "video_encode_id" in stream:
                    streams[stream["quality"]] = HLSStream(
                        self.session, stream["url"])
                else:
                    # otherwise the stream url is actually a list of stream qualities
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, stream["url"]).items():
                        # rename the bitrates to low, mid, or high. ultra doesn't
                        # seem to appear in the adaptive streams
                        name = STREAM_NAMES.get(q, q)
                        streams[name] = s

        return streams

    def _create_api(self):
        """Creates a new CrunchyrollAPI object, initiates it's session and
        tries to authenticate it either by using saved credentials or the
        user's username and password.
        """
        if self.options.get("purge_credentials"):
            # Fix: the original cleared "session_id" twice; once is enough.
            self.cache.set("session_id", None, 0)
            self.cache.set("auth", None, 0)

        # use the crunchyroll locale as an override, for backwards compatibility
        locale = self.get_option(
            "locale") or self.session.localization.language_code
        api = CrunchyrollAPI(self.cache,
                             self.session,
                             session_id=self.get_option("session_id"),
                             locale=locale)

        if not self.get_option("session_id"):
            log.debug(f"Creating session with locale: {locale}")
            api.start_session()

            if api.auth:
                log.debug("Using saved credentials")
                login = api.authenticate()
                if login:
                    login_name = login["user"]["username"] or login["user"]["email"]
                    log.info(f"Successfully logged in as '{login_name}'")
            if not api.auth and self.options.get("username"):
                try:
                    log.debug("Attempting to login using username and password")
                    api.login(self.options.get("username"),
                              self.options.get("password"))
                    login = api.authenticate()
                    login_name = login["user"]["username"] or login["user"]["email"]
                    log.info(f"Logged in as '{login_name}'")
                except CrunchyrollAPIError as err:
                    raise PluginError(f"Authentication error: {err.msg}")
            if not api.auth:
                log.warning(
                    "No authentication provided, you won't be able to access "
                    "premium restricted content")

        return api
class Pixiv(Plugin):
    """Plugin for https://sketch.pixiv.net/lives"""

    _url_re = re.compile(r"https?://sketch\.pixiv\.net/@?(?P<user>[^/]+)")
    _post_key_re = re.compile(
        r"""name=["']post_key["']\svalue=["'](?P<data>[^"']+)["']""")

    # schema for a single user entry (owner or performer)
    _user_dict_schema = validate.Schema(
        {
            "user": {
                "unique_name": validate.text,
                "name": validate.all(validate.text,
                                     validate.transform(maybe_decode))
            },
            validate.optional("hls_movie"): {
                "url": validate.text
            }
        }
    )

    _user_schema = validate.Schema(
        {
            "owner": _user_dict_schema,
            "performers": [
                validate.any(_user_dict_schema, None)
            ]
        }
    )

    _data_lives_schema = validate.Schema(
        {
            "data": {
                "lives": [_user_schema]
            }
        },
        validate.get("data"),
        validate.get("lives")
    )

    api_lives = "https://sketch.pixiv.net/api/lives.json"
    login_url_get = "https://accounts.pixiv.net/login"
    login_url_post = "https://accounts.pixiv.net/api/login"

    arguments = PluginArguments(
        PluginArgument(
            "username",
            requires=["password"],
            metavar="USERNAME",
            help="""
            The email/username used to register with pixiv.net
            """
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="""
            A pixiv.net account password to use with --pixiv-username
            """
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached Pixiv credentials to initiate a new session
            and reauthenticate.
            """),
        PluginArgument(
            "performer",
            metavar="USER",
            help="""
            Select a co-host stream instead of the owner stream.
            """)
    )

    def __init__(self, url):
        """Derive cached-cookie auth state and set browser-like headers."""
        super(Pixiv, self).__init__(url)
        self._authed = (self.session.http.cookies.get("PHPSESSID")
                        and self.session.http.cookies.get("device_token"))
        self.session.http.headers.update({
            "User-Agent": useragents.FIREFOX,
            "Referer": self.url
        })

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _login(self, username, password):
        """
        Log into pixiv.net with username/password and cache the cookies.

        :raises PluginError: when the login form's post_key cannot be found
        """
        res = self.session.http.get(self.login_url_get)
        m = self._post_key_re.search(res.text)
        if not m:
            # Fix: corrected typo in the error message ("posible")
            raise PluginError("Missing post_key, no login possible.")

        post_key = m.group("data")
        data = {
            "lang": "en",
            "source": "sketch",
            "post_key": post_key,
            "pixiv_id": username,
            "password": password,
        }

        res = self.session.http.post(self.login_url_post, data=data)
        res = self.session.http.json(res)

        log.trace("{0!r}".format(res))
        if res["body"].get("success"):
            self.save_cookies()
            log.info("Successfully logged in")
        else:
            log.error("Failed to log in.")

    def hls_stream(self, hls_url):
        """Yield (quality, stream) pairs from a variant HLS playlist URL."""
        log.debug("URL={0}".format(hls_url))
        for s in HLSStream.parse_variant_playlist(self.session,
                                                  hls_url).items():
            yield s

    def get_streamer_data(self):
        """
        Return the live entry whose owner matches the user in the URL.

        :raises NoStreamsError: when the user is not in the current lives list
        """
        res = self.session.http.get(self.api_lives)
        data = self.session.http.json(res, schema=self._data_lives_schema)
        log.debug("Found {0} streams".format(len(data)))

        m = self._url_re.match(self.url)
        for item in data:
            if item["owner"]["user"]["unique_name"] == m.group("user"):
                return item

        raise NoStreamsError(self.url)

    def _get_streams(self):
        """Authenticate if needed, then play the owner or a selected co-host."""
        login_username = self.get_option("username")
        login_password = self.get_option("password")
        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed.")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif not self._authed and login_username and login_password:
            self._login(login_username, login_password)

        streamer_data = self.get_streamer_data()
        performers = streamer_data.get("performers")
        log.trace("{0!r}".format(streamer_data))
        if performers:
            co_hosts = []
            # create a list of all available performers
            for p in performers:
                co_hosts += [(p["user"]["unique_name"], p["user"]["name"])]

            log.info("Available hosts: {0}".format(", ".join(
                ["{0} ({1})".format(k, v) for k, v in co_hosts])))

            # control if the host from --pixiv-performer is valid,
            # if not let the User select a different host
            if (self.get_option("performer")
                    and not self.get_option("performer") in [v[0] for v in co_hosts]):

                # print the owner as 0
                log.info("0 - {0} ({1})".format(
                    streamer_data["owner"]["user"]["unique_name"],
                    streamer_data["owner"]["user"]["name"]))
                # print all other performers
                for i, item in enumerate(co_hosts, start=1):
                    log.info("{0} - {1} ({2})".format(i, item[0], item[1]))

                try:
                    number = int(self.input_ask(
                        "Enter the number you'd like to watch").split(" ")[0])
                    if number == 0:
                        # default stream
                        self.set_option("performer", None)
                    else:
                        # other co-hosts
                        self.set_option("performer", co_hosts[number - 1][0])
                except FatalPluginError:
                    raise PluginError("Selected performer is invalid.")
                except (IndexError, ValueError, TypeError):
                    raise PluginError("Input is invalid")

        # ignore the owner stream, if a performer is selected
        # or use it when there are no other performers
        if not self.get_option("performer") or not performers:
            return self.hls_stream(streamer_data["owner"]["hls_movie"]["url"])

        # play a co-host stream
        if performers and self.get_option("performer"):
            for p in performers:
                if p["user"]["unique_name"] == self.get_option("performer"):
                    # if someone goes online at the same time as Streamlink
                    # was used, the hls URL might not be in the JSON data
                    hls_movie = p.get("hls_movie")
                    if hls_movie:
                        return self.hls_stream(hls_movie["url"])
class ABweb(Plugin):
    '''BIS Livestreams of french AB Groupe
       http://www.abweb.com/BIS-TV-Online/
    '''

    login_url = 'http://www.abweb.com/BIS-TV-Online/Default.aspx'

    _url_re = re.compile(
        r'https?://(?:www\.)?abweb\.com/BIS-TV-Online/bistvo-tele-universal.aspx',
        re.IGNORECASE)
    _hls_re = re.compile(
        r'''["']file["']:\s?["'](?P<url>[^"']+\.m3u8[^"']+)["']''')
    _iframe_re = re.compile(r'''<iframe[^>]+src=["'](?P<url>[^"']+)["']''')
    _input_re = re.compile(r'''(<input[^>]+>)''')
    _name_re = re.compile(r'''name=["']([^"']*)["']''')
    _value_re = re.compile(r'''value=["']([^"']*)["']''')

    # cached login cookies are considered valid for 24 hours
    expires_time = 3600 * 24

    arguments = PluginArguments(
        PluginArgument("username",
                       required=True,
                       requires=["password"],
                       metavar="USERNAME",
                       help="""
        The username associated with your ABweb account, required to access any ABweb stream.
        """,
                       prompt="Enter ABweb username"),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A ABweb account password to use with --abweb-username.",
            prompt="Enter ABweb password"),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached ABweb credentials to initiate a new session and reauthenticate.
        """))

    def __init__(self, url):
        """Load cached session cookies and their expiry time from the plugin cache."""
        super(ABweb, self).__init__(url)
        self._session_attributes = Cache(filename='plugin-cache.json',
                                         key_prefix='abweb:attributes')
        # authenticated only when both ASP.NET session cookies are cached
        self._authed = self._session_attributes.get(
            'ASP.NET_SessionId') and self._session_attributes.get('.abportail1')
        self._expires = self._session_attributes.get(
            'expires', time.time() + self.expires_time)

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def set_expires_time_cache(self):
        """Refresh the cached cookie-expiry timestamp to now + 24 hours."""
        expires = time.time() + self.expires_time
        self._session_attributes.set('expires',
                                     expires,
                                     expires=self.expires_time)

    def get_iframe_url(self):
        """
        Find the player iframe on the page.

        :raises PluginError: when no iframe is found
        """
        self.logger.debug('search for an iframe')
        res = self.session.http.get(self.url)
        m = self._iframe_re.search(res.text)
        if not m:
            raise PluginError('No iframe found.')

        iframe_url = m.group('url')
        iframe_url = update_scheme('http://', iframe_url)
        self.logger.debug('IFRAME URL={0}'.format(iframe_url))
        return iframe_url

    def get_hls_url(self, iframe_url):
        """
        Extract the HLS playlist URL from the iframe page.

        :raises PluginError: when no playlist is found
        """
        self.logger.debug('search for hls url')
        res = self.session.http.get(iframe_url)
        m = self._hls_re.search(res.text)
        if not m:
            raise PluginError('No playlist found.')

        return m and m.group('url')

    def _login(self, username, password):
        '''login and update cached cookies'''
        self.logger.debug('login ...')
        res = self.session.http.get(self.login_url)
        input_list = self._input_re.findall(res.text)
        if not input_list:
            raise PluginError('Missing input data on login website.')

        # collect all hidden ASP.NET form fields (viewstate etc.) so the
        # postback is accepted by the server
        data = {}
        for _input_data in input_list:
            try:
                _input_name = self._name_re.search(_input_data).group(1)
            except AttributeError:
                continue

            try:
                _input_value = self._value_re.search(_input_data).group(1)
            except AttributeError:
                _input_value = ''

            data[_input_name] = _input_value

        login_data = {
            'ctl00$Login1$UserName': username,
            'ctl00$Login1$Password': password,
            'ctl00$Login1$LoginButton.x': '0',
            'ctl00$Login1$LoginButton.y': '0'
        }
        data.update(login_data)

        res = self.session.http.post(self.login_url, data=data)

        # cache the freshly issued cookies for later sessions
        for cookie in self.session.http.cookies:
            self._session_attributes.set(cookie.name,
                                         cookie.value,
                                         expires=3600 * 24)

        if self._session_attributes.get(
                'ASP.NET_SessionId') and self._session_attributes.get('.abportail1'):
            self.logger.debug('New session data')
            self.set_expires_time_cache()
            return True
        else:
            self.logger.error('Failed to login, check your username/password')
            return False

    def _get_streams(self):
        """Authenticate (cached or fresh), locate the iframe and yield HLS streams."""
        self.session.http.headers.update({
            'User-Agent': useragents.CHROME,
            'Referer': 'http://www.abweb.com/BIS-TV-Online/bistvo-tele-universal.aspx'
        })

        login_username = self.get_option('username')
        login_password = self.get_option('password')

        if self.options.get('purge_credentials'):
            self._session_attributes.set('ASP.NET_SessionId', None, expires=0)
            self._session_attributes.set('.abportail1', None, expires=0)
            self._authed = False
            self.logger.info('All credentials were successfully removed.')

        if not self._authed and not (login_username and login_password):
            self.logger.error(
                'A login for ABweb is required, use --abweb-username USERNAME --abweb-password PASSWORD'
            )
            return

        if self._authed:
            if self._expires < time.time():
                # login after 24h
                self.logger.debug('get new cached cookies')
                self.set_expires_time_cache()
                self._authed = False
            else:
                self.logger.info('Attempting to authenticate using cached cookies')
                self.session.http.cookies.set(
                    'ASP.NET_SessionId',
                    self._session_attributes.get('ASP.NET_SessionId'))
                self.session.http.cookies.set(
                    '.abportail1', self._session_attributes.get('.abportail1'))

        if not self._authed and not self._login(login_username, login_password):
            return

        iframe_url = self.get_iframe_url()
        self.session.http.headers.update({'Referer': iframe_url})

        hls_url = self.get_hls_url(iframe_url)
        hls_url = update_scheme(self.url, hls_url)

        self.logger.debug('URL={0}'.format(hls_url))
        variant = HLSStream.parse_variant_playlist(self.session, hls_url)
        if variant:
            for q, s in variant.items():
                yield q, s
        else:
            yield 'live', HLSStream(self.session, hls_url)
class Resolve(Plugin):
    """Generic fallback plugin: resolves playlists and iframes on arbitrary pages."""

    _url_re = re.compile(r'''(resolve://)?(?P<url>.+)''')

    # regex for iframes
    _iframe_re = re.compile(
        r'''
        <ifr(?:["']\s?\+\s?["'])?ame
        (?!\sname=["']g_iFrame).*?src=
        ["'](?P<url>[^"'\s<>]+)["']
        [^<>]*?>
        ''', re.VERBOSE | re.IGNORECASE | re.DOTALL)

    # regex for playlists
    _playlist_re = re.compile(
        r'''
        (?:["']|=|")(?P<url>
            (?<!title=["'])
            (?<!["']title["']:["'])
            [^"'<>\s\;{}]+\.(?:m3u8|f4m|mp3|mp4|mpd)
            (?:\?[^"'<>\s\\{}]+)?)
        (?:\\?["']|(?<!;)\s|>|\\")
        ''', re.DOTALL | re.VERBOSE)

    # regex for mp3 and mp4 files
    _httpstream_bitrate_re = re.compile(
        r'''
        (?:_|\.)
        (?:
            (?P<bitrate>\d{1,4})
            |
            (?P<resolution>\d{1,4}p)
        )
        \.mp(?:3|4)
        ''', re.VERBOSE)

    # Regex for: javascript redirection
    _window_location_re = re.compile(
        r'''
        <script[^<]+window\.location\.href\s?=\s?["']
        (?P<url>[^"']+)["'];[^<>]+
        ''', re.DOTALL | re.VERBOSE)

    _unescape_iframe_re = re.compile(
        r'''
        unescape\050["']
        (?P<data>%3C(?:
            iframe|%69%66%72%61%6d%65
        )%20[^"']+)["']
        ''', re.IGNORECASE | re.VERBOSE)

    _unescape_hls_re = re.compile(
        r'''
        unescape\050["']
        (?P<data>%3C(?:
            [^"']+m3u8[^"']+
        )%20[^"']+)["']
        ''', re.IGNORECASE | re.VERBOSE)

    # Regex for obviously ad paths
    _ads_path_re = re.compile(
        r'''
        (?:/(?:static|\d+))?
        /ads?/?(?:\w+)?
        (?:\d+x\d+)?
        (?:_\w+)?\.(?:html?|php)
        ''', re.VERBOSE)

    # START - _make_url_list
    # Not allowed at the end of the parsed url path
    blacklist_endswith = (
        '.gif',
        '.jpg',
        '.png',
        '.svg',
        '.vtt',
        '/chat.html',
        '/chat',
        '/novideo.mp4',
        '/vidthumb.mp4',
    )
    # Not allowed at the end of the parsed url netloc
    blacklist_netloc = (
        '127.0.0.1',
        'about:blank',
        'abv.bg',
        'adfox.ru',
        'cbox.ws',
        'googletagmanager.com',
        'javascript:false',
    )
    # END - _make_url_list

    arguments = PluginArguments(
        PluginArgument('playlist-max',
                       metavar='NUMBER',
                       type=num(int, min=0, max=25),
                       default=5,
                       help='''
                       Number of how many playlist URLs of the same type
                       are allowed to be resolved with this plugin.

                       Default is 5
                       '''),
        PluginArgument('playlist-referer',
                       metavar='URL',
                       help='''
                       Set a custom referer URL for the playlist URLs.

                       This only affects playlist URLs of this plugin.

                       Default URL of the last website.
                       '''),
        PluginArgument('blacklist-netloc',
                       metavar='NETLOC',
                       type=comma_list,
                       help='''
                       Blacklist domains that should not be used,
                       by using a comma-separated list:

                         'example.com,localhost,google.com'

                       Useful for websites with a lot of iframes.
                       '''),
        PluginArgument('blacklist-path',
                       metavar='PATH',
                       type=comma_list,
                       help='''
                       Blacklist the path of a domain that should not be used,
                       by using a comma-separated list:

                         'example.com/mypath,localhost/example,google.com/folder'

                       Useful for websites with different iframes of the same domain.
                       '''),
        PluginArgument('blacklist-filepath',
                       metavar='FILEPATH',
                       type=comma_list,
                       help='''
                       Blacklist file names for iframes and playlists
                       by using a comma-separated list:

                         'index.html,ignore.m3u8,/ad/master.m3u8'

                       Sometimes there are invalid URLs in the result list,
                       this can be used to remove them.
                       '''),
        PluginArgument('whitelist-netloc',
                       metavar='NETLOC',
                       type=comma_list,
                       help='''
                       Whitelist domains that should only be searched for iframes,
                       by using a comma-separated list:

                         'example.com,localhost,google.com'

                       Useful for websites with lots of iframes,
                       where the main iframe always has the same hosting domain.
                       '''),
        PluginArgument('whitelist-path',
                       metavar='PATH',
                       type=comma_list,
                       help='''
                       Whitelist the path of a domain that should only be searched
                       for iframes, by using a comma-separated list:

                         'example.com/mypath,localhost/example,google.com/folder'

                       Useful for websites with different iframes of the same domain,
                       where the main iframe always has the same path.
                       '''),
    )

    def __init__(self, url):
        super(Resolve, self).__init__(url)
        '''  generates default options
             and caches them into ResolveCache class
        '''
        # Strip the optional resolve:// prefix and default to http://.
        self.url = update_scheme('http://', self._url_re.match(self.url).group('url'))
        self.html_text = ''
        self.title = None

        # START - cache every used url and set a referer
        # ResolveCache is shared class-level state across recursive
        # invocations of this plugin (one per resolved iframe).
        if hasattr(ResolveCache, 'cache_url_list'):
            ResolveCache.cache_url_list += [self.url]
            # set the last url as a referer
            self.referer = ResolveCache.cache_url_list[-2]
        else:
            ResolveCache.cache_url_list = [self.url]
            self.referer = self.url
        self.session.http.headers.update({'Referer': self.referer})
        # END

        # START - how often _get_streams already run
        self._run = len(ResolveCache.cache_url_list)
        # END

    @classmethod
    def priority(cls, url):
        '''
        Returns
        - NO priority if the URL is not prefixed
        - HIGH priority if the URL is prefixed
        :param url: the URL to find the plugin priority for
        :return: plugin priority for the given URL
        '''
        m = cls._url_re.match(url)
        if m:
            prefix, url = cls._url_re.match(url).groups()
            if prefix is not None:
                return HIGH_PRIORITY
        return NO_PRIORITY

    @classmethod
    def can_handle_url(cls, url):
        m = cls._url_re.match(url)
        if m:
            return m.group('url') is not None

    def compare_url_path(self, parsed_url, check_list):
        '''compare a parsed url, if it matches an item from a list

        Args:
            parsed_url: an URL that was used with urlparse
            check_list: a list of URLs as a tuple
                        [('foo.bar', '/path/'), ('foo2.bar', '/path/')]

        Returns:
            True
                if parsed_url in check_list
            False
                if parsed_url not in check_list
        '''
        status = False
        for netloc, path in check_list:
            if (parsed_url.netloc.endswith(netloc)
                    and parsed_url.path.startswith(path)):
                status = True
                break
        return status

    def merge_path_list(self, static, user):
        '''merge the static list, with an user list

        Args:
            static (list): static list from this plugin
            user (list): list from an user command

        Returns:
            A new valid list
        '''
        for _path_url in user:
            if not _path_url.startswith(('http', '//')):
                _path_url = update_scheme('http://', _path_url)
            _parsed_path_url = urlparse(_path_url)
            if _parsed_path_url.netloc and _parsed_path_url.path:
                static += [(_parsed_path_url.netloc, _parsed_path_url.path)]
        return static

    def repair_url(self, url, base_url, stream_base=''):
        '''repair a broken url'''
        # remove \
        new_url = url.replace('\\', '')
        # repairs broken scheme
        # NOTE(review): the slice offsets [9:]/[10:] do not match the length of
        # the literal 'http://'/'https://' prefixes; this looks like it was
        # written for HTML-entity-escaped schemes (e.g. 'http&#58;//') and may
        # have been garbled - confirm against the original source.
        if new_url.startswith('http://'):
            new_url = 'http:' + new_url[9:]
        elif new_url.startswith('https://'):
            new_url = 'https:' + new_url[10:]

        # creates a valid url from path only urls
        # and adds missing scheme for // urls
        # NOTE(review): `is`/`is not` on str literals compares identity, not
        # equality; it works on CPython for 1-char interned strings but should
        # be `==`/`!=` (emits SyntaxWarning on Python 3.8+).
        if stream_base and new_url[1] is not '/':
            if new_url[0] is '/':
                new_url = new_url[1:]
            new_url = urljoin(stream_base, new_url)
        else:
            new_url = urljoin(base_url, new_url)
        return new_url

    def _make_url_list(self, old_list, base_url, url_type=''):
        '''removes unwanted URLs and creates a list of valid URLs

        Args:
            old_list: list of URLs
            base_url: URL that will get used for scheme and netloc repairs
            url_type: can be ... and is used for ...
                - iframe
                    --resolve-whitelist-netloc
                - playlist
                    Not used

        Returns:
            (list) A new valid list of urls.
        '''
        # START - List for not allowed URL Paths
        # --resolve-blacklist-path
        # The merged blacklist/whitelist is computed once per Streamlink run
        # and cached on the shared ResolveCache class.
        if not hasattr(ResolveCache, 'blacklist_path'):

            # static list
            blacklist_path = [
                ('bigo.tv', '/show.mp4'),
                ('expressen.se', '/_livetvpreview/'),
                ('facebook.com', '/connect'),
                ('facebook.com', '/plugins'),
                ('haber7.com', '/radyohome/station-widget/'),
                ('static.tvr.by', '/upload/video/atn/promo'),
                ('twitter.com', '/widgets'),
                ('vesti.ru', '/native_widget.html'),
                ('youtube.com', '/['),
            ]

            # merge user and static list
            blacklist_path_user = self.get_option('blacklist_path')
            if blacklist_path_user is not None:
                blacklist_path = self.merge_path_list(blacklist_path, blacklist_path_user)

            ResolveCache.blacklist_path = blacklist_path
        # END

        # START - List of only allowed URL Paths for Iframes
        # --resolve-whitelist-path
        if not hasattr(ResolveCache, 'whitelist_path'):
            whitelist_path = []
            whitelist_path_user = self.get_option('whitelist_path')
            if whitelist_path_user is not None:
                whitelist_path = self.merge_path_list([], whitelist_path_user)
            ResolveCache.whitelist_path = whitelist_path
        # END

        # sorted after the way streamlink will try to remove an url
        status_remove = [
            'SAME-URL',
            'SCHEME',
            'WL-netloc',
            'WL-path',
            'BL-static',
            'BL-netloc',
            'BL-path',
            'BL-ew',
            'BL-filepath',
            'ADS',
        ]

        new_list = []
        for url in old_list:
            new_url = self.repair_url(url, base_url)
            # parse the url
            parse_new_url = urlparse(new_url)

            # START - removal of unwanted urls
            REMOVE = False
            count = 0
            # status_remove must be updated on changes
            # The tuple below is ordered to line up 1:1 with status_remove;
            # `count` indexes into it for the debug message.
            for url_status in (
                    # Removes an already used iframe url
                    (new_url in ResolveCache.cache_url_list),
                    # Allow only an url with a valid scheme
                    (not parse_new_url.scheme.startswith(('http'))),
                    # Allow only whitelisted domains for iFrames
                    # --resolve-whitelist-netloc
                    (url_type == 'iframe'
                     and self.get_option('whitelist_netloc')
                     and parse_new_url.netloc.endswith(
                         tuple(self.get_option('whitelist_netloc'))) is False),
                    # Allow only whitelisted paths from a domain for iFrames
                    # --resolve-whitelist-path
                    (url_type == 'iframe'
                     and ResolveCache.whitelist_path
                     and self.compare_url_path(
                         parse_new_url, ResolveCache.whitelist_path) is False),
                    # Removes blacklisted domains from a static list
                    # self.blacklist_netloc
                    (parse_new_url.netloc.endswith(self.blacklist_netloc)),
                    # Removes blacklisted domains
                    # --resolve-blacklist-netloc
                    (self.get_option('blacklist_netloc')
                     and parse_new_url.netloc.endswith(
                         tuple(self.get_option('blacklist_netloc')))),
                    # Removes blacklisted paths from a domain
                    # --resolve-blacklist-path
                    (self.compare_url_path(parse_new_url, ResolveCache.blacklist_path) is True),
                    # Removes unwanted endswith images and chatrooms
                    (parse_new_url.path.endswith(self.blacklist_endswith)),
                    # Removes blacklisted file paths
                    # --resolve-blacklist-filepath
                    (self.get_option('blacklist_filepath')
                     and parse_new_url.path.endswith(
                         tuple(self.get_option('blacklist_filepath')))),
                    # Removes obviously AD URL
                    (self._ads_path_re.match(parse_new_url.path)),
            ):
                count += 1
                if url_status:
                    REMOVE = True
                    break

            if REMOVE is True:
                log.debug('{0} - Removed: {1}'.format(status_remove[count - 1], new_url))
                continue
            # END - removal of unwanted urls

            # Add repaired url
            new_list += [new_url]

        # Remove duplicates
        log.debug('List length: {0} (with duplicates)'.format(len(new_list)))
        new_list = sorted(list(set(new_list)))
        return new_list

    def _unescape_type(self, _re, _type_re):
        '''search for unescaped iframes or m3u8 URLs'''
        unescape_type = _re.findall(self.html_text)
        if unescape_type:
            unescape_text = []
            for data in unescape_type:
                unescape_text += [unquote(data)]
            unescape_text = ','.join(unescape_text)
            unescape_type = _type_re.findall(unescape_text)
            if unescape_type:
                log.debug('Found unescape_type: {0}'.format(len(unescape_type)))
                return unescape_type
        log.trace('No unescape_type')
        return False

    def _window_location(self):
        '''Try to find a script with window.location.href

        Args:
            res_text: Content from self._res_text

        Returns:
            (str) url
                or
            False
                if no url was found.
        '''
        match = self._window_location_re.search(self.html_text)
        if match:
            temp_url = urljoin(self.url, match.group('url'))
            log.debug('Found window_location: {0}'.format(temp_url))
            return temp_url

        log.trace('No window_location')
        return False

    def _resolve_playlist(self, playlist_all):
        ''' create streams '''
        playlist_referer = self.get_option('playlist_referer') or self.url
        self.session.http.headers.update({'Referer': playlist_referer})

        # Cap the number of playlists resolved per stream type.
        playlist_max = self.get_option('playlist_max') or 5
        count_playlist = {
            'dash': 0,
            'hds': 0,
            'hls': 0,
            'http': 0,
        }

        for url in playlist_all:
            parsed_url = urlparse(url)
            if (parsed_url.path.endswith(('.m3u8'))
                    or parsed_url.query.endswith(('.m3u8'))):
                if count_playlist['hls'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    streams = HLSStream.parse_variant_playlist(self.session, url).items()
                    if not streams:
                        # Not a master playlist - yield it directly.
                        yield 'live', HLSStream(self.session, url)
                    for s in streams:
                        yield s
                    log.debug('HLS URL - {0}'.format(url))
                    count_playlist['hls'] += 1
                except Exception as e:
                    log.error('Skip HLS with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.f4m'))
                    or parsed_url.query.endswith(('.f4m'))):
                if count_playlist['hds'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    for s in HDSStream.parse_manifest(self.session, url).items():
                        yield s
                    log.debug('HDS URL - {0}'.format(url))
                    count_playlist['hds'] += 1
                except Exception as e:
                    log.error('Skip HDS with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.mp3', '.mp4'))
                    or parsed_url.query.endswith(('.mp3', '.mp4'))):
                if count_playlist['http'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    # Derive a quality name from the bitrate/resolution in the
                    # file name, falling back to 'vod'.
                    name = 'vod'
                    m = self._httpstream_bitrate_re.search(url)
                    if m:
                        bitrate = m.group('bitrate')
                        resolution = m.group('resolution')
                        if bitrate:
                            name = '{0}k'.format(m.group('bitrate'))
                        elif resolution:
                            name = resolution
                    yield name, HTTPStream(self.session, url)
                    log.debug('HTTP URL - {0}'.format(url))
                    count_playlist['http'] += 1
                except Exception as e:
                    log.error('Skip HTTP with error {0}'.format(str(e)))
            elif (parsed_url.path.endswith(('.mpd'))
                    or parsed_url.query.endswith(('.mpd'))):
                if count_playlist['dash'] >= playlist_max:
                    log.debug('Skip - {0}'.format(url))
                    continue
                try:
                    for s in DASHStream.parse_manifest(self.session, url).items():
                        yield s
                    log.debug('DASH URL - {0}'.format(url))
                    count_playlist['dash'] += 1
                except Exception as e:
                    log.error('Skip DASH with error {0}'.format(str(e)))
            else:
                log.error('parsed URL - {0}'.format(url))

    def _res_text(self, url):
        '''Content of a website

        Args:
            url: URL with an embedded Video Player.

        Returns:
            Content of the response
        '''
        try:
            res = self.session.http.get(url, allow_redirects=True)
        except Exception as e:
            # Best-effort retries for known, recoverable fetch failures.
            if 'Received response with content-encoding: gzip' in str(e):
                headers = {
                    'User-Agent': useragents.FIREFOX,
                    'Accept-Encoding': 'deflate'
                }
                res = self.session.http.get(url, headers=headers, allow_redirects=True)
            elif '403 Client Error' in str(e):
                log.error('Website Access Denied/Forbidden, you might be geo-'
                          'blocked or other params are missing.')
                raise NoStreamsError(self.url)
            elif '404 Client Error' in str(e):
                log.error('Website was not found, the link is broken or dead.')
                raise NoStreamsError(self.url)
            else:
                raise e

        if res.history:
            for resp in res.history:
                log.debug('Redirect: {0} - {1}'.format(resp.status_code, resp.url))
            log.debug('URL: {0}'.format(res.url))
        return res.text

    def settings_url(self):
        '''store custom settings for URLs'''
        o = urlparse(self.url)

        # User-Agent
        # Per-site User-Agent overrides; only applied if no custom UA is set.
        _android = []
        _chrome = []
        _ipad = []
        _iphone = [
            'bigo.tv',
        ]

        if self.session.http.headers['User-Agent'].startswith('python-requests'):
            if o.netloc.endswith(tuple(_android)):
                self.session.http.headers.update({'User-Agent': useragents.ANDROID})
            elif o.netloc.endswith(tuple(_chrome)):
                self.session.http.headers.update({'User-Agent': useragents.CHROME})
            elif o.netloc.endswith(tuple(_ipad)):
                self.session.http.headers.update({'User-Agent': useragents.IPAD})
            elif o.netloc.endswith(tuple(_iphone)):
                self.session.http.headers.update({'User-Agent': useragents.IPHONE_6})
            else:
                # default User-Agent
                self.session.http.headers.update({'User-Agent': useragents.FIREFOX})

        # SSL Verification - http.verify
        # Sites with known-broken certificates.
        http_verify = [
            '.cdn.bg',
            'sportal.bg',
        ]
        if (o.netloc.endswith(tuple(http_verify)) and self.session.http.verify):
            self.session.http.verify = False
            log.warning('SSL Verification disabled.')

    def get_title(self):
        if self.title is None:
            if not self.html_text:
                self.html_text = self._res_text(self.url)
            _title_re = re.compile(r'<title>(?P<title>[^<>]+)</title>')
            m = _title_re.search(self.html_text)
            if m:
                self.title = m.group('title')
            if self.title is None:
                # fallback if there is no <title>
                self.title = self.url
        return self.title

    def _get_streams(self):
        self.settings_url()

        if self._run <= 1:
            log.debug('Version 2018-08-19')
            log.info('This is a custom plugin.')
            log.debug('User-Agent: {0}'.format(self.session.http.headers['User-Agent']))

        new_session_url = False

        log.info('  {0}. URL={1}'.format(self._run, self.url))

        # GET website content
        self.html_text = self._res_text(self.url)

        # Playlist URL
        playlist_all = self._playlist_re.findall(self.html_text)
        # unescape('%3C...') blobs may hide additional playlist URLs
        _p_u = self._unescape_type(self._unescape_hls_re, self._playlist_re)
        if _p_u:
            playlist_all += _p_u

        if playlist_all:
            log.debug('Found Playlists: {0}'.format(len(playlist_all)))
            playlist_list = self._make_url_list(
                playlist_all,
                self.url,
                url_type='playlist',
            )
            if playlist_list:
                log.info('Found Playlists: {0} (valid)'.format(len(playlist_list)))
                return self._resolve_playlist(playlist_list)
        else:
            log.trace('No Playlists')

        # iFrame URL
        iframe_list = []
        for _iframe_list in (self._iframe_re.findall(self.html_text),
                             self._unescape_type(self._unescape_iframe_re, self._iframe_re)):
            if not _iframe_list:
                continue
            iframe_list += _iframe_list

        if iframe_list:
            log.debug('Found Iframes: {0}'.format(len(iframe_list)))
            # repair and filter iframe url list
            new_iframe_list = self._make_url_list(iframe_list, self.url, url_type='iframe')
            if new_iframe_list:
                number_iframes = len(new_iframe_list)
                if number_iframes == 1:
                    new_session_url = new_iframe_list[0]
                else:
                    # Multiple candidates: ask the user which one to follow,
                    # defaulting to the first on any invalid/aborted input.
                    log.info('--- IFRAMES ---')
                    for i, item in enumerate(new_iframe_list, start=1):
                        log.info('{0} - {1}'.format(i, item))
                    log.info('--- IFRAMES ---')

                    try:
                        number = int(self.input_ask('Choose an iframe number from above').split(' ')[0])
                        new_session_url = new_iframe_list[number - 1]
                    except FatalPluginError:
                        new_session_url = new_iframe_list[0]
                    except ValueError:
                        log.error('invalid input answer')
                    except (IndexError, TypeError):
                        log.error('invalid input number')

                    if not new_session_url:
                        new_session_url = new_iframe_list[0]
        else:
            log.trace('No Iframes')

        if not new_session_url:
            # search for window.location.href
            new_session_url = self._window_location()

        if new_session_url:
            # the Dailymotion Plugin does not work with this Referer
            if 'dailymotion.com' in new_session_url:
                del self.session.http.headers['Referer']
            # Recurse: hand the resolved URL back to Streamlink.
            return self.session.streams(new_session_url)

        raise NoPluginError
class Streann(Plugin):
    """Plugin for streams embedded via the ott.streann.com player."""

    arguments = PluginArguments(
        PluginArgument(
            "url",
            type=str,
            metavar="URL",
            help="""
            Source URL where the iframe is located, only required for direct URLs of `ott.streann.com`
            """
        )
    )

    base_url = "https://ott.streann.com"
    get_time_url = base_url + "/web/services/public/get-server-time"
    token_url = base_url + "/loadbalancer/services/web-players/{playerId}/token/{type}/{dataId}/{deviceId}"
    stream_url = base_url + "/loadbalancer/services/web-players/{type}s-reseller-secure/{dataId}/{playerId}" \
                            "/{token}/{resellerId}/playlist.m3u8?date={time}&device-type=web&device-name=web" \
                            "&device-os=web&device-id={deviceId}"

    # Extracts the AES passphrase from the player page's CryptoJS call;
    # the backreference \1 matches the opening quote style.
    passphrase_re = re.compile(r'''CryptoJS\.AES\.decrypt\(.*?,\s*(['"])(?P<passphrase>(?:(?!\1).)*)\1\s*?\);''')

    _device_id = None
    _domain = None

    @property
    def device_id(self):
        """
        Randomly generated deviceId.
        :return:
        """
        # Generated once per plugin instance and reused for token + stream URL.
        if self._device_id is None:
            self._device_id = "".join(
                random.choice("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
                for _ in range(50))
        return self._device_id

    @property
    def time(self):
        # Server-reported time in milliseconds; falls back to local time.
        res = self.session.http.get(self.get_time_url)
        data = self.session.http.json(res)
        return str(data.get("serverTime", int(time.time() * 1000)))

    def passphrase(self):
        """Scrape the AES passphrase from the player page, or None."""
        log.debug("passphrase")
        res = self.session.http.get(self.url)
        passphrase_m = self.passphrase_re.search(res.text)
        return passphrase_m and passphrase_m.group("passphrase").encode("utf8")

    def get_token(self, **config):
        """Request a playback token; returns None on failure.

        config: decrypted player query-string parameters (playerId, type,
        dataId, ...) used to fill the token URL template.
        """
        log.debug("get_token")
        pdata = dict(arg1=base64.b64encode(self._domain.encode("utf8")),
                     arg2=base64.b64encode(self.time.encode("utf8")))
        headers = {
            "Referer": self.url,
            "X-Requested-With": "XMLHttpRequest",
            "Content-Type": "application/x-www-form-urlencoded"
        }

        res = self.session.http.post(
            self.token_url.format(deviceId=self.device_id, **config),
            data=pdata,
            headers=headers
        )
        # 204 No Content: the server rejected the domain argument.
        if res.status_code == 204:
            log.error(f"self._domain might be invalid - {self._domain}")
            return

        data = self.session.http.json(res, schema=validate.Schema({
            "token": str,
            validate.optional("name"): str,
            validate.optional("webPlayer"): {
                validate.optional("id"): str,
                validate.optional("name"): str,
                validate.optional("type"): str,
                validate.optional("allowedDomains"): [str],
            },
        }))
        log.trace(f"{data!r}")
        self.title = data.get("name")
        return data["token"]

    def _get_streams(self):
        # NOTE(review): self.matches[0] appears to be falsy when the URL did
        # not match the direct ott.streann.com pattern - confirm against the
        # plugin's URL matchers (defined outside this view).
        if not self.matches[0]:
            self._domain = urlparse(self.url).netloc
            # Find the ott.streann.com iframe on the embedding page.
            iframes = self.session.http.get(self.url, schema=validate.Schema(
                validate.parse_html(),
                validate.xml_findall(".//iframe[@src]"),
                validate.filter(lambda elem: urlparse(elem.attrib.get("src")).netloc == "ott.streann.com")
            ))
            if not iframes:
                log.error("Could not find 'ott.streann.com' iframe")
                return
            self.url = iframes[0].attrib.get("src")

        # Direct player URL: the embedding domain must be supplied manually.
        if not self._domain and self.get_option("url"):
            self._domain = urlparse(self.get_option("url")).netloc

        if self._domain is None:
            log.error("Missing source URL, use --streann-url")
            return

        self.session.http.headers.update({"Referer": self.url})
        # Get the query string
        encrypted_data = urlparse(self.url).query
        data = base64.b64decode(encrypted_data)
        # and decrypt it
        passphrase = self.passphrase()
        if passphrase:
            log.debug("Found passphrase")
            params = decrypt_openssl(data, passphrase)
            config = parse_qsd(params.decode("utf8"))
            log.trace(f"config: {config!r}")
            token = self.get_token(**config)
            if not token:
                return
            hls_url = self.stream_url.format(time=self.time,
                                             deviceId=self.device_id,
                                             token=token,
                                             **config)
            log.debug("URL={0}".format(hls_url))
            return HLSStream.parse_variant_playlist(self.session, hls_url,
                                                    acceptable_status=(200, 403, 404, 500))
class WWENetwork(Plugin):
    """Plugin for WWE Network streams (account login required)."""

    url_re = re.compile(r"https?://network.wwe.com")
    content_id_re = re.compile(r'''"content_id" : "(\d+)"''')
    playback_scenario = "HTTP_CLOUD_WIRED"
    login_url = "https://secure.net.wwe.com/workflow.do"
    login_page_url = "https://secure.net.wwe.com/enterworkflow.do?flowId=account.login&forwardUrl=http%3A%2F%2Fnetwork.wwe.com"
    api_url = "https://ws.media.net.wwe.com/ws/media/mf/op-findUserVerifiedEvent/v-2.3"

    # Parses the XML media-info response into a dict.
    _info_schema = validate.Schema(
        validate.union({
            "status": validate.union({
                "code": validate.all(validate.xml_findtext(".//status-code"),
                                     validate.transform(int)),
                "message": validate.xml_findtext(".//status-message"),
            }),
            "urls": validate.all(validate.xml_findall(".//url"),
                                 [validate.getattr("text")]),
            validate.optional("fingerprint"): validate.xml_findtext(".//updated-fingerprint"),
            validate.optional("session_key"): validate.xml_findtext(".//session-key"),
            "session_attributes": validate.all(validate.xml_findall(".//session-attribute"), [
                validate.getattr("attrib"),
                validate.union({
                    "name": validate.get("name"),
                    "value": validate.get("value")
                })
            ])
        }))

    arguments = PluginArguments(
        PluginArgument("email",
                       required=True,
                       metavar="EMAIL",
                       requires=["password"],
                       help="""
        The email associated with your WWE Network account, required to access any WWE Network stream.
        """),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
        A WWE Network account password to use with --wwenetwork-email.
        """))

    def __init__(self, url):
        super(WWENetwork, self).__init__(url)
        http.headers.update({"User-Agent": useragents.CHROME})
        # Persistent cache for the ipid/fprt auth cookies.
        self._session_attributes = Cache(filename="plugin-cache.json", key_prefix="wwenetwork:attributes")
        self._session_key = self.cache.get("session_key")
        # Authenticated only when both cached cookies are present.
        self._authed = self._session_attributes.get("ipid") and self._session_attributes.get("fprt")

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def login(self, email, password):
        """Log in and cache the resulting auth cookies for ~1h30m."""
        self.logger.debug("Attempting login as {0}", email)
        # sets some required cookies to login
        http.get(self.login_page_url)
        # login
        res = http.post(self.login_url,
                        data=dict(registrationAction='identify',
                                  emailAddress=email,
                                  password=password,
                                  submitButton=""),
                        headers={"Referer": self.login_page_url},
                        allow_redirects=False)

        self._authed = "Authentication Error" not in res.text
        if self._authed:
            self._session_attributes.set("ipid", res.cookies.get("ipid"), expires=3600 * 1.5)
            self._session_attributes.set("fprt", res.cookies.get("fprt"), expires=3600 * 1.5)

        return self._authed

    def _update_session_attribute(self, key, value):
        # Refresh both the persistent cache and the live cookie jar.
        if value:
            self._session_attributes.set(key, value, expires=3600 * 1.5)  # 1h30m expiry
            http.cookies.set(key, value)

    @property
    def session_key(self):
        return self._session_key

    @session_key.setter
    def session_key(self, value):
        self.cache.set("session_key", value)
        self._session_key = value

    def _get_media_info(self, content_id):
        """
        Get the info about the content, based on the ID
        :param content_id:
        :return:
        """
        params = {"identityPointId": self._session_attributes.get("ipid"),
                  "fingerprint": self._session_attributes.get("fprt"),
                  "contentId": content_id,
                  "playbackScenario": self.playback_scenario,
                  "platform": "WEB_MEDIAPLAYER_5",
                  "subject": "LIVE_EVENT_COVERAGE",
                  "frameworkURL": "https://ws.media.net.wwe.com",
                  "_": int(time.time())}
        if self.session_key:
            params["sessionKey"] = self.session_key
        # NOTE(review): api_url contains no "{id}" placeholder, so this
        # format() call is a no-op - confirm whether the template was meant
        # to include the content id.
        url = self.api_url.format(id=content_id)
        res = http.get(url, params=params)
        return http.xml(res, ignore_ns=True, schema=self._info_schema)

    def _get_content_id(self):
        #  check the page to find the contentId
        res = http.get(self.url)
        m = self.content_id_re.search(res.text)
        if m:
            return m.group(1)

    def _get_streams(self):
        email = self.get_option("email")
        password = self.get_option("password")

        if not self._authed and (not email and not password):
            self.logger.error(
                "A login for WWE Network is required, use --wwenetwork-email/"
                "--wwenetwork-password to set them")
            return

        if not self._authed:
            if not self.login(email, password):
                self.logger.error("Failed to login, check your username/password")
                return

        content_id = self._get_content_id()
        if content_id:
            self.logger.debug("Found content ID: {0}", content_id)
            info = self._get_media_info(content_id)
            if info["status"]["code"] == 1:
                # update the session attributes
                self._update_session_attribute("fprt", info.get("fingerprint"))
                for attr in info["session_attributes"]:
                    self._update_session_attribute(attr["name"], attr["value"])

                if info.get("session_key"):
                    self.session_key = info.get("session_key")
                for url in info["urls"]:
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url, name_fmt="{pixels}_{bitrate}").items():
                        yield s
            else:
                raise PluginError(
                    "Could not load streams: {message} ({code})".format(**info["status"]))
class Rtve(Plugin): arguments = PluginArguments( PluginArgument("mux-subtitles", is_global=True), ) URL_VIDEOS = "https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2" URL_SUBTITLES = "https://www.rtve.es/api/videos/{id}/subtitulos.json" def _get_streams(self): self.id = self.session.http.get( self.url, schema=validate.Schema( validate.transform( re.compile(r"\bdata-setup='({.+?})'", re.DOTALL).search), validate.any( None, validate.all( validate.get(1), validate.parse_json(), { "idAsset": validate.any( int, validate.all(str, validate.transform(int))), }, validate.get("idAsset"))), )) if not self.id: return urls = self.session.http.get( self.URL_VIDEOS.format(id=self.id), schema=validate.Schema( validate.transform(ZTNR.translate), validate.transform(list), [(str, validate.url())], ), ) url = next( (url for _, url in urls if urlparse(url).path.endswith(".m3u8")), None) if not url: url = next( (url for _, url in urls if urlparse(url).path.endswith(".mp4")), None) if url: yield "vod", HTTPStream(self.session, url) return streams = HLSStream.parse_variant_playlist(self.session, url).items() if self.options.get("mux-subtitles"): subs = self.session.http.get( self.URL_SUBTITLES.format(id=self.id), schema=validate.Schema( validate.parse_json(), { "page": { "items": [{ "lang": str, "src": validate.url(), }] } }, validate.get(("page", "items")), ), ) if subs: subtitles = { s["lang"]: HTTPStream(self.session, update_scheme("https://", s["src"], force=True)) for s in subs } for quality, stream in streams: yield quality, MuxedStream(self.session, stream, subtitles=subtitles) return yield from streams
class Twitch(Plugin):
    """Plugin for twitch.tv live streams, VODs and clips."""

    arguments = PluginArguments(
        PluginArgument(
            "disable-hosting",
            action="store_true",
            help="""
            Do not open the stream if the target channel is hosting another channel.
            """
        ),
        PluginArgument(
            "disable-ads",
            action="store_true",
            help="""
            Skip embedded advertisement segments at the beginning or during a stream.
            Will cause these segments to be missing from the stream.
            """
        ),
        PluginArgument(
            "disable-reruns",
            action="store_true",
            help="""
            Do not open the stream if the target channel is currently broadcasting a rerun.
            """
        ),
        PluginArgument(
            "low-latency",
            action="store_true",
            help="""
            Enables low latency streaming by prefetching HLS segments.
            Sets --hls-segment-stream-data to true and --hls-live-edge to {live_edge}, if it is higher.
            Reducing --hls-live-edge to 1 will result in the lowest latency possible.

            Low latency streams have to be enabled by the broadcasters on Twitch themselves.
            Regular streams can cause buffering issues with this option enabled.

            Note: The caching/buffering settings of the chosen player may need to be adjusted as well.
            Please refer to the player's own documentation for the required parameters and its configuration.
            Player parameters can be set via Streamlink's --player or --player-args parameters.
            """.format(live_edge=LOW_LATENCY_MAX_LIVE_EDGE)
        )
    )

    def __init__(self, url):
        super().__init__(url)
        match = self.match.groupdict()
        parsed = urlparse(url)
        self.params = parse_query(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self._channel_id = None
        self._channel = None
        self.clip_name = None
        # Lazily-resolved metadata (see _get_metadata).
        self.title = None
        self.author = None
        self.category = None

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                self.video_id = self.params["video"]
            self._channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            self._channel = match.get("channel") and match.get("channel").lower()
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(session=self.session)
        self.usher = UsherService(session=self.session)

    def get_title(self):
        if self.title is None:
            self._get_metadata()
        return self.title

    def get_author(self):
        if self.author is None:
            self._get_metadata()
        return self.author

    def get_category(self):
        if self.category is None:
            self._get_metadata()
        return self.category

    def _get_metadata(self):
        # Populate author/title/category from the API, depending on what kind
        # of URL (VOD, clip or channel) this plugin instance represents.
        if self.video_id:
            (self.author, self.title, self.category) = self.api.metadata_video(self.video_id)
        elif self.clip_name:
            self._get_clips()
        elif self._channel:
            (self.author, self.title, self.category) = self.api.metadata_channel(self.channel_id)

    @property
    def channel(self):
        # Resolved lazily: a VOD URL only carries the video id.
        if not self._channel:
            if self.video_id:
                self._channel_from_video_id(self.video_id)
        return self._channel

    @property
    def channel_id(self):
        if not self._channel_id:
            if self._channel:
                self._channel_from_login(self._channel)
            elif self.video_id:
                self._channel_from_video_id(self.video_id)
        return self._channel_id

    def _channel_from_video_id(self, video_id):
        try:
            self._channel_id, self._channel = self.api.channel_from_video_id(video_id)
        except PluginError:
            raise PluginError("Unable to find video: {0}".format(video_id))

    def _channel_from_login(self, channel):
        try:
            self._channel_id = self.api.channel_from_login(channel)
        except PluginError:
            raise PluginError("Unable to find channel: {0}".format(channel))

    def _access_token(self, is_live, channel_or_vod):
        """Fetch the signed access token; also returns subscriber-only qualities."""
        try:
            sig, token = self.api.access_token(is_live, channel_or_vod)
        except (PluginError, TypeError):
            raise NoStreamsError(self.url)

        try:
            restricted_bitrates = self.api.parse_token(token)
        except PluginError:
            restricted_bitrates = []

        return sig, token, restricted_bitrates

    def _switch_to_hosted_channel(self):
        """Follow host redirects; returns True if stream opening should abort."""
        disabled = self.options.get("disable_hosting")
        hosted_chain = [self.channel]
        while True:
            try:
                target_id, login, display_name = self.api.hosted_channel(self.channel)
            except PluginError:
                # Not hosting anyone: continue with the current channel.
                return False
            log.info("{0} is hosting {1}".format(self.channel, login))
            if disabled:
                log.info("hosting was disabled by command line option")
                return True

            if login in hosted_chain:
                # A -> B -> A hosting cycle would loop forever.
                loop = " -> ".join(hosted_chain + [login])
                log.error("A loop of hosted channels has been detected, "
                          "cannot find a playable stream. ({0})".format(loop))
                return True

            hosted_chain.append(login)
            log.info("switching to {0}".format(login))
            self._channel_id = target_id
            self._channel = login
            self.author = display_name

    def _check_for_rerun(self):
        """Return True when reruns are disabled and the channel airs one."""
        if not self.options.get("disable_reruns"):
            return False

        try:
            stream = self.api.stream_metadata(self.channel)
            if stream["type"] != "live":
                log.info("Reruns were disabled by command line option")
                return True
        except (PluginError, TypeError):
            pass

        return False

    def _get_hls_streams_live(self):
        if self._switch_to_hosted_channel():
            return
        if self._check_for_rerun():
            return

        # only get the token once the channel has been resolved
        log.debug("Getting live HLS streams for {0}".format(self.channel))
        self.session.http.headers.update({
            "referer": "https://player.twitch.tv",
            "origin": "https://player.twitch.tv",
        })
        sig, token, restricted_bitrates = self._access_token(True, self.channel)
        url = self.usher.channel(self.channel, sig=sig, token=token, fast_bread=True)

        return self._get_hls_streams(url, restricted_bitrates)

    def _get_hls_streams_video(self):
        log.debug("Getting video HLS streams for {0}".format(self.channel))
        sig, token, restricted_bitrates = self._access_token(False, self.video_id)
        url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)

        # If the stream is a VOD that is still being recorded, the stream should start at the beginning of the recording
        return self._get_hls_streams(url, restricted_bitrates, force_restart=True)

    def _get_hls_streams(self, url, restricted_bitrates, **extra_params):
        # Optional "t=..." start offset from the URL's query string.
        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

        try:
            streams = TwitchHLSStream.parse_variant_playlist(self.session, url,
                                                             start_offset=time_offset,
                                                             **extra_params)
        except OSError as err:
            err = str(err)
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                # Stream offline / VOD removed: no streams, not an error.
                return
            else:
                raise PluginError(err)

        for name in restricted_bitrates:
            if name not in streams:
                log.warning("The quality '{0}' is not available "
                            "since it requires a subscription.".format(name))

        return streams

    def _get_clips(self):
        try:
            (((sig, token), streams),
             (self.author, self.category),
             self.title) = self.api.clips(self.clip_name)
        except (PluginError, TypeError):
            return

        for quality, stream in streams:
            yield quality, HTTPStream(self.session, update_qsd(stream, {"sig": sig, "token": token}))

    def _get_streams(self):
        if self.video_id:
            return self._get_hls_streams_video()
        elif self.clip_name:
            return self._get_clips()
        elif self._channel:
            return self._get_hls_streams_live()
class OPENRECtv(Plugin): _url_re = re.compile( r"https?://(?:www\.)?openrec.tv/(?:live|movie)/(?P<id>[^/]+)") _stores_re = re.compile(r"window.stores\s*=\s*({.*?});", re.DOTALL | re.MULTILINE) _config_re = re.compile(r"window.sharedConfig\s*=\s*({.*?});", re.DOTALL | re.MULTILINE) api_url = "https://apiv5.openrec.tv/api/v5/movies/{id}/detail" login_url = "https://www.openrec.tv/viewapp/v4/mobile/user/login" _config_schema = validate.Schema( {"urls": { "apiv5Authorized": validate.url() }}) _stores_schema = validate.Schema( { "moviePageStore": { "movieStore": { "id": validate.text, "title": validate.text, "media": { "url": validate.any(None, '', validate.url()) } } } }, validate.get("moviePageStore"), validate.get("movieStore")) _detail_schema = validate.Schema({ validate.optional("error_message"): validate.text, "status": int, validate.optional("data"): { "type": validate.text, "items": [{ "media": { "url": validate.any(None, validate.url()), "url_dvr": validate.any(None, validate.url()) } }] } }) _login_schema = validate.Schema({ validate.optional("error_message"): validate.text, "status": int, validate.optional("data"): object }) arguments = PluginArguments( PluginArgument("email", requires=["password"], metavar="EMAIL", help=""" The email associated with your openrectv account, required to access any openrectv stream. """), PluginArgument("password", sensitive=True, metavar="PASSWORD", help=""" An openrectv account password to use with --openrectv-email. 
""")) def __init__(self, url): super(OPENRECtv, self).__init__(url) self._pdata = None self._pres = None self._pconfig = None @classmethod def can_handle_url(cls, url): return cls._url_re.match(url) is not None def login(self, email, password): res = self.session.http.post(self.login_url, data={ "mail": email, "password": password }) data = self.session.http.json(res, self._login_schema) if data["status"] == 0: log.debug("Logged in as {0}".format(data["data"]["user_name"])) else: log.error("Failed to login: {0}".format(data["error_message"])) return data["status"] == 0 def _get_page(self): if not self._pres: self._pres = self.session.http.get(self.url) return self._pres def _get_movie_data(self): pres = self._get_page() match = self._stores_re.search(pres.text) if match: self._pdata = parse_json(match.group(1), schema=self._stores_schema) return self._pdata def _get_page_config(self): pres = self._get_page() match = self._config_re.search(pres.text) if match: self._pconfig = parse_json(match.group(1)) return self._pconfig def _get_details(self, id): config = self._get_page_config() api_url = config["urls"]["apiv5Authorized"] url = "{base}/movies/{id}/detail".format(base=api_url, id=id) res = self.session.http.get( url, headers={ "access-token": self.session.http.cookies.get("access_token"), "uuid": self.session.http.cookies.get("uuid") }) data = self.session.http.json(res, schema=self._detail_schema) if data["status"] == 0: log.debug("Got valid detail response") return data["data"] else: log.error("Failed to get video stream: {0}".format( data["error_message"])) def get_title(self): mdata = self._get_movie_data() if mdata: return mdata["title"] def _get_streams(self): mdata = self._get_movie_data() if mdata: log.debug("Found video: {0} ({1})".format(mdata["title"], mdata["id"])) if mdata["media"]["url"]: for s in HLSStream.parse_variant_playlist( self.session, mdata["media"]["url"]).items(): yield s elif self.get_option("email") and self.get_option("password"): if 
self.login(self.get_option("email"), self.get_option("password")): details = self._get_details(mdata["id"]) if details: for item in details["items"]: for s in HLSStream.parse_variant_playlist( self.session, item["media"]["url"]).items(): yield s else: log.error("You must login to access this stream")
class YuppTV(Plugin): _m3u8_re = re.compile(r'''['"](http.+\.m3u8.*?)['"]''') _cookie_expiry = 3600 * 24 * 365 arguments = PluginArguments( PluginArgument("boxid", requires=["yuppflixtoken"], sensitive=True, metavar="BOXID", help=""" The yupptv.com boxid that's used in the BoxId cookie. Can be used instead of the username/password login process. """), PluginArgument("yuppflixtoken", sensitive=True, metavar="YUPPFLIXTOKEN", help=""" The yupptv.com yuppflixtoken that's used in the YuppflixToken cookie. Can be used instead of the username/password login process. """), PluginArgument("purge-credentials", action="store_true", help=""" Purge cached YuppTV credentials to initiate a new session and reauthenticate. """), ) def __init__(self, url): super().__init__(url) self._authed = (self.session.http.cookies.get("BoxId") and self.session.http.cookies.get("YuppflixToken")) def _login_using_box_id_and_yuppflix_token(self, box_id, yuppflix_token): time_now = time.time() self.session.http.cookies.set( 'BoxId', box_id, domain='www.yupptv.com', path='/', expires=time_now + self._cookie_expiry, ) self.session.http.cookies.set( 'YuppflixToken', yuppflix_token, domain='www.yupptv.com', path='/', expires=time_now + self._cookie_expiry, ) self.save_cookies() log.info("Successfully set BoxId and YuppflixToken") def _get_streams(self): self.session.http.headers.update({"User-Agent": useragents.CHROME}) login_box_id = self.get_option("boxid") login_yuppflix_token = self.get_option("yuppflixtoken") if self.options.get("purge_credentials"): self.clear_cookies() self._authed = False log.info("All credentials were successfully removed") if self._authed: log.debug("Attempting to authenticate using cached cookies") elif not self._authed and login_box_id and login_yuppflix_token: self._login_using_box_id_and_yuppflix_token( login_box_id, login_yuppflix_token, ) self._authed = True page = self.session.http.get(self.url) if self._authed and "btnsignup" in page.text: log.error("This device 
requires renewed credentials to log in") return match = self._m3u8_re.search(page.text) if match: stream_url = match.group(1) if "preview/" in stream_url: if "btnsignup" in page.text: log.error("This stream requires you to login") else: log.error("This stream requires a subscription") return return HLSStream.parse_variant_playlist(self.session, stream_url) elif "btnsignup" in page.text: log.error("This stream requires you to login") elif "btnsubscribe" in page.text: log.error("This stream requires a subscription")
class AnimeLab(Plugin): url_re = re.compile(r"https?://(?:www\.)?animelab\.com/player/") login_url = "https://www.animelab.com/login" video_collection_re = re.compile(r"VideoCollection\((\[.*?\])\);") playlist_position_re = re.compile(r"playlistPosition\s*=\s*(\d+);") video_collection_schema = validate.Schema( validate.union({ "position": validate.all( validate.transform(playlist_position_re.search), validate.any( None, validate.all(validate.get(1), validate.transform(int)))), "playlist": validate.all( validate.transform(video_collection_re.search), validate.any( None, validate.all(validate.get(1), validate.transform(parse_json)))) })) arguments = PluginArguments( PluginArgument( "email", requires=["password"], metavar="EMAIL", help="The email address used to register with animelab.com."), PluginArgument( "password", sensitive=True, metavar="PASSWORD", help="A animelab.com account password to use with --animelab-email." )) @classmethod def can_handle_url(cls, url): return cls.url_re.match(url) is not None def login(self, email, password): self.logger.debug("Attempting to log in as {0}", email) res = self.session.http.post(self.login_url, data=dict(email=email, password=password), allow_redirects=False, raise_for_status=False) loc = res.headers.get("Location", "") if "geoblocked" in loc.lower(): self.logger.error("AnimeLab is not available in your territory") elif res.status_code >= 400: self.logger.error( "Failed to login to AnimeLab, check your email/password combination" ) else: return True return False def _get_streams(self): email, password = self.get_option("email"), self.get_option("password") if not email or not password: self.logger.error( "AnimeLab requires authentication, use --animelab-email " "and --animelab-password to set your email/password combination" ) return if self.login(email, password): self.logger.info("Successfully logged in as {0}", email) video_collection = self.session.http.get( self.url, schema=self.video_collection_schema) if 
video_collection["playlist"] is None or video_collection[ "position"] is None: return data = video_collection["playlist"][video_collection["position"]] self.logger.debug("Found {0} version {1} hard-subs", data["language"]["name"], "with" if data["hardSubbed"] else "without") for video in data["videoInstances"]: if video["httpUrl"]: q = video["videoQuality"]["description"] s = HTTPStream(self.session, video["httpUrl"]) yield q, s
class TVPlayer(Plugin): api_url = "https://v1-streams-elb.tvplayer-cdn.com/api/live/stream/" stream_url = "https://live.tvplayer.com/stream-live.php" login_url = "https://v1-auth.tvplayer-cdn.com/login?responseType=redirect&redirectUri=https://tvplayer.com/login&lang=en" update_url = "https://tvplayer.com/account/update-detail" dummy_postcode = "SE1 9LT" # location of ITV HQ in London url_re = re.compile( r"https?://(?:www\.)?tvplayer\.com/(:?uk/)?(:?watch/?|watch/(.+)?)") stream_attrs_re = re.compile( r'data-player-(expiry|key|token|uvid)\s*=\s*"(.*?)"', re.S) login_token_re = re.compile(r'input.*?name="_token".*?value="(\w+)"') stream_schema = validate.Schema({ "response": validate.Schema({ "stream": validate.any(None, validate.text), "drm": validate.any(None, validate.text) }) }) arguments = PluginArguments( PluginArgument( "email", help="The email address used to register with tvplayer.com.", metavar="EMAIL", requires=["password"]), PluginArgument("password", sensitive=True, help="The password for your tvplayer.com account.", metavar="PASSWORD")) @classmethod def can_handle_url(cls, url): match = TVPlayer.url_re.match(url) return match is not None def __init__(self, url): super().__init__(url) self.session.http.headers.update({"User-Agent": useragents.CHROME}) def authenticate(self, username, password): res = self.session.http.get(self.login_url) match = self.login_token_re.search(res.text) token = match and match.group(1) res2 = self.session.http.post(self.login_url, data=dict(email=username, password=password, _token=token), allow_redirects=False) # there is a 302 redirect on a successful login return res2.status_code == 302 def _get_stream_data(self, expiry, key, token, uvid): res = self.session.http.get(self.api_url + uvid, params=dict(key=key, platform="chrome"), headers={ "Token": token, "Token-Expiry": expiry, "Uvid": uvid }) res_schema = self.session.http.json(res, schema=self.stream_schema) if res_schema["response"]["stream"] is None: res = 
self.session.http.get(self.stream_url, params=dict(key=key, platform="chrome"), headers={ "Token": token, "Token-Expiry": expiry, "Uvid": uvid }).json() res_schema["response"]["stream"] = res["Streams"]["Adaptive"] return res_schema def _get_stream_attrs(self, page): stream_attrs = { k.replace("-", "_"): v.strip('"') for k, v in self.stream_attrs_re.findall(page.text) } log.debug(f"Got stream attributes: {str(stream_attrs)}") valid = True for a in ("expiry", "key", "token", "uvid"): if a not in stream_attrs: log.debug(f"Missing '{a}' from stream attributes") valid = False return stream_attrs if valid else {} def _get_streams(self): if self.get_option("email") and self.get_option("password"): log.debug("Logging in as {0}".format(self.get_option("email"))) if not self.authenticate(self.get_option("email"), self.get_option("password")): log.warning("Failed to login as {0}".format( self.get_option("email"))) # find the list of channels from the html in the page self.url = self.url.replace("https", "http") # https redirects to http res = self.session.http.get(self.url) if "enter your postcode" in res.text: log.info(f"Setting your postcode to: {self.dummy_postcode}. " f"This can be changed in the settings on tvplayer.com") res = self.session.http.post( self.update_url, data=dict(postcode=self.dummy_postcode), params=dict(return_url=self.url)) stream_attrs = self._get_stream_attrs(res) if stream_attrs: stream_data = self._get_stream_data(**stream_attrs) if stream_data: if stream_data["response"]["drm"] is not None: log.error( "This stream is protected by DRM can cannot be played") return else: return HLSStream.parse_variant_playlist( self.session, stream_data["response"]["stream"]) else: if "need to login" in res.text: log.error( "You need to login using --tvplayer-email/--tvplayer-password to view this stream" )
class YouTube(Plugin): _oembed_url = "https://www.youtube.com/oembed" _video_info_url = "https://youtube.com/get_video_info" _oembed_schema = validate.Schema({ "author_name": validate.all(validate.text, validate.transform(maybe_decode)), "title": validate.all(validate.text, validate.transform(maybe_decode)) }) # There are missing itags adp_video = { 137: "1080p", 299: "1080p60", # HFR 264: "1440p", 308: "1440p60", # HFR 266: "2160p", 315: "2160p60", # HFR 138: "2160p", 302: "720p60", # HFR 135: "480p", 133: "240p", 160: "144p", } adp_audio = { 140: 128, 141: 256, 171: 128, 249: 48, 250: 64, 251: 160, 256: 256, 258: 258, } arguments = PluginArguments( PluginArgument( "api-key", sensitive=True, help=argparse.SUPPRESS # no longer used )) def __init__(self, url): super(YouTube, self).__init__(url) parsed = urlparse(self.url) if parsed.netloc == 'gaming.youtube.com': self.url = urlunparse(parsed._replace(netloc='www.youtube.com')) self.author = None self.title = None self.video_id = None self.session.http.headers.update({'User-Agent': useragents.CHROME}) def get_author(self): if self.author is None: self.get_oembed return self.author def get_title(self): if self.title is None: self.get_oembed return self.title @classmethod def can_handle_url(cls, url): return _url_re.match(url) @classmethod def stream_weight(cls, stream): match_3d = re.match(r"(\w+)_3d", stream) match_hfr = re.match(r"(\d+p)(\d+)", stream) if match_3d: weight, group = Plugin.stream_weight(match_3d.group(1)) weight -= 1 group = "youtube_3d" elif match_hfr: weight, group = Plugin.stream_weight(match_hfr.group(1)) weight += 1 group = "high_frame_rate" else: weight, group = Plugin.stream_weight(stream) return weight, group @property def get_oembed(self): if self.video_id is None: self.video_id = self._find_video_id(self.url) params = { "url": "https://www.youtube.com/watch?v={0}".format(self.video_id), "format": "json" } res = self.session.http.get(self._oembed_url, params=params) data = 
self.session.http.json(res, schema=self._oembed_schema) self.author = data["author_name"] self.title = data["title"] def _create_adaptive_streams(self, info, streams): adaptive_streams = {} best_audio_itag = None # Extract audio streams from the adaptive format list streaming_data = info.get("player_response", {}).get("streamingData", {}) for stream_info in streaming_data.get("adaptiveFormats", []): if "url" not in stream_info: continue stream_params = dict(parse_qsl(stream_info["url"])) if "itag" not in stream_params: continue itag = int(stream_params["itag"]) # extract any high quality streams only available in adaptive formats adaptive_streams[itag] = stream_info["url"] stream_type, stream_format = stream_info["mimeType"] if stream_type == "audio": stream = HTTPStream(self.session, stream_info["url"]) name = "audio_{0}".format(stream_format) streams[name] = stream # find the best quality audio stream m4a, opus or vorbis if best_audio_itag is None or self.adp_audio[ itag] > self.adp_audio[best_audio_itag]: best_audio_itag = itag if best_audio_itag and adaptive_streams and MuxedStream.is_usable( self.session): aurl = adaptive_streams[best_audio_itag] for itag, name in self.adp_video.items(): if itag in adaptive_streams: vurl = adaptive_streams[itag] log.debug( "MuxedStream: v {video} a {audio} = {name}".format( audio=best_audio_itag, name=name, video=itag, )) streams[name] = MuxedStream(self.session, HTTPStream(self.session, vurl), HTTPStream(self.session, aurl)) return streams def _find_video_id(self, url): m = _url_re.match(url) if m.group("video_id"): log.debug("Video ID from URL") return m.group("video_id") res = self.session.http.get(url) datam = _ytdata_re.search(res.text) if datam: data = parse_json(datam.group(1)) # find the videoRenderer object, where there is a LVE NOW badge for vid_ep in search_dict(data, 'currentVideoEndpoint'): video_id = vid_ep.get("watchEndpoint", {}).get("videoId") if video_id: log.debug("Video ID from currentVideoEndpoint") return 
video_id for x in search_dict(data, 'videoRenderer'): for bstyle in search_dict(x.get("badges", {}), "style"): if bstyle == "BADGE_STYLE_TYPE_LIVE_NOW": if x.get("videoId"): log.debug("Video ID from videoRenderer (live)") return x["videoId"] if "/embed/live_stream" in url: for link in itertags(res.text, "link"): if link.attributes.get("rel") == "canonical": canon_link = link.attributes.get("href") if canon_link != url: log.debug("Re-directing to canonical URL: {0}".format( canon_link)) return self._find_video_id(canon_link) raise PluginError("Could not find a video on this page") def _get_stream_info(self, video_id): # normal _params_1 = {"el": "detailpage"} # age restricted _params_2 = {"el": "embedded"} # embedded restricted _params_3 = { "eurl": "https://youtube.googleapis.com/v/{0}".format(video_id) } count = 0 info_parsed = None for _params in (_params_1, _params_2, _params_3): count += 1 params = {"video_id": video_id} params.update(_params) res = self.session.http.get(self._video_info_url, params=params) info_parsed = parse_query(res.content if is_py2 else res.text, name="config", schema=_config_schema) player_response = info_parsed.get("player_response", {}) playability_status = player_response.get("playabilityStatus", {}) if (playability_status.get("status") != "OK"): reason = playability_status.get("reason") log.debug("get_video_info - {0}: {1}".format(count, reason)) continue self.author = player_response.get("videoDetails", {}).get("author") self.title = player_response.get("videoDetails", {}).get("title") log.debug("get_video_info - {0}: Found data".format(count)) break return info_parsed def _get_streams(self): is_live = False self.video_id = self._find_video_id(self.url) log.debug("Using video ID: {0}", self.video_id) info = self._get_stream_info(self.video_id) if info and info.get("status") == "fail": log.error("Could not get video info: {0}".format( info.get("reason"))) return elif not info: log.error("Could not get video info") return if 
(info.get("player_response", {}).get("videoDetails", {}).get("isLiveContent") or info.get("player_response", {}).get("videoDetails", {}).get("isLive")): log.debug("This video is live.") is_live = True streams = {} protected = False if (info.get("player_response", {}).get("streamingData", {}).get( "adaptiveFormats", [{}])[0].get("cipher") or info.get( "player_response", {}).get("streamingData", {}).get( "formats", [{}])[0].get("cipher")): protected = True log.debug("This video may be protected.") for stream_info in info.get("player_response", {}).get("streamingData", {}).get("formats", []): if "url" not in stream_info: continue stream = HTTPStream(self.session, stream_info["url"]) name = stream_info["qualityLabel"] streams[name] = stream if not is_live: streams = self._create_adaptive_streams(info, streams) hls_manifest = info.get("player_response", {}).get("streamingData", {}).get("hlsManifestUrl") if hls_manifest: try: hls_streams = HLSStream.parse_variant_playlist( self.session, hls_manifest, namekey="pixels") streams.update(hls_streams) except IOError as err: log.warning("Failed to extract HLS streams: {0}", err) if not streams and protected: raise PluginError("This plugin does not support protected videos, " "try youtube-dl instead") return streams
class SVTPlay(Plugin): api_url = 'https://api.svt.se/videoplayer-api/video/{0}' author = None category = None title = None url_re = re.compile( r''' https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se (/(kanaler/)?.*) ''', re.VERBOSE) latest_episode_url_re = re.compile( r''' class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)" ''', re.VERBOSE) live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)') vod_id_re = re.compile( r''' (?:DATA_LAKE\s+=\s+{"content":{"id":|"svtId":|data-video-id=) "(?P<vod_id>[^"]+)" ''', re.VERBOSE) _video_schema = validate.Schema({ validate.optional('programTitle'): validate.text, validate.optional('episodeTitle'): validate.text, 'videoReferences': [{ 'url': validate.url(), 'format': validate.text, }], validate.optional('subtitleReferences'): [{ 'url': validate.url(), 'format': validate.text, }], }) arguments = PluginArguments( PluginArgument( 'mux-subtitles', action='store_true', help= "Automatically mux available subtitles in to the output stream.", ), ) @classmethod def can_handle_url(cls, url): return cls.url_re.match(url) is not None def get_author(self): if self.author is not None: return self.author def get_category(self): if self.category is not None: return self.category def get_title(self): if self.title is not None: return self.title def _set_metadata(self, data, category): if 'programTitle' in data: self.author = data['programTitle'] self.category = category if 'episodeTitle' in data: self.title = data['episodeTitle'] def _get_live(self, path): match = self.live_id_re.search(path) if match is None: return live_id = "ch-{0}".format(match.group('live_id')) log.debug("Live ID={0}".format(live_id)) res = self.session.http.get(self.api_url.format(live_id)) api_data = self.session.http.json(res, schema=self._video_schema) self._set_metadata(api_data, 'Live') for playlist in api_data['videoReferences']: if playlist['format'] == 'dashhbbtv': yield from DASHStream.parse_manifest(self.session, playlist['url']).items() def _get_vod(self): res 
= self.session.http.get(self.url) match = self.latest_episode_url_re.search(res.text) if match: res = self.session.http.get(urljoin(self.url, match.group('url')), ) match = self.vod_id_re.search(res.text) if match is None: return vod_id = match.group('vod_id') log.debug("VOD ID={0}".format(vod_id)) res = self.session.http.get(self.api_url.format(vod_id)) api_data = self.session.http.json(res, schema=self._video_schema) self._set_metadata(api_data, 'VOD') substreams = {} if 'subtitleReferences' in api_data: for subtitle in api_data['subtitleReferences']: if subtitle['format'] == 'webvtt': log.debug("Subtitle={0}".format(subtitle['url'])) substreams[subtitle['format']] = HTTPStream( self.session, subtitle['url'], ) for manifest in api_data['videoReferences']: if manifest['format'] == 'dashhbbtv': for q, s in DASHStream.parse_manifest(self.session, manifest['url']).items(): if self.get_option('mux_subtitles') and substreams: yield q, MuxedStream(self.session, s, subtitles=substreams) else: yield q, s def _get_streams(self): path, live = self.url_re.match(self.url).groups() log.debug("Path={0}".format(path)) if live: return self._get_live(path) else: return self._get_vod()
class YuppTV(Plugin): _url_re = re.compile(r"""https?://(?:www\.)?yupptv\.com""", re.VERBOSE) _m3u8_re = re.compile(r'''['"](http.+\.m3u8.*?)['"]''') _login_url = "https://www.yupptv.com/auth/validateSignin" _box_logout = "https://www.yupptv.com/auth/confirmLogout" _signin_url = "https://www.yupptv.com/signin/" _account_url = "https://www.yupptv.com/account/myaccount.aspx" arguments = PluginArguments( PluginArgument("email", requires=["password"], metavar="EMAIL", help="Your YuppTV account email"), PluginArgument("password", sensitive=True, metavar="PASSWORD", help="Your YuppTV account password.")) @classmethod def can_handle_url(cls, url): return cls._url_re.match(url) is not None def login(self, username, password, depth=3): if depth == 0: log.error("Failed to login to YuppTV") raise PluginError("cannot login") res = http.post(self._login_url, data=dict(user=username, password=password, isMobile=0), headers={"Referer": self._signin_url}) data = http.json(res) resp = data['Response'] if resp["tempBoxid"]: # log out on other device log.info("Logging out on other device: {0}".format( resp["tempBoxid"])) _ = http.get(self._box_logout, params=dict(boxId=resp["tempBoxid"])) return self.login(username, password, depth - 1) return resp['errorCode'], resp['statusmsg'] def _get_streams(self): http.headers.update({"User-Agent": useragents.CHROME}) if self.get_option("email") and self.get_option("password"): error_code, error_msg = self.login(self.get_option("email"), self.get_option("password")) if error_code is None: log.info("Logged in as {0}".format(self.get_option("email"))) else: log.error("Failed to login: {1} (code: {0})".format( error_code, error_msg)) page = http.get(self.url) match = self._m3u8_re.search(page.text) if match: stream_url = match.group(1) if "preview/" in stream_url: if "btnsignup" in page.text: log.error("This stream requires you to login") else: log.error("This stream requires a subscription") return return 
HLSStream.parse_variant_playlist(self.session, stream_url) elif "btnsignup" in page.text: log.error("This stream requires you to login") elif "btnsubscribe" in page.text: log.error("This stream requires a subscription")
class BTSports(Plugin): url_re = re.compile(r"https?://sport\.bt\.com") arguments = PluginArguments( PluginArgument("email", requires=["password"], metavar="EMAIL", required=True, help=""" The email associated with your BT Sport account, required to access any BT Sport stream. """), PluginArgument("password", sensitive=True, metavar="PASSWORD", help="Your BT Sport account password.")) content_re = re.compile(r"CONTENT_(\w+)\s*=\s*'(\w+)'") saml_re = re.compile(r'''name="SAMLResponse" value="(.*?)"''', re.M | re.DOTALL) api_url = "https://be.avs.bt.com/AVS/besc" saml_url = "https://samlfed.bt.com/sportgetfedwebhls" login_url = "https://signin1.bt.com/siteminderagent/forms/login.fcc" def __init__(self, url): super().__init__(url) self.session.http.headers = {"User-Agent": useragents.FIREFOX} @classmethod def can_handle_url(cls, url): return cls.url_re.match(url) is not None def login(self, username, password): log.debug("Logging in as {0}".format(username)) redirect_to = "https://home.bt.com/ss/Satellite/secure/loginforward?view=btsport&redirectURL={0}".format( quote(self.url)) data = { "cookieExpp": "30", "Switch": "yes", "SMPostLoginUrl": "/appsyouraccount/secure/postlogin", "loginforward": "https://home.bt.com/ss/Satellite/secure/loginforward?view=btsport", "smauthreason": "0", "TARGET": redirect_to, "USER": username, "PASSWORD": password } res = self.session.http.post(self.login_url, data=data) log.debug("Redirected to: {0}".format(res.url)) if "loginerror" not in res.text: log.debug("Login successful, getting SAML token") res = self.session.http.get( "https://samlfed.bt.com/sportgetfedwebhls?bt.cid={0}".format( self.acid())) d = self.saml_re.search(res.text) if d: saml_data = d.group(1) log.debug("BT Sports federated login...") res = self.session.http.post(self.api_url, params={ "action": "LoginBT", "channel": "WEBHLS", "bt.cid": self.acid }, data={"SAMLResponse": saml_data}) fed_json = self.session.http.json(res) success = fed_json['resultCode'] == "OK" if not 
success: log.error("Failed to login: {0} - {1}".format( fed_json['errorDescription'], fed_json['message'])) return success else: return False def device_id(self): device_id = self.cache.get("device_id") or str(uuid4()) self.cache.set("device_id", device_id) return device_id def acid(self): acid = self.cache.get("acid") or "{cid}-B-{timestamp}".format( cid=self.device_id(), timestamp=int(time.time())) self.cache.set("acid", acid) return acid def _get_cdn(self, channel_id, channel_type="LIVE"): d = { "action": "GetCDN", "type": channel_type, "id": channel_id, "channel": "WEBHLS", "asJson": "Y", "bt.cid": self.acid(), "_": int(time.time()) } res = self.session.http.get(self.api_url, params=d, headers={"Accept": "application/json"}) return self.session.http.json(res) @Plugin.broken(2946) def _get_streams(self): if self.options.get("email") and self.options.get("password"): if self.login(self.options.get("email"), self.options.get("password")): log.debug("Logged in and authenticated with BT Sports.") res = self.session.http.get(self.url) m = self.content_re.findall(res.text) if m: info = dict(m) data = self._get_cdn(info.get("ID"), info.get("TYPE")) log.debug("CDN respsonse: {0}".format(data)) if data['resultCode'] == 'OK': return HLSStream.parse_variant_playlist( self.session, data['resultObj']['src']) else: log.error("Failed to get stream with error: {0} - {1}". format(data['errorDescription'], data['message'])) else: log.error("Login failed.") else: log.error("A username and password is required to use BT Sports")
class USTVNow(Plugin): _main_js_re = re.compile(r"""src=['"](main\..*\.js)['"]""") _enc_key_re = re.compile( r'(?P<key>AES_(?:Key|IV))\s*:\s*"(?P<value>[^"]+)"') TENANT_CODE = "ustvnow" _api_url = "https://teleupapi.revlet.net/service/api/v1/" _token_url = _api_url + "get/token" _signin_url = "https://www.ustvnow.com/signin" arguments = PluginArguments( PluginArgument("username", metavar="USERNAME", required=True, help="Your USTV Now account username"), PluginArgument("password", sensitive=True, metavar="PASSWORD", required=True, help="Your USTV Now account password", prompt="Enter USTV Now account password")) def __init__(self, url): super(USTVNow, self).__init__(url) self._encryption_config = {} self._token = None @classmethod def encrypt_data(cls, data, key, iv): rkey = "".join(reversed(key)).encode('utf8') riv = "".join(reversed(iv)).encode('utf8') fkey = SHA256.new(rkey).hexdigest()[:32].encode("utf8") cipher = AES.new(fkey, AES.MODE_CBC, riv) encrypted = cipher.encrypt(pad(data, 16, 'pkcs7')) return base64.b64encode(encrypted) @classmethod def decrypt_data(cls, data, key, iv): rkey = "".join(reversed(key)).encode('utf8') riv = "".join(reversed(iv)).encode('utf8') fkey = SHA256.new(rkey).hexdigest()[:32].encode("utf8") cipher = AES.new(fkey, AES.MODE_CBC, riv) decrypted = cipher.decrypt(base64.b64decode(data)) if decrypted: return unpad(decrypted, 16, 'pkcs7') else: return decrypted def _get_encryption_config(self, url): # find the path to the main.js # load the main.js and extract the config if not self._encryption_config: res = self.session.http.get(url) m = self._main_js_re.search(res.text) main_js_path = m and m.group(1) if main_js_path: res = self.session.http.get(urljoin(url, main_js_path)) self._encryption_config = dict( self._enc_key_re.findall(res.text)) return self._encryption_config.get( "AES_Key"), self._encryption_config.get("AES_IV") @property def box_id(self): if not self.cache.get("box_id"): self.cache.set("box_id", str(uuid4())) return 
self.cache.get("box_id") def get_token(self): """ Get the token for USTVNow :return: a valid token """ if not self._token: log.debug("Getting new session token") res = self.session.http.get(self._token_url, params={ "tenant_code": self.TENANT_CODE, "box_id": self.box_id, "product": self.TENANT_CODE, "device_id": 5, "display_lang_code": "ENG", "device_sub_type": "", "timezone": "UTC" }) data = res.json() if data['status']: self._token = data['response']['sessionId'] log.debug("New token: {}".format(self._token)) else: log.error( "Token acquisition failed: {details} ({detail})".format( **data['error'])) raise PluginError("could not obtain token") return self._token def api_request(self, path, data, metadata=None): key, iv = self._get_encryption_config(self._signin_url) post_data = { "data": self.encrypt_data(json.dumps(data).encode('utf8'), key, iv).decode("utf8"), "metadata": self.encrypt_data(json.dumps(metadata).encode('utf8'), key, iv).decode("utf8") } headers = { "box-id": self.box_id, "session-id": self.get_token(), "tenant-code": self.TENANT_CODE, "content-type": "application/json" } res = self.session.http.post(self._api_url + path, data=json.dumps(post_data), headers=headers).json() data = dict((k, v and json.loads(self.decrypt_data(v, key, iv))) for k, v in res.items()) return data def login(self, username, password): log.debug("Trying to login...") resp = self.api_request( "send", { "login_id": username, "login_key": password, "login_mode": "1", "manufacturer": "123" }, {"request": "signin"}) return resp['data']['status'] def _get_streams(self): """ Finds the streams from ustvnow.com. 
""" if self.login(self.get_option("username"), self.get_option("password")): path = urlparse(self.url).path.strip("/") resp = self.api_request("send", {"path": path}, {"request": "page/stream"}) if resp['data']['status']: for stream in resp['data']['response']['streams']: if stream['keys']['licenseKey']: log.warning("Stream possibly protected by DRM") for q, s in HLSStream.parse_variant_playlist( self.session, stream['url']).items(): yield (q, s) else: log.error( "Could not find any streams: {code}: {message}".format( **resp['data']['error'])) else: log.error("Failed to login, check username and password")
class FunimationNow(Plugin):
    """Streamlink plugin for FunimationNow player pages.

    Scrapes the experience ID from the page, optionally logs in, selects the
    requested audio language and yields HLS/MP4 streams (optionally muxed
    with the first suitable subtitle track).
    """

    arguments = PluginArguments(
        PluginArgument("email",
                       argument_name="funimation-email",
                       requires=["password"],
                       help="Email address for your Funimation account."),
        PluginArgument("password",
                       argument_name="funimation-password",
                       sensitive=True,
                       help="Password for your Funimation account."),
        PluginArgument("language",
                       argument_name="funimation-language",
                       choices=["en", "ja", "english", "japanese"],
                       default="english",
                       help="""
        The audio language to use for the stream; japanese or english.

        Default is "english".
        """),
        PluginArgument("mux-subtitles", is_global=True))

    # the numeric experience id embedded in the player URL
    experience_id_re = re.compile(r"/player/(\d+)")
    # fixed quality label used for direct MP4 sources (no real quality info)
    mp4_quality = "480p"

    def _get_streams(self):
        self.session.http.headers = {"User-Agent": useragents.CHROME}
        res = self.session.http.get(self.url)

        # remap en to english, and ja to japanese
        rlanguage = {
            "en": "english",
            "ja": "japanese"
        }.get(
            self.get_option("language").lower(),
            self.get_option("language").lower())

        if "_Incapsula_Resource" in res.text:
            log.error("This page is protected by Incapsula, please see "
                      "https://github.com/streamlink/streamlink/issues/2088"
                      " for a workaround.")
            return

        if "Out of Territory" in res.text:
            log.error(
                "The content requested is not available in your territory.")
            return

        id_m = self.experience_id_re.search(res.text)
        experience_id = id_m and int(id_m.group(1))
        if experience_id:
            log.debug(f"Found experience ID: {experience_id}")
            exp = Experience(self.session, experience_id)

            if self.get_option("email") and self.get_option("password"):
                if exp.login(self.get_option("email"),
                             self.get_option("password")):
                    log.info(
                        f"Logged in to Funimation as {self.get_option('email')}"
                    )
                else:
                    log.warning("Failed to login")

            if exp.episode_info:
                log.debug(f"Found episode: {exp.episode_info['episodeTitle']}")
                log.debug(
                    f"  has languages: {', '.join(exp.episode_info['languages'].keys())}"
                )
                log.debug(f"  requested language: {rlanguage}")
                log.debug(f"  current language: {exp.language}")

                if rlanguage != exp.language:
                    log.debug(f"switching language to: {rlanguage}")
                    exp.set_language(rlanguage)
                    if exp.language != rlanguage:
                        log.warning(
                            f"Requested language {rlanguage} is not available, continuing with {exp.language}"
                        )
                    else:
                        log.debug(f"New experience ID: {exp.experience_id}")

            subtitles = None
            stream_metadata = {}
            disposition = {}
            for subtitle in exp.subtitles():
                log.debug(f"Subtitles: {subtitle['src']}")
                if subtitle["src"].endswith(
                        ".vtt") or subtitle["src"].endswith(".srt"):
                    sub_lang = Localization.get_language(
                        subtitle["language"]).alpha3
                    # pick the first suitable subtitle stream
                    subtitles = subtitles or HTTPStream(
                        self.session, subtitle["src"])
                    stream_metadata["s:s:0"] = [
                        "language={0}".format(sub_lang)
                    ]
                    stream_metadata["s:a:0"] = [
                        "language={0}".format(exp.language_code)
                    ]

            sources = exp.sources()
            if 'errors' in sources:
                for error in sources['errors']:
                    log.error("{0} : {1}".format(error['title'],
                                                 error['detail']))
                return

            for item in sources["items"]:
                url = item["src"]
                if ".m3u8" in url:
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, url).items():
                        if self.get_option("mux_subtitles") and subtitles:
                            yield q, MuxedStream(self.session,
                                                 s,
                                                 subtitles,
                                                 metadata=stream_metadata,
                                                 disposition=disposition)
                        else:
                            yield q, s
                elif ".mp4" in url:
                    # TODO: fix quality
                    s = HTTPStream(self.session, url)
                    if self.get_option("mux_subtitles") and subtitles:
                        yield self.mp4_quality, MuxedStream(
                            self.session,
                            s,
                            subtitles,
                            metadata=stream_metadata,
                            disposition=disposition)
                    else:
                        yield self.mp4_quality, s
        else:
            log.error("Could not find experience ID?!")
class Crunchyroll(Plugin):
    """Streamlink plugin for Crunchyroll media pages (classic and beta URLs)."""

    arguments = PluginArguments(
        PluginArgument(
            "username",
            metavar="USERNAME",
            requires=["password"],
            help="A Crunchyroll username to allow access to restricted streams."
        ),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       nargs="?",
                       const=None,
                       default=None,
                       help="""
        A Crunchyroll password for use with --crunchyroll-username.

        If left blank you will be prompted.
        """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached Crunchyroll credentials to initiate a new session and
        reauthenticate.
        """),
        PluginArgument("session-id",
                       sensitive=True,
                       metavar="SESSION_ID",
                       help="""
        Set a specific session ID for crunchyroll, can be used to bypass
        region restrictions. If using an authenticated session ID, it is
        recommended that the authentication parameters be omitted as the
        session ID is account specific.

        Note: The session ID will be overwritten if authentication is used
        and the session ID does not match the account.
        """))

    @classmethod
    def stream_weight(cls, key):
        # prefer the plugin-specific quality ordering where known
        weight = STREAM_WEIGHTS.get(key)
        if weight:
            return weight, "crunchyroll"
        return Plugin.stream_weight(key)

    def _get_streams(self):
        """Resolve the media id (beta pages need a page scrape), then build
        the quality -> stream mapping from the API's stream_data."""
        beta_json_re = re.compile(r"window.__INITIAL_STATE__\s*=\s*({.*});")
        beta_id = self.match.group("beta_id")
        if beta_id:
            # beta pages embed the state JSON in a script tag; map the beta id
            # to the classic numeric external id ("EPI.<n>").
            # NOTE: the local name shadows the json module (not used below,
            # but fragile if this method is extended).
            json = self.session.http.get(
                self.url,
                schema=validate.Schema(
                    validate.parse_html(),
                    validate.xml_xpath_string(
                        ".//script[contains(text(), 'window.__INITIAL_STATE__')]/text()"
                    ),
                    validate.any(
                        None,
                        validate.all(
                            validate.transform(beta_json_re.search),
                            validate.any(
                                None,
                                validate.all(
                                    validate.get(1),
                                    validate.parse_json(),
                                    validate.any(
                                        None,
                                        validate.all(
                                            {
                                                "content": {
                                                    "byId": {
                                                        str: {
                                                            "external_id": validate.all(
                                                                validate.transform(
                                                                    lambda s: int(
                                                                        s.replace("EPI.", "")
                                                                    )),
                                                            )
                                                        }
                                                    }
                                                }
                                            },
                                            validate.get(("content", "byId")),
                                        )),
                                )),
                        )),
                ))
            if not json or beta_id not in json:
                return
            media_id = json[beta_id]["external_id"]
        else:
            media_id = int(self.match.group("media_id"))

        api = self._create_api()

        try:
            # the media.stream_data field is required, no stream data is
            # returned otherwise
            info = api.get_info(media_id, fields=[
                "media.name", "media.series_name",
                "media.media_type", "media.stream_data"
            ], schema=_media_schema)
        except CrunchyrollAPIError as err:
            raise PluginError(u"Media lookup error: {0}".format(err.msg))

        if not info:
            return

        streams = {}

        self.id = media_id
        self.title = info.get("name")
        self.author = info.get("series_name")
        self.category = info.get("media_type")

        info = info["stream_data"]

        # The adaptive quality stream is sometimes a subset of all the other
        # streams listed; ultra is not included
        has_adaptive = any(
            [s[u"quality"] == u"adaptive" for s in info[u"streams"]])
        if has_adaptive:
            log.debug(u"Loading streams from adaptive playlist")
            for stream in filter(lambda x: x[u"quality"] == u"adaptive",
                                 info[u"streams"]):
                for q, s in HLSStream.parse_variant_playlist(
                        self.session, stream[u"url"]).items():
                    # rename the bitrates to low, mid, or high. ultra doesn't
                    # seem to appear in the adaptive streams
                    name = STREAM_NAMES.get(q, q)
                    streams[name] = s

        # If there is no adaptive quality stream then parse each individual
        # result
        for stream in info[u"streams"]:
            if stream[u"quality"] != u"adaptive":
                # the video_encode_id indicates that the stream is not a
                # variant playlist
                if u"video_encode_id" in stream:
                    streams[stream[u"quality"]] = HLSStream(
                        self.session, stream[u"url"])
                else:
                    # otherwise the stream url is actually a list of stream
                    # qualities
                    for q, s in HLSStream.parse_variant_playlist(
                            self.session, stream[u"url"]).items():
                        # rename the bitrates to low, mid, or high. ultra
                        # doesn't seem to appear in the adaptive streams
                        name = STREAM_NAMES.get(q, q)
                        streams[name] = s

        return streams

    def _create_api(self):
        """Creates a new CrunchyrollAPI object, initiates its session and
        tries to authenticate it either by using saved credentials or the
        user's username and password.
        """
        if self.options.get("purge_credentials"):
            self.cache.set("device_id", None, expires=0)
            self.cache.set("auth", None, expires=0)

        # use the crunchyroll locale as an override, for backwards
        # compatibility
        locale = self.get_option(
            "locale") or self.session.localization.language_code
        api = CrunchyrollAPI(self.cache,
                             self.session,
                             session_id=self.get_option("session_id"),
                             locale=locale)

        if not self.get_option("session_id"):
            # NOTE(review): stdlib logging interpolates %-style lazy args;
            # these brace-style calls with extra args may not be formatted —
            # confirm the project logger supports this.
            log.debug("Creating session with locale: {0}", locale)
            api.start_session()

            if api.auth:
                log.debug("Using saved credentials")
                login = api.authenticate()
                if login:
                    log.info(
                        "Successfully logged in as '{0}'",
                        login["user"]["username"] or login["user"]["email"])
            if not api.auth and self.options.get("username"):
                try:
                    log.debug(
                        "Attempting to login using username and password")
                    api.login(self.options.get("username"),
                              self.options.get("password"))
                    login = api.authenticate()
                    log.info(
                        "Logged in as '{0}'",
                        login["user"]["username"] or login["user"]["email"])
                except CrunchyrollAPIError as err:
                    raise PluginError(u"Authentication error: {0}".format(
                        err.msg))
            if not api.auth:
                log.warning(
                    "No authentication provided, you won't be able to access "
                    "premium restricted content")

        return api
class NicoLive(Plugin):
    """Streamlink plugin for Niconico (nicovideo.jp) live broadcasts.

    Talks to the site's websocket API to obtain an HLS playlist URL and keeps
    the session alive by periodically sending "watching" messages from a
    background thread.
    """

    arguments = PluginArguments(
        PluginArgument("email",
                       argument_name="niconico-email",
                       sensitive=True,
                       metavar="EMAIL",
                       help="The email or phone number associated with your "
                            "Niconico account"),
        PluginArgument("password",
                       argument_name="niconico-password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="The password of your Niconico account"),
        PluginArgument(
            "user-session",
            argument_name="niconico-user-session",
            sensitive=True,
            metavar="VALUE",
            help="Value of the user-session token \n(can be used in "
                 "case you do not want to put your password here)"))

    # websocket-driven state, mutated from the API callback thread
    is_stream_ready = False
    is_stream_ended = False
    watching_interval = 30  # seconds; server may override via "watchinginterval"
    watching_interval_worker_thread = None
    stream_reader = None
    _ws = None
    frontend_id = None

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url) is not None

    def _get_streams(self):
        """Resolve the wss API URL, connect, wait for the stream permit and
        return the quality -> NicoHLSStream mapping (or None on failure)."""
        self.url = self.url.split("?")[0]  # drop query parameters
        self.session.http.headers.update({
            "User-Agent": useragents.CHROME,
        })

        if not self.get_wss_api_url():
            # fix: was "Coundn't"
            _log.debug("Couldn't extract wss_api_url. Attempting login...")
            if not self.niconico_web_login():
                return None
            if not self.get_wss_api_url():
                _log.error("Failed to get wss_api_url.")
                _log.error(
                    "Please check if the URL is correct, "
                    "and make sure your account has access to the video.")
                return None

        self.api_connect(self.wss_api_url)

        # poll every 0.1s; give up after 600 iterations (~60 seconds)
        i = 0
        while not self.is_stream_ready:
            if i % 10 == 0:
                _log.debug("Waiting for permit...")
            if i == 600:
                _log.error("Waiting for permit timed out.")
                return None
            if self.is_stream_ended:
                return None
            time.sleep(0.1)
            i += 1

        streams = HLSStream.parse_variant_playlist(self.session,
                                                   self.hls_stream_url)
        nico_streams = {}
        for s in streams:
            nico_stream = NicoHLSStream(streams[s], self)
            nico_streams[s] = nico_stream

        return nico_streams

    def get_wss_api_url(self):
        """Scrape the websocket API URL and frontend id from the video page.

        :return: True-ish only when a wss:// URL was extracted
        """
        _log.debug("Getting video page: {0}".format(self.url))
        resp = self.session.http.get(self.url)

        try:
            # fix: the delimiters are the quoted JSON fragments surrounding
            # the value (the original had garbled/unescaped quoting here)
            self.wss_api_url = extract_text(resp.text, '"webSocketUrl":"', '"')
            if not self.wss_api_url:
                return False
        except Exception as e:
            _log.debug(e)
            _log.debug("Failed to extract wss api url")
            return False

        try:
            self.frontend_id = extract_text(resp.text, '"frontendId":', ',')
        except Exception as e:
            _log.debug(e)
            # non-fatal: the URL is still usable without a frontend id
            _log.warning("Failed to extract frontend id")

        self.wss_api_url = "{0}&frontend_id={1}".format(self.wss_api_url,
                                                        self.frontend_id)

        _log.debug("Video page response code: {0}".format(resp.status_code))
        _log.trace("Video page response body: {0}".format(resp.text))
        _log.debug("Got wss_api_url: {0}".format(self.wss_api_url))
        _log.debug("Got frontend_id: {0}".format(self.frontend_id))

        return self.wss_api_url.startswith("wss://")

    def api_on_open(self):
        self.send_playerversion()
        require_new_stream = not self.is_stream_ready
        self.send_getpermit(require_new_stream=require_new_stream)

    def api_on_error(self, ws, error=None):
        if error:
            _log.warning(error)
        _log.warning("wss api disconnected.")
        _log.warning("Attempting to reconnect in 5 secs...")
        time.sleep(5)
        self.api_connect(self.wss_api_url)

    def api_connect(self, url):
        """Open the websocket API connection on a daemon thread."""
        # Proxy support adapted from the UStreamTV plugin (ustreamtv.py)
        proxy_url = self.session.get_option("https-proxy")
        if proxy_url is None:
            proxy_url = self.session.get_option("http-proxy")
        proxy_options = parse_proxy_url(proxy_url)
        if proxy_options.get('http_proxy_host'):
            _log.debug("Using proxy ({0}://{1}:{2})".format(
                proxy_options.get('proxy_type') or "http",
                proxy_options.get('http_proxy_host'),
                proxy_options.get('http_proxy_port') or 80))

        _log.debug("Connecting: {0}".format(url))
        self._ws = websocket.WebSocketApp(
            url,
            header=["User-Agent: {0}".format(useragents.CHROME)],
            on_open=self.api_on_open,
            on_message=self.handle_api_message,
            on_error=self.api_on_error)
        self.ws_worker_thread = threading.Thread(target=self._ws.run_forever,
                                                 args=proxy_options)
        self.ws_worker_thread.daemon = True
        self.ws_worker_thread.start()

    def send_message(self, type_, body):
        """Send a typed message with a body over the websocket."""
        msg = {"type": type_, "body": body}
        msg_json = json.dumps(msg)
        _log.debug(f"Sending: {msg_json}")
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_no_body_message(self, type_):
        """Send a typed message without a body over the websocket."""
        msg = {"type": type_}
        msg_json = json.dumps(msg)
        _log.debug(f"Sending: {msg_json}")
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_custom_message(self, msg):
        """Send an arbitrary dict as JSON over the websocket."""
        msg_json = json.dumps(msg)
        _log.debug(f"Sending: {msg_json}")
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_playerversion(self):
        body = {
            "type": "startWatching",
            "data": {
                "stream": {
                    "quality": "abr",
                    "protocol": "hls",
                    "latency": "high",
                    "chasePlay": False
                },
                "room": {
                    "protocol": "webSocket",
                    "commentable": True
                },
                "reconnect": False
            }
        }
        self.send_custom_message(body)

    def send_getpermit(self, require_new_stream=True):
        # NOTE(review): require_new_stream is currently unused; kept for
        # interface compatibility with api_on_open()
        body = {"type": "getAkashic", "data": {"chasePlay": False}}
        self.send_custom_message(body)

    def send_watching(self):
        # NOTE(review): self.broadcast_id is not assigned anywhere in this
        # class — confirm where it is set before relying on this path
        body = {
            "command": "watching",
            "params": [self.broadcast_id, "-1", "0"]
        }
        self.send_message("watch", body)

    def send_pong(self):
        self.send_no_body_message("pong")
        self.send_no_body_message("keepSeat")

    def handle_api_message(self, message):
        """Dispatch incoming websocket API messages."""
        _log.debug(f"Received: {message}")
        message_parsed = json.loads(message)

        if message_parsed["type"] == "stream":
            data = message_parsed["data"]
            self.hls_stream_url = data["uri"]
            self.is_stream_ready = True

        if message_parsed["type"] == "watch":
            body = message_parsed["body"]
            command = body["command"]

            if command == "currentstream":
                current_stream = body["currentStream"]
                self.hls_stream_url = current_stream["uri"]
                self.is_stream_ready = True

            elif command == "watchinginterval":
                self.watching_interval = int(body["params"][0])
                _log.debug("Got watching_interval: {0}".format(
                    self.watching_interval))

                if self.watching_interval_worker_thread is None:
                    _log.debug("send_watching_scheduler starting.")
                    self.watching_interval_worker_thread = threading.Thread(
                        target=self.send_watching_scheduler)
                    self.watching_interval_worker_thread.daemon = True
                    self.watching_interval_worker_thread.start()
                else:
                    _log.debug("send_watching_scheduler already running.")

            elif command == "disconnect":
                _log.info("Websocket API closed.")
                _log.info("Stream ended.")
                self.is_stream_ended = True
                if self.stream_reader is not None:
                    self.stream_reader.close()
                    _log.info("Stream reader closed.")

        elif message_parsed["type"] == "ping":
            self.send_pong()

    def send_watching_scheduler(self):
        """
        Periodically send "watching" command to the API.
        This is necessary to keep the session alive.
        """
        while not self.is_stream_ended:
            self.send_watching()
            time.sleep(self.watching_interval)

    def niconico_web_login(self):
        """Authenticate via a user-session cookie or email/password.

        :return: True when a session cookie is in place afterwards
        """
        user_session = self.get_option("user-session")
        email = self.get_option("email")
        password = self.get_option("password")

        if user_session is not None:
            _log.info("User session cookie is provided. Using it.")
            self.session.http.cookies.set("user_session",
                                          user_session,
                                          path="/",
                                          domain="nicovideo.jp")
            self.save_cookies()
            return True
        elif email is not None and password is not None:
            # fix: was "Attemping"
            _log.info("Email and password are provided. Attempting login.")
            payload = {"mail_tel": email, "password": password}
            resp = self.session.http.post(_login_url,
                                          data=payload,
                                          params=_login_url_params)

            _log.debug("Login response code: {0}".format(resp.status_code))
            _log.trace("Login response body: {0}".format(resp.text))
            _log.debug("Cookies: {0}".format(
                self.session.http.cookies.get_dict()))

            if self.session.http.cookies.get("user_session") is None:
                try:
                    msg = extract_text(resp.text,
                                       '<p class="notice__text">', "</p>")
                except Exception as e:
                    _log.debug(e)
                    msg = "unknown reason"
                # fix: warn() is deprecated in favour of warning()
                _log.warning("Login failed. {0}".format(msg))
                return False
            else:
                _log.info("Logged in.")
                self.save_cookies()
                return True
        else:
            _log.warning(
                "Neither a email and password combination nor a user session "
                "token is provided. Cannot attempt login.")
            return False
class UStreamTV(Plugin):
    """Plugin for UStream.tv channels and recorded videos.

    Resolves the media id from the URL (or the page's meta tag), opens a
    websocket API client and yields plain or muxed audio/video streams.
    """

    arguments = PluginArguments(
        PluginArgument(
            "password",
            argument_name="ustream-password",
            sensitive=True,
            metavar="PASSWORD",
            help="A password to access password protected UStream.tv channels."
        ))

    # seconds to wait for the websocket client to deliver stream data
    STREAM_READY_TIMEOUT = 15

    def _get_media_app(self):
        """Return ``(media_id, application)`` for the matched URL."""
        # recorded videos take precedence over channels
        video_id = self.match.group("video_id")
        if video_id:
            return video_id, "recorded"

        channel_id = self.match.group("channel_id")
        if channel_id:
            return channel_id, "channel"

        # no id in the URL: scrape the channel id from the page's meta tag
        page_schema = validate.Schema(
            validate.parse_html(),
            validate.xml_xpath_string(
                ".//meta[@name='ustream:channel_id'][@content][1]/@content"))
        scraped_id = self.session.http.get(
            self.url,
            headers={"User-Agent": useragents.CHROME},
            schema=page_schema)
        return scraped_id, "channel"

    def _get_streams(self):
        # muxing support is required for the separate audio/video tracks
        if not MuxedStream.is_usable(self.session):
            return

        media_id, application = self._get_media_app()
        if not media_id:
            return

        wsclient = UStreamTVWsClient(self.session,
                                     media_id,
                                     application,
                                     referrer=self.url,
                                     cluster="live",
                                     password=self.get_option("password"))
        log.debug("Connecting to UStream API:"
                  " media_id={0},"
                  " application={1},"
                  " referrer={2},"
                  " cluster=live".format(media_id, application, self.url))
        wsclient.start()

        log.debug(
            "Waiting for stream data (for at most {0} seconds)...".format(
                self.STREAM_READY_TIMEOUT))
        got_data = wsclient.ready.wait(self.STREAM_READY_TIMEOUT)
        if not got_data or not wsclient.is_alive() or wsclient.stream_error:
            log.error(wsclient.stream_error
                      or "Waiting for stream data timed out.")
            wsclient.close()
            return

        videos = wsclient.stream_formats_video
        audios = wsclient.stream_formats_audio
        if audios:
            # separate audio tracks: offer every video/audio combination
            for video in videos:
                for audio in audios:
                    name = "{0}p+a{1}k".format(video.height, audio.bitrate)
                    yield name, MuxedStream(
                        self.session,
                        UStreamTVStream(self.session, "video", wsclient,
                                        video),
                        UStreamTVStream(self.session, "audio", wsclient,
                                        audio))
        else:
            # audio is already part of the video stream
            for video in videos:
                name = "{0}p".format(video.height)
                yield name, UStreamTVStream(self.session, "video", wsclient,
                                            video)
class AfreecaTV(Plugin):
    """Streamlink plugin for AfreecaTV live broadcasts.

    Optionally logs in, resolves the channel's broadcast info and yields an
    HLS stream per known quality.
    """

    login_url = "https://member.afreecatv.com:8111/login/LoginAction.php"

    arguments = PluginArguments(
        PluginArgument(
            "username",
            requires=["password"],
            metavar="USERNAME",
            help="The username used to register with afreecatv.com."),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help=
            "A afreecatv.com account password to use with --afreeca-username.")
    )

    @classmethod
    def can_handle_url(cls, url):
        # fix: first argument of a classmethod is `cls` (PEP 8); return an
        # explicit bool for consistency with the other plugins in this file
        # (truthiness for callers is unchanged)
        return _url_re.match(url) is not None

    @classmethod
    def stream_weight(cls, key):
        # prefer the plugin-specific quality ordering where known
        weight = QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, "afreeca"

        return Plugin.stream_weight(key)

    def _get_channel_info(self, username):
        """Fetch the channel's landing info for ``username``."""
        data = {"bid": username, "mode": "landing", "player_type": "html5"}
        res = self.session.http.post(CHANNEL_API_URL, data=data)
        return self.session.http.json(res, schema=_channel_schema)

    def _get_hls_key(self, broadcast, username, quality):
        """Request the HLS AID key for a broadcast/quality combination."""
        headers = {"Referer": self.url}
        data = {
            "bid": username,
            "bno": broadcast,
            "pwd": "",
            "quality": quality,
            "type": "pwd"
        }
        res = self.session.http.post(CHANNEL_API_URL,
                                     data=data,
                                     headers=headers)
        return self.session.http.json(res, schema=_channel_schema)

    def _get_stream_info(self, broadcast, quality, cdn, rmd):
        """Resolve the playlist URL for a broadcast from the stream API."""
        params = {
            "return_type": cdn,
            "broad_key": "{broadcast}-flash-{quality}-hls".format(**locals())
        }
        res = self.session.http.get(STREAM_INFO_URLS.format(rmd=rmd),
                                    params=params)
        return self.session.http.json(res, schema=_stream_schema)

    def _get_hls_stream(self, broadcast, username, quality, cdn, rmd):
        """Return an HLSStream for the given quality, or None if the key or
        the view URL could not be obtained."""
        keyjson = self._get_hls_key(broadcast, username, quality)

        if keyjson["RESULT"] != CHANNEL_RESULT_OK:
            return
        key = keyjson["AID"]

        info = self._get_stream_info(broadcast, quality, cdn, rmd)

        if "view_url" in info:
            return HLSStream(self.session,
                             info["view_url"],
                             params=dict(aid=key))

    def _login(self, username, password):
        """POST credentials to the login endpoint.

        :return: True when the API reports success (RESULT == 1)
        """
        data = {
            "szWork": "login",
            "szType": "json",
            "szUid": username,
            "szPassword": password,
            "isSaveId": "true",
            "isSavePw": "false",
            "isSaveJoin": "false"
        }
        res = self.session.http.post(self.login_url, data=data)
        res = self.session.http.json(res)
        # fix: collapse the redundant if/else returning True/False
        return res["RESULT"] == 1

    def _get_streams(self):
        # ignore the fake "preloading" segments unless the user configured
        # their own ignore list
        if not self.session.get_option("hls-segment-ignore-names"):
            ignore_segment = ["preloading"]
            self.session.set_option("hls-segment-ignore-names", ignore_segment)

        login_username = self.get_option("username")
        login_password = self.get_option("password")

        if login_username and login_password:
            self.logger.debug("Attempting login as {0}".format(login_username))
            if self._login(login_username, login_password):
                self.logger.info(
                    "Successfully logged in as {0}".format(login_username))
            else:
                self.logger.info(
                    "Failed to login as {0}".format(login_username))

        match = _url_re.match(self.url)
        username = match.group("username")

        channel = self._get_channel_info(username)
        if channel.get("BPWD") == "Y":
            self.logger.error("Stream is Password-Protected")
            return
        elif channel.get("RESULT") == -6:
            self.logger.error("Login required")
            return
        elif channel.get("RESULT") != CHANNEL_RESULT_OK:
            return

        (broadcast, rmd, cdn) = (channel["BNO"], channel["RMD"],
                                 channel["CDN"])
        if not (broadcast and rmd and cdn):
            return

        for qkey in QUALITYS:
            hls_stream = self._get_hls_stream(broadcast, username, qkey, cdn,
                                              rmd)
            if hls_stream:
                yield qkey, hls_stream
class Zattoo(Plugin):
    """Streamlink plugin for Zattoo and its white-label TV platforms
    (Quickline, Quantum TV, EWE, NetCologne, M-net, 1&1, etc.)."""

    API_CHANNELS = '{0}/zapi/v2/cached/channels/{1}?details=False'
    API_HELLO = '{0}/zapi/session/hello'
    API_HELLO_V2 = '{0}/zapi/v2/session/hello'
    API_HELLO_V3 = '{0}/zapi/v3/session/hello'
    # NOTE(review): this value looks redacted/corrupted — every other API_*
    # constant is a '{0}/zapi/...' format template and _login() calls
    # .format(self.base_url) on it; confirm the real endpoint.
    API_LOGIN = '******'
    API_LOGIN_V3 = '{0}/zapi/v3/account/login'
    API_SESSION = '{0}/zapi/v2/session'
    API_WATCH = '{0}/zapi/watch'
    API_WATCH_REC = '{0}/zapi/watch/recording/{1}'
    API_WATCH_VOD = '{0}/zapi/avod/videos/{1}/watch'

    STREAMS_ZATTOO = ['dash', 'hls', 'hls5']

    TIME_CONTROL = 60 * 60 * 2  # revalidate the session every 2 hours
    TIME_SESSION = 60 * 60 * 24 * 30  # cached session lifetime: 30 days

    _url_re = re.compile(r'''(?x)
        https?://
        (?P<base_url>
            (?:(?:
                iptv\.glattvision|www\.(?:myvisiontv|saktv|vtxtv)
            )\.ch
            )|(?:(?:
                mobiltv\.quickline|www\.quantum-tv|zattoo
            )\.com
            )|(?:(?:
                tvonline\.ewe|nettv\.netcologne|tvplus\.m-net
            )\.de
            )|(?:(?:
                player\.waly|www\.(?:1und1|netplus)
            )\.tv)
            |www\.bbv-tv\.net
            |www\.meinewelt\.cc
        )/
        (?:
            (?:
                recording(?:s\?recording=|/)
                |
                (?:ondemand/)?(?:watch/(?:[^/\s]+)(?:/[^/]+/))
            )(?P<recording_id>\d+)
            |
            (?:
                (?:live/|watch/)|(?:channels(?:/\w+)?|guide)\?channel=
            )(?P<channel>[^/\s]+)
            |
            ondemand(?:\?video=|/watch/)(?P<vod_id>[^-]+)
        )
    ''')

    # the app token embedded in the HTML of most white-label portals
    _app_token_re = re.compile(r"""window\.appToken\s+=\s+'([^']+)'""")

    _channels_schema = validate.Schema(
        {
            'success': bool,
            'channel_groups': [{
                'channels': [
                    {
                        'display_alias': validate.text,
                        'cid': validate.text
                    },
                ]
            }]
        },
        validate.get('channel_groups'),
    )

    _session_schema = validate.Schema(
        {
            'success': bool,
            'session': {
                'loggedin': bool
            }
        }, validate.get('session'))

    arguments = PluginArguments(
        PluginArgument("email",
                       requires=["password"],
                       metavar="EMAIL",
                       help="""
        The email associated with your zattoo account, required to access any
        zattoo stream.
        """),
        PluginArgument("password",
                       sensitive=True,
                       metavar="PASSWORD",
                       help="""
        A zattoo account password to use with --zattoo-email.
        """),
        PluginArgument("purge-credentials",
                       action="store_true",
                       help="""
        Purge cached zattoo credentials to initiate a new session and
        reauthenticate.
        """),
        PluginArgument('stream-types',
                       metavar='TYPES',
                       type=comma_list_filter(STREAMS_ZATTOO),
                       default=['hls'],
                       help='''
        A comma-delimited list of stream types which should be used, the
        following types are allowed:

        - {0}

        Default is "hls".
        '''.format('\n        - '.join(STREAMS_ZATTOO))))

    def __init__(self, url):
        super(Zattoo, self).__init__(url)
        self.domain = self._url_re.match(url).group('base_url')
        # per-domain attribute cache for uuid / power_guide_hash / session flag
        self._session_attributes = Cache(
            filename='plugin-cache.json',
            key_prefix='zattoo:attributes:{0}'.format(self.domain))
        self._uuid = self._session_attributes.get('uuid')
        # authed only while the cached hash/uuid AND both session cookies
        # are still present
        self._authed = (self._session_attributes.get('power_guide_hash')
                        and self._uuid
                        and self.session.http.cookies.get(
                            'pzuid', domain=self.domain)
                        and self.session.http.cookies.get(
                            'beaker.session.id', domain=self.domain))
        self._session_control = self._session_attributes.get(
            'session_control', False)
        self.base_url = 'https://{0}'.format(self.domain)
        self.headers = {
            'User-Agent': useragents.CHROME,
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.base_url
        }

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _hello(self):
        """Start a fresh API session ("hello") using the portal's app token."""
        log.debug('_hello ...')

        # a new session is required for the app_token
        self.session.http.cookies = cookiejar_from_dict({})
        if self.base_url == 'https://zattoo.com':
            app_token_url = 'https://zattoo.com/client/token-2fb69f883fea03d06c68c6e5f21ddaea.json'
        elif self.base_url == 'https://www.quantum-tv.com':
            app_token_url = 'https://www.quantum-tv.com/token-4d0d61d4ce0bf8d9982171f349d19f34.json'
        else:
            app_token_url = self.base_url

        res = self.session.http.get(app_token_url)
        if self.base_url == 'https://www.quantum-tv.com':
            app_token = self.session.http.json(res)["session_token"]
            hello_url = self.API_HELLO_V3.format(self.base_url)
        elif self.base_url == 'https://zattoo.com':
            app_token = self.session.http.json(res)['app_tid']
            hello_url = self.API_HELLO_V2.format(self.base_url)
        else:
            match = self._app_token_re.search(res.text)
            app_token = match.group(1)
            hello_url = self.API_HELLO.format(self.base_url)

        # reuse the cached device uuid, or create and cache a new one
        if self._uuid:
            __uuid = self._uuid
        else:
            __uuid = str(uuid.uuid4())
            self._session_attributes.set('uuid',
                                         __uuid,
                                         expires=self.TIME_SESSION)

        if self.base_url == 'https://zattoo.com':
            params = {
                'uuid': __uuid,
                'app_tid': app_token,
                'app_version': '1.0.0'
            }
        else:
            params = {
                'client_app_token': app_token,
                'uuid': __uuid,
            }

        if self.base_url == 'https://www.quantum-tv.com':
            params['app_version'] = '3.2028.3'
        else:
            params['lang'] = 'en'
            params['format'] = 'json'

        res = self.session.http.post(hello_url,
                                     headers=self.headers,
                                     data=params)

    def _login(self, email, password):
        """Authenticate and persist cookies plus session attributes."""
        log.debug('_login ... Attempting login as {0}'.format(email))

        params = {'login': email, 'password': password, 'remember': 'true'}

        # NOTE(review): other branches compare against
        # 'https://www.quantum-tv.com' (with www.); this comparison can
        # never match a base_url built from the matched host — confirm
        # which form is intended.
        if self.base_url == 'https://quantum-tv.com':
            login_url = self.API_LOGIN_V3.format(self.base_url)
        else:
            login_url = self.API_LOGIN.format(self.base_url)

        try:
            res = self.session.http.post(login_url,
                                         headers=self.headers,
                                         data=params)
        except Exception as e:
            if '400 Client Error' in str(e):
                raise PluginError(
                    'Failed to login, check your username/password')
            raise e

        data = self.session.http.json(res)
        self._authed = data['success']
        log.debug('New Session Data')
        self.save_cookies(default_expires=self.TIME_SESSION)
        self._session_attributes.set('power_guide_hash',
                                     data['session']['power_guide_hash'],
                                     expires=self.TIME_SESSION)
        self._session_attributes.set('session_control',
                                     True,
                                     expires=self.TIME_CONTROL)

    def _watch(self):
        """Resolve the watch URL for the matched channel/vod/recording and
        yield streams for each configured stream type."""
        log.debug('_watch ...')
        match = self._url_re.match(self.url)
        if not match:
            log.debug('_watch ... no match')
            return
        channel = match.group('channel')
        vod_id = match.group('vod_id')
        recording_id = match.group('recording_id')

        params = {'https_watch_urls': True}
        if channel:
            watch_url = self.API_WATCH.format(self.base_url)
            params_cid = self._get_params_cid(channel)
            if not params_cid:
                return
            params.update(params_cid)
        elif vod_id:
            log.debug('Found vod_id: {0}'.format(vod_id))
            watch_url = self.API_WATCH_VOD.format(self.base_url, vod_id)
        elif recording_id:
            log.debug('Found recording_id: {0}'.format(recording_id))
            watch_url = self.API_WATCH_REC.format(self.base_url, recording_id)
        else:
            log.debug('Missing watch_url')
            return

        zattoo_stream_types = self.get_option('stream-types') or ['hls']
        for stream_type in zattoo_stream_types:
            params_stream_type = {'stream_type': stream_type}
            params.update(params_stream_type)

            try:
                res = self.session.http.post(watch_url,
                                             headers=self.headers,
                                             data=params)
            except Exception as e:
                # map well-known HTTP errors to actionable messages
                if '404 Client Error' in str(e):
                    log.error('Unfortunately streaming is not permitted in '
                              'this country or this channel does not exist.')
                elif '402 Client Error: Payment Required' in str(e):
                    log.error('Paid subscription required for this channel.')
                    log.info('If paid subscription exist, use --zattoo-purge'
                             '-credentials to start a new session.')
                elif '403 Client Error' in str(e):
                    log.debug('Force session reset for watch_url')
                    self.reset_session()
                else:
                    log.error(str(e))
                return

            data = self.session.http.json(res)
            log.debug('Found data for {0}'.format(stream_type))
            if data['success'] and stream_type in ['hls', 'hls5']:
                for url in data['stream']['watch_urls']:
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url['url']).items():
                        yield s
            elif data['success'] and stream_type == 'dash':
                for url in data['stream']['watch_urls']:
                    for s in DASHStream.parse_manifest(self.session,
                                                       url['url']).items():
                        yield s

    def _get_params_cid(self, channel):
        """Map a channel display alias to its cid; returns watch params or
        False when the channel list could not be fetched."""
        log.debug('get channel ID for {0}'.format(channel))
        channels_url = self.API_CHANNELS.format(
            self.base_url,
            self._session_attributes.get('power_guide_hash'))
        try:
            res = self.session.http.get(channels_url, headers=self.headers)
        except Exception:
            log.debug('Force session reset for _get_params_cid')
            self.reset_session()
            return False

        data = self.session.http.json(res, schema=self._channels_schema)

        c_list = []
        for d in data:
            for c in d['channels']:
                c_list.append(c)

        cid = []
        zattoo_list = []
        for c in c_list:
            zattoo_list.append(c['display_alias'])
            if c['display_alias'] == channel:
                cid = c['cid']

        log.debug('Available zattoo channels in this country: {0}'.format(
            ', '.join(sorted(zattoo_list))))

        # fall back to the raw value from the URL when the alias is unknown
        if not cid:
            cid = channel

        log.debug('CHANNEL ID: {0}'.format(cid))
        return {'cid': cid}

    def reset_session(self):
        """Drop all cached session state and cookies."""
        self._session_attributes.set('power_guide_hash', None, expires=0)
        self._session_attributes.set('uuid', None, expires=0)
        self.clear_cookies()
        self._authed = False

    def _get_streams(self):
        email = self.get_option('email')
        password = self.get_option('password')

        if self.options.get('purge_credentials'):
            self.reset_session()
            log.info('All credentials were successfully removed.')
        elif (self._authed and not self._session_control):
            # check every two hours, if the session is actually valid
            log.debug('Session control for {0}'.format(self.domain))
            res = self.session.http.get(self.API_SESSION.format(self.base_url))
            res = self.session.http.json(res, schema=self._session_schema)
            if res['loggedin']:
                self._session_attributes.set('session_control',
                                             True,
                                             expires=self.TIME_CONTROL)
            else:
                log.debug('User is not logged in')
                self._authed = False

        if not self._authed and (not email and not password):
            log.error(
                'A login for Zattoo is required, use --zattoo-email EMAIL'
                ' --zattoo-password PASSWORD to set them')
            return

        if not self._authed:
            self._hello()
            self._login(email, password)

        return self._watch()