class Mitele(Plugin):
    """Streamlink plugin for live channels on mitele.es (Mediaset España).

    Flow: obtain an Accedo AppGrid session key (cached), fetch the API
    configuration to get an Ooyala API key, resolve the channel name to an
    Ooyala embed code, then ask the Ooyala authorization API for HLS streams.
    """

    # live URLs look like https://www.mitele.es/directo/<channel>
    url_re = re.compile(r"https?://(?:www.)?mitele.es/directo/(\w+)")
    # only these channel slugs are supported by this plugin
    supported_channels = ("telecinco", "bemad", "boing", "cuatro")
    # Accedo AppGrid application key used when requesting a session
    app_key = "56c3464fe4b0b8a18ac02511"
    session_url = "https://appgrid-api.cloud.accedo.tv/session"
    config_url = "https://appgrid-api.cloud.accedo.tv/metadata/general_configuration, web_configuration?" \
                 "sessionKey={key}"
    channel_id_url = "http://indalo.mediaset.es/mmc-player/api/mmc/v1/{channel}/live/flash.json"
    stream_info_url = "http://player.ooyala.com/sas/player_api/v2/authorization/embed_code/{key}/{yoo}?" \
                      "device=html5&domain=www.mitele.es"

    # session endpoint response: the key plus an absolute expiry timestamp (UTC)
    session_schema = validate.Schema({
        "sessionKey": validate.text,
        "expiration": validate.transform(
            lambda d: datetime.strptime(d, "%Y%m%dT%H:%M:%S+0000"))
    })
    # config endpoint: we only need the Ooyala discovery API key
    config_schema = validate.Schema({
        "general_configuration": {
            "api_configuration": {
                "ooyala_discovery": {
                    "api_key": validate.text
                }
            }
        }
    })
    # channel info: extract the first location's "yoo" (Ooyala embed code)
    channel_id_schema = validate.Schema(
        validate.all({"locations": [{
            "yoo": validate.text
        }]},
            validate.get("locations"),
            validate.get(0),
            validate.get("yoo")))
    # authorization response: per-embed-code auth flag, message, and a list of
    # streams whose URL is base64-encoded
    stream_info_schema = validate.Schema({
        "authorization_data": {
            validate.text: {
                "authorized": bool,
                "message": validate.text,
                validate.optional("streams"): [{
                    "delivery_type": validate.text,
                    "url": {
                        "format": "encoded",
                        "data": validate.all(
                            validate.text,
                            validate.transform(b64decode),
                            validate.transform(lambda d: d.decode("utf8")),
                            validate.url())
                    }
                }]
            }
        }
    })

    def __init__(self, url):
        super(Mitele, self).__init__(url)
        # persistent cache used for the AppGrid session key
        self.cache = Cache("mitele.cache")

    @classmethod
    def can_handle_url(cls, url):
        # the URL must match and the channel slug must be one we support
        m = cls.url_re.match(url)
        return m and m.group(1) in cls.supported_channels

    @property
    def session_key(self):
        """
        Get a cached or new session key; uuid is a random uuid (type 4).

        :return: the AppGrid session key string
        """
        session_key = self.cache.get("sessionKey")
        if session_key:
            self.logger.debug("Using cached sessionKey")
            return session_key
        else:
            self.logger.debug("Requesting new sessionKey")
            uuid = uuid4()
            res = http.get(self.session_url,
                           params=dict(appKey=self.app_key, uuid=uuid))
            data = parse_json(res.text, schema=self.session_schema)
            # when to expire the sessionKey, -1 hour for good measure
            # NOTE(review): expiration is parsed as UTC but compared against
            # datetime.now() (local time) — confirm the intended timezone.
            expires_in = (data["expiration"] - datetime.now()).total_seconds() - 3600
            self.cache.set("sessionKey", data["sessionKey"], expires=expires_in)
            return data["sessionKey"]

    @property
    def config(self):
        """
        Get the API config data (validated down to the Ooyala api_key path).
        """
        config_res = http.get(self.config_url.format(key=self.session_key))
        return parse_json(config_res.text, schema=self.config_schema)

    def get_channel_id(self, channel):
        """
        Get the ID of the channel from the name.

        :param channel: channel name
        :return: channel id (Ooyala embed code)
        """
        channel_id_res = http.get(self.channel_id_url.format(channel=channel))
        return parse_json(channel_id_res.text, schema=self.channel_id_schema)

    def get_stream_info(self, key, channel_id):
        """
        Get details about the streams.

        :param key: API key
        :param channel_id: channel id
        :return: stream info (authorization data keyed by embed code)
        """
        stream_info_res = http.get(
            self.stream_info_url.format(key=key, yoo=channel_id))
        return parse_json(stream_info_res.text, schema=self.stream_info_schema)

    def _get_streams(self):
        channel = self.url_re.match(self.url).group(1)

        # the api_key is "<key>.<sig>"; only the key part goes in the URL
        key, sig = self.config["general_configuration"]["api_configuration"][
            "ooyala_discovery"]["api_key"].split(".")
        self.logger.debug("Got api key: {}.{}", key, sig)

        channel_id = self.get_channel_id(channel)
        self.logger.debug("Got channel ID {} for channel {}", channel_id, channel)

        data = self.get_stream_info(key, channel_id)
        stream_info = data["authorization_data"][channel_id]

        if stream_info["authorized"]:
            # only HLS delivery is handled; each variant becomes a stream
            for stream in stream_info["streams"]:
                if stream["delivery_type"] == "hls":
                    for s in HLSStream.parse_variant_playlist(
                            self.session, stream["url"]["data"]).items():
                        yield s
        else:
            self.logger.error("Cannot load streams: {}", stream_info["message"])
log = logging.getLogger(__name__) COOKIES = {"family_filter": "off", "ff": "off"} STREAM_INFO_URL = "https://www.dailymotion.com/player/metadata/video/{0}" USER_INFO_URL = "https://api.dailymotion.com/user/{0}" _media_schema = validate.Schema( validate.any( {"error": { "title": validate.text }}, # "stream_chromecast_url": validate.url(), # Chromecast URL is already available in qualities subdict { "qualities": validate.any({ validate.text: validate.all([{ "type": validate.text, "url": validate.url() }]) }) })) _live_id_schema = validate.Schema({ "total": int, "list": validate.any([], [{ "id": validate.text }])
class Dogan(Plugin):
    """
    Support for the live streams from Doğan Media Group channels
    (CNN Türk, Dream Türk, Dream TV, Kanal D, teve2).
    """
    url_re = re.compile(
        r"""
        https?://(?:www\.)?
        (?:cnnturk\.com/(?:action/embedvideo/.*|canli-yayin|tv-cnn-turk|video/.*)|
           dreamturk\.com\.tr/(?:canli|canli-yayin-izle|dream-turk-ozel/.*|programlar/.*)|
           dreamtv\.com\.tr/dream-ozel/.*|
           kanald\.com\.tr/.*|
           teve2\.com\.tr/(?:canli-yayin|diziler/.*|embed/.*|filmler/.*|programlar/.*))
        """, re.VERBOSE)
    # the player <div> that carries the content id in a data-id attribute
    playerctrl_re = re.compile(r'''<div\s+id="video-element".*?>''', re.DOTALL)
    data_id_re = re.compile(
        r'''data-id=(?P<quote>["'])/?(?P<id>\w+)(?P=quote)''')
    # alternative sources for the content id in the page markup/scripts
    content_id_re = re.compile(r'"content[Ii]d",\s*"(\w+)"')
    item_id_re = re.compile(r"_itemId\s+=\s+'(\w+)';")
    # per-site API paths, formatted with the content id
    content_api = "/actions/media?id={id}"
    dream_api = "/actions/content/media/{id}"
    new_content_api = "/action/media/{id}"
    # old-style API: {"data": {"media": {"link": {...}}}} -> the "link" dict
    content_api_schema = validate.Schema(
        {
            "data": {
                "id": str,
                "media": {
                    "link": {
                        validate.optional("defaultServiceUrl"):
                        validate.any(validate.url(), ""),
                        validate.optional("serviceUrl"):
                        validate.any(validate.url(), ""),
                        "securePath": str,
                    },
                },
            },
        },
        validate.get("data"),
        validate.get("media"),
        validate.get("link"),
    )
    # new-style API: {"Media": {"Link": {...}}} -> the "Link" dict
    new_content_api_schema = validate.Schema(
        {
            "Media": {
                "Link": {
                    "ContentId": str,
                    validate.optional("DefaultServiceUrl"):
                    validate.any(validate.url(), ""),
                    validate.optional("ServiceUrl"):
                    validate.any(validate.url(), ""),
                    "SecurePath": str,
                },
            },
        },
        validate.get("Media"),
        validate.get("Link"),
    )

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def _get_content_id(self):
        """Extract the content id from the page, trying three markers in turn."""
        res = self.session.http.get(self.url)
        # find the contentId
        content_id_m = self.content_id_re.search(res.text)
        if content_id_m:
            log.debug("Found contentId by contentId regex")
            return content_id_m.group(1)

        # find the PlayerCtrl div
        player_ctrl_m = self.playerctrl_re.search(res.text)
        if player_ctrl_m:
            # extract the content id from the player control data
            player_ctrl_div = player_ctrl_m.group(0)
            content_id_m = self.data_id_re.search(player_ctrl_div)
            if content_id_m:
                log.debug("Found contentId by player data-id regex")
                return content_id_m.group("id")

        # find the itemId var
        item_id_m = self.item_id_re.search(res.text)
        if item_id_m:
            log.debug("Found contentId by itemId regex")
            return item_id_m.group(1)
        # implicitly returns None when no marker matched

    def _get_new_content_hls_url(self, content_id, api_url):
        """Resolve the HLS URL via the new-style (capitalised keys) API.

        :param content_id: content id from the page
        :param api_url: API path template to format with the id
        """
        log.debug("Using new content API url")
        d = self.session.http.get(
            urljoin(self.url, api_url.format(id=content_id)))
        d = self.session.http.json(d, schema=self.new_content_api_schema)

        # NOTE(review): "DefaultServiceUrl"/"ServiceUrl" are optional in the
        # schema, so these lookups could raise KeyError — confirm the API
        # always sends them in practice.
        if d["DefaultServiceUrl"] == "https://www.kanald.com.tr":
            # kanald redirects to the old-style API on its own domain
            self.url = d["DefaultServiceUrl"]
            return self._get_content_hls_url(content_id)
        else:
            if d["SecurePath"].startswith("http"):
                return d["SecurePath"]
            else:
                return urljoin((d["ServiceUrl"] or d["DefaultServiceUrl"]),
                               d["SecurePath"])

    def _get_content_hls_url(self, content_id):
        """Resolve the HLS URL via the old-style (lower-case keys) API."""
        d = self.session.http.get(
            urljoin(self.url, self.content_api.format(id=content_id)))
        d = self.session.http.json(d, schema=self.content_api_schema)
        return urljoin((d["serviceUrl"] or d["defaultServiceUrl"]),
                       d["securePath"])

    def _get_hls_url(self, content_id):
        """Pick the right API (by domain) and return the stream's HLS URL."""
        # make the api url relative to the current domain
        if "cnnturk.com" in self.url or "teve2.com.tr" in self.url:
            return self._get_new_content_hls_url(content_id,
                                                 self.new_content_api)
        elif "dreamturk.com.tr" in self.url or "dreamtv.com.tr" in self.url:
            return self._get_new_content_hls_url(content_id, self.dream_api)
        else:
            return self._get_content_hls_url(content_id)

    def _get_streams(self):
        content_id = self._get_content_id()
        if content_id:
            log.debug(f"Loading content: {content_id}")
            hls_url = self._get_hls_url(content_id)
            return HLSStream.parse_variant_playlist(self.session, hls_url)
        else:
            log.error("Could not find the contentId for this stream")
from streamlink.stream import HLSStream, HTTPStream, RTMPStream CHANNEL_INFO_URL = "http://api.plu.cn/tga/streams/%s" QQ_STREAM_INFO_URL = "http://info.zb.qq.com/?cnlid=%d&cmd=2&stream=%d&system=1&sdtfrom=113" PLU_STREAM_INFO_URL = "http://star.api.plu.cn/live/GetLiveUrl?roomId=%d" _quality_re = re.compile(r"\d+x(\d+)$") _url_re = re.compile(r"http://star\.longzhu\.(?:tv|com)/(m\/)?(?P<domain>[a-z0-9]+)") _channel_schema = validate.Schema( { "data" : validate.any(None, { "channel" : validate.any(None, { "id" : validate.all( validate.text, validate.transform(int) ), "vid" : int }) }) }, validate.get("data") ) _plu_schema = validate.Schema( { "urls": [{ "securityUrl": validate.url(scheme=validate.any("rtmp", "http")), "resolution": validate.text, "ext": validate.text }] }
class YouTube(Plugin):
    """Streamlink plugin for YouTube videos and live streams."""

    _oembed_url = "https://www.youtube.com/oembed"
    _video_info_url = "https://youtube.com/get_video_info"

    # oEmbed response: author and title, decoded to text
    _oembed_schema = validate.Schema({
        "author_name":
        validate.all(validate.text, validate.transform(maybe_decode)),
        "title":
        validate.all(validate.text, validate.transform(maybe_decode))
    })

    # itag -> quality name for adaptive (DASH) video formats
    adp_video = {
        137: "1080p",
        303: "1080p60",  # HFR
        299: "1080p60",  # HFR
        264: "1440p",
        308: "1440p60",  # HFR
        266: "2160p",
        315: "2160p60",  # HFR
        138: "2160p",
        302: "720p60",  # HFR
    }
    # itag -> audio bitrate for adaptive audio formats (used to pick the best)
    adp_audio = {
        140: 128,
        141: 256,
        171: 128,
        249: 48,
        250: 64,
        251: 160,
        256: 256,
        258: 258,
    }

    arguments = PluginArguments(
        PluginArgument(
            "api-key",
            sensitive=True,
            help=argparse.SUPPRESS  # no longer used
        ))

    def __init__(self, url):
        super(YouTube, self).__init__(url)
        parsed = urlparse(self.url)
        # rewrite gaming.youtube.com URLs to the main site
        if parsed.netloc == 'gaming.youtube.com':
            self.url = urlunparse(parsed._replace(netloc='www.youtube.com'))

        self.author = None
        self.title = None
        self.video_id = None
        self.session.http.headers.update({'User-Agent': useragents.CHROME})

    def get_author(self):
        # get_oembed is a property accessed for its side effect of
        # populating self.author/self.title
        if self.author is None:
            self.get_oembed
        return self.author

    def get_title(self):
        if self.title is None:
            self.get_oembed
        return self.title

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        """Rank streams: 3D below the base quality, HFR above it."""
        match_3d = re.match(r"(\w+)_3d", stream)
        match_hfr = re.match(r"(\d+p)(\d+)", stream)
        if match_3d:
            weight, group = Plugin.stream_weight(match_3d.group(1))
            weight -= 1
            group = "youtube_3d"
        elif match_hfr:
            weight, group = Plugin.stream_weight(match_hfr.group(1))
            weight += 1
            group = "high_frame_rate"
        else:
            weight, group = Plugin.stream_weight(stream)

        return weight, group

    @property
    def get_oembed(self):
        """Fetch oEmbed metadata and store author/title on the instance."""
        if self.video_id is None:
            self.video_id = self._find_video_id(self.url)

        params = {
            "url": "https://www.youtube.com/watch?v={0}".format(self.video_id),
            "format": "json"
        }
        res = self.session.http.get(self._oembed_url, params=params)
        data = self.session.http.json(res, schema=self._oembed_schema)
        self.author = data["author_name"]
        self.title = data["title"]

    def _create_adaptive_streams(self, info, streams, protected):
        """Add adaptive (DASH) streams, muxing best audio with HQ video.

        :param info: parsed get_video_info data
        :param streams: dict of streams found so far (mutated and returned)
        :param protected: whether a signed (protected) stream was seen
        :return: (streams, protected) tuple
        """
        adaptive_streams = {}
        best_audio_itag = None

        # Extract audio streams from the DASH format list
        for stream_info in info.get("adaptive_fmts", []):
            # a signature ("s") parameter marks a protected stream we can't play
            if stream_info.get("s"):
                protected = True
                continue

            stream_params = dict(parse_qsl(stream_info["url"]))
            if "itag" not in stream_params:
                continue
            itag = int(stream_params["itag"])
            # extract any high quality streams only available in adaptive formats
            adaptive_streams[itag] = stream_info["url"]

            stream_type, stream_format = stream_info["type"]
            if stream_type == "audio":
                stream = HTTPStream(self.session, stream_info["url"])
                name = "audio_{0}".format(stream_format)
                streams[name] = stream

                # find the best quality audio stream m4a, opus or vorbis
                if best_audio_itag is None or self.adp_audio[
                        itag] > self.adp_audio[best_audio_itag]:
                    best_audio_itag = itag

        if best_audio_itag and adaptive_streams and MuxedStream.is_usable(
                self.session):
            aurl = adaptive_streams[best_audio_itag]
            # pair every known high-quality video itag with the best audio
            for itag, name in self.adp_video.items():
                if itag in adaptive_streams:
                    vurl = adaptive_streams[itag]
                    log.debug(
                        "MuxedStream: v {video} a {audio} = {name}".format(
                            audio=best_audio_itag,
                            name=name,
                            video=itag,
                        ))
                    streams[name] = MuxedStream(self.session,
                                                HTTPStream(self.session, vurl),
                                                HTTPStream(self.session, aurl))

        return streams, protected

    def _find_video_id(self, url):
        """Resolve *url* to a video id, trying several page markers.

        :raises PluginError: when no video id can be found
        """
        m = _url_re.match(url)
        if m.group("video_id"):
            log.debug("Video ID from URL")
            return m.group("video_id")

        res = self.session.http.get(url)
        datam = _ytdata_re.search(res.text)
        if datam:
            data = parse_json(datam.group(1))
            # find the videoRenderer object, where there is a LIVE NOW badge
            for vid_ep in search_dict(data, 'currentVideoEndpoint'):
                video_id = vid_ep.get("watchEndpoint", {}).get("videoId")
                if video_id:
                    log.debug("Video ID from currentVideoEndpoint")
                    return video_id
            for x in search_dict(data, 'videoRenderer'):
                for bstyle in search_dict(x.get("badges", {}), "style"):
                    if bstyle == "BADGE_STYLE_TYPE_LIVE_NOW":
                        if x.get("videoId"):
                            log.debug("Video ID from videoRenderer (live)")
                            return x["videoId"]

        # embedded live streams: follow the canonical link and retry
        if "/embed/live_stream" in url:
            for link in itertags(res.text, "link"):
                if link.attributes.get("rel") == "canonical":
                    canon_link = link.attributes.get("href")
                    if canon_link != url:
                        log.debug("Re-directing to canonical URL: {0}".format(
                            canon_link))
                        return self._find_video_id(canon_link)

        raise PluginError("Could not find a video on this page")

    def _get_stream_info(self, video_id):
        """Query get_video_info, trying progressively restricted param sets.

        :param video_id: the YouTube video id
        :return: the parsed config dict from the first successful attempt,
                 or the last (failed) attempt's data
        """
        # normal
        _params_1 = {"el": "detailpage"}
        # age restricted
        _params_2 = {"el": "embedded"}
        # embedded restricted
        _params_3 = {
            "eurl": "https://youtube.googleapis.com/v/{0}".format(video_id)
        }

        count = 0
        info_parsed = None
        for _params in (_params_1, _params_2, _params_3):
            count += 1
            params = {"video_id": video_id}
            params.update(_params)

            res = self.session.http.get(self._video_info_url, params=params)
            info_parsed = parse_query(res.content if is_py2 else res.text,
                                      name="config",
                                      schema=_config_schema)
            if info_parsed.get("status") == "fail":
                log.debug("get_video_info - {0}: {1}".format(
                    count, info_parsed.get("reason")))
                continue
            self.author = info_parsed.get("author")
            self.title = info_parsed.get("title")
            log.debug("get_video_info - {0}: Found data".format(count))
            break

        return info_parsed

    def _get_streams(self):
        """Build the stream dict: progressive, adaptive (VOD only), and HLS."""
        is_live = False

        self.video_id = self._find_video_id(self.url)
        # NOTE(review): "{0}" placeholders passed as extra log args only format
        # with a custom logger; with stdlib logging they print verbatim.
        log.debug("Using video ID: {0}", self.video_id)

        info = self._get_stream_info(self.video_id)
        if info and info.get("status") == "fail":
            log.error("Could not get video info: {0}".format(
                info.get("reason")))
            return
        elif not info:
            log.error("Could not get video info")
            return

        if info.get("livestream") == '1' or info.get("live_playback") == '1' \
                or info.get("player_response", {}).get("videoDetails", {}).get("isLive") == True:
            log.debug("This video is live.")
            is_live = True

        formats = info.get("fmt_list")
        streams = {}
        protected = False
        # progressive (muxed) formats
        for stream_info in info.get("url_encoded_fmt_stream_map", []):
            if stream_info.get("s"):
                protected = True
                continue

            stream = HTTPStream(self.session, stream_info["url"])
            name = formats.get(stream_info["itag"]) or stream_info["quality"]

            if stream_info.get("stereo3d"):
                name += "_3d"

            streams[name] = stream

        if not is_live:
            streams, protected = self._create_adaptive_streams(
                info, streams, protected)

        hls_playlist = info.get("hlsvp") or info.get(
            "player_response", {}).get("streamingData",
                                       {}).get("hlsManifestUrl")
        if hls_playlist:
            try:
                hls_streams = HLSStream.parse_variant_playlist(
                    self.session, hls_playlist, namekey="pixels")
                streams.update(hls_streams)
            except IOError as err:
                log.warning("Failed to extract HLS streams: {0}", err)

        if not streams and protected:
            raise PluginError("This plugin does not support protected videos, "
                              "try youtube-dl instead")

        return streams
import re import websocket from streamlink.plugin import Plugin from streamlink.plugin.api import validate, utils from streamlink.stream import RTMPStream SWF_URL = 'http://showup.tv/flash/suStreamer.swf' RANDOM_UID = '%032x' % random.getrandbits(128) JSON_UID = u'{"id":0,"value":["%s",""]}' JSON_CHANNEL = u'{"id":2,"value":["%s"]}' _url_re = re.compile(r'https?://(\w+.)?showup\.tv/(?P<channel>[A-Za-z0-9_-]+)') _websocket_url_re = re.compile( r'''socket\.connect\(["'](?P<ws>[^"']+)["']\)''') _schema = validate.Schema(validate.get('value')) log = logging.getLogger(__name__) class ShowUp(Plugin): @classmethod def can_handle_url(cls, url): return _url_re.match(url) def _get_stream_id(self, channel, ws_url): ws = websocket.WebSocket() ws.connect(ws_url) ws.send(JSON_UID % RANDOM_UID) ws.send(JSON_CHANNEL % channel) # STREAM_ID
import re from streamlink.compat import urlparse from streamlink.plugin import Plugin from streamlink.plugin.api import http, validate from streamlink.stream import RTMPStream, HTTPStream, HLSStream from streamlink.utils import parse_json, rtmpparse, swfdecompress _url_re = re.compile(r"http(s)?://api.dmcloud.net/player/embed/[^/]+/[^/]+") _rtmp_re = re.compile(br'customURL[^h]+(https://.*?)\\') _info_re = re.compile(r"var info = (.*);") _schema = validate.Schema({ "mode": validate.text, validate.optional("mp4_url"): validate.url(scheme="http"), validate.optional("ios_url"): validate.url(scheme="http"), validate.optional("swf_url"): validate.url(scheme="http"), }) class DMCloud(Plugin): @classmethod def can_handle_url(self, url): return _url_re.match(url) def _get_rtmp_stream(self, swfurl): res = http.get(swfurl) swf = swfdecompress(res.content) match = _rtmp_re.search(swf)
def test_parse_xml_validate(self):
    """Validating parsed XML with an xml_element schema keeps tag and attributes."""
    schema = validate.Schema(xml_element(tag="test", attrib={"foo": text}))
    parsed = parse_xml("""<test foo="bar"/>""", schema=schema)
    reference = ET.Element("test", {"foo": "bar"})
    self.assertEqual(reference.tag, parsed.tag)
    self.assertEqual(reference.attrib, parsed.attrib)
class DLive(Plugin):
    """Streamlink plugin for dlive.tv live channels and VOD pages."""

    # /p/<video> is a VOD page; any other single path segment is a channel
    _re_url = re.compile(r"""
        https?://(?:www\.)?dlive\.tv/
        (?:
            (?:p/(?P<video>[^/]+))
            |
            (?P<channel>[^/]+)
        )
    """, re.VERBOSE)
    _re_videoPlaybackUrl = re.compile(r'"playbackUrl"\s*:\s*"([^"]+\.m3u8)"')

    # GraphQL response -> the userByDisplayName object
    _schema_userByDisplayName = validate.Schema({
        "data": {
            "userByDisplayName": {
                "livestream": validate.any(None, {
                    "title": validate.text
                }),
                "username": validate.text
            }
        }},
        validate.get("data"),
        validate.get("userByDisplayName")
    )
    # page body -> unescaped playbackUrl (or None when not present)
    _schema_videoPlaybackUrl = validate.Schema(
        validate.transform(_re_videoPlaybackUrl.search),
        validate.any(None, validate.all(
            validate.get(1),
            validate.transform(unquote_plus),
            # the URL is JSON-escaped in the page source; undo the escaping
            validate.transform(lambda url: bytes(url, "utf-8").decode("unicode_escape")),
            validate.url()
        ))
    )

    @classmethod
    def can_handle_url(cls, url):
        return cls._re_url.match(url)

    @classmethod
    def stream_weight(cls, key):
        # prefer dlive's own quality names when known, else default weighting
        weight = QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, "dlive"

        return Plugin.stream_weight(key)

    def __init__(self, *args, **kwargs):
        super(DLive, self).__init__(*args, **kwargs)
        self.author = None
        self.title = None

        # exactly one of these two groups matches for a handled URL
        match = self._re_url.match(self.url)
        self.video = match.group("video")
        self.channel = match.group("channel")

    def get_author(self):
        return self.author

    def get_title(self):
        return self.title

    def _get_streams_video(self):
        """Extract the HLS playlist URL embedded in a VOD page."""
        log.debug("Getting video HLS streams for {0}".format(self.video))
        try:
            hls_url = self.session.http.get(self.url, schema=self._schema_videoPlaybackUrl)
            if hls_url is None:
                return
        except PluginError:
            return

        return HLSStream.parse_variant_playlist(self.session, hls_url)

    def _get_streams_live(self):
        """Query the GraphQL API for the channel and build its live HLS URL."""
        log.debug("Getting live HLS streams for {0}".format(self.channel))
        try:
            data = json.dumps({"query": """query {{
                userByDisplayName(displayname:"{displayname}") {{
                    livestream {{
                        title
                    }}
                    username
                }}
            }}""".format(displayname=self.channel)})
            res = self.session.http.post("https://graphigo.prd.dlive.tv/", data=data)
            res = self.session.http.json(res, schema=self._schema_userByDisplayName)
            # livestream is None when the channel is offline
            if res["livestream"] is None:
                return
        except PluginError:
            return

        self.author = self.channel
        self.title = res["livestream"]["title"]

        hls_url = "https://live.prd.dlive.tv/hls/live/{0}.m3u8".format(res["username"])

        return HLSStream.parse_variant_playlist(self.session, hls_url)

    def _get_streams(self):
        # dispatch on which URL form matched
        if self.video:
            return self._get_streams_video()
        elif self.channel:
            return self._get_streams_live()
class Gulli(Plugin):
    """Streamlink plugin for replay.gulli.fr live and VOD (jwplayer) pages."""

    LIVE_PLAYER_URL = 'https://replay.gulli.fr/jwplayer/embedstreamtv'
    VOD_PLAYER_URL = 'https://replay.gulli.fr/jwplayer/embed/{0}'

    # /Direct is the live stream; other pages embed a VOD id
    _url_re = re.compile(r'https?://replay\.gulli\.fr/(?:Direct|.+/(?P<video_id>VOD[0-9]+))')
    _playlist_re = re.compile(r'sources: (\[.+?\])', re.DOTALL)
    _vod_video_index_re = re.compile(r'jwplayer\(idplayer\).playlistItem\((?P<video_index>[0-9]+)\)')
    _mp4_bitrate_re = re.compile(r'.*_(?P<bitrate>[0-9]+)\.mp4')

    # the "sources" blob is JS, not JSON: normalise the "file" entries and
    # strip function-valued members before parsing it as JSON
    _video_schema = validate.Schema(
        validate.all(
            validate.transform(lambda x: re.sub(r'"?file"?:\s*[\'"](.+?)[\'"],?', r'"file": "\1"', x, flags=re.DOTALL)),
            validate.transform(lambda x: re.sub(r'"?\w+?"?:\s*function\b.*?(?<={).*(?=})', "", x, flags=re.DOTALL)),
            validate.transform(parse_json),
            [
                validate.Schema({
                    'file': validate.url()
                })
            ]
        )
    )

    @classmethod
    def can_handle_url(cls, url):
        return Gulli._url_re.match(url)

    def _get_streams(self):
        match = self._url_re.match(self.url)
        video_id = match.group('video_id')
        if video_id is not None:
            # VOD
            live = False
            player_url = self.VOD_PLAYER_URL.format(video_id)
        else:
            # Live
            live = True
            player_url = self.LIVE_PLAYER_URL

        res = self.session.http.get(player_url)
        playlist = re.findall(self._playlist_re, res.text)
        index = 0
        if not live:
            # Get the index for the video on the playlist
            match = self._vod_video_index_re.search(res.text)
            if match is None:
                return
            index = int(match.group('video_index'))
        if not playlist:
            return
        videos = self._video_schema.validate(playlist[index])

        for video in videos:
            video_url = video['file']

            # Ignore non-supported MSS streams
            if 'isml/Manifest' in video_url:
                continue

            try:
                if '.m3u8' in video_url:
                    yield from HLSStream.parse_variant_playlist(self.session, video_url).items()
                elif '.mp4' in video_url:
                    # derive a quality name from the bitrate in the file name
                    match = self._mp4_bitrate_re.match(video_url)
                    if match is not None:
                        bitrate = '%sk' % match.group('bitrate')
                    else:
                        bitrate = 'vod'
                    yield bitrate, HTTPStream(self.session, video_url)
            except IOError as err:
                if '403 Client Error' in str(err):
                    log.error('Failed to access stream, may be due to geo-restriction')
                raise
def test_parse_qsd(self):
    """parse_qsd should return the query string as a schema-validated dict."""
    schema = validate.Schema({"test": validate.text, "foo": "bar"})
    result = parse_qsd("test=1&foo=bar", schema=schema)
    self.assertEqual({"test": "1", "foo": "bar"}, result)
def _get_api_data(self, type, slug, filter=None):
    """Query the Pluto TV bootstrap API for channel/series/VOD data.

    :param type: request parameter name for the lookup (e.g. which kind of
                 slug is being resolved); NOTE(review): shadows the builtin
    :param slug: slug value sent under the *type* parameter
    :param filter: when set, only VOD episodes whose slug equals it are kept;
                   NOTE(review): shadows the builtin
    :return: validated bootstrap data dict, or None when the page has no
             appVersion meta tag
    """
    log.debug(f"slug={slug}")
    # the API requires the web app's version, scraped from the page's meta tag
    app_version = self.session.http.get(self.url, schema=validate.Schema(
        validate.parse_html(),
        validate.xml_xpath_string(".//head/meta[@name='appVersion']/@content"),
        validate.any(None, str),
    ))
    if not app_version:
        return

    log.debug(f"app_version={app_version}")

    return self.session.http.get(
        "https://boot.pluto.tv/v4/start",
        params={
            "appName": "web",
            "appVersion": app_version,
            "deviceVersion": "94.0.0",
            "deviceModel": "web",
            "deviceMake": "firefox",
            "deviceType": "web",
            "clientID": str(uuid4()),  # fresh random client id per request
            "clientModelNumber": "1.0",
            type: slug,
        },
        schema=validate.Schema(
            validate.parse_json(),
            {
                "servers": {
                    "stitcher": validate.url(),
                },
                # live EPG entries (present for channel lookups)
                validate.optional("EPG"): [{
                    "name": str,
                    "id": str,
                    "slug": str,
                    "stitched": {
                        "path": str,
                    },
                }],
                # on-demand entries (present for movie/series lookups)
                validate.optional("VOD"): [{
                    "name": str,
                    "id": str,
                    "slug": str,
                    "genre": str,
                    "stitched": {
                        "path": str,
                    },
                    validate.optional("seasons"): [{
                        # keep only the episode matching *filter* (if given)
                        "episodes": validate.all([{
                            "name": str,
                            "_id": str,
                            "slug": str,
                            "stitched": {
                                "path": str,
                            },
                        }], validate.filter(lambda k: filter and k["slug"] == filter)),
                    }],
                }],
                "sessionToken": str,
                "stitcherParams": str,
            },
        ),
    )
class Rtve(Plugin):
    """Streamlink plugin for rtve.es live and on-demand content."""

    # obfuscated ZTNR secret, stored base64-encoded
    secret_key = base64.b64decode("eWVMJmRhRDM=")
    content_id_re = re.compile(r'data-id\s*=\s*"(\d+)"')
    url_re = re.compile(
        r"""
        https?://(?:www\.)?rtve\.es/(?:directo|noticias|television|deportes|alacarta|drmn)/.*?/?
        """, re.VERBOSE)
    # CDN XML -> list of {"quality", "urls"} dicts, one per <preset>
    cdn_schema = validate.Schema(
        validate.transform(parse_xml),
        validate.xml_findall(".//preset"),
        [
            validate.union({
                "quality": validate.all(validate.getattr("attrib"),
                                        validate.get("type")),
                "urls": validate.all(validate.xml_findall(".//url"),
                                     [validate.getattr("text")])
            })
        ])
    subtitles_api = "http://www.rtve.es/api/videos/{id}/subtitulos.json"
    subtitles_schema = validate.Schema(
        {"page": {
            "items": [{
                "src": validate.url(),
                "lang": validate.text
            }]
        }},
        validate.get("page"),
        validate.get("items"))
    video_api = "http://www.rtve.es/api/videos/{id}.json"
    video_schema = validate.Schema(
        {
            "page": {
                "items": [{
                    "qualities": [{
                        "preset": validate.text,
                        "height": int
                    }]
                }]
            }
        },
        validate.get("page"),
        validate.get("items"),
        validate.get(0))
    options = PluginOptions({"mux_subtitles": False})

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def __init__(self, url):
        Plugin.__init__(self, url)
        self.zclient = ZTNRClient(self.secret_key)
        http.headers = {"User-Agent": useragents.SAFARI_8}

    def _get_content_id(self):
        """Scrape the numeric content id from the page (None if absent)."""
        res = http.get(self.url)
        m = self.content_id_re.search(res.text)
        return m and int(m.group(1))

    def _get_subtitles(self, content_id):
        """Fetch the available subtitle tracks for *content_id*."""
        res = http.get(self.subtitles_api.format(id=content_id))
        return http.json(res, schema=self.subtitles_schema)

    def _get_quality_map(self, content_id):
        """Map API preset names (localised) to "<height>p" quality names."""
        res = http.get(self.video_api.format(id=content_id))
        data = http.json(res, schema=self.video_schema)
        qmap = {}
        for item in data["qualities"]:
            qname = {
                "MED": "Media",
                "HIGH": "Alta",
                "ORIGINAL": "Original"
            }.get(item["preset"], item["preset"])
            qmap[qname] = u"{0}p".format(item["height"])
        return qmap

    def _get_streams(self):
        streams = []
        content_id = self._get_content_id()
        if content_id:
            self.logger.debug("Found content with id: {0}", content_id)
            stream_data = self.zclient.get_cdn_list(content_id,
                                                    schema=self.cdn_schema)
            quality_map = None
            for stream in stream_data:
                for url in stream["urls"]:
                    if url.endswith("m3u8"):
                        try:
                            streams.extend(
                                HLSStream.parse_variant_playlist(
                                    self.session, url).items())
                        except OSError:
                            self.logger.debug("Failed to load m3u8 url: {0}",
                                              url)
                    elif ((url.endswith("mp4") or url.endswith("mov")
                           or url.endswith("avi")) and http.head(
                               url, raise_for_status=False).status_code == 200):
                        if quality_map is None:
                            # only make the request when it is necessary
                            quality_map = self._get_quality_map(content_id)
                        # rename the HTTP sources to match the HLS sources
                        quality = quality_map.get(stream["quality"],
                                                  stream["quality"])
                        streams.append((quality, HTTPStream(self.session, url)))

            subtitles = None
            if self.get_option("mux_subtitles"):
                subtitles = self._get_subtitles(content_id)
            if subtitles:
                substreams = {}
                for i, subtitle in enumerate(subtitles):
                    substreams[subtitle["lang"]] = HTTPStream(
                        self.session, subtitle["src"])

                # every stream gets all subtitle tracks muxed in
                for q, s in streams:
                    yield q, MuxedStream(self.session, s, subtitles=substreams)
            else:
                for s in streams:
                    yield s
def _get_streams(self):
    """Resolve a pandalive.co.kr page into its live HLS streams."""
    # the page embeds the broadcast's numeric id in a JS routePath value
    re_media_code = re.compile(
        r"""routePath:\s*(["'])(\\u002F|/)live(\\u002F|/)play(\\u002F|/)(?P<id>\d+)\1"""
    )
    media_code = self.session.http.get(
        self.url,
        schema=validate.Schema(
            validate.transform(re_media_code.search),
            validate.any(None, validate.get("id")),
        ),
    )
    if not media_code:
        return

    log.debug(f"Media code: {media_code}")

    res = self.session.http.post(
        "https://api.pandalive.co.kr/v1/live/play",
        data={
            "action": "watch",
            "userIdx": media_code,
        },
        schema=validate.Schema(
            validate.parse_json(),
            {
                "media": {
                    "title": str,
                    "userId": str,
                    "userNick": str,
                    "isPw": bool,
                    "isLive": bool,
                    "liveType": str,
                },
                "PlayList": {
                    validate.optional("hls"): [{"url": validate.url()}],
                    validate.optional("hls2"): [{"url": validate.url()}],
                    validate.optional("hls3"): [{"url": validate.url()}],
                },
                "result": bool,
                "message": str,
            },
        ),
    )

    # bail out on API failure or unplayable broadcasts
    if not res["result"]:
        log.error(res["message"])
        return
    media = res["media"]
    if not media["isLive"]:
        log.error("The broadcast has ended")
        return
    if media["isPw"]:
        log.error("The broadcast is password protected")
        return

    log.info(f"Broadcast type: {media['liveType']}")
    self.author = f"{media['userNick']} ({media['userId']})"
    self.title = f"{media['title']}"

    # pick the first populated HLS playlist; every entry in it shares the
    # same variant URL, so the first URL is sufficient
    playlist = res["PlayList"]
    for hls_key in ("hls", "hls2", "hls3"):
        if hls_key in playlist and playlist[hls_key]:
            return HLSStream.parse_variant_playlist(
                self.session, playlist[hls_key][0]["url"])
class AdultSwim(Plugin):
    """Plugin for adultswim.com live streams and VODs.

    Stream metadata is embedded in each page as Next.js JSON
    ("__NEXT_DATA__"); protected ("bulkaes") streams additionally need a
    CDN token from token.ngtv.io.
    """

    # Issues CDN playback tokens for protected streams.
    token_url = 'https://token.ngtv.io/token/token_spe'
    # Returns the desktop stream URLs for a given video id.
    video_data_url = 'https://www.adultswim.com/api/shows/v1/media/{0}/desktop'
    # Locates the JS bundle that contains the CDN token app id.
    app_id_js_url_re = re.compile(
        r'''<script src="([^"]*asvp\..*?\.bundle\.js)">''')
    # Extracts the token app id from that bundle.
    app_id_re = re.compile(r'''CDN_TOKEN_APP_ID="(.*?)"''')
    # The Next.js page state embedded as JSON in the HTML.
    json_data_re = re.compile(
        r'''<script id="__NEXT_DATA__" type="application/json">({.*})</script>'''
    )
    # Strips the final path component from a URL.
    truncate_url_re = re.compile(r'''(.*)/\w+/?''')
    url_re = re.compile(
        r'''https?://(?:www\.)?adultswim\.com
        /(streams|videos)
        (?:/([^/]+))?
        (?:/([^/]+))?
        ''',
        re.VERBOSE,
    )
    # media -> desktop -> {stream type: {url}}, keeping only the stream
    # types this plugin can play (plain HLS and token-protected "bulkaes").
    _api_schema = validate.Schema(
        {'media': {
            'desktop': {
                validate.text: {
                    'url': validate.url()
                }
            }
        }}, validate.get('media'), validate.get('desktop'),
        validate.filter(lambda k, v: k in ['unprotected', 'bulkaes']))
    # Live/stream pages: list of {id, stream} records in the redux state.
    _stream_data_schema = validate.Schema(
        {
            'props': {
                '__REDUX_STATE__': {
                    'streams': [{
                        'id': validate.text,
                        'stream': validate.text,
                    }]
                }
            }
        },
        validate.get('props'),
        validate.get('__REDUX_STATE__'),
        validate.get('streams'),
    )
    # The token endpoint returns either a token or an error message.
    _token_schema = validate.Schema(
        validate.any(
            {'auth': {
                'token': validate.text
            }},
            {'auth': {
                'error': {
                    'message': validate.text
                }
            }},
        ),
        validate.get('auth'),
    )
    # VOD pages: Apollo state, filtered down to its "Video:<id>" entries.
    _video_data_schema = validate.Schema(
        {
            'props': {
                'pageProps': {
                    '__APOLLO_STATE__': {
                        validate.text: {
                            validate.optional('id'): validate.text,
                            validate.optional('slug'): validate.text,
                        }
                    }
                }
            }
        },
        validate.get('props'),
        validate.get('pageProps'),
        validate.get('__APOLLO_STATE__'),
        validate.filter(lambda k, v: k.startswith('Video:')),
    )

    @classmethod
    def can_handle_url(cls, url):
        match = AdultSwim.url_re.match(url)
        return match is not None

    def _get_stream_data(self, id):
        """Look up a live stream's video id in the page's redux state.

        :param id: stream id to match ("live-stream" or a channel name)
        :return: the matching record's "stream" value, or None if not found
        :raises PluginError: if the page contains no __NEXT_DATA__ JSON
        """
        res = self.session.http.get(self.url)
        m = self.json_data_re.search(res.text)
        if m and m.group(1):
            streams = parse_json(m.group(1), schema=self._stream_data_schema)
        else:
            raise PluginError("Failed to get json_data")

        for stream in streams:
            if 'id' in stream:
                if id == stream['id'] and 'stream' in stream:
                    return stream['stream']

    def _get_video_data(self, slug):
        """Look up an episode's video id in the show page's Apollo state.

        :param slug: the episode's URL slug
        :return: the matching video's "id" value, or None if not found
        :raises PluginError: if the URL cannot be truncated or has no JSON
        """
        # The episode list lives on the show page, one path level up.
        m = self.truncate_url_re.search(self.url)
        if m and m.group(1):
            log.debug("Truncated URL={0}".format(m.group(1)))
        else:
            raise PluginError("Failed to truncate URL")

        res = self.session.http.get(m.group(1))
        m = self.json_data_re.search(res.text)
        if m and m.group(1):
            videos = parse_json(m.group(1), schema=self._video_data_schema)
        else:
            raise PluginError("Failed to get json_data")

        for video in videos:
            if 'slug' in videos[video]:
                if slug == videos[video]['slug'] and 'id' in videos[video]:
                    return videos[video]['id']

    def _get_token(self, path):
        """Request a CDN auth token for a protected stream path.

        :param path: URL path of the protected stream
        :return: the token string
        :raises PluginError: if the app id or token cannot be obtained
        """
        # The app id needed by the token service is buried in a JS bundle
        # referenced from the page.
        res = self.session.http.get(self.url)
        m = self.app_id_js_url_re.search(res.text)
        app_id_js_url = m and m.group(1)
        if not app_id_js_url:
            raise PluginError("Could not determine app_id_js_url")
        log.debug("app_id_js_url={0}".format(app_id_js_url))

        res = self.session.http.get(app_id_js_url)
        m = self.app_id_re.search(res.text)
        app_id = m and m.group(1)
        if not app_id:
            raise PluginError("Could not determine app_id")
        log.debug("app_id={0}".format(app_id))

        res = self.session.http.get(self.token_url,
                                    params=dict(
                                        format='json',
                                        appId=app_id,
                                        path=path,
                                    ))
        token_data = self.session.http.json(res, schema=self._token_schema)
        if 'error' in token_data:
            raise PluginError(token_data['error']['message'])

        return token_data['token']

    def _get_streams(self):
        url_match = self.url_re.match(self.url)
        url_type, show_name, episode_name = url_match.groups()

        # "/streams" with no channel name is the main live stream.
        if url_type == 'streams' and not show_name:
            url_type = 'live-stream'
        elif not show_name:
            raise PluginError("Missing show_name for url_type: {0}".format(
                url_type, ))
        log.debug("URL type={0}".format(url_type))

        if url_type == 'live-stream':
            video_id = self._get_stream_data(url_type)
        elif url_type == 'streams':
            video_id = self._get_stream_data(show_name)
        elif url_type == 'videos':
            if show_name is None or episode_name is None:
                raise PluginError(
                    "Missing show_name or episode_name for url_type: {0}".
                    format(url_type, ))
            video_id = self._get_video_data(episode_name)
        else:
            raise PluginError("Unrecognised url_type: {0}".format(url_type))

        if video_id is None:
            raise PluginError("Could not find video_id")
        log.debug("Video ID={0}".format(video_id))

        res = self.session.http.get(self.video_data_url.format(video_id))
        url_data = self.session.http.json(res, schema=self._api_schema)

        if 'unprotected' in url_data:
            # Unprotected streams can be played directly.
            url = url_data['unprotected']['url']
        elif 'bulkaes' in url_data:
            # Protected streams need an "hdnts" token in the query string.
            url_parsed = urlparse(url_data['bulkaes']['url'])
            token = self._get_token(url_parsed.path)
            url = urlunparse((
                url_parsed.scheme,
                url_parsed.netloc,
                url_parsed.path,
                url_parsed.params,
                "{0}={1}".format('hdnts', token),
                url_parsed.fragment,
            ))
        else:
            raise PluginError("Could not find a usable URL in url_data")
        log.debug("URL={0}".format(url))

        return HLSStream.parse_variant_playlist(self.session, url)
"360": 1, "low": 1 } _url_re = re.compile( """ http(s)?://(\w+\.)?gaminglive\.tv /(?P<type>channels|videos)/(?P<name>[^/]+) """, re.VERBOSE) _quality_re = re.compile("[^/]+-(?P<quality>[^/]+)") _channel_schema = validate.Schema( { validate.optional("state"): { "stream": { "qualities": [validate.text], "rootUrl": validate.url(scheme="rtmp") } } }, validate.get("state")) _vod_schema = validate.Schema( { "name": validate.text, "channel_slug": validate.text, "title": validate.text, "created_at": validate.transform(int) }, ) class GamingLive(Plugin):
import re from streamlink.plugin import Plugin from streamlink.plugin.api import validate from streamlink.stream import HLSStream from streamlink.plugin.api import useragents from streamlink.utils import update_scheme HUYA_URL = "http://m.huya.com/%s" _url_re = re.compile(r'https?://(www\.)?huya.com/(?P<channel>[^/]+)') _hls_re = re.compile(r'liveLineUrl\s*=\s*"(?P<url>[^"]+)"') _hls_schema = validate.Schema( validate.transform(_hls_re.search), validate.any(None, validate.get("url")), validate.transform(lambda v: update_scheme("http://", v)), validate.url()) class Huya(Plugin): @classmethod def can_handle_url(self, url): return _url_re.match(url) def _get_streams(self): match = _url_re.match(self.url) channel = match.group("channel") self.session.http.headers.update({"User-Agent": useragents.IPAD}) # Some problem with SSL on huya.com now, do not use https
class Huya(Plugin):
    """Plugin for huya.com live streams (per-CDN FLV-over-HTTP sources)."""

    # Base64-encoded stream JSON embedded in the page.
    _re_stream = re.compile(r'"stream"\s?:\s?"([^"]+)"')
    # Schema for the decoded stream data; only the FLV fields are used,
    # the commented-out entries document the rest of the observed payload.
    _schema_data = validate.Schema(
        {
            # 'status': int,
            # 'msg': validate.any(None, validate.text),
            'data': [{
                'gameStreamInfoList': [{
                    'sCdnType': validate.text,
                    'sStreamName': validate.text,
                    'sFlvUrl': validate.text,
                    'sFlvUrlSuffix': validate.text,
                    # anti-hotlinking query string; arrives HTML-escaped
                    'sFlvAntiCode': validate.all(
                        validate.text,
                        validate.transform(lambda v: html_unescape(v))),
                    # 'sHlsUrl': validate.text,
                    # 'sHlsUrlSuffix': validate.text,
                    # 'sHlsAntiCode': validate.all(validate.text, validate.transform(lambda v: html_unescape(v))),
                    validate.optional('iIsMultiStream'): int,
                    'iPCPriorityRate': int,
                }]
            }],
            # 'vMultiStreamInfo': [{
            #     'sDisplayName': validate.text,
            #     'iBitRate': int,
            # }],
        },
        validate.get('data'),
        validate.get(0),
        validate.get('gameStreamInfoList'),
    )
    # Filled while streams are resolved in _get_streams; read by
    # stream_weight to rank the per-CDN sources.
    QUALITY_WEIGHTS = {}

    @classmethod
    def stream_weight(cls, key):
        """Weight the per-CDN stream names using the site's priority rates."""
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, 'huya'

        return Plugin.stream_weight(key)

    def _get_streams(self):
        res = self.session.http.get(self.url)
        data = self._re_stream.search(res.text)
        if not data:
            return

        # The stream JSON is base64-encoded inside the page source.
        data = parse_json(base64.b64decode(data.group(1)),
                          schema=self._schema_data)
        for info in data:
            log.trace('{0!r}'.format(info))
            # FLV URL = <base>/<stream name>.<suffix>?<anti-hotlink code>
            flv_url = '{0}/{1}.{2}?{3}'.format(info["sFlvUrl"],
                                               info["sStreamName"],
                                               info["sFlvUrlSuffix"],
                                               info["sFlvAntiCode"])
            name = 'source_{0}'.format(info["sCdnType"].lower())
            self.QUALITY_WEIGHTS[name] = info['iPCPriorityRate']
            yield name, HTTPStream(self.session, flv_url)

        log.debug('QUALITY_WEIGHTS: {0!r}'.format(self.QUALITY_WEIGHTS))
def clips(self, clipname):
    """Query the Twitch GQL API for a clip's playback data.

    Batches three persisted GQL queries into one API call: the playback
    access token plus available qualities, the broadcaster/game names,
    and the clip title.

    :param clipname: the clip's URL slug
    :return: list of the three validated results, in order:
             ((signature, token), [(quality_name, source_url), ...]),
             (broadcaster_display_name, game_name),
             title
    """
    queries = [
        self._gql_persisted_query(
            "VideoAccessToken_Clip",
            "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11",
            slug=clipname),
        self._gql_persisted_query(
            "ClipsView",
            "4480c1dcc2494a17bb6ef64b94a5213a956afb8a45fe314c66b0d04079a93a8f",
            slug=clipname),
        self._gql_persisted_query(
            "ClipsTitle",
            "f6cca7f2fdfbfc2cecea0c88452500dae569191e58a265f97711f8f2a838f5b4",
            slug=clipname)
    ]

    return self.call(
        queries,
        schema=validate.Schema([
            # 1. playback access token + quality list, with each quality
            #    flattened into a ("<quality>p<fps>", url) pair
            validate.all(
                {
                    "data": {
                        "clip": {
                            "playbackAccessToken": validate.all({
                                "signature": str,
                                "value": str
                            }, validate.union_get("signature", "value")),
                            "videoQualities": [
                                validate.all(
                                    {
                                        "frameRate": validate.transform(int),
                                        "quality": str,
                                        "sourceURL": validate.url()
                                    },
                                    validate.transform(lambda q: (
                                        f"{q['quality']}p{q['frameRate']}",
                                        q["sourceURL"])))
                            ]
                        }
                    }
                }, validate.get(("data", "clip")),
                validate.union_get("playbackAccessToken", "videoQualities")),
            # 2. broadcaster display name and game name
            validate.all(
                {
                    "data": {
                        "clip": {
                            "broadcaster": {
                                "displayName": str
                            },
                            "game": {
                                "name": str
                            }
                        }
                    }
                }, validate.get(("data", "clip")),
                validate.union_get(("broadcaster", "displayName"),
                                   ("game", "name"))),
            # 3. clip title
            validate.all({"data": {
                "clip": {
                    "title": str
                }
            }}, validate.get(("data", "clip", "title")))
        ]))
(?P<subdomain>.+) \. )? douyu.com/ (?: show/(?P<vid>[^/&?]+)| (?P<channel>[^/&?]+) ) """, re.VERBOSE) _room_id_re = re.compile(r'"room_id\\*"\s*:\s*(\d+),') _room_id_alt_re = re.compile(r'data-onlineid=(\d+)') _room_id_schema = validate.Schema( validate.all( validate.transform(_room_id_re.search), validate.any(None, validate.all(validate.get(1), validate.transform(int))))) _room_id_alt_schema = validate.Schema( validate.all( validate.transform(_room_id_alt_re.search), validate.any(None, validate.all(validate.get(1), validate.transform(int))))) _room_schema = validate.Schema( { "data": validate.any( None, { "show_status":
QUALITY_MAP = { "auto": "auto", 4: "1080p", 3: "720p", 2: "544p", 1: "360p", 0: "144p" } _url_re = re.compile(r"https?://(?:(\w+\.)?ardmediathek\.de/|mediathek\.daserste\.de/)") _media_id_re = re.compile(r"/play/(?:media|config|sola)/(\d+)") _media_schema = validate.Schema({ "_mediaArray": [{ "_mediaStreamArray": [{ validate.optional("_server"): validate.text, "_stream": validate.any(validate.text, [validate.text]), "_quality": validate.any(int, validate.text) }] }] }) log = logging.getLogger(__name__) class ard_mediathek(Plugin): @classmethod def can_handle_url(cls, url): return _url_re.match(url) is not None def _get_http_streams(self, info): name = QUALITY_MAP.get(info["_quality"], "vod")
class Experience:
    """Client for the Funimation "experience" API.

    Reverse engineered from the web player.  An "experience id"
    identifies one episode in one language/version; switching language
    switches to a sibling experience id of the same episode.
    """

    CSRF_NAME = "csrfmiddlewaretoken"
    login_url = "https://www.funimation.com/log-in/"
    api_base = "https://www.funimation.com/api"
    login_api_url = "https://prod-api-funimationnow.dadcdigital.com/api/auth/login/"
    show_api_url = api_base + "/experience/{experience_id}/"
    sources_api_url = api_base + "/showexperience/{experience_id}/"
    languages = ["english", "japanese"]
    alphas = ["uncut", "simulcast"]

    # Login responses are either {success, error} or {token, user}.
    login_schema = validate.Schema(
        validate.any({
            "success": False,
            "error": validate.text
        }, {
            "token": validate.text,
            "user": {
                "id": int
            }
        }))

    def __init__(self, session, experience_id):
        """
        :param session: streamlink session
        :param experience_id: starting experience_id, may be changed later
        """
        self.session = session
        self.experience_id = experience_id
        self._language = None
        self.cache = {}
        self.token = None

    def request(self, method, url, *args, **kwargs):
        """Issue an API request, adding the auth token when logged in.

        Returns None when the response is an Incapsula challenge page.
        """
        headers = kwargs.pop("headers", {})

        if self.token:
            headers.update({"Authorization": "Token {0}".format(self.token)})
            self.session.http.cookies.update({"src_token": self.token})

        log.debug("Making {0}request to {1}".format(
            "authorized " if self.token else "", url))

        res = self.session.http.request(method,
                                        url,
                                        *args,
                                        headers=headers,
                                        **kwargs)
        if "_Incapsula_Resource" in res.text:
            log.error("This page is protected by Incapsula, please see "
                      "https://github.com/streamlink/streamlink/issues/2088"
                      " for a workaround.")
            return

        return res

    def get(self, *args, **kwargs):
        return self.request("GET", *args, **kwargs)

    def post(self, *args, **kwargs):
        return self.request("POST", *args, **kwargs)

    @property
    def pinst_id(self):
        # Random 8-character alphanumeric "player instance" id sent with
        # source requests.
        return ''.join([
            random.choice(
                "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
            ) for _ in range(8)
        ])

    def _update(self):
        # Fetch and cache the experience data for the current experience_id.
        api_url = self.show_api_url.format(experience_id=self.experience_id)
        log.debug("Requesting experience data: {0}".format(api_url))
        res = self.get(api_url)
        if res:
            data = self.session.http.json(res)
            self.cache[self.experience_id] = data

    @property
    def show_info(self):
        # Cached experience data; fetched on first access.
        if self.experience_id not in self.cache:
            self._update()
        return self.cache.get(self.experience_id)

    @property
    def episode_info(self):
        """
        Search for the episode with the requested experience Id
        :return: the episode dict, or None if not found
        """
        if self.show_info:
            for season in self.show_info["seasons"]:
                for episode in season["episodes"]:
                    for lang in episode["languages"].values():
                        for alpha in lang["alpha"].values():
                            if alpha["experienceId"] == self.experience_id:
                                return episode

    @property
    def language(self):
        # Language name (e.g. "english") of the current experience id.
        for language, lang_data in self.episode_info["languages"].items():
            for alpha in lang_data["alpha"].values():
                if alpha["experienceId"] == self.experience_id:
                    return language

    @property
    def language_code(self):
        # Three-letter code for the current language.
        return {"english": "eng", "japanese": "jpn"}[self.language]

    def set_language(self, language):
        # Switch to the experience id of the same episode in the given
        # language (no-op if the language is unavailable).
        if language in self.episode_info["languages"]:
            for alpha in self.episode_info["languages"][language][
                    "alpha"].values():
                self.experience_id = alpha["experienceId"]

    def _get_alpha(self):
        # The "alpha" (version) record matching the current experience id.
        for lang_data in self.episode_info["languages"].values():
            for alpha in lang_data["alpha"].values():
                if alpha["experienceId"] == self.experience_id:
                    return alpha

    def subtitles(self):
        # Text tracks of the first source of the current version.
        alpha = self._get_alpha()
        for src in alpha["sources"]:
            return src["textTracks"]

    def sources(self):
        """
        Get the sources for the current experience_id, which is tied to a
        specific language
        :return: sources dict
        """
        api_url = self.sources_api_url.format(experience_id=self.experience_id)
        res = self.get(api_url, params={"pinst_id": self.pinst_id})
        return self.session.http.json(res)

    def login_csrf(self):
        # Scrape the CSRF token from the login page's hidden form input.
        r = self.session.http.get(self.login_url)
        for input in itertags(r.text, "input"):
            if input.attributes.get("name") == self.CSRF_NAME:
                return input.attributes.get("value")

    def login(self, email, password):
        """Log in and store the API token.

        :param email: account email
        :param password: account password
        :return: True if a token was obtained
        """
        log.debug("Attempting to login as {0}".format(email))
        r = self.post(
            self.login_api_url,
            data={
                'username': email,
                'password': password,
                self.CSRF_NAME: self.login_csrf()
            },
            raise_for_status=False,
            headers={"Referer": "https://www.funimation.com/log-in/"})
        d = self.session.http.json(r, schema=self.login_schema)
        self.token = d.get("token", None)
        return self.token is not None
# Schema for YouTube's player config / get_video_info response.
#
# NOTE: the original schema declared validate.optional("live_playback")
# twice with different validators (transform(bool) first, validate.text
# later).  In a Python dict literal the later key wins, so only the
# validate.text form was ever in effect; the dead duplicate has been
# removed to make the actual behavior explicit.
_config_schema = validate.Schema({
    # list of available muxed formats
    validate.optional("fmt_list"):
    validate.all(validate.text, validate.transform(parse_fmt_list)),
    # muxed (audio+video) stream map
    validate.optional("url_encoded_fmt_stream_map"):
    validate.all(validate.text, validate.transform(parse_stream_map),
                 [{
                     "itag": validate.all(validate.text,
                                          validate.transform(int)),
                     "quality": validate.text,
                     "url": validate.url(scheme="http"),
                     validate.optional("s"): validate.text,
                     validate.optional("stereo3d"):
                     validate.all(validate.text, validate.transform(int),
                                  validate.transform(bool)),
                 }]),
    # adaptive (audio-only / video-only) stream map
    validate.optional("adaptive_fmts"):
    validate.all(validate.text, validate.transform(parse_stream_map),
                 [{
                     validate.optional("s"): validate.text,
                     # MIME type split into ["audio"|"video", subtype]
                     "type": validate.all(
                         validate.text,
                         validate.transform(
                             lambda t: t.split(";")[0].split("/")),
                         [validate.text, validate.text]),
                     "url": validate.url(scheme="http")
                 }]),
    # HLS variant playlist URL (live streams)
    validate.optional("hlsvp"): validate.text,
    # embedded player response JSON
    validate.optional("player_response"): validate.all(
        validate.text, validate.transform(parse_json), {
            validate.optional("streamingData"): {
                validate.optional("hlsManifestUrl"): validate.text,
            },
            validate.optional("videoDetails"): {
                validate.optional("isLive"): validate.transform(bool),
            }
        }),
    # human-readable reason when the video is unavailable
    validate.optional("reason"):
    validate.all(validate.text, validate.transform(maybe_decode)),
    validate.optional("livestream"): validate.text,
    validate.optional("live_playback"): validate.text,
    validate.optional("author"):
    validate.all(validate.text, validate.transform(maybe_decode)),
    validate.optional("title"):
    validate.all(validate.text, validate.transform(maybe_decode)),
    "status": validate.text
})
def _get_live_streams(self, channel_id):
    """Yield (quality name, HLSStream) pairs for a live Mildom channel.

    :param channel_id: the streamer's numeric user id
    """
    # Get quality info and check if user is live
    data = self.session.http.get(
        "https://cloudac.mildom.com/nonolive/gappserv/live/enterstudio",
        params={
            "__platform": "web",
            "user_id": channel_id,
        },
        headers={"Accept-Language": "en"},
        schema=validate.Schema(
            validate.transform(parse_json),
            {
                "code": int,
                validate.optional("message"): str,
                validate.optional("body"): {
                    validate.optional("status"): int,
                    "anchor_live": int,
                    validate.optional("live_type"): int,
                    "ext": {
                        "cmode_params": [{
                            "cmode": str,
                            "name": str,
                        }],
                        validate.optional("live_mode"): int,
                    },
                },
            },
        ))
    log.trace(f"{data!r}")
    if data["code"] != 0:
        log.debug(data.get("message", "Mildom API returned an error"))
        return
    # anchor_live == 11 appears to mean "currently broadcasting" --
    # inferred from observed API behavior, not documented anywhere.
    if data["body"]["anchor_live"] != 11:
        log.debug("User doesn't appear to be live")
        return
    qualities = []
    for quality_info in data["body"]["ext"]["cmode_params"]:
        # "raw" is the source quality and carries no cmode suffix in the URL
        qualities.append((quality_info["name"],
                          "_" + quality_info["cmode"]
                          if quality_info["cmode"] != "raw" else ""))

    # Create stream URLs
    data = self.session.http.get(
        "https://cloudac.mildom.com/nonolive/gappserv/live/liveserver",
        params={
            "__platform": "web",
            "user_id": channel_id,
            "live_server_type": "hls",
        },
        headers={"Accept-Language": "en"},
        schema=validate.Schema(
            validate.transform(parse_json),
            {
                "code": int,
                validate.optional("message"): str,
                validate.optional("body"): {
                    "stream_server": validate.url(),
                }
            }))
    log.trace(f"{data!r}")
    if data["code"] != 0:
        log.debug(data.get("message", "Mildom API returned an error"))
        return
    # "{}" placeholder receives the per-quality cmode suffix below.
    base_url = url_concat(data["body"]["stream_server"],
                          f"{channel_id}{{}}.m3u8")
    self.session.http.headers.update(
        {"Referer": "https://www.mildom.com/"})
    for quality in qualities:
        yield quality[0], HLSStream(self.session,
                                    base_url.format(quality[1]))
class OlympicChannel(Plugin):
    """Plugin for olympicchannel.com live ("tv") and VOD ("playback") streams."""

    _url_re = re.compile(
        r"http(?:s)?://(\w+)\.?olympicchannel.com/../(?P<type>tv|playback)/(livestream-.\d|.*)/"
    )
    # {0} = language path component (e.g. "/en/"), {1} = stream path
    _live_api_url = "https://www.olympicchannel.com{0}api/v2/metadata/{1}"
    _stream_get_url = "https://www.olympicchannel.com/en/proxy/viewings/"
    _stream_api_schema = validate.Schema({
        u'status': u'ok',
        u'primary': validate.url(),
        validate.optional(u'backup'): validate.url()
    })

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url)

    def _get_vod_streams(self):
        """Resolve a VOD page to its HLS variant playlist."""
        page = self.session.http.get(self.url)
        # The asset id ("asse_" + 32 chars) is embedded in the page body.
        # Use the decoded response text rather than the private ._content
        # bytes attribute the original read.
        asset = re.search(r'asse_.{32}', page.text).group(0)
        post_data = '{"asset_url":"/api/assets/%s/"}' % asset
        stream_data = self.session.http.json(
            self.session.http.post(self._stream_get_url, data=post_data))
        return HLSStream.parse_variant_playlist(
            self.session,
            stream_data['objects'][0]['level3']['streaming_url'])

    def _get_live_streams(self, lang, path):
        """
        Get the live stream in a particular language
        :param lang: language path component of the URL (e.g. "/en/")
        :param path: "tv/..." path identifying the live stream
        :return: variant playlist streams
        """
        res = self.session.http.get(self._live_api_url.format(lang, path))
        live_res = self.session.http.json(res)['default']['uid']
        post_data = '{"channel_url":"/api/channels/%s/"}' % live_res
        # Request the viewing once and fall back to "channel_url" only when
        # "stream_url" is missing.  The original re-issued the whole POST
        # inside "except BaseException", which also swallowed network
        # errors and KeyboardInterrupt.
        data = self.session.http.json(
            self.session.http.post(self._stream_get_url, data=post_data))
        try:
            stream_data = data['stream_url']
        except KeyError:
            stream_data = data['channel_url']
        return HLSStream.parse_variant_playlist(self.session, stream_data)

    def _get_streams(self):
        """
        Find the streams for OlympicChannel
        :return: streams, or None for unknown URL types
        """
        match = self._url_re.match(self.url)
        type_of_stream = match.group('type')
        lang = re.search(r"/../", self.url).group(0)

        if type_of_stream == 'tv':
            path = re.search(r"tv/.*-\d/$", self.url).group(0)
            return self._get_live_streams(lang, path)
        elif type_of_stream == 'playback':
            # The VOD resolver works from self.url directly; the original
            # also computed an unused "path" here, which could crash on
            # URLs without a trailing slash.
            return self._get_vod_streams()
class Schoolism(Plugin):
    """Plugin for schoolism.com lesson videos (account required)."""

    url_re = re.compile(r"https?://(?:www\.)?schoolism\.com/watchLesson.php")
    login_url = "https://www.schoolism.com/index.php"
    # Must be requested before playback so the AES key requests succeed.
    key_time_url = "https://www.schoolism.com/video-html/key-time.php"
    # The lesson playlist is a JS array literal assigned to allVideos.
    playlist_re = re.compile(r"var allVideos=(\[\{.*\}]);", re.DOTALL)
    # Quote bare JS object keys so the playlist parses as JSON.
    js_to_json = partial(re.compile(r'(?!<")(\w+):(?!/)').sub, r'"\1":')
    playlist_schema = validate.Schema(
        validate.transform(playlist_re.search),
        validate.any(
            None,
            validate.all(
                validate.get(1),
                validate.transform(js_to_json),
                validate.transform(
                    lambda x: x.replace(",}", "}")),  # remove invalid ,
                validate.transform(parse_json),
                [{
                    "sources":
                    validate.all(
                        [{
                            "playlistTitle": validate.text,
                            "title": validate.text,
                            "src": validate.text,
                            "type": validate.text,
                        }],
                        # only include HLS streams
                        validate.filter(
                            lambda s: s["type"] == "application/x-mpegurl"))
                }])))

    arguments = PluginArguments(
        PluginArgument("email",
                       required=True,
                       requires=["password"],
                       metavar="EMAIL",
                       help="""
        The email associated with your Schoolism account,
        required to access any Schoolism stream.
        """),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="A Schoolism account password to use with --schoolism-email."
        ),
        PluginArgument("part",
                       type=int,
                       default=1,
                       metavar="PART",
                       help="""
        Play part number PART of the lesson.

        Defaults is 1.
        """))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def login(self, email, password):
        """
        Login to the schoolism account and return the users account
        :param email: (str) email for account
        :param password: (str) password for account
        :return: (str) users email, or None on failure
        """
        if self.options.get("email") and self.options.get("password"):
            res = self.session.http.post(self.login_url,
                                         data={
                                             "email": email,
                                             "password": password,
                                             "redirect": None,
                                             "submit": "Login"
                                         })

            # A successful login is signalled by the auth cookies being set.
            if res.cookies.get("password") and res.cookies.get("email"):
                return res.cookies.get("email")
            else:
                self.logger.error(
                    "Failed to login to Schoolism, incorrect email/password combination"
                )
        else:
            self.logger.error(
                "An email and password are required to access Schoolism streams"
            )

    def _get_streams(self):
        user = self.login(self.options.get("email"),
                          self.options.get("password"))
        if user:
            self.logger.debug("Logged in to Schoolism as {0}", user)
            # The site serves the HTML5 player only to Safari user agents.
            res = self.session.http.get(
                self.url, headers={"User-Agent": useragents.SAFARI_8})
            lesson_playlist = self.playlist_schema.validate(res.text)

            part = self.options.get("part")

            self.logger.info("Attempting to play lesson Part {0}", part)
            found = False

            # make request to key-time api, to get key specific headers
            res = self.session.http.get(
                self.key_time_url, headers={"User-Agent": useragents.SAFARI_8})

            # Playlist parts are 1-indexed; --schoolism-part selects one.
            for i, video in enumerate(lesson_playlist, 1):
                if video["sources"] and i == part:
                    found = True
                    for source in video["sources"]:
                        for s in HLSStream.parse_variant_playlist(
                                self.session,
                                source["src"],
                                headers={
                                    "User-Agent": useragents.SAFARI_8,
                                    "Referer": self.url
                                }).items():
                            yield s

            if not found:
                self.logger.error("Could not find lesson Part {0}", part)
class RTBF(Plugin):
    """Plugin for RTBF Auvio videos and RTBF radio player live streams."""

    GEO_URL = 'https://www.rtbf.be/api/geoloc'
    TOKEN_URL = 'https://token.rtbf.be/'
    RADIO_STREAM_URL = 'http://www.rtbfradioplayer.be/radio/liveradio/rtbf/radios/{}/config.json'

    _url_re = re.compile(
        r'https?://(?:www\.)?(?:rtbf\.be/auvio/.*\?l?id=(?P<video_id>[0-9]+)#?|rtbfradioplayer\.be/radio/liveradio/(?:webradio-)?(?P<radio>.+))'
    )
    # Extracts a quality hint ("720p", "128", ...) from a media URL.
    _stream_size_re = re.compile(r'https?://.+-(?P<size>\d+p?)\..+?$')
    _video_player_re = re.compile(
        r'<iframe\s+class="embed-responsive-item\s+js-embed-iframe".*src="(?P<player_url>.+?)".*?</iframe>',
        re.DOTALL)
    # JSON blob with the stream data, HTML-escaped in a data attribute.
    _video_stream_data_re = re.compile(
        r'<div\s+id="js-embed-player"\s+class="js-embed-player\s+embed-player"\s+data-media="(.+?)"'
    )

    _geo_schema = validate.Schema({
        'country': validate.text,
        'zone': validate.text
    })

    _video_stream_schema = validate.Schema(
        validate.transform(_video_stream_data_re.search),
        validate.any(
            None,
            validate.all(
                validate.get(1), validate.transform(HTMLParser().unescape),
                validate.transform(parse_json), {
                    'geoLocRestriction': validate.text,
                    validate.optional('isLive'): bool,
                    validate.optional('startDate'): validate.text,
                    validate.optional('endDate'): validate.text,
                    'sources': validate.any(
                        [],
                        validate.Schema({
                            validate.text: validate.any(None, '', validate.url())
                        })),
                    validate.optional('urlHls'):
                    validate.any(None, '', validate.url()),
                    validate.optional('urlDash'):
                    validate.any(None, '', validate.url()),
                    validate.optional('streamUrlHls'):
                    validate.any(None, '', validate.url()),
                    validate.optional('streamUrlDash'):
                    validate.any(None, '', validate.url())
                })))

    _radio_stream_schema = validate.Schema({
        'audioUrls': validate.all([{
            'url': validate.url(),
            'mimeType': validate.text
        }])
    })

    @classmethod
    def check_geolocation(cls, geoloc_flag):
        """Return True when the stream is playable from the current location."""
        if geoloc_flag == 'open':
            return True

        res = http.get(cls.GEO_URL)
        data = http.json(res, schema=cls._geo_schema)
        return data['country'] == geoloc_flag or data['zone'] == geoloc_flag

    @classmethod
    def tokenize_stream(cls, url):
        """Exchange a stream URL for a tokenized (authorized) URL."""
        res = http.post(cls.TOKEN_URL, data={'streams[url]': url})
        data = http.json(res)
        return data['streams']['url']

    @staticmethod
    def iso8601_to_epoch(date):
        # Convert an ISO 8601-formatted string date to datetime.
        # (Despite the name, this returns a naive datetime, not an epoch.)
        return datetime.datetime.strptime(date[:-6], '%Y-%m-%dT%H:%M:%S') + \
            datetime.timedelta(hours=int(date[-6:-3]), minutes=int(date[-2:]))

    @classmethod
    def can_handle_url(cls, url):
        return RTBF._url_re.match(url)

    def _get_radio_streams(self, radio):
        """Yield (quality, HTTPStream) pairs for a live radio channel."""
        res = http.get(self.RADIO_STREAM_URL.format(radio.replace('-', '_')))
        streams = http.json(res, schema=self._radio_stream_schema)
        for stream in streams['audioUrls']:
            match = self._stream_size_re.match(stream['url'])
            if match is not None:
                quality = '{}k'.format(match.group('size'))
            else:
                # BUGFIX: the validated key is 'mimeType'; the original read
                # stream['mimetype'], which raised KeyError on this branch.
                quality = stream['mimeType']
            yield quality, HTTPStream(self.session, stream['url'])

    def _get_video_streams(self):
        """Yield streams for an Auvio video or live page."""
        res = http.get(self.url)
        match = self._video_player_re.search(res.text)
        if match is None:
            return
        player_url = match.group('player_url')
        stream_data = http.get(player_url, schema=self._video_stream_schema)
        if stream_data is None:
            return

        # Check geolocation to prevent further errors when stream is parsed
        if not self.check_geolocation(stream_data['geoLocRestriction']):
            self.logger.error('Stream is geo-restricted')
            return

        now = datetime.datetime.now()
        try:
            # Progressive (per-quality) sources, deduplicated by URL.
            if isinstance(stream_data['sources'], dict):
                urls = []
                for profile, url in stream_data['sources'].items():
                    if not url or url in urls:
                        continue
                    match = self._stream_size_re.match(url)
                    if match is not None:
                        quality = match.group('size')
                    else:
                        quality = profile
                    yield quality, HTTPStream(self.session, url)
                    urls.append(url)

            hls_url = stream_data.get('urlHls') or stream_data.get(
                'streamUrlHls')
            if hls_url:
                if stream_data.get('isLive', False):
                    # Live streams require a token
                    hls_url = self.tokenize_stream(hls_url)
                for stream in HLSStream.parse_variant_playlist(
                        self.session, hls_url).items():
                    yield stream
        except IOError as err:
            if '403 Client Error' in str(err):
                # Check whether video is expired
                if 'startDate' in stream_data:
                    if now < self.iso8601_to_epoch(stream_data['startDate']):
                        self.logger.error('Stream is not yet available')
                elif 'endDate' in stream_data:
                    if now > self.iso8601_to_epoch(stream_data['endDate']):
                        self.logger.error('Stream has expired')

    def _get_streams(self):
        match = self.can_handle_url(self.url)
        if match.group('radio'):
            return self._get_radio_streams(match.group('radio'))
        return self._get_video_streams()
class AtresPlayer(Plugin):
    """Plugin for atresplayer.com live channels (HLS and DASH)."""

    url_re = re.compile(r"https?://(?:www\.)?atresplayer\.com/")
    # Preloaded redux state embedded in the page.
    state_re = re.compile(r"""window.__PRELOADED_STATE__\s*=\s*({.*?});""",
                          re.DOTALL)
    # Collects every "href" value (API links) from the preloaded state.
    channel_id_schema = validate.Schema(
        validate.transform(state_re.search),
        validate.any(
            None,
            validate.all(
                validate.get(1),
                validate.transform(parse_json),
                validate.transform(partial(search_dict, key="href")),
            )))
    # Collects every "urlVideo" value (player API links) from a response.
    player_api_schema = validate.Schema(
        validate.any(
            None,
            validate.all(
                validate.transform(parse_json),
                validate.transform(partial(search_dict, key="urlVideo")),
            )))
    # The player API returns a list of stream sources (HLS and/or DASH).
    stream_schema = validate.Schema(
        validate.transform(parse_json), {
            "sources": [
                validate.all({
                    "src": validate.url(),
                    validate.optional("type"): validate.text
                })
            ]
        }, validate.get("sources"))

    @classmethod
    def can_handle_url(cls, url):
        return cls.url_re.match(url) is not None

    def __init__(self, url):
        # must be HTTPS
        super(AtresPlayer, self).__init__(update_scheme("https://", url))

    def _get_streams(self):
        """Follow page state -> API URL -> player API -> stream sources."""
        api_urls = self.session.http.get(self.url,
                                         schema=self.channel_id_schema)
        _api_url = list(api_urls)[0]
        log.debug("API URL: {0}".format(_api_url))
        player_api_url = self.session.http.get(_api_url,
                                               schema=self.player_api_schema)
        for api_url in player_api_url:
            log.debug("Player API URL: {0}".format(api_url))
            for source in self.session.http.get(api_url,
                                                schema=self.stream_schema):
                log.debug("Stream source: {0} ({1})".format(
                    source['src'], source.get("type", "n/a")))

                # Sources without an explicit type are treated as HLS.
                if "type" not in source or source[
                        "type"] == "application/vnd.apple.mpegurl":
                    streams = HLSStream.parse_variant_playlist(
                        self.session, source["src"])
                    if not streams:
                        # Not a variant playlist: expose it as a single
                        # live stream instead.
                        yield "live", HLSStream(self.session, source["src"])
                    else:
                        for s in streams.items():
                            yield s
                elif source["type"] == "application/dash+xml":
                    for s in DASHStream.parse_manifest(
                            self.session, source["src"]).items():
                        yield s
class UHSClient(object):
    """
    API Client, reverse engineered by observing the interactions between the
    web browser and the ustream servers.
    """
    API_URL = "ws://r{0}-1-{1}-{2}-ws-{3}.ums.ustream.tv:1935/1/ustream"
    APP_ID, APP_VERSION = 3, 2
    # Every server message is of the shape {"cmd": ..., "args": [...]}.
    api_schema = validate.Schema({"args": [object], "cmd": validate.text})

    def __init__(self, media_id, application, **options):
        self.media_id = media_id
        self.application = application
        self._referrer = options.pop("referrer", None)
        self._host = None
        self.rsid = self.generate_rsid()
        self.rpin = self.generate_rpin()
        self._connection_id = None
        self._app_id = options.pop("app_id", self.APP_ID)
        self._app_version = options.pop("app_version", self.APP_VERSION)
        self._cluster = options.pop("cluster", "live")
        self._password = options.pop("password")
        self._ws = None

    @property
    def referrer(self):
        return self._referrer

    @referrer.setter
    def referrer(self, referrer):
        # Changing the referrer requires a fresh websocket connection.
        log.info("Updating referrer to: {0}".format(referrer))
        self._referrer = referrer
        self.reconnect()

    @property
    def cluster(self):
        return self._cluster

    @cluster.setter
    def cluster(self, cluster):
        # Changing clusters requires a fresh websocket connection.
        log.info("Switching cluster to: {0}".format(cluster))
        self._cluster = cluster
        self.reconnect()

    def connect(self):
        """Open the websocket and send the initial "connect" command.

        :return: True if the command was sent successfully
        """
        log.debug("Connecting to {0}".format(self.host))
        self._ws = websocket.create_connection(
            self.host,
            header=["User-Agent: {0}".format(useragents.CHROME)],
            origin="https://www.ustream.tv")

        args = dict(
            type="viewer",
            appId=self._app_id,
            appVersion=self._app_version,
            rsid=self.rsid,
            rpin=self.rpin,
            referrer=self._referrer,
            clusterHost=
            "r%rnd%-1-%mediaId%-%mediaType%-%protocolPrefix%-%cluster%.ums.ustream.tv",
            media=str(self.media_id),
            application=self.application)
        if self._password:
            args["password"] = self._password
        result = self.send("connect", **args)
        return result > 0

    def reconnect(self):
        log.debug("Reconnecting...")
        if self._ws:
            self._ws.close()
        return self.connect()

    def generate_rsid(self):
        """Generate a random session id as two hex values.

        BUGFIX: random.randint() requires integer bounds; the original
        float literals (1e10) raise TypeError on Python 3.12+ (deprecated
        since 3.10).  10 ** 10 has the same value as an int.
        """
        return "{0:x}:{1:x}".format(randint(0, 10 ** 10),
                                    randint(0, 10 ** 10))

    def generate_rpin(self):
        """Generate a random pin token (see generate_rsid for the int fix)."""
        return "_rpin.{0}".format(randint(0, 10 ** 15))

    def send(self, command, **args):
        log.debug("Sending `{0}` command".format(command))
        log.trace("{0!r}".format({"cmd": command, "args": [args]}))
        return self._ws.send(json.dumps({"cmd": command, "args": [args]}))

    def recv(self):
        data = parse_json(self._ws.recv(), schema=self.api_schema)
        log.debug("Received `{0}` command".format(data["cmd"]))
        log.trace("{0!r}".format(data))
        return data

    def disconnect(self):
        if self._ws:
            log.debug("Disconnecting...")
            self._ws.close()
            self._ws = None

    @property
    def host(self):
        # Pick a random edge server unless a host was set explicitly.
        return self._host or self.API_URL.format(
            randint(0, 0xffffff), self.media_id, self.application,
            self._cluster)
) (?: / (?P<video_type>[bcv])(?:ideo)? / (?P<video_id>\d+) )? (?: /(?:clip/)? (?P<clip_name>[\w]+) )? """, re.VERBOSE) _access_token_schema = validate.Schema( { "token": validate.text, "sig": validate.text }, validate.union((validate.get("sig"), validate.get("token")))) _token_schema = validate.Schema( { "chansub": { "restricted_bitrates": validate.all([validate.text], validate.filter(lambda n: not re.match( r"(.+_)?archives|live|chunked", n))) } }, validate.get("chansub")) _stream_schema = validate.Schema( { "stream": validate.any(