def fetch_program_api_data(self, data):
    """ Loads the data that contains the main episodes for a show.

    The data is fetched via self.__get_api_url() here, instead of per item, to
    prevent performance issues when a lot of items are generated.

    :param str data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    items = []
    slug = self.parentItem.metaData["slug"]
    variables = {"titleSlugs": [slug.strip("/")]}
    hash_value = "4122efcb63970216e0cfb8abb25b74d1ba2bb7e780f438bbee19d92230d491c5"
    url = self.__get_api_url("TitlePage", hash_value, variables)
    data = UriHandler.open(url, proxy=self.proxy)
    json_data = JsonHelper(data)

    # Get the parent thumb info so it can be propagated to the child items.
    parent_item_thumb_data = json_data.get_value("data", "listablesBySlug", 0, "image")

    possible_folders = json_data.get_value("data", "listablesBySlug", 0, "associatedContent")
    possible_folders = [p for p in possible_folders if p["id"] != "upcoming"]

    if self.__folder_id in self.parentItem.metaData:
        folder_id = self.parentItem.metaData[self.__folder_id]
        Logger.debug("Retrieving folder with id='%s'", folder_id)
        json_data.json = {
            "videos": [f for f in possible_folders if f["id"] == folder_id][0]["items"]
        }
    elif len(possible_folders) == 1:
        json_data.json = {"videos": possible_folders[0]["items"]}
    else:
        json_data.json = {"folders": possible_folders}

    # Attach the parent images with plain loops instead of side-effect
    # list comprehensions.
    for folder in json_data.json.get("folders", []):
        folder[self.__parent_images] = parent_item_thumb_data
    for video in json_data.json.get("videos", []):
        video[self.__parent_images] = parent_item_thumb_data

    return json_data, items
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    if not item.url.endswith(".js"):
        data = UriHandler.open(item.url, proxy=self.proxy)
        data_id = Regexer.do_regex(r'data-id="(\d+)"[^>]+data-playout', data)
        # do_regex returns a (possibly empty) list, never None, so test for
        # emptiness before indexing into it.
        if not data_id:
            Logger.warning("Cannot find stream-id for L1 stream.")
            return item

        data_url = "https://limburg.bbvms.com/p/L1_video/c/{}.json".format(data_id[0])
    else:
        data_url = item.url

    data = UriHandler.open(data_url, proxy=self.proxy)
    json = JsonHelper(data, logger=Logger.instance())
    Logger.trace(json)

    base_url = json.get_value("publicationData", "defaultMediaAssetPath")
    streams = json.get_value("clipData", "assets")
    item.MediaItemParts = []
    part = item.create_new_empty_media_part()
    for stream in streams:
        url = stream.get("src", None)
        # Skip assets without a source before inspecting the URL scheme;
        # `"://" not in None` would raise a TypeError.
        if not url:
            continue
        if "://" not in url:
            url = "{}{}".format(base_url, url)
        bitrate = stream.get("bandwidth", None)
        part.append_media_stream(url, bitrate)

    if not item.thumb and json.get_value("thumbnails"):
        url = json.get_value("thumbnails")[0].get("src", None)
        if url and "http:/" not in url:
            url = "%s%s" % (self.baseUrl, url)
        item.thumb = url
    item.complete = True
    return item
def update_live_item(self, item):
    """ Updates an existing live MediaItem with stream data.

    Tries the HLS stream listed in the page's `data-media` attribute first; if
    no m3u8 URL is present, falls back to fetching a token for an RTMP stream.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)

    # The stream metadata is embedded HTML-encoded in a `data-media` attribute.
    media_regex = 'data-media="([^"]+)"'
    media_info = Regexer.do_regex(media_regex, data)[0]
    media_info = HtmlEntityHelper.convert_html_entities(media_info)
    media_info = JsonHelper(media_info)
    Logger.trace(media_info)

    part = item.create_new_empty_media_part()

    hls_url = media_info.get_value("streamUrl")
    if hls_url is not None and "m3u8" in hls_url:
        Logger.debug("Found HLS url for %s: %s", media_info.json["streamName"], hls_url)

        for s, b in M3u8.get_streams_from_m3u8(hls_url, self.proxy):
            part.append_media_stream(s, b)
        item.complete = True
    else:
        Logger.debug("No HLS url found for %s. Fetching RTMP Token.",
                     media_info.json["streamName"])
        # fetch the token:
        token_url = "%s/api/media/streaming?streamname=%s" \
                    % (self.baseUrl, media_info.json["streamName"])
        token_data = UriHandler.open(token_url, proxy=self.proxy,
                                     additional_headers=item.HttpHeaders, no_cache=True)
        token_data = JsonHelper(token_data)
        token = token_data.get_value("token")
        Logger.debug("Found token '%s' for '%s'", token, media_info.json["streamName"])

        rtmp_url = "rtmp://rtmp.rtbf.be/livecast/%s?%s pageUrl=%s tcUrl=rtmp://rtmp.rtbf.be/livecast" \
                   % (media_info.json["streamName"], token, self.baseUrl)
        rtmp_url = self.get_verifiable_video_url(rtmp_url)
        part.append_media_stream(rtmp_url, 0)
        item.complete = True

    # The item is geo-locked unless the restriction explicitly says "world".
    item.isGeoLocked = not media_info.get_value(
        "geoLocRestriction", fallback="world") == "world"
    return item
def load_all_episodes(self, data):
    """ Accumulates all episode pages into a single JsonHelper result.

    Pages through the AT5 news API (at most 20 pages) until a page returns
    fewer items than the page size, then returns the combined listing.

    :param str data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[JsonHelper,list[MediaItem]]

    """

    data, items = self.add_live_channel(data)

    all_episodes = []
    url_format = "https://at5news.vinsontv.com/api/news?source=web&slug=tv&page={}"
    for page in range(0, 20):
        page_data = UriHandler.open(url_format.format(page), proxy=self.proxy)
        page_json = JsonHelper(page_data)
        page_items = page_json.get_value("category", "news", fallback=[])
        all_episodes += page_items
        # A short page means the end of the listing was reached.
        if len(page_items) < page_json.get_value("pageSize"):
            break

    result = JsonHelper("{}")
    result.json = all_episodes
    return result, items
def authenticate(self, username, password):
    """ Authenticates a user against AWS Cognito.

    First initiates an authentication request, then responds to the returned
    PASSWORD_VERIFIER challenge.

    :param str username: The username to authenticate with.
    :param str password: The password to authenticate with.

    :return: A tuple of (id-token, refresh-token), or (None, None) on failure.
    :rtype: tuple[str|None,str|None]

    """

    # Step 1: First initiate an authentication request
    auth_request = self.__get_authentication_request(username)
    auth_data = JsonHelper.dump(auth_request)
    auth_headers = {
        "X-Amz-Target": "AWSCognitoIdentityProviderService.InitiateAuth",
        "Accept-Encoding": "identity",
        "Content-Type": "application/x-amz-json-1.1"
    }
    auth_response = UriHandler.open(self.url, proxy=self.__proxy, params=auth_data,
                                    additional_headers=auth_headers, force_text=True)
    auth_response_json = JsonHelper(auth_response)
    challenge_parameters = auth_response_json.get_value("ChallengeParameters")
    if self.__logger:
        self.__logger.trace(challenge_parameters)

    challenge_name = auth_response_json.get_value("ChallengeName")
    if not challenge_name == "PASSWORD_VERIFIER":
        message = auth_response_json.get_value("message")
        if self.__logger:
            self.__logger.error("Cannot start authentication challenge: %s", message or None)
        # Always return a 2-tuple so callers can unpack (id_token, refresh_token);
        # the original `return None` broke tuple-unpacking callers.
        return None, None

    # Step 2: Respond to the Challenge with a valid ChallengeResponse
    challenge_request = self.__get_challenge_response_request(challenge_parameters, password)
    challenge_data = JsonHelper.dump(challenge_request)
    challenge_headers = {
        "X-Amz-Target": "AWSCognitoIdentityProviderService.RespondToAuthChallenge",
        "Content-Type": "application/x-amz-json-1.1"
    }
    auth_response = UriHandler.open(self.url, proxy=self.__proxy, params=challenge_data,
                                    additional_headers=challenge_headers, force_text=True)

    auth_response_json = JsonHelper(auth_response)
    if "message" in auth_response_json.json:
        # Guard the logger like every other logging call in this method does.
        if self.__logger:
            self.__logger.error("Error logging in: %s",
                                auth_response_json.get_value("message"))
        return None, None

    id_token = auth_response_json.get_value("AuthenticationResult", "IdToken")
    refresh_token = auth_response_json.get_value("AuthenticationResult", "RefreshToken")
    return id_token, refresh_token
def add_categories(self, data):
    """ Performs pre-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    Logger.info("Performing Pre-Processing")
    items = []

    def _make_folder(title, url):
        # Small local factory for a complete, non-grouped folder item.
        folder = MediaItem(title, url)
        folder.complete = True
        folder.dontGroup = True
        return folder

    items.append(_make_folder(
        "\b.: Categorieën :.",
        "http://m.schooltv.nl/api/v1/categorieen.json?size=100"))
    items.append(_make_folder(
        "\b.: Tips :.",
        "http://m.schooltv.nl/api/v1/programmas/tips.json?size=100"))

    data = JsonHelper(data)
    ages = _make_folder("\b.: Leeftijden :.", "")
    for age_group in ("0-4", "5-6", "7-8", "9-12", "13-15", "16-18"):
        age_item = _make_folder(
            "%s Jaar" % (age_group, ),
            "http://m.schooltv.nl/api/v1/leeftijdscategorieen/%s/afleveringen.json?"
            "size=%s&sort=Nieuwste" % (age_group, self.__PageSize))
        ages.items.append(age_item)

        # We should list programs instead of videos, so just prefill them here.
        for program in data.get_value():
            if age_group in program['ageGroups']:
                age_item.items.append(self.create_episode_item(program))
    items.append(ages)

    Logger.debug("Pre-Processing finished")
    return data, items
def _purge_texture_cache(self, channel_path):
    """ Removes those entries from the textures cache that are no longer required.

    :param str channel_path: the channel path

    """

    self._logger.info("Purging Texture for: %s", channel_path)

    # Load the expected texture hashes from the channel pack manifest.
    pack_file = os.path.join(channel_path, "..", "channelpack.json")
    with io.open(pack_file, 'rt', encoding='utf-8') as fd:
        textures = JsonHelper(fd.read()).get_value("textures")

    # Remove items not listed in the manifest.
    cdn_folder = self._get_cdn_sub_folder(channel_path)
    texture_path = os.path.join(self.__channelTexturePath, cdn_folder)
    if not os.path.isdir(texture_path):
        self._logger.warning("Missing path '%s' to purge", texture_path)
        return

    texture_change = False
    for image in os.listdir(texture_path):
        if not image.lower().endswith((".png", ".jpg")):
            continue

        image_key = "%s/%s" % (cdn_folder, image)
        file_path = os.path.join(self.__channelTexturePath, cdn_folder, image)

        if image_key not in textures:
            self._logger.warning("Texture no longer required: %s", file_path)
            os.remove(file_path)
            texture_change = True
            continue

        # Known texture: verify its MD5 hash against the manifest.
        if self.__get_hash(file_path) == textures[image_key]:
            self._logger.trace("Texture up to date: %s", file_path)
            continue

        self._logger.warning("Texture expired: %s", file_path)
        os.remove(file_path)
        texture_change = True
        # Fetch the updated texture right away if it was already in use.
        if file_path in Cached.__retrievedTexturePaths:
            self._get_texture_uri(channel_path, image)

    # Always reset the Kodi Texture cache for this channel when anything changed.
    if texture_change:
        self._purge_kodi_cache(cdn_folder)

    return
def update_json_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    data = UriHandler.open(item.url)
    video_data = JsonHelper(data)
    stream_data = video_data.get_value("playable")
    if not stream_data:
        return item

    part = item.create_new_empty_media_part()
    for stream_info in stream_data["assets"]:
        url = stream_info["url"]
        stream_type = stream_info["format"]
        if stream_type == "HLS":
            item.complete = M3u8.update_part_with_m3u8_streams(part, url)
        else:
            # Fixed typo in the log message ("unknow" -> "unknown").
            Logger.warning("Found unknown stream type: %s", stream_type)

    if "subtitles" not in stream_data or not stream_data["subtitles"]:
        return item

    # Use the first subtitle that is enabled by default.
    for sub in stream_data["subtitles"]:
        sub_url = None
        sub_type = sub["type"]
        default_sub = sub["defaultOn"]
        if default_sub:
            sub_url = sub["webVtt"]
            sub_type = "webvtt"  # set Retrospect type

        if sub_url:
            part.Subtitle = SubtitleHelper.download_subtitle(
                sub_url, format=sub_type)
            break

    return item
def __extract_session_data(self, logon_data):
    """ Extracts the relevant session values from a Gigya logon response.

    :param str logon_data: The raw JSON logon response.

    :return: A tuple of (UID, UIDSignature, signatureTimestamp), or
             (None, None, None) when the logon failed.
    :rtype: tuple[str|None,str|None,str|None]

    """

    logon_json = JsonHelper(logon_data)
    result_code = logon_json.get_value("statusCode")
    if result_code != 200:
        # Fixed typo in the log message ("loging" -> "logging").
        Logger.error("Error logging in: %s - %s",
                     logon_json.get_value("errorMessage"),
                     logon_json.get_value("errorDetails"))
        return None, None, None

    return logon_json.get_value("UID"), \
        logon_json.get_value("UIDSignature"), \
        logon_json.get_value("signatureTimestamp")
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)
    json = JsonHelper(data)
    video_info = json.get_value("content", "videoInfos")
    part = item.create_new_empty_media_part()

    # Prefer the HD HLS playlist, then the SD one.
    hls_url = None
    if "HLSurlHD" in video_info:
        hls_url = video_info["HLSurlHD"]
    elif "HLSurl" in video_info:
        hls_url = video_info["HLSurl"]

    if hls_url is not None:
        for stream_url, bitrate in M3u8.get_streams_from_m3u8(hls_url, self.proxy):
            item.complete = True
            part.append_media_stream(stream_url, bitrate)

    # A progressive MP4 download link is added as an extra stream.
    if "downloadLink" in video_info:
        part.append_media_stream(video_info["downloadLink"], 1000)

    return item
def update_video_item(self, item):
    """Updates an existing MediaItem with more data.

    Arguments:
    item : MediaItem - the MediaItem that needs to be updated

    Returns:
    The original item with more data added to it's properties.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    """

    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    data = UriHandler.open(item.url, proxy=self.proxy)

    # get the playlist GUID
    playlist_guids = Regexer.do_regex(
        "<div[^>]+data-playlist-id='([^']+)'[^>]+></div>", data)
    if not playlist_guids:
        # let's try the alternative then (for the new channels)
        playlist_guids = Regexer.do_regex(
            'local_playlist[", -]+([a-f0-9]{20})"', data)
    if not playlist_guids:
        # Neither pattern matched: bail out instead of raising an IndexError.
        Logger.warning("Could not find a playlist GUID for %s", item)
        return item
    playlist_guid = playlist_guids[0]

    play_list_url = "http://api.mtvnn.com/v2/nl/NL/local_playlists/{}.json?video_format=m3u8".format(
        playlist_guid)
    data = UriHandler.open(play_list_url, proxy=self.proxy)

    from resources.lib.helpers.jsonhelper import JsonHelper
    from resources.lib.streams.m3u8 import M3u8
    json_data = JsonHelper(data)
    m3u8_url = json_data.get_value("local_playlist_videos", 0, "url")
    part = item.create_new_empty_media_part()
    item.complete = M3u8.update_part_with_m3u8_streams(part,
                                                       m3u8_url,
                                                       proxy=self.proxy,
                                                       channel=self,
                                                       encrypted=True)

    return item
def __update_video_from_brightcove(self, item, data, use_adaptive_with_encryption):
    """ Updates an existing MediaItem with more data based on an MPD stream.

    :param str data: Stream info retrieved from BrightCove.
    :param bool use_adaptive_with_encryption: Do we use the Adaptive InputStream add-on?
    :param MediaItem item: The original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    part = item.create_new_empty_media_part()

    # Then try the new BrightCove JSON
    bright_cove_regex = '<video[^>]+data-video-id="(?<videoId>[^"]+)[^>]+data-account="(?<videoAccount>[^"]+)'
    bright_cove_data = Regexer.do_regex(Regexer.from_expresso(bright_cove_regex), data)
    if not bright_cove_data:
        Logger.warning("Error updating using BrightCove data: %s", item)
        return item

    Logger.info("Found new BrightCove JSON data")
    bright_cove_url = 'https://edge.api.brightcove.com/playback/v1/accounts/' \
                      '%(videoAccount)s/videos/%(videoId)s' % bright_cove_data[0]
    # The `pk=...` policy key is required by the BrightCove playback API.
    headers = {
        "Accept": "application/json;pk=BCpkADawqM3ve1c3k3HcmzaxBvD8lXCl89K7XEHiKutxZArg2c5RhwJHJANOwPwS_4o7UsC4RhIzXG8Y69mrwKCPlRkIxNgPQVY9qG78SJ1TJop4JoDDcgdsNrg"
    }
    bright_cove_data = UriHandler.open(bright_cove_url, proxy=self.proxy,
                                       additional_headers=headers)
    bright_cove_json = JsonHelper(bright_cove_data)
    streams = [d for d in bright_cove_json.get_value("sources") if d["container"] == "M2TS"]
    # Old filter
    # streams = filter(lambda d: d["container"] == "M2TS", bright_cove_json.get_value("sources"))

    if not streams:
        Logger.warning("Error extracting streams from BrightCove data: %s", item)
        return item

    # noinspection PyTypeChecker
    stream_url = streams[0]["src"]

    # these streams work better with the the InputStreamAddon because it removes the
    # "range" http header
    if use_adaptive_with_encryption:
        Logger.info("Using InputStreamAddon for playback of HLS stream")
        strm = part.append_media_stream(stream_url, 0)
        M3u8.set_input_stream_addon_input(strm, proxy=self.proxy)
        item.complete = True
        return item

    for s, b in M3u8.get_streams_from_m3u8(stream_url, self.proxy):
        item.complete = True
        part.append_media_stream(s, b)
    return item
def log_on(self, username, password):
    """ Performs the logon of a user.

    :param str username: The username
    :param str password: The password to use

    :returns: a AuthenticationResult with the result of the log on
    :rtype: AuthenticationResult

    """

    # The SSO API requires a random context id of the form R<10 digits>.
    context_id = int(random.random() * 8999999999) + 1000000000

    # Initial bootstrap call, which retrieves the `gmid` and `ucid` cookies.
    bootstrap_url = "https://sso.rtl.nl/accounts.webSdkBootstrap" \
                    "?apiKey={}" \
                    "&pageURL=https%3A%2F%2Fwww.rtlxl.nl%2F" \
                    "&format=json" \
                    "&callback=gigya.callback" \
                    "&context=R{}".format(self.api_key, context_id)
    bootstrap_response = UriHandler.open(bootstrap_url, no_cache=True)
    bootstrap_data = JsonHelper(bootstrap_response)
    if bootstrap_data.get_value("statusCode") != 200:
        Logger.error("Error initiating login")
        return AuthenticationResult(None)

    # The actual login request, which requires an async call to retrieve the result.
    login_url = "https://sso.rtl.nl/accounts.login" \
                "?context={0}".format(context_id)
    login_params = {
        "loginID": username,
        "password": password,
        "pageURL": "https://www.rtlxl.nl/profiel",
        "format": "json",
        "context": "R{}".format(context_id),
        "targetEnv": "jssdk",
        "sessionExpiration": 7776000
    }
    login_params.update(self.__common_param_dict)
    login_response = UriHandler.open(login_url, data=login_params, no_cache=True)

    # Process the result into an AuthenticationResult.
    authentication_result = self.__extract_session_data(login_response)
    authentication_result.existing_login = False
    return authentication_result
def extract_live_channel_data(self, data):
    """ Adds the channel items to the listing.

    :param str data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    json_string, _ = self.extract_json_data(data)
    json_data = JsonHelper(json_string)
    channels = json_data.get_value("guidePage", "channels")

    # Build a dictionary of channels keyed by their url tag.
    channel_list = {channel_data["urlName"]: channel_data for channel_data in channels}

    # Only keep the programs that are currently being broadcast.
    programs = {
        program_id: program
        for program_id, program in json_data.get_value("guidePage", "programs").items()
        if program.get("isActiveBroadcast", False)
    }

    schedules = json_data.get_value("guidePage", "schedules")
    for channel_name, program_ids in schedules.items():
        channel_tag = channel_name.split(":")[0]
        channel = channel_list.get(channel_tag, None)
        if channel is None:
            continue

        # See what program is playing right now.
        program_id = [p for p in program_ids if p in programs]
        if not program_id:
            del channel_list[channel_tag]
            continue

        program_id = program_id[0]
        if program_id.startswith("TT"):
            del channel_list[channel_tag]
            continue

        program = programs.get(program_id)
        if program is None:
            del channel_list[channel_tag]
            continue

        channel.update(program)

    # Materialise the values: a Python 3 dict-view is neither indexable nor
    # JSON-serialisable, which breaks later processing of json_data.json.
    json_data.json = list(channel_list.values())
    return json_data, []
def get_authentication_token(self):
    """ Fetches an authentication token for the given login.

    :return: token value
    :rtype: str

    """

    response = UriHandler.open(
        "https://api.rtl.nl/rtlxl/token/api/2/token", no_cache=True)
    return JsonHelper(response).get_value("accessToken")
def update_json_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    # Requests may need to originate from a local IP to pass geo checks.
    headers = {}
    if self.localIP:
        headers.update(self.localIP)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=headers)
    video_data = JsonHelper(data)
    stream_data = video_data.get_value("mediaAssetsOnDemand")
    if not stream_data:
        return item

    use_adaptive = AddonSettings.use_adaptive_stream_add_on()
    asset = stream_data[0]
    part = item.create_new_empty_media_part()

    if "hlsUrl" in asset:
        hls_url = asset["hlsUrl"]
        if use_adaptive:
            stream = part.append_media_stream(hls_url, 0)
            M3u8.set_input_stream_addon_input(stream, self.proxy, headers=headers)
            item.complete = True
        else:
            for stream_url, bitrate in M3u8.get_streams_from_m3u8(
                    hls_url, self.proxy, headers=headers):
                item.complete = True
                part.append_media_stream(stream_url, bitrate)

    # The .ttml subtitles are also available in WebVTT format.
    if "timedTextSubtitlesUrl" in asset and asset["timedTextSubtitlesUrl"]:
        sub_url = asset["timedTextSubtitlesUrl"].replace(".ttml", ".vtt")
        sub_url = HtmlEntityHelper.url_decode(sub_url)
        part.Subtitle = SubtitleHelper.download_subtitle(sub_url, format="webvtt")

    return item
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    NOTE: This is a 100% copy of the chn_vtmbe.Channel.update_html_clip_item

    """

    data = UriHandler.open(item.url)
    json_data = Regexer.do_regex(
        r"Drupal\.settings,\s*({[\w\W]+?})\);\s*//-->", data)
    json_data = JsonHelper(json_data[-1])
    video_info = json_data.get_value('medialaan_player', )

    video_config = None
    for key in video_info:
        Logger.trace("Checking key: %s", key)
        if "videoConfig" not in video_info[key]:
            continue

        video_config = video_info[key]['videoConfig']['video']
        break

    if not video_config:
        Logger.error("No video info found.")
        # Bail out: continuing would raise a TypeError on `None['formats']`.
        return item

    streams = video_config['formats']
    for stream in streams:
        stream_url = stream['url']
        if stream['type'] == "mp4":
            item.append_single_stream(stream_url, 0)
            item.complete = True

    return item
def __get_video_streams(self, video_id, part):
    """ Fetches the video stream for a given videoId

    @param video_id:    (integer) the videoId
    @param part:        (MediaPart) the mediapart to add the streams to
    @return:            (bool) indicating a successfull retrieval

    """

    # The geo cookie is hardcoded for now as the expiry does not seem to matter.
    geo_cookie = HtmlEntityHelper.url_encode(
        '{"countryCode":"%s","expiry":1446917369986}' % (self.language.upper(),))
    headers = {"Cookie": "dsc-geo=%s" % (geo_cookie,)}

    # Derive the secure API host from the channel's base URL.
    _, _, host, _ = self.baseUrl.split("/", 3)
    _, domain = host.split(".", 1)
    url = "https://secure.%s/secure/api/v2/user/authorization/stream/%s?stream_type=hls" \
          % (domain, video_id,)
    data = UriHandler.open(url, proxy=self.proxy, additional_headers=headers, no_cache=True)

    hls_url = JsonHelper(data).get_value("hls")
    if hls_url is None:
        return False

    # The original query string has to be re-appended to each sub-playlist URL.
    query_string = hls_url.split("?")[-1] if "?" in hls_url else None

    streams_found = False
    for stream_url, bitrate in M3u8.get_streams_from_m3u8(hls_url, self.proxy):
        if "X-I-FRAME-STREAM" in stream_url:
            continue

        streams_found = True
        if query_string is not None:
            separator = "&" if "?" in stream_url else "?"
            stream_url = "%s%s%s" % (stream_url, separator, query_string)
        part.append_media_stream(stream_url, bitrate)

    return streams_found
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)
    json = JsonHelper(data)
    mid = json.get_value("mid")

    part = item.create_new_empty_media_part()
    part.Subtitle = NpoStream.get_subtitle(mid, proxy=self.proxy)

    # First try the direct (non-ODI) streams listed in the metadata itself.
    for stream in json.get_value("videoStreams"):
        if stream["url"].startswith("odi"):
            continue
        part.append_media_stream(stream["url"], stream["bitrate"] / 1000)
        item.complete = True

    if item.has_media_item_parts():
        return item

    # Fall back to the generic NPO stream resolver.
    for stream_url, bitrate in NpoStream.get_streams_from_npo(None, mid, proxy=self.proxy):
        item.complete = True
        part.append_media_stream(stream_url, bitrate)

    return item
def extract_json(self, data):
    """ Extracts the JSON datda from a main list.

    :param str data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[JsonHelper,list[MediaItem]]

    """

    # The listing data is embedded as a Redux state object in a script tag.
    redux_state = Regexer.do_regex(
        r'window.__IPLAYER_REDUX_STATE__ = (.*?);\s*</script>', data)
    return JsonHelper(redux_state[0]), []
def add_seasons(self, data, items):
    """ Performs post-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str|JsonHelper data: The retrieve data that was loaded for the
                                 current item and URL.
    :param list[MediaItem] items: The currently available items

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: list[MediaItem]

    """

    Logger.info("Performing Post-Processing")

    # Only applies when we are listing a show with a known GUID.
    if not self.parentItem or "guid" not in self.parentItem.metaData:
        return items

    existing_seasons = {i.metaData.get("season_id") for i in items}
    if not existing_seasons:
        return items

    item_id = self.parentItem.metaData["guid"]
    season_info_url = "http://www.mtv.nl/feeds/intl_m308/V8_0_0/{0}/{1}/{1}".\
        format(self.__season_list_id, item_id)
    season_info = JsonHelper(UriHandler.open(season_info_url))

    for season in season_info.get_value("result", "data", "seasons", fallback=[]):
        Logger.trace("Found season: %s", season)
        season_id = season["id"]
        # Skip the season that is already part of the current listing.
        if season_id in existing_seasons:
            Logger.trace("Season is current season")
            continue

        url = "{}/feeds/intl_m112/V8_0_0/{}/{}/{}"\
            .format(self.baseUrl, self.__show_list_id, item_id, season_id)
        items.append(MediaItem(season["eTitle"], url))

    Logger.debug("Post-Processing finished")
    return items
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    # Fetch the stream metadata with an authenticated request.
    bearer = self.__authenticator.get_authentication_token()
    video_data = UriHandler.open(
        item.url, additional_headers={"Authorization": "Bearer {}".format(bearer)})
    video_json = JsonHelper(video_data)

    license_url = video_json.get_value("licenseUrl")
    manifest_url = video_json.get_value("manifest")
    drm_token = video_json.get_value("token")

    part = item.create_new_empty_media_part()
    stream = part.append_media_stream(manifest_url, 0)

    # Configure the DRM license for the adaptive input stream add-on.
    from resources.lib.streams.mpd import Mpd
    license_key = Mpd.get_license_key(
        license_url,
        key_headers={
            "Authorization": "Bearer {0}".format(drm_token),
            "content-type": "application/octet-stream"
        },
        key_type="A")
    Mpd.set_input_stream_addon_input(stream, license_key=license_key)
    item.complete = True
    return item
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process
    that data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete
    flag will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    if not item.url.endswith("m3u8"):
        data = UriHandler.open(item.url, proxy=self.proxy)
        # The page embeds a `var playerConfig = {...}` JSON object; the HLS
        # stream lives under "source.hls" with "mzsource.hls" as fallback.
        stream_info = Regexer.do_regex(self.mediaUrlRegex, data)
        if not stream_info:
            Logger.error("Cannot find JSON stream info.")
            return item

        # Named player_config (not `json`) to avoid shadowing the stdlib module.
        player_config = JsonHelper(stream_info[0])
        Logger.trace(player_config.json)

        stream = player_config.get_value("source", "hls")
        if stream is None:
            stream = player_config.get_value("mzsource", "hls")
        Logger.debug("Found HLS: %s", stream)
    else:
        stream = item.url

    part = item.create_new_empty_media_part()
    for s, b in M3u8.get_streams_from_m3u8(stream, self.proxy):
        item.complete = True
        part.append_media_stream(s, b)

    return item
def update_video_item_javascript(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process
    that data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete
    flag will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    # For "/aflevering/<id>/<slug>" style urls the id is the second-to-last
    # path segment, otherwise it is the last one.
    url_parts = item.url.rsplit("/", 3)
    if url_parts[-3] == "aflevering":
        video_id = url_parts[-2]
    else:
        video_id = url_parts[-1]
    Logger.debug("Found videoId '%s' for '%s'", video_id, item.url)

    url = "https://omroepzeeland.bbvms.com/p/regiogrid/q/sourceid_string:{}*.js".format(
        video_id)
    data = UriHandler.open(url, proxy=self.proxy)

    json_data = Regexer.do_regex(r'var opts\s*=\s*({.+});\W*//window', data)
    # Guard against a missing match: the unguarded `json_data[0]` used to
    # raise an IndexError when the player script changed layout.
    if not json_data:
        Logger.warning("Could not find 'var opts' JSON data for '%s'", item.url)
        return item

    Logger.debug("Found jsondata with size: %s", len(json_data[0]))
    json_data = JsonHelper(json_data[0])
    clip_data = json_data.get_value("clipData", "assets")
    server = json_data.get_value("publicationData", "defaultMediaAssetPath")

    part = item.create_new_empty_media_part()
    for clip in clip_data:
        part.append_media_stream(
            "{}{}".format(server, clip["src"]), int(clip["bandwidth"]))

    item.complete = True
    return item
def add_missing_live_streams(self, data):
    """ Performs pre-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str data: The retrieve data that was loaded for the current item
                     and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    # The live TV stream is not part of the feed, so add it manually.
    slam_tv = MediaItem(
        "Slam! TV",
        "https://hls.slam.nl/streaming/hls/SLAM!/playlist.m3u8")
    slam_tv.icon = self.icon
    slam_tv.thumb = self.noImage
    slam_tv.type = "video"
    slam_tv.isLive = True

    # The radio stream is a direct AAC url; it needs no further resolving,
    # so the single stream is attached immediately and the item completed.
    radio_url = (
        "https://18973.live.streamtheworld.com/SLAM_AAC.aac"
        "?ttag=PLAYER%3ANOPREROLL&tdsdk=js-2.9"
        "&pname=TDSdk&pversion=2.9&banners=none")
    slam_radio = MediaItem("Slam! FM", radio_url)
    slam_radio.icon = self.icon
    slam_radio.thumb = self.noImage
    slam_radio.type = "audio"
    slam_radio.isLive = True
    slam_radio.append_single_stream(slam_radio.url)
    slam_radio.complete = True

    items = [slam_tv, slam_radio]

    data = JsonHelper(data)
    # Index all included entries and assets by their sys-id for later lookup.
    for section in ("Entry", "Asset"):
        for include in data.get_value("includes", section):
            self.__liveData[include["sys"]["id"]] = include

    return data, items
def fetch_genre_api_data(self, data):
    """ Loads the programs and clips for a genre via the GraphQL API.

    :param str data: The retrieve data that was loaded for the current item
                     and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    url = self.__get_api_url(
        "GenreProgramsAO",
        "189b3613ec93e869feace9a379cca47d8b68b97b3f53c04163769dcffa509318",
        {"genre": [self.parentItem.metaData[self.__genre_id]]}
    )
    data = UriHandler.open(url, proxy=self.proxy)
    json_data = JsonHelper(data)
    possible_lists = json_data.get_value("data", "genres", 0, "selectionsForWeb")

    # Take the first selection of each type; default to an empty list when a
    # type is absent (the old `[...][0]` raised an IndexError in that case).
    program_items = next(
        (genres["items"] for genres in possible_lists
         if genres["selectionType"] == "all"), [])
    clip_items = next(
        (genres["items"] for genres in possible_lists
         if genres["selectionType"] == "clips"), [])

    # Re-root the JSON so the generic processing finds programs and videos.
    json_data.json = {
        "programs": [p["item"] for p in program_items],
        "videos": [c["item"] for c in clip_items]
    }
    return json_data, []
def create_episode_item(self, result_set):
    """ Creates a new MediaItem for an episode.

    This method creates a new MediaItem from the Regular Expression or Json
    results <result_set>. The method should be implemented by derived classes
    and are specific to the channel.

    :param list[str] result_set: The result_set of the self.episodeItemRegex

    :return: A new MediaItem of type 'folder'.
    :rtype: MediaItem|None

    """

    # The embedded JSON has its double quotes HTML-encoded; decode them
    # before parsing and delegate to the API-based creator.
    decoded_json = result_set[1].replace("&quot;", "\"")
    episode_data = JsonHelper(decoded_json).json
    return self.create_episode_item_api(episode_data)
def update_live_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process
    that data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete
    flag will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    """

    # http://services.vrt.be/videoplayer/r/live.json?_1466364209811=
    channel_data = UriHandler.open(
        "http://services.vrt.be/videoplayer/r/live.json", proxy=self.proxy)
    channel_data = JsonHelper(channel_data)

    # Find the HLS url of the channel whose id occurs in the item's url.
    url = None
    for channel_id, channel_info in channel_data.json.items():
        if channel_id in item.url:
            url = channel_info.get("hls")

    if url is None:
        Logger.error("Could not find stream for live channel: %s", item.url)
        return item

    Logger.debug("Found stream url for %s: %s", item, url)
    part = item.create_new_empty_media_part()
    for s, b in M3u8.get_streams_from_m3u8(url, self.proxy):
        item.complete = True
        part.append_media_stream(s, b)
    return item
def add_page_items(self, data):
    """ Performs pre-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str data: The retrieve data that was loaded for the current item
                     and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    Logger.info("Performing Pre-Processing")
    items = []

    # Named json_data (not `json`) to avoid shadowing the stdlib module.
    json_data = JsonHelper(data)
    total_results = json_data.get_value("totalResults")
    from_value = json_data.get_value("from")
    size_value = json_data.get_value("size")

    # Only add a "more pages" item when results remain beyond this page.
    if from_value + size_value < total_results:
        more_pages = LanguageHelper.get_localized_string(
            LanguageHelper.MorePages)
        url = self.parentItem.url.split('?')[0]
        url = "%s?size=%s&from=%s&sort=Nieuwste" % (
            url, size_value, from_value + size_value)
        Logger.debug("Adding next-page item from %s to %s",
                     from_value + size_value,
                     from_value + size_value + size_value)

        next_page = MediaItem(more_pages, url)
        next_page.icon = self.parentItem.icon
        next_page.fanart = self.parentItem.fanart
        next_page.thumb = self.parentItem.thumb
        next_page.dontGroup = True
        items.append(next_page)

    Logger.debug("Pre-Processing finished")
    return json_data, items
def __send_haste_bin(self, code):
    """ Sends a logfile to paste.kodi.tv

    :param str code: The content to post

    :return: The url of the posted paste.
    :rtype: str

    :raises IOError: When the service did not return a paste key.

    """

    response = UriHandler.open("https://paste.kodi.tv/documents",
                               params=code.encode(),
                               proxy=self.__proxy)

    # Named json_data (not `json`) to avoid shadowing the stdlib module.
    json_data = JsonHelper(response)
    key = json_data.get_value("key")
    if not key:
        # The service reports failures via a "message" value.
        raise IOError(json_data.get_value("message"))

    url = "https://paste.kodi.tv/{}".format(key)
    if self.__logger:
        self.__logger.info("HasteBin Url: %s", url)
    return url