def update_live_item(self, item):
    """ Updates a live MediaItem: prefers an HLS stream and falls back to a
    tokenized RTMP stream when no m3u8 URL is available.

    :param MediaItem item: the live item that needs updating.
    :return: the original item with streams (and geo-lock flag) added.
    :rtype: MediaItem
    """
    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)

    # The stream metadata is embedded, HTML-encoded, in a data-media attribute.
    media_regex = 'data-media="([^"]+)"'
    media_info = Regexer.do_regex(media_regex, data)[0]
    media_info = HtmlEntityHelper.convert_html_entities(media_info)
    media_info = JsonHelper(media_info)
    Logger.trace(media_info)

    part = item.create_new_empty_media_part()
    hls_url = media_info.get_value("streamUrl")
    if hls_url is not None and "m3u8" in hls_url:
        Logger.debug("Found HLS url for %s: %s", media_info.json["streamName"], hls_url)

        for s, b in M3u8.get_streams_from_m3u8(hls_url, self.proxy):
            part.append_media_stream(s, b)
        item.complete = True
    else:
        Logger.debug("No HLS url found for %s. Fetching RTMP Token.",
                     media_info.json["streamName"])

        # fetch the token:
        token_url = "%s/api/media/streaming?streamname=%s" \
                    % (self.baseUrl, media_info.json["streamName"])
        token_data = UriHandler.open(token_url, proxy=self.proxy,
                                     additional_headers=item.HttpHeaders, no_cache=True)
        token_data = JsonHelper(token_data)
        token = token_data.get_value("token")
        Logger.debug("Found token '%s' for '%s'", token, media_info.json["streamName"])

        rtmp_url = "rtmp://rtmp.rtbf.be/livecast/%s?%s pageUrl=%s tcUrl=rtmp://rtmp.rtbf.be/livecast" \
                   % (media_info.json["streamName"], token, self.baseUrl)
        rtmp_url = self.get_verifiable_video_url(rtmp_url)
        part.append_media_stream(rtmp_url, 0)
        item.complete = True

    # Geo-locked unless the restriction explicitly says "world".
    item.isGeoLocked = not media_info.get_value(
        "geoLocRestriction", fallback="world") == "world"
    return item
def load_all_episodes(self, data):
    """ Fetches up to 20 pages of news items and merges them into one JSON result.

    :param str data: The retrieve data that was loaded for the current item and URL.
    :return: A tuple of the merged data and a list of MediaItems that were generated.
    :rtype: tuple[JsonHelper,list[MediaItem]]
    """
    episodes_json = []
    data, items = self.add_live_channel(data)
    for i in range(0, 20):
        url = "https://at5news.vinsontv.com/api/news?source=web&slug=tv&page={}".format(
            i)
        data = UriHandler.open(url, proxy=self.proxy)
        json_data = JsonHelper(data)
        item_data = json_data.get_value("category", "news", fallback=[])
        episodes_json += item_data
        # A page shorter than the reported pageSize means it was the last one.
        if len(item_data) < json_data.get_value("pageSize"):
            break

    # Wrap the combined page results in a single JsonHelper for later processing.
    dummy = JsonHelper("{}")
    dummy.json = episodes_json
    return dummy, items
def fetch_program_api_data(self, data):
    """ Loads the data that contains the main episodes for a show.

    This is done to prevent performance issues of the self.__get_api_url()
    method when a lot of items are generated.

    :param str data: The retrieve data that was loaded for the current item and URL.
    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]
    """
    items = []
    slug = self.parentItem.metaData["slug"]
    variables = {"titleSlugs": [slug.strip("/")]}
    hash_value = "4122efcb63970216e0cfb8abb25b74d1ba2bb7e780f438bbee19d92230d491c5"
    url = self.__get_api_url("TitlePage", hash_value, variables)
    data = UriHandler.open(url, proxy=self.proxy)
    json_data = JsonHelper(data)

    # Get the parent thumb info so folders/videos can inherit it later on.
    parent_item_thumb_data = json_data.get_value("data", "listablesBySlug", 0, "image")

    possible_folders = json_data.get_value("data", "listablesBySlug", 0, "associatedContent")
    possible_folders = [p for p in possible_folders if p["id"] != "upcoming"]

    if self.__folder_id in self.parentItem.metaData:
        # A specific folder was selected: keep only that folder's videos.
        folder_id = self.parentItem.metaData[self.__folder_id]
        Logger.debug("Retrieving folder with id='%s'", folder_id)
        json_data.json = {
            "videos": [f for f in possible_folders if f["id"] == folder_id][0]["items"]}
    elif len(possible_folders) == 1:
        json_data.json = {"videos": possible_folders[0]["items"]}
    else:
        json_data.json = {"folders": possible_folders}

    # Fixed idiom: use plain for-loops for the dict-update side effect instead
    # of building throw-away lists with comprehensions.
    if "folders" in json_data.json:
        for folder in json_data.json["folders"]:
            folder.update({self.__parent_images: parent_item_thumb_data})
    if "videos" in json_data.json:
        for video in json_data.json["videos"]:
            video.update({self.__parent_images: parent_item_thumb_data})

    return json_data, items
def __send_haste_bin(self, code):
    """ Sends a logfile to paste.kodi.tv.

    :param str code: The content to post.
    :return: the public URL of the created paste.
    :rtype: str
    :raises IOError: if the paste service did not return a document key.
    """
    response = UriHandler.open("https://paste.kodi.tv/documents",
                               params=code.encode(),
                               proxy=self.__proxy)
    json = JsonHelper(response)
    key = json.get_value("key")
    if not key:
        # No key means the service rejected the post; surface its message.
        raise IOError(json.get_value("message"))

    url = "https://paste.kodi.tv/{}".format(key)
    if self.__logger:
        self.__logger.info("HasteBin Url: %s", url)
    return url
def add_page_items(self, data):
    """ Performs pre-process actions for data processing.

    Adds a "more pages" navigation item when the current result page does not
    yet cover all available results.

    :param str data: The retrieve data that was loaded for the current item and URL.
    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]
    """
    Logger.info("Performing Pre-Processing")
    generated_items = []
    json_data = JsonHelper(data)

    total_results = json_data.get_value("totalResults")
    page_offset = json_data.get_value("from")
    page_size = json_data.get_value("size")
    next_offset = page_offset + page_size

    if next_offset < total_results:
        label = LanguageHelper.get_localized_string(
            LanguageHelper.MorePages)
        base_url = self.parentItem.url.split('?')[0]
        page_url = "%s?size=%s&from=%s&sort=Nieuwste" % (
            base_url, page_size, next_offset)
        Logger.debug("Adding next-page item from %s to %s",
                     next_offset, next_offset + page_size)

        next_page = MediaItem(label, page_url)
        next_page.icon = self.parentItem.icon
        next_page.fanart = self.parentItem.fanart
        next_page.thumb = self.parentItem.thumb
        next_page.dontGroup = True
        generated_items.append(next_page)

    Logger.debug("Pre-Processing finished")
    return json_data, generated_items
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)
    json = JsonHelper(data)

    part = item.create_new_empty_media_part()
    part.Subtitle = NpoStream.get_subtitle(json.get_value("mid"), proxy=self.proxy)

    # Prefer the streams listed directly in the metadata (skip "odi" URLs).
    for stream in json.get_value("videoStreams"):
        if not stream["url"].startswith("odi"):
            part.append_media_stream(stream["url"], stream["bitrate"] / 1000)
            item.complete = True

    if item.has_media_item_parts():
        return item

    # Fallback: resolve streams via the NPO stream service using the "mid".
    for s, b in NpoStream.get_streams_from_npo(None, json.get_value("mid"), proxy=self.proxy):
        item.complete = True
        part.append_media_stream(s, b)

    return item
def add_categories(self, data):
    """ Performs pre-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str data: The retrieve data that was loaded for the current item and URL.
    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]
    """
    Logger.info("Performing Pre-Processing")
    items = []

    cat = MediaItem(
        "\b.: Categorieën :.",
        "http://m.schooltv.nl/api/v1/categorieen.json?size=100")
    cat.complete = True
    cat.dontGroup = True
    items.append(cat)

    tips = MediaItem(
        "\b.: Tips :.",
        "http://m.schooltv.nl/api/v1/programmas/tips.json?size=100")
    tips.complete = True
    tips.dontGroup = True
    items.append(tips)

    data = JsonHelper(data)
    ages = MediaItem("\b.: Leeftijden :.", "")
    ages.complete = True
    ages.dontGroup = True
    for age in ("0-4", "5-6", "7-8", "9-12", "13-15", "16-18"):
        age_item = MediaItem(
            "%s Jaar" % (age, ),
            "http://m.schooltv.nl/api/v1/leeftijdscategorieen/%s/afleveringen.json?"
            "size=%s&sort=Nieuwste" % (age, self.__PageSize))
        age_item.complete = True
        age_item.dontGroup = True
        ages.items.append(age_item)

        # We should list programs instead of videos, so just prefill them here.
        for program in data.get_value():
            if age in program['ageGroups']:
                age_item.items.append(self.create_episode_item(program))
    items.append(ages)

    Logger.debug("Pre-Processing finished")
    return data, items
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    if item.url.startswith("http"):
        # Resolve the MZID from the page content.
        data = UriHandler.open(item.url, proxy=self.proxy)
        json_data = Regexer.do_regex(self.mediaUrlRegex, data)
        json = JsonHelper(json_data[0])
        mzid = json.get_value("mzid")
        if not mzid:
            # No MZID available: fall back to the direct HLS source.
            item.url = json.get_value("source", "hls")
            return self.__update_from_source(item)
    else:
        # The URL already is the MZID.
        mzid = item.url

    hls_over_dash = self._get_setting("hls_over_dash", 'false') == 'true'

    from resources.lib.streams.vualto import Vualto
    v = Vualto(self, "ketnet@prod")
    item = v.get_stream_info(item, mzid, hls_over_dash=hls_over_dash)
    return item
def add_missing_live_streams(self, data):
    """ Performs pre-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str data: The retrieve data that was loaded for the current item and URL.
    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]
    """
    items = []

    slam = MediaItem(
        "Slam! TV",
        "https://hls.slam.nl/streaming/hls/SLAM!/playlist.m3u8")
    slam.type = "video"
    slam.isLive = True
    items.append(slam)

    slam_fm = MediaItem(
        "Slam! FM",
        "https://18973.live.streamtheworld.com/SLAM_AAC.aac"
        "?ttag=PLAYER%3ANOPREROLL&tdsdk=js-2.9"
        "&pname=TDSdk&pversion=2.9&banners=none")
    slam_fm.type = "audio"
    slam_fm.isLive = True
    # The audio stream URL is already the playable stream.
    slam_fm.append_single_stream(slam_fm.url)
    slam_fm.complete = True
    items.append(slam_fm)

    # Index the "includes" entries/assets by their sys-id for later lookups.
    data = JsonHelper(data)
    for e in data.get_value("includes", "Entry"):
        self.__liveData[e["sys"]["id"]] = e
    for e in data.get_value("includes", "Asset"):
        self.__liveData[e["sys"]["id"]] = e
    return data, items
def update_json_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    data = UriHandler.open(item.url)
    video_data = JsonHelper(data)
    stream_data = video_data.get_value("playable")
    if not stream_data:
        return item

    part = item.create_new_empty_media_part()
    for stream_info in stream_data["assets"]:
        url = stream_info["url"]
        stream_type = stream_info["format"]
        if stream_type == "HLS":
            item.complete = M3u8.update_part_with_m3u8_streams(part, url)
        else:
            # Fixed typo in log message: "unknow" -> "unknown".
            Logger.warning("Found unknown stream type: %s", stream_type)

    if "subtitles" not in stream_data or not stream_data["subtitles"]:
        return item

    # Use the first subtitle that is marked as the default one.
    for sub in stream_data["subtitles"]:
        sub_url = None
        sub_type = sub["type"]
        default_sub = sub["defaultOn"]
        if default_sub:
            sub_url = sub["webVtt"]
            sub_type = "webvtt"  # set Retrospect type

        if sub_url:
            part.Subtitle = SubtitleHelper.download_subtitle(
                sub_url, format=sub_type)
            break
    return item
def update_video_item(self, item):
    """Updates an existing MediaItem with more data.

    Arguments:
    item : MediaItem - the MediaItem that needs to be updated

    Returns:
    The original item with more data added to it's properties.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.
    """
    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    data = UriHandler.open(item.url, proxy=self.proxy)

    # get the playlist GUID
    playlist_guids = Regexer.do_regex(
        "<div[^>]+data-playlist-id='([^']+)'[^>]+></div>", data)
    if not playlist_guids:
        # let's try the alternative then (for the new channels)
        playlist_guids = Regexer.do_regex(
            'local_playlist[", -]+([a-f0-9]{20})"', data)
    playlist_guid = playlist_guids[0]

    play_list_url = "http://api.mtvnn.com/v2/nl/NL/local_playlists/{}.json?video_format=m3u8".format(
        playlist_guid)
    data = UriHandler.open(play_list_url, proxy=self.proxy)

    from resources.lib.helpers.jsonhelper import JsonHelper
    from resources.lib.streams.m3u8 import M3u8
    json_data = JsonHelper(data)
    m3u8_url = json_data.get_value("local_playlist_videos", 0, "url")

    part = item.create_new_empty_media_part()
    item.complete = M3u8.update_part_with_m3u8_streams(part,
                                                       m3u8_url,
                                                       proxy=self.proxy,
                                                       channel=self,
                                                       encrypted=True)
    return item
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)
    json = JsonHelper(data)
    video_info = json.get_value("content", "videoInfos")

    part = item.create_new_empty_media_part()
    # Prefer the HD HLS playlist, then the SD one.
    if "HLSurlHD" in video_info:
        # HLSurlHD=http://srfvodhd-vh.akamaihd.net/i/vod/potzmusig/2015/03/
        # potzmusig_20150307_184438_v_webcast_h264_,q10,q20,q30,q40,q50,q60,.mp4.csmil/master.m3u8
        for s, b in M3u8.get_streams_from_m3u8(video_info["HLSurlHD"], self.proxy):
            item.complete = True
            part.append_media_stream(s, b)
    elif "HLSurl" in video_info:
        # HLSurl=http://srfvodhd-vh.akamaihd.net/i/vod/potzmusig/2015/03/
        # potzmusig_20150307_184438_v_webcast_h264_,q10,q20,q30,q40,.mp4.csmil/master.m3u8
        for s, b in M3u8.get_streams_from_m3u8(video_info["HLSurl"], self.proxy):
            item.complete = True
            part.append_media_stream(s, b)

    if "downloadLink" in video_info:
        # downloadLink=http://podcastsource.sf.tv/nps/podcast/10vor10/2015/03/
        # 10vor10_20150304_215030_v_podcast_h264_q10.mp4
        part.append_media_stream(video_info["downloadLink"], 1000)

    return item
def authenticate(self, username, password):
    """ Authenticates a user against AWS Cognito using the SRP
    PASSWORD_VERIFIER challenge flow.

    :param str username: The username to authenticate.
    :param str password: The password for the user.

    :return: A tuple of (IdToken, RefreshToken), or (None, None) on failure.
    :rtype: tuple[str|None,str|None]
    """
    # Step 1: First initiate an authentication request
    auth_request = self.__get_authentication_request(username)
    auth_data = JsonHelper.dump(auth_request)
    auth_headers = {
        "X-Amz-Target": "AWSCognitoIdentityProviderService.InitiateAuth",
        "Accept-Encoding": "identity",
        "Content-Type": "application/x-amz-json-1.1"
    }
    auth_response = UriHandler.open(self.url, proxy=self.__proxy,
                                    params=auth_data,
                                    additional_headers=auth_headers)
    auth_response_json = JsonHelper(auth_response)
    challenge_parameters = auth_response_json.get_value("ChallengeParameters")
    if self.__logger:
        self.__logger.trace(challenge_parameters)

    challenge_name = auth_response_json.get_value("ChallengeName")
    if not challenge_name == "PASSWORD_VERIFIER":
        if self.__logger:
            self.__logger.error("Cannot start authentication challenge")
        # Fixed: previously returned a bare `None` here while the other failure
        # path returns a (None, None) tuple; callers unpack two values.
        return None, None

    # Step 2: Respond to the Challenge with a valid ChallengeResponse
    challenge_request = self.__get_challenge_response_request(challenge_parameters, password)
    challenge_data = JsonHelper.dump(challenge_request)
    challenge_headers = {
        "X-Amz-Target": "AWSCognitoIdentityProviderService.RespondToAuthChallenge",
        "Content-Type": "application/x-amz-json-1.1"
    }
    auth_response = UriHandler.open(self.url, proxy=self.__proxy,
                                    params=challenge_data,
                                    additional_headers=challenge_headers)

    auth_response_json = JsonHelper(auth_response)
    if "message" in auth_response_json.json:
        # Fixed: guard the optional logger like the other code paths do.
        if self.__logger:
            self.__logger.error("Error logging in: %s",
                                auth_response_json.get_value("message"))
        return None, None

    id_token = auth_response_json.get_value("AuthenticationResult", "IdToken")
    refresh_token = auth_response_json.get_value("AuthenticationResult", "RefreshToken")
    return id_token, refresh_token
def log_on(self, username, password): """ Peforms the logon of a user. :param str username: The username :param str password: The password to use :returns: a AuthenticationResult with the result of the log on :rtype: AuthenticationResult """ # first we need a random context_id R<10 numbers> context_id = int(random.random() * 8999999999) + 1000000000 # then we do an initial bootstrap call, which retrieves the `gmid` and `ucid` cookies url = "https://sso.rtl.nl/accounts.webSdkBootstrap" \ "?apiKey={}" \ "&pageURL=https%3A%2F%2Fwww.rtlxl.nl%2F" \ "&format=json" \ "&callback=gigya.callback" \ "&context=R{}".format(self.api_key, context_id) init_login = UriHandler.open(url, no_cache=True) init_data = JsonHelper(init_login) if init_data.get_value("statusCode") != 200: Logger.error("Error initiating login") return AuthenticationResult(None) # actually do the login request, which requires an async call to retrieve the result login_url = "https://sso.rtl.nl/accounts.login" \ "?context={0}".format(context_id) login_data = { "loginID": username, "password": password, # "include": "profile,data", # "includeUserInfo": "true", "pageURL": "https://www.rtlxl.nl/profiel", "format": "json", # "callback": "gigya.callback", "context": "R{}".format(context_id), "targetEnv": "jssdk", "sessionExpiration": 7776000 } login_data.update(self.__common_param_dict) login_response = UriHandler.open(login_url, data=login_data, no_cache=True) # Process the result authentication_result = self.__extract_session_data(login_response) authentication_result.existing_login = False return authentication_result
def __update_video_from_brightcove(self, item, data, use_adaptive_with_encryption):
    """ Updates an existing MediaItem with more data based on an MPD stream.

    :param str data: Stream info retrieved from BrightCove.
    :param bool use_adaptive_with_encryption: Do we use the Adaptive InputStream add-on?
    :param MediaItem item: The original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    part = item.create_new_empty_media_part()

    # Then try the new BrightCove JSON
    bright_cove_regex = '<video[^>]+data-video-id="(?<videoId>[^"]+)[^>]+data-account="(?<videoAccount>[^"]+)'
    bright_cove_data = Regexer.do_regex(Regexer.from_expresso(bright_cove_regex), data)
    if not bright_cove_data:
        Logger.warning("Error updating using BrightCove data: %s", item)
        return item

    Logger.info("Found new BrightCove JSON data")
    bright_cove_url = 'https://edge.api.brightcove.com/playback/v1/accounts/' \
                      '%(videoAccount)s/videos/%(videoId)s' % bright_cove_data[0]
    headers = {
        # The "pk" policy key is required by the BrightCove playback API.
        "Accept": "application/json;pk=BCpkADawqM3ve1c3k3HcmzaxBvD8lXCl89K7XEHiKutxZArg2c5RhwJHJANOwPwS_4o7UsC4RhIzXG8Y69mrwKCPlRkIxNgPQVY9qG78SJ1TJop4JoDDcgdsNrg"
    }
    bright_cove_data = UriHandler.open(bright_cove_url, proxy=self.proxy,
                                       additional_headers=headers)
    bright_cove_json = JsonHelper(bright_cove_data)
    streams = [d for d in bright_cove_json.get_value("sources") if d["container"] == "M2TS"]
    # Old filter
    # streams = filter(lambda d: d["container"] == "M2TS", bright_cove_json.get_value("sources"))
    if not streams:
        Logger.warning("Error extracting streams from BrightCove data: %s", item)
        return item

    # noinspection PyTypeChecker
    stream_url = streams[0]["src"]

    # these streams work better with the InputStreamAddon because it removes the
    # "range" http header
    if use_adaptive_with_encryption:
        Logger.info("Using InputStreamAddon for playback of HLS stream")
        strm = part.append_media_stream(stream_url, 0)
        M3u8.set_input_stream_addon_input(strm, proxy=self.proxy)
        item.complete = True
        return item

    for s, b in M3u8.get_streams_from_m3u8(stream_url, self.proxy):
        item.complete = True
        part.append_media_stream(s, b)
    return item
def get_authentication_token(self):
    """ Fetches an authentication token for the given login.

    :return: token value
    :rtype: str
    """
    raw_response = UriHandler.open(
        "https://api.rtl.nl/rtlxl/token/api/2/token", no_cache=True)
    # Extract the access token from the JSON reply in a single pass.
    return JsonHelper(raw_response).get_value("accessToken")
def update_json_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    headers = {}
    if self.localIP:
        headers.update(self.localIP)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=headers)
    video_data = JsonHelper(data)
    stream_data = video_data.get_value("mediaAssetsOnDemand")
    if not stream_data:
        return item

    use_adaptive = AddonSettings.use_adaptive_stream_add_on()
    # Only the first on-demand asset is used.
    stream_data = stream_data[0]
    part = item.create_new_empty_media_part()
    if "hlsUrl" in stream_data:
        hls_url = stream_data["hlsUrl"]
        if use_adaptive:
            stream = part.append_media_stream(hls_url, 0)
            M3u8.set_input_stream_addon_input(stream, self.proxy, headers=headers)
            item.complete = True
        else:
            for s, b in M3u8.get_streams_from_m3u8(hls_url, self.proxy, headers=headers):
                item.complete = True
                part.append_media_stream(s, b)

    if "timedTextSubtitlesUrl" in stream_data and stream_data["timedTextSubtitlesUrl"]:
        # Swap the TTML extension for WebVTT before downloading the subtitle.
        sub_url = stream_data["timedTextSubtitlesUrl"].replace(".ttml", ".vtt")
        sub_url = HtmlEntityHelper.url_decode(sub_url)
        part.Subtitle = SubtitleHelper.download_subtitle(sub_url, format="webvtt")
    return item
def __is_already_logged_on(self, username):
    """ Check if the given user is logged on and sets what packages he/she has.

    :param str username:

    :return: Indicator if the user is alreadly logged in
    :rtype: bool
    """
    me = UriHandler.open("https://disco-api.dplay.se/users/me",
                         proxy=self.proxy, no_cache=True)
    if UriHandler.instance().status.code >= 300:
        return False

    account_data = JsonHelper(me)
    current_user = account_data.get_value("data", "attributes", "username")
    if current_user is not None and current_user != username:
        # A different user is signed in: log that session out first.
        UriHandler.open("https://disco-api.dplay.se/logout",
                        data="", proxy=self.proxy, no_cache=True)
        return False

    # An anonymous session does not count as being logged in.
    if account_data.get_value("data", "attributes", "anonymous"):
        return False

    Logger.debug("Already logged in")
    packages = account_data.get_value("data", "attributes", "packages", fallback=[])
    self.__has_premium = "Premium" in packages
    return True
def __get_video_streams(self, video_id, part): """ Fetches the video stream for a given videoId @param video_id: (integer) the videoId @param part: (MediaPart) the mediapart to add the streams to @return: (bool) indicating a successfull retrieval """ # hardcoded for now as it does not seem top matter dscgeo = '{"countryCode":"%s","expiry":1446917369986}' % ( self.language.upper(), ) dscgeo = HtmlEntityHelper.url_encode(dscgeo) headers = {"Cookie": "dsc-geo=%s" % (dscgeo, )} # send the data http, nothing, host, other = self.baseUrl.split("/", 3) subdomain, domain = host.split(".", 1) url = "https://secure.%s/secure/api/v2/user/authorization/stream/%s?stream_type=hls" \ % (domain, video_id,) data = UriHandler.open(url, proxy=self.proxy, additional_headers=headers, no_cache=True) json = JsonHelper(data) url = json.get_value("hls") if url is None: return False streams_found = False if "?" in url: qs = url.split("?")[-1] else: qs = None for s, b in M3u8.get_streams_from_m3u8(url, self.proxy): # and we need to append the original QueryString if "X-I-FRAME-STREAM" in s: continue streams_found = True if qs is not None: if "?" in s: s = "%s&%s" % (s, qs) else: s = "%s?%s" % (s, qs) part.append_media_stream(s, b) return streams_found
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem

    NOTE: This is a 100% copy of the chn_vtmbe.Channel.update_html_clip_item
    """
    data = UriHandler.open(item.url)
    json_data = Regexer.do_regex(
        r"Drupal\.settings,\s*({[\w\W]+?})\);\s*//-->", data)
    json_data = JsonHelper(json_data[-1])
    video_info = json_data.get_value('medialaan_player', )

    video_config = None
    for key in video_info:
        Logger.trace("Checking key: %s", key)
        if "videoConfig" not in video_info[key]:
            continue

        video_config = video_info[key]['videoConfig']['video']
        break

    if not video_config:
        Logger.error("No video info found.")
        # Fixed: bail out here. Previously execution fell through and raised a
        # TypeError on `video_config['formats']` with video_config being None.
        return item

    streams = video_config['formats']
    for stream in streams:
        stream_url = stream['url']
        if stream['type'] == "mp4":
            item.append_single_stream(stream_url, 0)
            item.complete = True

    return item
def update_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update none complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process that
    data or retrieve it's real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    if the returned item does not have a MediaItemPart then the self.complete flag
    will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to it's properties.
    :rtype: MediaItem
    """
    Logger.debug('Starting update_video_item for %s (%s)', item.name, self.channelName)

    # Get the MZID
    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=item.HttpHeaders)
    json_data = Regexer.do_regex(
        r'<script type="application/ld\+json">(.*?)</script>', data)
    json_info = JsonHelper(json_data[-1])
    video_id = json_info.get_value("video", "@id")
    publication_id = json_info.get_value("publication", -1, "@id")
    # The MZID is the publication id and video id joined with a "$".
    mzid = "{}${}".format(publication_id, video_id)
    return self.update_video_for_mzid(item, mzid)
def select_video_section(self, data):
    """ Performs pre-process actions for data processing

    :param str|unicode data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]
    """
    Logger.info("Performing Pre-Processing")
    items = []

    # Drop everything from the A-Z index onwards.
    end_of_section = data.rfind('<div class="ketnet-abc-index">')
    if end_of_section > 0:
        data = data[:end_of_section]

    # find the first main video
    json_data = Regexer.do_regex(self.mediaUrlRegex, data)
    if not json_data:
        Logger.debug("No show data found as JSON")
        return data, items

    Logger.trace(json_data[0])
    json = JsonHelper(json_data[0])
    title = json.get_value("title")
    url = json.get_value("source", "hls") or ""
    item = MediaItem(title, url)
    item.type = 'video'
    item.description = json.get_value("description", fallback=None)
    item.thumb = json.get_value("image", fallback=self.noImage)
    item.fanart = self.parentItem.fanart
    item.complete = False
    items.append(item)

    Logger.debug("Pre-Processing finished")
    return data, items
def __validate_and_get_add_on_version(self, path):
    """ Parses the channelpack.json file and checks if all is OK.

    :param str|unicode path: The path to load the addon from.

    :return: the AddonId-Version
    :rtype: tuple[str|unicode|none,str|unicode|none]
    """
    addon_file = os.path.join(path, "channelpack.json")

    # continue if no channelpack.json exists (fixed stale comment that
    # referred to addon.xml)
    if not os.path.isfile(addon_file):
        Logger.info("No channelpack.json found at %s.", addon_file)
        return None, None

    # Fixed: open read-only ('rt' instead of 'rt+'); the file is only read
    # here and the read-write mode fails on read-only installations.
    with io.open(addon_file, 'rt', encoding='utf-8') as f:
        channel_json = f.read()

    channels_data = JsonHelper(channel_json)
    pack_version = channels_data.get_value("version")
    package_id = channels_data.get_value("id")
    if not pack_version or not package_id:
        Logger.critical(
            "Cannot determine Channel Pack version. Not loading Add-on @ '%s'.", path)
        return None, None

    package_version = Version(version=pack_version)
    if Config.version.are_compatible(package_version):
        Logger.info("Adding %s version %s", package_id, package_version)
        return package_id, package_version
    else:
        Logger.warning("Skipping %s version %s: Versions do not match.",
                       package_id, package_version)
        return None, None
def add_seasons(self, data, items):
    """ Performs post-process actions for data processing.

    Accepts an data from the process_folder_list method, BEFORE the items are
    processed. Allows setting of parameters (like title etc) for the channel.
    Inside this method the <data> could be changed and additional items can
    be created.

    The return values should always be instantiated in at least ("", []).

    :param str|JsonHelper data: The retrieve data that was loaded for the current item and URL.
    :param list[MediaItem] items: The currently available items

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: list[MediaItem]
    """
    Logger.info("Performing Post-Processing")

    if not self.parentItem or "guid" not in self.parentItem.metaData:
        return items

    existing_seasons = set([i.metaData.get("season_id") for i in items])
    if not existing_seasons:
        return items

    item_id = self.parentItem.metaData["guid"]
    season_info_url = "http://www.mtv.nl/feeds/intl_m308/V8_0_0/{0}/{1}/{1}".\
        format(self.__season_list_id, item_id)
    season_data = UriHandler.open(season_info_url)
    season_info = JsonHelper(season_data)
    for season in season_info.get_value("result", "data", "seasons", fallback=[]):
        Logger.trace("Found season: %s", season)
        season_id = season["id"]
        # Skip seasons that are already present in the item list.
        if season_id in existing_seasons:
            Logger.trace("Season is current season")
            continue

        url = "{}/feeds/intl_m112/V8_0_0/{}/{}/{}"\
            .format(self.baseUrl, self.__show_list_id, item_id, season_id)
        season_item = MediaItem(season["eTitle"], url)
        items.append(season_item)

    Logger.debug("Post-Processing finished")
    return items
def __send_git_hub_gist(self, name, code):
    """ Send a file to a Github gist.

    :param str|unicode name: Name of the logfile paste/gist.
    :param str code: The content to post.

    :return: the ID of the gist
    :rtype: int

    """

    json_headers = {"Content-Type": "application/json"}

    gist_payload = JsonHelper.dump({
        "description": name,
        "public": False,
        "files": {
            name: {"content": code}
        }
    }, pretty_print=False)

    data = UriHandler.open("https://api.github.com/gists",
                           params=gist_payload.encode(),
                           proxy=self.__proxy,
                           additional_headers=json_headers)
    if not data:
        raise IOError("Error posting Gist to GitHub")

    gist_url = JsonHelper(data).get_value("html_url")
    if self.__logger:
        self.__logger.info("Gist: %s", gist_url)

    # Shorten the gist link with the Google URL shortener:
    # POST https://www.googleapis.com/urlshortener/v1/url
    # Content-Type: application/json
    google = "https://www.googleapis.com/urlshortener/v1/url?key=%s" % (
        self.__apiKey, )
    shortened = UriHandler.open(
        google,
        params=JsonHelper.dump({"longUrl": gist_url}, False),
        proxy=self.__proxy,
        additional_headers=json_headers)
    return JsonHelper(shortened).get_value("id")
def __extract_session_data(self, logon_data):
    """ Extracts the session values from a logon response.

    :param str logon_data: The JSON logon response to parse.

    :return: A tuple of (UID, UIDSignature, signatureTimestamp), or
             (None, None, None) when the logon did not return status 200.
    :rtype: tuple

    """

    logon_json = JsonHelper(logon_data)
    result_code = logon_json.get_value("statusCode")
    if result_code != 200:
        # Log message typo fixed: "loging" -> "logging".
        Logger.error("Error logging in: %s - %s",
                     logon_json.get_value("errorMessage"),
                     logon_json.get_value("errorDetails"))
        return None, None, None

    return logon_json.get_value("UID"), \
        logon_json.get_value("UIDSignature"), \
        logon_json.get_value("signatureTimestamp")
def fetch_genre_api_data(self, data):
    """ Fetches the programs and clips for the current genre via the API.

    :param str data: The retrieved data that was loaded for the current
                     item and URL (replaced here by the API response).

    :return: A tuple of the updated JsonHelper data (with "programs" and
             "videos" keys) and an empty list of MediaItems.
    :rtype: tuple[JsonHelper,list[MediaItem]]

    """

    url = self.__get_api_url(
        "GenreProgramsAO",
        "189b3613ec93e869feace9a379cca47d8b68b97b3f53c04163769dcffa509318",
        {"genre": [self.parentItem.metaData[self.__genre_id]]}
    )
    data = UriHandler.open(url, proxy=self.proxy)
    json_data = JsonHelper(data)
    possible_lists = json_data.get_value("data", "genres", 0, "selectionsForWeb")

    # Take the first selection of each type; fall back to an empty list
    # instead of raising an IndexError when a selection type is missing.
    program_items = next(
        (genres["items"] for genres in possible_lists
         if genres["selectionType"] == "all"), [])
    clip_items = next(
        (genres["items"] for genres in possible_lists
         if genres["selectionType"] == "clips"), [])

    json_data.json = {
        "programs": [p["item"] for p in program_items],
        "videos": [c["item"] for c in clip_items]
    }
    return json_data, []
def _purge_kodi_cache(self, channel_texture_path):
    """ Calls the JSON RPC within Kodi that removes all cached textures
    whose url contains the value given in channel_texture_path.

    :param str channel_texture_path: The (partial) texture URL to match.

    """

    # Local import: only needed here to build the RPC payloads.
    import json

    # Build the command with json.dumps so channel_texture_path is properly
    # escaped; the old %-interpolation produced invalid JSON for values
    # containing quotes or backslashes.
    json_cmd = json.dumps({
        "jsonrpc": "2.0",
        "method": "Textures.GetTextures",
        "params": {
            "filter": {"operator": "contains", "field": "url",
                       "value": channel_texture_path},
            "properties": ["url"]
        },
        "id": "libTextures"
    })
    json_results = XbmcWrapper.execute_json_rpc(json_cmd, self._logger)

    results = JsonHelper(json_results, logger=self._logger)
    if "error" in results.json or "result" not in results.json:
        self._logger.error(
            "Error retrieving textures:\nCmd : %s\nResult: %s", json_cmd, results.json)
        return

    results = results.get_value("result", "textures", fallback=[])
    for result in results:
        texture_id = result["textureid"]
        texture_url = result["url"]
        self._logger.debug("Going to remove texture: %d - %s", texture_id, texture_url)
        json_cmd = json.dumps({
            "jsonrpc": "2.0",
            "method": "Textures.RemoveTexture",
            "params": {"textureid": texture_id}
        })
        XbmcWrapper.execute_json_rpc(json_cmd, self._logger)
    return
def merge_season_data(self, data):
    """ Merge some season data to make it more easy for parsing.

    The return values should always be instantiated in at least ("", []).

    :param str data: The retrieve data that was loaded for the current
                     item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    json_data = JsonHelper(data)
    season_folders = json_data.get_value(
        "context", "dispatcher", "stores",
        "ContentPageProgramStore", "format", "videos")

    # Flatten every season's program list into one list of video items.
    items = [
        self.create_json_video_item(video)
        for season in season_folders
        for video in season_folders[season]['program']
    ]
    return data, items
def extract_live_channel_data(self, data):
    """ Adds the channel items to the listing.

    :param str data: The retrieve data that was loaded for the current item and URL.

    :return: A tuple of the data and a list of MediaItems that were generated.
    :rtype: tuple[str|JsonHelper,list[MediaItem]]

    """

    json_string, _ = self.extract_json_data(data)
    json_data = JsonHelper(json_string)
    channels = json_data.get_value("channelsPage", "schedule")

    # Keep only the schedule entries that actually describe a channel.
    # Iterate the values directly; the channel names (keys) are unused.
    json_data.json = [
        channel_data for channel_data in channels.values()
        if "channel" in channel_data
    ]
    return json_data, []