def update_json_video_item(self, item):
    """ Updates an existing MediaItem with more data.

    Used to update non-complete MediaItems (self.complete = False). This
    could include opening the item's URL to fetch more data and then process
    that data or retrieve its real media-URL.

    The method should at least:
    * cache the thumbnail to disk (use self.noImage if no thumb is available).
    * set at least one MediaItemPart with a single MediaStream.
    * set self.complete = True.

    If the returned item does not have a MediaItemPart then the self.complete
    flag will automatically be set back to False.

    :param MediaItem item: the original MediaItem that needs updating.

    :return: The original item with more data added to its properties.
    :rtype: MediaItem

    """

    headers = {}
    if self.localIP:
        headers.update(self.localIP)

    data = UriHandler.open(item.url, proxy=self.proxy, additional_headers=headers)
    video_data = JsonHelper(data)
    stream_data = video_data.get_value("mediaAssetsOnDemand")
    if not stream_data:
        return item

    use_adaptive = AddonSettings.use_adaptive_stream_add_on()
    stream_data = stream_data[0]
    part = item.create_new_empty_media_part()

    if "hlsUrl" in stream_data:
        hls_url = stream_data["hlsUrl"]
        if use_adaptive:
            stream = part.append_media_stream(hls_url, 0)
            M3u8.set_input_stream_addon_input(stream, self.proxy, headers=headers)
            item.complete = True
        else:
            for s, b in M3u8.get_streams_from_m3u8(hls_url, self.proxy, headers=headers):
                item.complete = True
                part.append_media_stream(s, b)

    if "timedTextSubtitlesUrl" in stream_data and stream_data["timedTextSubtitlesUrl"]:
        sub_url = stream_data["timedTextSubtitlesUrl"].replace(".ttml", ".vtt")
        sub_url = HtmlEntityHelper.url_decode(sub_url)
        part.Subtitle = SubtitleHelper.download_subtitle(sub_url, format="webvtt")

    return item
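# Illustrative sketch (not part of the original source): the minimal JSON shape
# that update_json_video_item() expects. The field names ("mediaAssetsOnDemand",
# "hlsUrl", "timedTextSubtitlesUrl") are taken from the parsing code above; the
# URLs are made-up placeholders.
EXAMPLE_ON_DEMAND_RESPONSE = {
    "mediaAssetsOnDemand": [
        {
            "hlsUrl": "https://example.com/ondemand/master.m3u8",
            "timedTextSubtitlesUrl": "https://example.com/subs/episode.ttml"
        }
    ]
}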
def __update_m3u8(self, url, part, headers, use_kodi_hls):
    """ Update a video that has M3u8 streams.

    :param str url:                 The URL for the stream.
    :param MediaItemPart part:      The new part that needs updating.
    :param dict[str,str] headers:   The URL headers to use.
    :param bool use_kodi_hls:       Should we use the InputStream Adaptive add-on?

    """

    # First see if there are streams in this file, else check the second location.
    for s, b in M3u8.get_streams_from_m3u8(url, self.proxy, headers=headers):
        if use_kodi_hls:
            strm = part.append_media_stream(url, 0)
            M3u8.set_input_stream_addon_input(strm, headers=headers)
            # Only the main M3u8 is needed
            break
        else:
            part.append_media_stream(s, b)

    if not part.MediaStreams and "manifest.m3u8" in url:
        Logger.warning("No streams found in %s, trying alternative with 'master.m3u8'", url)
        url = url.replace("manifest.m3u8", "master.m3u8")
        for s, b in M3u8.get_streams_from_m3u8(url, self.proxy, headers=headers):
            if use_kodi_hls:
                strm = part.append_media_stream(url, 0)
                M3u8.set_input_stream_addon_input(strm, headers=headers)
                # Only the main M3u8 is needed
                break
            else:
                part.append_media_stream(s, b)

    # check for subs
    # https://mtgxse01-vh.akamaihd.net/i/201703/13/DCjOLN_1489416462884_427ff3d3_,48,260,460,900,1800,2800,.mp4.csmil/master.m3u8?__b__=300&hdnts=st=1489687185~exp=3637170832~acl=/*~hmac=d0e12e62c219d96798e5b5ef31b11fa848724516b255897efe9808c8a499308b&cc1=name=Svenska%20f%C3%B6r%20h%C3%B6rselskadade~default=no~forced=no~lang=sv~uri=https%3A%2F%2Fsubstitch.play.mtgx.tv%2Fsubtitle%2Fconvert%2Fxml%3Fsource%3Dhttps%3A%2F%2Fcdn-subtitles-mtgx-tv.akamaized.net%2Fpitcher%2F20xxxxxx%2F2039xxxx%2F203969xx%2F20396967%2F20396967-swt.xml%26output%3Dm3u8
    # https://cdn-subtitles-mtgx-tv.akamaized.net/pitcher/20xxxxxx/2039xxxx/203969xx/20396967/20396967-swt.xml&output=m3u8
    if "uri=" in url and not part.Subtitle:
        Logger.debug("Extracting subs from M3u8")
        sub_url = url.rsplit("uri=")[-1]
        sub_url = HtmlEntityHelper.url_decode(sub_url)
        sub_data = UriHandler.open(sub_url, proxy=self.proxy)
        subs = [line for line in sub_data.split("\n") if line.startswith("http")]
        if subs:
            part.Subtitle = SubtitleHelper.download_subtitle(subs[0], format='webvtt', proxy=self.proxy)
    return
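# Illustrative sketch (hypothetical, not from the original channel): how
# __update_m3u8() would typically be driven from an update_video_item() method
# on the same channel class. The m3u8 URL is a placeholder; in the real channel
# it would first be scraped from the video page. The helper names
# (AddonSettings.use_adaptive_stream_add_on, create_new_empty_media_part,
# part.MediaStreams, self.localIP) are taken from the surrounding code.
def update_video_item(self, item):
    use_kodi_hls = AddonSettings.use_adaptive_stream_add_on()
    headers = {}
    if self.localIP:
        headers.update(self.localIP)

    part = item.create_new_empty_media_part()
    self.__update_m3u8("https://example.com/video/manifest.m3u8", part, headers, use_kodi_hls)

    # Only mark the item complete once at least one stream was actually added.
    item.complete = bool(part.MediaStreams)
    return item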
def create_page_item(self, result_set):
    """ Creates a MediaItem of type 'page' using the result_set from the regex.

    This method creates a new MediaItem from the Regular Expression or Json
    results <result_set>. The method should be implemented by derived classes
    and is specific to the channel.

    :param list[str]|dict[str,str] result_set: The result_set of the self.episodeItemRegex

    :return: A new MediaItem of type 'page'.
    :rtype: MediaItem|None

    """

    item = chn_class.Channel.create_page_item(self, result_set)
    url = "%s/auvio/archives%s%s" % (
        self.baseUrl, HtmlEntityHelper.url_decode(result_set[0]), result_set[1])
    item.url = url
    return item
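# Illustrative only (hypothetical capture values): create_page_item() above
# simply glues the two regex captures onto "<baseUrl>/auvio/archives". A
# result_set such as the one below would yield
# "<self.baseUrl>/auvio/archives/emissions/series?p=2" after url_decode().
example_page_result_set = ["%2Femissions%2Fseries", "?p=2"]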
def get_streams_from_you_tube(url, proxy=None, use_add_on=True):
    """ Parses standard YouTube videos and returns a list of tuples with streams
    and bitrates that can be used by other methods.

    :param ProxyInfo proxy:     The proxy to use for opening
    :param str url:             The url to download
    :param bool use_add_on:     Should we use the Youtube add-on if available

    Can be used like this:

        part = item.create_new_empty_media_part()
        for s, b in YouTube.get_streams_from_you_tube(url, self.proxy):
            item.complete = True
            # s = self.get_verifiable_video_url(s)
            part.append_media_stream(s, b)

    :return: a list of streams with their bitrate and optionally the audio streams.
    :rtype: list[tuple[str,str]]

    """

    you_tube_streams = []
    you_tube_add_on_available = xbmc.getCondVisibility('System.HasAddon("plugin.video.youtube")') == 1

    if you_tube_add_on_available and use_add_on:
        Logger.info("Found Youtube add-on. Using it")
        you_tube_streams.append((YouTube.__play_you_tube_url(url), 0))
        Logger.trace(you_tube_streams)
        return you_tube_streams
    Logger.info("No Kodi Youtube Video add-on was found. Falling back.")

    if "watch?v=" in url:
        video_id = url.split("?v=")[-1]
        Logger.debug("Using Youtube ID '%s' retrieved from '%s'", video_id, url)
        # get the meta data url
        url = "https://www.youtube.com/get_video_info?hl=en_GB&asv=3&video_id=%s" % (video_id, )
    elif "get_video_info" not in url:
        Logger.error("Invalid Youtube URL specified: '%s'", url)
        return []

    data = UriHandler.open(url, proxy=proxy)
    if isinstance(data, bytes):
        data = data.decode()

    # get the stream data from the page
    # Up to 720p with audio and video combined.
    url_encoded_fmt_stream_map = Regexer.do_regex("url_encoded_fmt_stream_map=([^&]+)", data)
    # Up to 4K with audio and video split.
    # url_encoded_fmt_stream_map = Regexer.do_regex("adaptive_fmts=([^&]+)", data)
    url_encoded_fmt_stream_map_data = HtmlEntityHelper.url_decode(url_encoded_fmt_stream_map[0])

    # split per stream
    streams = url_encoded_fmt_stream_map_data.split(',')

    for stream in streams:
        # let's create a new part
        # noinspection PyTypeChecker
        qs_data = dict([x.split("=") for x in stream.split("&")])
        Logger.trace(qs_data)

        if "itag" in qs_data and "bitrate" not in qs_data:
            i_tag = int(qs_data.get('itag', -1))
            stream_encoding = YouTube.__YouTubeEncodings.get(i_tag, None)
            if stream_encoding is None:
                # if the i_tag was not in the list, skip it.
                Logger.debug("Not using i_tag %s as it is not in the list of supported encodings.", i_tag)
                continue
            bitrate = stream_encoding[0]
        else:
            bitrate = int(qs_data['bitrate']) / 1000

        signature = qs_data.get('s', None)
        quality = qs_data.get('quality_label', qs_data.get('quality'))
        if not quality:
            Logger.debug("Missing 'quality_label', skipping: %s", qs_data)
            continue

        video_url = HtmlEntityHelper.url_decode(qs_data['url'])
        if signature is None:
            url = video_url
        else:
            url = "%s&signature=%s" % (video_url, signature)

        you_tube_streams.append((url, bitrate))

    return you_tube_streams
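# Illustrative sketch (not from the original source): roughly what a single
# decoded entry of "url_encoded_fmt_stream_map" looks like after splitting on
# ',' and '&'. The keys ("itag", "quality", "s", "url") match the ones the
# parser above reads; the values are made-up placeholders.
EXAMPLE_STREAM_ENTRY = {
    "itag": "22",                    # looked up in YouTube.__YouTubeEncodings to get a bitrate
    "quality": "hd720",              # the parser falls back from "quality_label" to "quality"
    "s": "ABCDEF0123456789",         # optional signature, appended as "&signature=..."
    "url": "https%3A%2F%2Fexample.googlevideo.com%2Fvideoplayback%3Fplaceholder"
}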