Example No. 1
    def create_episode_item_api(self, result_set):
        """ Creates a new MediaItem for an episode.

        This method creates a new MediaItem from the Regular Expression or Json
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        :param list[str] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'folder'.
        :rtype: MediaItem|None

        """

        if not isinstance(result_set, dict):
            json_data = result_set[1].replace("&quot;", "\"")
            result_set = JsonHelper(json_data)
            result_set = result_set.json

        brand = result_set["brand"]
        if brand != self.__channel_brand:
            return None

        title = result_set["title"]
        url = "{}{}".format(self.baseUrl, result_set["link"])
        item = MediaItem(title, url)
        item.description = result_set["description"]
        item.isGeoLocked = True

        images = result_set["images"]
        item.poster = HtmlEntityHelper.convert_html_entities(images.get("poster"))
        item.thumb = HtmlEntityHelper.convert_html_entities(images.get("teaser"))
        return item
    def create_folder_item(self, result_set):
        """ Creates a MediaItem of type 'folder' using the result_set from the regex.

        This method creates a new MediaItem from the Regular Expression or Json
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        :param list[str]|dict[str,str] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'folder'.
        :rtype: MediaItem|None

        """

        if len(result_set) > 3 and result_set[3] != "":
            Logger.debug("Sub category folder found.")
            url = parse.urljoin(
                self.baseUrl,
                HtmlEntityHelper.convert_html_entities(result_set[3]))
            name = "\a.: %s :." % (result_set[4], )
            item = MediaItem(name, url)
            item.complete = True
            item.type = "folder"
            return item

        url = parse.urljoin(
            self.baseUrl,
            HtmlEntityHelper.convert_html_entities(result_set[0]))
        name = HtmlEntityHelper.convert_html_entities(result_set[1])

        helper = HtmlHelper(result_set[2])
        description = helper.get_tag_content("div", {'class': 'description'})

        item = MediaItem(name, "%s/RSS" % (url, ))
        item.type = 'folder'
        item.description = description.strip()

        date = helper.get_tag_content("div", {'class': 'date'})
        if date == "":
            date = helper.get_tag_content("span",
                                          {'class': 'lastPublishedDate'})

        if not date == "":
            date_parts = Regexer.do_regex(r"(\w+) (\d+)[^<]+, (\d+)", date)
            if len(date_parts) > 0:
                date_parts = date_parts[0]
                month_part = date_parts[0].lower()
                day_part = date_parts[1]
                year_part = date_parts[2]

                try:
                    month = DateHelper.get_month_from_name(month_part, "en")
                    item.set_date(year_part, month, day_part)
                except:
                    Logger.error("Error matching month: %s",
                                 month_part,
                                 exc_info=True)

        item.complete = True
        return item
    def create_episode_item(self, result_set):
        """ Creates a new MediaItem for an episode.

        This method creates a new MediaItem from the Regular Expression or Json
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        :param list[str]|dict[str,str] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'folder'.
        :rtype: MediaItem|None

        """

        url = parse.urljoin(
            self.baseUrl,
            HtmlEntityHelper.convert_html_entities(result_set[0]))
        name = result_set[1]

        if name == "Tags":
            return None
        if name == "Authors":
            return None
        if name == "Most Viewed":
            return None
        if name == "Top Rated":
            name = "Recent"
            url = "http://channel9.msdn.com/Feeds/RSS"
        else:
            url = "%s?sort=atoz" % (url, )

        item = MediaItem(name, url)
        item.complete = True
        return item
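
The date handling in create_folder_item above hinges on the regex "(\w+) (\d+)[^<]+, (\d+)", which pulls the month name, day and year out of a human-readable date such as "January 27th, 2020". A minimal, self-contained sketch of that step using only the standard library (re and datetime stand in for the Regexer and DateHelper helpers used in the snippet; the sample date string is made up):

import re
from datetime import datetime

date_text = "January 27th, 2020"  # hypothetical date text scraped from a page

date_parts = re.findall(r"(\w+) (\d+)[^<]+, (\d+)", date_text)
if date_parts:
    month_part, day_part, year_part = date_parts[0]
    # DateHelper.get_month_from_name presumably maps an English month name to
    # its number; datetime.strptime("%B") does the same job here.
    month = datetime.strptime(month_part, "%B").month
    print(year_part, month, day_part)  # -> 2020 1 27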
Example No. 4
    def __add_breadcrumb(self,
                         handle,
                         channel,
                         selected_item,
                         last_only=False):
        """ Updates the Kodi category with a breadcrumb to the current parent item

        :param int handle:                      The Kodi file handle
        :param ChannelInfo|Channel channel:     The channel to which the item belongs
        :param MediaItem selected_item:         The item from which to show the breadcrumbs
        :param bool last_only:                  Show only the last item

        """

        bread_crumb = None
        if selected_item is not None:
            bread_crumb = selected_item.name
        elif self.channelObject is not None:
            bread_crumb = channel.channelName

        if not bread_crumb:
            return

        bread_crumb = HtmlEntityHelper.convert_html_entities(bread_crumb)
        xbmcplugin.setPluginCategory(handle=handle, category=bread_crumb)
    def create_episode_item(self, result_set):
        """ Creates a new MediaItem for an episode.

        This method creates a new MediaItem from the Regular Expression or Json
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        :param list[str]|dict[str,str] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'folder'.
        :rtype: MediaItem|None

        """

        Logger.trace(result_set)

        genres = result_set[0]
        if self.__genre and self.__genre not in genres:
            Logger.debug("Item '%s' filtered due to genre: %s", result_set[2], genres)
            return None

        url = result_set[1]
        if "&" in url:
            url = HtmlEntityHelper.convert_html_entities(url)

        if not url.startswith("http:"):
            url = "%s%s" % (self.baseUrl, url)

        # get the ajax page for less bandwidth
        url = "%s?sida=1&amp;sort=tid_stigande&embed=true" % (url, )

        item = MediaItem(result_set[2], url)
        item.complete = True
        item.isGeoLocked = True
        return item
Example No. 6
    def __convert_ttml_to_srt(ttml):
        """Converts sami format into SRT format:

        Arguments:
        ttml : string - TTML (Timed Text Markup Language) subtitle format

        Returns:
        SRT formatted subtitle:

        Example:
            1
            00:00:20,000 --> 00:00:24,400
            text

        """

        pars_regex = r'<p[^>]+begin="([^"]+)\.(\d+)"[^>]+end="([^"]+)\.(\d+)"[^>]*>([\w\W]+?)</p>'
        subs = Regexer.do_regex(pars_regex, ttml)

        srt = ""
        i = 1

        for sub in subs:
            try:
                start = "%s,%03d" % (sub[0], int(sub[1]))
                end = "%s,%03d" % (sub[2], int(sub[3]))
                text = sub[4].replace("<br />", "\n")
                text = HtmlEntityHelper.convert_html_entities(text)
                text = text.replace("\r\n", "")
                srt = "%s\n%s\n%s --> %s\n%s\n" % (srt, i, start, end, text.strip())
                i += 1
            except:
                Logger.error("Error parsing subtitle: %s", sub[1], exc_info=True)

        return srt
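
A self-contained sketch of the same TTML-to-SRT conversion, with re and html.unescape standing in for Regexer and HtmlEntityHelper (an assumption about what those helpers do); the TTML fragment is made up for illustration:

import re
from html import unescape

ttml = ('<p begin="00:00:20.000" end="00:00:24.400" region="bottom">'
        'First line<br />Second &amp; last line</p>')

pars_regex = r'<p[^>]+begin="([^"]+)\.(\d+)"[^>]+end="([^"]+)\.(\d+)"[^>]*>([\w\W]+?)</p>'
srt = ""
for i, sub in enumerate(re.findall(pars_regex, ttml), 1):
    start = "%s,%03d" % (sub[0], int(sub[1]))
    end = "%s,%03d" % (sub[2], int(sub[3]))
    text = unescape(sub[4].replace("<br />", "\n"))
    srt += "%s\n%s --> %s\n%s\n\n" % (i, start, end, text.strip())

print(srt)
# 1
# 00:00:20,000 --> 00:00:24,400
# First line
# Second & last line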
Example No. 7
    def update_video_item(self, item):
        data = UriHandler.open(item.url,
                               proxy=self.proxy,
                               additional_headers=item.HttpHeaders)
        media_regex = 'data-media="([^"]+)"'
        media_info = Regexer.do_regex(media_regex, data)[0]
        media_info = HtmlEntityHelper.convert_html_entities(media_info)
        media_info = JsonHelper(media_info)
        Logger.trace(media_info)

        # sources
        part = item.create_new_empty_media_part()
        # high, web, mobile, url
        media_sources = media_info.json.get("sources", {})
        for quality in media_sources:
            url = media_sources[quality]
            if quality == "high":
                bitrate = 2000
            elif quality == "web":
                bitrate = 800
            elif quality == "mobile":
                bitrate = 400
            else:
                bitrate = 0
            part.append_media_stream(url, bitrate)

        # geoLocRestriction
        item.isGeoLocked = not media_info.get_value(
            "geoLocRestriction", fallback="world") == "world"
        item.complete = True
        return item
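
The quality-to-bitrate selection above can also be expressed as a lookup table. A small sketch of that variant: the quality names and bitrates are taken from the snippet, the sources dict is made up, and the fallback of 0 for unknown qualities is kept.

# Same mapping as the if/elif chain above, written as a dict lookup.
BITRATES = {"high": 2000, "web": 800, "mobile": 400}

media_sources = {"high": "https://example.com/high.mp4",      # hypothetical
                 "mobile": "https://example.com/mobile.mp4"}  # sources

for quality, url in media_sources.items():
    bitrate = BITRATES.get(quality, 0)
    # part.append_media_stream(url, bitrate) would be called here
    print(quality, bitrate, url)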
Example No. 8
    def get_kodi_item(self):
        """ Creates an Kodi ListItem object for this channel
        
        :return: a Kodi ListItem with all required properties set.
        :rtype: xbmcgui.ListItem

        """

        name = HtmlEntityHelper.convert_html_entities(self.channelName)
        description = HtmlEntityHelper.convert_html_entities(
            self.channelDescription)

        if self.uses_external_addon:
            from resources.lib.xbmcwrapper import XbmcWrapper
            name = "{} {}".format(
                name, XbmcWrapper.get_external_add_on_label(self.addonUrl))

        self.icon = self.__get_image_path(self.icon)
        item = kodifactory.list_item(name, description)
        item.setArt({'thumb': self.icon, 'icon': self.icon})

        # http://mirrors.kodi.tv/docs/python-docs/14.x-helix/xbmcgui.html#ListItem-setInfo
        item.setInfo(
            "video",
            {
                "Title": name,
                # "Count": self.sortOrderPerCountry,
                # "TrackNumber": self.sortOrder,
                "Genre": LanguageHelper.get_full_language(self.language),
                # "Tagline": description,
                "Plot": description
            })

        if self.poster is not None:
            self.poster = self.__get_image_path(self.poster)
            item.setArt({'poster': self.poster})

        if AddonSettings.hide_fanart():
            return item

        if self.fanart is not None:
            self.fanart = self.__get_image_path(self.fanart)
        else:
            self.fanart = Config.fanart
        item.setArt({'fanart': self.fanart})
        return item
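
A hedged sketch of how such a ListItem would typically be handed back to Kodi when the channel list is built; xbmcplugin.addDirectoryItem and endOfDirectory are standard Kodi API calls, while the wrapper function and the plugin:// URL are purely illustrative:

import sys
import xbmcplugin


def add_channel_to_directory(channel_info):
    # channel_info is assumed to expose get_kodi_item() and channelName,
    # as in the snippet above; the URL scheme below is made up.
    handle = int(sys.argv[1])  # the add-on handle Kodi passes on invocation
    kodi_item = channel_info.get_kodi_item()
    url = "plugin://plugin.video.retrospect/?channel={}".format(channel_info.channelName)
    xbmcplugin.addDirectoryItem(handle, url, kodi_item, True)
    xbmcplugin.endOfDirectory(handle)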
Example No. 9
    def get_kodi_item(self):
        """ Creates an Kodi ListItem object for this channel
        
        :return: a Kodi ListItem with all required properties set.
        :rtype: xbmcgui.ListItem

        """

        name = HtmlEntityHelper.convert_html_entities(self.channelName)
        description = HtmlEntityHelper.convert_html_entities(
            self.channelDescription)

        if self.uses_external_addon:
            other = LanguageHelper.get_localized_string(
                LanguageHelper.OtherAddon)
            name = "{0} {1} [COLOR gold]{2}[/COLOR]".format(
                name, unichr(187), other)

        self.icon = self.__get_image_path(self.icon)
        item = xbmcgui.ListItem(name, description)
        item.setArt({'thumb': self.icon, 'icon': self.icon})

        # http://mirrors.kodi.tv/docs/python-docs/14.x-helix/xbmcgui.html#ListItem-setInfo
        item.setInfo(
            "video",
            {
                "Title": name,
                # "Count": self.sortOrderPerCountry,
                # "TrackNumber": self.sortOrder,
                "Genre": LanguageHelper.get_full_language(self.language),
                # "Tagline": description,
                "Plot": description
            })

        if AddonSettings.hide_fanart():
            return item

        if self.fanart is not None:
            self.fanart = self.__get_image_path(self.fanart)
        else:
            self.fanart = os.path.join(Config.rootDir, "fanart.jpg")
        item.setArt({'fanart': self.fanart})
        return item
Example No. 10
    def select_channels(self):
        """ Selects the channels that should be visible.

        @return: None
        """

        valid_channels = ChannelIndex.get_register().get_channels(
            include_disabled=True)
        channels_to_show = [c for c in valid_channels if c.visible]
        # The old way
        # channels_to_show = filter(lambda c: c.visible, valid_channels)

        selected_channels = [c for c in channels_to_show if c.enabled]
        selected_indices = list(
            [channels_to_show.index(c) for c in selected_channels])
        Logger.debug("Currently selected channels: %s", selected_indices)

        channel_to_show_names = [
            HtmlEntityHelper.convert_html_entities(c.channelName)
            for c in channels_to_show
        ]
        # The old way
        # channel_to_show_names = list(map(lambda c: HtmlEntityHelper.convert_html_entities(c.channelName), channels_to_show))

        dialog = xbmcgui.Dialog()
        heading = LanguageHelper.get_localized_string(
            LanguageHelper.ChannelSelection)[:-1]
        selected_channels = dialog.multiselect(heading,
                                               channel_to_show_names,
                                               preselect=selected_indices)
        if selected_channels is None:
            return

        selected_channels = list(selected_channels)
        Logger.debug("New selected channels:       %s", selected_channels)

        indices_to_remove = [
            i for i in selected_indices if i not in selected_channels
        ]
        indices_to_add = [
            i for i in selected_channels if i not in selected_indices
        ]
        for i in indices_to_remove:
            Logger.info("Hiding channel: %s", channels_to_show[i])
            AddonSettings.set_channel_visiblity(channels_to_show[i], False)

        for i in indices_to_add:
            Logger.info("Showing channel: %s", channels_to_show[i])
            AddonSettings.set_channel_visiblity(channels_to_show[i], True)

        self.refresh()
        return
Example No. 11
    def update_live_item(self, item):
        data = UriHandler.open(item.url,
                               proxy=self.proxy,
                               additional_headers=item.HttpHeaders)
        media_regex = 'data-media="([^"]+)"'
        media_info = Regexer.do_regex(media_regex, data)[0]
        media_info = HtmlEntityHelper.convert_html_entities(media_info)
        media_info = JsonHelper(media_info)
        Logger.trace(media_info)
        part = item.create_new_empty_media_part()

        hls_url = media_info.get_value("streamUrl")
        if hls_url is not None and "m3u8" in hls_url:
            Logger.debug("Found HLS url for %s: %s",
                         media_info.json["streamName"], hls_url)

            for s, b in M3u8.get_streams_from_m3u8(hls_url, self.proxy):
                part.append_media_stream(s, b)
                item.complete = True
        else:
            Logger.debug("No HLS url found for %s. Fetching RTMP Token.",
                         media_info.json["streamName"])
            # fetch the token:
            token_url = "%s/api/media/streaming?streamname=%s" \
                        % (self.baseUrl, media_info.json["streamName"])

            token_data = UriHandler.open(token_url,
                                         proxy=self.proxy,
                                         additional_headers=item.HttpHeaders,
                                         no_cache=True)

            token_data = JsonHelper(token_data)
            token = token_data.get_value("token")
            Logger.debug("Found token '%s' for '%s'", token,
                         media_info.json["streamName"])

            rtmp_url = "rtmp://rtmp.rtbf.be/livecast/%s?%s pageUrl=%s tcUrl=rtmp://rtmp.rtbf.be/livecast" \
                       % (media_info.json["streamName"], token, self.baseUrl)
            rtmp_url = self.get_verifiable_video_url(rtmp_url)
            part.append_media_stream(rtmp_url, 0)
            item.complete = True

        item.isGeoLocked = not media_info.get_value(
            "geoLocRestriction", fallback="world") == "world"
        return item
Example No. 12
    def update_video_item(self, item):
        """ Updates an existing MediaItem with more data.

        Used to update incomplete MediaItems (self.complete = False). This
        could include opening the item's URL to fetch more data and then processing
        that data or retrieving its real media URL.

        The method should at least:
        * cache the thumbnail to disk (use self.noImage if no thumb is available).
        * set at least one MediaItemPart with a single MediaStream.
        * set self.complete = True.

        If the returned item does not have a MediaItemPart, then the self.complete flag
        will automatically be set back to False.

        :param MediaItem item: the original MediaItem that needs updating.

        :return: The original item with more data added to its properties.
        :rtype: MediaItem

        """

        Logger.debug('Starting update_video_item for %s (%s)', item.name,
                     self.channelName)

        # Now the media URL is derived. First we try WMV.
        data = UriHandler.open(item.url)

        urls = Regexer.do_regex(
            '<a href="([^"]+.(?:wmv|mp4))">(High|Medium|Mid|Low|MP4)', data)
        media_part = item.create_new_empty_media_part()
        for url in urls:
            if url[1].lower() == "high":
                bitrate = 2000
            elif url[1].lower() == "medium" or url[1].lower() == "mid":
                bitrate = 1200
            elif url[1].lower() == "low" or url[1].lower() == "mp4":
                bitrate = 200
            else:
                bitrate = 0
            media_part.append_media_stream(
                HtmlEntityHelper.convert_html_entities(url[0]), bitrate)

        item.complete = True
        return item
Example No. 13
    def __convert_json_subtitle_to_srt(json_subtitle):
        """Converts Json Subtitle format into SRT format:

        Arguments:
        json_subtitle : string - JSON subtitle format

        Returns:
        SRT formatted subtitle:

        Example:
            {"startMillis":80,"endMillis":4170,"text":"Ett Kanal 5:\nAlla gonblick i \"100 jdare!!!\"?","posX":0.5,"posY":0.9,"colorR":220,"colorG":220,"colorB":220}

        Returns
            1
            00:00:20,000 --> 00:00:24,400
            text

        The format of the timecode is Hours:Minutes:Seconds:Ticks where a "Tick"
        is a value of between 0 and 249 and lasts 4 milliseconds.

        """

        regex = r'"startMillis":(\d+),"endMillis":(\d+),"text":"(.+?)(?=["] *,)'
        subs = Regexer.do_regex(regex, json_subtitle)

        # Initialize the SRT output and the subtitle counter
        srt = ""
        i = 1

        for sub in subs:
            try:
                start = SubtitleHelper.__convert_to_time(sub[0])
                end = SubtitleHelper.__convert_to_time(sub[1])

                text = sub[2].replace('\"', '"')
                text = JsonHelper.convert_special_chars(text)
                text = HtmlEntityHelper.convert_html_entities(text)
                srt = "%s\n%s\n%s --> %s\n%s\n" % (srt, i, start, end,
                                                   text.strip())
                i += 1
            except:
                Logger.error("Error parsing subtitle: %s", sub, exc_info=True)

        return srt
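
SubtitleHelper.__convert_to_time itself is not shown in these examples. A hedged guess at what it does for the millisecond values above, turning startMillis/endMillis into the HH:MM:SS,mmm notation that SRT expects:

def convert_millis_to_srt_time(millis):
    # e.g. 4170 -> "00:00:04,170"
    millis = int(millis)
    hours, rest = divmod(millis, 3600 * 1000)
    minutes, rest = divmod(rest, 60 * 1000)
    seconds, milliseconds = divmod(rest, 1000)
    return "%02d:%02d:%02d,%03d" % (hours, minutes, seconds, milliseconds)


print(convert_millis_to_srt_time(80))    # 00:00:00,080
print(convert_millis_to_srt_time(4170))  # 00:00:04,170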
Example No. 14
    def __convert_web_vtt_to_srt(webvvt):
        """Converts sami format into SRT format:

        Arguments:
        webvvt : string - WebVTT (Web Video Text Tracks) subtitle format

        Returns:
        SRT formatted subtitle:

        Example:
            1
            00:00:20,000 --> 00:00:24,400
            text

        """

        count = 0
        result = ""
        for line in webvvt.split("\n"):
            line = line.strip()
            if line.endswith("WEBVTT") or line.startswith("X-TIMESTAMP"):
                continue
            if not line:
                continue

            if " --> " in line:
                count += 1
                start, end = line.split(" --> ")
                result = "%s\n\n%s" % (result, count)
                if start.count(":") == 1:
                    result = "%s\n00:%s --> 00:%s" % (
                        result, start.replace(".", ","), end.replace(".", ","))
                else:
                    result = "%s\n%s --> %s" % (result, start.replace(
                        ".", ","), end.replace(".", ","))
            elif line == str(count + 1):
                # we apparently have built-in numbering using WebVTT cue-numbering
                continue
            else:
                result = "%s\n%s" % (
                    result, HtmlEntityHelper.convert_html_entities(line))

        return result
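
For reference, a made-up WebVTT fragment and the output the conversion above should produce for it (assuming convert_html_entities unescapes entities such as &amp;): timestamps without an hour component get a "00:" prefix and the decimal point becomes a comma.

webvtt = """WEBVTT

00:20.000 --> 00:24.400
First &amp; second line

00:24.600 --> 00:27.800
Another cue"""

# Expected result of __convert_web_vtt_to_srt(webvtt):
#
# 1
# 00:00:20,000 --> 00:00:24,400
# First & second line
#
# 2
# 00:00:24,600 --> 00:00:27,800
# Another cue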
Example No. 15
    def extract_json_data(self, data):
        """ Extracts the JSON data for video parsing

        The return values should always be instantiated in at least ("", []).

        :param str data: The retrieved data that was loaded for the current item and URL.

        :return: A tuple of the data and a list of MediaItems that were generated.
        :rtype: tuple[JsonHelper,list[MediaItem]]

        """

        Logger.info("Performing Pre-Processing")
        items = []
        json_text = Regexer.do_regex(
            r'ProgramDescription" data-react-props="([^"]+)', data)[0]
        json_text = HtmlEntityHelper.convert_html_entities(json_text)
        json_data = JsonHelper(json_text)

        Logger.debug("Pre-Processing finished")
        return json_data, items
    def show_channel_settings(channel):
        """ Show the add-on settings and pre-selects the channel settings tab with the correct channel
        selected.

        :param channel: The channel to display settings for.
        """

        channel_name = channel.safe_name

        # remove some HTML chars
        channel_name = HtmlEntityHelper.convert_html_entities(channel_name)
        Logger.debug("Showing channel settings for channel: %s (%s)", channel_name, channel.channelName)

        # Set the channel to be the preselected one
        AddonSettings.store(KODI).set_setting("config_channel", channel_name)

        # show settings and focus on the channel settings tab
        if AddonSettings.is_min_version(AddonSettings.KodiLeia):
            return AddonSettings.show_settings(-98)
        else:
            return AddonSettings.show_settings(102)
Example No. 17
    def __convert_sami_to_srt(sami):
        """Converts sami format into SRT format:

        Arguments:
        sami : string - SAMI subtitle format

        Returns:
        SRT formatted subtitle:

        Example:
            1
            00:00:20,000 --> 00:00:24,400
            text

        """
        pars_regex = r'<sync start="(\d+)"><p[^>]+>([^<]+)</p></sync>\W+<sync start="(\d+)">'
        subs = Regexer.do_regex(pars_regex, sami)

        if len(subs) == 0:
            pars_regex2 = r'<sync start=(\d+)>\W+<p[^>]+>([^\n]+)\W+<sync start=(\d+)>'
            subs = Regexer.do_regex(pars_regex2, sami)

        srt = ""
        i = 1

        for sub in subs:
            try:
                start = SubtitleHelper.__convert_to_time(sub[0])
                end = SubtitleHelper.__convert_to_time(sub[2])
                text = sub[1]
                text = HtmlEntityHelper.convert_html_entities(text)
                srt = "%s\n%s\n%s --> %s\n%s\n" % (srt, i, start, end, text)
                i += 1
            except:
                Logger.error("Error parsing subtitle: %s",
                             sub[1],
                             exc_info=True)

        # re-encode to be able to write it
        return srt
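
Worth noting for the SAMI variant: a cue carries no end time of its own, so the end is taken from the start attribute of the following <sync> block, which is why the regex captures a trailing sync start. A tiny illustration with a made-up fragment (re stands in for Regexer):

import re

sami = ('<sync start="20000"><p class="subttl">First &amp; second</p></sync>\n'
        '<sync start="24400"><p class="subttl">&nbsp;</p></sync>')

pars_regex = r'<sync start="(\d+)"><p[^>]+>([^<]+)</p></sync>\W+<sync start="(\d+)">'
print(re.findall(pars_regex, sami))
# [('20000', 'First &amp; second', '24400')]
# 20000/24400 are milliseconds; the next sync's start (24400) becomes the end time.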
Example No. 18
    def __full_decode_text(self, string_value):
        """ Decodes a byte encoded string with HTML content into Unicode String

        Arguments:
        stringValue : string - The byte encoded string to decode

        Returns:
        An Unicode String with all HTML entities replaced by their UTF8 characters

        The decoding is done by first decode the string to UTF8 and then replace
        the HTML entities to their UTF8 characters.

        """

        if string_value is None:
            return None

        if string_value == "":
            return ""

        # Get rid of the HTML entities
        string_value = HtmlEntityHelper.convert_html_entities(string_value)
        return string_value
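
All of these examples lean on HtmlEntityHelper.convert_html_entities. Its implementation is not shown on this page, but its role is essentially that of html.unescape from the standard library; a minimal sketch under that assumption:

from html import unescape

# Roughly what convert_html_entities is used for in the snippets above:
print(unescape("Tom &amp; Jerry"))          # Tom & Jerry
print(unescape("&quot;quoted&quot; text"))  # "quoted" text
print(unescape("caf&eacute;"))              # café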
Example No. 19
    def create_page_item(self, result_set):
        """ Creates a MediaItem of type 'page' using the result_set from the regex.

        This method creates a new MediaItem from the Regular Expression or Json
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        :param list[str]|dict[str,str] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'page'.
        :rtype: MediaItem|None

        """

        url = parse.urljoin(
            self.baseUrl,
            HtmlEntityHelper.convert_html_entities(result_set[0]))
        item = MediaItem(result_set[self.pageNavigationRegexIndex], url)
        item.type = "page"
        item.complete = True

        Logger.trace("Created '%s' for url %s", item.name, item.url)
        return item
Example No. 20
    def update_video_item(self, item):
        """ Updates an existing MediaItem with more data.

        Used to update incomplete MediaItems (self.complete = False). This
        could include opening the item's URL to fetch more data and then processing
        that data or retrieving its real media URL.

        The method should at least:
        * cache the thumbnail to disk (use self.noImage if no thumb is available).
        * set at least one MediaItemPart with a single MediaStream.
        * set self.complete = True.

        If the returned item does not have a MediaItemPart, then the self.complete flag
        will automatically be set back to False.

        :param MediaItem item: the original MediaItem that needs updating.

        :return: The original item with more data added to its properties.
        :rtype: MediaItem

        """

        Logger.debug('Starting update_video_item for %s (%s)', item.name,
                     self.channelName)

        # noinspection PyStatementEffect
        """
        <script type="text/javascript">/* <![CDATA[ */ var movieFlashVars = "
        image=http://assets.ur.se/id/147834/images/1_l.jpg
        file=/147000-147999/147834-20.mp4
        plugins=http://urplay.se/jwplayer/plugins/gapro-1.swf,http://urplay.se/jwplayer/plugins/sharing-2.swf,http://urplay.se/jwplayer/plugins/captions/captions.swf
        sharing.link=http://urplay.se/147834
        gapro.accountid=UA-12814852-8
        captions.margin=40
        captions.fontsize=11
        captions.back=false
        captions.file=http://undertexter.ur.se/147000-147999/147834-19.tt
        streamer=rtmp://streaming.ur.se/ondemand
        autostart=False"; var htmlVideoElementSource = "http://streaming.ur.se/ondemand/mp4:147834-23.mp4/playlist.m3u8?location=SE"; /* //]]> */ </script>

        """

        data = UriHandler.open(item.url)
        # Extract stream JSON data from HTML
        streams = Regexer.do_regex(
            r'ProgramContainer" data-react-props="({[^"]+})"', data)
        json_data = streams[0]
        json_data = HtmlEntityHelper.convert_html_entities(json_data)
        json = JsonHelper(json_data, logger=Logger.instance())
        Logger.trace(json.json)

        item.MediaItemParts = []

        # generic server information
        proxy_data = UriHandler.open(
            "https://streaming-loadbalancer.ur.se/loadbalancer.json",
            no_cache=True)
        proxy_json = JsonHelper(proxy_data)
        proxy = proxy_json.get_value("redirect")
        Logger.trace("Found RTMP Proxy: %s", proxy)

        stream_infos = json.get_value("program", "streamingInfo")
        part = item.create_new_empty_media_part()
        for stream_type, stream_info in stream_infos.items():
            Logger.trace(stream_info)
            default_stream = stream_info.get("default", False)
            bitrates = {
                "mp3": 400,
                "m4a": 250,
                "sd": 1200,
                "hd": 2000,
                "tt": None
            }
            for quality, bitrate in bitrates.items():
                stream = stream_info.get(quality)
                if stream is None:
                    continue
                stream_url = stream["location"]
                if quality == "tt":
                    part.Subtitle = SubtitleHelper.download_subtitle(
                        stream_url, format="ttml")
                    continue

                bitrate = bitrate if default_stream else bitrate + 1
                if stream_type == "raw":
                    bitrate += 1
                url = "https://%s/%smaster.m3u8" % (proxy, stream_url)
                part.append_media_stream(url, bitrate)

        item.complete = True
        return item
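
The bitrate arithmetic above is a tie-breaker: the default stream keeps the base bitrate for its quality, any non-default stream gets +1, and the "raw" stream type gets another +1, so otherwise-equal streams stay distinguishable when one is later picked by bitrate. A small sketch of just that rule with a made-up streamingInfo payload:

BITRATES = {"mp3": 400, "m4a": 250, "sd": 1200, "hd": 2000}

# Hypothetical program.streamingInfo payload, heavily trimmed.
stream_infos = {
    "hls": {"default": True, "sd": {"location": "a/"}, "hd": {"location": "b/"}},
    "raw": {"default": False, "sd": {"location": "c/"}},
}

for stream_type, stream_info in stream_infos.items():
    default_stream = stream_info.get("default", False)
    for quality, bitrate in BITRATES.items():
        stream = stream_info.get(quality)
        if stream is None:
            continue
        bitrate = bitrate if default_stream else bitrate + 1
        if stream_type == "raw":
            bitrate += 1
        print(stream_type, quality, bitrate)
# hls sd 1200, hls hd 2000, raw sd 1202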
Example No. 21
    def process_folder_list(self, item=None):  # NOSONAR
        """ Process the selected item and get's it's child items using the available dataparsers.

        Accepts an <item> and returns a list of MediaItems with at least name & url
        set. The following actions are done:

        * determining the correct parsers to use
        * call a pre-processor
        * parsing the data with the parsers
        * calling the creators for item creations

        If the item is None, we assume that we are dealing with the first call for this channel and the main list URI
        is used.

        :param MediaItem|None item: The parent item.

        :return: A list of MediaItems that form the children of the <item>.
        :rtype: list[MediaItem]

        """

        items = []
        self.parentItem = item

        if item is None:
            Logger.info(
                "process_folder_list :: No item was specified. Assuming it was the main channel list"
            )
            url = self.mainListUri
        elif len(item.items) > 0:
            return item.items
        else:
            url = item.url

        # Determine the handlers and process
        data_parsers = self.__get_data_parsers(url)
        # Exclude parsers that are only video updaters
        data_parsers = [
            p for p in data_parsers if not p.is_video_updater_only()
        ]

        if [p for p in data_parsers if p.LogOnRequired]:
            Logger.info("One or more dataparsers require logging in.")
            self.loggedOn = self.log_on()

        # Now set the headers here and not earlier, in case they might have been updated by the logon
        if item is not None and item.HttpHeaders:
            headers = item.HttpHeaders
        else:
            headers = self.httpHeaders

        # Let's retrieve the required data. Main URLs
        if url.startswith("http:") or url.startswith(
                "https:") or url.startswith("file:"):
            # Disable cache on live folders
            no_cache = item is not None and not item.is_playable(
            ) and item.isLive
            if no_cache:
                Logger.debug("Disabling cache for '%s'", item)
            data = UriHandler.open(url,
                                   proxy=self.proxy,
                                   additional_headers=headers,
                                   no_cache=no_cache)
        # Searching a site using search_site()
        elif url == "searchSite" or url == "#searchSite":
            Logger.debug("Starting to search")
            return self.search_site()
        # Labels instead of URLs
        elif url.startswith("#"):
            data = ""
        # Others
        else:
            Logger.debug("Unknown URL format. Setting data to ''")
            data = ""

        # first check if there is a generic pre-processor
        pre_procs = [p for p in data_parsers if p.is_generic_pre_processor()]
        num_pre_procs = len(pre_procs)
        Logger.trace("Processing %s Generic Pre-Processors DataParsers",
                     num_pre_procs)
        if num_pre_procs > 1:
            # warn for strange results if more than 1 generic pre-processor is present.
            Logger.warning(
                "More than one Generic Pre-Processor is found (%s). They are being processed in the "
                "order that Python likes which might result in unexpected result.",
                num_pre_procs)

        for data_parser in pre_procs:
            # remove it from the list
            data_parsers.remove(data_parser)

            # and process it
            Logger.debug("[DataParsers] Pre-Processing %s", data_parser)
            (data, pre_items) = data_parser.PreProcessor(data)
            items += pre_items

            if isinstance(data, JsonHelper):
                Logger.debug(
                    "Generic preprocessor resulted in JsonHelper data")

        # Split normal and post-processor data parsers
        generic_post_procs = [
            p for p in data_parsers if p.is_generic_post_processor()
        ]
        data_parsers = [p for p in data_parsers if p not in generic_post_procs]

        # Then the other handlers
        Logger.trace("Processing %s Normal DataParsers", len(data_parsers))
        handler_json = None
        for data_parser in data_parsers:
            Logger.debug("[DataParsers] Processing %s", data_parser)

            # Check for preprocessors
            if data_parser.PreProcessor:
                Logger.debug(
                    "[DataParsers] Processing DataParser.PreProcessor")
                (handler_data, pre_items) = data_parser.PreProcessor(data)
                items += pre_items
            else:
                handler_data = data

            Logger.debug("[DataParsers] Processing DataParser.Parser")
            if data_parser.Parser is None or (data_parser.Parser == ""
                                              and not data_parser.IsJson):
                if data_parser.Creator:
                    Logger.warning("No <parser> found for %s. Skipping.",
                                   data_parser.Creator)
                continue

            if data_parser.IsJson:
                if handler_json is None:
                    # Cache the json requests to improve performance
                    Logger.trace("Caching JSON results for Dataparsing")
                    if isinstance(handler_data, JsonHelper):
                        handler_json = handler_data
                    else:
                        handler_json = JsonHelper(handler_data,
                                                  Logger.instance())

                Logger.trace(data_parser.Parser)
                parser_results = handler_json.get_value(fallback=[],
                                                        *data_parser.Parser)

                if not isinstance(parser_results, (tuple, list)):
                    # if there is just one match, return that as a list
                    parser_results = [parser_results]
            else:
                if isinstance(handler_data, JsonHelper):
                    raise ValueError(
                        "Cannot perform Regex Parser on JsonHelper.")
                else:
                    parser_results = Regexer.do_regex(data_parser.Parser,
                                                      handler_data)

            Logger.debug(
                "[DataParsers] Processing DataParser.Creator for %s items",
                len(parser_results))
            for parser_result in parser_results:
                handler_result = data_parser.Creator(parser_result)
                if handler_result is not None:
                    if isinstance(handler_result, list):
                        items += handler_result
                    else:
                        items.append(handler_result)

            if data_parser.PostProcessor:
                Logger.debug(
                    "[DataParsers] Processing DataParser.PostProcessor")
                if data_parser.IsJson:
                    items = data_parser.PostProcessor(handler_json, items)
                else:
                    items = data_parser.PostProcessor(handler_data, items)
                Logger.trace("Post-processing returned %d items", len(items))

        # The post processors
        num_post_procs = len(generic_post_procs)
        Logger.trace("Processing %s Generic Post-Processors DataParsers",
                     num_post_procs)
        if num_post_procs > 1:
            # warn for strange results if more than 1 generic post-processor is present.
            Logger.warning(
                "More than one Generic Post-Processor is found (%s). They are being processed in the "
                "order that Python likes which might result in unexpected result.",
                num_post_procs)

        for data_parser in generic_post_procs:
            Logger.debug("[DataParsers] Post-processing Generic %s",
                         data_parser)
            items = data_parser.PostProcessor(data, items)
            Logger.trace("Post-processing returned %d items", len(items))

        # should we exclude DRM/GEO?
        hide_geo_locked = AddonSettings.hide_geo_locked_items_for_location(
            self.language)
        hide_drm_protected = AddonSettings.hide_drm_items()
        hide_premium = AddonSettings.hide_premium_items()
        hide_folders = AddonSettings.hide_restricted_folders()
        type_to_exclude = None
        if not hide_folders:
            type_to_exclude = "folder"

        old_count = len(items)
        if hide_drm_protected:
            Logger.debug("Hiding DRM items")
            items = [
                i for i in items
                if not i.isDrmProtected or i.type == type_to_exclude
            ]
        if hide_geo_locked:
            Logger.debug("Hiding GEO Locked items due to GEO region: %s",
                         self.language)
            items = [
                i for i in items
                if not i.isGeoLocked or i.type == type_to_exclude
            ]
        if hide_premium:
            Logger.debug("Hiding Premium items")
            items = [
                i for i in items if not i.isPaid or i.type == type_to_exclude
            ]

        # Local import for performance
        from resources.lib.cloaker import Cloaker
        cloaker = Cloaker(self,
                          AddonSettings.store(LOCAL),
                          logger=Logger.instance())
        if not AddonSettings.show_cloaked_items():
            Logger.debug("Hiding Cloaked items")
            items = [i for i in items if not cloaker.is_cloaked(i.url)]
        else:
            cloaked_items = [i for i in items if cloaker.is_cloaked(i.url)]
            for c in cloaked_items:
                c.isCloaked = True

        if len(items) != old_count:
            Logger.info(
                "Hidden %s items due to DRM/GEO/Premium/cloak filter (Hide Folders=%s)",
                old_count - len(items), hide_folders)

        # Check for grouping or not
        limit = AddonSettings.get_list_limit()
        folder_items = [i for i in items if i.type.lower() == "folder"]

        # we should also de-duplicate before calculating
        folder_items = list(set(folder_items))
        folders = len(folder_items)

        if 0 < limit < folders:
            # let's filter them by alphabet if the number is exceeded
            Logger.debug(
                "Creating Groups for list exceeding '%s' folder items. Total folders found '%s'.",
                limit, folders)
            other = "\a{}".format(
                LanguageHelper.get_localized_string(LanguageHelper.OtherChars))
            title_format = "\a{}".format(
                LanguageHelper.get_localized_string(LanguageHelper.StartWith))
            result = dict()
            non_grouped = []
            # Should we remove prefixes just as Kodi does?
            # prefixes = ("de", "het", "the", "een", "a", "an")

            for sub_item in items:
                if sub_item.dontGroup or sub_item.type != "folder":
                    non_grouped.append(sub_item)
                    continue

                char = sub_item.name[0].upper()
                if char == "&":
                    title = HtmlEntityHelper.convert_html_entities(
                        sub_item.name)
                    char = title[0].upper()

                # Should we de-prefix?
                # for p in prefixes:
                #     if sub_item.name.lower().startswith(p + " "):
                #         char = sub_item.name[len(p) + 1][0].upper()

                if char.isdigit():
                    char = "0-9"
                elif not char.isalpha():
                    char = other

                if char not in result:
                    Logger.trace("Creating Grouped item from: %s", sub_item)
                    if char == other:
                        item = MediaItem(
                            title_format.replace("'", "") % (char, ), "")
                    else:
                        item = MediaItem(title_format % (char.upper(), ), "")
                    item.complete = True
                    # item.set_date(2100 + ord(char[0]), 1, 1, text='')
                    result[char] = item
                else:
                    item = result[char]
                item.items.append(sub_item)

            items = non_grouped + list(result.values())

        # To get better performance when de-duplicating while keeping the sort order,
        # we first store the order in a lookup table and then use sorted(set()) with
        # that lookup table as the key. Using sorted(set(items), key=items.index) would
        # be O(n) (for the index()) times O(n*log(n)) (for the sorted) = O(n^2*log(n)).
        # The dictionary lookup (O(1)) saves us an O(n).
        # See https://wiki.python.org/moin/TimeComplexity
        sorted_order = {}
        for i in range(0, len(items)):
            sorted_order[items[i]] = i
        unique_results = sorted(set(items), key=sorted_order.get)

        Logger.trace("Found '%d' items of which '%d' are unique.", len(items),
                     len(unique_results))
        return unique_results
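
A standalone illustration of the order-preserving de-duplication at the end of process_folder_list; plain strings stand in for MediaItems:

items = ["a", "a", "b", "c", "c"]

# Remember a position per distinct item, then sort the unique set by it.
sorted_order = {}
for index, item in enumerate(items):
    sorted_order[item] = index
unique_results = sorted(set(items), key=sorted_order.get)
print(unique_results)  # ['a', 'b', 'c']

# On Python 3.7+, an insertion-ordered dict gives the same result directly:
print(list(dict.fromkeys(items)))  # ['a', 'b', 'c']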
Example No. 22
    def extract_hero_data(self, data):
        """ Extacts the Hero json data

        Accepts data from the process_folder_list method, BEFORE the items are
        processed. Allows setting of parameters (like title etc) for the channel.
        Inside this method the <data> could be changed and additional items can
        be created.

        The return values should always be instantiated in at least ("", []).

        :param str data: The retrieved data that was loaded for the current item and URL.

        :return: A tuple of the data and a list of MediaItems that were generated.
        :rtype: tuple[JsonHelper,list[MediaItem]]

        """

        Logger.info("Performing Pre-Processing")
        items = []

        hero_data = Regexer.do_regex(r'data-hero="([^"]+)', data)[0]
        hero_data = HtmlEntityHelper.convert_html_entities(hero_data)
        Logger.trace(hero_data)
        hero_json = JsonHelper(hero_data)
        hero_playlists = hero_json.get_value("data", "playlists")
        if not hero_playlists:
            # set an empty object
            hero_json.json = {}

        current = self.parentItem.metaData.get("current_playlist", None)
        if current == "clips":
            Logger.debug("Found 'clips' metadata, only listing clips")
            hero_json.json = {}
            return hero_json, items

        if current is None:
            # Add clips folder
            clip_title = LanguageHelper.get_localized_string(
                LanguageHelper.Clips)
            clips = MediaItem("\a.: %s :." % (clip_title, ),
                              self.parentItem.url)
            clips.metaData[self.__meta_playlist] = "clips"
            self.__no_clips = True
            items.append(clips)

        # See if there are seasons to show
        if len(hero_playlists) == 1:
            # first items, list all, except if there is only a single season
            Logger.debug("Only one folder playlist found. Listing that one")
            return hero_json, items

        if current is None:
            # list all folders
            for playlist in hero_playlists:
                folder = self.create_folder_item(playlist)
                items.append(folder)
            # clear the json item to prevent further listing
            hero_json.json = {}
            return hero_json, items

        # list the correct folder
        current_list = [l for l in hero_playlists if l["id"] == current]
        if current_list:
            # we are listing a subfolder, put that one on index 0 and then also
            hero_playlists.insert(0, current_list[0])
            self.__no_clips = True

        Logger.debug("Pre-Processing finished")
        return hero_json, items
Example No. 23
    def __convert_dc_subtitle_to_srt(dc_subtitle):
        """Converts DC Subtitle format into SRT format:

        Arguments:
        dcSubtitle : string - DC Subtitle subtitle format

        Returns:
        SRT formatted subtitle:

        Example:
            <Subtitle SpotNumber="1" TimeIn="00:00:01:220" TimeOut="00:00:04:001" FadeUpTime="20" FadeDownTime="20">
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="6.0">Line 1</Text>
            </Subtitle>
            <Subtitle SpotNumber="2" TimeIn="00:02:07:180" TimeOut="00:02:10:040" FadeUpTime="20" FadeDownTime="20">
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="6.0">Line 1</Text>
            </Subtitle>
            <Subtitle SpotNumber="3" TimeIn="00:02:15:190" TimeOut="00:02:17:190" FadeUpTime="20" FadeDownTime="20">
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="14.0">Line 1</Text>
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="6.0">Line 2</Text>
            </Subtitle>
            <Subtitle SpotNumber="4" TimeIn="00:03:23:140" TimeOut="00:03:30:120" FadeUpTime="20" FadeDownTime="20">
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="14.0">Line 1</Text>
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="14.0">Line 2</Text>
              <Text Direction="horizontal" HAlign="center" HPosition="0.0" VAlign="bottom" VPosition="14.0">Line 3</Text>
            </Subtitle>

        Returns
            1
            00:00:20,000 --> 00:00:24,400
            text

        The format of the timecode is Hours:Minutes:Seconds:Ticks where a "Tick"
        is a value of between 0 and 249 and lasts 4 milliseconds.

        """

        parse_regex = r'<subtitle[^>]+spotnumber="(\d+)" timein="(\d+:\d+:\d+):(\d+)" ' \
                      r'timeout="(\d+:\d+:\d+):(\d+)"[^>]+>|<text[^>]+>([^<]+)</text>'
        parse_regex = parse_regex.replace('"', '["\']')
        subs = Regexer.do_regex(parse_regex, dc_subtitle)

        srt = ""
        i = 1
        text = ""
        start = ""
        end = ""

        for sub in subs:
            #Logger.Trace(sub)
            try:
                if sub[0]:
                    # new start of a sub
                    if text and start and end:
                        # if we have a complete old one, save it
                        text = HtmlEntityHelper.convert_html_entities(text)
                        srt = "%s\n%s\n%s --> %s\n%s\n" % (srt, i, start, end,
                                                           text.strip())
                        i += 1
                    start = "%s,%03d" % (sub[1], int(sub[2]))
                    end = "%s,%03d" % (sub[3], int(sub[4]))
                    text = ""
                else:
                    text = "%s\n%s" % (text, sub[5].replace("<br />", "\n"))
            except:
                Logger.error("Error parsing subtitle: %s", sub, exc_info=True)
        return srt
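
Per the docstring's timecode definition (one tick is 4 ms, with values between 0 and 249), converting a DC timecode such as 00:02:07:180 into SRT notation is a small arithmetic step. A hedged sketch of that reading; note that the snippet above writes the raw tick count straight into the millisecond field instead:

def dc_timecode_to_srt(timecode):
    # "00:02:07:180" -> "00:02:07,720" under the 4 ms-per-tick reading
    hours_minutes_seconds, ticks = timecode.rsplit(":", 1)
    milliseconds = int(ticks) * 4  # one tick lasts 4 milliseconds
    return "%s,%03d" % (hours_minutes_seconds, milliseconds)


print(dc_timecode_to_srt("00:02:07:180"))  # 00:02:07,720
print(dc_timecode_to_srt("00:00:01:220"))  # 00:00:01,880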