Example #1
    def create_json_video_item(self, result_set):
        """ Creates a MediaItem of type 'video' using the result_set from the regex.

        This method creates a new MediaItem from the Regular Expression or JSON
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        If the item is completely processed and no further data needs to be fetched,
        the self.complete property should be set to True. If not set to True, the
        self.update_video_item method is called if the item is focused or selected
        for playback.

        :param list[str]|dict[str,any] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'video' or 'audio' (despite the method's name).
        :rtype: MediaItem|None

        """

        Logger.trace(result_set)
        url = "http://playapi.mtgx.tv/v3/videos/stream/%(id)s" % result_set
        item = MediaItem(result_set["title"], url)
        item.type = "video"
        item.thumb = self.parentItem.thumb
        item.icon = self.parentItem.icon
        item.description = result_set.get("summary", None)

        aired_at = result_set.get("airedAt", None)
        if aired_at is None:
            aired_at = result_set.get("publishedAt", None)
        if aired_at is not None:
            # Example format: 2016-05-20T15:05:00+00:00
            aired_at = aired_at.split("+")[0].rstrip('Z')
            time_stamp = DateHelper.get_date_from_string(
                aired_at, "%Y-%m-%dT%H:%M:%S")
            item.set_date(*time_stamp[0:6])

        # Override the parent thumb with the image from the result set
        item.thumb = self.__get_thumb_image(result_set.get("image"))

        # Hide premium items when the "loginRequired" flag is set
        is_premium = result_set.get("loginRequired", False)
        if is_premium and AddonSettings.hide_premium_items():
            Logger.debug("Found premium item, hiding it.")
            return None

        srt = result_set.get("samiPath")
        if not srt:
            srt = result_set.get("subtitles_webvtt")
        if srt:
            Logger.debug("Storing SRT/WebVTT path: %s", srt)
            part = item.create_new_empty_media_part()
            part.Subtitle = srt
        return item
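
The timestamp handling above can be reproduced with the standard library alone. A minimal sketch, assuming inputs shaped like the 2016-05-20T15:05:00+00:00 example in the comment (parse_aired_at is a hypothetical helper, not part of the channel code):

    from datetime import datetime

    def parse_aired_at(aired_at):
        # Strip the "+00:00" offset or a trailing "Z", as the channel code does,
        # then parse the remaining naive ISO-8601 timestamp.
        aired_at = aired_at.split("+")[0].rstrip("Z")
        return datetime.strptime(aired_at, "%Y-%m-%dT%H:%M:%S")

    print(parse_aired_at("2016-05-20T15:05:00+00:00"))  # 2016-05-20 15:05:00
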
Example #2
    def process_folder_list(self, item=None):  # NOSONAR
        """ Process the selected item and get's it's child items using the available dataparsers.

        Accepts an <item> and returns a list of MediaItems with at least name & url
        set. The following actions are performed:

        * determining the correct parsers to use
        * call a pre-processor
        * parsing the data with the parsers
        * calling the creators for item creations

        If the <item> is None, we assume that this is the first call for this channel and
        self.mainListUri is used.

        :param MediaItem|None item: The parent item.

        :return: A list of MediaItems that form the children of the <item>.
        :rtype: list[MediaItem]

        """

        items = []
        self.parentItem = item

        if item is None:
            Logger.info(
                "process_folder_list :: No item was specified. Assuming it was the main channel list"
            )
            url = self.mainListUri
        elif len(item.items) > 0:
            return item.items
        else:
            url = item.url

        # Determine the handlers and process
        data_parsers = self.__get_data_parsers(url)
        # Exclude the parsers that are only used for updating videos
        data_parsers = [
            p for p in data_parsers if not p.is_video_updater_only()
        ]

        if any(p.LogOnRequired for p in data_parsers):
            Logger.info("One or more data parsers require logging in.")
            self.loggedOn = self.log_on()

        # Set the headers here, and not earlier, as they might have been updated by the log-on
        if item is not None and item.HttpHeaders:
            headers = item.HttpHeaders
        else:
            headers = self.httpHeaders

        # Let's retrieve the required data. Main URLs:
        if url.startswith("http:") or url.startswith(
                "https:") or url.startswith("file:"):
            # Disable cache on live folders
            no_cache = item is not None and not item.is_playable(
            ) and item.isLive
            if no_cache:
                Logger.debug("Disabling cache for '%s'", item)
            data = UriHandler.open(url,
                                   proxy=self.proxy,
                                   additional_headers=headers,
                                   no_cache=no_cache)
        # Searching a site using search_site()
        elif url == "searchSite" or url == "#searchSite":
            Logger.debug("Starting to search")
            return self.search_site()
        # Labels instead of URLs
        elif url.startswith("#"):
            data = ""
        # Others
        else:
            Logger.debug("Unknown URL format. Setting data to ''")
            data = ""

        # first check if there is a generic pre-processor
        pre_procs = [p for p in data_parsers if p.is_generic_pre_processor()]
        num_pre_procs = len(pre_procs)
        Logger.trace("Processing %s Generic Pre-Processors DataParsers",
                     num_pre_procs)
        if num_pre_procs > 1:
            # warn for strange results if more than 1 generic pre-processor is present.
            Logger.warning(
                "More than one Generic Pre-Processor is found (%s). They are being processed in the "
                "order that Python likes which might result in unexpected result.",
                num_pre_procs)

        for data_parser in pre_procs:
            # remove it from the list
            data_parsers.remove(data_parser)

            # and process it
            Logger.debug("Processing %s", data_parser)
            (data, pre_items) = data_parser.PreProcessor(data)
            items += pre_items

            if isinstance(data, JsonHelper):
                Logger.debug(
                    "Generic preprocessor resulted in JsonHelper data")

        # Then the other handlers
        Logger.trace("Processing %s Normal DataParsers", len(data_parsers))
        handler_json = None
        for data_parser in data_parsers:
            Logger.debug("Processing %s", data_parser)

            # Check for preprocessors
            if data_parser.PreProcessor:
                Logger.debug("Processing DataParser.PreProcessor")
                (handler_data, pre_items) = data_parser.PreProcessor(data)
                items += pre_items
            else:
                handler_data = data

            Logger.debug("Processing DataParser.Parser")
            if data_parser.Parser is None or (data_parser.Parser == ""
                                              and not data_parser.IsJson):
                if data_parser.Creator:
                    Logger.warning("No <parser> found for %s. Skipping.",
                                   data_parser.Creator)
                continue

            if data_parser.IsJson:
                if handler_json is None:
                    # Cache the json requests to improve performance
                    Logger.trace("Caching JSON results for Dataparsing")
                    if isinstance(handler_data, JsonHelper):
                        handler_json = handler_data
                    else:
                        handler_json = JsonHelper(handler_data,
                                                  Logger.instance())

                Logger.trace(data_parser.Parser)
                parser_results = handler_json.get_value(fallback=[],
                                                        *data_parser.Parser)

                if not isinstance(parser_results, (tuple, list)):
                    # if there is just one match, return that as a list
                    parser_results = [parser_results]
            else:
                if isinstance(handler_data, JsonHelper):
                    raise ValueError(
                        "Cannot perform Regex Parser on JsonHelper.")
                else:
                    parser_results = Regexer.do_regex(data_parser.Parser,
                                                      handler_data)

            Logger.debug("Processing DataParser.Creator for %s items",
                         len(parser_results))
            for parser_result in parser_results:
                handler_result = data_parser.Creator(parser_result)
                if handler_result is not None:
                    if isinstance(handler_result, list):
                        items += handler_result
                    else:
                        items.append(handler_result)

        # Should we exclude DRM/GEO-locked/Premium items?
        hide_geo_locked = AddonSettings.hide_geo_locked_items_for_location(
            self.language)
        hide_drm_protected = AddonSettings.hide_drm_items()
        hide_premium = AddonSettings.hide_premium_items()
        hide_folders = AddonSettings.hide_restricted_folders()
        type_to_exclude = None
        if not hide_folders:
            type_to_exclude = "folder"

        old_count = len(items)
        if hide_drm_protected:
            Logger.debug("Hiding DRM items")
            items = [
                i for i in items
                if not i.isDrmProtected or i.type == type_to_exclude
            ]
        if hide_geo_locked:
            Logger.debug("Hiding GEO Locked items due to GEO region: %s",
                         self.language)
            items = [
                i for i in items
                if not i.isGeoLocked or i.type == type_to_exclude
            ]
        if hide_premium:
            Logger.debug("Hiding Premium items")
            items = [
                i for i in items if not i.isPaid or i.type == type_to_exclude
            ]

        # Local import for performance
        from resources.lib.cloaker import Cloaker
        cloaker = Cloaker(self,
                          AddonSettings.store(LOCAL),
                          logger=Logger.instance())
        if not AddonSettings.show_cloaked_items():
            Logger.debug("Hiding Cloaked items")
            items = [i for i in items if not cloaker.is_cloaked(i.url)]
        else:
            cloaked_items = [i for i in items if cloaker.is_cloaked(i.url)]
            for c in cloaked_items:
                c.isCloaked = True

        if len(items) != old_count:
            Logger.info(
                "Hidden %s items due to DRM/GEO/Premium/cloak filter (Hide Folders=%s)",
                old_count - len(items), hide_folders)

        # Check for grouping or not
        limit = AddonSettings.get_list_limit()
        folder_items = [i for i in items if i.type.lower() == "folder"]

        # De-duplicate the folder items before counting them
        folder_items = list(set(folder_items))
        folders = len(folder_items)

        if 0 < limit < folders:
            # Group the folders alphabetically if the limit is exceeded
            Logger.debug(
                "Creating Groups for list exceeding '%s' folder items. Total folders found '%s'.",
                limit, folders)
            other = LanguageHelper.get_localized_string(
                LanguageHelper.OtherChars)
            title_format = LanguageHelper.get_localized_string(
                LanguageHelper.StartWith)
            result = dict()
            non_grouped = []
            # Should we remove prefixes just as Kodi does?
            # prefixes = ("de", "het", "the", "een", "a", "an")

            for sub_item in items:
                if sub_item.dontGroup or sub_item.type != "folder":
                    non_grouped.append(sub_item)
                    continue

                char = sub_item.name[0].upper()
                # Should we de-prefix?
                # for p in prefixes:
                #     if sub_item.name.lower().startswith(p + " "):
                #         char = sub_item.name[len(p) + 1][0].upper()

                if char.isdigit():
                    char = "0-9"
                elif not char.isalpha():
                    char = other

                if char not in result:
                    Logger.trace("Creating Grouped item from: %s", sub_item)
                    if char == other:
                        item = MediaItem(
                            title_format.replace("'", "") % (char, ), "")
                    else:
                        item = MediaItem(title_format % (char.upper(), ), "")
                    item.complete = True
                    # item.set_date(2100 + ord(char[0]), 1, 1, text='')
                    result[char] = item
                else:
                    item = result[char]
                item.items.append(sub_item)

            items = non_grouped + list(result.values())

        # To de-duplicate efficiently while keeping the sort order, we first store the
        # original order in a lookup table, then use sorted(set()) with that table as the
        # sort key. The naive sorted(set(items), key=items.index) would cost O(n) (for
        # index()) times O(n*log(n)) (for the sort) = O(n^2*log(n)). The O(1) dictionary
        # lookup saves us a factor of O(n).
        # See https://wiki.python.org/moin/TimeComplexity
        sorted_order = {}
        for index, list_item in enumerate(items):
            sorted_order[list_item] = index
        unique_results = sorted(set(items), key=sorted_order.get)

        Logger.trace("Found '%d' items of which '%d' are unique.", len(items),
                     len(unique_results))
        return unique_results
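
The alphabetical grouping in the middle of this method can be tried in isolation. A minimal sketch using plain strings instead of MediaItem objects; the names and the "Other" label are made up for illustration:

    names = ["Alpha", "beta", "7 Days", "#Tagged", "Bravo"]
    groups = {}
    for name in names:
        char = name[0].upper()
        if char.isdigit():
            char = "0-9"    # all digit-prefixed titles share one group
        elif not char.isalpha():
            char = "Other"  # punctuation and symbols end up in "Other"
        groups.setdefault(char, []).append(name)

    print(groups)
    # {'A': ['Alpha'], 'B': ['beta', 'Bravo'], '0-9': ['7 Days'], 'Other': ['#Tagged']}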
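
The order-preserving de-duplication at the end is also easy to verify standalone. A minimal sketch with strings instead of MediaItem objects; note that, as in the loop above, a duplicate keeps its last-seen position:

    values = ["a", "b", "a", "c", "b"]
    sorted_order = {}
    for index, value in enumerate(values):
        sorted_order[value] = index  # the last occurrence wins
    unique_results = sorted(set(values), key=sorted_order.get)
    print(unique_results)  # ['a', 'c', 'b']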