def add_graphql_extras(self, data):
        """ Adds additional items to the main listings

        :param str data: The retrieved data that was loaded for the current item and URL.

        :return: A tuple of the data and a list of MediaItems that were generated.
        :rtype: tuple[str|JsonHelper,list[MediaItem]]

        """

        items = []
        if self.parentItem is not None:
            return data, items

        popular_url = self.__get_api_query_url(
            "trendingPrograms",
            "{__typename,title,description,guid,updated,seriesTvSeasons{id},imageMedia{url,label}}"
        )
        popular_title = LanguageHelper.get_localized_string(
            LanguageHelper.Popular)
        popular = MediaItem("\a.: {} :.".format(popular_title), popular_url)
        popular.dontGroup = True
        popular.content_type = contenttype.TVSHOWS
        items.append(popular)

        # https://graph.kijk.nl/graphql?operationName=programsByDate&variables={"date":"2020-04-19","numOfDays":7}&extensions={"persistedQuery":{"version":1,"sha256Hash":"1445cc0d283e10fa21fcdf95b127207d5f8c22834c1d0d17df1aacb8a9da7a8e"}}
        recent_url = "#recentgraphql"
        recent_title = LanguageHelper.get_localized_string(
            LanguageHelper.Recent)
        recent = MediaItem("\a.: {} :.".format(recent_title), recent_url)
        recent.dontGroup = True
        items.append(recent)

        search_title = LanguageHelper.get_localized_string(
            LanguageHelper.Search)
        search = MediaItem("\a.: {} :.".format(search_title), "#searchSite")
        search.dontGroup = True
        items.append(search)

        # The persisted-query variant of this request, kept for reference; it was
        # immediately overwritten by the plain query URL below, so it is disabled.
        # movie_url = self.__get_api_persisted_url(
        #     "programs",
        #     "cd8d5f074397e43ccd27b1c958d8c24264b0a92a94f3162e8281f6a2934d0391",
        #     variables={"programTypes": "MOVIE", "limit": 100})
        movie_url = self.__get_api_query_url(
            "programs(programTypes: MOVIE)",
            "{totalResults,items{type,__typename,guid,title,description,duration,displayGenre,"
            "imageMedia{url,label},epgDate,sources{type,file,drm},tracks{type,kind,label,file}}}"
        )
        movies_title = LanguageHelper.get_localized_string(
            LanguageHelper.Movies)
        movies = MediaItem("\a.: {} :.".format(movies_title), movie_url)
        movies.dontGroup = True
        movies.content_type = contenttype.MOVIES
        items.append(movies)

        return data, items
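The commented URLs above show the persisted-query form of these GraphQL requests. A minimal, self-contained sketch of how such a URL can be assembled, using the endpoint and hash from the comment; the helper name is hypothetical and not part of the channel class:

import json
from urllib.parse import urlencode

def build_persisted_query_url(operation, sha256_hash, variables=None):
    # Hypothetical helper: builds an Apollo-style persisted-query GET URL.
    params = {
        "operationName": operation,
        "variables": json.dumps(variables or {}, separators=(",", ":")),
        "extensions": json.dumps(
            {"persistedQuery": {"version": 1, "sha256Hash": sha256_hash}},
            separators=(",", ":")),
    }
    return "https://graph.kijk.nl/graphql?{}".format(urlencode(params))

# Reproduces the shape of the programsByDate URL from the comment above.
print(build_persisted_query_url(
    "programsByDate",
    "1445cc0d283e10fa21fcdf95b127207d5f8c22834c1d0d17df1aacb8a9da7a8e",
    variables={"date": "2020-04-19", "numOfDays": 7}))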
Example 2
    def add_special_categories(self, data):
        """ Adds extra items to the mainlist.

        Accepts data from the process_folder_list method, BEFORE the items are
        processed. Allows setting parameters (like the title) for the channel.
        Inside this method the <data> can be changed and additional items can
        be created.

        :param str data: The retrieved data that was loaded for the current item and URL.

        :return: A tuple of the data and a list of MediaItems that were generated.
        :rtype: tuple[str|JsonHelper,list[MediaItem]]

        """

        items = []
        extras = {
            # "Verwacht": "https://connect.pathe.nl/v1/movies/comingsoon/",
            "Verwacht": "https://connect.pathe.nl/v1/movies/lists/",
            "Specials": "https://connect.pathe.nl/v1/specials/movies/"
        }
        for title, url in extras.items():
            expected = MediaItem("\a{}".format(title), url)
            expected.content_type = contenttype.MOVIES
            expected.dontGroup = True
            items.append(expected)

        return data, items
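The "\a" prefix on these titles is the ASCII BEL character, which compares lower than any printable character; presumably it is used so these special folders float to the top of an otherwise alphabetically sorted list. A quick stand-alone illustration:

# "\a" is chr(7); it sorts before every printable character, so prefixed
# entries end up first when a list is sorted by name.
names = ["Films", "\aVerwacht", "Series", "\aSpecials"]
print(sorted(names))  # ['\x07Specials', '\x07Verwacht', 'Films', 'Series']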
Example 3
    def add_recent_items(self, data):
        """ Performs pre-process actions for data processing.

        Accepts data from the process_folder_list method, BEFORE the items are
        processed. Allows setting parameters (like the title) for the channel.
        Inside this method the <data> can be changed and additional items can
        be created.

        The return value should always be instantiated to at least ("", []).

        :param str data: The retrieved data that was loaded for the current item and URL.

        :return: A tuple of the data and a list of MediaItems that were generated.
        :rtype: tuple[str|JsonHelper,list[MediaItem]]

        """

        items = []
        today = datetime.datetime.now()
        days = LanguageHelper.get_days_list()
        for d in range(7):
            air_date = today - datetime.timedelta(days=d)
            Logger.trace("Adding item for: %s", air_date)

            # Determine a nice display date
            day = days[air_date.weekday()]
            if d == 0:
                day = LanguageHelper.get_localized_string(LanguageHelper.Today)
            elif d == 1:
                day = LanguageHelper.get_localized_string(LanguageHelper.Yesterday)

            title = "%04d-%02d-%02d - %s" % (air_date.year, air_date.month, air_date.day, day)
            url = "https://www.goplay.be/api/epg/{}/{:04d}-{:02d}-{:02d}".\
                format(self.__channel_brand, air_date.year, air_date.month, air_date.day)

            extra = MediaItem(title, url)
            extra.complete = True
            extra.dontGroup = True
            extra.set_date(air_date.year, air_date.month, air_date.day, text="")
            extra.content_type = contenttype.VIDEOS
            items.append(extra)

        return data, items
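A stand-alone sketch of the display-date logic above, with the LanguageHelper lookups replaced by hard-coded English labels for illustration:

import datetime

def recent_day_labels(day_count=7):
    # Mirrors the loop above: today and yesterday get special labels,
    # the other days their weekday name.
    weekdays = ["Monday", "Tuesday", "Wednesday", "Thursday",
                "Friday", "Saturday", "Sunday"]
    today = datetime.datetime.now()
    labels = []
    for d in range(day_count):
        air_date = today - datetime.timedelta(days=d)
        if d == 0:
            day = "Today"
        elif d == 1:
            day = "Yesterday"
        else:
            day = weekdays[air_date.weekday()]
        labels.append("%04d-%02d-%02d - %s" % (
            air_date.year, air_date.month, air_date.day, day))
    return labels

print("\n".join(recent_day_labels()))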
Example 4
    def create_cinema(self, result_set):
        """ Creates a new MediaItem for an episode.

        This method creates a new MediaItem from the Regular Expression or JSON
        results <result_set>. The method should be implemented by derived classes
        and is specific to the channel.

        :param list[str]|dict[str,str] result_set: The result_set of the self.episodeItemRegex

        :return: A new MediaItem of type 'folder'.
        :rtype: MediaItem|None

        """

        Logger.trace(result_set)
        cinema = MediaItem(result_set["name"], "")
        cinema.thumb = result_set["image"].replace("nocropthumb/[format]/", "")
        cinema.complete = True

        now_playing_url = "%s/cinemas/%s/movies/nowplaying" % (self.baseUrl, result_set["id"])
        now_playing = MediaItem("Trailers", now_playing_url)
        # https://www.pathe.nl/nocropthumb/[format]/gfx_content/bioscoop/foto/pathe.nl_380x218px_amersfoort.jpg
        now_playing.complete = True
        now_playing.HttpHeaders = self.httpHeaders
        now_playing.content_type = contenttype.MOVIES
        cinema.items.append(now_playing)

        now = datetime.datetime.now()
        for i in range(10):
            date = now + datetime.timedelta(days=i)
            title = "%s-%02d-%02d" % (date.year, date.month, date.day)
            schedule_url = "%s/cinemas/%s/schedules?date=%s" % (self.baseUrl, result_set["id"], title)
            schedule = MediaItem("Agenda: %s" % (title,), schedule_url)
            schedule.complete = True
            schedule.thumb = cinema.thumb
            schedule.HttpHeaders = self.httpHeaders
            cinema.items.append(schedule)
        return cinema
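Note that create_cinema pre-populates cinema.items with the "Trailers" and schedule folders. process_folder_list (Example 6 below) short-circuits on exactly that: an item that already carries children is returned as-is, with no HTTP request. A minimal stub of that interplay, with hypothetical names:

class Folder:
    # Hypothetical stand-in for MediaItem: just a name, a URL and children.
    def __init__(self, name, url, items=None):
        self.name = name
        self.url = url
        self.items = items or []

def fetch_and_parse(url):
    # Stand-in for the UriHandler/DataParser pipeline.
    return []

def children_of(folder):
    if folder.items:
        # Already populated (as create_cinema does): skip the round-trip.
        return folder.items
    return fetch_and_parse(folder.url)

cinema = Folder("Pathe Amersfoort", "", items=[Folder("Trailers", "...")])
print([child.name for child in children_of(cinema)])  # ['Trailers']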
Example 5
    def add_specials(self, data):
        """ Performs pre-process actions for data processing.

        Accepts data from the process_folder_list method, BEFORE the items are
        processed. Allows setting parameters (like the title) for the channel.
        Inside this method the <data> can be changed and additional items can
        be created.

        The return value should always be instantiated to at least ("", []).

        :param str data: The retrieved data that was loaded for the current item and URL.

        :return: A tuple of the data and a list of MediaItems that were generated.
        :rtype: tuple[str|JsonHelper,list[MediaItem]]

        """

        items = []

        specials = {
            "https://www.goplay.be/api/programs/popular/{}".format(self.__channel_brand): (
                LanguageHelper.get_localized_string(LanguageHelper.Popular),
                contenttype.TVSHOWS
            ),
            "#tvguide": (
                LanguageHelper.get_localized_string(LanguageHelper.Recent),
                contenttype.FILES
            )
        }

        for url, (title, content) in specials.items():
            item = MediaItem("\a.: {} :.".format(title), url)
            item.content_type = content
            items.append(item)

        return data, items
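All of these add_* methods follow the same pre-processor contract: take the raw data, return a (data, extra_items) tuple. A stripped-down sketch of how a consumer such as process_folder_list (next example) chains them; the names here are illustrative, not the add-on's API:

def run_pre_processors(data, pre_processors):
    # Each pre-processor may transform the data and contribute extra items.
    items = []
    for pre_processor in pre_processors:
        data, pre_items = pre_processor(data)
        items += pre_items
    return data, items

# Usage with a trivial pre-processor that adds one marker item.
data, items = run_pre_processors("", [lambda d: (d, ["extra-item"])])
print(items)  # ['extra-item']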
Example 6
    def process_folder_list(self, item=None):  # NOSONAR
        """ Process the selected item and get's it's child items using the available dataparsers.

        Accepts an <item> and returns a list of MediaListems with at least name & url
        set. The following actions are done:

        * determining the correct parsers to use
        * call a pre-processor
        * parsing the data with the parsers
        * calling the creators for item creations

        If the item is None, we assume that this is the first call for this channel and the
        mainlist URI is used.

        :param MediaItem|None item: The parent item.

        :return: A list of MediaItems that form the children of the <item>.
        :rtype: list[MediaItem]

        """

        items = []
        self.parentItem = item

        if item is None:
            Logger.info(
                "process_folder_list :: No item was specified. Assuming it was the main channel list"
            )
            url = self.mainListUri
        elif len(item.items) > 0:
            return item.items
        else:
            url = item.url

        # Determine the handlers and process
        data_parsers = self.__get_data_parsers(url)
        # Exclude parsers that are video-updaters only
        data_parsers = [
            p for p in data_parsers if not p.is_video_updater_only()
        ]

        if [p for p in data_parsers if p.LogOnRequired]:
            Logger.info("One or more dataparsers require logging in.")
            self.loggedOn = self.log_on()

        # Set the headers here and not earlier, in case they were updated by the logon.
        if item is not None and item.HttpHeaders:
            headers = item.HttpHeaders
        else:
            headers = self.httpHeaders

        # Let's retrieve the required data. Main URLs first.
        if url.startswith(("http:", "https:", "file:")):
            # Disable cache on live folders
            no_cache = item is not None and not item.is_playable() and item.isLive
            if no_cache:
                Logger.debug("Disabling cache for '%s'", item)
            data = UriHandler.open(url,
                                   additional_headers=headers,
                                   no_cache=no_cache)
        # Searching a site using search_site()
        elif url == "searchSite" or url == "#searchSite":
            Logger.debug("Starting to search")
            return self.search_site()
        # Labels instead of URLs
        elif url.startswith("#"):
            data = ""
        # Others
        else:
            Logger.debug("Unknown URL format. Setting data to ''")
            data = ""

        # first check if there is a generic pre-processor
        pre_procs = [p for p in data_parsers if p.is_generic_pre_processor()]
        num_pre_procs = len(pre_procs)
        Logger.trace("Processing %s Generic Pre-Processors DataParsers",
                     num_pre_procs)
        if num_pre_procs > 1:
            # warn for strange results if more than 1 generic pre-processor is present.
            Logger.warning(
                "More than one Generic Pre-Processor is found (%s). They are being processed in the "
                "order that Python likes which might result in unexpected result.",
                num_pre_procs)

        for data_parser in pre_procs:
            # remove it from the list
            data_parsers.remove(data_parser)

            # and process it
            Logger.debug("[DataParsers] Pre-Processing %s", data_parser)
            (data, pre_items) = data_parser.PreProcessor(data)
            items += pre_items

            if isinstance(data, JsonHelper):
                Logger.debug(
                    "Generic preprocessor resulted in JsonHelper data")

        # Split normal and post-processor data parsers
        generic_post_procs = [
            p for p in data_parsers if p.is_generic_post_processor()
        ]
        data_parsers = [p for p in data_parsers if p not in generic_post_procs]

        # Then the other handlers
        Logger.trace("Processing %s Normal DataParsers", len(data_parsers))
        handler_json = None
        for data_parser in data_parsers:
            Logger.debug("[DataParsers] Processing %s", data_parser)

            # Check for preprocessors
            if data_parser.PreProcessor:
                Logger.debug(
                    "[DataParsers] Processing DataParser.PreProcessor")
                (handler_data, pre_items) = data_parser.PreProcessor(data)
                items += pre_items
            else:
                handler_data = data

            Logger.debug("[DataParsers] Processing DataParser.Parser")
            if data_parser.Parser is None or (data_parser.Parser == ""
                                              and not data_parser.IsJson):
                if data_parser.Creator:
                    Logger.warning("No <parser> found for %s. Skipping.",
                                   data_parser.Creator)
                continue

            if data_parser.IsJson:
                if handler_json is None:
                    # Cache the json requests to improve performance
                    Logger.trace("Caching JSON results for Dataparsing")
                    if isinstance(handler_data, JsonHelper):
                        handler_json = handler_data
                    else:
                        handler_json = JsonHelper(handler_data,
                                                  Logger.instance())

                Logger.trace(data_parser.Parser)
                parser_results = handler_json.json
                for parser in data_parser.Parser:
                    # Find the right elements
                    if isinstance(parser, tuple):
                        # We need to match a key in a list of objects.
                        key, value, index = parser
                        parser_results = [
                            p for p in parser_results if p.get(key) == value
                        ]
                        if not parser_results:
                            parser_results = []
                            break
                        if index is not None:
                            parser_results = parser_results[index]

                    elif isinstance(parser_results, list):
                        parser_results = parser_results[parser]
                    else:
                        parser_results = parser_results.get(parser)

                    if not parser_results:
                        parser_results = []
                        break

                if not isinstance(parser_results, (tuple, list)):
                    # if there is just one match, return that as a list
                    parser_results = [parser_results]
            else:
                if isinstance(handler_data, JsonHelper):
                    raise ValueError(
                        "Cannot perform Regex Parser on JsonHelper.")
                else:
                    parser_results = Regexer.do_regex(data_parser.Parser,
                                                      handler_data)

            Logger.debug(
                "[DataParsers] Processing DataParser.Creator for %s items",
                len(parser_results))
            for parser_result in parser_results:
                handler_result = data_parser.Creator(parser_result)
                if handler_result is not None:
                    if isinstance(handler_result, list):
                        items += handler_result
                    else:
                        items.append(handler_result)

            if data_parser.PostProcessor:
                Logger.debug(
                    "[DataParsers] Processing DataParser.PostProcessor")
                if data_parser.IsJson:
                    items = data_parser.PostProcessor(handler_json, items)
                else:
                    items = data_parser.PostProcessor(handler_data, items)
                Logger.trace("Post-processing returned %d items", len(items))

        # The post processors
        num_post_procs = len(generic_post_procs)
        Logger.trace("Processing %s Generic Post-Processors DataParsers",
                     num_post_procs)
        if num_post_procs > 1:
            # warn for strange results if more than 1 generic pre-processor is present.
            Logger.warning(
                "More than one Generic Post-Processor is found (%s). They are being processed in the "
                "order that Python likes which might result in unexpected result.",
                num_post_procs)

        for data_parser in generic_post_procs:
            Logger.debug("[DataParsers] Post-processing Generic %s",
                         data_parser)
            items = data_parser.PostProcessor(data, items)
            Logger.trace("Post-processing returned %d items", len(items))

        # Should we exclude DRM/GEO-locked items?
        hide_geo_locked = AddonSettings.hide_geo_locked_items_for_location(
            self.language)
        hide_drm_protected = AddonSettings.hide_drm_items()
        hide_premium = AddonSettings.hide_premium_items()
        hide_folders = AddonSettings.hide_restricted_folders()
        type_to_exclude = None
        if not hide_folders:
            type_to_exclude = "folder"

        old_count = len(items)
        if hide_drm_protected:
            Logger.debug("Hiding DRM items")
            items = [
                i for i in items
                if not i.isDrmProtected or i.type == type_to_exclude
            ]
        if hide_geo_locked:
            Logger.debug("Hiding GEO Locked items due to GEO region: %s",
                         self.language)
            items = [
                i for i in items
                if not i.isGeoLocked or i.type == type_to_exclude
            ]
        if hide_premium:
            Logger.debug("Hiding Premium items")
            items = [
                i for i in items if not i.isPaid or i.type == type_to_exclude
            ]

        # Local import for performance
        from resources.lib.cloaker import Cloaker
        cloaker = Cloaker(self,
                          AddonSettings.store(LOCAL),
                          logger=Logger.instance())
        if not AddonSettings.show_cloaked_items():
            Logger.debug("Hiding Cloaked items")
            items = [i for i in items if not cloaker.is_cloaked(i.url)]
        else:
            cloaked_items = [i for i in items if cloaker.is_cloaked(i.url)]
            for c in cloaked_items:
                c.isCloaked = True

        if len(items) != old_count:
            Logger.info(
                "Hid %s items due to the DRM/GEO/Premium/cloak filters (Hide Folders=%s)",
                old_count - len(items), hide_folders)

        # Check for grouping or not
        limit = AddonSettings.get_list_limit()
        folder_items = [i for i in items if i.type.lower() == "folder"]

        # we should also de-duplicate before calculating
        folder_items = list(set(folder_items))
        folders = len(folder_items)

        if 0 < limit < folders:
            # Group them alphabetically if the limit is exceeded
            Logger.debug(
                "Creating Groups for list exceeding '%s' folder items. Total folders found '%s'.",
                limit, folders)
            other = "\a{}".format(
                LanguageHelper.get_localized_string(LanguageHelper.OtherChars))
            title_format = "\a{}".format(
                LanguageHelper.get_localized_string(LanguageHelper.StartWith))
            result = dict()
            non_grouped = []
            # Should we remove prefixes just as Kodi does?
            # prefixes = ("de", "het", "the", "een", "a", "an")

            # Copy the parent's content-type for the sub-folder items
            if self.parentItem:
                content_type = self.parentItem.content_type
            else:
                content_type = self.mainListContentType

            for sub_item in items:
                if sub_item.dontGroup or sub_item.type != "folder":
                    non_grouped.append(sub_item)
                    continue

                char = sub_item.name[0].upper()
                if char == "&":
                    title = HtmlEntityHelper.convert_html_entities(
                        sub_item.name)
                    char = title[0].upper()

                # Should we de-prefix?
                # for p in prefixes:
                #     if sub_item.name.lower().startswith(p + " "):
                #         char = sub_item.name[len(p) + 1][0].upper()

                if char.isdigit():
                    char = "0-9"
                elif not char.isalpha():
                    char = other

                if char not in result:
                    Logger.trace("Creating Grouped item from: %s", sub_item)
                    if char == other:
                        item = MediaItem(
                            title_format.replace("'", "") % (char, ), "")
                    else:
                        item = MediaItem(title_format % (char.upper(), ), "")
                    item.complete = True
                    item.content_type = content_type
                    # item.set_date(2100 + ord(char[0]), 1, 1, text='')
                    result[char] = item
                else:
                    item = result[char]
                item.items.append(sub_item)

            items = non_grouped + list(result.values())

        # To de-duplicate while keeping the sort order performant, we first store each
        # item's position in a lookup table, then use sorted(set(...)) with that table
        # as the sort key. Using sorted(set(items), key=items.index) instead would cost
        # O(n) per index() call times O(n*log(n)) for the sort, i.e. O(n^2*log(n)).
        # The O(1) dictionary lookup saves us that factor of O(n).
        # See https://wiki.python.org/moin/TimeComplexity
        sorted_order = {}
        for index, item in enumerate(items):
            sorted_order[item] = index
        unique_results = sorted(set(items), key=sorted_order.get)

        Logger.trace("Found '%d' items of which '%d' are unique.", len(items),
                     len(unique_results))
        return unique_results
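The closing de-duplication step is worth isolating. A self-contained version of the same trick; note that because the lookup table is overwritten on each occurrence, duplicates keep the position of their last occurrence:

def unique_in_order(items):
    # O(n) to build the position table, O(n log n) to sort the unique set;
    # sorted(set(items), key=items.index) would cost O(n^2 log n) instead.
    sorted_order = {}
    for index, item in enumerate(items):
        sorted_order[item] = index
    return sorted(set(items), key=sorted_order.get)

assert unique_in_order([3, 1, 3, 2, 1]) == [3, 2, 1]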