Example #1
    def search(self, search_strings, age=0, ep_obj=None):
        results = []
        for mode in search_strings:
            items = []
            logger.debug(_(f"Search Mode: {mode}"))
            for search_string in search_strings[mode]:

                if mode == 'Season':
                    search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)

                if mode != 'RSS':
                    logger.debug("Search string: {0}".format
                                 (search_string))

                    search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html(class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                            title = re.sub(r' Saison', ' Season', title, flags=re.I)
                            tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url + '/telechargement/{0}'.format(tmp))
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="up").get_text(strip=True))
                            leechers = try_int(result.find(class_="down").get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.debug("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                                 (title, seeders, leechers))
                                continue

                            torrent_size = result.find(class_="poid").get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers))

                            items.append(item)
                        except Exception:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
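
Note: convert_size above is called with a French unit list (octets: o, Ko, Mo, Go, To, Po). As a rough illustration of what such a helper does, here is a minimal standalone sketch of parsing a size string against a custom unit list; this is an assumption for illustration, not SickChill's actual convert_size implementation.

import re

def parse_size(size_string, units):
    """Parse a size string such as '1.2 Go' into bytes using a custom unit list."""
    match = re.match(r'([\d.,]+)\s*(\w+)', size_string)
    if not match:
        return None
    value = float(match.group(1).replace(',', '.'))
    try:
        exponent = units.index(match.group(2))  # position in the list gives the power of 1024
    except ValueError:
        return None
    return int(value * 1024 ** exponent)

print(parse_size('1.2 Go', ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']))  # 1288490188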
Example #2
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode == 'Season':
                    search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                    search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html(class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                            title = re.sub(r' Saison', ' Season', title, flags=re.I)
                            tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url + '/telechargement/{0}'.format(tmp))
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="up").get_text(strip=True))
                            leechers = try_int(result.find(class_="down").get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find(class_="poid").get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example #3
    def test_try_int_with_default(self):
        """
        Test try_int with a default value
        """
        default_value = 42
        test_cases = {
            None: default_value,
            "": default_value,
            "123": 123,
            "-123": -123,
            "12.3": default_value,
            "-12.3": default_value,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            "": default_value,
            "123": 123,
            "-123": -123,
            "12.3": default_value,
            "-12.3": default_value,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in test.items():
                assert try_int(candidate, default_value) == result
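
Note: try_int itself never appears in these examples. A minimal sketch consistent with the test cases above (an assumption, not necessarily the project's exact implementation):

def try_int(candidate, default_value=0):
    """Convert candidate to int, returning default_value on failure.

    int() truncates floats (12.3 -> 12, -12.3 -> -12), while None raises
    TypeError and '' or '12.3' raise ValueError, falling back to the default.
    """
    try:
        return int(candidate)
    except (TypeError, ValueError):
        return default_value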
Example #4
    def fetch_latest_hot_shows(self):
        """Get popular show information from IMDB"""

        result = []

        shows = anidbquery.query(QUERY_HOT)
        for show in shows:
            try:
                recommended_show = RecommendedShow(
                    show.id,
                    show.titles['x-jat'][0],
                    1,
                    show.tvdbid,
                    cache_subfolder=self.cache_subfolder,
                    rating=str(show.ratings['temporary']['rating']),
                    votes=str(try_int(show.ratings['temporary']['count'], 0)),
                    image_href=show.url)

                # Check cache or get and save image
                recommended_show.cache_image(
                    "http://img7.anidb.net/pics/anime/{0}".format(
                        show.image_path))

                result.append(recommended_show)
            except Exception:
                pass

        return result
Example #5
    def test_try_int(self):
        """
        Test try_int
        """
        test_cases = {
            None: 0,
            '': 0,
            '123': 123,
            '-123': -123,
            '12.3': 0,
            '-12.3': 0,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            '': 0,
            '123': 123,
            '-123': -123,
            '12.3': 0,
            '-12.3': 0,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in six.iteritems(test):
                self.assertEqual(try_int(candidate), result)
Example #6
    def test_try_int_with_default(self):
        """
        Test try_int with a default value
        """
        default_value = 42
        test_cases = {
            None: default_value,
            '': default_value,
            '123': 123,
            '-123': -123,
            '12.3': default_value,
            '-12.3': default_value,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            '': default_value,
            '123': 123,
            '-123': -123,
            '12.3': default_value,
            '-12.3': default_value,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in six.iteritems(test):
                self.assertEqual(try_int(candidate, default_value), result)
Example #7
    def test_try_int(self):
        """
        Test try_int
        """
        test_cases = {
            None: 0,
            '': 0,
            '123': 123,
            '-123': -123,
            '12.3': 0,
            '-12.3': 0,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            '': 0,
            '123': 123,
            '-123': -123,
            '12.3': 0,
            '-12.3': 0,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in six.iteritems(test):
                self.assertEqual(try_int(candidate), result)
Example #8
    def test_try_int_with_default(self):
        """
        Test try_int with a default value
        """
        default_value = 42
        test_cases = {
            None: default_value,
            '': default_value,
            '123': 123,
            '-123': -123,
            '12.3': default_value,
            '-12.3': default_value,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            '': default_value,
            '123': 123,
            '-123': -123,
            '12.3': default_value,
            '-12.3': default_value,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in six.iteritems(test):
                self.assertEqual(try_int(candidate, default_value), result)
Example #9
    def test_try_int(self):
        """
        Test try_int
        """
        test_cases = {
            None: 0,
            "": 0,
            "123": 123,
            "-123": -123,
            "12.3": 0,
            "-12.3": 0,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            "": 0,
            "123": 123,
            "-123": -123,
            "12.3": 0,
            "-12.3": 0,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in test.items():
                assert try_int(candidate) == result
Example #10
    def search(self, search_strings, age=0, ep_obj=None):
        results = []

        for mode in search_strings:
            items = []
            logger.debug(_("Search Mode: {mode}".format(mode=mode)))
            for search_string in {*search_strings[mode]}:

                # Feed verified does not exist on this clone
                # search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                search_url = self.urls["feed"]
                if mode != "RSS":
                    logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

                data = self.get_url(search_url, params={"f": search_string}, returns="text")
                if not data:
                    logger.debug("No data returned from provider")
                    continue

                if not data.startswith("<?xml"):
                    logger.info("Expected xml but got something else, is your mirror failing?")
                    continue

                try:
                    with BS4Parser(data, "html5lib") as parser:
                        for item in parser("item"):
                            if item.category and "tv" not in item.category.get_text(strip=True).lower():
                                continue

                            title = item.title.get_text(strip=True)
                            t_hash = item.guid.get_text(strip=True).rsplit("/", 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(item.find("description").text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers
                                        )
                                    )
                                continue

                            result = {"title": title, "link": download_url, "size": size, "seeders": seeders, "leechers": leechers, "hash": t_hash}
                            items.append(result)
                except Exception:
                    logger.exception("Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()))

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
            results += items

        return results
Example #11
    def vres(self):
        """
        The vertical resolution found in the name

        :returns: None if not found
        """
        attr = 'res'
        match = self._get_match_obj(attr)
        return None if not match else try_int(match.group('vres'))
Example #12
    def vres(self):
        """
        The vertical resolution found in the name

        :returns: None if not found
        """
        attr = 'res'
        match = self._get_match_obj(attr)
        return None if not match else try_int(match.group('vres'))
Example #13
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                # Feed verified does not exist on this clone
                # search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                search_url = self.urls['feed']
                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url, params={'f': search_string}, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser('item'):
                            if item.category and 'tv' not in item.category.get_text(strip=True).lower():
                                continue

                            title = item.title.get_text(strip=True)
                            t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
                            items.append(result)
                except StandardError:
                    logger.log("Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example #14
    def search(self, search_strings, age=0, ep_obj=None):
        results = []

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                # Feed verified does not exist on this clone
                # search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                search_url = self.urls['feed']
                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url, params={'f': search_string}, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser('item'):
                            if item.category and 'tv' not in item.category.get_text(strip=True).lower():
                                continue

                            title = item.title.get_text(strip=True)
                            t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
                            items.append(result)
                except StandardError:
                    logger.log("Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example #15
    def _get_size(self, item):
        try:
            size = item.get('links')[1].get('length', -1)
        except (AttributeError, IndexError, TypeError):
            size = -1

        if not size:
            logger.log('The size was not found in the provider response', logger.DEBUG)

        return try_int(size, -1)
Example #16
    def _get_size(self, item):
        try:
            size = item.get("links")[1].get("length", -1)
        except (AttributeError, IndexError, TypeError):
            size = -1

        if not size:
            logger.debug("The size was not found in the provider response")

        return try_int(size, -1)
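
Note: the item here is a parsed feed entry; a hypothetical shape that satisfies the lookup above, for illustration only:

item = {"links": [{"href": "http://example.invalid/nzb"}, {"length": "734003200"}]}
size = item.get("links")[1].get("length", -1)  # -> "734003200"
assert try_int(size, -1) == 734003200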
Example #17
    def _get_size(self, item):
        try:
            size = item.get('links')[1].get('length', -1)
        except (AttributeError, IndexError, TypeError):
            size = -1

        if not size:
            logger.log('The size was not found in the provider response', logger.DEBUG)

        return try_int(size, -1)
Example #18
def min_max(val, default, low, high):
    """Return value forced within range"""

    val = try_int(val, default)

    if val < low:
        return low
    if val > high:
        return high

    return val
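
A brief illustration of min_max (hypothetical values): the input is coerced with try_int, unparseable input falls back to the default, and the result is clamped to [low, high]:

assert min_max('7', default=5, low=1, high=10) == 7     # parsed, already in range
assert min_max('oops', default=5, low=1, high=10) == 5  # falls back to default
assert min_max(99, default=5, low=1, high=10) == 10     # clamped to high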
Example #19
def change_subtitle_finder_frequency(subtitles_finder_frequency):
    """
    Change frequency of subtitle thread

    :param subtitles_finder_frequency: New frequency
    """
    if subtitles_finder_frequency == "" or subtitles_finder_frequency is None:
        subtitles_finder_frequency = 1

    settings.SUBTITLES_FINDER_FREQUENCY = try_int(subtitles_finder_frequency, 1)
    return True
Example #20
def change_subtitle_finder_frequency(subtitles_finder_frequency):
    """
    Change frequency of subtitle thread

    :param subtitles_finder_frequency: New frequency
    """
    if subtitles_finder_frequency == '' or subtitles_finder_frequency is None:
        subtitles_finder_frequency = 1

    sickbeard.SUBTITLES_FINDER_FREQUENCY = try_int(subtitles_finder_frequency, 1)
    return True
Example #21
def min_max(val, default, low, high):
    """ Return value forced within range """

    val = try_int(val, default)

    if val < low:
        return low
    if val > high:
        return high

    return val
Example #22
    def __init__(self, indexer_id, media_format='normal'):
        """
        :param indexer_id: The indexer id of the show
        :param media_format: The format of the media to get. Must be either 'normal' or 'thumb'
        """

        self.indexer_id = try_int(indexer_id, 0)

        if media_format in ('normal', 'thumb'):
            self.media_format = media_format
        else:
            self.media_format = 'normal'
Example #23
    def providers_list(data):
        default_list = [
            x for x in (
                NewznabProvider._make_provider(x)
                for x in NewznabProvider._get_default_providers().split('!!!'))
            if x
        ]
        providers_list = [
            x for x in (NewznabProvider._make_provider(x)
                        for x in data.split('!!!')) if x
        ]
        seen_values = set()
        providers_set = []

        for provider in providers_list:
            value = provider.name

            if value not in seen_values:
                providers_set.append(provider)
                seen_values.add(value)

        providers_list = providers_set
        providers_dict = dict(
            zip([x.name for x in providers_list], providers_list))

        for default in default_list:
            if not default:
                continue

            if default.name not in providers_dict:
                providers_dict[default.name] = default

                providers_dict[default.name].default = True
                providers_dict[default.name].name = default.name
                providers_dict[default.name].url = default.url
                providers_dict[default.name].needs_auth = default.needs_auth
                providers_dict[default.name].search_mode = default.search_mode
                providers_dict[
                    default.name].search_fallback = default.search_fallback
                providers_dict[
                    default.name].enable_daily = default.enable_daily
                providers_dict[
                    default.name].enable_backlog = default.enable_backlog
                providers_dict[default.name].catIDs = ','.join([
                    x for x in providers_dict[default.name].catIDs.split(',')
                    if 5000 <= try_int(x) <= 5999
                ]) or default.catIDs

                if sickbeard.QUALITY_ALLOW_HEVC and '5090' not in providers_dict[
                        default.name].catIDs:
                    providers_dict[default.name].catIDs += ',5090'

        return [x for x in providers_list if x]
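
Note: the catIDs expression above keeps only newznab TV categories (ids 5000-5999), dropping anything outside that range and anything non-numeric (try_int returns 0). A small demonstration with made-up values:

cat_ids = '5030,5040,2000,junk'
kept = ','.join(x for x in cat_ids.split(',') if 5000 <= try_int(x) <= 5999)
assert kept == '5030,5040'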
Example #24
    def _retrieve_show_image_urls_from_fanart(show,
                                              img_type,
                                              thumb=False,
                                              season=None,
                                              multiple=False):
        types = {
            'poster': fanart.TYPE.TV.POSTER,
            'banner': fanart.TYPE.TV.BANNER,
            'poster_thumb': fanart.TYPE.TV.POSTER,
            'banner_thumb': fanart.TYPE.TV.BANNER,
            'fanart': fanart.TYPE.TV.BACKGROUND,
            'season_poster': fanart.TYPE.TV.SEASONPOSTER,
            'season_banner': fanart.TYPE.TV.SEASONBANNER,
        }

        try:
            if img_type in types:
                request = fanartRequest(
                    apikey=settings.FANART_API_KEY,
                    id=show.indexerid,
                    ws=fanart.WS.TV,
                    type=types[img_type],
                    sort=fanart.SORT.POPULAR,
                    limit=(fanart.LIMIT.ONE,
                           fanart.LIMIT.ALL)[season is not None],
                )

                resp = request.response()
                results = resp[types[img_type]]
                if season:
                    results = [
                        x for x in results
                        if try_int(x['season'], default_value=None) == season
                    ]

                def _to_preview_url(url):
                    return re.sub('/fanart/', '/preview/', url)

                if multiple:
                    urls = [result['url'] for result in results]
                    if thumb:
                        urls = [_to_preview_url(url) for url in urls]
                    return urls
                else:
                    url = results[0]['url']
                    if thumb:
                        url = _to_preview_url(url)
                    return url
        except Exception as error:
            logger.debug(error)

        logger.info("Could not find any " + img_type +
                    " images on Fanart.tv for " + show.name)
Example #25
    def retrieveShowMetadata(self, folder):
        """
        Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.
        """

        empty_return = (None, None, None)

        assert isinstance(folder, six.text_type)

        metadata_path = ek(os.path.join, folder, self._show_metadata_filename)

        if not ek(os.path.isdir, folder) or not ek(os.path.isfile, metadata_path):
            logger.log("Can't load the metadata file from " + metadata_path + ", it doesn't exist", logger.DEBUG)
            return empty_return

        logger.log("Loading show info from metadata file in " + metadata_path, logger.DEBUG)

        try:
            with io.open(metadata_path, 'rb') as xmlFileObj:
                showXML = etree.ElementTree(file=xmlFileObj)

            if showXML.findtext('title') is None or (showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
                logger.log("Invalid info in tvshow.nfo (missing name or id): {0} {1} {2}".format(showXML.findtext('title'), showXML.findtext('tvdbid'), showXML.findtext('id')))
                return empty_return

            name = showXML.findtext('title')

            indexer_id_text = showXML.findtext('tvdbid') or showXML.findtext('id')
            if indexer_id_text:
                indexer_id = try_int(indexer_id_text, None)
                if indexer_id is None or indexer_id < 1:
                    logger.log("Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file", logger.DEBUG)
                    return empty_return
            else:
                logger.log("Empty <id> or <tvdbid> field in NFO, unable to find a ID, not using metadata file", logger.DEBUG)
                return empty_return

            indexer = 1
            epg_url_text = showXML.findtext('episodeguide/url')
            if epg_url_text:
                epg_url = epg_url_text.lower()
                if str(indexer_id) in epg_url and 'tvrage' in epg_url:
                    logger.log("Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file because it has TVRage info", logger.WARNING)
                    return empty_return

        except Exception as e:
            logger.log(
                "There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
                logger.WARNING)
            return empty_return

        return indexer_id, name, indexer
Example #26
    def providers_list(data):
        default_list = [
            x for x in (
                NewznabProvider._make_provider(x)
                for x in NewznabProvider._get_default_providers().split("!!!"))
            if x
        ]
        providers_list = [
            x for x in (NewznabProvider._make_provider(x)
                        for x in data.split("!!!")) if x
        ]
        seen_values = set()
        providers_set = []

        for provider in providers_list:
            value = provider.name

            if value not in seen_values:
                providers_set.append(provider)
                seen_values.add(value)

        providers_list = providers_set
        providers_dict = {x.name: x for x in providers_list}

        for default in default_list:
            if not default:
                continue

            if default.name not in providers_dict:
                providers_dict[default.name] = default

                providers_dict[default.name].default = True
                providers_dict[default.name].name = default.name
                providers_dict[default.name].url = default.url
                providers_dict[default.name].needs_auth = default.needs_auth
                providers_dict[default.name].search_mode = default.search_mode
                providers_dict[
                    default.name].search_fallback = default.search_fallback
                providers_dict[
                    default.name].enable_daily = default.enable_daily
                providers_dict[
                    default.name].enable_backlog = default.enable_backlog
                providers_dict[default.name].catIDs = (",".join([
                    x for x in providers_dict[default.name].catIDs.split(",")
                    if 5000 <= try_int(x) <= 5999
                ]) or default.catIDs)

                if settings.QUALITY_ALLOW_HEVC and "5090" not in providers_dict[
                        default.name].catIDs:
                    providers_dict[default.name].catIDs += ",5090"

        return [x for x in providers_list if x]
Example #27
def change_update_frequency(freq):
    """
    Change frequency of daily updater thread

    :param freq: New frequency
    """
    settings.UPDATE_FREQUENCY = try_int(freq, settings.DEFAULT_UPDATE_FREQUENCY)

    if settings.UPDATE_FREQUENCY < settings.MIN_UPDATE_FREQUENCY:
        settings.UPDATE_FREQUENCY = settings.MIN_UPDATE_FREQUENCY

    settings.versionCheckScheduler.cycleTime = datetime.timedelta(hours=settings.UPDATE_FREQUENCY)
    return True
Example #28
def change_postprocessor_frequency(freq):
    """
    Change frequency of automatic postprocessing thread

    :param freq: New frequency
    """
    sickbeard.AUTOPOSTPROCESSOR_FREQUENCY = try_int(freq, sickbeard.DEFAULT_AUTOPOSTPROCESSOR_FREQUENCY)

    if sickbeard.AUTOPOSTPROCESSOR_FREQUENCY < sickbeard.MIN_AUTOPOSTPROCESSOR_FREQUENCY:
        sickbeard.AUTOPOSTPROCESSOR_FREQUENCY = sickbeard.MIN_AUTOPOSTPROCESSOR_FREQUENCY

    sickbeard.autoPostProcessorScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.AUTOPOSTPROCESSOR_FREQUENCY)
    return True
Example #29
    def _get_size(self, item):
        if isinstance(item, dict):
            size = item.get('size', -1)
        elif isinstance(item, (list, tuple)) and len(item) > 2:
            size = item[2]
        else:
            size = -1

        # Make sure we didn't select seeds/leechers by accident
        if not size or size < 1024 * 1024:
            size = -1

        return try_int(size, -1)
Example #30
def change_daily_search_frequency(freq):
    """
    Change frequency of daily search thread

    :param freq: New frequency
    """
    settings.DAILYSEARCH_FREQUENCY = try_int(freq, settings.DEFAULT_DAILYSEARCH_FREQUENCY)

    if settings.DAILYSEARCH_FREQUENCY < settings.MIN_DAILYSEARCH_FREQUENCY:
        settings.DAILYSEARCH_FREQUENCY = settings.MIN_DAILYSEARCH_FREQUENCY

    settings.dailySearchScheduler.cycleTime = datetime.timedelta(minutes=settings.DAILYSEARCH_FREQUENCY)
    return True
Example #31
def change_postprocessor_frequency(freq):
    """
    Change frequency of automatic postprocessing thread

    :param freq: New frequency
    """
    settings.AUTOPOSTPROCESSOR_FREQUENCY = try_int(freq, settings.DEFAULT_AUTOPOSTPROCESSOR_FREQUENCY)

    if settings.AUTOPOSTPROCESSOR_FREQUENCY < settings.MIN_AUTOPOSTPROCESSOR_FREQUENCY:
        settings.AUTOPOSTPROCESSOR_FREQUENCY = settings.MIN_AUTOPOSTPROCESSOR_FREQUENCY

    settings.autoPostProcessorScheduler.cycleTime = datetime.timedelta(minutes=settings.AUTOPOSTPROCESSOR_FREQUENCY)
    return True
Example #32
def change_update_frequency(freq):
    """
    Change frequency of daily updater thread

    :param freq: New frequency
    """
    sickbeard.UPDATE_FREQUENCY = try_int(freq, sickbeard.DEFAULT_UPDATE_FREQUENCY)

    if sickbeard.UPDATE_FREQUENCY < sickbeard.MIN_UPDATE_FREQUENCY:
        sickbeard.UPDATE_FREQUENCY = sickbeard.MIN_UPDATE_FREQUENCY

    sickbeard.versionCheckScheduler.cycleTime = datetime.timedelta(hours=sickbeard.UPDATE_FREQUENCY)
    return True
Example #33
def change_daily_search_frequency(freq):
    """
    Change frequency of daily search thread

    :param freq: New frequency
    """
    sickbeard.DAILYSEARCH_FREQUENCY = try_int(freq, sickbeard.DEFAULT_DAILYSEARCH_FREQUENCY)

    if sickbeard.DAILYSEARCH_FREQUENCY < sickbeard.MIN_DAILYSEARCH_FREQUENCY:
        sickbeard.DAILYSEARCH_FREQUENCY = sickbeard.MIN_DAILYSEARCH_FREQUENCY

    sickbeard.dailySearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.DAILYSEARCH_FREQUENCY)
    return True
Example #34
def change_backlog_frequency(freq):
    """
    Change frequency of backlog thread

    :param freq: New frequency
    """
    settings.BACKLOG_FREQUENCY = try_int(freq, settings.DEFAULT_BACKLOG_FREQUENCY)

    settings.MIN_BACKLOG_FREQUENCY = settings.get_backlog_cycle_time()
    if settings.BACKLOG_FREQUENCY < settings.MIN_BACKLOG_FREQUENCY:
        settings.BACKLOG_FREQUENCY = settings.MIN_BACKLOG_FREQUENCY

    settings.backlogSearchScheduler.cycleTime = datetime.timedelta(minutes=settings.BACKLOG_FREQUENCY)
    return True
Example #35
def change_backlog_frequency(freq):
    """
    Change frequency of backlog thread

    :param freq: New frequency
    """
    sickbeard.BACKLOG_FREQUENCY = try_int(freq, sickbeard.DEFAULT_BACKLOG_FREQUENCY)

    sickbeard.MIN_BACKLOG_FREQUENCY = sickbeard.get_backlog_cycle_time()
    if sickbeard.BACKLOG_FREQUENCY < sickbeard.MIN_BACKLOG_FREQUENCY:
        sickbeard.BACKLOG_FREQUENCY = sickbeard.MIN_BACKLOG_FREQUENCY

    sickbeard.backlogSearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.BACKLOG_FREQUENCY)
    return True
Example #36
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network)

    hr = 0
    m = 0

    if parsed_time:
        hr = try_int(parsed_time.group('hour'))
        m = try_int(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.datetime.fromordinal(max(try_int(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
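
Note: the meridiem block above is the standard 12-hour to 24-hour conversion. A standalone sketch of just that step (function name hypothetical):

def to_24_hour(hr, meridiem):
    """12 AM -> 0, 12 PM -> 12, 1-11 PM -> 13-23."""
    ap = meridiem[0].lower() if meridiem else ''
    if ap == 'a' and hr == 12:
        hr -= 12
    elif ap == 'p' and hr != 12:
        hr += 12
    return hr

assert to_24_hour(12, 'AM') == 0
assert to_24_hour(12, 'PM') == 12
assert to_24_hour(8, 'PM') == 20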
Example #37
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network)

    hr = 0
    m = 0

    if parsed_time:
        hr = try_int(parsed_time.group('hour'))
        m = try_int(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.datetime.fromordinal(max(try_int(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
Example #38
def checkbox_to_value(option, value_on=True, value_off=False):
    """
    Turns checkbox option 'on' or 'true' to value_on (True)
    any other value returns value_off (False)
    """

    if isinstance(option, list):
        option = option[-1]
    if isinstance(option, str):
        option = option.strip().lower()

    if option in (True, "on", "true", value_on) or try_int(option) > 0:
        return value_on

    return value_off
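
Example calls (illustrative): a list takes its last element, 'on'/'true' and positive numbers map to value_on, everything else to value_off:

assert checkbox_to_value('on') is True
assert checkbox_to_value(['off', 'on']) is True  # last element wins
assert checkbox_to_value('2') is True            # try_int('2') > 0
assert checkbox_to_value('0') is False
assert checkbox_to_value(None) is False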
Example #39
def checkbox_to_value(option, value_on=True, value_off=False):
    """
    Turns checkbox option 'on' or 'true' to value_on (True)
    any other value returns value_off (False)
    """

    if isinstance(option, list):
        option = option[-1]
    if isinstance(option, six.string_types):
        option = six.text_type(option).strip().lower()

    if option in (True, 'on', 'true', value_on) or try_int(option) > 0:
        return value_on

    return value_off
Example #40
def change_showupdate_hour(freq):
    """
    Change frequency of show updater thread

    :param freq: New frequency
    """
    settings.SHOWUPDATE_HOUR = try_int(freq, settings.DEFAULT_SHOWUPDATE_HOUR)

    if settings.SHOWUPDATE_HOUR > 23:
        settings.SHOWUPDATE_HOUR = 0
    elif settings.SHOWUPDATE_HOUR < 0:
        settings.SHOWUPDATE_HOUR = 0

    settings.showUpdateScheduler.start_time = datetime.time(hour=settings.SHOWUPDATE_HOUR)
    return True
Example #41
def checkbox_to_value(option, value_on=True, value_off=False):
    """
    Turns checkbox option 'on' or 'true' to value_on (True)
    any other value returns value_off (False)
    """

    if isinstance(option, list):
        option = option[-1]
    if isinstance(option, six.string_types):
        option = six.text_type(option).strip().lower()

    if option in (True, 'on', 'true', value_on) or try_int(option) > 0:
        return value_on

    return value_off
Example #42
def change_showupdate_hour(freq):
    """
    Change frequency of show updater thread

    :param freq: New frequency
    """
    sickbeard.SHOWUPDATE_HOUR = try_int(freq, sickbeard.DEFAULT_SHOWUPDATE_HOUR)

    if sickbeard.SHOWUPDATE_HOUR > 23:
        sickbeard.SHOWUPDATE_HOUR = 0
    elif sickbeard.SHOWUPDATE_HOUR < 0:
        sickbeard.SHOWUPDATE_HOUR = 0

    sickbeard.showUpdateScheduler.start_time = datetime.time(hour=sickbeard.SHOWUPDATE_HOUR)
    return True
Example #43
    def _retrieve_show_image_urls_from_fanart(self,
                                              show,
                                              img_type,
                                              thumb=False,
                                              season=None):
        types = {
            'poster': fanart.TYPE.TV.POSTER,
            'banner': fanart.TYPE.TV.BANNER,
            'poster_thumb': fanart.TYPE.TV.POSTER,
            'banner_thumb': fanart.TYPE.TV.BANNER,
            'fanart': fanart.TYPE.TV.BACKGROUND,
            'season_poster': fanart.TYPE.TV.SEASONPOSTER,
            'season_banner': fanart.TYPE.TV.SEASONBANNER,
        }

        try:
            if img_type in types:
                request = fanartRequest(
                    apikey=sickbeard.FANART_API_KEY,
                    id=show.indexerid,
                    ws=fanart.WS.TV,
                    type=types[img_type],
                    sort=fanart.SORT.POPULAR,
                    limit=(fanart.LIMIT.ONE,
                           fanart.LIMIT.ALL)[season is not None],
                )

                resp = request.response()
                results = resp[types[img_type]]
                if season:
                    results = filter(
                        lambda x: try_int(x['season'], default_value=None) ==
                        season, results)

                url = results[0]['url']
                if thumb:
                    url = re.sub('/fanart/', '/preview/', url)
                return url
        except Exception as e:
            print(e)

        logger.log(
            "Could not find any " + img_type + " images on Fanart.tv for " +
            show.name, logger.INFO)
Example #44
    def fetch_latest_hot_shows(self):
        """Get popular show information from IMDB"""

        result = []

        shows = anidbquery.query(QUERY_HOT)
        for show in shows:
            try:
                recommended_show = RecommendedShow(
                    show.id, show.titles['x-jat'][0], 1, show.tvdbid,
                    cache_subfolder=self.cache_subfolder,
                    rating=str(show.ratings['temporary']['rating']),
                    votes=str(try_int(show.ratings['temporary']['count'], 0)),
                    image_href=show.url)

                # Check cache or get and save image
                recommended_show.cache_image("http://img7.anidb.net/pics/anime/{0}".format(show.image_path))

                result.append(recommended_show)
            except Exception:
                pass

        return result
Example #45
            def indexer_attribute(indexer=None):
                # A value was passed, probably a string. Check keys and then try to convert it to int and check again
                if indexer is not None and not isinstance(indexer, int):
                    if indexer not in self.indexers:
                        if len(indexer) == 1:
                            check = try_int(indexer, indexer)
                            if check in self.indexers:
                                indexer = check

                    # Loop and find the right index
                    if indexer not in self.indexers:
                        for i in self.indexers:
                            # noinspection PyUnresolvedReferences
                            if indexer in (self.name(i), self.slug(i)):
                                indexer = i

                # If we didn't find it in our available indexers, use the default.
                if not indexer or indexer not in self.indexers:
                    indexer = settings.INDEXER_DEFAULT

                return getattr(self.indexers[indexer], _attribute)
Example #46
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        # TODO Removed to allow Tests to pass... Not sure about removing it
        # if not self.show or not self.show.is_anime:
        #   return results

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode == 'RSS':
                    entries = self.__rssFeed()
                else:
                    entries = self.__getShow(search_string)

                items.extend(entries)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results.extend(items)

        return results
Example #47
    def search(self, search_strings, age=0, ep_obj=None):
        results = []
        if self.show and not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.debug(_("Search Mode: {mode}".format(mode=mode)))

            for search_string in {*search_strings[mode]}:
                if mode == 'RSS':
                    entries = self.__rssFeed()
                else:
                    entries = self.__getShow(search_string)

                items.extend(entries)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results.extend(items)

        return results
Example #48
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode == 'Season':
                    search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                    search_url = self.url + '/search_torrent/' + search_string.replace('.', '-').replace(' ', '-') + '.html'
                else:
                    search_url = self.url + '/torrents_series.html'

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('div', {'class': 'table-responsive'})
                    if torrent_table:
                        torrent_rows = torrent_table.findAll('tr')
                    else:
                        torrent_rows = None

                    if not torrent_rows:
                        continue
                    for result in torrent_rows:
                        try:
                            title = result.find('a').get_text(strip=False).replace("HDTV", "HDTV x264-Torrent9")
                            title = re.sub(r' Saison', ' Season', title, flags=re.I)
                            tmp = result.find("a")['href']
                            download_url = self._retrieve_dllink_from_url(self.url + tmp)
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="seed_ok").get_text(strip=True))
                            leechers = try_int(result.find_all('td')[3].get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find_all('td')[1].get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example #49
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    search_url = self.urls['search'] % quote_plus(search_string)
                    logger.log("Search string: {}".format(search_string), logger.DEBUG)
                else:
                    search_url = self.urls['rss']

                if self.freeleech:
                    search_url = search_url.replace('active=1', 'active=5')

                logger.log("Search URL: {}".format(search_url), logger.DEBUG)

                data = self.get_url(search_url)
                if not data or 'Error' in data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                if data.find('Non abbiamo trovato nulla') != -1:
                    logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    index = data.lower().index('<tbody id="highlighted"')
                except ValueError:
                    logger.log("Could not find table of torrents highlighted", logger.DEBUG)
                    continue

                # data = urllib.unquote(data[index:].encode('utf-8')).decode('utf-8').replace('\t', '')
                data = data[index:]

                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log("No html data parsed from provider", logger.DEBUG)
                        continue

                    torrent_rows = []
                    torrent_table = html.find('table', class_='highlighted')
                    if torrent_table:
                        torrent_rows = torrent_table.find_all('tr')

                    if not torrent_rows:
                        logger.log("Could not find results in returned data", logger.DEBUG)
                        continue

                    # Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
                    labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in torrent_rows[0].find_all('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.findChildren('td')[:len(labels)]
                            if len(cells) < len(labels):
                                continue

                            title = cells[labels.index('Filename')].a.get_text(strip=True)
                            seeders = try_int(cells[labels.index('S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('L')].get_text(strip=True))
                            torrent_size = cells[labels.index('Size')].get_text()

                            size = convert_size(torrent_size) or -1
                            download_url = self.url + '/' + cells[labels.index('Dl')].a['href']
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
Example #50
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    search_url = self.urls['search'] % (quote_plus(search_string.replace('.', ' ')),)
                else:
                    search_url = self.urls['search'] % ''

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url, returns='text')
                if not data or 'please try later' in data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    data = data.split('<div id="information"></div>')[1]
                    index = data.index('<table')
                except ValueError:
                    logger.log("Could not find main torrent table", logger.ERROR)
                    continue
                except IndexError:
                    logger.log("Could not parse data from provider", logger.DEBUG)
                    continue

                html = BeautifulSoup(data[index:], 'html5lib')
                if not html:
                    logger.log("No html data parsed from provider", logger.DEBUG)
                    continue

                torrents = html('tr')
                if not torrents:
                    continue

                # Skip column headers
                for result in torrents[1:]:
                    if len(result.contents) < 10:
                        # skip extraneous rows at the end
                        continue

                    try:
                        dl_href = result.find('a', attrs={'href': re.compile(r'download.php.*')})['href']
                        title = re.search(r'f=(.*)\.torrent', dl_href).group(1).replace('+', '.')
                        download_url = self.urls['base_url'] + dl_href
                        seeders = int(result.find('span', class_='seedy').find('a').text)
                        leechers = int(result.find('span', class_='leechy').find('a').text)
                        torrent_size = re.match(r'.*?([0-9]+,?\.?[0-9]* [KkMmGg]+[Bb]+).*', str(result), re.DOTALL).group(1)
                        size = convert_size(torrent_size) or -1

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)

                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
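The pre-parse trim in this snippet is worth isolating: when a page contains markup so broken that the parser drops the table of interest, cutting everything before a known marker sidesteps the damage. A sketch using the marker string from the code above; the sample page content is invented:

# Everything before the marker div is discarded before parsing.
page = '<broken><<html><div id="information"></div> junk <table><tr><td>ok</td></tr></table>'
try:
    tail = page.split('<div id="information"></div>')[1]
    table_html = tail[tail.index('<table'):]
except (IndexError, ValueError):
    table_html = ''
print(table_html)  # <table><tr><td>ok</td></tr></table>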
Exemplo n.º 51
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'cat[]': ['TV|SD|VOSTFR', 'TV|HD|VOSTFR', 'TV|SD|VF', 'TV|HD|VF', 'TV|PACK|FR', 'TV|PACK|VOSTFR', 'TV|EMISSIONS', 'ANIME'],
            # Both ASC and DESC are available for sort direction
            'way': 'DESC'
        }

        # Units
        units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                # Sorting: Available parameters: ReleaseName, Seeders, Leechers, Snatched, Size
                search_params['order'] = ('Seeders', 'Time')[mode == 'RSS']
                search_params['search'] = re.sub(r'[()]', '', search_string)
                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(class_='torrent_table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                        continue

                    # Catégorie, Release, Date, DL, Size, C, S, L
                    labels = [label.get_text(strip=True) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result('td')
                        if len(cells) < len(labels):
                            continue

                        try:
                            title = cells[labels.index('Release')].get_text(strip=True)
                            download_url = urljoin(self.url, cells[labels.index('DL')].find('a', class_='tooltip')['href'])
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(cells[labels.index('S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('L')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            size_index = labels.index('Size') if 'Size' in labels else labels.index('Taille')
                            torrent_size = cells[size_index].get_text()
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
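The line search_params['order'] = ('Seeders', 'Time')[mode == 'RSS'] above relies on booleans being valid sequence indices (False is 0, True is 1), a compact conditional that recurs in several of these snippets:

# Boolean-indexed tuple: picks 'Time' only when mode == 'RSS'.
mode = 'RSS'
order = ('Seeders', 'Time')[mode == 'RSS']
assert order == 'Time'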
Exemplo n.º 52
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'do': 'search',
            'include_dead_torrents': 'no',
            'search_type': 't_name',
            'category': 0,
            'keywords': ''
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            td_title = ''
            if td.img:
                td_title = td.img.get('title', td.get_text(strip=True))
            if not td_title:
                td_title = td.get_text(strip=True)
            return td_title

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)
                    search_params['keywords'] = search_string

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='sortabletable')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            title = result.find('div', class_='tooltip-target').get_text(strip=True)
                            # skip if torrent has been nuked due to poor quality
                            if title.startswith('Nuked.'):
                                continue
                            download_url = result.find(
                                'img', title='Click to Download this Torrent in SSL!').parent['href']
                            if not all([title, download_url]):
                                continue

                            cells = result('td')
                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the"
                                               " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index('Size')].get_text(strip=True)
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders,
                                    'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
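process_column_header above prefers the img title because icon-only header cells have no text, so get_text() alone would yield an empty label. A sketch with invented markup (html.parser is used here because it keeps a bare td that html5lib would discard):

from bs4 import BeautifulSoup

td = BeautifulSoup('<td><img title="Seeders"/></td>', 'html.parser').td
td_title = td.img.get('title', td.get_text(strip=True)) if td.img else td.get_text(strip=True)
print(td_title)  # Seeders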
Exemplo n.º 53
0
    def search(self, search_strings, age=0, ep_obj=None):
        results = []
        if not self._check_auth():
            return results

        search_params = {
            'passkey': self.api_key
        }
        if self.freeleech:
            search_params['freeleech'] = 1

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log('Search string: ' + search_string.strip(), logger.DEBUG)
                    search_params['search'] = search_string
                else:
                    search_params.pop('search', '')

                jdata = self.get_url(self.urls['search'], params=search_params, returns='json')
                if not jdata:
                    logger.log('No data returned from provider', logger.DEBUG)
                    continue

                error_code = jdata.pop('error', {})
                if error_code.get('code'):
                    if error_code.get('code') != 2:
                        logger.log('{0}'.format(error_code.get('descr', 'Error code {0} - no description available'.format(error_code.get('code')))), logger.WARNING)
                        return results
                    continue

                account_ok = jdata.pop('user', {}).get('can_leech')
                if not account_ok:
                    logger.log('Sorry, your account is not allowed to download, check your ratio', logger.WARNING)
                    return results

                torrents = jdata.pop('torrents', None)
                if not torrents:
                    logger.log('Provider has no results for this search', logger.DEBUG)
                    continue

                for torrent in torrents:
                    try:
                        title = torrent.get('name')
                        download_url = torrent.get('download_link')
                        if not (title and download_url):
                            continue

                        seeders = torrent.get('seeders')
                        leechers = torrent.get('leechers')
                        if not seeders and mode != 'RSS':
                            logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                       (title, seeders, leechers), logger.DEBUG)
                            continue

                        size = torrent.get('size') or -1
                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}

                        if mode != 'RSS':
                            logger.log('Found result: {0} with {1} seeders and {2} leechers'.format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
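This JSON provider probes optional keys with dict.pop and a default, which never raises and removes the key once read, leaving only the torrent entries behind. A sketch with an invented payload:

# pop(key, default) returns the default when the key is absent.
jdata = {'torrents': [{'name': 'Show.S01E01', 'seeders': 4}]}
error_code = jdata.pop('error', {})     # {} -> error_code.get('code') is None
torrents = jdata.pop('torrents', None)  # the torrent list, or None
print('{0} {1}'.format(error_code.get('code'), len(torrents or [])))  # None 1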
Exemplo n.º 54
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        freeleech = '2' if self.freeleech else '0'

        # Search Params
        # c59=1&c73=1&c5=1&c41=1&c60=1&c66=1&c65=1&c67=1&c62=1&c64=1&c61=1&search=Good+Behavior+S01E01&cat=0&incldead=0&freeleech=0&lang=0
        search_params = {
            'c5': '1',  # Category: Series - DVDRip
            'c41': '1',  # Category: Series - HD
            'c60': '1',  # Category: Series - Pack TV
            'c62': '1',  # Category: Series - BDRip
            'c64': '1',  # Category: Series - VOSTFR
            'c65': '1',  # Category: Series - TV 720p
            'c66': '1',  # Category: Series - TV 1080p
            'c67': '1',  # Category: Series - Pack TV HD
            'c73': '1',  # Category: Anime
            'incldead': '0',  # Include dead torrents - 0: off 1: yes 2: only dead
            'freeleech': freeleech,  # Freeleech only - 0: off 1: no freeleech 2: only freeleech
            'lang': '0'  # Language - 0: off 1: English 2: French ....
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                logger.log('Search String: {0} for mode {1}'.format(search_string, mode), logger.DEBUG)
                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                search_params['search'] = re.sub(r'[()]', '', search_string)
                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(class_='ttable_headinner')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                        continue

                    # Catégorie, Release, Date, DL, Size, C, S, L
                    labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result('td')
                        if len(cells) < len(labels):
                            continue

                        try:
                            torrent_id = re.search('id=([0-9]+)', cells[labels.index('Nom')].find('a')['href']).group(1)
                            title = cells[labels.index('Nom')].get_text(strip=True)
                            download_url = urljoin(self.urls['download'], '?id={0}&name={1}'.format(torrent_id, title))
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(cells[labels.index('S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('L')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            size_index = labels.index('Size') if 'Size' in labels else labels.index('Taille')
                            torrent_size = cells[size_index].get_text()
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
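The download link above is assembled with urljoin against a query-only relative URL, which replaces the base URL's query string while keeping its path. A sketch with invented values (urljoin lives in urlparse on Python 2 and urllib.parse on Python 3):

try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin      # Python 2

base = 'https://tracker.example/download.php'  # hypothetical download endpoint
download_url = urljoin(base, '?id={0}&name={1}'.format('12345', 'Show.S01E01'))
print(download_url)  # https://tracker.example/download.php?id=12345&name=Show.S01E01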
Exemplo n.º 55
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        search_params = {
            'out': 'json',
            'filter': 2101,
            'showmagnets': 'on',
            'num': 50
        }

        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_params['s'] = search_string

                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log("Invalid custom url set, please check your settings", logger.WARNING)
                        return results
                    search_url = self.custom_url
                else:
                    search_url = self.url

                torrents = self.get_url(search_url, params=search_params, returns='json')
                if not (torrents and "total_found" in torrents and int(torrents["total_found"]) > 0):
                    logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                del torrents["total_found"]

                for i in torrents:
                    title = torrents[i]["title"]
                    seeders = try_int(torrents[i]["seeds"], 1)
                    leechers = try_int(torrents[i]["leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log("Torrent doesn't meet minimum seeds & leechers not selecting : {0}".format(title), logger.DEBUG)
                        continue

                    t_hash = torrents[i]["torrent_hash"]
                    torrent_size = torrents[i]["torrent_size"]
                    if not all([t_hash, torrent_size]):
                        continue
                    download_url = torrents[i]["magnet"] + self._custom_trackers
                    size = convert_size(torrent_size) or -1

                    if not all([title, download_url]):
                        continue

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}

                    if mode != 'RSS':
                        logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                   (title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
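Unlike the HTML scrapers, this provider returns magnet links, and _custom_trackers is appended verbatim, so it is presumably a pre-encoded '&tr=...' suffix. A sketch with an invented info-hash and tracker URL:

t_hash = '0123456789abcdef0123456789abcdef01234567'  # invented info-hash
custom_trackers = '&tr=udp%3A%2F%2Ftracker.example%3A80%2Fannounce'  # invented suffix
download_url = 'magnet:?xt=urn:btih:{0}'.format(t_hash) + custom_trackers
print(download_url)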
Exemplo n.º 56
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        search_params = {
            "Episode": {"c33": 1, "c38": 1, "c32": 1, "c37": 1},
            "Season": {"c41": 1}
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != "RSS":
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_params[mode]["search"] = search_string
                data = self.get_url(self.urls["search"], params=search_params[mode], returns="text")
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    torrent_table = html.find("table", border="1")
                    torrent_rows = torrent_table("tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    # "Type", "Name", Files", "Comm.", "Added", "TTL", "Size", "Snatched", "Seeders", "Leechers"
                    labels = [label.get_text(strip=True) for label in torrent_rows[0]("td")]

                    for result in torrent_rows[1:]:
                        try:
                            cells = result("td")

                            download_url = urljoin(self.url, cells[labels.index("Name")].find("a", href=re.compile(r"download.php\?id="))["href"])
                            title_element = cells[labels.index("Name")].find("a", href=re.compile(r"details.php\?id="))
                            title = title_element.get("title", "") or title_element.get_text(strip=True)
                            if not all([title, download_url]):
                                continue

                            if self.freeleech:
                                # Free leech torrents are marked with green [F L] in the title (i.e. <font color=green>[F&nbsp;L]</font>)
                                freeleech = cells[labels.index("Name")].find("font", color="green")
                                if not freeleech or freeleech.get_text(strip=True) != "[F\xa0L]":
                                    continue

                            seeders = try_int(cells[labels.index("Seeders")].get_text(strip=True))
                            leechers = try_int(cells[labels.index("Leechers")].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            # Need size for failed downloads handling
                            torrent_size = cells[labels.index("Size")].get_text(strip=True)
                            size = convert_size(torrent_size) or -1
                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}

                            if mode != "RSS":
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except (AttributeError, TypeError, ValueError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
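The freeleech check above compares against '[F\xa0L]' because the green tag holds a non-breaking space between F and L, so a comparison against a plain '[F L]' would silently fail. A sketch with invented markup:

from bs4 import BeautifulSoup

cell = BeautifulSoup('<td><font color="green">[F&nbsp;L]</font></td>', 'html.parser').td
freeleech = cell.find('font', color='green')
print(freeleech.get_text(strip=True) == u'[F\xa0L]')  # True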
Exemplo n.º 57
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self._check_auth():
            return results

        search_params = {
            'tv': 'true',
            'username': self.username,
            'apikey': self.api_key
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if self.freeleech:
                    search_params['fl'] = 'true'
                else:
                    search_params.pop('fl', '')

                if mode != 'RSS':
                    logger.log("Search string: " + search_string.strip(), logger.DEBUG)
                    search_params['search'] = search_string
                else:
                    search_params.pop('search', '')

                try:
                    jdata = self.get_url(self.urls['search'], params=search_params, returns='json')
                except ValueError:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                if not jdata:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                error = jdata.get('error')
                if error:
                    logger.log("{}".format(error), logger.DEBUG)
                    return results

                try:
                    if jdata['0']['total_results'] == 0:
                        logger.log("Provider has no results for this search", logger.DEBUG)
                        continue
                except StandardError:
                    continue

                for i in jdata:
                    try:
                        title = jdata[i]["release_name"]
                        download_url = jdata[i]["download_url"]
                        if not all([title, download_url]):
                            continue

                        seeders = jdata[i]["seeders"]
                        leechers = jdata[i]["leechers"]
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        torrent_size = str(jdata[i]["size"]) + ' MB'
                        size = convert_size(torrent_size) or -1
                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}

                        if mode != 'RSS':
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
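This API reports sizes as a bare number of megabytes, so the snippet rebuilds a unit-bearing string before handing it to convert_size, which (as used throughout these examples) is assumed to parse 'value unit' strings into a byte count:

size_mb = 700  # invented payload value
torrent_size = str(size_mb) + ' MB'
# convert_size(torrent_size) would then yield the byte count, or -1 via the
# `or -1` fallback when parsing fails.
print(torrent_size)  # 700 MB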
Exemplo n.º 58
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # https://www.thegft.org/browse.php?view=0&c26=1&c37=1&c19=1&c47=1&c17=1&c4=1&search=arrow
        # Search Params
        search_params = {
            'view': 0,  # BROWSE
            'c4': 1,  # TV/XVID
            'c17': 1,  # TV/X264
            'c19': 1,  # TV/DVDRIP
            'c26': 1,  # TV/BLURAY
            'c37': 1,  # TV/DVDR
            'c47': 1,  # TV/SD
            'search': '',
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.a and td.a.img:
                result = td.a.img.get('title', td.a.get_text(strip=True))
            if not result:
                result = td.get_text(strip=True)
            return result

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_params['search'] = search_string

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('div', id='torrentBrowse')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:

                        try:
                            cells = result('td')

                            title = cells[labels.index('Name')].find('a').find_next('a')['title'] or cells[labels.index('Name')].find('a')['title']
                            download_url = self.url + cells[labels.index('DL')].find('a')['href']
                            if not all([title, download_url]):
                                continue

                            peers = cells[labels.index('S/L')].get_text(strip=True).split('/', 1)
                            seeders = try_int(peers[0])
                            leechers = try_int(peers[1])

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the"
                                               " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index('Size/Snatched')].get_text(strip=True).split('/', 1)[0]
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
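This tracker packs seeders and leechers into one 'S/L' cell, hence the split('/', 1) above. A sketch with invented cell text:

peers_text = '12/3'  # invented 'S/L' cell contents
peers = peers_text.split('/', 1)
seeders, leechers = int(peers[0]), int(peers[1])
print('{0} seeders, {1} leechers'.format(seeders, leechers))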
Exemplo n.º 59
0
    def _get_size(self, item):
        return try_int(item['sizebytes'], -1)
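try_int is used throughout these snippets but never shown; from its call sites it behaves as a best-effort int() with a fallback default. A compatible sketch (an assumption for illustration, not the project's actual helper):

def try_int(value, default=0):
    # Best-effort integer conversion; falls back to the default on failure.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

print('{0} {1}'.format(try_int('1024', -1), try_int(None, -1)))  # 1024 -1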