Example 1
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})
                url = self.urls['rss'] if not self.custom_url else self.urls['rss'].replace(self.urls['index'], self.custom_url)
                data = self.get_url(url, params=self.search_params)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith('<?xml'):
                    logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                    continue

                with BS4Parser(data, 'html5lib') as parser:
                    for item in parser.findAll('item'):
                        try:
                            title = re.sub(r'^<!\[CDATA\[|\]\]>$', '', item.find('title').get_text(strip=True))
                            seeders = try_int(item.find('seeders').get_text(strip=True))
                            leechers = try_int(item.find('leechers').get_text(strip=True))
                            torrent_size = item.find('size').get_text()
                            size = convert_size(torrent_size) or -1

                            if sickbeard.TORRENT_METHOD == 'blackhole':
                                enclosure = item.find('enclosure')  # Backlog entries don't have an enclosure
                                download_url = enclosure['url'] if enclosure else item.find('link').next.strip()
                                download_url = re.sub(r'(.*)/torrent/(.*).html', r'\1/download/\2.torrent', download_url)
                            else:
                                info_hash = item.find('info_hash').get_text(strip=True)
                                download_url = "magnet:?xt=urn:btih:" + info_hash + "&dn=" + title + self._custom_trackers

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s " % title, logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
Example 2
    def search(self, search_strings, age=0, ep_obj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                try:
                    self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})

                    url = self.urls['rss'] if not self.custom_url else self.urls['rss'].replace(self.urls['index'], self.custom_url)

                    data = self.get_url(url, params=self.search_params)
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                        continue

                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser.findAll('item'):
                            title = re.sub(r'^<!\[CDATA\[|\]\]>$', '', item.find('title').text)
                            # info_hash = item.get('info_hash', '')
                            size = try_int(item.find('size').text, -1)
                            seeders = try_int(item.find('seeders').text)
                            leechers = try_int(item.find('leechers').text)
                            enclosure = item.find('enclosure')
                            download_url = enclosure['url'] if enclosure else self._magnet_from_details(item.find('link').text)

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title, logger.DEBUG)

                            items[mode].append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log(u"Failed parsing provider. Traceback: %r" % traceback.format_exc(), logger.WARNING)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
Example 3
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode == 'Season':
                    search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                    search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html(class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                            title = re.sub(r' Saison', ' Season', title, flags=re.I)
                            tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url + '/telechargement/{0}'.format(tmp))
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="up").get_text(strip=True))
                            leechers = try_int(result.find(class_="down").get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find(class_="poid").get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example 4
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)
                    search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html.find_all(class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                            title = re.sub(r' Saison', ' Season', title, flags=re.IGNORECASE)
                            tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url + '/telechargement/%s' % tmp)
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="up").get_text(strip=True))
                            leechers = try_int(result.find(class_="down").get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find(class_="poid").get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
Example 5
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = try_int(mo.group(1))
                m = try_int(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            try:
                hr = try_int(mo.group(1))
                m = try_int(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(try_int(d) or 1)
    try:
        foreign_timezone = get_network_timezone(network, network_dict)
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
    except Exception:
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sb_timezone)
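The helper above addresses time_regex purely by group number: group 1 is the hour, group 4 the minute in the am/pm branch, group 5 the meridiem, and group 6 the minute in the plain 24-hour branch. A pattern of roughly the following shape would satisfy those indices. This is a hedged reconstruction for illustration only, not the project's actual time_regex; its am_regex/pm_regex are likewise assumed to match 'a'/'p' markers:

import re

# Hypothetical shape of time_regex, inferred from the group numbers used above:
# group 1 = hour, group 4 = minute before an am/pm marker, group 5 = the marker,
# group 6 = minute in the bare 24-hour form such as "20:30". The real pattern may differ.
time_regex = re.compile(r'(\d{1,2})(([:.](\d{2}))?\s?([ap])\.?\s?m\.?|[:.](\d{2}))', re.I)
am_regex = re.compile(r'a', re.I)  # assumption: an 'a' marks am
pm_regex = re.compile(r'p', re.I)  # assumption: a 'p' marks pm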
Example 6
    def fetch_latest_hot_shows(self):
        """Get popular show information from IMDB"""

        shows = []
        result = []

        shows = anidbquery.query(QUERY_HOT)
        for show in shows:
            try:
                recommended_show = RecommendedShow(
                    show.id,
                    show.titles["x-jat"][0],
                    1,
                    show.tvdbid,
                    cache_subfolder=self.cache_subfolder,
                    rating=str(show.ratings["temporary"]["rating"]),
                    votes=str(try_int(show.ratings["temporary"]["count"], 0)),
                    image_href=show.url,
                )

                # Check cache or get and save image
                recommended_show.cache_image("http://img7.anidb.net/pics/anime/{0}".format(show.image_path))

                result.append(recommended_show)
            except Exception:
                pass

        return result
Example 7
    def test_try_int(self):
        """
        Test try int
        """
        test_cases = {
            None: 0,
            '': 0,
            '123': 123,
            '-123': -123,
            '12.3': 0,
            '-12.3': 0,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            u'': 0,
            u'123': 123,
            u'-123': -123,
            u'12.3': 0,
            u'-12.3': 0,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in test.iteritems():
                self.assertEqual(try_int(candidate), result)
Example 8
    def test_try_int_with_default(self):
        """
        Test try int
        """
        default_value = 42
        test_cases = {
            None: default_value,
            '': default_value,
            '123': 123,
            '-123': -123,
            '12.3': default_value,
            '-12.3': default_value,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            u'': default_value,
            u'123': 123,
            u'-123': -123,
            u'12.3': default_value,
            u'-12.3': default_value,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in test.iteritems():
                self.assertEqual(try_int(candidate, default_value), result)
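Taken together, these two tests pin down try_int's contract: real ints pass through, floats truncate toward zero, and anything int() rejects (None, '', '12.3') falls back to the default. A minimal sketch consistent with the cases above, assuming nothing beyond them; the project's actual helper may differ in detail:

def try_int(candidate, default_value=0):
    """Convert candidate to an int, returning default_value on failure.
    Sketch inferred from the tests above, not the real implementation."""
    try:
        return int(candidate)
    except (TypeError, ValueError):
        return default_value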
Example 9
    def index(self, limit=None):

        if limit is None:
            if sickbeard.HISTORY_LIMIT:
                limit = int(sickbeard.HISTORY_LIMIT)
            else:
                limit = 100
        else:
            limit = try_int(limit, 100)

        sickbeard.HISTORY_LIMIT = limit

        sickbeard.save_config()

        history = self.history.get(limit)

        t = PageTemplate(rh=self, filename='history.mako')
        submenu = [
            {'title': 'Clear History', 'path': 'history/clearHistory', 'icon': 'ui-icon ui-icon-trash', 'class': 'clearhistory', 'confirm': True},
            {'title': 'Trim History', 'path': 'history/trimHistory', 'icon': 'menu-icon-cut', 'class': 'trimhistory', 'confirm': True},
        ]

        return t.render(historyResults=history.detailed, compactResults=history.compact, limit=limit,
                        submenu=submenu, title='History', header='History',
                        topmenu='history', controller='history', action='index')
Example 10
    def _verify_added(self, torrent_hash, attempts=5):
        self.url = self.host + 'query/propertiesGeneral/' + torrent_hash.lower()
        for i in range(attempts):
            if self._request(method='get', cookies=self.session.cookies):
                if try_int(self.response.headers.get('Content-Length')) > 0:
                    return True
            sleep(2)
        return False
Example 11
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                # Feed verified does not exist on this clone
                # search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                search_url = self.urls['feed']
                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url, params={'f': search_string}, returns='text')
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log(u"Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser('item'):
                            if item.category and 'tv' not in item.category.get_text(strip=True).lower():
                                continue

                            title = item.title.get_text(strip=True)
                            t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
                            items.append(result)
                except StandardError:
                    logger.log(u"Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example 12
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        """Start searching for anime using the provided search_strings. Used for backlog and daily"""
        results = []
        if self.show and not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log('Search mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log('Search string: {search}'.format
                               (search=search_string), logger.DEBUG)

                search_url = (self.urls['rss'], self.urls['api'] + search_string)[mode != 'RSS']
                data = self.get_url(search_url, returns='text')
                if not data:
                    logger.log('No data returned from provider', logger.DEBUG)
                    continue

                if not data.startswith('<?xml'):
                    logger.log('Expected xml but got something else, is your mirror failing?', logger.INFO)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    entries = html('item')
                    if not entries:
                        logger.log('Returned xml contained no results', logger.INFO)
                        continue

                    for item in entries:
                        try:
                            title = item.title.get_text(strip=True)
                            download_url = item.enclosure.get('url').strip()
                            if not (title and download_url):
                                continue

                            # description = item.find('description')
                            size = try_int(item.enclosure.get('length', -1))

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                            }

                            items.append(item)
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            logger.log('Failed parsing provider. Traceback: {0!r}'.format
                                       (traceback.format_exc()), logger.ERROR)
                            continue

            results += items

        return results
Example 13
    def vres(self):
        """
        The vertical resolution found in the name

        :returns: None if not found
        """
        attr = 'res'
        match = self._get_match_obj(attr)
        return None if not match else try_int(match.group('vres'))
Example 14
def change_SUBTITLES_FINDER_FREQUENCY(subtitles_finder_frequency):
    """
    Change frequency of subtitle thread

    :param subtitles_finder_frequency: New frequency
    """
    if subtitles_finder_frequency == '' or subtitles_finder_frequency is None:
        subtitles_finder_frequency = 1

    sickbeard.SUBTITLES_FINDER_FREQUENCY = try_int(subtitles_finder_frequency, 1)
Example 15
    def _get_size(self, item):
        try:
            size = item.get('links')[1].get('length', -1)
        except (AttributeError, IndexError, TypeError):
            size = -1

        if not size:
            logger.log('The size was not found in the provider response', logger.DEBUG)

        return try_int(size, -1)
Example 16
def min_max(val, default, low, high):
    """ Return value forced within range """

    val = try_int(val, default)

    if val < low:
        return low
    if val > high:
        return high

    return val
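A quick illustration of min_max with hypothetical values, showing how try_int and the clamp combine:

min_max('250', default=25, low=1, high=100)  # -> 100 (parsed, then clamped to high)
min_max('abc', default=25, low=1, high=100)  # -> 25  (try_int falls back to the default)
min_max(50, default=25, low=1, high=100)     # -> 50  (already within range)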
Example 17
    def __init__(self, indexer_id, media_format='normal'):
        """
        :param indexer_id: The indexer id of the show
        :param media_format: The format of the media to get. Must be either 'normal' or 'thumb'
        """

        self.indexer_id = try_int(indexer_id, 0)

        if media_format in ('normal', 'thumb'):
            self.media_format = media_format
        else:
            self.media_format = 'normal'
Example 18
def change_UPDATE_FREQUENCY(freq):
    """
    Change frequency of daily updater thread

    :param freq: New frequency
    """
    sickbeard.UPDATE_FREQUENCY = try_int(freq, sickbeard.DEFAULT_UPDATE_FREQUENCY)

    if sickbeard.UPDATE_FREQUENCY < sickbeard.MIN_UPDATE_FREQUENCY:
        sickbeard.UPDATE_FREQUENCY = sickbeard.MIN_UPDATE_FREQUENCY

    sickbeard.versionCheckScheduler.cycleTime = datetime.timedelta(hours=sickbeard.UPDATE_FREQUENCY)
Example 19
def change_DAILYSEARCH_FREQUENCY(freq):
    """
    Change frequency of daily search thread

    :param freq: New frequency
    """
    sickbeard.DAILYSEARCH_FREQUENCY = try_int(freq, sickbeard.DEFAULT_DAILYSEARCH_FREQUENCY)

    if sickbeard.DAILYSEARCH_FREQUENCY < sickbeard.MIN_DAILYSEARCH_FREQUENCY:
        sickbeard.DAILYSEARCH_FREQUENCY = sickbeard.MIN_DAILYSEARCH_FREQUENCY

    sickbeard.dailySearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.DAILYSEARCH_FREQUENCY)
Example 20
    def retrieveShowMetadata(self, folder):
        """
        Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.
        """

        empty_return = (None, None, None)

        assert isinstance(folder, unicode)

        metadata_path = ek(os.path.join, folder, self._show_metadata_filename)

        if not ek(os.path.isdir, folder) or not ek(os.path.isfile, metadata_path):
            logger.log(u"Can't load the metadata file from " + metadata_path + ", it doesn't exist", logger.DEBUG)
            return empty_return

        logger.log(u"Loading show info from metadata file in " + metadata_path, logger.DEBUG)

        try:
            with io.open(metadata_path, 'rb') as xmlFileObj:
                showXML = etree.ElementTree(file=xmlFileObj)

            if showXML.findtext('title') is None or (showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
                logger.log(u"Invalid info in tvshow.nfo (missing name or id): {0} {1} {2}".format(showXML.findtext('title'), showXML.findtext('tvdbid'), showXML.findtext('id')))
                return empty_return

            name = showXML.findtext('title')

            indexer_id_text = showXML.findtext('tvdbid') or showXML.findtext('id')
            if indexer_id_text:
                indexer_id = try_int(indexer_id_text, None)
                if indexer_id is None or indexer_id < 1:
                    logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file", logger.DEBUG)
                    return empty_return
            else:
                logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find a ID, not using metadata file", logger.DEBUG)
                return empty_return

            indexer = 1
            epg_url_text = showXML.findtext('episodeguide/url')
            if epg_url_text:
                epg_url = epg_url_text.lower()
                if str(indexer_id) in epg_url:
                    if 'tvrage' in epg_url:
                        logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file because it has TVRage info", logger.WARNING)
                        return empty_return

        except Exception as e:
            logger.log(
                u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
                logger.WARNING)
            return empty_return

        return indexer_id, name, indexer
Example 21
    def get_filtered_items_from_torrents(self, mode, torrents):
        items = []
        for torrent in torrents:
            if mode == 'RSS' and 'category' in torrent and try_int(torrent['category'], 0) not in self.subcategories:
                continue

            try:
                title = torrent['name']
                torrent_id = torrent['id']
                download_url = (self.urls['download'] % torrent_id).encode('utf8')
                if not all([title, download_url]):
                    continue

                seeders = try_int(torrent['seeders'])
                leechers = try_int(torrent['leechers'])
                verified = bool(torrent['isVerified'])
                torrent_size = torrent['size']

                # Filter unseeded torrent
                if seeders < self.minseed or leechers < self.minleech:
                    if mode != 'RSS':
                        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                    continue

                if self.confirmed and not verified and mode != 'RSS':
                    logger.log(u"Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                    continue

                size = convert_size(torrent_size) or -1
                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                if mode != 'RSS':
                    logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                items.append(item)

            except Exception:
                logger.log(u"Invalid torrent data, skipping result: {0}".format(torrent), logger.DEBUG)
                logger.log(u"Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.DEBUG)
              
        return items
Example 22
def change_BACKLOG_FREQUENCY(freq):
    """
    Change frequency of backlog thread

    :param freq: New frequency
    """
    sickbeard.BACKLOG_FREQUENCY = try_int(freq, sickbeard.DEFAULT_BACKLOG_FREQUENCY)

    sickbeard.MIN_BACKLOG_FREQUENCY = sickbeard.get_backlog_cycle_time()
    if sickbeard.BACKLOG_FREQUENCY < sickbeard.MIN_BACKLOG_FREQUENCY:
        sickbeard.BACKLOG_FREQUENCY = sickbeard.MIN_BACKLOG_FREQUENCY

    sickbeard.backlogSearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.BACKLOG_FREQUENCY)
Example 23
def change_AUTOPOSTPROCESSER_FREQUENCY(freq):
    """
    Change frequency of automatic postprocessing thread
    TODO: Make all thread frequency changers in config.py return True/False status

    :param freq: New frequency
    """
    sickbeard.AUTOPOSTPROCESSER_FREQUENCY = try_int(freq, sickbeard.DEFAULT_AUTOPOSTPROCESSER_FREQUENCY)

    if sickbeard.AUTOPOSTPROCESSER_FREQUENCY < sickbeard.MIN_AUTOPOSTPROCESSER_FREQUENCY:
        sickbeard.AUTOPOSTPROCESSER_FREQUENCY = sickbeard.MIN_AUTOPOSTPROCESSER_FREQUENCY

    sickbeard.autoPostProcesserScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.AUTOPOSTPROCESSER_FREQUENCY)
Example 24
    def _get_size(self, item):
        if isinstance(item, dict):
            size = item.get('size', -1)
        elif isinstance(item, (list, tuple)) and len(item) > 2:
            size = item[2]
        else:
            size = -1

        # Make sure we didn't select seeds/leechers by accident
        if not size or size < 1024 * 1024:
            size = -1

        return try_int(size, -1)
Example 25
def change_postprocessor_frequency(freq):
    """
    Change frequency of automatic postprocessing thread

    :param freq: New frequency
    """
    sickbeard.AUTOPOSTPROCESSOR_FREQUENCY = try_int(freq, sickbeard.DEFAULT_AUTOPOSTPROCESSOR_FREQUENCY)

    if sickbeard.AUTOPOSTPROCESSOR_FREQUENCY < sickbeard.MIN_AUTOPOSTPROCESSOR_FREQUENCY:
        sickbeard.AUTOPOSTPROCESSOR_FREQUENCY = sickbeard.MIN_AUTOPOSTPROCESSOR_FREQUENCY

    sickbeard.autoPostProcessorScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.AUTOPOSTPROCESSOR_FREQUENCY)
    return True
Example 26
def change_update_frequency(freq):
    """
    Change frequency of daily updater thread

    :param freq: New frequency
    """
    sickbeard.UPDATE_FREQUENCY = try_int(freq, sickbeard.DEFAULT_UPDATE_FREQUENCY)

    if sickbeard.UPDATE_FREQUENCY < sickbeard.MIN_UPDATE_FREQUENCY:
        sickbeard.UPDATE_FREQUENCY = sickbeard.MIN_UPDATE_FREQUENCY

    sickbeard.versionCheckScheduler.cycleTime = datetime.timedelta(hours=sickbeard.UPDATE_FREQUENCY)
    return True
Example 27
def change_postprocessor_frequency(freq):
    """
    Change frequency of automatic postprocessing thread

    :param freq: New frequency
    """
    sickbeard.AUTOPOSTPROCESSOR_FREQUENCY = try_int(freq, sickbeard.DEFAULT_AUTOPOSTPROCESSOR_FREQUENCY)

    if sickbeard.AUTOPOSTPROCESSOR_FREQUENCY < sickbeard.MIN_AUTOPOSTPROCESSOR_FREQUENCY:
        sickbeard.AUTOPOSTPROCESSOR_FREQUENCY = sickbeard.MIN_AUTOPOSTPROCESSOR_FREQUENCY

    sickbeard.autoPostProcessorScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.AUTOPOSTPROCESSOR_FREQUENCY)
    return True
Example 28
def change_daily_search_frequency(freq):
    """
    Change frequency of daily search thread

    :param freq: New frequency
    """
    sickbeard.DAILYSEARCH_FREQUENCY = try_int(freq, sickbeard.DEFAULT_DAILYSEARCH_FREQUENCY)

    if sickbeard.DAILYSEARCH_FREQUENCY < sickbeard.MIN_DAILYSEARCH_FREQUENCY:
        sickbeard.DAILYSEARCH_FREQUENCY = sickbeard.MIN_DAILYSEARCH_FREQUENCY

    sickbeard.dailySearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.DAILYSEARCH_FREQUENCY)
    return True
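Examples 18, 19, 22, 23 and 25 through 28 all repeat the same shape: parse the new value with try_int, clamp it to a configured minimum, then push the new cycle time onto the scheduler. A hypothetical generalization of that shared pattern, not a helper that exists in the codebase (the attribute and scheduler are caller-supplied):

def _change_frequency(attr, freq, default, minimum, scheduler, unit='minutes'):
    """Hypothetical sketch of the clamp-and-reschedule pattern used above."""
    value = max(try_int(freq, default), minimum)  # parse, then clamp to the minimum
    setattr(sickbeard, attr, value)
    scheduler.cycleTime = datetime.timedelta(**{unit: value})
    return True

change_update_frequency, for instance, would reduce to _change_frequency('UPDATE_FREQUENCY', freq, sickbeard.DEFAULT_UPDATE_FREQUENCY, sickbeard.MIN_UPDATE_FREQUENCY, sickbeard.versionCheckScheduler, unit='hours').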
Example 29
    def _get_size(self, item):
        size = item.get('sizebytes', -1)

        # Try to get the size from the summary tag
        if size == -1:
            # Units
            units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
            summary = item.get('summary')
            if summary:
                size_match = re.search(ur'Size[^\d]*([0-9.]*.[A-Z]*)', summary)
                size = convert_size(size_match.group(1), units=units) or -1 if size_match else -1

        return try_int(size)
Example 30
    def _get_size(self, item):
        if isinstance(item, dict):
            size = item.get('size', -1)
        elif isinstance(item, (list, tuple)) and len(item) > 2:
            size = item[2]
        else:
            size = -1

        # Make sure we didn't select seeds/leechers by accident
        if not size or size < 1024 * 1024:
            size = -1

        return try_int(size, -1)
Example 31
    def providers_list(data):
        default_list = [
            x for x in (
                NewznabProvider._make_provider(x)
                for x in NewznabProvider._get_default_providers().split('!!!'))
            if x
        ]
        providers_list = [
            x for x in (NewznabProvider._make_provider(x)
                        for x in data.split('!!!')) if x
        ]
        seen_values = set()
        providers_set = []

        for provider in providers_list:
            value = provider.name

            if value not in seen_values:
                providers_set.append(provider)
                seen_values.add(value)

        providers_list = providers_set
        providers_dict = dict(
            zip([x.name for x in providers_list], providers_list))

        for default in default_list:
            if not default:
                continue

            if default.name not in providers_dict:
                default.default = True
                providers_list.append(default)
            else:
                providers_dict[default.name].default = True
                providers_dict[default.name].name = default.name
                providers_dict[default.name].url = default.url
                providers_dict[default.name].needs_auth = default.needs_auth
                providers_dict[default.name].search_mode = default.search_mode
                providers_dict[default.name].search_fallback = default.search_fallback
                providers_dict[default.name].enable_daily = default.enable_daily
                providers_dict[default.name].enable_backlog = default.enable_backlog
                providers_dict[default.name].catIDs = ','.join([
                    x for x in providers_dict[default.name].catIDs.split(',')
                    if 5000 <= try_int(x) <= 5999
                ]) or default.catIDs

        return [x for x in providers_list if x]
Example 32
def change_SHOWUPDATE_HOUR(freq):
    """
    Change frequency of show updater thread

    :param freq: New frequency
    """
    sickbeard.SHOWUPDATE_HOUR = try_int(freq, sickbeard.DEFAULT_SHOWUPDATE_HOUR)

    if sickbeard.SHOWUPDATE_HOUR > 23:
        sickbeard.SHOWUPDATE_HOUR = 0
    elif sickbeard.SHOWUPDATE_HOUR < 0:
        sickbeard.SHOWUPDATE_HOUR = 0

    sickbeard.showUpdateScheduler.start_time = datetime.time(hour=sickbeard.SHOWUPDATE_HOUR)
Example 33
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network)

    hr = 0
    m = 0

    if parsed_time:
        hr = try_int(parsed_time.group('hour'))
        m = try_int(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.datetime.fromordinal(max(try_int(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
Example 34
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network, network_dict)

    hr = 0
    m = 0

    if parsed_time:
        hr = try_int(parsed_time.group('hour'))
        m = try_int(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.datetime.fromordinal(max(try_int(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
Example 35
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)

                search_url = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                jdata = self.get_url(search_url, json=True)
                if not jdata:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    return []

                for item in jdata['torrents']:
                    seeders = ('seeds' in item and item['seeds']) or 0
                    leechers = ('leeches' in item and item['leeches']) or 0
                    title = ('torrent_title' in item and item['torrent_title']) or ''
                    torrent_size = ('size' in item and item['size'])
                    size = convert_size(torrent_size) or -1
                    download_url = ('magnet_uri' in item and item['magnet_uri']) or ''

                    if not all([title, download_url]):
                        continue

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                       (title, seeders, leechers), logger.DEBUG)
                        continue

                    if mode != 'RSS':
                        logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
Example 36
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)

                search_url = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string

                jdata = self.get_url(search_url, returns='json')
                if not jdata:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    return []

                for item in jdata['torrents']:
                    seeders = ('seeds' in item and item['seeds']) or 0
                    leechers = ('leeches' in item and item['leeches']) or 0
                    title = ('torrent_title' in item and item['torrent_title']) or ''
                    torrent_size = ('size' in item and item['size'])
                    size = convert_size(torrent_size) or -1
                    download_url = ('magnet_uri' in item and item['magnet_uri']) or ''

                    if not all([title, download_url]):
                        continue

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                       (title, seeders, leechers), logger.DEBUG)
                        continue

                    if mode != 'RSS':
                        logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
Example 37
def checkbox_to_value(option, value_on=True, value_off=False):
    """
    Turns checkbox option 'on' or 'true' to value_on (True)
    any other value returns value_off (False)
    """

    if isinstance(option, list):
        option = option[-1]
    if isinstance(option, six.string_types):
        option = six.text_type(option).strip().lower()

    if option in (True, 'on', 'true', value_on) or try_int(option) > 0:
        return value_on

    return value_off
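Expected behavior for a few representative inputs, traced from the logic above (the calls are hypothetical):

checkbox_to_value('on')           # -> True
checkbox_to_value('TRUE')         # -> True (lower-cased before comparison)
checkbox_to_value('1')            # -> True (try_int('1') > 0)
checkbox_to_value('0')            # -> False
checkbox_to_value(['off', 'on'])  # -> True (last list element wins)
checkbox_to_value(None)           # -> False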
Example 38
    def fetch_latest_hot_shows(self):
        """Get popular show information from IMDB"""

        shows = []
        result = []

        shows = anidbquery.query(QUERY_HOT)
        for show in shows:
            try:
                recommended_show = RecommendedShow(
                    show.id, show.titles['x-jat'][0], 1, show.tvdbid,
                    cache_subfolder=self.cache_subfolder,
                    rating=str(show.ratings['temporary']['rating']),
                    votes=str(try_int(show.ratings['temporary']['count'], 0)),
                    image_href=show.url)

                # Check cache or get and save image
                recommended_show.cache_image("http://img7.anidb.net/pics/anime/{0}".format(show.image_path))

                result.append(recommended_show)
            except Exception:
                pass

        return result
Example 39
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        # TODO Removed to allow Tests to pass... Not sure about removing it
        # if not self.show or not self.show.is_anime:
        #   return results

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode == 'RSS':
                    entries = self.__rssFeed()
                else:
                    entries = self.__getShow(search_string)

                items.extend(entries)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results.extend(items)

        return results
Example 40
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'cat[]': [
                'TV|SD|VOSTFR', 'TV|HD|VOSTFR', 'TV|SD|VF', 'TV|HD|VF',
                'TV|PACK|FR', 'TV|PACK|VOSTFR', 'TV|EMISSIONS', 'ANIME'
            ],
            # Both ASC and DESC are available for sort direction
            'way': 'DESC'
        }

        # Units
        units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        'Search string: {0}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)

                # Sorting: Available parameters: ReleaseName, Seeders, Leechers, Snatched, Size
                search_params['order'] = ('Seeders', 'Time')[mode == 'RSS']
                search_params['search'] = re.sub(r'[()]', '', search_string)
                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(class_='torrent_table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            'Data returned from provider does not contain any torrents',
                            logger.DEBUG)
                        continue

                    # Catégorie, Release, Date, DL, Size, C, S, L
                    labels = [
                        label.get_text(strip=True)
                        for label in torrent_rows[0]('td')
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result('td')
                        if len(cells) < len(labels):
                            continue

                        try:
                            title = cells[labels.index('Release')].get_text(
                                strip=True)
                            download_url = urljoin(
                                self.url, cells[labels.index('DL')].find(
                                    'a', class_='tooltip')['href'])
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                cells[labels.index('S')].get_text(strip=True))
                            leechers = try_int(
                                cells[labels.index('L')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            size_index = labels.index(
                                'Size') if 'Size' in labels else labels.index(
                                    'Taille')
                            torrent_size = cells[size_index].get_text()
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    'Found result: {0} with {1} seeders and {2} leechers'
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
Example 41
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                try:
                    search_url = (self.urls['rss'], self.urls['search'] + search_string + '/s/d/1/?fmt=rss')[mode != 'RSS']

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log("No data returned from provider", logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log('Expected xml but got something else, is your mirror failing?', logger.INFO)
                        continue

                    data = BeautifulSoup(data, 'html5lib')
                    for item in data('item'):
                        try:
                            if not item.category.text.endswith(('TV', 'Anime')):
                                continue

                            title = item.title.text
                            assert isinstance(title, six.text_type)
                            # Use the torcache link bitsnoop provides,
                            # unless it is not torcache or we are not using blackhole
                            # because we want to use magnets if connecting direct to client
                            # so that proxies work.
                            download_url = item.enclosure['url']
                            if sickbeard.TORRENT_METHOD != "blackhole" or 'torcache' not in download_url:
                                download_url = item.find('magneturi').next.replace('CDATA', '').strip('[]') + self._custom_trackers

                            if not (title and download_url):
                                continue

                            seeders = try_int(item.find('numseeders').text)
                            leechers = try_int(item.find('numleechers').text)
                            torrent_size = item.find('size').text
                            size = convert_size(torrent_size) or -1

                            info_hash = item.find('infohash').text

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
                        if mode != 'RSS':
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log("Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example 42
    def search(self, search_strings, age=0, ep_obj=None):
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        # select the correct category
        anime = (self.show
                 and self.show.anime) or (ep_obj and ep_obj.show
                                          and ep_obj.show.anime) or False
        self.search_params['category'] = ('tv', 'anime')[anime]

        for mode in search_strings.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                self.search_params['q'] = search_string.encode(
                    'utf-8') if mode != 'RSS' else ''
                self.search_params[
                    'field'] = 'seeders' if mode != 'RSS' else 'time_add'

                if mode != 'RSS':
                    logger.log(u"Search string: %s" % search_string,
                               logger.DEBUG)

                url_fmt_string = 'usearch' if mode != 'RSS' else search_string
                try:
                    searchURL = self.urls[
                        'search'] % url_fmt_string + '?' + urlencode(
                            self.search_params)
                    if self.custom_url:
                        searchURL = posixpath.join(
                            self.custom_url,
                            searchURL.split(
                                self.url)[1].lstrip('/'))  # Must use posixpath

                    logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
                    data = self.get_url(searchURL)
                    if not data:
                        logger.log(
                            u'URL did not return data, maybe try a custom url, or a different one',
                            logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log(
                            u'Expected xml but got something else, is your mirror failing?',
                            logger.INFO)
                        continue

                    data = BeautifulSoup(data, 'html5lib')

                    entries = data.findAll('item')
                    for item in entries:
                        try:
                            title = item.title.text
                            assert isinstance(title, unicode)
                            # Use the torcache link kat provides,
                            # unless it is not torcache or we are not using blackhole
                            # because we want to use magnets if connecting direct to client
                            # so that proxies work.
                            download_url = item.enclosure['url']
                            if sickbeard.TORRENT_METHOD != "blackhole" or 'torcache' not in download_url:
                                download_url = item.find(
                                    'torrent:magneturi').next.replace(
                                        'CDATA', '').strip('[]')

                            if not (title and download_url):
                                continue

                            seeders = try_int(
                                item.find('torrent:seeds').text, 0)
                            leechers = try_int(
                                item.find('torrent:peers').text, 0)
                            verified = bool(
                                try_int(item.find('torrent:verified').text, 0))
                            size = try_int(
                                item.find('torrent:contentlength').text)

                            info_hash = item.find('torrent:infohash').text
                            # link = item['link']

                        except (AttributeError, TypeError, KeyError,
                                ValueError):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        if self.confirmed and not verified:
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result " + title +
                                    " but that doesn't seem like a verified result so I'm ignoring it",
                                    logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers, info_hash
                        if mode != 'RSS':
                            logger.log(u"Found result: %s " % title,
                                       logger.DEBUG)

                        items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %r" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
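The category selection at the top of Esempio n. 42 uses ('tv', 'anime')[anime]. This works because bool is a subclass of int, so a False/True flag indexes element 0 or 1 of a two-element tuple:

    anime = True
    category = ('tv', 'anime')[anime]  # True == 1, so this picks 'anime'
    print(category)  # anime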
Esempio n. 43
0
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        for mode in search_params:
            items = []
            logger.log(u'Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                self.page = 1
                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):

                    if last_page:
                        break

                    search_url = self.urls['search_page'].format(search_string, x)

                    logger.log(u'Search string: {0}'.format(search_string.decode('utf-8')), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u'No data returned from provider', logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            table_header = html.find('tr', class_='bordo')
                            torrent_table = table_header.find_parent('table') if table_header else None
                            if not torrent_table:
                                logger.log(u'Could not find table of torrents', logger.ERROR)
                                continue

                            torrent_rows = torrent_table('tr')

                            # Continue only if one Release is found
                            if (len(torrent_rows) < 6) or (len(torrent_rows[2]('td')) == 1):
                                logger.log(u'Data returned from provider does not contain any torrents', logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 45:
                                last_page = 1

                            for result in torrent_rows[2:-3]:

                                try:
                                    link = result('td')[1].find('a')['href']
                                    title = re.sub(' +', ' ', link.rsplit('/', 1)[-1].replace('_', ' '))
                                    torrent_hash = result('td')[3].find('input', class_='downarrow')['value'].upper()
                                    seeders = try_int(result('td')[5].text)
                                    leechers = try_int(result('td')[6].text)
                                    torrent_size = result('td')[2].string
                                    size = convert_size(torrent_size) or -1

                                    # Download URLs
                                    download_url = self.urls['download'] % torrent_hash
                                    if urllib.urlopen(download_url).getcode() == 404:
                                        logger.log(u'Torrent hash not found in itorrents.org, searching for magnet',
                                                   logger.DEBUG)
                                        data_detail = self.get_url(link, returns='text')
                                        with BS4Parser(data_detail, 'html5lib') as html_detail:
                                            sources_row = html_detail.find('td', class_='header2').parent
                                            source_magnet = sources_row('td')[1].find('a', class_='forbtn', title='Magnet')
                                            if source_magnet and source_magnet != 'None':
                                                download_url = source_magnet['href']
                                            else:
                                                continue

                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(title) and not self.subtitle:
                                    logger.log(u'Torrent is subtitled, skipping: {0} '.format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(title):
                                    logger.log(u'Torrent isn\'t English audio/subtitled, skipping: {0}'.format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    new_title = search_show + ep_params
                                    title = new_title

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee]\d{1,2}(?:-\d{1,2})?)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    logger.log(u'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                                   (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log(u'Found result: {0} with {1} seeders and {2} leechers'.format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(u'Failed parsing provider. Traceback: {0}'.format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

                results += items

        return results
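Esempio n. 43 checks whether itorrents.org still has the .torrent by calling urllib.urlopen(download_url).getcode() == 404 before falling back to a magnet scraped from the detail page. Below is a sketch of the same availability check using requests (an assumption; the example itself uses urllib), with a HEAD request so the torrent body is never downloaded:

    import requests

    def torrent_file_available(url, timeout=10):
        # False on 404 or any network error, True otherwise.
        try:
            return requests.head(url, allow_redirects=True, timeout=timeout).status_code != 404
        except requests.RequestException:
            return False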
Esempio n. 44
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                try:
                    search_url = (self.urls['rss'], self.urls['search'] + search_string)[mode != 'RSS']

                    logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                    data = self.get_url(search_url)
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                        continue

                    data = BeautifulSoup(data, 'html5lib')

                    entries = data.findAll('item')
                    if not entries:
                        logger.log(u'Returned xml contained no results', logger.INFO)
                        continue

                    for item in entries:
                        try:
                            title = item.title.text
                            # Use the itorrents link limetorrents provides,
                            # unless it is not itorrents or we are not using blackhole
                            # because we want to use magnets if connecting direct to client
                            # so that proxies work.
                            download_url = item.enclosure['url']
                            if sickbeard.TORRENT_METHOD != "blackhole" or 'itorrents' not in download_url:
                                # http://itorrents.org/torrent/C7203982B6F000393B1CE3A013504E5F87A46A7F.torrent?title=The-Night-of-the-Generals-(1967)[BRRip-1080p-x264-by-alE13-DTS-AC3][Lektor-i-Napisy-PL-Eng][Eng]
                                # Keep the hash a separate string for when its needed for failed
                                torrent_hash = re.match(r"(.*)([A-F0-9]{40})(.*)", download_url, re.IGNORECASE).group(2)
                                download_url = "magnet:?xt=urn:btih:" + torrent_hash + "&dn=" + title + self._custom_trackers

                            if not (title and download_url):
                                continue
                            # seeders and leechers are presented differently when doing a search and when looking for newly added
                            if mode == 'RSS':
                                # <![CDATA[
                                # Category: <a href="http://www.limetorrents.cc/browse-torrents/TV-shows/">TV shows</a><br /> Seeds: 1<br />Leechers: 0<br />Size: 7.71 GB<br /><br /><a href="http://www.limetorrents.cc/Owen-Hart-of-Gold-Djon91-torrent-7180661.html">More @ limetorrents.cc</a><br />
                                # ]]>
                                description = item.find('description')
                                # str.lstrip takes a set of characters, not a prefix; this works only because digits are not in that set
                                seeders = try_int(description.find_all('br')[0].next_sibling.strip().lstrip('Seeds: '))
                                leechers = try_int(description.find_all('br')[1].next_sibling.strip().lstrip('Leechers: '))
                            else:
                                # <description>Seeds: 6982 , Leechers 734</description>
                                description = item.find('description').text.partition(',')
                                seeders = try_int(description[0].lstrip('Seeds: ').strip())
                                leechers = try_int(description[2].lstrip('Leechers ').strip())

                            torrent_size = item.find('size').text

                            size = convert_size(torrent_size) or -1

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log(u"Failed parsing provider. Traceback: %r" % traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
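The limetorrents example recovers the 40-hex info hash embedded in the cache URL and rebuilds a magnet link from it. A standalone sketch of that pattern (magnet_from_torrent_url is a name introduced here for illustration):

    import re

    def magnet_from_torrent_url(torrent_url, title, trackers=''):
        # Returns None when the URL does not embed a 40-character info hash.
        match = re.search(r'([A-F0-9]{40})', torrent_url, re.IGNORECASE)
        if not match:
            return None
        return 'magnet:?xt=urn:btih:' + match.group(1) + '&dn=' + title + trackers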
Esempio n. 45
0
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        freeleech = '&free=on' if self.freeleech else ''

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
                search_url = self.urls['search'] % (self.categories, freeleech, search_string)
                search_url += ';o=seeders' if mode != 'RSS' else ''

                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
                        return results
                    search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                try:
                    data = re.sub(r'(?im)<button.+?</button>', '', data, 0)
                    with BS4Parser(data, 'html5lib') as html:
                        if not html:
                            logger.log(u"No data returned from provider", logger.DEBUG)
                            continue

                        if html.find(text='No Torrents Found!'):
                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        torrent_table = html.find('table', id='torrents')
                        torrents = torrent_table('tr') if torrent_table else []

                        # Continue only if one Release is found
                        if len(torrents) < 2:
                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        for result in torrents[1:]:
                            try:
                                title = result('td')[1].find('a').text
                                download_url = urljoin(search_url, result('td')[3].find('a')['href'])
                                seeders = int(result.find('td', class_='ac t_seeders').text)
                                leechers = int(result.find('td', class_='ac t_leechers').text)
                                torrent_size = result('td')[5].text
                                size = convert_size(torrent_size) or -1
                            except (AttributeError, TypeError, KeyError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)

                except Exception as e:
                    logger.log(u"Failed parsing provider. Error: {0!r}".format(ex(e)), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
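When a custom mirror is configured, Esempio n. 45 validates it with validators.url and grafts the original search path onto it with urljoin. A minimal illustration with made-up URLs:

    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin      # Python 2

    custom_url = 'https://mirror.example.org/'     # hypothetical mirror
    search_path = 't?26;44&q=show+name;o=seeders'  # path split off the default url
    print(urljoin(custom_url, search_path))
    # https://mirror.example.org/t?26;44&q=show+name;o=seeders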
Esempio n. 46
0
    def _get_size(self, item):
        """
        Gets size info from a result item.
        Returns int size or -1.
        """
        return try_int(item.get('size', -1), -1)
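_get_size, like nearly every example in this list, leans on SickRage's try_int helper. A minimal sketch of its contract, assuming the usual try_int(candidate, default_value=0) signature:

    def try_int(candidate, default_value=0):
        # Convert to int, falling back to the default on any failure.
        try:
            return int(candidate)
        except (TypeError, ValueError):
            return default_value

    print(try_int('1234'))    # 1234
    print(try_int(None, -1))  # -1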
Esempio n. 47
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang

        """
        Search query:
        http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe

        cat = 4 => Shows
        modo = listado => display results mode
        orden = fecha => order
        buscar => Search show
        pag = 1 => page number
        """

        search_params = {
            'cat': 4,
            'modo': 'listado',
            'orden': 'fecha',
            'pag': 1,
            'buscar': ''
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            # Only search if user conditions are true
            if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
                logger.log("Show info is not spanish, skipping provider search", logger.DEBUG)
                continue

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_string = re.sub(r'S0*(\d*)E(\d*)', r'\1x\2', search_string)
                search_params['buscar'] = search_string.strip() if mode != 'RSS' else ''

                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        torrent_table = html.find('table', class_='fichas-listado')
                        torrent_rows = torrent_table('tr') if torrent_table else []

                        if len(torrent_rows) < 2:
                            logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        for row in torrent_rows[1:]:
                            try:
                                download_url = self.urls['base_url'] + row.find('a')['href']
                                """
                                Trick for accents for this provider.

                                - data = self.get_url(self.urls['search'], params=search_params, returns='text') -
                                returns latin1 coded text and this makes that the title used for the search
                                and the title retrieved from the parsed web page doesn't match so I get
                                "No needed episodes found during backlog search for: XXXX"

                                This is not the best solution but it works.

                                First encode latin1 and then decode utf8 to remains six.text_type
                                """
                                row_title = row.find('a', class_='nombre')['title']
                                title = self._processTitle(row_title.encode('latin-1').decode('utf8'))

                                seeders = try_int(row.find('td', class_='semillas').get_text(strip=True))
                                leechers = try_int(row.find('td', class_='clientes').get_text(strip=True))

                                # Seeders are not well reported; set to 1 in case of 0
                                seeders = max(1, seeders)

                                # Provider does not provide size
                                size = -1

                            except (AttributeError, TypeError, KeyError, ValueError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)

                except Exception:
                    logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.WARNING)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
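The encode('latin-1').decode('utf8') trick documented inside Esempio n. 47 is the standard mojibake repair: bytes that were really UTF-8 got decoded as latin-1, so re-encoding as latin-1 recovers the original bytes for a correct UTF-8 decode. For example:

    # -*- coding: utf-8 -*-
    garbled = u'EspaÃ±a'  # UTF-8 bytes of 'España' mis-decoded as latin-1
    fixed = garbled.encode('latin-1').decode('utf-8')
    print(fixed)          # España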
Esempio n. 48
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        search_params = {
            'out': 'json',
            'filter': 2101,
            'showmagnets': 'on',
            'num': 50
        }

        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params['s'] = search_string

                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log(
                            "Invalid custom url set, please check your settings",
                            logger.WARNING)
                        return results
                    search_url = self.custom_url
                else:
                    search_url = self.url

                torrents = self.get_url(search_url,
                                        params=search_params,
                                        returns='json')
                if not (torrents and "total_found" in torrents
                        and int(torrents["total_found"]) > 0):
                    logger.log(
                        u"Data returned from provider does not contain any torrents",
                        logger.DEBUG)
                    continue

                del torrents["total_found"]

                for i in torrents:
                    title = torrents[i]["title"]
                    seeders = try_int(torrents[i]["seeds"], 1)
                    leechers = try_int(torrents[i]["leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(
                                u"Torrent doesn't meet minimum seeds & leechers not selecting : {0}"
                                .format(title), logger.DEBUG)
                        continue

                    t_hash = torrents[i]["torrent_hash"]
                    torrent_size = torrents[i]["torrent_size"]
                    if not all([t_hash, torrent_size]):
                        continue
                    download_url = torrents[i]["magnet"] + self._custom_trackers
                    size = convert_size(torrent_size) or -1

                    if not all([title, download_url]):
                        continue

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'hash': t_hash
                    }

                    if mode != 'RSS':
                        logger.log(
                            u"Found result: {0} with {1} seeders and {2} leechers"
                            .format(title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
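The JSON response handled in Esempio n. 48 mixes a total_found counter with numbered torrent entries in the same object, which is why the counter is deleted before iterating the keys. A sketch with made-up data:

    torrents = {
        'total_found': '2',
        '0': {'title': 'Show.S01E01', 'seeds': 12, 'leechs': 3},
        '1': {'title': 'Show.S01E02', 'seeds': 7, 'leechs': 1},
    }
    torrents.pop('total_found', None)  # drop the metadata key before iterating
    for i in torrents:
        print('{0}: {1} seeders'.format(torrents[i]['title'], torrents[i]['seeds']))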
Esempio n. 49
0
    def retrieveShowMetadata(self, folder):
        """
        Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.
        """

        empty_return = (None, None, None)

        assert isinstance(folder, six.text_type)

        metadata_path = ek(os.path.join, folder, self._show_metadata_filename)

        if not ek(os.path.isdir, folder) or not ek(os.path.isfile,
                                                   metadata_path):
            logger.log(
                "Can't load the metadata file from " + metadata_path +
                ", it doesn't exist", logger.DEBUG)
            return empty_return

        logger.log("Loading show info from metadata file in " + metadata_path,
                   logger.DEBUG)

        try:
            with io.open(metadata_path, 'rb') as xmlFileObj:
                showXML = etree.ElementTree(file=xmlFileObj)

            if showXML.findtext('title') is None or (
                    showXML.findtext('tvdbid') is None
                    and showXML.findtext('id') is None):
                logger.log(
                    "Invalid info in tvshow.nfo (missing name or id): {0} {1} {2}"
                    .format(showXML.findtext('title'),
                            showXML.findtext('tvdbid'),
                            showXML.findtext('id')))
                return empty_return

            name = showXML.findtext('title')

            indexer_id_text = showXML.findtext('tvdbid') or showXML.findtext(
                'id')
            if indexer_id_text:
                indexer_id = try_int(indexer_id_text, None)
                if indexer_id is None or indexer_id < 1:
                    logger.log(
                        "Invalid Indexer ID (" + str(indexer_id) +
                        "), not using metadata file", logger.DEBUG)
                    return empty_return
            else:
                logger.log(
                    "Empty <id> or <tvdbid> field in NFO, unable to find a ID, not using metadata file",
                    logger.DEBUG)
                return empty_return

            indexer = 1
            epg_url_text = showXML.findtext('episodeguide/url')
            if epg_url_text:
                epg_url = epg_url_text.lower()
                if str(indexer_id) in epg_url and 'tvrage' in epg_url:
                    logger.log(
                        "Invalid Indexer ID (" + str(indexer_id) +
                        "), not using metadata file because it has TVRage info",
                        logger.WARNING)
                    return empty_return

        except Exception as e:
            logger.log(
                "There was an error parsing your existing metadata file: '" +
                metadata_path + "' error: " + ex(e), logger.WARNING)
            return empty_return

        return indexer_id, name, indexer
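retrieveShowMetadata reads tvshow.nfo with ElementTree and only needs findtext lookups. A self-contained illustration of those lookups on made-up NFO content:

    import xml.etree.ElementTree as etree

    nfo = '<tvshow><title>Example Show</title><tvdbid>12345</tvdbid></tvshow>'
    show_xml = etree.ElementTree(etree.fromstring(nfo))
    print(show_xml.findtext('title'))             # Example Show
    print(show_xml.findtext('tvdbid'))            # 12345
    print(show_xml.findtext('episodeguide/url'))  # None (element absent)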
Esempio n. 50
0
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        freeleech = '3' if self.freeleech else '0'

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_url = self.urls['search'] % (freeleech, search_string)
                init_html = self.get_url(search_url, returns='text')
                max_page_number = 0

                if not init_html:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                try:
                    with BS4Parser(init_html, 'html5lib') as init_soup:

                        # Check to see if there is more than 1 page of results
                        pager = init_soup.find('div', {'class': 'pager'})
                        page_links = pager('a', href=True) if pager else []

                        for lnk in page_links:
                            link_text = lnk.text.strip()
                            if link_text.isdigit():
                                page_int = int(link_text)
                                if page_int > max_page_number:
                                    max_page_number = page_int

                        # limit page number to 15 just in case something goes wrong
                        if max_page_number > 15:
                            max_page_number = 15
                        # limit RSS search
                        if max_page_number > 3 and mode == 'RSS':
                            max_page_number = 3
                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: {0}".format(
                            traceback.format_exc()), logger.ERROR)
                    continue

                data_response_list = [init_html]

                # Freshon starts counting pages from zero, even though it displays numbers from 1
                if max_page_number > 1:
                    for i in range(1, max_page_number):

                        time.sleep(1)
                        page_search_url = search_url + '&page=' + str(i)
                        # '.log(u"Search string: " + page_search_url, logger.DEBUG)
                        page_html = self.get_url(page_search_url,
                                                 returns='text')

                        if not page_html:
                            continue

                        data_response_list.append(page_html)

                try:

                    for data_response in data_response_list:

                        with BS4Parser(data_response, 'html5lib') as html:

                            torrent_rows = html(
                                "tr", class_=re.compile('torrent_[0-9]*'))

                            # Continue only if a Release is found
                            if not torrent_rows:
                                logger.log(
                                    u"Data returned from provider does not contain any torrents",
                                    logger.DEBUG)
                                continue

                            for individual_torrent in torrent_rows:

                                # skip if torrent has been nuked due to poor quality
                                if individual_torrent.find(
                                        'img', alt='Nuked') is not None:
                                    continue

                                try:
                                    title = individual_torrent.find(
                                        'a', {'class': 'torrent_name_link'
                                              })['title']
                                except Exception:
                                    logger.log(
                                        u"Unable to parse torrent title. Traceback: {0} "
                                        .format(traceback.format_exc()),
                                        logger.WARNING)
                                    continue

                                try:
                                    details_url = individual_torrent.find(
                                        'a',
                                        {'class': 'torrent_name_link'})['href']
                                    torrent_id = int((re.match(
                                        '.*?([0-9]+)$',
                                        details_url).group(1)).strip())
                                    download_url = self.urls['download'] % (
                                        str(torrent_id))
                                    seeders = try_int(
                                        individual_torrent.find(
                                            'td', {
                                                'class': 'table_seeders'
                                            }).find('span').text.strip(), 1)
                                    leechers = try_int(
                                        individual_torrent.find(
                                            'td', {
                                                'class': 'table_leechers'
                                            }).find('a').text.strip(), 0)
                                    torrent_size = individual_torrent.find(
                                        'td', {
                                            'class': 'table_size'
                                        }).get_text()
                                    size = convert_size(torrent_size) or -1
                                except Exception:
                                    continue

                                if not all([title, download_url]):
                                    continue

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log(
                                            u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                            .format(title, seeders,
                                                    leechers), logger.DEBUG)
                                    continue

                                item = {
                                    'title': title,
                                    'link': download_url,
                                    'size': size,
                                    'seeders': seeders,
                                    'leechers': leechers,
                                    'hash': ''
                                }
                                if mode != 'RSS':
                                    logger.log(
                                        u"Found result: {0} with {1} seeders and {2} leechers"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)

                                items.append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: {0}".format(
                            traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
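The Freshon example walks the pager's numeric links to find the highest page number before fetching the remaining pages. A condensed sketch of that scan with made-up markup:

    from bs4 import BeautifulSoup

    html = BeautifulSoup('<div class="pager"><a href="#">1</a><a href="#">2</a>'
                         '<a href="#">next</a></div>', 'html.parser')
    pager = html.find('div', {'class': 'pager'})
    pages = [int(a.text) for a in pager('a', href=True) if a.text.strip().isdigit()]
    max_page_number = max(pages) if pages else 0
    print(max_page_number)  # 2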
Esempio n. 51
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if not self.show or not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params = {
                    "terms": search_string,
                    "type": 1,  # get anime types
                }

                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as soup:
                    torrent_table = soup.find('table', class_='listing')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            u"Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    a = 1 if len(torrent_rows[0]('td')) < 2 else 0

                    for top, bot in zip(torrent_rows[a::2],
                                        torrent_rows[a + 1::2]):
                        try:
                            desc_top = top.find('td', class_='desc-top')
                            title = desc_top.get_text(strip=True)
                            download_url = desc_top.find('a')['href']

                            desc_bottom = bot.find(
                                'td', class_='desc-bot').get_text(strip=True)
                            size = convert_size(
                                desc_bottom.split('|')[1].strip(
                                    'Size: ')) or -1

                            stats = bot.find(
                                'td', class_='stats').get_text(strip=True)
                            sl = re.match(
                                r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
                                stats.replace(' ', ''))
                            seeders = try_int(sl.group('seeders')) if sl else 0
                            leechers = try_int(
                                sl.group('leechers')) if sl else 0
                        except StandardError:
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': ''
                        }
                        if mode != 'RSS':
                            logger.log(
                                u"Found result: {0} with {1} seeders and {2} leechers"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
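The stats cell in Esempio n. 51 packs seeders, leechers, completed count, and an ID into one string; the code removes spaces first and then applies a named-group regex. A quick demonstration with made-up stats text:

    import re

    stats = 'S: 12 L: 4 C: 0 ID: 99999'
    sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
                  stats.replace(' ', ''))
    print('{0} seeders, {1} leechers'.format(sl.group('seeders'), sl.group('leechers')))
    # 12 seeders, 4 leechers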
Esempio n. 52
0
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if not self.login():
            return results

        for mode in search_params:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(
                        'Search string: {0}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)

                post_data = {
                    '/browse.php?': None,
                    'cata': 'yes',
                    'jxt': 8,
                    'jxw': 'b',
                    'search': search_string
                }
                post_data.update(self.categories[mode])

                if self.freeleech:
                    post_data.update({'free': 'on'})

                parsedJSON = self.get_url(self.urls['search'],
                                          post_data=post_data,
                                          returns='json')
                if not parsedJSON:
                    logger.log('No data returned from provider', logger.DEBUG)
                    continue

                try:
                    torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get(
                        'torrents', [])
                except Exception:
                    logger.log(
                        'Data returned from provider does not contain any torrents',
                        logger.DEBUG)
                    continue

                for torrent in torrents:

                    title = re.sub(
                        r'\[.*\=.*\].*\[/.*\]', '',
                        torrent['name']) if torrent['name'] else None
                    download_url = urljoin(
                        self.urls['download'], '{0}/{1}'.format(
                            torrent['id'], torrent['fname'])
                    ) if torrent['id'] and torrent['fname'] else None
                    if not all([title, download_url]):
                        continue

                    seeders = try_int(torrent['seed'])
                    leechers = try_int(torrent['leech'])

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(
                                'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'
                                .format(title, seeders,
                                        leechers), logger.DEBUG)
                        continue

                    torrent_size = torrent['size']
                    size = convert_size(torrent_size) or -1

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'hash': None
                    }

                    if mode != 'RSS':
                        logger.log(
                            'Found result: {0} with {1} seeders and {2} leechers'
                            .format(title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
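The re.sub in Esempio n. 52 strips BBCode decorations such as [url=...]...[/url] from torrent names before using them as titles. A quick demonstration on a made-up name:

    import re

    name = 'Show.S01E01.720p [url=http://example.com]tracker[/url]'
    title = re.sub(r'\[.*\=.*\].*\[/.*\]', '', name).strip()
    print(title)  # Show.S01E01.720p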
Esempio n. 53
0
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        self.categories = "cat=" + str(self.cat)

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode == 'RSS':
                    self.page = 2

                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):
                    z = x * 20
                    if last_page:
                        break

                    if mode != 'RSS':
                        search_url = (self.urls['search_page'] + '&filter={2}').format(z, self.categories, search_string)
                    else:
                        search_url = self.urls['search_page'].format(z, self.categories)

                    if mode != 'RSS':
                        logger.log(u"Search string: {0}".format
                                   (search_string.decode("utf-8")), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            torrent_table = html.find('table', class_='copyright')
                            torrent_rows = torrent_table('tr') if torrent_table else []

                            # Continue only if one Release is found
                            if len(torrent_rows) < 3:
                                logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 42:
                                last_page = 1

                            for result in torrent_rows[2:]:

                                try:
                                    link = result.find('td').find('a')
                                    title = link.string
                                    download_url = self.urls['download'] % result('td')[8].find('a')['href'][-8:]
                                    leechers = result('td')[3]('td')[1].text
                                    leechers = int(leechers.strip('[]'))
                                    seeders = result('td')[3]('td')[2].text
                                    seeders = int(seeders.strip('[]'))
                                    torrent_size = result('td')[3]('td')[3].text.strip('[]') + " GB"
                                    size = convert_size(torrent_size) or -1
                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(result) and not self.subtitle:
                                    logger.log(u"Torrent is subtitled, skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(result):
                                    logger.log(u"Torrent isnt english audio/subtitled , skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    new_title = search_show + ep_params
                                    title = new_title

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee]\d{1,2}(?:-\d{1,2})?)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                                   (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(u"Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

                results += items

        return results
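Esempio n. 53 appends " GB" to the bare number the site reports before calling convert_size. convert_size is a SickRage helper; below is a minimal sketch of what it does, assuming binary units and a None default (the real helper supports more options):

    def convert_size(size_string, default=None):
        # Parse strings like '7.71 GB' into a byte count.
        units = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
        try:
            value, unit = size_string.split()
            return int(float(value) * units[unit.upper()])
        except (AttributeError, KeyError, ValueError):
            return default

    print(convert_size('7.71 GB'))  # 8278549463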
Esempio n. 54
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        search_params = {
            "app_id": "sickrage2",
            "category": "tv",
            "min_seeders": try_int(self.minseed),
            "min_leechers": try_int(self.minleech),
            "limit": 100,
            "format": "json_extended",
            "ranked": try_int(self.ranked),
            "token": self.token,
        }

        if ep_obj is not None:
            ep_indexerid = ep_obj.show.indexerid
            ep_indexer = ep_obj.show.indexer
        else:
            ep_indexerid = None
            ep_indexer = None

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)
            if mode == "RSS":
                search_params["sort"] = "last"
                search_params["mode"] = "list"
                search_params.pop("search_string", None)
                search_params.pop("search_tvdb", None)
            else:
                search_params["sort"] = self.sorting if self.sorting else "seeders"
                search_params["mode"] = "search"

                if ep_indexer == INDEXER_TVDB and ep_indexerid:
                    search_params["search_tvdb"] = ep_indexerid
                else:
                    search_params.pop("search_tvdb", None)

            for search_string in search_strings[mode]:
                if mode != "RSS":
                    search_params["search_string"] = search_string
                    logger.log(
                        "Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                data = self.get_url(self.urls["api"],
                                    params=search_params,
                                    returns="json")
                if not isinstance(data, dict):
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                error = data.get("error")
                if error:
                    logger.log(error)
                    continue

                torrent_results = data.get("torrent_results")
                if not torrent_results:
                    logger.log(
                        "Data returned from provider does not contain any torrents",
                        logger.DEBUG)
                    continue

                for item in torrent_results:
                    try:
                        title = item.pop("title")
                        download_url = item.pop("download")
                        if not all([title, download_url]):
                            continue

                        seeders = item.pop("seeders")
                        leechers = item.pop("leechers")
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log(
                                    "Discarding torrent because it doesn't meet the"
                                    " minimum seeders or leechers: {} (S:{} L:{})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        torrent_size = item.pop("size", -1)
                        size = convert_size(torrent_size) or -1

                        item = title, download_url, size, seeders, leechers
                        if mode != "RSS":
                            logger.log(
                                "Found result: {} with {} seeders and {} leechers"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        items.append(item)
                    except Exception:
                        continue

            # For each search mode sort all the items by seeders
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
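The JSON flow in Example n. 54 (error key, torrent_results list, minimum-seed filtering, per-mode sort) can be distilled into a small pure function. This is a sketch under the assumption of a RARBG-style payload; `parse_torrent_results` is a hypothetical name, not provider API:

def parse_torrent_results(data, min_seeders=1, min_leechers=0):
    # `data` is the decoded JSON payload: either {'error': ...} or
    # {'torrent_results': [{'title': ..., 'download': ..., ...}, ...]}.
    results = []
    if not isinstance(data, dict) or data.get('error'):
        return results
    for item in data.get('torrent_results') or []:
        title, download_url = item.get('title'), item.get('download')
        if not all([title, download_url]):
            continue
        seeders, leechers = item.get('seeders', 0), item.get('leechers', 0)
        if seeders < min_seeders or leechers < min_leechers:
            continue
        results.append((title, download_url, item.get('size', -1), seeders, leechers))
    # Best-seeded first, mirroring the per-mode sort in the provider.
    results.sort(key=lambda tup: tup[3], reverse=True)
    return results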
Example n. 55
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    search_url = self.urls['search'] % (quote_plus(search_string), self.categories)
                    logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
                               logger.DEBUG)
                else:
                    search_url = self.urls['rss'] % self.categories

                if self.freeleech:
                    search_url = search_url.replace('active=1', 'active=5')

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data or 'please try later' in data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if 'No torrents here' in data:
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # The search result page contains some invalid html that prevents the html parser
                # from returning all data. We cut everything before the table that contains the
                # data we are interested in, thus eliminating the invalid html portions.
                try:
                    index = data.lower().index('<table class="mainblockcontenttt"')
                except ValueError:
                    logger.log(u"Could not find table of torrents mainblockcontenttt", logger.DEBUG)
                    continue

                # data = urllib.unquote(data[index:].encode('utf-8')).decode('utf-8').replace('\t', '')
                data = data[index:]

                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log(u"No html data parsed from provider", logger.DEBUG)
                        continue

                    torrent_rows = []
                    torrent_table = html.find('table', class_='mainblockcontenttt')
                    if torrent_table:
                        torrent_rows = torrent_table.find_all('tr')

                    if not torrent_rows:
                        logger.log(u"Could not find results in returned data", logger.DEBUG)
                        continue

                    # Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
                    labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in torrent_rows[0].find_all('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.findChildren('td')[:len(labels)]
                            if len(cells) < len(labels):
                                continue

                            title = cells[labels.index(u'Filename')].a.get_text(strip=True)
                            seeders = try_int(cells[labels.index(u'S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index(u'L')].get_text(strip=True))
                            torrent_size = cells[labels.index(u'Size')].get_text()

                            size = convert_size(torrent_size) or -1
                            download_url = self.url + '/' + cells[labels.index(u'Dl')].a['href']
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
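The most reusable idea in Example n. 55 is locating table columns by header label rather than by fixed index. Below is a sketch using bs4 directly (the project wraps it as BS4Parser); `parse_rows_by_label` is a hypothetical helper:

from bs4 import BeautifulSoup


def parse_rows_by_label(data):
    # Build the column map from the header row so that reordered columns
    # on the site do not silently shift the parsed fields.
    html = BeautifulSoup(data, 'html5lib')
    table = html.find('table', class_='mainblockcontenttt')
    rows = table.find_all('tr') if table else []
    if not rows:
        return []
    labels = [cell.a.get_text(strip=True) if cell.a else cell.get_text(strip=True)
              for cell in rows[0].find_all('td')]
    parsed = []
    for row in rows[1:]:  # skip the header row
        cells = row.find_all('td')[:len(labels)]
        if len(cells) < len(labels):
            continue
        try:
            parsed.append({
                'title': cells[labels.index('Filename')].a.get_text(strip=True),
                'seeders': cells[labels.index('S')].get_text(strip=True),
                'leechers': cells[labels.index('L')].get_text(strip=True),
                'size': cells[labels.index('Size')].get_text(strip=True),
            })
        except (AttributeError, ValueError, IndexError):
            continue
    return parsed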
Example n. 56
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
        """
        Searches indexer using the params in search_strings, either for latest releases, or a string/id search
        Returns: list of results in dict form
        """
        results = []
        if not self._check_auth():
            return results

        if 'gingadaddy' not in self.url:  # gingadaddy has no caps.
            if not self.caps:
                self.get_newznab_categories(just_caps=True)

            if not self.caps:
                return results

        for mode in search_strings:
            torznab = False
            search_params = {
                't': ('search', 'tvsearch')[bool(self.use_tv_search)],
                'limit': 100,
                'offset': 0,
                'cat': self.catIDs.strip(', ') or '5030,5040',
                'maxage': sickbeard.USENET_RETENTION
            }

            if self.needs_auth and self.key:
                search_params['apikey'] = self.key

            if mode != 'RSS':
                if self.use_tv_search:
                    if 'tvdbid' in str(self.cap_tv_search):
                        search_params['tvdbid'] = ep_obj.show.indexerid

                    if ep_obj.show.air_by_date or ep_obj.show.sports:
                        date_str = str(ep_obj.airdate)
                        search_params['season'] = date_str.partition('-')[0]
                        search_params['ep'] = date_str.partition('-')[2].replace('-', '/')
                    else:
                        search_params['season'] = ep_obj.scene_season
                        search_params['ep'] = ep_obj.scene_episode

                if mode == 'Season':
                    search_params.pop('ep', '')

            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(
                        'Search string: {0}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)

                    if 'tvdbid' not in search_params:
                        search_params['q'] = search_string

                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                data = self.get_url(urljoin(self.url, 'api'),
                                    params=search_params,
                                    returns='text')
                if not data:
                    break

                with BS4Parser(data, 'html5lib') as html:
                    if not self._check_auth_from_data(html):
                        break

                    try:
                        torznab = 'xmlns:torznab' in html.rss.attrs
                    except AttributeError:
                        torznab = False

                    for item in html('item'):
                        try:
                            title = item.title.get_text(strip=True)
                            download_url = None
                            if item.link:
                                if validators.url(
                                        item.link.get_text(strip=True),
                                        require_tld=False):
                                    download_url = item.link.get_text(
                                        strip=True)
                                elif validators.url(item.link.next.strip(),
                                                    require_tld=False):
                                    download_url = item.link.next.strip()

                            if not download_url and item.enclosure and validators.url(
                                    item.enclosure.get('url', '').strip(), require_tld=False):
                                download_url = item.enclosure.get('url', '').strip()

                            if not (title and download_url):
                                continue

                            seeders = leechers = None
                            if 'gingadaddy' in self.url:
                                size_regex = re.search(r'\d*\.?\d* [KMGT]B', str(item.description))
                                item_size = size_regex.group() if size_regex else -1
                            else:
                                item_size = item.size.get_text(strip=True) if item.size else -1
                                for attr in item('newznab:attr') + item('torznab:attr'):
                                    if attr['name'] == 'size':
                                        item_size = attr['value']
                                    elif attr['name'] == 'seeders':
                                        seeders = try_int(attr['value'])
                                    elif attr['name'] == 'peers':
                                        leechers = try_int(attr['value'])

                            if not item_size or (torznab and (seeders is None or leechers is None)):
                                continue

                            size = convert_size(item_size) or -1

                            result = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers
                            }
                            items.append(result)
                        except Exception:
                            continue

                # Since we aren't using the search string,
                # break out of the search string loop
                if 'tvdbid' in search_params:
                    break

            # For each search mode sort all the items by seeders (torznab feeds report them)
            if torznab:
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
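The newznab/torznab attribute loop in Example n. 56 is easiest to follow in isolation: size may come from a <size> tag or a newznab:attr, while seeders/peers exist only as torznab attributes. A sketch, with `extract_feed_attrs` as a hypothetical helper and a local stand-in for the project's try_int:

def try_int(value, default=0):
    # Local stand-in for the project's try_int helper.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default


def extract_feed_attrs(item):
    # `item` is a BeautifulSoup <item> element from a newznab/torznab feed;
    # calling a Tag like item('newznab:attr') finds all matching descendants.
    size, seeders, leechers = -1, None, None
    if item.size:
        size = item.size.get_text(strip=True)
    for attr in item('newznab:attr') + item('torznab:attr'):
        if attr['name'] == 'size':
            size = attr['value']
        elif attr['name'] == 'seeders':
            seeders = try_int(attr['value'])
        elif attr['name'] == 'peers':
            leechers = try_int(attr['value'])
    return size, seeders, leechers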