Code example #1
File: extratorrent.py Project: allan84/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})
                url = self.urls['rss'] if not self.custom_url else self.urls['rss'].replace(self.urls['index'], self.custom_url)
                data = self.get_url(url, params=self.search_params)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith('<?xml'):
                    logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                    continue

                with BS4Parser(data, 'html5lib') as parser:
                    for item in parser.findAll('item'):
                        try:
                            title = re.sub(r'^<!\[CDATA\[|\]\]>$', '', item.find('title').get_text(strip=True))
                            seeders = try_int(item.find('seeders').get_text(strip=True))
                            leechers = try_int(item.find('leechers').get_text(strip=True))
                            torrent_size = item.find('size').get_text()
                            size = convert_size(torrent_size) or -1

                            if sickbeard.TORRENT_METHOD == 'blackhole':
                                enclosure = item.find('enclosure')  # Backlog doesn't have enclosure
                                download_url = enclosure['url'] if enclosure else item.find('link').next.strip()
                                download_url = re.sub(r'(.*)/torrent/(.*).html', r'\1/download/\2.torrent', download_url)
                            else:
                                info_hash = item.find('info_hash').get_text(strip=True)
                                download_url = "magnet:?xt=urn:btih:" + info_hash + "&dn=" + title + self._custom_trackers

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s " % title, logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
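
All of these excerpts lean on the same pair of helpers, try_int and convert_size, which are imported from elsewhere in SickRage and not shown here. A minimal sketch of their assumed semantics (the real implementations handle more formats and edge cases):

def try_int(candidate, default_value=0):
    """Coerce a scraped value to int, falling back to a default on failure."""
    try:
        return int(candidate)
    except (ValueError, TypeError):
        return default_value


def convert_size(size, default=None, units=None):
    """Parse a 'number unit' string into bytes (simplified sketch)."""
    units = [u.upper() for u in (units or ['B', 'KB', 'MB', 'GB', 'TB', 'PB'])]
    try:
        number, unit = str(size).strip().split(' ')
        return int(float(number) * 1024 ** units.index(unit.upper()))
    except (AttributeError, ValueError):
        return default


print(convert_size('1.2 GiB', units=['B', 'KiB', 'MiB', 'GiB']))  # 1288490188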
Code example #2
File: nyaatorrents.py Project: Indigo744/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if self.show and not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                params = {
                    "page": 'rss',
                    "cats": '1_0',  # All anime
                    "sort": 2,     # Sort Descending By Seeders
                    "order": 1
                }
                if mode != 'RSS':
                    params["term"] = search_string

                search_url = self.url + '?' + urlencode(params)
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                summary_regex = ur"(\d+) seeder\(s\), (\d+) leecher\(s\), \d+ download\(s\) - (\d+\.?\d* [KMGT]iB)(.*)"
                s = re.compile(summary_regex, re.DOTALL)

                for curItem in self.cache.getRSSFeed(search_url)['entries'] or []:
                    title = curItem['title']
                    download_url = curItem['link']
                    if not all([title, download_url]):
                        continue

                    seeders, leechers, torrent_size, verified = s.findall(curItem['summary'])[0]
                    # findall returns strings; convert so the minimum seeders/leechers comparison below is numeric
                    seeders, leechers = try_int(seeders), try_int(leechers)
                    size = convert_size(torrent_size) or -1

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                       (title, seeders, leechers), logger.DEBUG)
                        continue

                    if self.confirmed and not verified and mode != 'RSS':
                        logger.log(u"Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                        continue

                    item = title, download_url, size, seeders, leechers
                    if mode != 'RSS':
                        logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
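
The summary_regex above pulls seeders, leechers, size, and a trailing verification marker out of a Nyaa RSS summary line. A standalone check of the pattern (the sample string is hypothetical):

import re

summary_regex = r"(\d+) seeder\(s\), (\d+) leecher\(s\), \d+ download\(s\) - (\d+\.?\d* [KMGT]iB)(.*)"
sample = "12 seeder(s), 3 leecher(s), 450 download(s) - 1.2 GiB - Trusted"  # hypothetical summary
print(re.compile(summary_regex, re.DOTALL).findall(sample)[0])
# ('12', '3', '1.2 GiB', ' - Trusted')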
Code example #3
File: cpasbien.py Project: Row/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode == 'Season':
                    search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                    search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html(class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                            title = re.sub(r' Saison', ' Season', title, flags=re.I)
                            tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url + '/telechargement/{0}'.format(tmp))
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="up").get_text(strip=True))
                            leechers = try_int(result.find(class_="down").get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find(class_="poid").get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Code example #4
File: torrentz.py Project: Arcanemagus/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                # Feed verified does not exist on this clone
                # search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                search_url = self.urls['feed']
                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url, params={'f': search_string}, returns='text')
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log(u"Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser('item'):
                            if item.category and 'tv' not in item.category.get_text(strip=True).lower():
                                continue

                            title = item.title.get_text(strip=True)
                            t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
                            items.append(result)
                except StandardError:
                    logger.log(u"Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Code example #5
File: torrentz.py Project: Indigo744/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                if mode != 'RSS':
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                    search_url += '?q=' + urllib.parse.quote_plus(search_string)

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log(u"Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser.findAll('item'):
                            if item.category and 'tv' not in item.category.text:
                                continue

                            title = item.title.text.rsplit(' ', 1)[0].replace(' ', '.')
                            t_hash = item.guid.text.rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            items.append((title, download_url, size, seeders, leechers))
                except StandardError:
                    logger.log(u"Failed parsing provider. Traceback: %r" % traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
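
Both torrentz excerpts unpack size, seeders, and leechers from a _split_description helper defined elsewhere in the provider. A plausible sketch, assuming the RSS description carries those values as its first three integers (an assumption, not the confirmed implementation; the sample string is hypothetical):

import re

def _split_description(description):
    # Assumption: the first three integers are size in MB, seeders, leechers
    numbers = re.findall(r"[0-9]+", description)
    return int(numbers[0]) * 1024 ** 2, int(numbers[1]), int(numbers[2])

print(_split_description("Size: 719 MB Seeds: 39 Peers: 12"))  # (753926144, 39, 12)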
Code example #6
File: btdigg.py Project: Arcanemagus/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        search_params = {"p": 0}
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                search_params["q"] = search_string
                if mode != "RSS":
                    search_params["order"] = 0
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)
                else:
                    search_params["order"] = 2

                jdata = self.get_url(self.urls["api"], params=search_params, returns="json")
                if not jdata:
                    logger.log("Provider did not return data", logger.DEBUG)
                    continue

                for torrent in jdata:
                    try:
                        title = torrent.pop("name", "")
                        download_url = torrent.pop("magnet") + self._custom_trackers if torrent["magnet"] else None
                        if not all([title, download_url]):
                            continue

                        fake_level = float(torrent.pop("ff"))
                        if fake_level:
                            logger.log("Ignoring result for {0} since it's been reported as fake (level = {1})".format
                                       (title, fake_level), logger.DEBUG)
                            continue

                        if not int(torrent.pop("files")):
                            logger.log("Ignoring result for {0} because it has no files".format
                                       (title), logger.DEBUG)
                            continue

                        # Provider doesn't provide seeders/leechers
                        seeders = 1
                        leechers = 0

                        torrent_size = torrent.pop("size")
                        size = convert_size(torrent_size) or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != "RSS":
                            logger.log("Found result: {0} ".format(title), logger.DEBUG)

                        items.append(item)

                    except StandardError:
                        continue

            results += items

        return results
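
Examples #1, #4, #5, and #6 all append self._custom_trackers onto a magnet URI. A minimal sketch of what such a property might produce, assuming a comma-separated tracker setting joined with '&tr=' (the tracker URLs and info hash below are hypothetical):

try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote  # Python 2

custom_trackers = "udp://tracker.example.org:1337,udp://open.example.net:6969"  # hypothetical setting
_custom_trackers = "".join("&tr=" + quote(t.strip(), safe="")
                           for t in custom_trackers.split(",") if t.strip())

info_hash = "0123456789abcdef0123456789abcdef01234567"  # hypothetical
print("magnet:?xt=urn:btih:" + info_hash + "&dn=Example.Torrent" + _custom_trackers)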
Code example #7
File: cpasbien.py Project: Hydrog3n/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)
                    search_url = self.url + '/recherche/' + search_string.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html.find_all(class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(strip=True).replace("HDTV", "HDTV x264-CPasBien")
                            title = re.sub(r' Saison', ' Season', title, flags=re.IGNORECASE)
                            tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url + '/telechargement/%s' % tmp)
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(result.find(class_="up").get_text(strip=True))
                            leechers = try_int(result.find(class_="down").get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find(class_="poid").get_text(strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size, units=units) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
Code example #8
File: omgwtfnzbs.py Project: bitzorro/SickRage
    def _get_size(self, item):
        size = item.get('sizebytes', -1)

        # Try to get the size from the summary tag
        if size == -1:
            # Units
            units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
            summary = item.get('summary')
            if summary:
                size_match = re.search(ur'Size[^\d]*([0-9.]*.[A-Z]*)', summary)
                size = convert_size(size_match.group(1), units=units) or -1 if size_match else -1

        return try_int(size)
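
Note that the size pattern above leaves its dot unescaped, so the bare '.' in [0-9.]*.[A-Z]* is what consumes the space between number and unit. A standalone check (the summary text is hypothetical; plain r"" replaces the Python 2 ur"" prefix):

import re

summary = "Category: TV :: Size: 1.2 GB :: Group: example"  # hypothetical summary
size_match = re.search(r"Size[^\d]*([0-9.]*.[A-Z]*)", summary)
print(size_match.group(1))  # '1.2 GB'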
Code example #9
File: strike.py Project: ratoaq2/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)

                search_url = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string

                jdata = self.get_url(search_url, returns='json')
                if not jdata:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                for item in jdata['torrents']:
                    seeders = ('seeds' in item and item['seeds']) or 0
                    leechers = ('leeches' in item and item['leeches']) or 0
                    title = ('torrent_title' in item and item['torrent_title']) or ''
                    torrent_size = ('size' in item and item['size'])
                    size = convert_size(torrent_size) or -1
                    download_url = ('magnet_uri' in item and item['magnet_uri']) or ''

                    if not all([title, download_url]):
                        continue

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                       (title, seeders, leechers), logger.DEBUG)
                        continue

                    if mode != 'RSS':
                        logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
Code example #10
File: strike.py Project: allan84/SickRage
    def search(self, search_strings, age=0, ep_obj=None):
        results = []
        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)

                search_url = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                jdata = self.get_url(search_url, json=True)
                if not jdata:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                for item in jdata['torrents']:
                    seeders = ('seeds' in item and item['seeds']) or 0
                    leechers = ('leeches' in item and item['leeches']) or 0
                    title = ('torrent_title' in item and item['torrent_title']) or ''
                    torrent_size = ('size' in item and item['size'])
                    size = convert_size(torrent_size) or -1
                    download_url = ('magnet_uri' in item and item['magnet_uri']) or ''

                    if not all([title, download_url]):
                        continue

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                        continue

                    if mode != 'RSS':
                        logger.log(u"Found result: %s " % title, logger.DEBUG)

                    item = title, download_url, size, seeders, leechers
                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
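
Both strike excerpts fetch fields with the ('seeds' in item and item['seeds']) or 0 idiom; dict.get with an or-fallback says the same thing more directly (equivalent here, since stored falsy values should also fall back):

item = {"seeds": 12, "torrent_title": "Example.S01E01"}  # hypothetical API row

seeders = item.get("seeds") or 0
leechers = item.get("leeches") or 0
title = item.get("torrent_title") or ""
print("{0} {1} {2!r}".format(seeders, leechers, title))  # 12 0 'Example.S01E01'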
Code example #11
File: t411.py Project: ibibah/SickRage
    def get_filtered_items_from_torrents(self, mode, torrents):
        items = []
        for torrent in torrents:
            if mode == 'RSS' and 'category' in torrent and try_int(torrent['category'], 0) not in self.subcategories:
                continue

            try:
                title = torrent['name']
                torrent_id = torrent['id']
                download_url = (self.urls['download'] % torrent_id).encode('utf8')
                if not all([title, download_url]):
                    continue

                seeders = try_int(torrent['seeders'])
                leechers = try_int(torrent['leechers'])
                verified = bool(torrent['isVerified'])
                torrent_size = torrent['size']

                # Filter unseeded torrent
                if seeders < self.minseed or leechers < self.minleech:
                    if mode != 'RSS':
                        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                    continue

                if self.confirmed and not verified and mode != 'RSS':
                    logger.log(u"Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                    continue

                size = convert_size(torrent_size) or -1
                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                if mode != 'RSS':
                    logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                items.append(item)

            except Exception:
                logger.log(u"Invalid torrent data, skipping result: {0}".format(torrent), logger.DEBUG)
                logger.log(u"Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.DEBUG)

        return items
Code example #12
File: thepiratebay.py Project: billwurles/SportRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        """
        205 = SD, 208 = HD, 200 = All Videos
        https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
        """
        results = []

        # orderby is 7 in browse for seeders, but 8 in search!
        search_params = {
            "q": "",
            "type": "search",
            "orderby": 8,
            "page": 0,
            "category": 200
        }

        # Units
        units = ["B", "KIB", "MIB", "GIB"]

        def process_column_header(th):
            text = ""
            if th.a:
                text = th.a.get_text(strip=True)
            if not text:
                text = th.get_text(strip=True)
            return text

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                search_urls = (self.urls["search"],
                               self.urls["rss"])[mode == "RSS"]
                if not isinstance(search_urls, list):
                    search_urls = [search_urls]

                for search_url in search_urls:
                    if self.custom_url:
                        if not validators.url(self.custom_url):
                            logger.log(
                                "Invalid custom url: {0}".format(
                                    self.custom_url), logger.WARNING)
                            return results
                        search_url = urljoin(self.custom_url,
                                             search_url.split(self.url)[1])

                    if mode != "RSS":
                        search_params["q"] = search_string
                        logger.log(
                            "Search string: {}".format(
                                search_string.decode("utf-8")), logger.DEBUG)

                        # Prevents a 302 redirect: there is always a 301 from .se to the best mirror,
                        # so the extra redirect is excessive on the provider and spams the debug log unnecessarily
                        search_url, params = self.convert_url(
                            search_url, search_params)
                        data = self.get_url(search_url,
                                            params=params,
                                            returns="text")
                    else:
                        data = self.get_url(search_url, returns="text")

                    if not data:
                        logger.log(
                            "URL did not return data, maybe try a custom url, or a different one",
                            logger.DEBUG)
                        continue

                    with BS4Parser(data, "html5lib") as html:
                        torrent_table = html.find("table", id="searchResult")
                        torrent_rows = torrent_table(
                            "tr") if torrent_table else []

                        # Continue only if at least one Release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                "Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        labels = [
                            process_column_header(label)
                            for label in torrent_rows[0]("th")
                        ]

                        # Skip column headers
                        for result in torrent_rows[1:]:
                            try:
                                cells = result("td")

                                # Funky js on page messing up titles, this fixes that
                                title = result.find(
                                    class_="detLink")['title'].split(
                                        'Details for ', 1)[-1]
                                download_url = result.find(
                                    title="Download this torrent using magnet"
                                )["href"] + self._custom_trackers
                                if not self.magnet_regex.match(download_url):
                                    logger.log(
                                        "Got an invalid magnet: {0}".format(
                                            download_url))
                                    logger.log(
                                        "Invalid ThePirateBay proxy please try another one",
                                        logger.DEBUG)
                                    continue

                                if not all([title, download_url]):
                                    continue

                                seeders = try_int(
                                    cells[labels.index("SE")].get_text(
                                        strip=True))
                                leechers = try_int(
                                    cells[labels.index("LE")].get_text(
                                        strip=True))

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != "RSS":
                                        logger.log(
                                            "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                            .format(title, seeders,
                                                    leechers), logger.DEBUG)
                                    continue

                                # Accept Torrent only from Good People for every Episode Search
                                if self.confirmed and not result.find(
                                        alt=re.compile(r"VIP|Trusted")):
                                    if mode != "RSS":
                                        logger.log(
                                            "Found result: {0} but that doesn't seem like a trusted result so I'm ignoring it"
                                            .format(title), logger.DEBUG)
                                    continue

                                # Convert size after all possible skip scenarios
                                torrent_size = re.sub(
                                    r".*Size ([\d.]+).+([KMGT]iB).*", r"\1 \2",
                                    result.find(class_="detDesc").get_text(
                                        strip=True))
                                size = convert_size(torrent_size,
                                                    units=units) or -1

                                item = {
                                    'title': title,
                                    'link': download_url,
                                    'size': size,
                                    'seeders': seeders,
                                    'leechers': leechers,
                                    'hash': ''
                                }
                                if mode != "RSS":
                                    logger.log(
                                        "Found result: {0} with {1} seeders and {2} leechers"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)

                                items.append(item)
                            except StandardError:
                                continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
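
This excerpt validates download links with self.magnet_regex, defined elsewhere in the provider. A hypothetical stand-in that accepts a 40-hex-digit btih magnet with optional query parameters (the real pattern may differ):

import re

# Hypothetical stand-in; the provider's actual pattern is not shown in this excerpt
magnet_regex = re.compile(r"magnet:\?xt=urn:btih:[0-9a-fA-F]{40}(&[a-z]{2}=[^&]*)*")

link = "magnet:?xt=urn:btih:" + "a" * 40 + "&dn=Example&tr=udp%3A%2F%2Ftracker.example.org"
print(bool(magnet_regex.match(link)))  # True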
Code example #13
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            "searchstr": "",
            "filter_cat[1]": 1,
            "filter_cat[2]": 1,
            "filter_cat[3]": 1,
            "filter_cat[4]": 1,
            "filter_cat[5]": 1
        }

        # Units
        units = ["B", "KB", "MB", "GB", "TB", "PB"]

        def process_column_header(td):
            result = ""
            if td.a and td.a.img:
                result = td.a.img.get("title", td.a.get_text(strip=True))
            if not result:
                result = td.get_text(strip=True)
            return result

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode != "RSS":
                    logger.log(
                        "Search string: {search}".format(
                            search=search_string.decode("utf-8")),
                        logger.DEBUG)

                search_params["searchstr"] = search_string
                search_url = self.urls["search"]
                data = self.get_url(search_url,
                                    params=search_params,
                                    returns="text")
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    torrent_table = html.find("table", id="torrent_table")
                    torrent_rows = torrent_table.find_all(
                        "tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            "Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    # "", "", "Name /Year", "Files", "Time", "Size", "Snatches", "Seeders", "Leechers"
                    labels = [
                        process_column_header(label)
                        for label in torrent_rows[0].find_all("td")
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result.find_all("td")
                        if len(cells) < len(labels):
                            continue

                        try:
                            title = cells[labels.index("Name /Year")].find(
                                "a", dir="ltr").get_text(strip=True)
                            download_url = urljoin(
                                self.url,
                                cells[labels.index("Name /Year")].find(
                                    "a", title="Download")["href"])
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                cells[labels.index("Seeders")].get_text(
                                    strip=True))
                            leechers = try_int(
                                cells[labels.index("Leechers")].get_text(
                                    strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the"
                                        " minimum seeders or leechers: {} (S:{} L:{})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index(
                                "Size")].get_text(strip=True)
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != "RSS":
                                logger.log(
                                    "Found result: {} with {} seeders and {} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
Code example #14
File: bitcannon.py Project: puterdud92/Default
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals
        results = []

        url = "http://localhost:3000/"
        if self.custom_url:
            if not validators.url(self.custom_url, require_tld=False):
                logger.log(
                    "Invalid custom url set, please check your settings",
                    logger.WARNING)
                return results
            url = self.custom_url

        search_params = {}

        anime = ep_obj and ep_obj.show and ep_obj.show.anime
        search_params["category"] = ("tv", "anime")[bool(anime)]

        if self.api_key:
            search_params["apiKey"] = self.api_key

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                search_params["q"] = search_string
                if mode != "RSS":
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode('utf-8')), logger.DEBUG)

                search_url = urljoin(url, "api/search")
                parsed_json = self.get_url(search_url,
                                           params=search_params,
                                           returns="json")
                if not parsed_json:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                if not self._check_auth_from_data(parsed_json):
                    return results

                for result in parsed_json.pop("torrents", {}):
                    try:
                        title = result.pop("title", "")

                        info_hash = result.pop("infoHash", "")
                        download_url = "magnet:?xt=urn:btih:" + info_hash
                        if not all([title, download_url, info_hash]):
                            continue

                        swarm = result.pop("swarm", None)
                        if swarm:
                            seeders = try_int(swarm.pop("seeders", 0))
                            leechers = try_int(swarm.pop("leechers", 0))
                        else:
                            seeders = leechers = 0

                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log(
                                    "Discarding torrent because it doesn't meet the "
                                    "minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        size = convert_size(result.pop("size", -1)) or -1
                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': ''
                        }
                        if mode != "RSS":
                            logger.log(
                                "Found result: {0} with {1} seeders and {2} leechers"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        items.append(item)
                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
Code example #15
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode != "RSS":
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                url = self.urls['search'] % (search_string)
                data = self.get_url(url, returns="text")

                try:
                    parsed_json = json.loads(data)
                except ValueError:
                    continue

                if not isinstance(parsed_json, dict):
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                torrent_results = parsed_json['total_results']

                if not torrent_results:
                    logger.log(
                        "Data returned from provider does not contain any torrents",
                        logger.DEBUG)
                    continue

                logger.log(
                    'Number of torrents found on nCore = ' +
                    str(torrent_results), logger.INFO)

                for item in parsed_json['results']:
                    try:
                        title = item.pop("release_name")
                        download_url = item.pop("download_url")
                        if not all([title, download_url]):
                            continue

                        seeders = item.pop("seeders")
                        leechers = item.pop("leechers")

                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log(
                                    "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        torrent_size = item.pop("size", -1)
                        size = convert_size(torrent_size) or -1

                        if mode != "RSS":
                            logger.log(
                                "Found result: {0} with {1} seeders and {2} leechers with a file size {3}"
                                .format(title, seeders, leechers,
                                        size), logger.DEBUG)

                        result = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': ''
                        }
                        items.append(result)

                    except StandardError:
                        continue

            # For each search mode sort all the items by seeders
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items
        return results
Code example #16
File: rarbg.py Project: Tazor93/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        search_params = {
            "app_id": "sickrage2",
            "category": "tv",
            "min_seeders": try_int(self.minseed),
            "min_leechers": try_int(self.minleech),
            "limit": 100,
            "format": "json_extended",
            "ranked": try_int(self.ranked),
            "token": self.token,
        }

        if ep_obj is not None:
            ep_indexerid = ep_obj.show.indexerid
            ep_indexer = ep_obj.show.indexer
        else:
            ep_indexerid = None
            ep_indexer = None

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)
            if mode == "RSS":
                search_params["sort"] = "last"
                search_params["mode"] = "list"
                search_params.pop("search_string", None)
                search_params.pop("search_tvdb", None)
            else:
                search_params[
                    "sort"] = self.sorting if self.sorting else "seeders"
                search_params["mode"] = "search"

                if ep_indexer == INDEXER_TVDB and ep_indexerid:
                    search_params["search_tvdb"] = ep_indexerid
                else:
                    search_params.pop("search_tvdb", None)

            for search_string in search_strings[mode]:
                if mode != "RSS":
                    search_params["search_string"] = search_string
                    logger.log(
                        "Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                data = self.get_url(self.urls["api"],
                                    params=search_params,
                                    returns="json")
                if not isinstance(data, dict):
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                error = data.get("error")
                error_code = data.get("error_code")
                # Don't log when {"error":"No results found","error_code":20}
                # List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312
                if error:
                    if try_int(error_code) != 20:
                        logger.log(error)
                    continue

                torrent_results = data.get("torrent_results")
                if not torrent_results:
                    logger.log(
                        "Data returned from provider does not contain any torrents",
                        logger.DEBUG)
                    continue

                for item in torrent_results:
                    try:
                        title = item.pop("title")
                        download_url = item.pop("download")
                        if not all([title, download_url]):
                            continue

                        seeders = item.pop("seeders")
                        leechers = item.pop("leechers")
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log(
                                    "Discarding torrent because it doesn't meet the"
                                    " minimum seeders or leechers: {} (S:{} L:{})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        torrent_size = item.pop("size", -1)
                        size = convert_size(torrent_size) or -1

                        if mode != "RSS":
                            logger.log(
                                "Found result: {} with {} seeders and {} leechers"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        result = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers
                        }
                        items.append(result)
                    except StandardError:
                        continue

            # For each search mode sort all the items by seeders
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
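
The rarbg excerpt sends self.token, which login() refreshes elsewhere. For the public torrentapi this provider targeted, tokens were fetched with a get_token request and expired after about 15 minutes; a sketch of that call (endpoint per https://torrentapi.org/apidocs_v2.txt, error handling omitted):

import requests

# Sketch of the token fetch; the provider's actual login() is not shown in this excerpt
resp = requests.get("https://torrentapi.org/pubapi_v2.php",
                    params={"get_token": "get_token", "app_id": "sickrage2"})
token = resp.json().get("token")  # valid for ~15 minutes; the API allows about 1 request per 2 seconds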
Code example #17
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        'Search string: {0}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)
                # search string needs to be normalized, single quotes are apparently not allowed on the site
                # ç should also be replaced, people tend to use c instead
                replace_chars = {"'": '', "ç": 'c'}

                for k, v in replace_chars.iteritems():
                    search_string = search_string.replace(k, v)

                logger.log(
                    'Sanitized string: {0}'.format(
                        search_string.decode('utf-8')), logger.DEBUG)

                try:
                    search_params = {
                        'category': "2145",
                        'subcategory': "2184",
                        'q': re.sub(r'[()]', '', search_string)
                    }
                    data = self.get_url(self.urls['search'],
                                        params=search_params,
                                        returns='text')
                    if not data:
                        continue

                    with BS4Parser(data, 'html5lib') as html:
                        torrent_table = html.find(class_='table table-striped')
                        torrent_rows = torrent_table(
                            'tr') if torrent_table else []

                        # Continue only if at least one Release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                'Data returned from provider does not contain any torrents',
                                logger.DEBUG)
                            continue

                        # Skip column headers
                        for result in torrent_rows[1:]:
                            cells = result('td')
                            if len(cells) < 6:  # cells[5] (leechers) is read below
                                continue

                            download_url = ""
                            title = cells[0].find(
                                'a',
                                class_='torrent-name').get_text(strip=True)
                            for download_img in cells[0].select('a[href] img'):
                                if download_img['src'] == urljoin(
                                        self.url,
                                        "static/icons/icon_download.gif"):
                                    download_url = urljoin(
                                        self.url, download_img.parent['href'])
                                    break

                            if not (title and download_url):
                                continue

                            seeders = try_int(cells[4].get_text(strip=True))
                            leechers = try_int(cells[5].get_text(strip=True))

                            torrent_size = cells[2].get_text()
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    'Found result: {0} with {1} seeders and {2} leechers'
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log('Failed parsing provider {}.'.format(self.name),
                               logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
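
The sanitization step in the example above can be exercised on its own. A small sketch, written for Python 2 to match the iteritems() call above; the sample string is invented:

# -*- coding: utf-8 -*-
import re

def sanitize(search_string):
    # Single quotes are rejected by the site; ç is commonly typed as c.
    replace_chars = {"'": '', "ç": 'c'}
    for k, v in replace_chars.iteritems():
        search_string = search_string.replace(k, v)
    # Parentheses are stripped separately when the query params are built.
    return re.sub(r'[()]', '', search_string)

print(sanitize("Le garçon (l'original)"))  # -> Le garcon loriginal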
Code example #18
File: hdspace.py Project: lechat/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    search_url = self.urls['search'] % (quote_plus(
                        search_string.replace('.', ' ')), )
                else:
                    search_url = self.urls['search'] % ''

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url, returns='text')
                if not data or 'please try later' in data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    data = data.split('<div id="information"></div>')[1]
                    index = data.index('<table')
                except (IndexError, ValueError):
                    logger.log(u"Could not find main torrent table",
                               logger.ERROR)
                    continue

                html = BeautifulSoup(data[index:], 'html5lib')
                if not html:
                    logger.log(u"No html data parsed from provider",
                               logger.DEBUG)
                    continue

                torrents = html('tr')
                if not torrents:
                    continue

                # Skip column headers
                for result in torrents[1:]:
                    if len(result.contents) < 10:
                        # skip extraneous rows at the end
                        continue

                    try:
                        dl_href = result.find(
                            'a', attrs={'href':
                                        re.compile(r'download.php.*')})['href']
                        title = re.search('f=(.*).torrent',
                                          dl_href).group(1).replace('+', '.')
                        download_url = self.urls['base_url'] + dl_href
                        seeders = int(
                            result.find('span', class_='seedy').find('a').text)
                        leechers = int(
                            result.find('span',
                                        class_='leechy').find('a').text)
                        torrent_size = re.match(
                            r'.*?([0-9]+,?\.?[0-9]* [KkMmGg]+[Bb]+).*',
                            str(result), re.DOTALL).group(1)
                        size = convert_size(torrent_size) or -1

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': None
                        }
                        if mode != 'RSS':
                            logger.log(
                                u"Found result: {0} with {1} seeders and {2} leechers"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        items.append(item)

                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
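
The cut-before-parse workaround above is easy to demonstrate on a toy page. A sketch assuming BeautifulSoup and html5lib are installed, as the provider already requires; the markup is invented:

from bs4 import BeautifulSoup

page = '<div id="information"></div><junk <<<table><tr><td>row</td></tr></table>'
try:
    data = page.split('<div id="information"></div>')[1]
    index = data.index('<table')
except (IndexError, ValueError):
    raise SystemExit('Could not find main torrent table')

# Everything before the table, including the broken markup, is discarded.
html = BeautifulSoup(data[index:], 'html5lib')
print(html.find('td').get_text())  # -> row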
Code example #19
File: bitcannon.py Project: bitzorro/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals
        results = []

        url = "http://localhost:3000/"
        if self.custom_url:
            if not validators.url(self.custom_url, require_tld=False):
                logger.log("Invalid custom url set, please check your settings", logger.WARNING)
                return results
            url = self.custom_url

        search_params = {}

        anime = ep_obj and ep_obj.show and ep_obj.show.anime
        search_params["category"] = ("tv", "anime")[bool(anime)]

        if self.api_key:
            search_params["apiKey"] = self.api_key

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                search_params["q"] = search_string
                if mode != "RSS":
                    logger.log("Search string: {}".format(search_string), logger.DEBUG)

                search_url = urljoin(url, "api/search")
                parsed_json = self.get_url(search_url, params=search_params, returns="json")
                if not parsed_json:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                if not self._check_auth_from_data(parsed_json):
                    return results

                for result in parsed_json.pop("torrents", {}):
                    try:
                        title = result.pop("title", "")

                        info_hash = result.pop("infoHash", "")
                        download_url = "magnet:?xt=urn:btih:" + info_hash
                        if not all([title, download_url, info_hash]):
                            continue

                        swarm = result.pop("swarm", None)
                        if swarm:
                            seeders = try_int(swarm.pop("seeders", 0))
                            leechers = try_int(swarm.pop("leechers", 0))
                        else:
                            seeders = leechers = 0

                        if seeders < min(self.minseed, 1):
                            if mode != "RSS":
                                logger.log("Discarding torrent because it doesn't meet the "
                                           "minimum seeders: {0}. Seeders: {1})".format
                                           (title, seeders), logger.DEBUG)
                            continue

                        size = convert_size(result.pop("size", -1)) or -1
                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': None, 'hash': None}
                        if mode != "RSS":
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
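
Building the magnet link above is plain string assembly. A hedged sketch; the info hash is a made-up placeholder, and note the provider concatenates the raw title, so URL-quoting the display name would be safer in practice:

def make_magnet(info_hash, title):
    # Same shape the provider builds: the hash, then a display name.
    return "magnet:?xt=urn:btih:" + info_hash + "&dn=" + title

print(make_magnet('0123456789abcdef0123456789abcdef01234567', 'Show.S01E01'))
# magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=Show.S01E01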
Code example #20
File: gftracker.py Project: billwurles/SportRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # https://www.thegft.org/browse.php?view=0&c26=1&c37=1&c19=1&c47=1&c17=1&c4=1&search=arrow
        # Search Params
        search_params = {
            'view': 0,  # BROWSE
            'c4': 1,  # TV/XVID
            'c17': 1,  # TV/X264
            'c19': 1,  # TV/DVDRIP
            'c26': 1,  # TV/BLURAY
            'c37': 1,  # TV/DVDR
            'c47': 1,  # TV/SD
            'search': '',
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.a and td.a.img:
                result = td.a.img.get('title', td.a.get_text(strip=True))
            if not result:
                result = td.get_text(strip=True)
            return result

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params['search'] = search_string

                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('div', id='torrentBrowse')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            "Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    labels = [
                        process_column_header(label)
                        for label in torrent_rows[0]('td')
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:

                        try:
                            cells = result('td')

                            title = cells[labels.index('Name')].find(
                                'a').find_next('a')['title'] or cells[
                                    labels.index('Name')].find('a')['title']
                            download_url = self.url + cells[labels.index(
                                'DL')].find('a')['href']
                            if not all([title, download_url]):
                                continue

                            peers = cells[labels.index('S/L')].get_text(
                                strip=True).split('/', 1)
                            seeders = try_int(peers[0])
                            leechers = try_int(peers[1])

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the"
                                        " minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index(
                                'Size/Snatched')].get_text(strip=True).split(
                                    '/', 1)[0]
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    "Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except (AttributeError, IndexError, KeyError, TypeError, ValueError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
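
The header-label indexing above decouples the scrape from column order: map column names to positions once, then address cells through labels.index(). A self-contained sketch on an invented table (html5lib assumed installed):

from bs4 import BeautifulSoup

doc = ('<table>'
       '<tr><td>Name</td><td>Size/Snatched</td><td>S/L</td></tr>'
       '<tr><td>Show.S01E01</td><td>1.1 GB / 12</td><td>35/2</td></tr>'
       '</table>')
rows = BeautifulSoup(doc, 'html5lib').find('table')('tr')
labels = [td.get_text(strip=True) for td in rows[0]('td')]
cells = rows[1]('td')
seeders, leechers = cells[labels.index('S/L')].get_text(strip=True).split('/', 1)
size = cells[labels.index('Size/Snatched')].get_text(strip=True).split('/', 1)[0].strip()
print(seeders + '/' + leechers + ' at ' + size)  # -> 35/2 at 1.1 GB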
Code example #21
File: scenetime.py Project: Tazor93/SickRage
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals
        results = []
        if not self.login():
            return results

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                search_url = self.urls['search'] % (quote(search_string), self.categories)
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('div', id="torrenttable")
                    torrent_rows = []
                    if torrent_table:
                        torrent_rows = torrent_table.select("tr")

                    # Continue only if one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    # Scenetime apparently uses different number of cells in #torrenttable based
                    # on who you are. This works around that by extracting labels from the first
                    # <tr> and using their index to find the correct download/seeders/leechers td.
                    labels = [label.get_text(strip=True) for label in torrent_rows[0].find_all('td')]

                    for result in torrent_rows[1:]:
                        try:
                            cells = result.find_all('td')

                            link = cells[labels.index('Name')].find('a')
                            torrent_id = link['href'].replace('details.php?id=', '').split("&")[0]

                            title = link.get_text(strip=True)
                            download_url = self.urls['download'] % (torrent_id, "%s.torrent" % title.replace(" ", "."))

                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))
                            torrent_size = cells[labels.index('Size')].get_text()

                            size = convert_size(torrent_size) or -1

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
Code example #22
File: iptorrents.py Project: vegeto10/SickRage
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        freeleech = '&free=on' if self.freeleech else ''

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
                search_url = self.urls['search'] % (self.categories, freeleech,
                                                    search_string)
                search_url += ';o=seeders' if mode != 'RSS' else ''

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                try:
                    data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
                    with BS4Parser(data, 'html5lib') as html:
                        if not html:
                            logger.log(u"No data returned from provider",
                                       logger.DEBUG)
                            continue

                        if html.find(text='No Torrents Found!'):
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        torrent_table = html.find('table', class_='torrents')
                        torrents = torrent_table('tr') if torrent_table else []

                        # Continue only if one Release is found
                        if len(torrents) < 2:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        for result in torrents[1:]:
                            try:
                                title = result('td')[1].find('a').text
                                download_url = self.urls['base_url'] + result(
                                    'td')[3].find('a')['href']
                                seeders = int(
                                    result.find('td',
                                                class_='ac t_seeders').text)
                                leechers = int(
                                    result.find('td',
                                                class_='ac t_leechers').text)
                                torrent_size = result('td')[5].text
                                size = convert_size(torrent_size) or -1
                            except (AttributeError, TypeError, KeyError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)

                except Exception as e:
                    logger.log(
                        u"Failed parsing provider. Error: {0!r}".format(ex(e)),
                        logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)

            results += items

        return results
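
The button-stripping regex above can be checked in isolation; the markup snippet is invented:

import re

data = 'seed row<button class="dl">Get <b>it</b></button> intact tail'
cleaned = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
print(cleaned)  # -> seed row intact tail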
Code example #23
File: nyaatorrents.py Project: tioxxx/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if self.show and not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log(u'Search Mode: {}'.format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(
                        u'Search string: {}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)

                search_params = {
                    'page': 'rss',
                    'cats': '1_0',  # All anime
                    'sort': 2,  # Sort Descending By Seeders
                    'order': 1
                }
                if mode != 'RSS':
                    search_params['term'] = search_string

                data = self.cache.getRSSFeed(self.url,
                                             params=search_params)['entries']
                if not data:
                    logger.log(
                        'Data returned from provider does not contain any torrents',
                        logger.DEBUG)

                for curItem in data:
                    try:
                        title = curItem['title']
                        download_url = curItem['link']
                        if not all([title, download_url]):
                            continue

                        item_info = self.regex.search(curItem['summary'])
                        if not item_info:
                            logger.log(
                                'There was a problem parsing an item summary, skipping: {}'
                                .format(title), logger.DEBUG)
                            continue

                        seeders, leechers, torrent_size, verified = item_info.groups()
                        seeders = try_int(seeders)
                        leechers = try_int(leechers)

                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    'Discarding torrent because it doesn\'t meet the'
                                    ' minimum seeders or leechers: {} (S:{} L:{})'
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        if self.confirmed and not verified and mode != 'RSS':
                            logger.log(
                                'Found result {} but that doesn\'t seem like a verified result so I\'m ignoring it'
                                .format(title), logger.DEBUG)
                            continue

                        size = convert_size(torrent_size) or -1
                        result = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers
                        }
                        if mode != 'RSS':
                            logger.log(
                                'Found result: {} with {} seeders and {} leechers'
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        items.append(result)
                    except (AttributeError, IndexError, KeyError, TypeError, ValueError):
                        continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
Code example #24
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'user': self.username,
            'passkey': self.passkey,
            'search': '.',  # Dummy query for RSS search; the search param must be sent.
            'latest': 'true'
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.img:
                result = td.img.get('title')
            if not result:
                result = td.get_text(strip=True)
            return result.encode('utf-8')

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                    search_params['latest'] = 'false'
                    search_params['search'] = search_string

                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                result = json.loads(data)
                if 'results' in result:
                    for torrent in result['results']:
                        title = torrent['release_name']
                        download_url = torrent['download_url']
                        seeders = torrent['seeders']
                        leechers = torrent['leechers']
                        if seeders < self.minseed or leechers < self.minleech:
                            logger.log(
                                "Discarding {0} because its {1}/{2} seeders/leechers does not meet the"
                                " required minimum of {3}/{4}".format(
                                    title, seeders, leechers, self.minseed,
                                    self.minleech), logger.DEBUG)
                            continue

                        freeleech = torrent['freeleech']
                        if self.freeleech and not freeleech:
                            continue

                        size = torrent['size']
                        size = convert_size(size, units=units) or -1
                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': ''
                        }
                        logger.log(
                            "Found result: {0} with {1} seeders and {2} leechers"
                            .format(title, seeders, leechers), logger.DEBUG)
                        items.append(item)

                if 'error' in result:
                    logger.log(result['error'], logger.WARNING)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
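
The RSS/search toggle above comes down to mutating two request params. A sketch with placeholder credentials:

def build_params(mode, search_string, username='user', passkey='secret'):
    # 'latest': 'true' asks the API for the newest releases (RSS mode);
    # a real query turns that off and fills in the search term.
    params = {'user': username, 'passkey': passkey,
              'search': '.', 'latest': 'true'}
    if mode != 'RSS':
        params['latest'] = 'false'
        params['search'] = search_string
    return params

print(build_params('RSS', ''))
print(build_params('Episode', 'Show S01E01'))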
Code example #25
File: ilcorsaronero.py Project: Elettronik/SickRage
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        for mode in search_params:
            items = []
            logger.log(u'Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                self.page = 1
                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):

                    if last_page:
                        break

                    search_url = self.urls['search_page'].format(search_string, x)

                    logger.log(u'Search string: {0}'.format(search_string.decode('utf-8')), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u'No data returned from provider', logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            table_header = html.find('tr', class_='bordo')
                            torrent_table = table_header.find_parent('table') if table_header else None
                            if not torrent_table:
                                logger.log(u'Could not find table of torrents', logger.ERROR)
                                continue

                            torrent_rows = torrent_table('tr')

                            # Continue only if one Release is found
                            if (len(torrent_rows) < 6) or (len(torrent_rows[2]('td')) == 1):
                                logger.log(u'Data returned from provider does not contain any torrents', logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 45:
                                last_page = 1

                            for result in torrent_rows[2:-3]:

                                try:
                                    link = result('td')[1].find('a')['href']
                                    title = re.sub(' +', ' ', link.rsplit('/', 1)[-1].replace('_', ' '))
                                    hash = result('td')[3].find('input', class_='downarrow')['value'].upper()
                                    seeders = try_int(result('td')[5].text)
                                    leechers = try_int(result('td')[6].text)
                                    torrent_size = result('td')[2].string
                                    size = convert_size(torrent_size) or -1

                                    # Download Urls
                                    download_url = self.urls['download'] % hash
                                    if urllib.urlopen(download_url).getcode() == 404:
                                        logger.log(u'Torrent hash not found in itorrents.org, searching for magnet',
                                                   logger.DEBUG)
                                        data_detail = self.get_url(link, returns='text')
                                        with BS4Parser(data_detail, 'html5lib') as html_detail:
                                            sources_row = html_detail.find('td', class_='header2').parent
                                            source_magnet = sources_row('td')[1].find('a', class_='forbtn', title='Magnet')
                                            if source_magnet and source_magnet != 'None':
                                                download_url = source_magnet['href']
                                            else:
                                                continue

                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(title) and not self.subtitle:
                                    logger.log(u'Torrent is subtitled, skipping: {0} '.format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(title):
                                    logger.log(u'Torrent isn\'t English audio/subtitled, skipping: {0}'.format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    new_title = search_show + ep_params
                                    title = new_title

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee]\d{1,2}(?:-\d{1,2})?)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    logger.log(u'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                                   (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log(u'Found result: {0} with {1} seeders and {2} leechers'.format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(u'Failed parsing provider. Traceback: {0}'.format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
Code example #26
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if not self.show or not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params = {
                    "terms": search_string,
                    "type": 1,  # get anime types
                }

                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as soup:
                    torrent_table = soup.find('table', class_='listing')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            u"Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    a = 1 if len(torrent_rows[0]('td')) < 2 else 0

                    for top, bot in zip(torrent_rows[a::2],
                                        torrent_rows[a + 1::2]):
                        try:
                            desc_top = top.find('td', class_='desc-top')
                            title = desc_top.get_text(strip=True)
                            download_url = desc_top.find('a')['href']

                            desc_bottom = bot.find(
                                'td', class_='desc-bot').get_text(strip=True)
                            size = convert_size(
                                desc_bottom.split('|')[1].replace(
                                    'Size:', '').strip()) or -1

                            stats = bot.find(
                                'td', class_='stats').get_text(strip=True)
                            sl = re.match(
                                r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
                                stats.replace(' ', ''))
                            seeders = try_int(sl.group('seeders')) if sl else 0
                            leechers = try_int(
                                sl.group('leechers')) if sl else 0
                        except (AttributeError, IndexError, KeyError, TypeError, ValueError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': None
                        }
                        if mode != 'RSS':
                            logger.log(
                                u"Found result: {0} with {1} seeders and {2} leechers"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
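
The stats-cell regex above is easiest to read in isolation. A sketch with an invented stats string:

import re

stats = 'S: 12 L: 3 C: 0 ID: 123456'
sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
              stats.replace(' ', ''))
if sl:
    print(sl.group('seeders') + ' seeders, ' + sl.group('leechers') + ' leechers')
# -> 12 seeders, 3 leechers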
Code example #27
File: torrentproject.py Project: DazzFX/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        search_params = {
            'out': 'json',
            'filter': 2101,
            'num': 150
        }

        for mode in search_strings:  # Mode = RSS, Season, Episode
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_params['s'] = search_string

                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log("Invalid custom url set, please check your settings", logger.WARNING)
                        return results
                    search_url = self.custom_url
                else:
                    search_url = self.url

                torrents = self.get_url(search_url, params=search_params, returns='json')
                if not (torrents and "total_found" in torrents and int(torrents["total_found"]) > 0):
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                del torrents["total_found"]

                for i in torrents:
                    title = torrents[i]["title"]
                    seeders = try_int(torrents[i]["seeds"], 1)
                    leechers = try_int(torrents[i]["leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Torrent doesn't meet minimum seeds & leechers not selecting : {0}".format(title), logger.DEBUG)
                        continue

                    t_hash = torrents[i]["torrent_hash"]
                    torrent_size = torrents[i]["torrent_size"]
                    size = convert_size(torrent_size) or -1

                    try:
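                        # Deliberate flow control: if either assert fails, we
                        # jump to the except below and fall back to a plain
                        # magnet built with the configured custom trackers.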
                        assert seeders < 10
                        assert mode != 'RSS'
                        logger.log(u"Torrent has less than 10 seeds getting dyn trackers: " + title, logger.DEBUG)

                        if self.custom_url:
                            if not validators.url(self.custom_url):
                                logger.log("Invalid custom url set, please check your settings", logger.WARNING)
                                return results
                            trackers_url = self.custom_url
                        else:
                            trackers_url = self.url

                        trackers_url = urljoin(trackers_url, t_hash + "/trackers_json")
                        jdata = self.get_url(trackers_url, returns='json')

                        assert jdata != "maintenance"
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "".join(["&tr=" + s for s in jdata])
                    except (Exception, AssertionError):
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers

                    if not all([title, download_url]):
                        continue

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}

                    if mode != 'RSS':
                        logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format
                                   (title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Code example #28
File: xthor.py Project: lastdevonearth/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        """
            Séries / Pack TV 13
            Séries / TV FR 14
            Séries / HD FR 15
            Séries / TV VOSTFR 16
            Séries / HD VOSTFR 17
            Mangas (Anime) 32
            Sport 34
        """
        search_params = {
            'only_free': try_int(self.freeleech),
            'searchin': 'title',
            'incldead': 0,
            'type': 'desc',
            'c13': 1, 'c14': 1, 'c15': 1,
            'c16': 1, 'c17': 1, 'c32': 1
        }

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)

            # Sorting: 1: Name, 3: Comments, 5: Size, 6: Completed, 7: Seeders, 8: Leechers (4: Time ?)
            search_params['sort'] = (7, 4)[mode == 'RSS']
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                search_params['search'] = search_string
                search_url = self.urls['search'] + urlencode(search_params)
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find("table", class_="table2 table-bordered2")
                    torrent_rows = []
                    if torrent_table:
                        torrent_rows = torrent_table.find_all("tr")

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    def process_column_header(td):
                        result = ''
                        if td.a:
                            result = td.a.get('title', td.a.get_text(strip=True))
                        if not result:
                            result = td.get_text(strip=True)
                        return result

                    # Catégorie, Nom du Torrent, (Download), (Bookmark), Com., Taille, Complété, Seeders, Leechers
                    labels = [process_column_header(label) for label in torrent_rows[0].find_all('td')]

                    for row in torrent_rows[1:]:
                        cells = row.find_all('td')
                        if len(cells) < len(labels):
                            continue
                        try:
                            title = cells[labels.index('Nom du Torrent')].get_text(strip=True)
                            download_url = self.url + '/' + row.find("a", href=re.compile("download.php"))['href']
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                                continue

                            size = convert_size(cells[labels.index('Taille')].get_text(strip=True)) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)
                            items.append(item)
                        except (AttributeError, IndexError, KeyError, TypeError, ValueError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
Code example #29
File: bitsnoop.py Project: ratoaq2/SickRageSickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                try:
                    search_url = (self.urls['rss'], self.urls['search'] + search_string + '/s/d/1/?fmt=rss')[mode != 'RSS']

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                        continue

                    data = BeautifulSoup(data, 'html5lib')
                    for item in data.findAll('item'):
                        try:
                            if not item.category.text.endswith(('TV', 'Anime')):
                                continue

                            title = item.title.text
                            assert isinstance(title, unicode)
                            # Use the torcache link bitsnoop provides,
                            # unless it is not torcache or we are not using blackhole
                            # because we want to use magnets if connecting direct to client
                            # so that proxies work.
                            download_url = item.enclosure['url']
                            if sickbeard.TORRENT_METHOD != "blackhole" or 'torcache' not in download_url:
                                download_url = item.find('magneturi').next.replace('CDATA', '').strip('[]') + self._custom_trackers

                            if not (title and download_url):
                                continue

                            seeders = try_int(item.find('numseeders').text)
                            leechers = try_int(item.find('numleechers').text)
                            torrent_size = item.find('size').text
                            size = convert_size(torrent_size) or -1

                            info_hash = item.find('infohash').text

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
                        if mode != 'RSS':
                            logger.log(u"Found result: {0!s} with {1!s} seeders and {2!s} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log(u"Failed parsing provider. Traceback: {0!r}".format(traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
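
convert_size shows up in every example above; a simplified stand-in makes its contract explicit. This sketch only handles the '<number> <unit>' shape (the real SickRage helper accepts more formats), with the default units matching the list passed in the gftracker example:

def convert_size(size, units=('B', 'KB', 'MB', 'GB', 'TB', 'PB')):
    # Human-readable size in, bytes out; None when the input cannot be
    # parsed, which is why callers write "convert_size(...) or -1".
    try:
        value, unit = size.strip().split(' ')
        return float(value.replace(',', '')) * 1024 ** units.index(unit.upper())
    except (AttributeError, ValueError):
        return None

print(convert_size('1.1 GB'))   # -> 1181116006.4
print(convert_size('garbage'))  # -> None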
Code example #30
File: norbits.py Project: bitzorro/SickRage
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        """ Do the actual searching and JSON parsing"""

        results = []

        for mode in search_params:
            items = []
            logger.log('Search Mode: {}'.format(mode), logger.DEBUG)

            for search_string in search_params[mode]:
                if mode != 'RSS':
                    logger.log('Search string: {}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                post_data = {
                    'username': self.username,
                    'passkey': self.passkey,
                    'category': '2',  # TV Category
                    'search': search_string,
                }

                self._check_auth()
                parsed_json = self.get_url(self.urls['search'],
                                           post_data=json.dumps(post_data),
                                           returns='json')

                if not parsed_json:
                    return results

                if self._checkAuthFromData(parsed_json):
                    json_items = parsed_json.get('data', '')
                    if not json_items:
                        logger.log('Resulting JSON from provider is not correct, '
                                   'not parsing it', logger.ERROR)
                        continue

                    for item in json_items.get('torrents', []):
                        title = item.pop('name', '')
                        download_url = '{}{}'.format(
                            self.urls['download'],
                            urlencode({'id': item.pop('id', ''), 'passkey': self.passkey}))

                        if not all([title, download_url]):
                            continue

                        seeders = try_int(item.pop('seeders', 0))
                        leechers = try_int(item.pop('leechers', 0))

                        if seeders < min(self.minseed, 1):
                            logger.log('Discarding torrent because it does not meet '
                                       'the minimum seeders: {0} (seeders: {1})'.format(title, seeders), logger.DEBUG)
                            continue

                        info_hash = item.pop('info_hash', '')
                        size = convert_size(item.pop('size', -1), -1)

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': None, 'hash': info_hash}
                        if mode != 'RSS':
                            logger.log('Found result: {0} with {1} seeders and {2} leechers'.format(
                                title, seeders, leechers), logger.DEBUG)

                        items.append(item)
            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
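For reference, the field accesses above imply a JSON response of roughly this shape; the keys are inferred from the .get()/.pop() calls in the code, not from documented Norbits API output:

# Hypothetical response shape, inferred from the parsing code above.
example_response = {
    'status': 'success',  # whatever _checkAuthFromData inspects (assumed)
    'data': {
        'torrents': [
            {
                'name': 'Some.Show.S01E01.720p',
                'id': 12345,
                'seeders': 10,
                'leechers': 2,
                'info_hash': '0123456789abcdef0123',
                'size': 1073741824,
            },
        ],
    },
}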
Code example #31
File: torrentz.py Project: aDarkling/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:
                search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string,
                               logger.DEBUG)

                    search_url += '?q=' + urllib.parse.quote_plus(
                        search_string)

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log(
                        u"Expected xml but got something else, is your mirror failing?",
                        logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser.findAll('item'):
                            if item.category and 'tv' not in item.category.text:
                                continue

                            title = item.title.text.rsplit(' ', 1)[0].replace(
                                ' ', '.')
                            t_hash = item.guid.text.rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(
                                item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            items.append(
                                (title, download_url, size, seeders, leechers))
                except StandardError:
                    logger.log(
                        u"Failed parsing provider. Traceback: %r" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
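The _split_description helper is not part of this listing. A plausible sketch, assuming the Torrentz description reads like "Size: 701 MB Seeds: 12 Peers: 3" (the exact wording is an assumption):

import re

def _split_description(description):
    # Hypothetical stand-in for the provider's helper: pull the first three
    # numbers out of the description and hand the size back with units so
    # convert_size() can parse it.
    numbers = re.findall(r'\d+', description)
    torrent_size = '{0} MB'.format(numbers[0]) if numbers else None
    seeders = int(numbers[1]) if len(numbers) > 1 else 0
    leechers = int(numbers[2]) if len(numbers) > 2 else 0
    return torrent_size, seeders, leechers

# _split_description('Size: 701 MB Seeds: 12 Peers: 3') -> ('701 MB', 12, 3)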
Code example #32
    def test_convert_size(self):
        # converts pretty file sizes to integers
        self.assertEqual(convert_size('1 B'), 1)
        self.assertEqual(convert_size('1 KB'), 1024)
        self.assertEqual(
            convert_size('1 kb', use_decimal=True), 1000
        )  # can use decimal units (e.g. KB = 1000 bytes instead of 1024)

        # returns integer sizes for integers
        self.assertEqual(convert_size(0, -1), 0)
        self.assertEqual(convert_size(100, -1), 100)
        self.assertEqual(convert_size(1.312, -1),
                         1)  # returns integer sizes for floats too

        # without a default value, failures return None
        self.assertEqual(convert_size('pancakes'), None)

        # default value can be anything
        self.assertEqual(convert_size(None, -1), -1)
        self.assertEqual(convert_size('', 3.14), 3.14)
        self.assertEqual(convert_size('elephant', 'frog'), 'frog')

        # negative sizes return 0
        self.assertEqual(convert_size(-1024, -1), 0)
        self.assertEqual(convert_size('-1 GB', -1), 0)

        # can also use `or` for a default value
        self.assertEqual(convert_size(None) or 100, 100)
        self.assertEqual(convert_size(None) or 1.61803,
                         1.61803)  # default doesn't have to be integer
        self.assertEqual(convert_size(None) or '100',
                         '100')  # default doesn't have to be numeric either
        self.assertEqual(
            convert_size('-1 GB') or -1,
            -1)  # can use `or` to provide a default when size evaluates to 0

        # default units can be kwarg'd
        self.assertEqual(convert_size('1', default_units='GB'),
                         convert_size('1 GB'))

        # separator can be kwarg'd
        self.assertEqual(convert_size('1?GB', sep='?'), convert_size('1 GB'))

        # can use custom dictionary to support internationalization
        french = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']
        self.assertEqual(convert_size('1 o', units=french), 1)
        self.assertEqual(convert_size('1 go', use_decimal=True, units=french),
                         1000000000)
        self.assertEqual(convert_size('1 o'),
                         None)  # Wrong units so result is None

        # custom units need to be uppercase or they won't match
        oops = ['b', 'kb', 'Mb', 'Gb', 'tB', 'Pb']
        self.assertEqual(convert_size('1 b', units=oops), None)
        self.assertEqual(convert_size('1 B', units=oops), None)
        self.assertEqual(convert_size('1 Mb', units=oops), None)
        self.assertEqual(convert_size('1 MB', units=oops), None)
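Taken together, these assertions pin down the helper's contract. A minimal sketch that satisfies every test above; the real SickRage helper may differ in details:

def convert_size(size, default=None, use_decimal=False, units=None,
                 default_units=None, sep=' '):
    # Sketch only: binary (1024) units by default, decimal (1000) on request,
    # case-insensitive unit matching against an uppercase unit list.
    units = units or ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    if size is None:
        return default
    if isinstance(size, (int, float)):
        return max(int(size), 0)  # negative numeric sizes return 0
    try:
        value, _, unit = str(size).strip().partition(sep)
        unit = (unit.strip() or default_units or units[0]).upper()
        exponent = units.index(unit)  # ValueError for unknown units
        base = 1000 if use_decimal else 1024
        return max(int(float(value) * base ** exponent), 0)
    except ValueError:
        return default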
Code example #33
File: hounddawgs.py Project: pedro2d10/SickRage-FR
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {search}".format(
                            search=search_string.decode('utf-8')),
                        logger.DEBUG)

                self.search_params['searchstr'] = search_string

                data = self.get_url(self.urls['search'],
                                    params=self.search_params)
                if not data:
                    logger.log(u'URL did not return data', logger.DEBUG)
                    continue

                strTableStart = "<table class=\"torrent_table"
                startTableIndex = data.find(strTableStart)
                trimmedData = data[startTableIndex:]
                if not trimmedData:
                    continue

                try:
                    with BS4Parser(trimmedData, 'html5lib') as html:
                        result_table = html.find('table',
                                                 {'id': 'torrent_table'})

                        if not result_table:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        result_tbody = result_table.find('tbody')
                        entries = result_tbody.contents
                        del entries[1::2]

                        for result in entries[1:]:

                            torrent = result.find_all('td')
                            if len(torrent) <= 1:
                                break

                            allAs = (torrent[1]).find_all('a')

                            try:
                                notinternal = result.find(
                                    'img',
                                    src='/static//common/user_upload.png')
                                if self.ranked and notinternal:
                                    logger.log(
                                        u"Found a user uploaded release, Ignoring it..",
                                        logger.DEBUG)
                                    continue
                                freeleech = result.find(
                                    'img',
                                    src='/static//common/browse/freeleech.png')
                                if self.freeleech and not freeleech:
                                    continue
                                title = allAs[2].string
                                download_url = self.urls['base_url'] + allAs[
                                    0].attrs['href']
                                torrent_size = result.find("td", class_="nobr").find_next_sibling("td").string
                                size = convert_size(torrent_size) or -1
                                seeders = try_int(
                                    (result.findAll('td')[6]).text)
                                leechers = try_int(
                                    (result.findAll('td')[7]).text)

                            except (AttributeError, TypeError):
                                continue

                            if not title or not download_url:
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result: %s with %s seeders and %s leechers"
                                    % (title, seeders, leechers), logger.DEBUG)

                            items.append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
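One non-obvious line above is del entries[1::2], which drops every second child of the tbody, presumably the whitespace text nodes html5lib interleaves between rows (an assumption about the markup). A quick illustration of the slice-delete itself:

entries = ['tr0', '\n', 'tr1', '\n', 'tr2', '\n']
del entries[1::2]  # removes indices 1, 3 and 5
print(entries)     # ['tr0', 'tr1', 'tr2']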
Code example #34
File: iptorrents.py Project: lastdevonearth/SickRage
    def search(
        self, search_params, age=0, ep_obj=None
    ):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        freeleech = "&free=on" if self.freeleech else ""

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != "RSS":
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
                search_url = self.urls["search"] % (self.categories, freeleech, search_string)
                search_url += ";o=seeders" if mode != "RSS" else ""
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data:
                    continue

                try:
                    data = re.sub(r"(?im)<button.+?<[\/]button>", "", data, 0)
                    with BS4Parser(data, "html5lib") as html:
                        if not html:
                            logger.log(u"No data returned from provider", logger.DEBUG)
                            continue

                        if html.find(text="No Torrents Found!"):
                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        torrent_table = html.find("table", attrs={"class": "torrents"})
                        torrents = torrent_table.find_all("tr") if torrent_table else []

                        # Continue only if at least one release is found
                        if len(torrents) < 2:
                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        for result in torrents[1:]:
                            try:
                                title = result.find_all("td")[1].find("a").text
                                download_url = self.urls["base_url"] + result.find_all("td")[3].find("a")["href"]
                                seeders = int(result.find("td", attrs={"class": "ac t_seeders"}).text)
                                leechers = int(result.find("td", attrs={"class": "ac t_leechers"}).text)
                                torrent_size = result.find_all("td")[5].text
                                size = convert_size(torrent_size) or -1
                            except (AttributeError, TypeError, KeyError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers
                                        ),
                                        logger.DEBUG,
                                    )
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != "RSS":
                                logger.log(
                                    u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers),
                                    logger.DEBUG,
                                )

                            items.append(item)

                except Exception as e:
                    logger.log(u"Failed parsing provider. Error: %r" % ex(e), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
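The re.sub above removes button elements before parsing, since their markup can confuse the row extraction. A toy example of the same pattern on a hypothetical fragment (note that without re.S the match cannot span newlines):

import re

html = 'before<button onclick="x">Get <b>it</b></button>after'
cleaned = re.sub(r"(?im)<button.+?<[\/]button>", "", html)
print(cleaned)  # beforeafter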
Code example #35
File: torrentday.py Project: tioxxx/SickRage
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if not self.login():
            return results

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_string = '+'.join(search_string.split())

                post_data = dict(
                    {
                        '/browse.php?': None,
                        'cata': 'yes',
                        'jxt': 8,
                        'jxw': 'b',
                        'search': search_string
                    }, **self.categories[mode])

                if self.freeleech:
                    post_data.update({'free': 'on'})

                parsedJSON = self.get_url(self.urls['search'],
                                          post_data=post_data,
                                          returns='json')
                if not parsedJSON:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                try:
                    torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get(
                        'torrents', [])
                except Exception:
                    logger.log(
                        u"Data returned from provider does not contain any torrents",
                        logger.DEBUG)
                    continue

                for torrent in torrents:

                    title = re.sub(
                        r"\[.*\=.*\].*\[/.*\]", "",
                        torrent['name']) if torrent['name'] else None
                    download_url = urljoin(
                        self.urls['download'], '{}/{}'.format(
                            torrent['id'], torrent['fname'])
                    ) if torrent['id'] and torrent['fname'] else None

                    if not all([title, download_url]):
                        continue

                    seeders = int(torrent['seed']) if torrent['seed'] else 1
                    leechers = int(torrent['leech']) if torrent['leech'] else 0

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(
                                u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})"
                                .format(title, seeders,
                                        leechers), logger.DEBUG)
                        continue

                    torrent_size = torrent['size']
                    size = convert_size(torrent_size) or -1

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'hash': None
                    }

                    if mode != 'RSS':
                        logger.log(
                            u"Found result: {} with {} seeders and {} leechers"
                            .format(title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
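As with other JSON providers here, the chained lookups imply a response of roughly this nested shape; the keys are inferred from the code above, not from documented API output:

# Hypothetical response shape, inferred from
# parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
# and the per-torrent fields used above.
example_response = {
    'Fs': [{
        'Cn': {
            'torrents': [{
                'name': 'Some Show S01E01 720p',
                'id': 123,
                'fname': 'Some.Show.S01E01.720p.torrent',
                'seed': '10',
                'leech': '2',
                'size': '1.2 GB',
            }],
        },
    }],
}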
Code example #36
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'action': 'newbrowse',
            'group': 3,
            'search': '',
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.img:
                result = td.img.get('title')
            if not result:
                result = td.get_text(strip=True)
            return result.encode('utf-8')

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params['search'] = search_string

                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='torrent_table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            u"Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    # Literal:     Navn, Størrelse, Kommentarer, Tilføjet, Snatches, Seeders, Leechers
                    # Translation: Name, Size,      Comments,    Added,    Snatches, Seeders, Leechers
                    labels = [
                        process_column_header(label)
                        for label in torrent_rows[0]('td')
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:

                        try:
                            title = result.find(
                                class_='croptorrenttext').get_text(strip=True)
                            download_url = self.url + result.find(
                                title="Direkte download link")['href']
                            if not all([title, download_url]):
                                continue

                            cells = result('td')

                            seeders = try_int(
                                cells[labels.index('Seeders')].get_text(
                                    strip=True))
                            leechers = try_int(
                                cells[labels.index('Leechers')].get_text(
                                    strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the"
                                        u" minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            freeleech = result.find(class_='freeleech')
                            if self.freeleech and not freeleech:
                                continue

                            torrent_size = cells[labels.index(
                                'Størrelse')].contents[0]
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
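The process_column_header/labels.index(...) pattern above resolves cells by header text instead of hard-coded positions, so the scrape survives column reordering. A toy illustration:

labels = ['Navn', 'Størrelse', 'Seeders', 'Leechers']  # parsed header row
cells = ['Some.Show.S01E01', '1.2 GB', '15', '3']      # one torrent row
seeders = int(cells[labels.index('Seeders')])          # 15
size_text = cells[labels.index('Størrelse')]           # '1.2 GB'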
Code example #37
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        search_params = {'p': 0}

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:
                search_params['q'] = search_string.encode('utf-8')

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {search}".format(
                            search=search_string.decode('utf-8')),
                        logger.DEBUG)
                    search_params['order'] = '0'
                else:
                    search_params['order'] = '2'

                search_url = self.urls['api'] + '?' + urlencode(search_params)
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                jdata = self.get_url(search_url, json=True)
                if not jdata:
                    logger.log(u"No data returned to be parsed!!!",
                               logger.DEBUG)
                    continue

                try:

                    for torrent in jdata:
                        if not torrent['name']:
                            logger.log(u"Ignoring result since it has no name",
                                       logger.DEBUG)
                            continue

                        if torrent['ff']:
                            logger.log(
                                u"Ignoring result for %s since it's a fake (level = %s)"
                                % (torrent['name'], torrent['ff']),
                                logger.DEBUG)
                            continue

                        if not torrent['files']:
                            logger.log(
                                u"Ignoring result for %s without files" %
                                torrent['name'], logger.DEBUG)
                            continue

                        download_url = torrent['magnet'] + self._custom_trackers if torrent['magnet'] else None

                        # Provider doesn't provide seeders/leechers
                        seeders = 1
                        leechers = 0
                        title = torrent['name']
                        torrent_size = torrent['size']
                        size = convert_size(torrent_size) or -1

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent (Unsupported)
                        # if seeders < self.minseed or leechers < self.minleech:
                        #     if mode != 'RSS':
                        #         logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                        #     continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s " % title,
                                       logger.DEBUG)

                        items.append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.WARNING)

            # For each search mode sort all the items by seeders if available
            # items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
Code example #38
File: gftracker.py Project: adaur/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # https://www.thegft.org/browse.php?view=0&c26=1&c37=1&c19=1&c47=1&c17=1&c4=1&search=arrow
        search_params = {
            'view': 0,  # BROWSE
            'c4': 1,  # TV/XVID
            'c17': 1,  # TV/X264
            'c19': 1,  # TV/DVDRIP
            'c26': 1,  # TV/BLURAY
            'c37': 1,  # TV/DVDR
            'c47': 1,  # TV/SD
            'search': '',
        }

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                search_params['search'] = search_string

                search_url = "%s?%s" % (self.urls['search'], urlencode(search_params))
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                # Returns top 30 results by default, expandable in user profile
                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('div', id='torrentBrowse')
                    torrent_rows = torrent_table.find_all('tr') if torrent_table else []

                    # Continue only if at least one release is found
                    if len(torrent_rows) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    def process_column_header(td):
                        result = ''
                        if td.a and td.a.img:
                            result = td.a.img.get('title', td.a.get_text(strip=True))
                        if not result:
                            result = td.get_text(strip=True)
                        return result

                    labels = [process_column_header(label) for label in torrent_rows[0].find_all('td')]

                    # Skip colheader
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.find_all('td')

                            title = cells[labels.index('Name')].find('a').find_next('a')['title'] or cells[labels.index('Name')].find('a')['title']
                            download_url = self.url + cells[labels.index('DL')].find('a')['href']
                            if not all([title, download_url]):
                                continue

                            peers = cells[labels.index('S/L')].get_text(strip=True).split('/', 1)
                            seeders = try_int(peers[0])
                            leechers = try_int(peers[1])
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index('Size/Snatched')].get_text(strip=True).split('/', 1)[0]
                            size = convert_size(torrent_size) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title, logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
Code example #39
File: transmitthenet.py Project: Comptezero/SickRage
    def search(
        self, search_strings, age=0, ep_obj=None
    ):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            for search_string in search_strings[mode]:

                if mode != "RSS":
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")), logger.DEBUG)

                search_params = {
                    "searchtext": search_string,
                    "filter_freeleech": (0, 1)[self.freeleech is True],
                    "order_by": ("seeders", "time")[mode == "RSS"],
                    "order_way": "desc",
                }

                if not search_string:
                    del search_params["searchtext"]

                data = self.get_url(self.urls["search"], params=search_params, returns="text")
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                try:
                    with BS4Parser(data, "html5lib") as html:
                        torrent_table = html.find("table", {"id": "torrent_table"})
                        if not torrent_table:
                            logger.log(u"Data returned from %s does not contain any torrents" % self.name, logger.DEBUG)
                            continue

                        torrent_rows = torrent_table.findAll("tr", {"class": "torrent"})

                        # Continue only if at least one release is found
                        if not torrent_rows:
                            logger.log(u"Data returned from %s does not contain any torrents" % self.name, logger.DEBUG)
                            continue

                        for torrent_row in torrent_rows:
                            freeleech = torrent_row.find("img", alt="Freeleech") is not None
                            if self.freeleech and not freeleech:
                                continue

                            download_item = torrent_row.find("a", {"title": "Download Torrent"})
                            if not download_item:
                                continue

                            download_url = urljoin(self.url, download_item["href"])

                            temp_anchor = torrent_row.find("a", {"data-src": True})
                            title = temp_anchor["data-src"].rsplit(".", 1)[0]
                            if not title:
                                title = torrent_row.find("a", onmouseout="return nd();").string
                                title = title.replace("[", "").replace("]", "").replace("/ ", "") if title else ""

                            temp_anchor = torrent_row.find("span", class_="time").parent.find_next_sibling()
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(temp_anchor.text.strip())
                            leechers = try_int(temp_anchor.find_next_sibling().text.strip())

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the"
                                        u" minimum seeders or leechers: {} (S:{} L:{})".format(
                                            title, seeders, leechers
                                        ),
                                        logger.DEBUG,
                                    )
                                continue

                            cells = torrent_row.find_all("td")
                            torrent_size = cells[5].text.strip()
                            size = convert_size(torrent_size) or -1

                            item = {
                                "title": title,
                                "link": download_url,
                                "size": size,
                                "seeders": seeders,
                                "leechers": leechers,
                                "hash": None,
                            }
                            if mode != "RSS":
                                logger.log(
                                    u"Found result: {} with {} seeders and {} leechers".format(
                                        title, seeders, leechers
                                    ),
                                    logger.DEBUG,
                                )

                            items.append(item)
                except Exception:
                    logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders
            items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
            results += items

        return results
Code example #40
File: kat.py Project: ratoaq2/SickRageSickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []

        anime = (self.show and self.show.anime) or (ep_obj and ep_obj.show and ep_obj.show.anime) or False
        search_params = {
            "q": "",
            "field": "seeders",
            "sorder": "desc",
            "rss": 1,
            "category": ("tv", "anime")[anime]
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                search_params["q"] = search_string if mode != "RSS" else ""
                search_params["field"] = "seeders" if mode != "RSS" else "time_add"

                if mode != "RSS":
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_url = self.urls["search"] % ("usearch" if mode != "RSS"
                                                    else search_string)
                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log(
                            "Invalid custom url: {0}".format(self.custom_url),
                            logger.WARNING)
                        return results
                    search_url = urljoin(self.custom_url,
                                         search_url.split(self.url)[1])

                data = self.get_url(search_url,
                                    params=search_params,
                                    returns="text")
                if not data:
                    logger.log(
                        "URL did not return data, maybe try a custom url, or a different one",
                        logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log(
                        "Expected xml but got something else, is your mirror failing?",
                        logger.INFO)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    for item in html.find_all("item"):
                        try:
                            title = item.title.get_text(strip=True)
                            # Prefer the torcache link kat supplies; fall back
                            # to a magnet link when the URL is not torcache or
                            # the download method is not blackhole, so direct
                            # client connections work through proxies.
                            download_url = item.enclosure["url"]
                            if sickbeard.TORRENT_METHOD != "blackhole" or "torcache" not in download_url:
                                download_url = item.find("torrent:magneturi").next.replace("CDATA", "").strip("[!]") + self._custom_trackers

                            if not (title and download_url):
                                continue

                            seeders = try_int(
                                item.find("torrent:seeds").get_text(
                                    strip=True))
                            leechers = try_int(
                                item.find("torrent:peers").get_text(
                                    strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            verified = bool(
                                try_int(
                                    item.find("torrent:verified").get_text(
                                        strip=True)))
                            if self.confirmed and not verified:
                                if mode != "RSS":
                                    logger.log(
                                        "Found result " + title +
                                        " but that doesn't seem like a verified result so I'm ignoring it",
                                        logger.DEBUG)
                                continue

                            torrent_size = item.find(
                                "torrent:contentlength").get_text(strip=True)
                            size = convert_size(torrent_size) or -1
                            info_hash = item.find("torrent:infohash").get_text(
                                strip=True)

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': info_hash
                            }
                            if mode != "RSS":
                                logger.log(
                                    "Found result: {0!s} with {1!s} seeders and {2!s} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)

                        except (AttributeError, TypeError, KeyError,
                                ValueError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)

            results += items

        return results
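The torrent:* lookups work because html5lib keeps the namespace prefix as part of the (lower-cased) tag name. A self-contained illustration with a hypothetical feed item, not captured provider output:

from bs4 import BeautifulSoup

feed = """<rss><channel><item>
  <title>Some.Show.S01E01</title>
  <torrent:seeds>12</torrent:seeds>
  <torrent:peers>3</torrent:peers>
  <torrent:infoHash>ABCDEF0123456789ABCD</torrent:infoHash>
</item></channel></rss>"""

item = BeautifulSoup(feed, 'html5lib').find('item')
print(item.find('torrent:seeds').get_text(strip=True))     # 12
print(item.find('torrent:infohash').get_text(strip=True))  # tag name was lower-cased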
Code example #41
File: kat.py Project: Arcanemagus/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []

        anime = (self.show and self.show.anime) or (ep_obj and ep_obj.show and ep_obj.show.anime) or False
        search_params = {
            "q": "",
            "field": "seeders",
            "sorder": "desc",
            "rss": 1,
            "category": ("tv", "anime")[anime]
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                search_params["q"] = search_string if mode != "RSS" else ""
                search_params["field"] = "seeders" if mode != "RSS" else "time_add"

                if mode != "RSS":
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_url = self.urls["search"] % ("usearch" if mode != "RSS" else search_string)
                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
                        return results
                    search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

                data = self.get_url(search_url, params=search_params, returns="text")
                if not data:
                    logger.log("URL did not return results/data, if the results are on the site maybe try a custom url, or a different one", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    for item in html("item"):
                        try:
                            title = item.title.get_text(strip=True)
                            # Prefer the torcache link kat supplies; fall back
                            # to a magnet link when the URL is not torcache or
                            # the download method is not blackhole, so direct
                            # client connections work through proxies.
                            download_url = item.enclosure["url"]
                            if sickbeard.TORRENT_METHOD != "blackhole" or "torcache" not in download_url:
                                download_url = item.find("torrent:magneturi").next.replace("CDATA", "").strip("[!]") + self._custom_trackers

                            if not (title and download_url):
                                continue

                            seeders = try_int(item.find("torrent:seeds").get_text(strip=True))
                            leechers = try_int(item.find("torrent:peers").get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            verified = bool(try_int(item.find("torrent:verified").get_text(strip=True)))
                            if self.confirmed and not verified:
                                if mode != "RSS":
                                    logger.log("Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                                continue

                            torrent_size = item.find("torrent:contentlength").get_text(strip=True)
                            size = convert_size(torrent_size) or -1
                            info_hash = item.find("torrent:infohash").get_text(strip=True)

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
                            if mode != "RSS":
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
Code example #42
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        self.categories = "cat=" + str(self.cat)

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode == 'RSS':
                    self.page = 2

                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):
                    z = x * 20
                    if last_page:
                        break

                    if mode != 'RSS':
                        search_url = (self.urls['search_page'] +
                                      '&filter={2}').format(
                                          z, self.categories, search_string)
                    else:
                        search_url = self.urls['search_page'].format(
                            z, self.categories)

                    if mode != 'RSS':
                        logger.log(
                            u"Search string: {0}".format(
                                search_string.decode("utf-8")), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u"No data returned from provider",
                                   logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            torrent_table = html.find(
                                'table', attrs={'class': 'copyright'})
                            torrent_rows = torrent_table.find_all(
                                'tr') if torrent_table else []

                            # Continue only if one Release is found
                            if len(torrent_rows) < 3:
                                logger.log(
                                    u"Data returned from provider does not contain any torrents",
                                    logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 42:
                                last_page = 1

                            for result in torrent_table.find_all('tr')[2:]:

                                try:
                                    link = result.find('td').find('a')
                                    title = link.string
                                    download_url = self.urls[
                                        'download'] % result.find_all(
                                            'td')[8].find('a')['href'][-8:]
                                    leechers = result.find_all(
                                        'td')[3].find_all('td')[1].text
                                    leechers = int(leechers.strip('[]'))
                                    seeders = result.find_all(
                                        'td')[3].find_all('td')[2].text
                                    seeders = int(seeders.strip('[]'))
                                    torrent_size = result.find_all(
                                        'td')[3].find_all('td')[3].text.strip(
                                            '[]') + " GB"
                                    size = convert_size(torrent_size) or -1
                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(
                                    self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(
                                        title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(result) and not self.subtitle:
                                    logger.log(u"Torrent is subtitled and subtitles are disabled, skipping: {0!s}".format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(result):
                                    logger.log(u"Torrent isn't English audio/subtitled, skipping: {0!s}".format(title), logger.DEBUG)
                                    continue

                                # Normalize the show part of the title to match the search string
                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    title = search_show + ep_params

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee]\d{1,2}(?:-\d{1,2})?)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log(
                                            u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                            .format(title, seeders,
                                                    leechers), logger.DEBUG)
                                    continue

                                item = {
                                    'title': title,
                                    'link': download_url,
                                    'size': size,
                                    'seeders': seeders,
                                    'leechers': leechers,
                                    'hash': None
                                }
                                if mode != 'RSS':
                                    logger.log(
                                        u"Found result: {0!s} with {1!s} seeders and {2!s} leechers"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(
                            u"Failed parsing provider. Traceback: {0!s}".
                            format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                           reverse=True)

                results += items

        return results
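
The show-title normalization above is easy to misread, so here is a minimal standalone sketch of the same idea (normalize_title is a hypothetical name, not part of the provider):

import re

def normalize_title(title, search_string):
    # If the release title embeds a localized show name, swap in the name
    # that was actually searched for, keeping the SxxEyy tail intact.
    search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
    match = re.search(r'([Ss]\d{1,2})', title)
    if not match:
        return title
    show_title, ep_params = title[:match.start()], title[match.start():]
    if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
        return search_show + ep_params
    return title

# normalize_title('Il Trono di Spade - Game of Thrones S05E01', 'Game of Thrones S05')
# -> 'Game of Thrones S05E01'
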
Code example #43
File: abnormal.py Project: ArthurGarnier/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'cat[]': ['TV|SD|VOSTFR', 'TV|HD|VOSTFR', 'TV|SD|VF', 'TV|HD|VF', 'TV|PACK|FR', 'TV|PACK|VOSTFR', 'TV|EMISSIONS', 'ANIME'],
            # Both ASC and DESC are available for sort direction
            'way': 'DESC'
        }

        # Units
        units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                # Sorting: Available parameters: ReleaseName, Seeders, Leechers, Snatched, Size
                search_params['order'] = ('Seeders', 'Time')[mode == 'RSS']
                search_params['search'] = re.sub(r'[()]', '', search_string)
                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(class_='torrent_table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                        continue

                    # Catégorie, Release, Date, DL, Size, C, S, L
                    labels = [label.get_text(strip=True) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result('td')
                        if len(cells) < len(labels):
                            continue

                        try:
                            title = cells[labels.index('Release')].get_text(strip=True)
                            download_url = urljoin(self.url, cells[labels.index('DL')].find('a', class_='tooltip')['href'])
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(cells[labels.index('S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('L')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            size_index = labels.index('Size') if 'Size' in labels else labels.index('Taille')
                            torrent_size = cells[size_index].get_text()
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
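
The units list above is French (octets), so the size parser has to be told what the ladder looks like. A minimal sketch of what convert_size presumably does with it; the real SickRage helper handles more input formats:

def convert_size_sketch(size_string, units):
    # Not the real helper: walk the units ladder and multiply by 1024 per
    # step, so 'O' (octets) is bytes, 'KO' is KiB, 'GO' is GiB, and so on.
    try:
        value, unit = size_string.split()
        return int(float(value) * 1024 ** units.index(unit.upper()))
    except (ValueError, IndexError):
        return None

# convert_size_sketch('1.5 GO', ['O', 'KO', 'MO', 'GO', 'TO', 'PO']) -> 1610612736
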
Code example #44
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'do': 'search',
            'include_dead_torrents': 'no',
            'search_type': 't_name',
            'category': 0,
            'keywords': ''
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            td_title = ''
            if td.img:
                td_title = td.img.get('title', td.get_text(strip=True))
            if not td_title:
                td_title = td.get_text(strip=True)
            return td_title

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)
                    search_params['keywords'] = search_string

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='sortabletable')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            title = result.find('div', class_='tooltip-target').get_text(strip=True)
                            # skip if torrent has been nuked due to poor quality
                            if title.startswith('Nuked.'):
                                continue
                            download_url = result.find(
                                'img', title='Click to Download this Torrent in SSL!').parent['href']
                            if not all([title, download_url]):
                                continue

                            cells = result('td')
                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the"
                                               " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index('Size')].get_text(strip=True)
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders,
                                    'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
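
A usage sketch for process_column_header; the helper is copied out of search() here, and the header markup is an assumption based on how the code reads it:

from bs4 import BeautifulSoup

def process_column_header(td):  # copy of the nested helper above
    td_title = ''
    if td.img:
        td_title = td.img.get('title', td.get_text(strip=True))
    if not td_title:
        td_title = td.get_text(strip=True)
    return td_title

header = BeautifulSoup('<tr><td><img title="Seeders"/></td><td>Size</td></tr>', 'html.parser').tr
labels = [process_column_header(td) for td in header('td')]
# labels == ['Seeders', 'Size'], so cells[labels.index('Seeders')] keeps
# working even if the site reorders its columns.
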
Code example #45
File: torrentday.py Project: Eiber/SickRage-Medusa
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log('Search mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log('Search string: {search}'.format
                               (search=search_string), logger.DEBUG)

                search_string = '+'.join(search_string.split())

                post_data = dict({'/browse.php?': None, 'cata': 'yes', 'jxt': 8, 'jxw': 'b', 'search': search_string},
                                 **self.categories[mode])

                if self.freeleech:
                    post_data.update({'free': 'on'})

                try:
                    response = self.get_url(self.urls['search'], post_data=post_data, returns='response')
                    response.raise_for_status()
                except RequestException as msg:
                    logger.log(u'Error while connecting to provider: {error}'.format(error=msg), logger.ERROR)
                    continue

                try:
                    jdata = response.json()
                except ValueError:  # also catches JSONDecodeError if simplejson is installed
                    logger.log('Data returned from provider is not json', logger.ERROR)
                    self.session.cookies.clear()
                    continue

                torrents = jdata.get('Fs', [dict()])[0].get('Cn', {}).get('torrents', [])
                if not torrents:
                    logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                    continue

                for torrent in torrents:
                    try:
                        title = re.sub(r'\[.*=.*\].*\[/.*\]', '', torrent['name']) if torrent['name'] else None
                        download_url = urljoin(self.urls['download'], '{}/{}'.format(torrent['id'], torrent['fname'])) if torrent['id'] and torrent['fname'] else None
                        if not all([title, download_url]):
                            continue

                        seeders = int(torrent['seed']) if torrent['seed'] else 1
                        leechers = int(torrent['leech']) if torrent['leech'] else 0

                        # Filter unseeded torrent
                        if seeders < min(self.minseed, 1):
                            if mode != 'RSS':
                                logger.log("Discarding torrent because it doesn't meet the "
                                           "minimum seeders: {0}. Seeders: {1}".format
                                           (title, seeders), logger.DEBUG)
                            continue

                        torrent_size = torrent['size']
                        size = convert_size(torrent_size) or -1

                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'pubdate': None,
                            'hash': None,
                        }
                        if mode != 'RSS':
                            logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                        logger.log('Failed parsing provider. Traceback: {0!r}'.format
                                   (traceback.format_exc()), logger.ERROR)
                        continue

            results += items

        return results
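
The nested .get() chain above implies a response payload shaped roughly like the following. This is reconstructed from the parsing code, not from any documented TorrentDay API:

example_jdata = {
    'Fs': [{
        'Cn': {
            'torrents': [{
                'id': 123456,                         # used to build the download URL
                'fname': 'Show.S01E01.720p.torrent',  # appended after the id
                'name': 'Show.S01E01.720p',
                'seed': '42',
                'leech': '7',
                'size': '1.4 GB',
            }],
        },
    }],
}
torrents = example_jdata.get('Fs', [dict()])[0].get('Cn', {}).get('torrents', [])
# torrents[0]['seed'] == '42'
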
Code example #46
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        search_url = self.urls['search']
        download_url = self.urls['download']
        if self.custom_url:
            if not validators.url(self.custom_url):
                logger.log("Invalid custom url: {0}".format(self.custom_url),
                           logger.WARNING)
                return results

            search_url = urljoin(self.custom_url,
                                 search_url.split(self.url)[1])
            download_url = urljoin(self.custom_url,
                                   download_url.split(self.url)[1])

        if not self.login():
            return results

        for mode in search_params:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(
                        'Search string: {0}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)

                post_data = {
                    '/browse.php?': None,
                    'cata': 'yes',
                    'jxt': 8,
                    'jxw': 'b',
                    'search': search_string
                }
                post_data.update(self.categories[mode])

                if self.freeleech:
                    post_data.update({'free': 'on'})

                parsed_json = self.get_url(search_url,
                                           post_data=post_data,
                                           returns='json')
                if not parsed_json:
                    logger.log('No data returned from provider', logger.DEBUG)
                    self.session.cookies.clear()
                    continue

                try:
                    torrents = parsed_json.get('Fs', [])[0].get('Cn', {}).get(
                        'torrents', [])
                except Exception:
                    logger.log(
                        'Data returned from provider does not contain any torrents',
                        logger.DEBUG)
                    continue

                for torrent in torrents:

                    title = re.sub(r'\[.*=.*\].*\[/.*\]', '', torrent['name']) if torrent['name'] else None
                    torrent_url = urljoin(download_url, '{0}/{1}'.format(torrent['id'], torrent['fname'])) if torrent['id'] and torrent['fname'] else None
                    if not all([title, torrent_url]):
                        continue

                    seeders = try_int(torrent['seed'])
                    leechers = try_int(torrent['leech'])

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(
                                'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'
                                .format(title, seeders,
                                        leechers), logger.DEBUG)
                        continue

                    torrent_size = torrent['size']
                    size = convert_size(torrent_size) or -1

                    item = {
                        'title': title,
                        'link': torrent_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'hash': ''
                    }

                    if mode != 'RSS':
                        logger.log(
                            'Found result: {0} with {1} seeders and {2} leechers'
                            .format(title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
Code example #47
File: tvchaosuk.py Project: Arcanemagus/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'do': 'search',
            'search_type': 't_name',
            'category': 0,
            'include_dead_torrents': 'no',
            'submit': 'search'
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode == 'Season':
                    search_string = re.sub(ur'(.*)S0?', ur'\1Series ', search_string)

                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                search_params['keywords'] = search_string
                data = self.get_url(self.urls['search'], post_data=search_params, returns='text')
                if not data:
                    logger.log('No data returned from provider', logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(id='sortabletable')
                    torrent_rows = torrent_table("tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [label.img['title'] if label.img else label.get_text(strip=True) for label in torrent_rows[0]('td')]
                    for torrent in torrent_rows[1:]:
                        try:
                            if self.freeleech and not torrent.find('img', alt=re.compile('Free Torrent')):
                                continue

                            title = torrent.find(class_='tooltip-content').div.get_text(strip=True)
                            download_url = torrent.find(title='Click to Download this Torrent!').parent['href']
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(torrent.find(title='Seeders').get_text(strip=True))
                            leechers = try_int(torrent.find(title='Leechers').get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log('Discarding torrent because it doesn\'t meet the'
                                               ' minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            # Chop off tracker/channel prefix or we can't parse the result!
                            if mode != 'RSS' and search_params['keywords']:
                                show_name_first_word = re.search(ur'^[^ .]+', search_params['keywords']).group()
                                if not title.startswith(show_name_first_word):
                                    title = re.sub(ur'.*(' + show_name_first_word + '.*)', ur'\1', title)

                            # Change title from Series to Season, or we can't parse
                            if mode == 'Season':
                                title = re.sub(ur'(.*)(?i)Series', ur'\1Season', title)

                            # Strip year from the end or we can't parse it!
                            title = re.sub(ur'(.*)[\. ]?\(\d{4}\)', ur'\1', title)
                            title = re.sub(ur'\s+', ur' ', title)

                            torrent_size = torrent('td')[labels.index('Size')].get_text(strip=True)
                            size = convert_size(torrent_size, units=units) or -1

                            if mode != 'RSS':
                                logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                           (title, seeders, leechers), logger.DEBUG)

                            item = {'title': title + '.hdtv.x264', 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers}
                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
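
The three title rewrites above are easiest to follow on a concrete, made-up title; the channel prefix, the "Series" wording and the trailing year would all trip up the release-name parser:

import re

title = 'TVCUK Doctor Who Series 9 (2015)'
show_name_first_word = 'Doctor'  # first word of the search keywords
title = re.sub(ur'.*(' + show_name_first_word + '.*)', ur'\1', title)  # chop channel prefix
title = re.sub(ur'(.*)(?i)Series', ur'\1Season', title)                # Series -> Season
title = re.sub(ur'(.*)[\. ]?\(\d{4}\)', ur'\1', title)                 # strip the year
title = re.sub(ur'\s+', ur' ', title)                                  # collapse whitespace
# title is now 'Doctor Who Season 9 '
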
Code example #48
File: cpasbien.py Project: lastdevonearth/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string,
                               logger.DEBUG)
                    search_url = self.url + '/recherche/' + search_string.replace(
                        '.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/view_cat.php?categorie=series&trie=date-d'

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html.find_all(
                        class_=re.compile('ligne[01]'))
                    for result in torrent_rows:
                        try:
                            title = result.find(class_="titre").get_text(
                                strip=True).replace("HDTV",
                                                    "HDTV x264-CPasBien")
                            tmp = result.find("a")['href'].split(
                                '/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url +
                                            '/telechargement/%s' % tmp)
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                result.find(class_="up").get_text(strip=True))
                            leechers = try_int(
                                result.find(class_="down").get_text(
                                    strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find(class_="poid").get_text(
                                strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result: %s with %s seeders and %s leechers"
                                    % (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
Code example #49
File: newpct.py Project: ratoaq2/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        """
        Search query:
        http://www.newpct.com/index.php?l=doSearch&q=fringe&category_=All&idioma_=1&bus_de_=All

        q => Show name
        category_ = Category 'Shows' (767)
        idioma_ = Language Spanish (1)
        bus_de_ = Date from (All, hoy)

        """
        results = []

        # Only search if user conditions are true
        lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang

        search_params = {
            'l': 'doSearch',
            'q': '',
            'category_': 'All',
            'idioma_': 1,
            'bus_de_': 'All'
        }

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {}'.format(mode), logger.DEBUG)

            # Only search if user conditions are true
            if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
                logger.log('Show info is not spanish, skipping provider search', logger.DEBUG)
                continue

            search_params['bus_de_'] = 'All' if mode != 'RSS' else 'hoy'

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log('Search string: {}'.format(search_string.decode('utf-8')),
                               logger.DEBUG)

                search_params['q'] = search_string

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='categoryTable')
                    torrent_rows = torrent_table.find_all('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 3:  # Headers + 1 Torrent + Pagination
                        logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                        continue

                    # 'Fecha', 'Título', 'Tamaño', ''
                    # Date, Title, Size
                    labels = [label.get_text(strip=True) for label in torrent_rows[0].find_all('th')]
                    for row in torrent_rows[1:-1]:
                        try:
                            cells = row.find_all('td')

                            torrent_row = row.find('a')
                            title = self._processTitle(torrent_row.get('title', ''))
                            download_url = torrent_row.get('href', '')
                            if not all([title, download_url]):
                                continue

                            # Provider does not provide seeders/leechers
                            seeders = 1
                            leechers = 0
                            torrent_size = cells[labels.index('Tamaño')].get_text(strip=True)

                            size = convert_size(torrent_size) or -1
                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                            if mode != 'RSS':
                                logger.log('Found result: {}'.format(title), logger.DEBUG)

                            items.append(item)
                        except (AttributeError, TypeError):
                            continue

            results += items

        return results
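
For reference, the query from the docstring can be reproduced from search_params like this (urlencode may emit the parameters in any order):

from urllib import urlencode  # Python 2, matching this codebase

params = {'l': 'doSearch', 'q': 'fringe', 'category_': 'All', 'idioma_': 1, 'bus_de_': 'All'}
print('http://www.newpct.com/index.php?' + urlencode(params))
# http://www.newpct.com/index.php?l=doSearch&q=fringe&category_=All&idioma_=1&bus_de_=All
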
Code example #50
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        search_params = {
            "c41": 1, "c33": 1, "c38": 1, "c32": 1, "c37": 1
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != "RSS":
                    logger.log("Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                search_params["search"] = search_string
                data = self.get_url(self.urls["search"], params=search_params, returns="text")
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    torrent_table = html.find("table", border="1")
                    torrent_rows = torrent_table.find_all("tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    # "Type", "Name", Files", "Comm.", "Added", "TTL", "Size", "Snatched", "Seeders", "Leechers"
                    labels = [label.get_text(strip=True) for label in torrent_rows[0].find_all("td")]

                    for result in torrent_rows[1:]:
                        try:
                            cells = result.find_all("td")

                            download_url = urljoin(self.url, cells[labels.index("Name")].find("a", href=re.compile(r"download.php\?id="))["href"])
                            title_element = cells[labels.index("Name")].find("a", href=re.compile(r"details.php\?id="))
                            title = title_element.get("title", "") or title_element.get_text(strip=True)
                            if not all([title, download_url]):
                                continue

                            if self.freeleech:
                                # Free leech torrents are marked with green [F L] in the title (i.e. <font color=green>[F&nbsp;L]</font>)
                                freeleech = cells[labels.index("Name")].find("font", color="green")
                                if not freeleech or freeleech.get_text(strip=True) != "[F\xa0L]":
                                    continue

                            seeders = try_int(cells[labels.index("Seeders")].get_text(strip=True))
                            leechers = try_int(cells[labels.index("Leechers")].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            # Need size for failed downloads handling
                            torrent_size = cells[labels.index("Size")].get_text(strip=True)
                            size = convert_size(torrent_size) or -1
                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}

                            if mode != "RSS":
                                logger.log("Found result: {} with {} seeders and {} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except (AttributeError, TypeError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
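
A sketch of the freeleech check above, assuming markup like the inline comment describes, i.e. <font color=green>[F&nbsp;L]</font> inside the Name cell:

from bs4 import BeautifulSoup

cell = BeautifulSoup(u'<td><font color="green">[F\xa0L]</font></td>', 'html.parser').td
freeleech = cell.find('font', color='green')
is_freeleech = bool(freeleech and freeleech.get_text(strip=True) == u'[F\xa0L]')
# is_freeleech == True; \xa0 is the non-breaking space produced by &nbsp;
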
Code example #51
File: limetorrents.py Project: Indigo744/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                try:
                    search_url = (self.urls['rss'], self.urls['search'] + search_string)[mode != 'RSS']

                    logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                    data = self.get_url(search_url)
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log(u'Expected xml but got something else, is your mirror failing?', logger.INFO)
                        continue

                    data = BeautifulSoup(data, 'html5lib')

                    entries = data.findAll('item')
                    if not entries:
                        logger.log(u'Returned xml contained no results', logger.INFO)
                        continue

                    for item in entries:
                        try:
                            title = item.title.text
                            # Use the itorrents link limetorrents provides,
                            # unless it is not itorrents or we are not using blackhole
                            # because we want to use magnets if connecting direct to client
                            # so that proxies work.
                            download_url = item.enclosure['url']
                            if sickbeard.TORRENT_METHOD != "blackhole" or 'itorrents' not in download_url:
                                # e.g. http://itorrents.org/torrent/C7203982B6F000393B1CE3A013504E5F87A46A7F.torrent?title=The-Night-of-the-Generals-(1967)[BRRip-1080p-x264-by-alE13-DTS-AC3][Lektor-i-Napisy-PL-Eng][Eng]
                                # Keep the hash as a separate string for when it's needed for failed downloads handling
                                torrent_hash = re.match(r"(.*)([A-F0-9]{40})(.*)", download_url, re.IGNORECASE).group(2)
                                download_url = "magnet:?xt=urn:btih:" + torrent_hash + "&dn=" + title + self._custom_trackers

                            if not (title and download_url):
                                continue
                            # seeders and leechers are presented differently when searching and when browsing newly added
                            if mode == 'RSS':
                                # <![CDATA[
                                # Category: <a href="http://www.limetorrents.cc/browse-torrents/TV-shows/">TV shows</a><br /> Seeds: 1<br />Leechers: 0<br />Size: 7.71 GB<br /><br /><a href="http://www.limetorrents.cc/Owen-Hart-of-Gold-Djon91-torrent-7180661.html">More @ limetorrents.cc</a><br />
                                # ]]>
                                description = item.find('description')
                                seeders = try_int(description.find_all('br')[0].next_sibling.strip().lstrip('Seeds: '))
                                leechers = try_int(description.find_all('br')[1].next_sibling.strip().lstrip('Leechers: '))
                            else:
                                # <description>Seeds: 6982 , Leechers 734</description>
                                description = item.find('description').text.partition(',')
                                seeders = try_int(description[0].lstrip('Seeds: ').strip())
                                leechers = try_int(description[2].lstrip('Leechers ').strip())

                            torrent_size = item.find('size').text

                            size = convert_size(torrent_size) or -1

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log(u"Failed parsing provider. Traceback: %r" % traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
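
A worked example of the hash extraction above, using a shortened version of the itorrents URL from the inline comment:

import re

url = ('http://itorrents.org/torrent/'
       'C7203982B6F000393B1CE3A013504E5F87A46A7F.torrent?title=Example')
torrent_hash = re.match(r"(.*)([A-F0-9]{40})(.*)", url, re.IGNORECASE).group(2)
# torrent_hash == 'C7203982B6F000393B1CE3A013504E5F87A46A7F'
magnet = 'magnet:?xt=urn:btih:' + torrent_hash + '&dn=' + 'Example.Title'
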
Code example #52
File: hdtorrents.py Project: tioxxx/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    search_url = self.urls['search'] % (
                        quote_plus(search_string), self.categories)
                    logger.log(
                        u"Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)
                else:
                    search_url = self.urls['rss'] % self.categories

                if self.freeleech:
                    search_url = search_url.replace('active=1', 'active=5')

                data = self.get_url(search_url, returns='text')
                if not data or 'please try later' in data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if data.find('No torrents here') != -1:
                    logger.log(
                        u"Data returned from provider does not contain any torrents",
                        logger.DEBUG)
                    continue

                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    index = data.lower().index(
                        '<table class="mainblockcontenttt"')
                except ValueError:
                    logger.log(
                        u"Could not find table of torrents mainblockcontenttt",
                        logger.DEBUG)
                    continue

                data = data[index:]

                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log(u"No html data parsed from provider",
                                   logger.DEBUG)
                        continue

                    torrent_rows = []
                    torrent_table = html.find('table',
                                              class_='mainblockcontenttt')
                    if torrent_table:
                        torrent_rows = torrent_table.find_all('tr')

                    if not torrent_rows:
                        logger.log(u"Could not find results in returned data",
                                   logger.DEBUG)
                        continue

                    # Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
                    labels = [
                        label.a.get_text(strip=True)
                        if label.a else label.get_text(strip=True)
                        for label in torrent_rows[0].find_all('td')
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.findChildren('td')[:len(labels)]
                            if len(cells) < len(labels):
                                continue

                            title = cells[labels.index(
                                u'Filename')].a.get_text(strip=True)
                            seeders = try_int(
                                cells[labels.index(u'S')].get_text(strip=True))
                            leechers = try_int(
                                cells[labels.index(u'L')].get_text(strip=True))
                            torrent_size = cells[labels.index(
                                u'Size')].get_text()

                            size = convert_size(torrent_size) or -1
                            download_url = self.url + '/' + cells[labels.index(
                                u'Dl')].a['href']
                        except (AttributeError, TypeError, KeyError,
                                ValueError, IndexError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': None
                        }
                        if mode != 'RSS':
                            logger.log(
                                u"Found result: %s with %s seeders and %s leechers"
                                % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)

            results += items

        return results
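
The slice-before-parse workaround above can be shown in isolation with made-up markup:

page = '<bad <<markup>> that chokes parsers<table class="mainblockcontenttt"><tr><td>x</td></tr></table>'
index = page.lower().index('<table class="mainblockcontenttt"')
page = page[index:]  # html5lib now only sees the table we care about
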
Code example #53
File: danishbits.py Project: pedro2d10/SickRageFR
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'action': 'newbrowse',
            'group': 3,
            'search': '',
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.img:
                result = td.img.get('title')
            if not result:
                result = td.get_text(strip=True)
            return result.encode('utf-8')

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
                               logger.DEBUG)

                search_params['search'] = search_string

                search_url = "%s?%s" % (self.urls['search'], urlencode(search_params))
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', class_='torrent_table')
                    torrent_rows = torrent_table.find_all('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    # Literal:     Navn, Størrelse, Kommentarer, Tilføjet, Snatches, Seeders, Leechers
                    # Translation: Name, Size,      Comments,    Added,    Snatches, Seeders, Leechers
                    labels = [process_column_header(label) for label in torrent_rows[0].find_all('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:

                        try:
                            title = result.find(class_='croptorrenttext').get_text(strip=True)
                            download_url = self.url + result.find(title="Direkte download link")['href']
                            if not all([title, download_url]):
                                continue

                            cells = result.find_all('td')

                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the"
                                               u" minimum seeders or leechers: {} (S:{} L:{})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            freeleech = result.find(class_='freeleech')
                            if self.freeleech and not freeleech:
                                continue

                            torrent_size = cells[labels.index('Størrelse')].contents[0]
                            size = convert_size(torrent_size, units=units) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: {} with {} seeders and {} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
Code example #54
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if self.show and not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log('Search string: {0}'.format(search_string.decode('utf-8')), logger.DEBUG)

                search_params = {
                    'page': 'rss',
                    'c': '1_0',  # Category: All anime
                    's': 'id',  # Sort by: 'id'=Date / 'size' / 'name' / 'seeders' / 'leechers' / 'downloads'
                    'o': 'desc',  # Sort direction: asc / desc
                    'f': ('0', '2')[self.confirmed]  # Quality filter: 0 = None / 1 = No Remakes / 2 = Trusted Only
                }
                if mode != 'RSS':
                    search_params['q'] = search_string

                data = self.cache.get_rss_feed(self.url, params=search_params)['entries']
                if not data:
                    logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                    continue

                for curItem in data:
                    try:
                        title = curItem['title']
                        download_url = curItem['link']
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(curItem['nyaa_seeders'])
                        leechers = try_int(curItem['nyaa_leechers'])
                        torrent_size = curItem['nyaa_size']
                        info_hash = curItem['nyaa_infohash']

                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log('Discarding torrent because it doesn\'t meet the'
                                           ' minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        size = convert_size(torrent_size, units=['BYTES', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']) or -1
                        result = {'title': title, 'link': download_url, 'size': size,
                                  'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
                        if mode != 'RSS':
                            logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(result)
                    except StandardError:
                        continue

            # For each search mode sort all the items by seeders
            items.sort(key=lambda d: d.get('seeders', 0), reverse=True)
            results += items

        return results
Code example #55
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        """
        TokyoToshokan search and parsing

        :param search_strings: A dict with mode (key) and the search value (value)
        :param age: Not used
        :param ep_obj: Not used
        :returns: A list of search results (structure)
        """
        results = []
        if self.show and not self.show.is_anime:
            return results

        for mode in search_strings:
            items = []
            logger.log('Search mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log('Search string: {search}'.format
                               (search=search_string), logger.DEBUG)

                search_params = {
                    'terms': search_string,
                    'type': 1,  # get anime types
                }

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    logger.log('No data returned from provider', logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as soup:
                    torrent_table = soup.find('table', class_='listing')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one release is found
                    if len(torrent_rows) < 2:
                        logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                        continue

                    a = 1 if len(torrent_rows[0]('td')) < 2 else 0

                    # Skip column headers
                    for top, bot in zip(torrent_rows[a::2], torrent_rows[a + 1::2]):
                        try:
                            desc_top = top.find('td', class_='desc-top')
                            title = desc_top.get_text(strip=True) if desc_top else None
                            download_url = desc_top.find('a')['href'] if desc_top else None
                            if not all([title, download_url]):
                                continue

                            stats = bot.find('td', class_='stats').get_text(strip=True)
                            sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)', stats.replace(' ', ''))
                            seeders = try_int(sl.group('seeders')) if sl else 0
                            leechers = try_int(sl.group('leechers')) if sl else 0

                            # Filter unseeded torrent
                            if seeders < min(self.minseed, 1):
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the "
                                               "minimum seeders: {0}. Seeders: {1}".format
                                               (title, seeders), logger.DEBUG)
                                continue

                            desc_bottom = bot.find('td', class_='desc-bot').get_text(strip=True)
                            size = convert_size(desc_bottom.split('|')[1].strip('Size: ')) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'pubdate': None,
                                'hash': None,
                            }
                            if mode != 'RSS':
                                logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            logger.log('Failed parsing provider. Traceback: {0!r}'.format
                                       (traceback.format_exc()), logger.ERROR)
                            continue

            results += items

        return results
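The stats cell above is parsed with a named-group regex after all spaces are removed. A small sketch of just that step, with a made-up stats string:

import re

# Hypothetical stats text as it might appear in the 'stats' cell.
stats = 'S: 4 L: 2 C: 0 ID: 12345'

# Removing spaces first lets one pattern handle the site's varying spacing.
sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
              stats.replace(' ', ''))
seeders = int(sl.group('seeders')) if sl else 0
leechers = int(sl.group('leechers')) if sl else 0
print('seeders={0} leechers={1}'.format(seeders, leechers))  # seeders=4 leechers=2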
Code example #56
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals,too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            if mode != 'RSS':
                logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_url = self.urls['search'] % (quote(search_string), self.categories[mode])

                try:
                    data = self.get_url(search_url, returns='text')
                    time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                except Exception as e:
                    logger.log(u"Unable to fetch data. Error: {0}".format(repr(e)), logger.WARNING)

                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='torrents-table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    for result in torrent_rows[1:]:

                        try:
                            link = result.find('td', class_='ttr_name').find('a')
                            url = result.find('td', class_='td_dl').find('a')

                            title = link.string
                            if re.search(r'\.\.\.', title):
                                data = self.get_url(urljoin(self.url, link['href']), returns='text')
                                if data:
                                    with BS4Parser(data) as details_html:
                                        title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
                            download_url = self.urls['download'] % url['href']
                            seeders = int(result.find('td', class_='ttr_seeders').string)
                            leechers = int(result.find('td', class_='ttr_leechers').string)
                            torrent_size = result.find('td', class_='ttr_size').contents[0]

                            size = convert_size(torrent_size) or -1
                        except (AttributeError, TypeError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                        if mode != 'RSS':
                            logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
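The fetch in the loop above needs the continue added to its exception handler; otherwise the data variable is unbound when the request fails. A minimal sketch of the guarded-fetch pattern, with a hypothetical get_url standing in for the provider's HTTP helper:

import time

def get_url(url):
    # Hypothetical stand-in for the provider's HTTP helper; always fails here.
    raise IOError('connection reset')

for search_url in ['http://example.invalid/search?q=show']:
    try:
        data = get_url(search_url)
        time.sleep(0.1)  # stand-in for the cpu_presets throttle
    except Exception as e:
        print('Unable to fetch data. Error: {0!r}'.format(e))
        continue  # without this, the check below would hit an unbound 'data'

    if not data:
        continue
    # ... parse 'data' here ...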
Code example #57
File: tntvillage.py Project: Arcanemagus/SickRage
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        self.categories = "cat=" + str(self.cat)

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode == 'RSS':
                    self.page = 2

                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):
                    z = x * 20
                    if last_page:
                        break

                    if mode != 'RSS':
                        search_url = (self.urls['search_page'] + '&filter={2}').format(z, self.categories, search_string)
                    else:
                        search_url = self.urls['search_page'].format(z, self.categories)

                    if mode != 'RSS':
                        logger.log(u"Search string: {0}".format
                                   (search_string.decode("utf-8")), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            torrent_table = html.find('table', class_='copyright')
                            torrent_rows = torrent_table('tr') if torrent_table else []

                            # Continue only if at least one release is found
                            if len(torrent_rows) < 3:
                                logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 42:
                                last_page = 1

                            for result in torrent_table('tr')[2:]:

                                try:
                                    link = result.find('td').find('a')
                                    title = link.string
                                    download_url = self.urls['download'] % result('td')[8].find('a')['href'][-8:]
                                    leechers = result('td')[3]('td')[1].text
                                    leechers = int(leechers.strip('[]'))
                                    seeders = result('td')[3]('td')[2].text
                                    seeders = int(seeders.strip('[]'))
                                    torrent_size = result('td')[3]('td')[3].text.strip('[]') + " GB"
                                    size = convert_size(torrent_size) or -1
                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(result) and not self.subtitle:
                                    logger.log(u"Torrent is subtitled, skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(result):
                                    logger.log(u"Torrent isnt english audio/subtitled , skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                rindex = re.search(r'[Ss]\d{1,2}', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                    # Swap the scraped show name for the searched one when they only differ by a prefix
                                    if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                        title = search_show + ep_params

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee]\d{1,2}(?:-\d{1,2})?)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                                   (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(u"Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

                results += items

        return results
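The title rewriting above hinges on locating the SXX season token. A standalone sketch of that split, using the corrected [Ss]\d{1,2} pattern on a made-up title:

import re

title = 'Some Show S02E05 720p WEB-DL'

rindex = re.search(r'[Ss]\d{1,2}', title)
if rindex:
    show_title = title[:rindex.start()]  # 'Some Show '
    ep_params = title[rindex.start():]   # 'S02E05 720p WEB-DL'
    print('show={0!r} episode={1!r}'.format(show_title, ep_params))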
Code example #58
File: tvchaosuk.py Project: badwha109-168/Sick-Rage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'do': 'search',
            'search_type': 't_name',
            'category': 0,
            'include_dead_torrents': 'no',
            'submit': 'search'
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode == 'Season':
                    search_string = re.sub(r'(.*)S0?', r'\1Series ', search_string)

                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                search_params['keywords'] = search_string
                data = self.get_url(self.urls['search'], post_data=search_params, returns='text')
                if not data:
                    logger.log('No data returned from provider', logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(id='sortabletable')
                    torrent_rows = torrent_table("tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [label.img['title'] if label.img else label.get_text(strip=True) for label in torrent_rows[0]('td')]
                    for torrent in torrent_rows[1:]:
                        try:
                            if self.freeleech and not torrent.find('img', alt=re.compile('Free Torrent')):
                                continue

                            title = torrent.find(class_='tooltip-content').div.get_text(strip=True)
                            download_url = torrent.find(title='Click to Download this Torrent!').parent['href']
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(torrent.find(title='Seeders').get_text(strip=True))
                            leechers = try_int(torrent.find(title='Leechers').get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log('Discarding torrent because it doesn\'t meet the'
                                               ' minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            # Chop off the tracker/channel prefix or we can't parse the result!
                            if mode != 'RSS' and search_params['keywords']:
                                show_name_first_word = re.search(r'^[^ .]+', search_params['keywords']).group()
                                if not title.startswith(show_name_first_word):
                                    title = re.sub(r'.*(' + show_name_first_word + '.*)', r'\1', title)

                            # Change title from Series to Season, or we can't parse
                            if mode == 'Season':
                                title = re.sub(r'(?i)(.*)Series', r'\1Season', title)

                            # Strip year from the end or we can't parse it!
                            title = re.sub(r'(.*)[\. ]?\(\d{4}\)', r'\1', title)
                            title = re.sub(r'\s+', r' ', title)

                            torrent_size = torrent('td')[labels.index('Size')].get_text(strip=True)
                            size = convert_size(torrent_size, units=units) or -1

                            if mode != 'RSS':
                                logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                           (title, seeders, leechers), logger.DEBUG)

                            item = {'title': title + '.hdtv.x264', 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers}
                            items.append(item)
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
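TVChaosUK labels seasons as "Series", so the code above rewrites in both directions. A quick sketch of the substitutions on made-up strings:

import re

# Outgoing search: turn 'Show S02' into the site's 'Show Series 2' convention.
search_string = 'Some Show S02'
print(re.sub(r'(.*)S0?', r'\1Series ', search_string))  # 'Some Show Series 2'

# Incoming title: turn 'Series' back into 'Season' so the parser recognises it,
# then drop the year and collapse the leftover whitespace.
title = 'Some Show (2016) Series 2'
title = re.sub(r'(?i)(.*)Series', r'\1Season', title)
title = re.sub(r'(.*)[\. ]?\(\d{4}\)', r'\1', title)
title = re.sub(r'\s+', ' ', title)
print(title)  # 'Some Show Season 2'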
Code example #59
File: hdtorrents.py Project: pedro2d10/SickRageFR
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    search_url = self.urls['search'] % (quote_plus(search_string), self.categories)
                    logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
                               logger.DEBUG)
                else:
                    search_url = self.urls['rss'] % self.categories

                if self.freeleech:
                    search_url = search_url.replace('active=1', 'active=5')

                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data or 'please try later' in data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if data.find('No torrents here') != -1:
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # Search result page contains some invalid html that prevents the html parser
                # from returning all data. We cut everything before the table that contains the
                # data we are interested in, thus eliminating the invalid html portions
                try:
                    index = data.lower().index('<table class="mainblockcontenttt"')
                except ValueError:
                    logger.log(u"Could not find table of torrents mainblockcontenttt", logger.DEBUG)
                    continue

                # data = urllib.unquote(data[index:].encode('utf-8')).decode('utf-8').replace('\t', '')
                data = data[index:]

                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log(u"No html data parsed from provider", logger.DEBUG)
                        continue

                    torrent_rows = []
                    torrent_table = html.find('table', class_='mainblockcontenttt')
                    if torrent_table:
                        torrent_rows = torrent_table.find_all('tr')

                    if not torrent_rows:
                        logger.log(u"Could not find results in returned data", logger.DEBUG)
                        continue

                    # Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
                    labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in torrent_rows[0].find_all('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.findChildren('td')[:len(labels)]
                            if len(cells) < len(labels):
                                continue

                            title = cells[labels.index(u'Filename')].a.get_text(strip=True)
                            seeders = try_int(cells[labels.index(u'S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index(u'L')].get_text(strip=True))
                            torrent_size = cells[labels.index(u'Size')].get_text()

                            size = convert_size(torrent_size) or -1
                            download_url = self.url + '/' + cells[labels.index(u'Dl')].a['href']
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)

            results += items

        return results
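Looking cells up by header label, as above, keeps the parser working if the site reorders its columns. A minimal BeautifulSoup sketch of the pattern on inline, made-up markup (bs4 assumed available):

from bs4 import BeautifulSoup

html = BeautifulSoup(
    '<table>'
    '<tr><td>Filename</td><td>Size</td><td>S</td><td>L</td></tr>'
    '<tr><td><a href="/t/1">Show.S01E01</a></td><td>1.2 GB</td><td>15</td><td>4</td></tr>'
    '</table>', 'html.parser')

rows = html.find_all('tr')
# Build the label list from the header row once, then index data cells by label.
labels = [td.get_text(strip=True) for td in rows[0].find_all('td')]

for row in rows[1:]:
    cells = row.find_all('td')
    title = cells[labels.index('Filename')].a.get_text(strip=True)
    size = cells[labels.index('Size')].get_text(strip=True)
    seeders = int(cells[labels.index('S')].get_text(strip=True))
    print('{0} | {1} | {2} seeders'.format(title, size, seeders))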
Code example #60
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                try:
                    search_url = (self.urls['rss'], self.urls['search'] +
                                  search_string)[mode != 'RSS']

                    logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                    data = self.get_url(search_url)
                    if not data:
                        logger.log(u"No data returned from provider",
                                   logger.DEBUG)
                        continue

                    if not data.startswith('<?xml'):
                        logger.log(
                            u'Expected xml but got something else, is your mirror failing?',
                            logger.INFO)
                        continue

                    data = BeautifulSoup(data, 'html5lib')

                    entries = data.findAll('item')
                    if not entries:
                        logger.log(u'Returned xml contained no results',
                                   logger.INFO)
                        continue

                    for item in entries:
                        try:
                            title = item.title.text
                            # Use the itorrents link limetorrents provides unless we are not
                            # using blackhole or the link is not itorrents; in that case build
                            # a magnet link, since magnets work through proxies when connecting
                            # directly to the client.
                            download_url = item.enclosure['url']
                            if sickbeard.TORRENT_METHOD != "blackhole" or 'itorrents' not in download_url:
                                # http://itorrents.org/torrent/C7203982B6F000393B1CE3A013504E5F87A46A7F.torrent?title=The-Night-of-the-Generals-(1967)[BRRip-1080p-x264-by-alE13-DTS-AC3][Lektor-i-Napisy-PL-Eng][Eng]
                                # Keep the hash as a separate string; it's needed for failed download handling
                                torrent_hash = re.match(
                                    r"(.*)([A-F0-9]{40})(.*)", download_url,
                                    re.IGNORECASE).group(2)
                                download_url = "magnet:?xt=urn:btih:" + torrent_hash + "&dn=" + title + self._custom_trackers

                            if not (title and download_url):
                                continue
                            # seeders and leechers are presented differently when doing a search and when looking for newly added
                            if mode == 'RSS':
                                # <![CDATA[
                                # Category: <a href="http://www.limetorrents.cc/browse-torrents/TV-shows/">TV shows</a><br /> Seeds: 1<br />Leechers: 0<br />Size: 7.71 GB<br /><br /><a href="http://www.limetorrents.cc/Owen-Hart-of-Gold-Djon91-torrent-7180661.html">More @ limetorrents.cc</a><br />
                                # ]]>
                                description = item.find('description')
                                seeders = try_int(
                                    description.find_all('br')
                                    [0].next_sibling.strip().lstrip('Seeds: '))
                                leechers = try_int(
                                    description.find_all('br')[1].next_sibling.
                                    strip().lstrip('Leechers: '))
                            else:
                                # <description>Seeds: 6982 , Leechers 734</description>
                                description = item.find(
                                    'description').text.partition(',')
                                seeders = try_int(
                                    description[0].lstrip('Seeds: ').strip())
                                leechers = try_int(
                                    description[2].lstrip('Leechers ').strip())

                            torrent_size = item.find('size').text

                            size = convert_size(torrent_size) or -1

                        except (AttributeError, TypeError, KeyError,
                                ValueError):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(
                                    u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)
                            continue

                        item = {
                            'title': title,
                            'link': download_url,
                            'size': size,
                            'seeders': seeders,
                            'leechers': leechers,
                            'hash': None
                        }
                        if mode != 'RSS':
                            logger.log(
                                u"Found result: %s with %s seeders and %s leechers"
                                % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    logger.log(
                        u"Failed parsing provider. Traceback: %r" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)

            results += items

        return results
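When not using a blackhole, the code above converts the .torrent URL into a magnet link by pulling the 40-hex-character info hash out of the URL. A standalone sketch of that conversion; the URL and tracker string are made up:

import re

download_url = ('http://itorrents.org/torrent/'
                'C7203982B6F000393B1CE3A013504E5F87A46A7F.torrent?title=Some-Show')
title = 'Some.Show.S01E01'
custom_trackers = '&tr=udp://tracker.example.invalid:80/announce'  # hypothetical

# The second group captures the 40-character hex info hash embedded in the URL.
match = re.match(r'(.*)([A-F0-9]{40})(.*)', download_url, re.IGNORECASE)
if match:
    torrent_hash = match.group(2)
    magnet = 'magnet:?xt=urn:btih:' + torrent_hash + '&dn=' + title + custom_trackers
    print(magnet)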