Code Example #1
    def _find_season_quality(self, title, torrent_link, ep_number):
        """ Return the modified title of a Season Torrent with the quality found inspecting torrent file list """

        mediaExtensions = [
            'avi', 'mkv', 'wmv', 'divx', 'vob', 'dvr-ms', 'wtv', 'ts',
            'ogv', 'rar', 'zip', 'mp4'
        ]

        quality = Quality.UNKNOWN

        fileName = None

        data = self.getURL(torrent_link)
        if not data:
            return None

        try:
            with BS4Parser(data, features=["html5lib", "permissive"]) as soup:
                file_table = soup.find('table',
                                       attrs={'class': 'torrentFileList'})

                if not file_table:
                    return None

                files = [
                    x.text for x in file_table.find_all(
                        'td', attrs={'class': 'torFileName'})
                ]
                videoFiles = [
                    x for x in files
                    if x.rpartition(".")[2].lower() in mediaExtensions
                ]

                # Filter out single-episode / multi-season torrents
                if len(videoFiles) < ep_number or len(videoFiles) > float(
                        ep_number * 1.1):
                    logger.log(
                        u"Result " + title + " expected " + str(ep_number) +
                        " episodes but the torrent contains " +
                        str(len(videoFiles)) + " video files", logger.DEBUG)
                    logger.log(
                        u"Result " + title +
                        " seems to be a single-episode or multi-season torrent, skipping result...",
                        logger.DEBUG)
                    return None

                if Quality.sceneQuality(title) != Quality.UNKNOWN:
                    return title

                for fileName in videoFiles:
                    quality = Quality.sceneQuality(os.path.basename(fileName))
                    if quality != Quality.UNKNOWN:
                        break

                if fileName is not None and quality == Quality.UNKNOWN:
                    quality = Quality.assumeQuality(os.path.basename(fileName))

                if quality == Quality.UNKNOWN:
                    logger.log(
                        u"Unable to obtain a Season Quality for " + title,
                        logger.DEBUG)
                    return None

                try:
                    myParser = NameParser(showObj=self.show)
                    parse_result = myParser.parse(fileName)
                except (InvalidNameException, InvalidShowException):
                    return None

                logger.log(
                    u"Season quality for " + title + " is " +
                    Quality.qualityStrings[quality], logger.DEBUG)

                if parse_result.series_name and parse_result.season_number:
                    title = parse_result.series_name + ' S%02d' % int(
                        parse_result.season_number
                    ) + ' ' + self._reverseQuality(quality)

                return title

        except Exception:
            logger.log(
                u"Failed parsing " + self.name + " Traceback: " +
                traceback.format_exc(), logger.ERROR)
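
The comma after 'ts' in mediaExtensions above matters: without it, Python's implicit string-literal concatenation silently merges 'ts' and 'ogv' into a single bogus extension. A minimal demonstration:

    # Adjacent string literals concatenate at compile time, so a missing
    # comma quietly produces one merged entry instead of two.
    broken = ['ts' 'ogv', 'rar']   # -> ['tsogv', 'rar']
    fixed = ['ts', 'ogv', 'rar']   # -> ['ts', 'ogv', 'rar']
    assert broken == ['tsogv', 'rar']
    assert fixed == ['ts', 'ogv', 'rar']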
Code Example #2
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                self.search_params['search'] = search_string

                data = self.get_url(self.urls['search'],
                                    params=self.search_params,
                                    returns='text')
                if not data:
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        result_linkz = html.findAll(
                            'a', href=re.compile("torrents-details"))

                        if not result_linkz:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        for link in result_linkz:
                            title = link.text
                            download_url = self.urls['base_url'] + link['href']
                            download_url = download_url.replace(
                                "torrents-details", "download")
                            # FIXME
                            size = -1
                            seeders = 1
                            leechers = 0

                            if not title or not download_url:
                                continue

                            # Filter unseeded torrent
                            # if seeders < self.minseed or leechers < self.minleech:
                            #    if mode != 'RSS':
                            #        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                            #                   (title, seeders, leechers), logger.DEBUG)
                            #    continue

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': None
                            }
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title,
                                           logger.DEBUG)

                            items.append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)

            results += items

        return results
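
The final sort depends on a try_int helper from the project's utility module. A minimal stand-in with the behavior the call above assumes (the real helper may differ in details) would be:

    def try_int(candidate, default_value=0):
        # Coerce candidate to int, falling back to a default on any failure.
        try:
            return int(candidate)
        except (TypeError, ValueError):
            return default_value

    try_int('42')      # 42
    try_int('n/a', 0)  # 0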
Code Example #3
File: torrent9.py Project: wkaminski1/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []
        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                    search_url = self.url + '/search_torrent/' + search_string.replace(
                        '.', '-').replace(' ', '-') + '.html,trie-seeds-d'
                else:
                    search_url = self.url + '/torrents_series.html,trie-date-d'

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_rows = html.findAll('tr')
                    for result in torrent_rows:
                        try:
                            title = result.find('a').get_text(
                                strip=False).replace("HDTV",
                                                     "HDTV x264-Torrent9")
                            title = re.sub(r' Saison',
                                           ' Season',
                                           title,
                                           flags=re.I)
                            tmp = result.find("a")['href'].split(
                                '/')[-1].replace('.html', '.torrent').strip()
                            download_url = (self.url +
                                            '/get_torrent/{0}'.format(tmp) +
                                            ".torrent")
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                result.find(class_="seed_ok").get_text(
                                    strip=True))
                            leechers = try_int(
                                result.find_all('td')[3].get_text(strip=True))
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find_all('td')[1].get_text(
                                strip=True)

                            units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    "Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
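
Torrent9 reports sizes with French unit suffixes (o for octet, i.e. byte), which is why units is overridden before calling convert_size. A simplified sketch of what such a call has to do (the project's real helper may differ in signature and edge cases):

    def convert_size(size_string, units=None):
        # Parse a "value unit" string into bytes using the given unit ladder.
        units = units or ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
        try:
            value, unit = size_string.split()
            return int(float(value) * 1024 ** units.index(unit))
        except (AttributeError, ValueError):
            return None

    convert_size('1.2 Go', units=['o', 'Ko', 'Mo', 'Go', 'To', 'Po'])  # 1288490188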
Code Example #4
File: morethantv.py Project: zaibon/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'tags_type': 1,
            'order_by': 'time',
            'order_way': 'desc',
            'action': 'basic',
            'searchsubmit': 1,
            'searchstr': ''
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.a and td.a.img:
                result = td.a.img.get('title', td.a.get_text(strip=True))
            if not result:
                result = td.get_text(strip=True)
            return result

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_params['searchstr'] = search_string

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', class_='torrent_table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            # skip if torrent has been nuked due to poor quality
                            if result.find('img', alt='Nuked'):
                                continue

                            title = result.find('a', title='View torrent').get_text(strip=True)
                            download_url = urljoin(self.url, result.find('span', title='Download').parent['href'])
                            if not all([title, download_url]):
                                continue

                            cells = result('td')
                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the"
                                               " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index('Size')].get_text(strip=True)
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
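
Resolving cells through labels.index('Seeders') rather than a hard-coded position keeps the parser working if the site reorders its table columns. The pattern in isolation:

    # Toy illustration of the label-to-index lookup used above.
    labels = ['Name', 'Size', 'Seeders', 'Leechers']
    cells = ['Show.S01E01.720p', '1.1 GB', '42', '7']
    seeders = int(cells[labels.index('Seeders')])  # 42, wherever the column sits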
Code Example #5
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        """
        205 = SD, 208 = HD, 200 = All Videos
        https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
        """
        results = []
        # orderby is 7 in browse (sorted by seeders), but 8 in search!
        search_params = {
            "q": "",
            "type": "search",
            "orderby": 8,
            "page": 0,
            "category": 200
        }

        # Units
        units = ["B", "KIB", "MIB", "GIB"]

        def process_column_header(th):
            text = ""
            if th.a:
                text = th.a.get_text(strip=True)
            if not text:
                text = th.get_text(strip=True)
            return text

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                search_urls = (self.urls["search"],
                               self.urls["rss"])[mode == "RSS"]
                if not isinstance(search_urls, list):
                    search_urls = [search_urls]

                for search_url in search_urls:
                    if self.custom_url:
                        if not validators.url(self.custom_url):
                            logger.log(
                                "Invalid custom url: {0}".format(
                                    self.custom_url), logger.WARNING)
                            return results
                        search_url = urljoin(self.custom_url,
                                             search_url.split(self.url)[1])

                    if mode != "RSS":
                        search_params["q"] = search_string
                        logger.log(
                            "Search string: {}".format(
                                search_string.decode("utf-8")), logger.DEBUG)

                        # There is always a 301 from .se to the best mirror, so convert the URL
                        # up front; the extra redirect would hit the provider unnecessarily and
                        # spam the debug log.
                        search_url, params = self.convert_url(
                            search_url, search_params)
                        data = self.get_url(search_url,
                                            params=params,
                                            returns="text")
                    else:
                        data = self.get_url(search_url, returns="text")

                    if not data:
                        logger.log(
                            "URL did not return data; maybe try a custom URL or a different one",
                            logger.DEBUG)
                        continue

                    with BS4Parser(data, "html5lib") as html:
                        torrent_table = html.find("table", id="searchResult")
                        torrent_rows = torrent_table(
                            "tr") if torrent_table else []

                        # Continue only if at least one Release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                "Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        labels = [
                            process_column_header(label)
                            for label in torrent_rows[0]("th")
                        ]

                        # Skip column headers
                        for result in torrent_rows[1:]:
                            try:
                                cells = result("td")

                                # Funky JS on the page mangles titles, so parse the 'Details for' title attribute instead
                                title = result.find(
                                    class_="detLink")['title'].split(
                                        'Details for ', 1)[-1]
                                download_url = result.find(
                                    title="Download this torrent using magnet"
                                )["href"] + self._custom_trackers
                                if not self.magnet_regex.match(download_url):
                                    logger.log(
                                        "Got an invalid magnet: {0}".format(
                                            download_url))
                                    logger.log(
                                        "Invalid ThePirateBay proxy, please try another one",
                                        logger.DEBUG)
                                    continue

                                if not all([title, download_url]):
                                    continue

                                seeders = try_int(
                                    cells[labels.index("SE")].get_text(
                                        strip=True))
                                leechers = try_int(
                                    cells[labels.index("LE")].get_text(
                                        strip=True))

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != "RSS":
                                        logger.log(
                                            "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                            .format(title, seeders,
                                                    leechers), logger.DEBUG)
                                    continue

                                # Accept torrents only from trusted (VIP/Trusted) uploaders on every episode search
                                if self.confirmed and not result.find(
                                        alt=re.compile(r"VIP|Trusted")):
                                    if mode != "RSS":
                                        logger.log(
                                            "Found result: {0} but that doesn't seem like a trusted result so I'm ignoring it"
                                            .format(title), logger.DEBUG)
                                    continue

                                # Convert size after all possible skip scenarios
                                torrent_size = re.sub(
                                    r".*Size ([\d.]+).+([KMGT]iB).*", r"\1 \2",
                                    result.find(class_="detDesc").get_text(
                                        strip=True))
                                size = convert_size(torrent_size,
                                                    units=units) or -1

                                item = {
                                    'title': title,
                                    'link': download_url,
                                    'size': size,
                                    'seeders': seeders,
                                    'leechers': leechers,
                                    'hash': ''
                                }
                                if mode != "RSS":
                                    logger.log(
                                        "Found result: {0} with {1} seeders and {2} leechers"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)

                                items.append(item)
                            except StandardError:
                                continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
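
self.magnet_regex is defined elsewhere in the provider. A plausible stand-in (an assumption, not necessarily the project's actual pattern) accepts the usual btih magnet form with a 32- to 40-character info-hash:

    import re

    # Hypothetical shape for the provider's magnet check.
    magnet_regex = re.compile(r'magnet:\?xt=urn:btih:[0-9A-Za-z]{32,40}')

    magnet_regex.match('magnet:?xt=urn:btih:' + '0' * 40)  # matches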
Code Example #6
File: iptorrents.py Project: keithzg/SickGear
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': 'detail',
            'get': 'download'
        }.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(
                    search_string,
                    unicode) and unidecode(search_string) or search_string
                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
                search_url = self.urls['search'] % (
                    self._categories_string(mode, '%s', ';'), search_string,
                    (';free', '')[not self.freeleech],
                    (';o=seeders', '')['Cache' == mode])

                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib',
                                                   'permissive']) as soup:
                        torrent_table = soup.find(id='torrents') or soup.find(
                            'table', class_='torrents')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all(
                            'tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(
                                    tr, header_strip='(?i)(?:leechers|seeders|size);')
                                seeders, leechers = [
                                    tryInt(
                                        tr.find('td', class_='t_' +
                                                x).get_text().strip())
                                    for x in 'seeders', 'leechers'
                                ]
                                if self._reject_item(seeders, leechers):
                                    continue

                                info = tr.find('a', href=rc['info'])
                                title = (info.attrs.get('title')
                                         or info.get_text()).strip()
                                size = cells[head['size']].get_text().strip()
                                download_url = self._link(
                                    tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders,
                                     self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(
                        u'Failed to parse. Traceback: %s' %
                        traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
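
The rc dict compiles each case-insensitive lookup pattern once per search instead of on every table row. The same idiom in isolation:

    import re

    # Precompile case-insensitive patterns, keyed by purpose.
    rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
        'info': 'detail',
        'get': 'download'
    }.items())
    assert rc['get'].search('DOWNLOAD.torrent')  # matches case-insensitively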
Code Example #7
    def search(self, search_params, age=0, ep_obj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '3' if self.freeleech else '0'

        if not self.login():
            return results

        for mode in search_params.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string,
                               logger.DEBUG)

                searchURL = self.urls['search'] % (freeleech, search_string)
                logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
                init_html = self.get_url(searchURL)
                max_page_number = 0

                if not init_html:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                try:
                    with BS4Parser(init_html, 'html5lib') as init_soup:

                        # Check to see if there is more than 1 page of results
                        pager = init_soup.find('div', {'class': 'pager'})
                        if pager:
                            page_links = pager.find_all('a', href=True)
                        else:
                            page_links = []

                        if len(page_links) > 0:
                            for lnk in page_links:
                                link_text = lnk.text.strip()
                                if link_text.isdigit():
                                    page_int = int(link_text)
                                    if page_int > max_page_number:
                                        max_page_number = page_int

                        # limit page number to 15 just in case something goes wrong
                        if max_page_number > 15:
                            max_page_number = 15
                        # limit RSS search
                        if max_page_number > 3 and mode == 'RSS':
                            max_page_number = 3
                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.ERROR)
                    continue

                data_response_list = [init_html]

                # Freshon starts counting pages from zero, even though it displays numbers from 1
                if max_page_number > 1:
                    for i in range(1, max_page_number):

                        time.sleep(1)
                        page_searchURL = searchURL + '&page=' + str(i)
                        # logger.log(u"Search string: " + page_searchURL, logger.DEBUG)
                        page_html = self.get_url(page_searchURL)

                        if not page_html:
                            continue

                        data_response_list.append(page_html)

                try:

                    for data_response in data_response_list:

                        with BS4Parser(data_response, 'html5lib') as html:

                            torrent_rows = html.findAll(
                                "tr", {"class": re.compile('torrent_[0-9]*')})

                            # Continue only if a Release is found
                            if len(torrent_rows) == 0:
                                logger.log(
                                    u"Data returned from provider does not contain any torrents",
                                    logger.DEBUG)
                                continue

                            for individual_torrent in torrent_rows:

                                # skip if torrent has been nuked due to poor quality
                                if individual_torrent.find(
                                        'img', alt='Nuked') is not None:
                                    continue

                                try:
                                    title = individual_torrent.find(
                                        'a', {'class': 'torrent_name_link'
                                              })['title']
                                except Exception:
                                    logger.log(
                                        u"Unable to parse torrent title. Traceback: %s "
                                        % traceback.format_exc(),
                                        logger.WARNING)
                                    continue

                                try:
                                    details_url = individual_torrent.find(
                                        'a',
                                        {'class': 'torrent_name_link'})['href']
                                    torrent_id = int((re.match(
                                        '.*?([0-9]+)$',
                                        details_url).group(1)).strip())
                                    download_url = self.urls['download'] % (
                                        str(torrent_id))
                                    seeders = try_int(
                                        individual_torrent.find(
                                            'td', {
                                                'class': 'table_seeders'
                                            }).find('span').text.strip(), 1)
                                    leechers = try_int(
                                        individual_torrent.find(
                                            'td', {
                                                'class': 'table_leechers'
                                            }).find('a').text.strip(), 0)
                                    # FIXME
                                    size = -1
                                except Exception:
                                    continue

                                if not all([title, download_url]):
                                    continue

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log(
                                            u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                            .format(title, seeders,
                                                    leechers), logger.DEBUG)
                                    continue

                                item = title, download_url, size, seeders, leechers
                                if mode != 'RSS':
                                    logger.log(u"Found result: %s " % title,
                                               logger.DEBUG)

                                items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
Code Example #8
File: iptorrents.py Project: Djang0/SickRage-1
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        freeleech = '&free=on' if self.freeleech else ''

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:
                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
                search_url = self.urls['search'] % (self.categories, freeleech,
                                                    search_string)
                search_url += ';o=seeders' if mode != 'RSS' else ''

                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log(
                            "Invalid custom url: {0}".format(self.custom_url),
                            logger.WARNING)
                        return results
                    search_url = urljoin(self.custom_url,
                                         search_url.split(self.url)[1])

                data = self.get_url(search_url, returns='text')
                if not data:
                    continue

                try:
                    data = re.sub(r'(?im)<button.+?</button>', '', data, 0)
                    with BS4Parser(data, 'html5lib') as html:
                        if not html:
                            logger.log(u"No data returned from provider",
                                       logger.DEBUG)
                            continue

                        if html.find(text='No Torrents Found!'):
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        torrent_table = html.find('table', id='torrents')
                        torrents = torrent_table('tr') if torrent_table else []

                        # Continue only if at least one release is found
                        if len(torrents) < 2:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        for result in torrents[1:]:
                            try:
                                title = result('td')[1].find('a').text
                                download_url = urljoin(
                                    search_url,
                                    result('td')[3].find('a')['href'])
                                seeders = int(
                                    result.find('td',
                                                class_='ac t_seeders').text)
                                leechers = int(
                                    result.find('td',
                                                class_='ac t_leechers').text)
                                torrent_size = result('td')[5].text
                                size = convert_size(torrent_size) or -1
                            except (AttributeError, TypeError, KeyError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)

                except Exception as e:
                    logger.log(
                        u"Failed parsing provider. Error: {0!r}".format(ex(e)),
                        logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)

            results += items

        return results
Code Example #9
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        for mode in search_params:
            items = []
            logger.log(u'Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                self.page = 1
                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):

                    if last_page:
                        break

                    search_url = self.urls['search_page'].format(
                        search_string, x)

                    logger.log(
                        u'Search string: {0}'.format(
                            search_string.decode('utf-8')), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u'No data returned from provider',
                                   logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            table_header = html.find('tr', class_='bordo')
                            torrent_table = table_header.find_parent(
                                'table') if table_header else None
                            if not torrent_table:
                                logger.log(u'Could not find table of torrents',
                                           logger.ERROR)
                                continue

                            torrent_rows = torrent_table('tr')

                            # Continue only if at least one release is found
                            if (len(torrent_rows) < 6) or (len(
                                    torrent_rows[2]('td')) == 1):
                                logger.log(
                                    u'Data returned from provider does not contain any torrents',
                                    logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 45:
                                last_page = 1

                            for result in torrent_rows[2:-3]:

                                try:
                                    link = result('td')[1].find('a')['href']
                                    title = re.sub(
                                        ' +', ' ',
                                        link.rsplit('/',
                                                    1)[-1].replace('_', ' '))
                                    torrent_hash = result('td')[3].find(
                                        'input',
                                        class_='downarrow')['value'].upper()
                                    seeders = try_int(result('td')[5].text)
                                    leechers = try_int(result('td')[6].text)
                                    torrent_size = result('td')[2].string
                                    size = convert_size(torrent_size) or -1

                                    # Download Urls
                                    download_url = self.urls['download'] % torrent_hash
                                    if urllib.urlopen(
                                            download_url).getcode() == 404:
                                        logger.log(
                                            u'Torrent hash not found in itorrents.org, searching for magnet',
                                            logger.DEBUG)
                                        data_detail = self.get_url(
                                            link, returns='text')
                                        with BS4Parser(
                                                data_detail,
                                                'html5lib') as html_detail:
                                            sources_row = html_detail.find(
                                                'td', class_='header2').parent
                                            source_magnet = sources_row(
                                                'td')[1].find('a',
                                                              class_='forbtn',
                                                              title='Magnet')
                                            if source_magnet:
                                                download_url = source_magnet[
                                                    'href']
                                            else:
                                                continue

                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(
                                    self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(
                                        title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(
                                        title) and not self.subtitle:
                                    logger.log(
                                        u'Torrent is subtitled, skipping: {0} '
                                        .format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(
                                        title):
                                    logger.log(
                                        u"Torrent isn't English audio/subtitled, skipping: {0}"
                                        .format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})',
                                                       search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower(
                                ) and search_show.lower() in show_title.lower(
                                ):
                                    new_title = search_show + ep_params
                                    title = new_title

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee]\d{1,2}(?:-\d{1,2})?)',
                                                   '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    logger.log(
                                        u'Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                    continue

                                item = {
                                    'title': title,
                                    'link': download_url,
                                    'size': size,
                                    'seeders': seeders,
                                    'leechers': leechers,
                                    'hash': ''
                                }
                                if mode != 'RSS':
                                    logger.log(
                                        u'Found result: {0} with {1} seeders and {2} leechers'
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(
                            u'Failed parsing provider. Traceback: {0}'.format(
                                traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                           reverse=True)

                results += items

        return results
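
The urllib.urlopen(download_url).getcode() == 404 probe above is Python 2 only and downloads the response body just to test availability. A lighter HEAD request (a sketch assuming the requests library is available) performs the same check:

    import requests

    def torrent_exists(url):
        # HEAD avoids fetching the .torrent body just to detect a 404.
        try:
            return requests.head(url, timeout=10).status_code != 404
        except requests.RequestException:
            return False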
Code Example #10
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self.url:
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': r'torrent.?(\d+)',
            'versrc': r'ver\.',
            'verified': 'Verified'
        }.iteritems())

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                search_string = isinstance(
                    search_string,
                    unicode) and unidecode(search_string) or search_string

                search_url = self.urls['browse'] if 'Cache' == mode \
                    else self.urls['search'] % (urllib.quote_plus(search_string).replace('+', '-'))

                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException
                    with BS4Parser(html.replace('thead', 'tr')) as soup:

                        tbl = soup.find(
                            'div',
                            class_=('panel panel-default',
                                    'table-responsive')['Cache' == mode])
                        if None is tbl:
                            raise generic.HaltParseException
                        tbl = tbl.find(
                            'table',
                            class_='table table-striped table-bordered table-hover table-condensed')
                        tbl_rows = [] if not tbl else tbl.find_all('tr')

                        if 2 > len(tbl_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in tbl_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(
                                    tr)
                                seeders, leechers, size = [
                                    tryInt(n, n) for n in [
                                        cells[head[x]].get_text().strip()
                                        for x in 'seed', 'leech', 'size'
                                    ]
                                ]
                                if self._reject_item(
                                        seeders,
                                        leechers,
                                        verified=self.confirmed and not (
                                            tr.find('img', src=rc['versrc'])
                                            or tr.find('img',
                                                       title=rc['verified']))):
                                    continue

                                info = tr.find('a', href=rc['info']) or {}
                                title = info and info.get_text().strip()
                                tid_href = info and tryInt(rc['info'].findall(
                                    info['href'])[0])
                                download_url = tid_href and self._link(
                                    tid_href)
                            except (AttributeError, TypeError, ValueError,
                                    IndexError):
                                continue

                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders,
                                     self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(
                        u'Failed to parse. Traceback: %s' %
                        traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Code Example #11
File: hounddawgs.py Project: EggieCode/SickRage
    def _doSearch(self,
                  search_strings,
                  search_mode='eponly',
                  epcount=0,
                  age=0,
                  epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self._doLogin():
            return results

        for mode in search_strings.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string,
                               logger.DEBUG)

                self.search_params['searchstr'] = search_string

                data = self.getURL(self.urls['search'],
                                   params=self.search_params)
                if not data:
                    continue

                strTableStart = "<table class=\"torrent_table"
                startTableIndex = data.find(strTableStart)
                if startTableIndex == -1:
                    continue
                trimmedData = data[startTableIndex:]

                try:
                    with BS4Parser(trimmedData,
                                   features=["html5lib",
                                             "permissive"]) as html:
                        result_table = html.find('table',
                                                 {'id': 'torrent_table'})

                        if not result_table:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        result_tbody = result_table.find('tbody')
                        entries = result_tbody.contents
                        del entries[1::2]

                        for result in entries[1:]:

                            torrent = result.find_all('td')
                            if len(torrent) <= 1:
                                break

                            allAs = (torrent[1]).find_all('a')

                            try:
                                link = self.urls['base_url'] + allAs[2].attrs[
                                    'href']
                                #url = result.find('td', attrs={'class': 'quickdownload'}).find('a')
                                title = allAs[2].string
                                #Trim title so it is accepted by the scene check (feature requested in forum)
                                title = title.replace("custom.", "")
                                title = title.replace("CUSTOM.", "")
                                title = title.replace("Custom.", "")
                                title = title.replace("dk", "")
                                title = title.replace("DK", "")
                                title = title.replace("Dk", "")
                                title = title.replace("subs.", "")
                                title = title.replace("SUBS.", "")
                                title = title.replace("Subs.", "")

                                download_url = self.urls['base_url'] + allAs[
                                    0].attrs['href']
                                torrent_id = link.replace(
                                    self.urls['base_url'] + 'torrents.php?id=',
                                    '')
                                #FIXME
                                size = -1
                                seeders = 1
                                leechers = 0

                            except (AttributeError, TypeError):
                                continue

                            if not title or not download_url:
                                continue

                            #Filter unseeded torrent
                            #if seeders < self.minseed or leechers < self.minleech:
                            #    if mode != 'RSS':
                            #        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            #    continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title,
                                           logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.ERROR)

            #For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]
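
The nine chained title.replace() calls in the example above cover only three capitalisation variants of each token. As a minimal sketch, the same cleanup can be done with one case-insensitive substitution (scrub_title is an illustrative name, not part of the provider):

import re

def scrub_title(title):
    # One case-insensitive pass over the same three tokens; like the
    # original chain, this strips "dk" anywhere in the title, not only
    # when it appears as a standalone release tag.
    return re.sub(r'(?i)custom\.|subs\.|dk', '', title)

# scrub_title("Show.S01E01.CUSTOM.DK.720p") -> "Show.S01E01..720p"
# (the doubled dot matches what the original replace() chain produces)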
Code example #12
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        self.categories = "cat=" + str(self.cat)

        for mode in search_params:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode == 'RSS':
                    self.page = 2

                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):
                    z = x * 20
                    if last_page:
                        break

                    if mode != 'RSS':
                        search_url = (self.urls['search_page'] + '&filter={2}').format(z, self.categories, search_string)
                    else:
                        search_url = self.urls['search_page'].format(z, self.categories)

                    if mode != 'RSS':
                        logger.log("Search string: {0}".format
                                   (search_string.decode("utf-8")), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log("No data returned from provider", logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            torrent_table = html.find('table', class_='copyright')
                            torrent_rows = torrent_table('tr') if torrent_table else []

                            # Continue only if at least one release is found
                            if len(torrent_rows) < 3:
                                logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 42:
                                last_page = 1

                            for result in torrent_table('tr')[2:]:

                                try:
                                    link = result.find('td').find('a')
                                    title = link.string
                                    download_url = self.urls['download'] % result('td')[8].find('a')['href'][-8:]
                                    leechers = result('td')[3]('td')[0].text
                                    leechers = int(leechers.strip('[]'))
                                    seeders = result('td')[3]('td')[1].text
                                    seeders = int(seeders.strip('[]'))
                                    torrent_size = result('td')[3]('td')[3].text.strip('[]') + " GB"
                                    size = convert_size(torrent_size) or -1
                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(result) and not self.subtitle:
                                    logger.log("Torrent is subtitled, skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(result):
                                    logger.log("Torrent isnt english audio/subtitled , skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    title = search_show + ep_params

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'[Ee]\d{1,2}(?:-\d{1,2})?', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                                   (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

                results += items

        return results
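
Example #12 leans on a convert_size helper to turn the scraped '[1.57]' cell, suffixed with ' GB', into a byte count. A minimal stand-in, assuming only that the real helper parses strings such as '1.57 GB' and returns a falsy value on failure, so that 'convert_size(torrent_size) or -1' falls back cleanly:

def convert_size(size_string, units=("B", "KB", "MB", "GB", "TB", "PB")):
    # "1.57 GB" -> 1685774663 bytes; None when the string is malformed.
    try:
        value, unit = size_string.strip().split()
        return int(float(value) * 1024 ** units.index(unit.upper()))
    except (AttributeError, ValueError):
        return None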
Code example #13
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang

        """
        Search query:
        http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe

        cat = 4 => Shows
        modo = listado => display results mode
        orden = fecha => order
        buscar => Search show
        pag = 1 => page number
        """

        search_params = {
            'cat': 4,
            'modo': 'listado',
            'orden': 'fecha',
            'pag': 1,
            'buscar': ''

        }

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)

            # Only search if user conditions are true
            if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
                logger.log(u"Show info is not spanish, skipping provider search", logger.DEBUG)
                continue

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log(u"Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_string = re.sub(r'S0*(\d*)E(\d*)', r'\1x\2', search_string)
                search_params['buscar'] = search_string.strip() if mode != 'RSS' else ''

                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        torrent_table = html.find('table', class_='fichas-listado')
                        torrent_rows = torrent_table('tr') if torrent_table else []

                        if len(torrent_rows) < 2:
                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        for row in torrent_rows[1:]:
                            try:
                                download_url = self.urls['base_url'] + row.find('a')['href']
                                title = self._processTitle(row.find('a', class_='nombre')['title'])
                                seeders = try_int(row.find('td', class_='semillas').get_text(strip=True))
                                leechers = try_int(row.find('td', class_='clientes').get_text(strip=True))

                                # Provider does not provide size
                                size = -1

                            except (AttributeError, TypeError, KeyError, ValueError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)

                except Exception:
                    logger.log(u"Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.WARNING)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
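
The docstring in example #13 documents elitetorrent's query parameters. As a self-contained sketch of how they compose into the search URL (build_search_url is illustrative; only the base URL and parameter names come from the docstring):

try:
    from urllib import urlencode          # Python 2, as used in the examples
except ImportError:
    from urllib.parse import urlencode    # Python 3

def build_search_url(show, page=1):
    params = {
        'cat': 4,            # 4 => Shows
        'modo': 'listado',   # display results as a list
        'orden': 'fecha',    # order by date
        'pag': page,         # page number
        'buscar': show,      # search term
    }
    return 'http://www.elitetorrent.net/torrents.php?' + urlencode(params)

# build_search_url('fringe') produces the documented form (parameter
# order may vary):
# http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe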
Code example #14
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                for url in self.urls:
                    if mode != 'RSS':
                        searchURL = url + 'usearch/%s/?field=seeders&sorder=desc' % (
                            urllib.quote(unidecode(search_string)))
                        logger.log(u"Search string: " + searchURL,
                                   logger.DEBUG)
                    else:
                        searchURL = url + 'tv/?field=time_add&sorder=desc'
                        logger.log(u"KAT cache update URL: " + searchURL,
                                   logger.DEBUG)

                    html = self.getURL(searchURL)
                    if html:
                        self.url = url
                        break

                if not html:
                    continue

                try:
                    with BS4Parser(html, features=["html5lib",
                                                   "permissive"]) as soup:
                        torrent_table = soup.find('table',
                                                  attrs={'class': 'data'})
                        torrent_rows = torrent_table.find_all(
                            'tr') if torrent_table else []

                        #Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                u"The data returned from " + self.name +
                                " does not contain any torrents",
                                logger.WARNING)
                            continue

                        for tr in torrent_rows[1:]:
                            try:
                                link = urlparse.urljoin(
                                    self.url, (tr.find('div', {
                                        'class': 'torrentname'
                                    }).find_all('a')[1])['href'])
                                id = tr.get('id')[-7:]
                                title = (tr.find('div', {'class': 'torrentname'}).find_all('a')[1]).text \
                                        or (tr.find('div', {'class': 'torrentname'}).find_all('a')[2]).text
                                url = tr.find('a', 'imagnet')['href']
                                verified = bool(tr.find('a', 'iverify'))
                                trusted = bool(tr.find('img', {'alt': 'verified'}))
                                seeders = int(tr.find_all('td')[-2].text)
                                leechers = int(tr.find_all('td')[-1].text)
                            except (AttributeError, TypeError):
                                continue

                            if mode != 'RSS' and (seeders < self.minseed
                                                  or leechers < self.minleech):
                                continue

                            if self.confirmed and not verified:
                                logger.log(
                                    u"KAT Provider found result " + title +
                                    " but that doesn't seem like a verified result so I'm ignoring it",
                                    logger.DEBUG)
                                continue

                            #Check that the number of video files matches the episode count for the season, and find the real quality of a full-season torrent by inspecting its file list
                            if mode == 'Season' and search_mode == 'sponly':
                                ep_number = int(
                                    epcount /
                                    len(set(allPossibleShowNames(self.show))))
                                title = self._find_season_quality(
                                    title, link, ep_number)

                            if not title or not url:
                                continue

                            item = title, url, id, seeders, leechers

                            items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing " + self.name + " Traceback: " +
                        traceback.format_exc(), logger.ERROR)

            #For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]
Code example #15
    def _doSearch(self,
                  search_params,
                  search_mode='eponly',
                  epcount=0,
                  age=0,
                  epObj=None):

        results = []
        data = None

        if not self._doLogin():
            return results

        for search_string in [search_params]:

            if isinstance(search_string, unicode):
                search_string = unidecode(search_string)

            searchURLS = []
            if search_mode == 'sponly':
                searchURLS += [
                    self.urls['archive'] % (urllib.quote(search_string))
                ]
            else:
                searchURLS += [
                    self.urls['search'] %
                    (urllib.quote(search_string), self.categories)
                ]
                searchURLS += [
                    self.urls['nonscene'] % (urllib.quote(search_string))
                ]
                searchURLS += [
                    self.urls['foreign'] % (urllib.quote(search_string))
                ]

            for searchURL in searchURLS:
                logger.log(u"Search string: " + searchURL, logger.DEBUG)
                try:
                    data = self.getURL(searchURL)
                    time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                except Exception as e:
                    logger.log(
                        u"Unable to fetch data reason: {0}".format(str(e)),
                        logger.WARNING)
                    continue

                if not data:
                    continue

                with BS4Parser(data, features=["html5lib",
                                               "permissive"]) as html:
                    torrent_table = html.find('table',
                                              attrs={'id': 'torrents-table'})
                    torrent_rows = torrent_table.find_all(
                        'tr') if torrent_table else []

                    #Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        info = u'The data returned from %s does not contain any torrents' % self.name
                        if html.title:
                            info += ' (%s)' % html.title
                        logger.log(info, logger.DEBUG)
                        continue

                    for result in torrent_table.find_all('tr')[1:]:

                        try:
                            link = result.find('td',
                                               attrs={
                                                   'class': 'ttr_name'
                                               }).find('a')
                            all_urls = result.find('td',
                                                   attrs={
                                                       'class': 'td_dl'
                                                   }).find_all('a', limit=2)
                            # The Foreign section contains two links, the other sections only one
                            if self._isSection('Foreign', data):
                                url = all_urls[1]
                            else:
                                url = all_urls[0]

                            title = link.string
                            if re.search(r'\.\.\.', title):
                                data = self.getURL(self.url + "/" +
                                                   link['href'])
                                if data:
                                    with BS4Parser(data) as details_html:
                                        title = re.search(
                                            '(?<=").+(?<!")',
                                            details_html.title.string).group(0)
                            download_url = self.urls['download'] % url['href']
                            id = int(link['href'].replace('details?id=', ''))
                            seeders = int(
                                result.find('td',
                                            attrs={
                                                'class': 'ttr_seeders'
                                            }).string)
                            leechers = int(
                                result.find('td',
                                            attrs={
                                                'class': 'ttr_leechers'
                                            }).string)
                        except (AttributeError, TypeError):
                            continue

                        if not title or not download_url or seeders < self.minseed or leechers < self.minleech:
                            continue

                        item = title, download_url, id, seeders, leechers
                        logger.log(
                            u"Found result: " + title.replace(' ', '.') +
                            " (" + searchURL + ")", logger.DEBUG)

                        results.append(item)

        results.sort(key=lambda tup: tup[3], reverse=True)

        return results
Code example #16
File: blutopia.py Project: RedSparr0w/SickGear
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': 'torrents',
            'get': '(.*?download)_check(.*)'
        }.items())
        log = ''
        if self.filter:
            non_marked = 'f0' in self.filter
            # if search_any, use unselected to exclude, else use selected to keep
            filters = ([f for f in self.may_filter if f in self.filter],
                       [f for f in self.may_filter
                        if f not in self.filter])[non_marked]
            filters += (
                ((all([x in filters for x in 'free', 'double', 'feat'])
                  and ['freedoublefeat'] or []) +
                 (all([x in filters
                       for x in 'free', 'double']) and ['freedouble'] or []) +
                 (all([x in filters
                       for x in 'feat', 'double']) and ['featdouble'] or [])),
                ((not all([x not in filters for x in 'free', 'double', 'feat'])
                  and ['freedoublefeat'] or []) +
                 (not all([x not in filters
                           for x in 'free', 'double']) and ['freedouble']
                  or []) +
                 (not all([x not in filters
                           for x in 'feat', 'double']) and ['featdouble']
                  or [])))[non_marked]
            rc['filter'] = re.compile('(?i)^(%s)$' % '|'.join([
                '%s' % f for f in filters
                if (f in self.may_filter and self.may_filter[f][1]) or f
            ]))
            log = '%sing (%s) ' % (('keep', 'skipp')[non_marked], ', '.join([
                f in self.may_filter and self.may_filter[f][0] or f
                for f in filters
            ]))
        for mode in search_params.keys():
            if mode in ['Season', 'Episode']:
                show_type = self.show.air_by_date and 'Air By Date' \
                    or self.show.is_sports and 'Sports' or None
                if show_type:
                    logger.log(
                        u'Provider does not carry shows of type: [%s], skipping'
                        % show_type, logger.DEBUG)
                    return results

            for search_string in search_params[mode]:
                search_string = isinstance(
                    search_string,
                    unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (self.token, '+'.join(
                    search_string.split()), self._categories_string(
                        mode, ''), '', '', '')

                resp = self.get_url(search_url, json=True)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not resp or not resp.get('rows'):
                        raise generic.HaltParseException

                    html = '<html><body>%s</body></html>' % \
                           self.resp.replace('</tbody>', '%s</tbody>' % ''.join(resp.get('result', [])))
                    with BS4Parser(html, features=['html5lib',
                                                   'permissive']) as soup:
                        torrent_table = soup.find('table', class_='table')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all(
                            'tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            if any(self.filter):
                                marked = ','.join([
                                    x.attrs.get('data-original-title',
                                                '').lower()
                                    for x in tr.find_all(
                                        'i',
                                        attrs={
                                            'class': [
                                                'text-gold', 'fa-diamond',
                                                'fa-certificate'
                                            ]
                                        })
                                ])
                                # noinspection PyTypeChecker
                                munged = ''.join(
                                    filter(marked.__contains__,
                                           ['free', 'double', 'feat']))
                                if ((non_marked
                                     and rc['filter'].search(munged)) or
                                    (not non_marked
                                     and not rc['filter'].search(munged))):
                                    continue
                            try:
                                head = head if None is not head else self._header_row(
                                    tr, {
                                        'seed': r'circle-up',
                                        'leech': r'circle-down',
                                        'size': r'fa-file'
                                    })
                                seeders, leechers, size = [
                                    tryInt(n, n) for n in [
                                        cells[head[x]].get_text().strip()
                                        for x in 'seed', 'leech', 'size'
                                    ]
                                ]
                                if self._peers_fail(mode, seeders, leechers):
                                    continue

                                title = tr.find(
                                    'a',
                                    href=rc['info'])['data-original-title']
                                download_url = self._link(rc['get'].sub(
                                    r'\1\2',
                                    tr.find('a', href=rc['get'])['href']))
                            except (AttributeError, TypeError, ValueError,
                                    IndexError):
                                continue

                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders,
                                     self._bytesizer(size)))
Code example #17
    def _doSearch(self,
                  search_params,
                  search_mode='eponly',
                  epcount=0,
                  age=0,
                  epObj=None):

        logger.log(u"_doSearch started with ..." + str(search_params),
                   logger.DEBUG)

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        # check for auth
        if not self._doLogin():
            return results

        for mode in search_params.keys():

            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                searchURL = self.urls['search'] % (urllib.quote(search_string),
                                                   self.categories)

                logger.log(u"Search string: " + searchURL, logger.DEBUG)

                r = self.opener.open(searchURL)
                with BS4Parser(r, features=["html5lib", "permissive"]) as html:
                    resultsTable = html.find("div", {"class": "DataGrid"})
                    logger.log(u"Page opened", logger.DEBUG)

                    if resultsTable:
                        logger.log(u"We have results ", logger.DEBUG)
                        rows = resultsTable.findAll("ul")

                        for row in rows:
                            link = row.find("a", title=True)
                            title = link['title']

                            autogetURL = self.url + '/' + (row.find(
                                "li", {
                                    "class": "torrents_name"
                                }).find('a')['href'][1:]).replace(
                                    '#FTD_MENU', '&menu=4')
                            r = self.opener.open(autogetURL).read()
                            with BS4Parser(r,
                                           features=["html5lib",
                                                     "permissive"]) as html:
                                downloadURL = html.find(
                                    "div", {
                                        "class": "autoget"
                                    }).find('a')['href']
                                item = title, downloadURL
                                logger.log(u"Download URL : " + downloadURL,
                                           logger.DEBUG)

                                items[mode].append(item)

            results += items[mode]

        return results
Code example #18
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        # TV, Episodes, BoxSets, Episodes HD, Animation, Anime, Cartoons
        # 2,26,27,32,7,34,35

        # Units
        units = ["B", "KB", "MB", "GB", "TB", "PB"]

        def process_column_header(td):
            result = ""
            if td.a:
                result = td.a.get("title")
            if not result:
                result = td.get_text(strip=True)
            return result

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != "RSS":
                    logger.log(
                        "Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                    categories = ["2", "7", "35"]
                    categories += ["26", "32"] if mode == "Episode" else ["27"]
                    if self.show and self.show.is_anime:
                        categories += ["34"]
                else:
                    categories = ["2", "26", "27", "32", "7", "34", "35"]

                search_params = {
                    "categories": ",".join(categories),
                    "query": search_string
                }

                data = self.get_url(self.urls["search"],
                                    params=search_params,
                                    returns="text")
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    torrent_table = html.find("table", id="torrenttable")
                    torrent_rows = torrent_table.find_all(
                        "tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            "Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    labels = [
                        process_column_header(label)
                        for label in torrent_rows[0].find_all("th")
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            title = result.find(
                                "td",
                                class_="name").find("a").get_text(strip=True)
                            download_url = urljoin(
                                self.url,
                                result.find(
                                    "td",
                                    class_="quickdownload").find("a")["href"])
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                result.find(
                                    "td",
                                    class_="seeders").get_text(strip=True))
                            leechers = try_int(
                                result.find(
                                    "td",
                                    class_="leechers").get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the"
                                        " minimum seeders or leechers: {} (S:{} L:{})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = result.find_all("td")[labels.index(
                                "Size")].get_text()
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': None
                            }
                            if mode != "RSS":
                                logger.log(
                                    "Found result: {} with {} seeders and {} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
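
Example #18 resolves column positions from the header row rather than hard-coding indices, so labels.index("Size") keeps working if the tracker reorders its table. The same pattern condensed into one helper (a sketch assuming BeautifulSoup tags):

def column_index(header_row, wanted):
    # Same label extraction as process_column_header() above: prefer the
    # anchor's title attribute, fall back to the cell text.
    labels = [(th.a and th.a.get("title")) or th.get_text(strip=True)
              for th in header_row.find_all("th")]
    return labels.index(wanted)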
Code example #19
File: alpharatio.py Project: thatguy11/SickRage
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                searchURL = self.urls['search'] % (search_string, self.catagories)

                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
                        torrent_table = html.find('table', attrs={'id': 'torrent_table'})
                        torrent_rows = torrent_table.find_all('tr') if torrent_table else []

                        #Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(u"The data returned from " + self.name + " does not contain any torrents",
                                       logger.DEBUG)
                            continue

                        for result in torrent_rows[1:]:
                            cells = result.find_all('td')
                            link = result.find('a', attrs={'dir': 'ltr'})
                            url = result.find('a', attrs={'title': 'Download'})

                            try:
                                title = link.contents[0]
                                download_url = self.urls['download'] % (url['href'])
                                id = link['href'][-6:]
                                seeders = int(cells[len(cells)-2].contents[0])
                                leechers = int(cells[len(cells)-1].contents[0])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            #Filter unseeded torrent
                            if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
                                continue

                            if not title or not download_url:
                                continue

                            item = title, download_url, id, seeders, leechers
                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

            #For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]
Code example #20
File: bitsoup.py Project: MaximusCub/SickRageTV
    def _doSearch(self,
                  search_params,
                  search_mode='eponly',
                  epcount=0,
                  age=0,
                  epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                searchURL = self.urls['search'] % (urllib.quote(search_string),
                                                   self.categories)

                logger.log(u"Search string: " + searchURL, logger.DEBUG)

                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, "html.parser") as html:
                        torrent_table = html.find('table',
                                                  attrs={'class': 'koptekst'})
                        torrent_rows = torrent_table.find_all(
                            'tr') if torrent_table else []

                        #Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                u"The data returned from " + self.name +
                                " does not contain any torrents", logger.DEBUG)
                            continue

                        for result in torrent_rows[1:]:
                            cells = result.find_all('td')

                            link = cells[1].find('a')
                            download_url = self.urls['download'] % cells[
                                2].find('a')['href']

                            id = link['href']
                            id = id.replace('details.php?id=', '')
                            id = id.replace('&hit=1', '')

                            try:
                                title = link.getText()
                                id = int(id)
                                seeders = int(cells[10].getText())
                                leechers = int(cells[11].getText())
                            except (AttributeError, TypeError):
                                continue

                            #Filter unseeded torrent
                            if mode != 'RSS' and (seeders < self.minseed
                                                  or leechers < self.minleech):
                                continue

                            if not title or not download_url:
                                continue

                            item = title, download_url, id, seeders, leechers
                            logger.log(
                                u"Found result: " + title.replace(' ', '.') +
                                " (" + searchURL + ")", logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing " + self.name + " Traceback: " +
                        traceback.format_exc(), logger.ERROR)

            #For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]
Code example #21
    def search(self, search_strings, age=0, ep_obj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self.login():
            return results

        for mode in search_strings.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string,
                               logger.DEBUG)

                self.search_params['search'] = search_string

                data = self.get_url(self.urls['search'],
                                    params=self.search_params)
                if not data:
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        result_linkz = html.findAll(
                            'a', href=re.compile("torrents-details"))

                        if not result_linkz:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        for link in result_linkz:
                            title = link.text
                            download_url = self.urls['base_url'] + link['href']
                            download_url = download_url.replace(
                                "torrents-details", "download")
                            # FIXME
                            size = -1
                            seeders = 1
                            leechers = 0

                            if not title or not download_url:
                                continue

                            # Filter unseeded torrent
                            # if seeders < self.minseed or leechers < self.minleech:
                            #    if mode != 'RSS':
                            #        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            #    continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title,
                                           logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
Code example #22
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []

        anime = (self.show and self.show.anime) or (ep_obj and ep_obj.show and ep_obj.show.anime) or False
        search_params = {
            "q": "",
            "field": "seeders",
            "sorder": "desc",
            "rss": 1,
            "category": ("tv", "anime")[anime]
        }

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                search_params["q"] = search_string if mode != "RSS" else ""
                search_params["field"] = "seeders" if mode != "RSS" else "time_add"

                if mode != "RSS":
                    logger.log("Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)

                search_url = self.urls["search"] % ("usearch" if mode != "RSS" else search_string)
                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log("Invalid custom url: {}".format(self.custom_url), logger.WARNING)
                        return results
                    search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

                data = self.get_url(search_url, params=search_params, returns="text")
                if not data:
                    logger.log("URL did not return data, maybe try a custom url, or a different one", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    for item in html.find_all("item"):
                        try:
                            title = item.title.get_text(strip=True)
                            # Use the torcache link KAT provides, unless it
                            # is not torcache or we are not using blackhole,
                            # because we want magnets when connecting directly
                            # to the client so that proxies keep working.
                            download_url = item.enclosure["url"]
                            if sickbeard.TORRENT_METHOD != "blackhole" or "torcache" not in download_url:
                                download_url = item.find("torrent:magneturi").next.replace("CDATA", "").strip("[!]") + self._custom_trackers

                            if not (title and download_url):
                                continue

                            seeders = try_int(item.find("torrent:seeds").get_text(strip=True))
                            leechers = try_int(item.find("torrent:peers").get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            verified = bool(try_int(item.find("torrent:verified").get_text(strip=True)))
                            if self.confirmed and not verified:
                                if mode != "RSS":
                                    logger.log("Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                                continue

                            torrent_size = item.find("torrent:contentlength").get_text(strip=True)
                            size = convert_size(torrent_size) or -1
                            info_hash = item.find("torrent:infohash").get_text(strip=True)

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
                            if mode != "RSS":
                                logger.log("Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                            items.append(item)

                        except (AttributeError, TypeError, KeyError, ValueError):
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results
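
The comment block in example #22 explains the download-link choice: keep KAT's torcache .torrent only for blackhole downloads, otherwise prefer the magnet so clients behind proxies still work. A hedged sketch of that decision (names are illustrative; self._custom_trackers is assumed to expand to repeated '&tr=' parameters):

def pick_download_url(enclosure_url, magnet_uri, torrent_method, trackers=()):
    # .torrent enclosure only when dropping files into a blackhole dir
    # via torcache; magnet (plus extra trackers) in every other case.
    if torrent_method == "blackhole" and "torcache" in enclosure_url:
        return enclosure_url
    return magnet_uri + "".join("&tr=" + t for t in trackers)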
Code example #23
File: transmitthenet.py Project: zaibon/SickRage
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params = {
                    'searchtext': search_string,
                    'filter_freeleech': (0, 1)[self.freeleech is True],
                    'order_by': ('seeders', 'time')[mode == 'RSS'],
                    "order_way": "desc"
                }

                if not search_string:
                    del search_params['searchtext']

                data = self.get_url(self.urls['search'],
                                    params=search_params,
                                    returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        torrent_table = html.find('table',
                                                  {'id': 'torrent_table'})
                        if not torrent_table:
                            logger.log(
                                "Data returned from {0} does not contain any torrents"
                                .format(self.name), logger.DEBUG)
                            continue

                        labels = [
                            x.get_text(strip=True) or x.a.img.get('alt')
                            for x in torrent_table.find(
                                'tr', class_='colhead').find_all('td')
                        ]
                        torrent_rows = torrent_table('tr', class_='torrent')

                        # Continue only if at least one release is found
                        if not torrent_rows:
                            logger.log(
                                "Data returned from {0} does not contain any torrents"
                                .format(self.name), logger.DEBUG)
                            continue

                        for torrent_row in torrent_rows:
                            freeleech = torrent_row.find(
                                'img', alt="Freeleech") is not None
                            if self.freeleech and not freeleech:
                                continue

                            # Normal Download Link
                            download_item = torrent_row.find(
                                'a', {'title': 'Download Torrent'})

                            if not download_item:
                                # If the user has downloaded it
                                download_item = torrent_row.find(
                                    'a', {
                                        'title':
                                        'Previously Grabbed Torrent File'
                                    })
                            if not download_item:
                                # If the user is seeding
                                download_item = torrent_row.find(
                                    'a',
                                    {'title': 'Currently Seeding Torrent'})
                            if not download_item:
                                # If the user is leeching
                                download_item = torrent_row.find(
                                    'a',
                                    {'title': 'Currently Leeching Torrent'})
                            if not download_item:
                                # If there are none
                                continue

                            download_url = urljoin(self.url,
                                                   download_item['href'])

                            temp_anchor = torrent_row.find(
                                'a', {"data-src": True})
                            title = temp_anchor['data-src'].rsplit('.', 1)[0]
                            if not all([title, download_url]):
                                continue

                            cells = torrent_row('td')
                            seeders = try_int(
                                cells[labels.index('Seeders')].text.strip())
                            leechers = try_int(
                                cells[labels.index('Leechers')].get_text(
                                    strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the"
                                        " minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            size = temp_anchor['data-filesize'] or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != 'RSS':
                                logger.log(
                                    "Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                except Exception:
                    logger.log(
                        "Failed parsing provider. Traceback: {0}".format(
                            traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
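Every listing here leans on a try_int helper to coerce scraped values to integers before filtering and sorting. A minimal sketch of such a helper, assuming the try_int(candidate, default_value=0) signature implied by the calls above (not necessarily the project's exact implementation):

def try_int(candidate, default_value=0):
    # Coerce a scraped value such as '12' (or None / '') to int,
    # falling back to the supplied default instead of raising.
    try:
        return int(candidate)
    except (TypeError, ValueError):
        return default_value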
コード例 #24
0
ファイル: freshontv.py プロジェクト: jossandra/SickRage
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '3' if self.freeleech else '0'

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                searchURL = self.urls['search'] % (freeleech, search_string)

                logger.log(u"Search string: " + searchURL, logger.DEBUG)

                # returns top 15 results by default, expandable in user profile to 100
                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, features=["html5lib",
                                                   "permissive"]) as html:
                        torrent_table = html.find('table',
                                                  attrs={'class': 'frame'})
                        torrent_rows = torrent_table.findChildren(
                            'tr') if torrent_table else []

                        # Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                u"Data returned from " + self.name +
                                " does not contain any torrents", logger.DEBUG)
                            continue

                        # skip colheader
                        for result in torrent_rows[1:]:
                            cells = result.findChildren('td')

                            link = cells[1].find(
                                'a', attrs={'class': 'torrent_name_link'})
                            # Skip if the torrent has been nuked due to poor quality
                            if cells[1].find('img', alt='Nuked') is not None:
                                continue

                            torrent_id = link['href'].replace(
                                '/details.php?id=', '')

                            try:
                                if link.has_attr('title'):
                                    title = link['title']
                                else:
                                    title = link.contents[0]
                                download_url = self.urls['download'] % torrent_id
                                torrent_id = int(torrent_id)

                                seeders = int(cells[8].find(
                                    'a', {
                                        'class': 'link'
                                    }).span.contents[0].strip())
                                leechers = int(cells[9].find(
                                    'a', {
                                        'class': 'link'
                                    }).contents[0].strip())
                            except (AttributeError, TypeError):
                                continue

                            # Filter unseeded torrents
                            if mode != 'RSS' and (seeders < self.minseed
                                                  or leechers < self.minleech):
                                continue

                            if not title or not download_url:
                                continue

                            item = title, download_url, torrent_id, seeders, leechers
                            logger.log(
                                u"Found result: " + title + "(" + searchURL +
                                ")", logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing " + self.name + " Traceback: " +
                        traceback.format_exc(), logger.ERROR)

            # For each search mode, sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
コード例 #25
0
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        for mode in search_params:
            items = []
            logger.log(u'Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:
                if search_string == '':
                    continue

                search_string = six.text_type(search_string).replace('.', ' ')
                logger.log(u'Search string: {0}'.format(search_string),
                           logger.DEBUG)

                last_page = False
                for page in range(0, self.max_pages):
                    if last_page:
                        break

                    logger.log('Processing page {0} of results'.format(page),
                               logger.DEBUG)
                    search_url = self.urls['search'].format(
                        search_string, page)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u'No data returned from provider',
                                   logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            table_header = html.find('tr', class_='bordo')
                            torrent_table = table_header.find_parent(
                                'table') if table_header else None
                            if not torrent_table:
                                logger.log(u'Could not find table of torrents',
                                           logger.ERROR)
                                continue

                            torrent_rows = torrent_table('tr')

                            # Continue only if one Release is found
                            if len(torrent_rows) < 6 or len(
                                    torrent_rows[2]('td')) == 1:
                                logger.log(
                                    u'Data returned from provider does not contain any torrents',
                                    logger.DEBUG)
                                last_page = True
                                continue

                            if len(torrent_rows) < 45:
                                last_page = True

                            for result in torrent_rows[2:-3]:
                                result_cols = result('td')
                                if len(result_cols) == 1:
                                    # Ignore empty rows in the middle of the table
                                    continue
                                try:
                                    info_link = result('td')[1].find(
                                        'a')['href']
                                    title = re.sub(
                                        ' +', ' ',
                                        info_link.rsplit('/', 1)[-1].replace(
                                            '_', ' '))
                                    info_hash = result('td')[3].find(
                                        'input',
                                        class_='downarrow')['value'].upper()
                                    download_url = self._magnet_from_result(
                                        info_hash, title)
                                    seeders = try_int(result('td')[5].text)
                                    leechers = try_int(result('td')[6].text)
                                    torrent_size = result('td')[2].string
                                    size = convert_size(torrent_size) or -1

                                except (AttributeError, IndexError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(
                                    self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(
                                        title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(
                                        title) and not self.subtitle:
                                    logger.log(
                                        u'Torrent is subtitled, skipping: {0}'.
                                        format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(
                                        title):
                                    logger.log(
                                        u'Torrent isn\'t english audio/subtitled, skipping: {0}'
                                        .format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})',
                                                       search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if (show_title.lower() != search_show.lower() and
                                        search_show.lower() in show_title.lower()):
                                    title = search_show + ep_params

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'[Ee]\d{1,2}(?:-\d{1,2})?',
                                                   '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    logger.log(
                                        u'Discarding torrent because it doesn\'t meet the minimum'
                                        u' seeders or leechers: {0} (S:{1} L:{2})'
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                    continue

                                item = {
                                    'title': title,
                                    'link': download_url,
                                    'size': size,
                                    'seeders': seeders,
                                    'leechers': leechers,
                                    'hash': info_hash
                                }
                                if mode != 'RSS':
                                    logger.log(
                                        u'Found result: {0} with {1} seeders and {2} leechers'
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)

                                items.append(item)

                    except Exception as error:
                        logger.log(
                            u'Failed parsing provider. Error: {0}'.format(
                                error), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                           reverse=True)

                results += items

        return results
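A standalone illustration of the season-token split used above, with a hypothetical title (the pattern [Ss]\d{1,2} matches tokens like S02):

import re

SEASON_TOKEN = re.compile(r'([Ss]\d{1,2})')

title = 'Some Show S02 720p HDTV'            # hypothetical title
show_title = SEASON_TOKEN.split(title)[0]    # 'Some Show '
match = SEASON_TOKEN.search(title)
ep_params = title[match.start():] if match else ''   # 'S02 720p HDTV'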
コード例 #26
0
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:
                search_url = self.urls[
                    'verified'] if self.confirmed else self.urls['feed']
                if mode != 'RSS':
                    logger.log(
                        u"Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                data = self.get_url(search_url,
                                    params={'q': search_string},
                                    returns='text')
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if not data.startswith("<?xml"):
                    logger.log(
                        u"Expected xml but got something else, is your mirror failing?",
                        logger.INFO)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as parser:
                        for item in parser('item'):
                            if item.category and 'tv' not in item.category.get_text(
                                    strip=True):
                                continue

                            title = item.title.text.rsplit(' ', 1)[0].replace(
                                ' ', '.')
                            t_hash = item.guid.text.rsplit('/', 1)[-1]

                            if not all([title, t_hash]):
                                continue

                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + self._custom_trackers
                            torrent_size, seeders, leechers = self._split_description(
                                item.find('description').text)
                            size = convert_size(torrent_size) or -1

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            result = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': t_hash
                            }
                            items.append(result)
                except StandardError:
                    logger.log(
                        u"Failed parsing provider. Traceback: {0!r}".format(
                            traceback.format_exc()), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
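A minimal sketch of assembling a magnet URI the way the listing above concatenates one, with hypothetical hash and tracker values; unlike the raw concatenation, the display name and tracker URLs are percent-encoded here:

from six.moves.urllib.parse import quote_plus

def build_magnet(info_hash, title, trackers=()):
    # xt = exact topic (BitTorrent info hash), dn = display name, tr = tracker
    uri = 'magnet:?xt=urn:btih:{0}&dn={1}'.format(info_hash, quote_plus(title))
    return uri + ''.join('&tr=' + quote_plus(t) for t in trackers)

build_magnet('0123456789abcdef0123456789abcdef01234567',    # hypothetical hash
             'Some.Show.S01E01.720p',
             ['udp://tracker.example.org:1337/announce'])   # hypothetical tracker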
コード例 #27
0
ファイル: alpharatio.py プロジェクト: youdroid/SickChill
    def search(self, search_strings, age=0, ep_obj=None):
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            "searchstr": "",
            "filter_cat[1]": 1,
            "filter_cat[2]": 1,
            "filter_cat[3]": 1,
            "filter_cat[4]": 1,
            "filter_cat[5]": 1
        }

        # Units
        units = ["B", "KB", "MB", "GB", "TB", "PB"]

        def process_column_header(td):
            result = ""
            if td.a and td.a.img:
                result = td.a.img.get("title", td.a.get_text(strip=True))
            if not result:
                result = td.get_text(strip=True)
            return result

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode != "RSS":
                    logger.log(
                        "Search string: {0}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params["searchstr"] = search_string
                search_url = self.urls["search"]
                data = self.get_url(search_url,
                                    params=search_params,
                                    returns="text")
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    torrent_table = html.find("table", id="torrent_table")
                    torrent_rows = torrent_table("tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            "Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    # "", "", "Name /Year", "Files", "Time", "Size", "Snatches", "Seeders", "Leechers"
                    labels = [
                        process_column_header(label)
                        for label in torrent_rows[0]("td")
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result("td")
                        if len(cells) < len(labels):
                            continue

                        try:
                            title = cells[labels.index("Name /Year")].find(
                                "a", dir="ltr").get_text(strip=True)
                            download_url = urljoin(
                                self.url,
                                cells[labels.index("Name /Year")].find(
                                    "a", title="Download")["href"])
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                cells[labels.index("Seeders")].get_text(
                                    strip=True))
                            leechers = try_int(
                                cells[labels.index("Leechers")].get_text(
                                    strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log(
                                        "Discarding torrent because it doesn't meet the"
                                        " minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index(
                                "Size")].get_text(strip=True)
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'hash': ''
                            }
                            if mode != "RSS":
                                logger.log(
                                    "Found result: {0} with {1} seeders and {2} leechers"
                                    .format(title, seeders,
                                            leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)),
                       reverse=True)
            results += items

        return results
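The label-driven cell lookup above keeps the scraper working even if the tracker reorders its columns. A small self-contained illustration with hypothetical markup:

from bs4 import BeautifulSoup

html = ('<table id="torrent_table">'
        '<tr><td>Name /Year</td><td>Size</td><td>Seeders</td><td>Leechers</td></tr>'
        '<tr><td>Some.Show.S01</td><td>1.2 GB</td><td>42</td><td>7</td></tr>'
        '</table>')
soup = BeautifulSoup(html, 'html5lib')
rows = soup.find('table', id='torrent_table')('tr')
labels = [td.get_text(strip=True) for td in rows[0]('td')]
cells = rows[1]('td')
print(cells[labels.index('Seeders')].get_text(strip=True))   # '42'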
コード例 #28
0
ファイル: speedcd.py プロジェクト: p0psicles/SickRage-1
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        results = []
        if not self.login():
            return results

        # http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
        # Search Params
        search_params = {
            'c2': 1,  # TV/Episodes
            'c30': 1,  # Anime
            'c41': 1,  # TV/Packs
            'c49': 1,  # TV/HD
            'c50': 1,  # TV/Sports
            'c52': 1,  # TV/B-Ray
            'c55': 1,  # TV/Kids
            'search': '',
        }

        # Units
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            result = ''
            if td.a and td.a.img:
                result = td.a.img.get('alt', td.a.get_text(strip=True))
            if td.img and not result:
                result = td.img.get('alt', '')
            if not result:
                result = td.get_text(strip=True)
            return result

        if self.freeleech:
            search_params['freeleech'] = 'on'

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(
                        u"Search string: {}".format(
                            search_string.decode("utf-8")), logger.DEBUG)

                search_params['search'] = search_string

                search_url = "%s?%s" % (self.urls['search'],
                                        urlencode(search_params))
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

                data = self.get_url(search_url)
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_div = html.find('div', class_='boxContent')
                    torrent_table = torrent_div.find(
                        'table') if torrent_div else None
                    torrent_rows = torrent_table.find_all(
                        'tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log(
                            u"Data returned from provider does not contain any torrents",
                            logger.DEBUG)
                        continue

                    labels = [
                        process_column_header(label)
                        for label in torrent_rows[0].find_all('th')
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.find_all('td')

                            title = cells[labels.index('Title')].find(
                                'a', class_='torrent').get_text()
                            download_url = self.url + cells[labels.index(
                                'Download')].find(
                                    title='Download').parent['href']
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(
                                cells[labels.index('Seeders')].get_text(
                                    strip=True))
                            leechers = try_int(
                                cells[labels.index('Leechers')].get_text(
                                    strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index(
                                'Size')].get_text()
                            # TODO: Make convert_size work with 123.12GB
                            torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
                            size = convert_size(torrent_size,
                                                units=units) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(
                                    u"Found result: %s with %s seeders and %s leechers"
                                    % (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results
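The TODO above concerns size tokens like '123.12GB' that arrive without a space; the fixed [-2:] slice also breaks on single-letter units such as 'B'. A hedged sketch of a unit-length-agnostic normalizer (split_size_token is a hypothetical helper, not part of the project):

import re

def split_size_token(token):
    # Insert a space between the numeric part and the unit so a generic
    # convert_size('123.12 GB', units=[...])-style parser can consume it.
    match = re.match(r'^([\d.,]+)\s*([A-Za-z]+)$', token.strip())
    return '{0} {1}'.format(match.group(1), match.group(2)) if match else token

split_size_token('123.12GB')   # -> '123.12 GB'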
コード例 #29
0
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self._doLogin():
            return []

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                searchURL = self.urls['search'] % (search_string, self.categories)

                logger.log(u"Search string: " + searchURL, logger.DEBUG)

                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
                        torrent_table = html.find('table', attrs={'border': '1'})
                        torrent_rows = torrent_table.find_all('tr') if torrent_table else []

                        # Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(u"Data returned from " + self.name + " does not contain any torrents",
                                       logger.DEBUG)
                            continue

                        for result in torrent_rows[1:]:
                            cells = result.find_all('td')

                            link = cells[1].find('a', attrs={'class': 'index'})

                            full_id = link['href'].replace('details.php?id=', '')
                            torrent_id = full_id.split("&")[0]

                            try:
                                if link.has_attr('title'):
                                    title = link['title']
                                else:
                                    title = link.contents[0]
                                download_url = self.urls['download'] % (torrent_id, link.contents[0])
                                torrent_id = int(torrent_id)
                                seeders = int(cells[8].find('span').contents[0])
                                leechers = int(cells[9].find('span').contents[0])
                            except (AttributeError, TypeError):
                                continue

                            # Filter unseeded torrents
                            if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
                                continue

                            if not title or not download_url:
                                continue

                            item = title, download_url, torrent_id, seeders, leechers
                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

            # For each search mode, sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
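The href surgery above (a .replace followed by a split on '&') can be done more defensively with the standard URL parser; a small sketch with a hypothetical helper name:

from six.moves.urllib.parse import parse_qs, urlparse

def torrent_id_from_href(href):
    # Pull the id query parameter out of a details.php?id=...&hit=1 style link.
    return parse_qs(urlparse(href).query).get('id', [None])[0]

torrent_id_from_href('details.php?id=12345&hit=1')   # -> '12345'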
コード例 #30
0
    def search(self, search_strings, age=0, ep_obj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self.login():
            return results

        for mode in search_strings.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string,
                               logger.DEBUG)

                searchURL = self.urls['search'] % (search_string,
                                                   self.categories)
                logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)

                data = self.get_url(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        torrent_table = html.find(
                            'table', attrs={'id': 'torrent_table'})
                        torrent_rows = torrent_table.find_all(
                            'tr') if torrent_table else []

                        # Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            logger.log(
                                u"Data returned from provider does not contain any torrents",
                                logger.DEBUG)
                            continue

                        for result in torrent_rows[1:]:
                            cells = result.find_all('td')
                            link = result.find('a', attrs={'dir': 'ltr'})
                            url = result.find('a', attrs={'title': 'Download'})

                            try:
                                title = link.contents[0]
                                download_url = self.urls['download'] % (
                                    url['href'])
                                seeders = try_int(cells[-2].get_text(strip=True))
                                leechers = try_int(cells[-1].get_text(strip=True))
                                # FIXME
                                size = -1
                            except (AttributeError, TypeError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(
                                        u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                        .format(title, seeders,
                                                leechers), logger.DEBUG)
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title,
                                           logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(
                        u"Failed parsing provider. Traceback: %s" %
                        traceback.format_exc(), logger.WARNING)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
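Whether the items are tuples sorted on tup[3] or dicts sorted on d.get('seeders'), the intent is the same: best-seeded results first. A compact illustration with hypothetical results, reusing the try_int sketch from earlier:

results = [
    {'title': 'A', 'seeders': 3},
    {'title': 'B', 'seeders': '12'},   # scraped values may still be strings
    {'title': 'C', 'seeders': None},
]
results.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
# -> B (12), A (3), C (0)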