def search_propers():

    if not sickbeard.DOWNLOAD_PROPERS:
        return

    logger.log(u'Beginning search for new propers')

    age_shows, age_anime = sickbeard.BACKLOG_DAYS + 2, 14
    aired_since_shows = datetime.datetime.today() - datetime.timedelta(days=age_shows)
    aired_since_anime = datetime.datetime.today() - datetime.timedelta(days=age_anime)
    recent_shows, recent_anime = _recent_history(aired_since_shows, aired_since_anime)
    if recent_shows or recent_anime:
        propers = _get_proper_list(aired_since_shows, recent_shows, recent_anime)

        if propers:
            _download_propers(propers)
    else:
        logger.log(u'No downloads or snatches found for the last %s%s days to use for a propers search' %
                   (age_shows, ('', ' (%s for anime)' % age_anime)[helpers.has_anime()]))

    _set_last_proper_search(datetime.datetime.today().toordinal())

    run_at = ''
    proper_sch = sickbeard.properFinderScheduler
    if None is proper_sch.start_time:
        run_in = proper_sch.lastRun + proper_sch.cycleTime - datetime.datetime.now()
        run_at = u', next check '
        if datetime.timedelta() > run_in:
            run_at += u'imminent'
        else:
            hours, remainder = divmod(run_in.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            run_at += u'in approx. ' + ('%dh, %dm' % (hours, minutes) if 0 < hours else '%dm, %ds' % (minutes, seconds))

    logger.log(u'Completed the search for new propers%s' % run_at)
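The 'No downloads or snatches' log line above picks its anime suffix by indexing a 2-tuple with a boolean, an idiom these examples use throughout: False is 0 and selects the first element, True is 1 and selects the second. A minimal standalone demo with placeholder values:

# Placeholder values; a bool subscript picks one of the two tuple elements
has_anime = True
age_shows, age_anime = 2, 14
suffix = ('', ' (%s for anime)' % age_anime)[has_anime]
print('last %s%s days' % (age_shows, suffix))  # -> last 2 (14 for anime) days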
Example #2
def search_propers():

    if not sickbeard.DOWNLOAD_PROPERS:
        return

    logger.log(u'Beginning search for new propers')

    age_shows, age_anime = 2, 14
    aired_since_shows = datetime.datetime.today() - datetime.timedelta(days=age_shows)
    aired_since_anime = datetime.datetime.today() - datetime.timedelta(days=age_anime)
    recent_shows, recent_anime = _recent_history(aired_since_shows, aired_since_anime)
    if recent_shows or recent_anime:
        propers = _get_proper_list(aired_since_shows, recent_shows, recent_anime)

        if propers:
            _download_propers(propers)
    else:
        logger.log(u'No downloads or snatches found for the last %s%s days to use for a propers search' %
                   (age_shows, ('', ' (%s for anime)' % age_anime)[helpers.has_anime()]))

    _set_last_proper_search(datetime.datetime.today().toordinal())

    run_at = ''
    if None is sickbeard.properFinderScheduler.start_time:
        run_in = sickbeard.properFinderScheduler.lastRun + sickbeard.properFinderScheduler.cycleTime - datetime.datetime.now()
        hours, remainder = divmod(run_in.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        run_at = u', next check in approx. ' + (
            '%dh, %dm' % (hours, minutes) if 0 < hours else '%dm, %ds' % (minutes, seconds))

    logger.log(u'Completed the search for new propers%s' % run_at)
Example #3
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = '+'.join(search_string.split())
                post_data = dict(
                    {
                        '/browse.php?': None,
                        'cata': 'yes',
                        'jxt': 8,
                        'jxw': 'b',
                        'search': search_string
                    }, **self.categories[(mode, 'Episode')['Propers' == mode]])
                if ('Cache' == mode
                        and has_anime()) or (mode in ['Season', 'Episode']
                                             and self.show
                                             and self.show.is_anime):
                    post_data.update({'c29': 1})

                if self.freeleech:
                    post_data.update({'free': 'on'})

                data_json = self.get_url(self.urls['search'],
                                         post_data=post_data,
                                         json=True)

                cnt = len(items[mode])
                try:
                    if not data_json:
                        raise generic.HaltParseException
                    torrents = data_json.get('Fs')[0].get('Cn').get('torrents')

                    for item in torrents:
                        seeders, leechers, size = [
                            tryInt(n, n) for n in
                            [item.get(x) for x in ('seed', 'leech', 'size')]
                        ]
                        if self._peers_fail(mode, seeders, leechers):
                            continue

                        title = re.sub(r'\[.*=.*\].*\[/.*\]', '', item['name'])

                        download_url = self.urls['get'] % (item['id'],
                                                           item['fname'])

                        if title and download_url:
                            items[mode].append((title, download_url, seeders,
                                                self._bytesizer(size)))

                except Exception:
                    time.sleep(1.1)
Example #4
    def _set_categories(self, mode):
        # set up categories
        html = self.get_url(self.urls['edit'])
        if self.should_skip():
            return None, None
        try:
            form = re.findall('(?is).*(<form.*?save.*?</form>)', html)[0]
            save_url = self._link(re.findall('(?i)action="([^"]+?)"', form)[0])
            tags = re.findall(r'(?is)(<input[^>]*?name=[\'"][^\'"]+[^>]*)',
                              form)
        except (BaseException, Exception):
            return None, None

        cats, params = [], {}
        attrs = [[(re.findall(r'(?is)%s=[\'"]([^\'"]+)' % attr, c) or [''])[0]
                  for attr in ['type', 'name', 'value', 'checked']]
                 for c in tags]
        for itype, name, value, checked in attrs:
            if 'cat' == name[0:3] and 'checkbox' == itype.lower():
                if any(checked):
                    try:
                        cats += [re.findall(r'(\d+)[^\d]*$', name)[0]]
                    except IndexError:
                        pass
            elif 'hidden' == itype.lower() or 'nothing' in name or \
                    (itype.lower() in ['checkbox', 'radio'] and any(checked)):
                params[name] = value
        selects = re.findall('(?is)(<select.*?</select>)', form)
        for select in selects:
            name, values, index = None, None, 0
            try:
                name = re.findall(r'(?is)<select\sname="([^"]+)"', select)[0]
                values = re.findall(
                    '(?is)value="([^"]+)"[^"]+("selected"|</option)', select)
                index = ['"selected"' in x[1] for x in values].index(True)
            except ValueError:
                pass
            except IndexError:
                continue
            if name and values:
                params[name] = values[index][0]

        restore = params.copy()
        restore.update(dict(('cat%s' % c, 'yes') for c in cats))
        params.update(
            dict(('cat%s' % c, 'yes') for c in (
                self.categories[(mode, 'Episode')['Propers' == mode]] +
                ([], self.categories['anime'])[
                    (re.search('(Ca|Pr)', mode) and has_anime()) or all([
                        re.search('(Se|Ep)', mode) and self.show
                        and self.show.is_anime
                    ])])))
        params['torrentsperpage'] = 40
        self.get_url(save_url, post_data=params)
        if self.should_skip():
            return None, None

        return save_url, restore
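_set_categories above returns the form's action URL together with a restore snapshot of the account's current settings, so a caller can post the snapshot back once the search is done. A hedged usage sketch inside the provider class; _do_search is a hypothetical placeholder for the actual search step:

    def _search_with_categories(self, mode):
        # Switch the account profile to this mode's categories, keeping the
        # snapshot so the user's own settings can be put back afterwards
        save_url, restore = self._set_categories(mode)
        if not save_url:
            return []
        try:
            return self._do_search(mode)  # hypothetical: run the search itself
        finally:
            self.get_url(save_url, post_data=restore)  # restore original settings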
Example #5
    def _categories_string(self, mode='Cache', template='c%s=1', delimiter='&'):

        return delimiter.join([('%s', template)[any(template)] % c for c in sorted(
            'shows' in self.categories and (isinstance(self.categories['shows'], type([])) and
                                            self.categories['shows'] or [self.categories['shows']]) or
            self.categories[(mode, 'Episode')['Propers' == mode]] +
            ([], self.categories.get('anime') or [])[
                (mode in ['Cache', 'Propers'] and helpers.has_anime()) or
                ((mode in ['Season', 'Episode']) and self.show and self.show.is_anime)])])
Example #6
    def _categories_string(self,
                           mode='Cache',
                           template='c%s=1',
                           delimiter='&'):

        return delimiter.join([
            ('%s', template)[any(template)] % c
            for c in sorted(self.categories['shows'] +
                            ([], [] if 'anime' not in
                             self.categories else self.categories['anime'])[
                                 ('Cache' == mode and helpers.has_anime()) or (
                                     (mode in ['Season', 'Episode'])
                                     and self.show and self.show.is_anime)])
        ])
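The one-line joins in Examples #5 and #6 pack the category selection into a single expression (Example #5 additionally tolerates a scalar or missing 'shows' entry). A step-by-step standalone equivalent of Example #6's logic; the function parameters stand in for the instance attributes (self.categories, self.show, helpers.has_anime()):

def categories_string(categories, mode='Cache', template='c%s=1', delimiter='&',
                      has_anime=False, show_is_anime=False):
    cats = list(categories['shows'])
    # Anime categories are added when browsing the cache while any anime
    # show exists, or when the current show itself is an anime
    if ('Cache' == mode and has_anime) or (mode in ['Season', 'Episode'] and show_is_anime):
        cats += categories.get('anime', [])
    fmt = template if any(template) else '%s'  # an empty template falls back to bare '%s'
    return delimiter.join(fmt % c for c in sorted(cats))

# categories_string({'shows': [5, 2], 'anime': [29]}, has_anime=True) -> 'c2=1&c5=1&c29=1'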
Example #7
    def _search_provider(self, search_params, **kwargs):

        results = []
        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        for mode in search_params.keys():
            search_show = mode in ['Season', 'Episode']
            if not search_show and helpers.has_anime():
                search_params[mode] *= (1, 2)['Cache' == mode]

            for enum, search_string in enumerate(search_params[mode]):
                search_url = self.urls['search'] % \
                    (('tv', 'anime')[(search_show and bool(self.show and self.show.is_anime)) or bool(enum)],
                     (re.sub(r'[.\s]+', ' ', search_string), 'x264')['Cache' == mode])

                data_json = self.get_url(search_url, json=True)

                cnt = len(items[mode])
                try:
                    for item in data_json['torrents']:
                        seeders, leechers, title, download_magnet, size = [
                            tryInt(n, n) for n in [
                                item.get(x) for x in [
                                    'seeds', 'leeches', 'torrent_title',
                                    'magnet_uri', 'size'
                                ]
                            ]
                        ]
                        if self._peers_fail(mode, seeders, leechers):
                            continue

                        if title and download_magnet:
                            items[mode].append(
                                (title, download_magnet, seeders,
                                 self._bytesizer(size)))

                except Exception:
                    pass
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            self._sort_seeders(mode, items)

            results = list(set(results + items[mode]))

        return results
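In Example #7 above, search_params[mode] *= (1, 2)['Cache' == mode] doubles the cache search list so the enumerate pass queries the 'tv' endpoint for the first copy and 'anime' for the second. A standalone demo with made-up values:

search_params = {'Cache': ['x264'], 'Episode': ['Show S01E01']}
for mode in search_params:
    search_params[mode] *= (1, 2)['Cache' == mode]  # list repetition: x1 or x2
for enum, term in enumerate(search_params['Cache']):
    print('%s %s %s' % (enum, ('tv', 'anime')[bool(enum)], term))
# 0 tv x264
# 1 anime x264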
Example #8
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = '+'.join(search_string.split())
                post_data = dict({'/browse.php?': None, 'cata': 'yes', 'jxt': 8, 'jxw': 'b', 'search': search_string},
                                 **self.categories[(mode, 'Episode')['Propers' == mode]])
                if ('Cache' == mode and has_anime()) or (
                        mode in ['Season', 'Episode'] and self.show and self.show.is_anime):
                    post_data.update({'c29': 1})

                if self.freeleech:
                    post_data.update({'free': 'on'})

                data_json = self.get_url(self.urls['search'], post_data=post_data, json=True)

                cnt = len(items[mode])
                try:
                    if not data_json:
                        raise generic.HaltParseException
                    torrents = data_json.get('Fs')[0].get('Cn').get('torrents')

                    for item in torrents:
                        seeders, leechers, size = [tryInt(n, n) for n in [item.get(x) for x in ('seed', 'leech', 'size')]]
                        if self._peers_fail(mode, seeders, leechers):
                            continue

                        title = re.sub(r'\[.*=.*\].*\[/.*\]', '', item['name'])

                        download_url = self.urls['get'] % (item['id'], item['fname'])

                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                except Exception:
                    time.sleep(1.1)
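Examples #3 and #8 reach the torrent list via data_json.get('Fs')[0].get('Cn').get('torrents'), which raises on any missing piece and relies on the broad except clause to recover. A hedged defensive sketch of the same traversal; the response layout is inferred from the example, not from provider documentation:

def torrents_from_response(data_json):
    # Walk the assumed {'Fs': [{'Cn': {'torrents': [...]}}]} layout and
    # return an empty list instead of raising when any level is missing
    try:
        return data_json['Fs'][0]['Cn']['torrents'] or []
    except (KeyError, IndexError, TypeError):
        return []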
Example #9
    def _categories_string(self, mode='Cache', template='c%s=1', delimiter='&'):

        return delimiter.join([('%s', template)[any(template)] % c for c in sorted(self.categories['shows'] + (
            [], [] if 'anime' not in self.categories else self.categories['anime'])[
            ('Cache' == mode and helpers.has_anime()) or ((mode in ['Season', 'Episode']) and self.show and self.show.is_anime)])])
Example #10
    def _search_provider(self, search_params, search_mode='eponly', epcount=0, **kwargs):

        results = []
        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'link': 'normal', 'get': '^magnet', 'verif': 'verif'}.items())
        url = 0
        for mode in search_params.keys():
            search_show = mode in ['Season', 'Episode']
            if not search_show and has_anime():
                search_params[mode] *= (1, 2)['Cache' == mode]
                if 'Propers' == mode:
                    search_params[mode].append('v1|v2|v3|v4|v5')

            for enum, search_string in enumerate(search_params[mode]):
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string

                self.url = self.urls['base'][url]
                search_url = self.url + (self.urls['search'] % urllib.quote('%scategory:%s' % (
                    ('', '%s ' % search_string)['Cache' != mode],
                    ('tv', 'anime')[(search_show and bool(self.show and self.show.is_anime)) or bool(enum)])))

                self.session.headers.update({'Referer': search_url})
                html = self.get_url(search_url + self.urls['sorted'])

                cnt = len(items[mode])
                try:
                    if not html or 'kastatic' not in html or self._has_no_results(html) or re.search(r'(?is)<(?:h\d)[^>]*>.*?(?:did\snot\smatch)', html):
                        if html and 'kastatic' not in html:
                            # advance to the next mirror unless already on the last one
                            url += (1, 0)[url == len(self.urls['base']) - 1]
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', attrs={'class': 'data'})
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        for tr in torrent_rows[1:]:
                            try:
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -5)]]
                                if self._peers_fail(mode, seeders, leechers):
                                    continue

                                info = tr.find('div', {'class': 'torrentname'})
                                title = (info.find_all('a')[1].get_text() or info.find('a', 'cellMainLink').get_text())\
                                    .strip()
                                link = self.url + info.find('a', {'class': rc['link']})['href'].lstrip('/')

                                download_magnet = tr.find('a', href=rc['get'])['href']
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if self.confirmed and not (tr.find('a', title=rc['verif']) or tr.find('i', title=rc['verif'])):
                                logger.log(u'Skipping untrusted non-verified result: %s' % title, logger.DEBUG)
                                continue

                            # For a full season torrent, infer the expected episode count and
                            # inspect the torrent's file list to determine its real quality
                            if 'Season' == mode and 'sponly' == search_mode:
                                ep_number = int(epcount / len(set(show_name_helpers.allPossibleShowNames(self.show))))
                                title = self._find_season_quality(title, link, ep_number)

                            if title and download_magnet:
                                items[mode].append((title, download_magnet, seeders, self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            self._sort_seeders(mode, items)

            results = list(set(results + items[mode]))

        return results
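Example #10 keeps an integer cursor (url) into self.urls['base'] and bumps it when the returned page lacks the 'kastatic' marker, i.e. when a mirror serves a parked or proxy page. A hedged standalone sketch of that failover pattern; the class and the modulo wrap are illustrative, not the provider's own code:

class MirrorCursor(object):
    """Cycle through mirror base URLs when one stops serving the real site."""

    def __init__(self, mirrors):
        self.mirrors, self.pos = list(mirrors), 0

    def current(self):
        return self.mirrors[self.pos]

    def advance(self):
        # Called when a response looks like a parked/proxy page; the modulo
        # wrap keeps the cursor in range, where the example instead stops
        # advancing at the last mirror
        self.pos = (self.pos + 1) % len(self.mirrors)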