Example #1
    def update_providers(needed=common.neededQualities(need_all=True)):
        orig_thread_name = threading.currentThread().name
        threads = []

        providers = [
            x for x in sickbeard.providers.sortedProviderList()
            if x.is_active() and x.enable_recentsearch
        ]
        for cur_provider in providers:
            if not cur_provider.cache.should_update():
                continue

            if not threads:
                logger.log('Updating provider caches with recent upload data')

            # spawn a thread for each provider to save time waiting for slow response providers
            threads.append(
                threading.Thread(target=cur_provider.cache.updateCache,
                                 kwargs={'needed': needed},
                                 name='%s :: [%s]' %
                                 (orig_thread_name, cur_provider.name)))
            # start the thread we just created
            threads[-1].start()

        if not len(providers):
            logger.log(
                'No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes',
                logger.WARNING)

        if threads:
            # wait for all threads to finish
            for t in threads:
                t.join()

            logger.log('Finished updating provider caches')
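The example above fans out one worker thread per enabled provider and joins them all, so a single slow indexer cannot stall the whole cache refresh. Below is a minimal, self-contained sketch of that fan-out/join pattern; DummyProvider and its update_cache method are illustrative stand-ins, not SickGear's real provider classes.

import threading

class DummyProvider(object):
    """Stand-in provider; only the threading pattern mirrors update_providers."""
    def __init__(self, name):
        self.name = name

    def update_cache(self, needed=None):
        print('updating cache for %s (needed=%r)' % (self.name, needed))

def refresh_caches(providers, needed=None):
    threads = []
    for provider in providers:
        # one thread per provider so a slow responder does not block the others
        t = threading.Thread(target=provider.update_cache,
                             kwargs={'needed': needed},
                             name='cache :: [%s]' % provider.name)
        t.start()
        threads.append(t)
    # wait for every provider to finish before returning
    for t in threads:
        t.join()

refresh_caches([DummyProvider('nzb-a'), DummyProvider('torrent-b')])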
Example #2
    def cache_data(self, needed=neededQualities(need_all=True), **kwargs):

        if self.should_skip():
            return []

        api_key = self._init_api()
        if False is api_key:
            return self.search_html(needed=needed, **kwargs)
        results = []
        cats = self._get_cats(needed=needed)
        if None is not api_key:
            params = {'search': '',
                      'user': self.username,
                      'api': api_key,
                      'eng': 1,
                      'catid': ','.join(cats)}  # SD,HD

            url = self.urls['cache'] % urllib.urlencode(params)

            response = self.get_url(url)
            if self.should_skip():
                return results

            data = feedparser.parse(response.replace('<xml', '<?xml').replace('>\n<info>', '?>\n<feed>\n<info>')
                                    .replace('<search_req>\n', '').replace('</search_req>\n', '')
                                    .replace('post>\n', 'entry>\n').replace('</xml>', '</feed>'))
            if data and 'entries' in data:
                results = data.entries

            self._log_search('Cache', len(results), url)
        return results
Example #3
File: newznab.py Project: keithzg/SickGear
    def updateCache(self, needed=neededQualities(need_all=True), **kwargs):

        result = []

        if 4489 != sickbeard.RECENTSEARCH_FREQUENCY or self.should_update():
            n_spaces = {}
            try:
                check = self._checkAuth()
                if isinstance(check, bool) and not check:
                    items = None
                else:
                    (items, n_spaces) = self.provider.cache_data(needed=needed)
            except (StandardError, Exception):
                items = None

            if items:
                self._clearCache()

                # parse data
                cl = []
                for item in items:
                    ci = self._parseItem(n_spaces, item)
                    if ci is not None:
                        cl.append(ci)

                if 0 < len(cl):
                    my_db = self.get_db()
                    my_db.mass_action(cl)

            # record the time of this fetch attempt as the last update
            self.setLastUpdate()

        return result
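updateCache above parses each fetched item into a database statement, drops unusable ones, and commits everything through a single mass_action call. The sketch below mirrors that parse-then-batch flow; FakeDB, parse_item and the sample items are stand-ins rather than SickGear's real cache layer.

class FakeDB(object):
    """Stand-in for the cache database; just reports the batched statements."""
    def mass_action(self, ops):
        print('committing %d cache rows in one batch' % len(ops))

def refresh_cache(items, parse_item, db):
    # mirror updateCache: parse every fetched item, drop unusable ones, write once
    ops = [op for op in (parse_item(i) for i in items) if op is not None]
    if ops:
        db.mass_action(ops)

refresh_cache(['item-a', None, 'item-b'], lambda i: i and ('INSERT', i), FakeDB())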
Example #4
    def update_providers(needed=common.neededQualities(need_all=True)):
        orig_thread_name = threading.currentThread().name
        threads = []

        providers = [x for x in sickbeard.providers.sortedProviderList() if x.is_active() and x.enable_recentsearch]
        for cur_provider in providers:
            if not cur_provider.cache.should_update():
                continue

            if not threads:
                logger.log('Updating provider caches with recent upload data')

            # spawn a thread for each provider to save time waiting for slow response providers
            threads.append(threading.Thread(target=cur_provider.cache.updateCache,
                                            kwargs={'needed': needed},
                                            name='%s :: [%s]' % (orig_thread_name, cur_provider.name)))
            # start the thread we just created
            threads[-1].start()

        if not len(providers):
            logger.log('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes',
                       logger.WARNING)

        if threads:
            # wait for all threads to finish
            for t in threads:
                t.join()

            logger.log('Finished updating provider caches')
Example #5
    def search_html(self, search='', search_mode='', needed=neededQualities(need_all=True), **kwargs):

        results = []
        if None is self.cookies:
            return results

        cats = self._get_cats(needed=needed)

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': r'send\?', 'nuked': r'\bnuked',
                                                             'cat': 'cat=(?:%s)' % '|'.join(cats)}.items())
        mode = ('search', 'cache')['' == search]
        search_url = self.urls[mode + '_html'] % search
        html = self.get_url(search_url)
        if self.should_skip():
            return results
        cnt = len(results)
        try:
            if not html:
                raise generic.HaltParseException

            with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                torrent_table = soup.find('table', attrs={'id': 'table_table'})
                torrent_rows = []
                if torrent_table:
                    torrent_rows = torrent_table.find('tbody').find_all('tr')

                if 1 > len(torrent_rows):
                    raise generic.HaltParseException

                for tr in torrent_rows:
                    try:
                        if tr.find('img', src=rc['nuked']) or not tr.find('a', href=rc['cat']):
                            continue

                        title = tr.find('a', href=rc['info']).get_text().strip()
                        download_url = tr.find('a', href=rc['get'])
                        age = tr.find_all('td')[-1]['data-sort']
                    except (AttributeError, TypeError, ValueError):
                        continue

                    if title and download_url and age:
                        results.append({'release': title, 'getnzb': self._link(download_url['href']),
                                        'usenetage': int(age.strip())})

        except generic.HaltParseException:
            time.sleep(1.1)
            pass
        except (StandardError, Exception):
            logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)

        mode = (mode, search_mode)['Propers' == search_mode]
        self._log_search(mode, len(results) - cnt, search_url)
        return results
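search_html drives BeautifulSoup with pre-compiled, case-insensitive regexes so it can pull the title, download link and age out of each table row. The snippet below shows the same row-scraping idea on a made-up HTML fragment; the markup and field values are invented, only the regex-as-attribute-filter usage mirrors the example.

import re
from bs4 import BeautifulSoup  # third-party: pip install beautifulsoup4

html = '''<table id="table_table"><tbody>
<tr><td><a href="detail/1">Show.S01E01.720p</a></td>
<td><a href="send?id=1">get</a></td><td data-sort="120">2h</td></tr>
</tbody></table>'''

# compile the attribute filters once, case-insensitively, as search_html does
rc = dict((k, re.compile('(?i)' + v)) for k, v in {'info': 'detail', 'get': r'send\?'}.items())

soup = BeautifulSoup(html, 'html.parser')
for tr in soup.find('table', attrs={'id': 'table_table'}).find('tbody').find_all('tr'):
    title = tr.find('a', href=rc['info']).get_text().strip()
    link = tr.find('a', href=rc['get'])['href']
    age = tr.find_all('td')[-1]['data-sort']
    print('%s | %s | %s' % (title, link, age))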
Example #6
def get_needed_qualites(needed=None):
    if not isinstance(needed, neededQualities):
        needed = neededQualities()
    if not sickbeard.DOWNLOAD_PROPERS or needed.all_needed:
        return needed

    age_shows, age_anime = sickbeard.BACKLOG_DAYS + 2, 14
    aired_since_shows = datetime.datetime.today() - datetime.timedelta(days=age_shows)
    aired_since_anime = datetime.datetime.today() - datetime.timedelta(days=age_anime)

    my_db = db.DBConnection()
    sql_results = my_db.select(
        'SELECT DISTINCT s.indexer, s.indexer_id, e.season, e.episode FROM history as h'
        + ' INNER JOIN tv_episodes AS e ON (h.showid == e.showid AND h.season == e.season AND h.episode == e.episode)'
        + ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)'
        + ' WHERE h.date >= %s' % min(aired_since_shows, aired_since_anime).strftime(dateFormat)
        + ' AND (%s)' % ' OR '.join(['h.action LIKE "%%%02d"' % x for x in SNATCHED_ANY + [DOWNLOADED, FAILED]]))

    for sql_episode in sql_results:
        if needed.all_needed:
            break
        try:
            show = helpers.find_show_by_id(
                sickbeard.showList,
                {int(sql_episode['indexer']): int(sql_episode['indexer_id'])})
        except MultipleShowObjectsException:
            continue
        if show:
            needed.check_needed_types(show)
            if needed.all_show_qualities_needed(show) or needed.all_qualities_needed:
                continue
            ep_obj = show.getEpisode(season=sql_episode['season'],
                                     episode=sql_episode['episode'])
            if ep_obj:
                ep_status, ep_quality = Quality.splitCompositeStatus(
                    ep_obj.status)
                if ep_status in SNATCHED_ANY + [DOWNLOADED, ARCHIVED]:
                    needed.check_needed_qualities([ep_quality])

    return needed
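get_needed_qualites only inspects history rows newer than a cutoff: regular shows look back BACKLOG_DAYS + 2 days, anime uses a fixed 14-day window, and the query takes the earlier of the two dates as its lower bound. A small sketch of that cutoff arithmetic, with BACKLOG_DAYS as a placeholder value:

import datetime

BACKLOG_DAYS = 7  # placeholder for sickbeard.BACKLOG_DAYS

def history_cutoff(backlog_days=BACKLOG_DAYS):
    # shows look back slightly past the backlog window; anime always looks back 14 days
    age_shows, age_anime = backlog_days + 2, 14
    today = datetime.datetime.today()
    aired_since_shows = today - datetime.timedelta(days=age_shows)
    aired_since_anime = today - datetime.timedelta(days=age_anime)
    # the SQL above filters on the earlier (older) of the two dates
    return min(aired_since_shows, aired_since_anime)

print(history_cutoff())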
Example #7
    def _search_provider(self,
                         search,
                         search_mode='eponly',
                         epcount=0,
                         retention=0,
                         needed=neededQualities(need_all=True),
                         **kwargs):

        api_key = self._init_api()
        if False is api_key:
            return self.search_html(search,
                                    search_mode,
                                    needed=needed,
                                    **kwargs)
        results = []
        cats = self._get_cats(needed=needed)
        if None is not api_key:
            params = {
                'user': self.username,
                'api': api_key,
                'eng': 1,
                'nukes': 1,
                'catid': ','.join(cats),  # SD,HD
                'retention': retention or sickbeard.USENET_RETENTION or 0,
                'search': search
            }

            search_url = self.urls['search'] % urllib.urlencode(params)

            data_json = self.get_url(search_url, json=True)
            if self.should_skip():
                return results
            if data_json and self._check_auth_from_data(data_json,
                                                        is_xml=False):
                for item in data_json:
                    if 'release' in item and 'getnzb' in item:
                        if item.get('nuked', '').startswith('1'):
                            continue
                        results.append(item)

            mode = search_mode
            if 'eponly' == search_mode:
                mode = 'Episode'
            elif 'sponly' == search_mode:
                mode = 'Season'
            self._log_search(mode, len(results), search_url)
        return results
Example #8
    def cache_data(self, needed=neededQualities(need_all=True), **kwargs):

        if self.should_skip():
            return []

        api_key = self._init_api()
        if False is api_key:
            return self.search_html(needed=needed, **kwargs)
        results = []
        cats = self._get_cats(needed=needed)
        if None is not api_key:
            params = {
                'search': '',
                'user': self.username,
                'api': api_key,
                'eng': 1,
                'catid': ','.join(cats)
            }  # SD,HD

            url = self.urls['cache'] % urllib.urlencode(params)

            response = self.get_url(url)
            if self.should_skip():
                return results

            data = feedparser.parse(response.replace('<xml', '<?xml').replace('>\n<info>', '?>\n<feed>\n<info>')
                                    .replace('<search_req>\n', '').replace('</search_req>\n', '')
                                    .replace('post>\n', 'entry>\n').replace('</xml>', '</feed>'))
            if data and 'entries' in data:
                results = data.entries

            self._log_search('Cache', len(results), url)
        return results
Example #9
    def _search_provider(self, search, search_mode='eponly', epcount=0, retention=0,
                         needed=neededQualities(need_all=True), **kwargs):

        api_key = self._init_api()
        if False is api_key:
            return self.search_html(search, search_mode, needed=needed, **kwargs)
        results = []
        cats = self._get_cats(needed=needed)
        if None is not api_key:
            params = {'user': self.username,
                      'api': api_key,
                      'eng': 1,
                      'nukes': 1,
                      'catid': ','.join(cats),  # SD,HD
                      'retention': retention or sickbeard.USENET_RETENTION or 0,
                      'search': search}

            search_url = self.urls['search'] % urllib.urlencode(params)

            data_json = self.get_url(search_url, json=True)
            if self.should_skip():
                return results
            if data_json and self._check_auth_from_data(data_json, is_xml=False):
                for item in data_json:
                    if 'release' in item and 'getnzb' in item:
                        if item.get('nuked', '').startswith('1'):
                            continue
                        results.append(item)

            mode = search_mode
            if 'eponly' == search_mode:
                mode = 'Episode'
            elif 'sponly' == search_mode:
                mode = 'Season'
            self._log_search(mode, len(results), search_url)
        return results
Example #10
    def run(self):
        generic_queue.QueueItem.run(self)

        try:
            self._change_missing_episodes()

            show_list = sickbeard.showList
            from_date = datetime.date.fromordinal(1)
            needed = common.neededQualities()
            for curShow in show_list:
                if curShow.paused:
                    continue

                wanted_eps = wanted_episodes(curShow,
                                             from_date,
                                             unaired=sickbeard.SEARCH_UNAIRED)

                if wanted_eps:
                    if not needed.all_needed:
                        if not needed.all_types_needed:
                            needed.check_needed_types(curShow)
                        if not needed.all_qualities_needed:
                            for w in wanted_eps:
                                if needed.all_qualities_needed:
                                    break
                                if not w.show.is_anime and not w.show.is_sports:
                                    needed.check_needed_qualities(
                                        w.wantedQuality)

                    self.episodes.extend(wanted_eps)

            if sickbeard.DOWNLOAD_PROPERS:
                properFinder.get_needed_qualites(needed)

            self.update_providers(needed=needed)
            self._check_for_propers(needed)

            if not self.episodes:
                logger.log(u'No search of cache for episodes required')
                self.success = True
            else:
                num_shows = len(set([ep.show.name for ep in self.episodes]))
                logger.log(u'Found %d needed episode%s spanning %d show%s' %
                           (len(self.episodes),
                            helpers.maybe_plural(len(self.episodes)),
                            num_shows, helpers.maybe_plural(num_shows)))

                try:
                    logger.log(u'Beginning recent search for episodes')
                    found_results = search.search_for_needed_episodes(
                        self.episodes)

                    if not len(found_results):
                        logger.log(u'No needed episodes found')
                    else:
                        for result in found_results:
                            # just use the first result for now
                            logger.log(u'Downloading %s from %s' %
                                       (result.name, result.provider.name))
                            self.success = search.snatch_episode(result)

                            helpers.cpu_sleep()

                except (StandardError, Exception):
                    logger.log(traceback.format_exc(), logger.ERROR)

                if None is self.success:
                    self.success = False

        finally:
            self.finish()
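run() keeps widening a single neededQualities object while it walks the wanted episodes, and stops checking as soon as every quality is already marked as needed. The toy class below imitates only that accumulate-until-covered, break-early pattern; it is not the real common.neededQualities implementation.

class ToyNeeded(object):
    """Toy stand-in: mark qualities as needed until every known quality is covered."""
    ALL = frozenset(['sd', 'hd', 'uhd'])

    def __init__(self):
        self.needed = set()

    @property
    def all_qualities_needed(self):
        return self.needed >= self.ALL

    def check_needed_qualities(self, qualities):
        self.needed |= set(qualities)

needed = ToyNeeded()
for wanted in ({'hd'}, {'sd', 'uhd'}, {'hd'}):
    if needed.all_qualities_needed:
        break  # everything is wanted somewhere already, no point scanning further
    needed.check_needed_qualities(wanted)
print(needed.all_qualities_needed)  # True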
Example #11
File: newznab.py Project: keithzg/SickGear
    def _search_provider(self,
                         search_params,
                         needed=neededQualities(need_all=True),
                         max_items=400,
                         try_all_searches=False,
                         **kwargs):

        results, n_spaces = [], {}
        if self.should_skip():
            return results, n_spaces

        api_key = self._check_auth()
        if isinstance(api_key, bool) and not api_key:
            return results, n_spaces

        base_params = {'t': 'tvsearch',
                       'maxage': sickbeard.USENET_RETENTION or 0,
                       'limit': self.limits,
                       'attrs': ','.join([k for k, v in NewznabConstants.providerToIndexerMapping.iteritems()
                                          if v in self.caps]),
                       'offset': 0}

        uc_only = all([re.search('(?i)usenet_crawler', self.get_id())])
        base_params_uc = {'num': self.limits, 'dl': '1', 'i': '64660'}

        if isinstance(api_key, basestring) and api_key not in ('0', ''):
            base_params['apikey'] = api_key
            base_params_uc['r'] = api_key

        results, n_spaces = [], {}
        total, cnt, search_url, exit_log = 0, len(results), '', True

        cat_sport = self.cats.get(NewznabConstants.CAT_SPORT, ['5060'])
        cat_anime = self.cats.get(NewznabConstants.CAT_ANIME, ['5070'])
        cat_hd = self.cats.get(NewznabConstants.CAT_HD, ['5040'])
        cat_sd = self.cats.get(NewznabConstants.CAT_SD, ['5030'])
        cat_uhd = self.cats.get(NewznabConstants.CAT_UHD)
        cat_webdl = self.cats.get(NewznabConstants.CAT_WEBDL)

        for mode in search_params.keys():
            if self.should_skip(log_warning=False):
                break
            for i, params in enumerate(search_params[mode]):

                if self.should_skip(log_warning=False):
                    break

                # category ids
                cat = []
                if 'Episode' == mode or 'Season' == mode:
                    if not (any(x in params for x in [v for c, v in self.caps.iteritems()
                                                      if c not in [NewznabConstants.SEARCH_EPISODE,
                                                                   NewznabConstants.SEARCH_SEASON]])
                            or not self.supports_tvdbid()):
                        logger.log('Show is missing either an id or search term for search')
                        continue

                if needed.need_anime:
                    cat.extend(cat_anime)
                if needed.need_sports:
                    cat.extend(cat_sport)

                if needed.need_hd:
                    cat.extend(cat_hd)
                if needed.need_sd:
                    cat.extend(cat_sd)
                if needed.need_uhd and cat_uhd is not None:
                    cat.extend(cat_uhd)
                if needed.need_webdl and cat_webdl is not None:
                    cat.extend(cat_webdl)

                if self.cat_ids or len(cat):
                    base_params['cat'] = ','.join(sorted(set((self.cat_ids.split(',') if self.cat_ids else []) + cat)))
                    base_params_uc['t'] = base_params['cat']

                request_params = base_params.copy()
                # if ('Propers' == mode or 'nzbs_org' == self.get_id()) \
                if 'Propers' == mode \
                        and 'q' in params and not (any(x in params for x in ['season', 'ep'])):
                    request_params['t'] = 'search'
                request_params.update(params)

                # deprecated; kept here as a bookmark: the newer haspretime:0|1 and nuked:0|1 params could be used instead
                # if hasattr(self, 'filter'):
                #     if 'nzbs_org' == self.get_id():
                #         request_params['rls'] = ((0, 1)['so' in self.filter], 2)['snn' in self.filter]

                # workaround a strange glitch
                if sum(ord(i) for i in self.get_id()) in [383] and 5 == 14 - request_params['maxage']:
                    request_params['maxage'] += 1

                offset = 0
                batch_count = not 0
                first_date = last_date = None

                # hardcoded to stop after a max of 4 hits (400 items) per query
                while (offset <= total) and (offset < max_items) and batch_count:
                    cnt = len(results)

                    if 'Cache' == mode and uc_only:
                        search_url = '%srss?%s' % (
                            self.url, urllib.urlencode(base_params_uc))
                    else:
                        search_url = '%sapi?%s' % (
                            self.url, urllib.urlencode(request_params))
                    i and time.sleep(2.1)

                    data = self.get_url(search_url)

                    if self.should_skip() or not data:
                        break

                    # hack this in until it's fixed server side
                    if not data.startswith('<?xml'):
                        data = '<?xml version="1.0" encoding="ISO-8859-1" ?>%s' % data

                    try:
                        parsed_xml, n_spaces = self.cache.parse_and_get_ns(
                            data)
                        items = parsed_xml.findall('channel/item')
                    except (StandardError, Exception):
                        logger.log(
                            'Error trying to load %s RSS feed' % self.name,
                            logger.WARNING)
                        break

                    if not self._check_auth_from_data(parsed_xml, search_url):
                        break

                    if 'rss' != parsed_xml.tag:
                        logger.log(
                            'Resulting XML from %s isn\'t RSS, not parsing it'
                            % self.name, logger.WARNING)
                        break

                    i and time.sleep(2.1)

                    for item in items:

                        title, url = self._title_and_url(item)
                        if title and url:
                            results.append(item)
                        else:
                            logger.log(
                                'The data returned from %s is incomplete, this result is unusable'
                                % self.name, logger.DEBUG)

                    # get total and offset attributes
                    try:
                        if 0 == total:
                            total = (helpers.tryInt(parsed_xml.find('.//%sresponse' % n_spaces['newznab'])
                                                    .get('total', 0)), 1000)['Cache' == mode]
                            hits = (total // self.limits + int(0 < (total % self.limits)))
                            hits += int(0 == hits)
                        offset = helpers.tryInt(parsed_xml.find('.//%sresponse' % n_spaces['newznab']).get('offset', 0))
                    except (AttributeError, KeyError):
                        if not uc_only:
                            break
                        total = len(items)

                    # No items found, prevent from doing another search
                    if 0 == total:
                        break

                    # Cache mode, prevent from doing another search
                    if 'Cache' == mode:
                        if items and len(items):
                            if not first_date:
                                first_date = self._parse_pub_date(items[0])
                            last_date = self._parse_pub_date(items[-1])
                        if not first_date or not last_date or not self._last_recent_search or \
                                last_date <= self.last_recent_search or uc_only:
                            break

                    if offset != request_params['offset']:
                        logger.log('Ask your newznab provider to fix their newznab responses')
                        break

                    request_params['offset'] += request_params['limit']
                    if total <= request_params['offset']:
                        break

                    # there are more items available than the amount given in one call, grab some more
                    items = total - request_params['offset']
                    logger.log('%s more item%s to fetch from a batch of up to %s items.'
                               % (items, helpers.maybe_plural(items), request_params['limit']), logger.DEBUG)

                    batch_count = self._log_result(results, mode, cnt,
                                                   search_url)
                    exit_log = False

                if 'Cache' == mode and first_date:
                    self.last_recent_search = first_date

                if exit_log:
                    self._log_search(mode, total, search_url)

                if not try_all_searches and any(x in request_params for x in [
                        v for c, v in self.caps.iteritems()
                        if c not in [NewznabConstants.SEARCH_EPISODE, NewznabConstants.SEARCH_SEASON,
                                     NewznabConstants.SEARCH_TEXT]]) and len(results):
                    break

        return results, n_spaces
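The newznab search above pages through results with offset/limit requests until it reaches the server-reported total or its own max_items cap. Here is a compact sketch of that paging loop against a fake in-memory store; fetch_page and its (items, total) return shape are assumptions standing in for the real API round-trip.

def fetch_all_pages(fetch_page, limit=100, max_items=400):
    # fetch_page(offset, limit) -> (items, total), a stand-in for one API request
    results, offset, total = [], 0, 0
    while True:
        items, total = fetch_page(offset, limit)
        if not items:
            break
        results.extend(items)
        offset += limit
        # stop once the server says nothing is left, or we hit our own cap
        if offset >= total or offset >= max_items:
            break
    return results

fake_store = list(range(250))  # pretend the indexer holds 250 items
got = fetch_all_pages(lambda offset, limit: (fake_store[offset:offset + limit], len(fake_store)))
print(len(got))  # 250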
Example #12
File: newznab.py Project: keithzg/SickGear
    def choose_search_mode(self, episodes, ep_obj, hits_per_page=100):
        searches = [e for e in episodes if (not ep_obj.show.is_scene and e.season == ep_obj.season)
                    or (ep_obj.show.is_scene and e.scene_season == ep_obj.scene_season)]

        needed = neededQualities()
        needed.check_needed_types(ep_obj.show)
        for s in searches:
            if needed.all_qualities_needed:
                break
            if not s.show.is_anime and not s.show.is_sports:
                if not getattr(s, 'wantedQuality', None):
                    # this should not happen, the creation is missing for the search in this case
                    logger.log('wantedQuality property was missing for search, creating it', logger.WARNING)
                    ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status)
                    s.wantedQuality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=True)
                needed.check_needed_qualities(s.wantedQuality)

        if not hasattr(ep_obj, 'eps_aired_in_season'):
            # this should not happen, the creation is missing for the search in this case
            logger.log('eps_aired_in_season property was missing for search, creating it', logger.WARNING)
            ep_count, ep_count_scene = get_aired_in_season(ep_obj.show)
            ep_obj.eps_aired_in_season = ep_count.get(ep_obj.season, 0)
            ep_obj.eps_aired_in_scene_season = ep_count_scene.get(ep_obj.scene_season, 0) if ep_obj.show.is_scene else \
                ep_obj.eps_aired_in_season

        per_ep, limit_per_ep = 0, 0
        if needed.need_sd and not needed.need_hd:
            per_ep, limit_per_ep = 10, 25
        if needed.need_hd:
            if not needed.need_sd:
                per_ep, limit_per_ep = 30, 90
            else:
                per_ep, limit_per_ep = 40, 120
        if needed.need_uhd or (needed.need_hd and
                               not self.cats.get(NewznabConstants.CAT_UHD)):
            per_ep += 4
            limit_per_ep += 10
        if ep_obj.show.is_anime or ep_obj.show.is_sports or ep_obj.show.air_by_date:
            rel_per_ep, limit_per_ep = 5, 10
        else:
            rel_per_ep = per_ep
        rel = max(1, int(ceil((ep_obj.eps_aired_in_scene_season if ep_obj.show.is_scene
                               else ep_obj.eps_aired_in_season * rel_per_ep) / hits_per_page)))
        rel_limit = max(1, int(ceil((ep_obj.eps_aired_in_scene_season if ep_obj.show.is_scene
                                     else ep_obj.eps_aired_in_season * limit_per_ep) / hits_per_page)))
        season_search = rel < (len(searches) * 100 // hits_per_page)
        if not season_search:
            needed = neededQualities()
            needed.check_needed_types(ep_obj.show)
            if not ep_obj.show.is_anime and not ep_obj.show.is_sports:
                if not getattr(ep_obj, 'wantedQuality', None):
                    ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status)
                    ep_obj.wantedQuality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=True)
                needed.check_needed_qualities(ep_obj.wantedQuality)
        else:
            if not ep_obj.show.is_anime and not ep_obj.show.is_sports:
                for ep in episodes:
                    if not getattr(ep, 'wantedQuality', None):
                        ep_status, ep_quality = Quality.splitCompositeStatus(ep.status)
                        ep.wantedQuality = get_wanted_qualities(ep, ep_status, ep_quality, unaired=True)
                    needed.check_needed_qualities(ep.wantedQuality)
        return (season_search, needed,
                (hits_per_page * 100 // hits_per_page * 2,
                 hits_per_page * int(ceil(rel_limit * 1.5)))[season_search])
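choose_search_mode decides between per-episode and whole-season searches by estimating how many result pages each approach would cost. The sketch below reproduces that comparison with a fixed per_ep weight; in the real method the weight is derived from the qualities still needed, so the numbers here are purely illustrative.

from math import ceil

def season_search_is_cheaper(eps_aired_in_season, wanted_count, per_ep=30, hits_per_page=100):
    # pages a season-wide query would need to cover every aired episode at the given weight
    rel = max(1, int(ceil(eps_aired_in_season * per_ep / float(hits_per_page))))
    # pages an episode-by-episode search of the wanted episodes would need
    per_episode_pages = wanted_count * 100 // hits_per_page
    return rel < per_episode_pages

print(season_search_is_cheaper(22, wanted_count=10))  # True: one season search beats ten episode searches
print(season_search_is_cheaper(6, wanted_count=1))    # False: a single episode search is enough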
Example #13
    def run(self):
        generic_queue.QueueItem.run(self)

        try:
            self._change_missing_episodes()

            show_list = sickbeard.showList
            from_date = datetime.date.fromordinal(1)
            needed = common.neededQualities()
            for curShow in show_list:
                if curShow.paused:
                    continue

                wanted_eps = wanted_episodes(curShow, from_date, unaired=sickbeard.SEARCH_UNAIRED)

                if wanted_eps:
                    if not needed.all_needed:
                        if not needed.all_types_needed:
                            needed.check_needed_types(curShow)
                        if not needed.all_qualities_needed:
                            for w in wanted_eps:
                                if needed.all_qualities_needed:
                                    break
                                if not w.show.is_anime and not w.show.is_sports:
                                    needed.check_needed_qualities(w.wantedQuality)

                    self.episodes.extend(wanted_eps)

            if sickbeard.DOWNLOAD_PROPERS:
                properFinder.get_needed_qualites(needed)

            self.update_providers(needed=needed)
            self._check_for_propers(needed)

            if not self.episodes:
                logger.log(u'No search of cache for episodes required')
                self.success = True
            else:
                num_shows = len(set([ep.show.name for ep in self.episodes]))
                logger.log(u'Found %d needed episode%s spanning %d show%s'
                           % (len(self.episodes), helpers.maybe_plural(len(self.episodes)),
                              num_shows, helpers.maybe_plural(num_shows)))

                try:
                    logger.log(u'Beginning recent search for episodes')
                    found_results = search.search_for_needed_episodes(self.episodes)

                    if not len(found_results):
                        logger.log(u'No needed episodes found')
                    else:
                        for result in found_results:
                            # just use the first result for now
                            logger.log(u'Downloading %s from %s' % (result.name, result.provider.name))
                            self.success = search.snatch_episode(result)
                            if self.success:
                                for ep in result.episodes:
                                    self.snatched_eps.add((ep.show.indexer, ep.show.indexerid, ep.season, ep.episode))

                            helpers.cpu_sleep()

                except (StandardError, Exception):
                    logger.log(traceback.format_exc(), logger.ERROR)

                if None is self.success:
                    self.success = False

        finally:
            self.finish()
Example #14
    def search_html(self,
                    search='',
                    search_mode='',
                    needed=neededQualities(need_all=True),
                    **kwargs):

        results = []
        if None is self.cookies:
            return results

        cats = self._get_cats(needed=needed)

        rc = dict(
            (k, re.compile('(?i)' + v)) for (k, v) in {
                'info': 'detail',
                'get': r'send\?',
                'nuked': r'\bnuked',
                'cat': 'cat=(?:%s)' % '|'.join(cats)
            }.items())
        mode = ('search', 'cache')['' == search]
        search_url = self.urls[mode + '_html'] % search
        html = self.get_url(search_url)
        if self.should_skip():
            return results
        cnt = len(results)
        try:
            if not html:
                raise generic.HaltParseException

            with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                torrent_table = soup.find('table', attrs={'id': 'table_table'})
                torrent_rows = []
                if torrent_table:
                    torrent_rows = torrent_table.find('tbody').find_all('tr')

                if 1 > len(torrent_rows):
                    raise generic.HaltParseException

                for tr in torrent_rows:
                    try:
                        if tr.find('img', src=rc['nuked']) or not tr.find('a', href=rc['cat']):
                            continue

                        title = tr.find('a', href=rc['info']).get_text().strip()
                        download_url = tr.find('a', href=rc['get'])
                        age = tr.find_all('td')[-1]['data-sort']
                    except (AttributeError, TypeError, ValueError):
                        continue

                    if title and download_url and age:
                        results.append({'release': title, 'getnzb': self._link(download_url['href']),
                                        'usenetage': int(age.strip())})

        except generic.HaltParseException:
            time.sleep(1.1)
            pass
        except (StandardError, Exception):
            logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)

        mode = (mode, search_mode)['Propers' == search_mode]
        self._log_search(mode, len(results) - cnt, search_url)
        return results
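A thread running through all of these examples is turning a neededQualities object into provider category ids, via _get_cats or the cat_sd/cat_hd/... lookups in the newznab example. The sketch below shows that mapping in isolation; the category numbers and FakeNeeded are placeholders, and only the need_* flag names come from the examples above.

# placeholder category ids; real providers map these per site
CATEGORY_IDS = {'sd': ['5030'], 'hd': ['5040'], 'uhd': ['5045'], 'anime': ['5070'], 'sports': ['5060']}

def cats_for(needed):
    # collect the ids whose need_* flag is set on the neededQualities-like object
    cats = []
    for flag, key in (('need_sd', 'sd'), ('need_hd', 'hd'), ('need_uhd', 'uhd'),
                      ('need_anime', 'anime'), ('need_sports', 'sports')):
        if getattr(needed, flag, False):
            cats.extend(CATEGORY_IDS[key])
    return ','.join(cats)

class FakeNeeded(object):
    need_sd, need_hd, need_uhd, need_anime, need_sports = True, True, False, False, False

print(cats_for(FakeNeeded()))  # '5030,5040'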