Code example #1
    def _active_state(self, ids=None):
        # type: (Optional[Union[AnyStr, list]]) -> list
        """
        Fetch state of items, return items that are actually downloading or seeding
        :param ids: Optional id(s) to get state info for. None to get all
        :return: Zero or more object(s) assigned with state `down`loading or `seed`ing
        """
        downloaded = (lambda item: float(item.get('progress') or 0) * (item.get('size') or 0))  # bytes
        wanted = (lambda item: item.get('priority'))  # wanted will == tally/downloaded if all files are selected
        base_state = (lambda t, gp, f: dict(
            id=t['hash'], title=t['name'], total_size=gp.get('total_size') or 0,
            added_ts=gp.get('addition_date'), last_completed_ts=gp.get('completion_date'),
            last_started_ts=None, seed_elapsed_secs=gp.get('seeding_time'),
            wanted_size=sum(map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)) or None,
            wanted_down=sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)) or None,
            tally_down=sum(map_list(lambda tf: downloaded(tf) or 0, f)) or None,
            tally_up=gp.get('total_uploaded'),
            state='done' if 'pausedUP' == t.get('state') else ('down', 'seed')['up' in t.get('state').lower()]
        ))
        file_list = (lambda ti: self._client_request(
            ('torrents/files', 'query/propertiesFiles/%s' % ti['hash'])[not self.api_ns],
            params=({'hash': ti['hash']}, {})[not self.api_ns], json=True) or {})
        valid_stat = (lambda ti: not self._ignore_state(ti)
                      and sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti))))
        result = map_list(lambda t: base_state(t, self._tinf(t['hash'])[0], file_list(t)),
                          filter_list(lambda t: re.search('(?i)queue|stall|(up|down)load|pausedUP', t['state']) and
                                      valid_stat(t), self._tinf(ids, False)))

        return result
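Every example on this page uses SickGear-style Python 2/3 compatibility shims (map_list, filter_list, filter_iter, map_iter, map_consume). Their real definitions live in the project's compat module; the following is only a minimal sketch of plausible stand-ins so the examples can be read without that context:

# Hypothetical stand-ins for the compat helpers seen in these examples;
# the actual SickGear definitions may differ in detail. The eager variants
# restore Python 2's list-returning map/filter semantics under Python 3.
map_list = lambda fn, it: list(map(fn, it))         # eager map
filter_list = lambda fn, it: list(filter(fn, it))   # eager filter
map_iter = map                                      # lazy map
filter_iter = filter                                # lazy filter

def map_consume(fn, it):
    """Apply fn to each item purely for its side effects, discarding results."""
    for item in it:
        fn(item)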
Code example #2
File: __init__.py Project: v0re/SickGear
    def get_torrents(self, view='main'):
        """Get list of all torrents in specified view

        @return: list of L{Torrent} instances

        @rtype: list

        @todo: add validity check for specified view
        """
        self.torrents = []
        retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self), torrent_methods)
        mc = rpc.Multicall(self)

        if self.method_exists('d.multicall2'):
            mc.add('d.multicall2', '', view, 'd.hash=',
                   *map_list(lambda m2: ((getattr(m2, 'aliases') or [''])[-1] or m2.rpc_call) + '=', retriever_methods))
        else:
            mc.add('d.multicall', view, 'd.get_hash=',
                   *map_list(lambda m1: m1.rpc_call + '=', retriever_methods))

        results = mc.call()[0]  # only sent one call, only need first result

        for result in results:
            self.torrents.append(
                Torrent(self, info_hash=result[0],
                        **dict((m.varname, rpc.process_result(m, r))
                               for (m, r) in zip(retriever_methods, result[1:]))))  # result[0]=info_hash

        self._manage_torrent_cache()
        return self.torrents
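The Torrent(...) construction above pairs each retriever method with its positional slot in the multicall result row to build keyword arguments. A generic sketch of the same zip-into-kwargs pattern, with illustrative names:

# Pair a list of field descriptors with one row of positional results to
# build keyword arguments, as the Torrent(...) call does above.
fields = ['name', 'size_bytes', 'complete']   # illustrative field names
row = ['ubuntu.iso', 3654957056, 1]           # illustrative multicall row
kwargs = dict(zip(fields, row))
# -> {'name': 'ubuntu.iso', 'size_bytes': 3654957056, 'complete': 1}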
Code example #3
    def _active_state(self, ids=None):
        # type: (Optional[list]) -> list
        """
        Fetch state of items, return items that are actually downloading or seeding
        :param ids: Optional id(s) to get state info for. None to get all
        :return: Zero or more object(s) assigned with state `down`loading or `seed`ing
        """
        tasks = self._tinf(ids)
        downloaded = (lambda item, d=0: item.get('size_downloaded') or d)  # bytes
        wanted = (lambda item: item.get('wanted'))  # wanted will == tally/downloaded if all files are selected
        base_state = (lambda t, d, tx, f: dict(
            id=t['id'],
            title=t['title'],
            total_size=t.get('size') or 0,
            added_ts=d.get('create_time'),
            last_completed_ts=d.get('completed_time'),
            last_started_ts=d.get('started_time'),
            seed_elapsed_secs=d.get('seedelapsed'),
            wanted_size=sum(
                map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)
            ) or None,
            wanted_down=sum(
                map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)
            ) or None,
            tally_down=downloaded(tx),
            tally_up=tx.get('size_uploaded'),
            state='done'
            if re.search('finish', t['status']) else ('seed', 'down')[any(
                filter_list(
                    lambda tf: wanted(tf) and
                    (downloaded(tf, -1) < tf.get('size', 0)), f))]))
        # only available during "download" and "seeding"
        file_list = (lambda t: t.get('additional', {}).get('file', {}))
        valid_stat = (lambda ti: not ti.get('error') and isinstance(
            ti.get('status'), string_types) and sum(
                map_list(lambda tf: wanted(tf) and downloaded(tf) or 0,
                         file_list(ti))))
        result = map_list(
            lambda t: base_state(t,
                                 t.get('additional', {}).get('detail', {}),
                                 t.get('additional', {}).get('transfer', {}),
                                 file_list(t)),
            filter_list(
                lambda t: t['status'] in
                ('downloading', 'seeding', 'finished') and valid_stat(t),
                tasks))

        return result
Code example #4
    def _perform_task(self, method, ids, filter_func, pause_first=False):
        # type: (AnyStr, Union[AnyStr, list], Callable, bool) -> Union[bool, list]
        """
        Set up and send a method to client
        :param method: Either `resume` or `delete`
        :param ids: Id(s) to perform method on
        :param filter_func: Callback function to filter tasks as failed or erroneous
        :param pause_first: True if task should be paused prior to invoking method
        :return: True if all ids succeeded, otherwise a list of id(s) that could not be acted upon, or Falsy on failure
        """
        if isinstance(ids, (string_types, list)):
            rids = ids if isinstance(ids, list) else map_list(
                lambda x: x.strip(), ids.split(','))

            result = pause_first and self._pause_torrent(
                rids)  # get items not paused
            result = (isinstance(result, list) and result or [])
            for t_id in list(set(rids) - set(result)):  # perform on paused ids
                if True is not self._action(method, t_id, filter_func):
                    result += [t_id]  # failed item

            return result or True
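The loop above only acts on ids that the pause step handled: whatever _pause_torrent returns is treated as the not-paused remainder and subtracted out, and any id the action then fails on is appended to the same list. The core bookkeeping, with made-up values:

# Ids reported back by the pause step are excluded from the action; failed
# ids then accumulate into the same list, and True means everything succeeded.
rids = ['hash_a', 'hash_b', 'hash_c']        # illustrative ids
not_paused = ['hash_c']                      # illustrative _pause_torrent result
to_act_on = set(rids) - set(not_paused)      # -> {'hash_a', 'hash_b'}
assert {'hash_a', 'hash_b'} == to_act_on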
Code example #5
    def is_min_server_version(self, version, host, token):
        """ Test if Emby `host` server version is greater than or equal `version` arg

        :param version: Major, Minor, Build, Revision
        :type version: List
        :param host: Emby host
        :type host: Basestring
        :param token: Access token
        :type token: Basestring
        :return: True if Emby `host` server version is greater than or equal to the `version` arg, otherwise False
        :rtype: bool
        """
        self.response = None
        response = sickbeard.helpers.get_url(
            'http://%s/emby/System/Info/Public' % host,
            headers={
                'Content-type': 'application/json',
                'X-MediaBrowser-Token': token
            },
            timeout=20,
            hooks=dict(response=self._cb_response),
            json=True)

        return self.response and self.response.get('ok') and 200 == self.response.get('status_code') and \
            version <= map_list(lambda x: int(x), response.get('Version', '0.0.0.0').split('.'))
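The return expression relies on Python ordering lists element-wise, left to right, so comparing [Major, Minor, Build, Revision] lists yields a correct minimum-version check. A tiny illustration with made-up numbers:

# Lists compare element-wise, so a parsed '4.1.18.0' satisfies a minimum
# version of [4, 1, 0, 0] but not [4, 2, 0, 0].
parsed = [int(x) for x in '4.1.18.0'.split('.')]
assert [4, 1, 0, 0] <= parsed
assert not [4, 2, 0, 0] <= parsed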
Code example #6
    def _search_show(self, name, **kwargs):
        # type: (AnyStr, Optional[Any]) -> List[TVInfoShow]
        def map_data(data):
            data['poster'] = data.get('image')
            return data

        return map_list(map_data, self.get_series(name))
Code example #7
File: __init__.py Project: spydoor17/SickGear
def find_method(rpc_call):
    """Return L{Method} instance associated with given RPC call"""
    try:
        rpc_call = rpc_call.lower()
        return next(filter_iter(lambda m: rpc_call in map_list(
            lambda n: n.lower(), [m.rpc_call] + list(getattr(m, 'aliases', []))),
                      rtorrent.methods + rtorrent.torrent.methods +
                      rtorrent.file.methods + rtorrent.tracker.methods + rtorrent.peer.methods))
    except StopIteration:  # next() raises StopIteration, not IndexError, when nothing matches
        return -1
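The except clause matters because next() on an exhausted iterator raises StopIteration rather than IndexError. A quick demonstration, including the two-argument form that sidesteps the exception with a default:

# next() raises StopIteration when the filter yields nothing:
#   next(filter(lambda n: 9 < n, [1, 2, 3]))  # raises StopIteration
# supplying a default as the second argument avoids the exception:
assert -1 == next(filter(lambda n: 9 < n, [1, 2, 3]), -1)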
Code example #8
File: show_name_helpers.py Project: valnar1/SickGear
def get_show_names_all_possible(show_obj, season=-1, scenify=True, spacer='.'):
    # type: (sickbeard.tv.TVShow, int, bool, AnyStr) -> List[AnyStr]
    """

    :param show_obj: show object
    :param season: season
    :param scenify:
    :param spacer: spacer
    :return:
    """
    show_names = list(set(allPossibleShowNames(
        show_obj, season=season)))  # type: List[AnyStr]
    if scenify:
        show_names = map_list(sanitize_scene_name, show_names)
    return url_encode(show_names, spacer)
Code example #9
def _get_absolute_numbering_for_show(tbl, tvid, prodid):
    """

    :param tbl: table name
    :type tbl: AnyStr
    :param tvid: tvid
    :type tvid: int
    :param prodid: prodid
    :type prodid: int or long
    :return:
    :rtype: Dict
    """
    result = {}

    if None is not prodid:
        if 'tv_episodes' == tbl:
            xem_refresh(tvid, prodid)

        my_db = db.DBConnection()
        # noinspection SqlResolve
        rows = my_db.select(
            'SELECT season, episode, absolute_number, scene_absolute_number'
            ' FROM %s' % tbl + ' WHERE indexer = ? AND %s = ?' %
            ('indexer_id', 'showid')['tv_episodes' == tbl] +
            ' AND scene_absolute_number != 0'
            ' ORDER BY season, episode', [int(tvid), int(prodid)])

        for row in rows:
            season, episode, abs_num = map_list(
                lambda x: try_int(row[x], None),
                ('season', 'episode', 'absolute_number'))
            if None is season and None is episode and None is not abs_num:
                season, episode, _ = _get_sea(tvid,
                                              prodid,
                                              absolute_number=abs_num)

            if None is not season and None is not episode:
                scene_absolute_number = try_int(row['scene_absolute_number'],
                                                None)
                if None is not scene_absolute_number:
                    result[(season, episode)] = scene_absolute_number

    return result
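The query above picks its id column with the ('indexer_id', 'showid')[condition] idiom that recurs throughout these examples: a bool can index a 2-tuple because False == 0 and True == 1. For instance:

# ('a', 'b')[condition] selects 'b' when the condition is True.
tbl = 'tv_episodes'
col = ('indexer_id', 'showid')['tv_episodes' == tbl]
assert 'showid' == col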
Code example #10
def xem_refresh(tvid, prodid, force=False):
    """
    Refresh data from xem for a tv show

    :param tvid: TV info source id
    :type tvid: int
    :param prodid: show id at the TV info source
    :type prodid: int
    :param force: True to force a refresh regardless of the cached refresh age
    :type force: bool
    """
    if None is prodid:
        return

    tvid, prodid = int(tvid), int(prodid)
    tvinfo = sickbeard.TVInfoAPI(tvid)

    if 'xem_origin' not in tvinfo.config or prodid not in xem_ids_list.get(
            tvid, []):
        return

    xem_origin = tvinfo.config['xem_origin']

    # XEM API URL
    url = 'http://thexem.de/map/all?id=%s&origin=%s&destination=scene' % (
        prodid, xem_origin)

    max_refresh_age_secs = 86400  # 1 day

    my_db = db.DBConnection()
    rows = my_db.select(
        'SELECT last_refreshed'
        ' FROM xem_refresh'
        ' WHERE indexer = ? AND indexer_id = ?', [tvid, prodid])
    if rows:
        last_refresh = int(rows[0]['last_refreshed'])
        refresh = int(time.mktime(datetime.datetime.today().timetuple())
                      ) > last_refresh + max_refresh_age_secs
    else:
        refresh = True

    if refresh or force:
        logger.log(
            u'Looking up XEM scene mapping for show %s on %s' %
            (prodid, tvinfo.name), logger.DEBUG)

        # mark refreshed
        my_db.upsert(
            'xem_refresh',
            dict(last_refreshed=int(
                time.mktime(datetime.datetime.today().timetuple()))),
            dict(indexer=tvid, indexer_id=prodid))

        try:
            parsed_json = sickbeard.helpers.get_url(url,
                                                    parse_json=True,
                                                    timeout=90)
            if not parsed_json or '' == parsed_json:
                logger.log(
                    u'No XEM data for show %s on %s' % (prodid, tvinfo.name),
                    logger.MESSAGE)
                return

            if 'success' in parsed_json['result']:
                cl = map_list(
                    lambda entry: [
                        'UPDATE tv_episodes'
                        ' SET scene_season = ?, scene_episode = ?, scene_absolute_number = ?'
                        ' WHERE indexer = ? AND showid = ?'
                        ' AND season = ? AND episode = ?',
                        [
                            entry.get('scene%s' %
                                      ('', '_2')['scene_2' in entry]).get(v)
                            for v in ('season', 'episode', 'absolute')
                        ] + [tvid, prodid] + [
                            entry.get(xem_origin).get(v)
                            for v in ('season', 'episode')
                        ]
                    ], filter_iter(lambda x: 'scene' in x,
                                   parsed_json['data']))

                if 0 < len(cl):
                    my_db = db.DBConnection()
                    my_db.mass_action(cl)
            else:
                logger.log(
                    u'Empty lookup result - no XEM data for show %s on %s' %
                    (prodid, tvinfo.name), logger.DEBUG)
        except (BaseException, Exception) as e:
            logger.log(
                u'Exception refreshing XEM data for show ' + str(prodid) +
                ' on ' + tvinfo.name + ': ' + ex(e), logger.WARNING)
            logger.log(traceback.format_exc(), logger.ERROR)
Code example #11
File: common.py Project: swipswaps/SickGear
                         status in (DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER)):
                    return []
        return self[qualities][quality][self.wantedlist]


for (attr_name, qual_val) in [
    ('SNATCHED', SNATCHED),
    ('SNATCHED_PROPER', SNATCHED_PROPER),
    ('SNATCHED_BEST', SNATCHED_BEST),
    ('DOWNLOADED', DOWNLOADED),
    ('ARCHIVED', ARCHIVED),
    ('FAILED', FAILED),
]:
    setattr(
        Quality, attr_name,
        map_list(lambda qk: Quality.compositeStatus(qual_val, qk),
                 iterkeys(Quality.qualityStrings)))
Quality.SNATCHED_ANY = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST

SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], [])
HD = Quality.combineQualities([
    Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL,
    Quality.HDBLURAY, Quality.FULLHDBLURAY
], [])  # HD720p + HD1080p
HD720p = Quality.combineQualities(
    [Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], [])
HD1080p = Quality.combineQualities(
    [Quality.FULLHDTV, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY], [])
UHD2160p = Quality.combineQualities([Quality.UHD4KWEB], [])
ANY = Quality.combineQualities([
    Quality.SDTV, Quality.SDDVD, Quality.HDTV, Quality.FULLHDTV,
    Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.HDBLURAY,
Code example #12
File: horriblesubs.py Project: valnar1/SickGear
    def _search_provider(self, search_params, **kwargs):

        results = []
        if self.show_obj and not self.show_obj.is_anime:
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict([(k, re.compile('(?i)' + v))
                   for (k, v) in iteritems({'nodots': r'[\.\s]+'})])

        for mode in search_params:
            for search_string in search_params[mode]:
                search_string = unidecode(search_string)

                search_url = self.urls['browse'] if 'Cache' == mode else \
                    self.urls['search'] % (rc['nodots'].sub(' ', search_string), str(time.time()).replace('.', '3'))

                data, html = 2 * [None]
                if 'Cache' == mode:
                    data = self.cache.get_rss(search_url)
                else:
                    html = self.get_url(search_url)

                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if None is not data:
                        for cur_item in data.get('entries', []):
                            title, download_url = cur_item.get(
                                'title'), self._link(cur_item.get('link'))
                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, '', ''))
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser('<html><body>%s</body></html>' %
                                   html) as soup:
                        for link in soup.find_all('a'):
                            try:
                                variants = map_list(
                                    lambda t: t.get_text().replace(
                                        'SD', '480p'),
                                    link.find_all('span', class_='badge'))
                                map_consume(
                                    lambda t: t.decompose(),
                                    link.find_all('span') +
                                    link.find_all('div'))
                                title = '[HorribleSubs] ' + re.sub(
                                    r'\s*\[HorribleSubs\]\s*', '',
                                    link.get_text())
                                download_url = self._link(link.get('href'))
                                if title and download_url:
                                    items[mode] += map_list(
                                        lambda _v:
                                        ('%s [%s]' % (title, _v), '%s-%s' %
                                         (download_url, _v), '', ''), variants)
                            except (AttributeError, TypeError, ValueError):
                                continue

                except generic.HaltParseException:
                    pass
                except (BaseException, Exception):
                    logger.log(
                        u'Failed to parse. Traceback: %s' %
                        traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Code example #13
def _download_propers(proper_list):
    # type: (List[Proper]) -> None
    """
    download propers from given list

    :param proper_list: proper list
    """
    verified_propers = True
    consumed_proper = []
    downloaded_epid = set()

    _epid = operator.attrgetter('tvid', 'prodid', 'season', 'episode')
    while verified_propers:
        verified_propers = set()

        # get verified list; sort the list of unique Propers for highest proper_level, newest first
        for cur_proper in sorted(
                filter_iter(lambda p: p not in consumed_proper,
                            # allows Proper to fail or be rejected and another to be tried (with a different name)
                            filter_iter(lambda p: _epid(p) not in downloaded_epid, proper_list)),
                key=operator.attrgetter('properlevel', 'date'), reverse=True):  # type: Proper

            epid = _epid(cur_proper)

            # if the show is in our list and there hasn't been a Proper already added for that particular episode
            # then add it to our list of Propers
            if epid not in map_list(_epid, verified_propers):
                logger.log('Proper may be useful [%s]' % cur_proper.name)
                verified_propers.add(cur_proper)
            else:
                # use Proper with the highest level
                remove_propers = set()
                map_consume(lambda vp: remove_propers.add(vp),
                            filter_iter(lambda p: (epid == _epid(p) and cur_proper.proper_level > p.proper_level),
                                        verified_propers))

                if remove_propers:
                    verified_propers -= remove_propers
                    logger.log('A more useful Proper [%s]' % cur_proper.name)
                    verified_propers.add(cur_proper)

        for cur_proper in list(verified_propers):
            consumed_proper += [cur_proper]

            # scene release checking
            scene_only = getattr(cur_proper.provider, 'scene_only', False)
            non_scene_fallback = getattr(cur_proper.provider, 'scene_loose', False) \
                or getattr(cur_proper.provider, 'scene_loose_active', False)
            scene_rej_nuked = getattr(cur_proper.provider, 'scene_rej_nuked', False)
            scene_nuked_active = getattr(cur_proper.provider, 'scene_nuked_active', False)
            if any([scene_only, non_scene_fallback, scene_rej_nuked, scene_nuked_active]) \
                    and not cur_proper.parsed_show_obj.is_anime:
                scene_or_contain = getattr(cur_proper.provider, 'scene_or_contain', '')
                scene_contains = False
                if scene_only and scene_or_contain:
                    re_extras = dict(re_prefix='.*', re_suffix='.*')
                    r = show_name_helpers.contains_any(cur_proper.name, scene_or_contain, **re_extras)
                    if None is not r and r:
                        scene_contains = True

                if scene_contains and not scene_rej_nuked:
                    reject = False
                else:
                    reject, url = search.can_reject(cur_proper.name)
                    if reject:
                        if isinstance(reject, string_types):
                            if scene_rej_nuked and not scene_nuked_active:
                                logger.log('Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url),
                                           logger.DEBUG)
                            else:
                                logger.log('Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url),
                                           logger.DEBUG)
                                reject = False
                        elif scene_contains or non_scene_fallback:
                            reject = False
                        else:
                            logger.log('Rejecting as not scene release listed at any [%s]' % url, logger.DEBUG)

                if reject:
                    continue

            # make the result object
            ep_obj = cur_proper.parsed_show_obj.get_episode(cur_proper.season, cur_proper.episode)
            result = cur_proper.provider.get_result([ep_obj], cur_proper.url)
            if None is result:
                continue
            result.name = cur_proper.name
            result.quality = cur_proper.quality
            result.version = cur_proper.version
            result.properlevel = cur_proper.proper_level
            result.is_repack = cur_proper.is_repack
            result.puid = cur_proper.puid

            # snatch it
            if search.snatch_episode(result, SNATCHED_PROPER):
                downloaded_epid.add(_epid(cur_proper))
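The de-duplication above works because operator.attrgetter called with several names returns a plain tuple of attribute values, which is hashable and comparable and can therefore key the downloaded_epid set. A small sketch with an illustrative stand-in for the Proper class:

# attrgetter('a', 'b', ...) returns a tuple; tuples hash and compare by
# value, which makes them good set members and dict keys.
import operator
from collections import namedtuple

Ep = namedtuple('Ep', 'tvid prodid season episode')  # illustrative stand-in
_epid = operator.attrgetter('tvid', 'prodid', 'season', 'episode')
assert (1, 42, 3, 7) == _epid(Ep(1, 42, 3, 7))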
Code example #14
    def _search_provider(self, search_params, **kwargs):

        results = []
        if self.show_obj and not self.show_obj.is_anime:
            return results

        items = {'Season': [], 'Episode': [], 'Propers': []}

        rc = dict([(k, re.compile('(?i)' + v))
                   for (k, v) in iteritems({
                       'nodots': r'[\.\s]+',
                       'stats': r'S:\s*?(\d)+\s*L:\s*(\d+)',
                       'size': r'size:\s*(\d+[.,]\d+\w+)'
                   })])

        for mode in search_params:
            for search_string in search_params[mode]:
                params = urlencode({
                    'terms':
                    rc['nodots'].sub(' ', search_string).encode('utf-8'),
                    'type':
                    1
                })

                search_url = '%ssearch.php?%s' % (self.url, params)

                html = self.get_url(search_url)
                if self.should_skip():
                    return self._sort_seeding(mode, results)

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(
                            html,
                            parse_only=dict(table={
                                'class': (lambda at: at and 'listing' in at)
                            })) as tbl:
                        tbl_rows = [] if not tbl else tbl.find_all('tr')
                        if tbl_rows:
                            a = (0, 1)[None is not tbl_rows[0].find(
                                'td', class_='centertext')]

                            for top, bottom in zip(tbl_rows[a::2],
                                                   tbl_rows[a + 1::2]):
                                try:
                                    bottom_text = bottom.get_text() or ''
                                    stats = rc['stats'].findall(bottom_text)
                                    seeders, leechers = ((0, 0) if not stats
                                                         else [try_int(n) for n in stats[0]])

                                    size = rc['size'].findall(bottom_text)
                                    size = size and size[0] or -1

                                    info = top.find('td', class_='desc-top')
                                    title = info and re.sub(
                                        r'[ .]{2,}', '.',
                                        info.get_text().strip())
                                    links = info and map_list(
                                        lambda l: l.get('href', ''),
                                        info.find_all('a')) or None
                                    download_url = self._link(
                                        (filter_list(lambda l: 'magnet:' in l,
                                                     links)
                                         or filter_list(
                                             lambda l: not re.search(
                                                 r'(magnet:|\.se).+', l),
                                             links))[0])
                                except (AttributeError, TypeError, ValueError,
                                        IndexError):
                                    continue

                                if title and download_url:
                                    items[mode].append(
                                        (title, download_url, seeders,
                                         self._bytesizer(size)))

                except (BaseException, Exception):
                    time.sleep(1.1)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Code example #15
File: search_backlog.py Project: v0re/SickGear
    def search_backlog(
            self,
            which_shows=None,  # type: Optional[List[TVShow]]
            force_type=NORMAL_BACKLOG,  # type: int
            force=False  # type: bool
    ):
        """
        start backlog for given list of shows or start next scheduled backlog

        :param which_shows: optional list of shows to backlog search
        :param force_type: type of backlog
        :param force: force backlog
        :return: nothing
        :rtype: None
        """
        if self.amActive and not which_shows:
            logger.log(u'Backlog is still running, not starting it again',
                       logger.DEBUG)
            return

        if which_shows:
            show_list = which_shows
            standard_backlog = False
        else:
            show_list = sickbeard.showList
            standard_backlog = True

        now = datetime.datetime.now()
        any_torrent_enabled = continued_backlog = False
        if not force and standard_backlog and (
                datetime.datetime.now() - datetime.datetime.fromtimestamp(
                    self._get_last_runtime())) < datetime.timedelta(hours=23):
            any_torrent_enabled = any(
                map_iter(
                    lambda x: x.is_active() and x.enable_backlog and x.
                    providerType == GenericProvider.TORRENT,
                    sickbeard.providers.sortedProviderList()))
            if not any_torrent_enabled:
                logger.log(
                    'Last scheduled backlog run was within the last day, skipping this run.',
                    logger.DEBUG)
                return

        if not self.providers_active(any_torrent_enabled, standard_backlog):
            logger.log(
                'No NZB/Torrent provider has active searching enabled in config/Media Providers,'
                ' cannot start backlog.', logger.WARNING)
            return

        self._get_last_backlog()
        self.amActive = True
        self.amPaused = False

        cur_date = datetime.date.today().toordinal()
        from_date = datetime.date.fromordinal(1)
        limited_from_date = datetime.date.today() - datetime.timedelta(
            days=sickbeard.BACKLOG_DAYS)

        limited_backlog = False
        if standard_backlog and (any_torrent_enabled
                                 or sickbeard.BACKLOG_NOFULL):
            logger.log(
                u'Running limited backlog for episodes missed during the last %s day(s)'
                % str(sickbeard.BACKLOG_DAYS))
            from_date = limited_from_date
            limited_backlog = True

        runparts = []
        if standard_backlog and not any_torrent_enabled and sickbeard.BACKLOG_NOFULL:
            logger.log(
                u'Skipping automated full backlog search because it is disabled in search settings'
            )

        my_db = db.DBConnection('cache.db')
        if standard_backlog and not any_torrent_enabled and not sickbeard.BACKLOG_NOFULL:
            sql_result = my_db.select(
                'SELECT * FROM backlogparts WHERE part in (SELECT MIN(part) FROM backlogparts)'
            )
            if sql_result:
                sl = []
                part_nr = int(sql_result[0]['part'])
                for s in sql_result:
                    show_obj = find_show_by_id(
                        {int(s['indexer']): int(s['indexerid'])})
                    if show_obj:
                        sl.append(show_obj)
                        runparts.append(show_obj.tvid_prodid)
                show_list = sl
                continued_backlog = True
                my_db.action('DELETE FROM backlogparts WHERE part = ?',
                             [part_nr])

        forced = standard_backlog and force_type != NORMAL_BACKLOG

        wanted_list = []
        for cur_show_obj in show_list:
            if not cur_show_obj.paused:
                w = wanted_episodes(
                    cur_show_obj,
                    from_date,
                    make_dict=True,
                    unaired=(sickbeard.SEARCH_UNAIRED
                             and not sickbeard.UNAIRED_RECENT_SEARCH_ONLY))
                if w:
                    wanted_list.append(w)

        parts = []
        if standard_backlog and not any_torrent_enabled and not continued_backlog and not sickbeard.BACKLOG_NOFULL:
            fullbacklogparts = sum([len(w) for w in wanted_list if w
                                    ]) // sickbeard.BACKLOG_FREQUENCY
            h_part = []
            counter = 0
            for w in wanted_list:  # type: Dict
                f = False
                for season, segment in iteritems(
                        w):  # type: int, List[TVEpisode]
                    counter += 1
                    if not f:
                        h_part.append(segment[0].show_obj.tvid_prodid)
                        f = True
                if counter > fullbacklogparts:
                    counter = 0
                    parts.append(h_part)
                    h_part = []

            if h_part:
                parts.append(h_part)

        if not runparts and parts:
            runparts = parts[0]
            wanted_list = filter_list(
                lambda wi: wi and next(itervalues(wi))[0].show_obj.tvid_prodid
                in runparts, wanted_list)

        limited_wanted_list = []
        if standard_backlog and not any_torrent_enabled and runparts:
            for cur_show_obj in sickbeard.showList:
                if not cur_show_obj.paused and cur_show_obj.tvid_prodid not in runparts:
                    w = wanted_episodes(
                        cur_show_obj,
                        limited_from_date,
                        make_dict=True,
                        unaired=(sickbeard.SEARCH_UNAIRED
                                 and not sickbeard.UNAIRED_RECENT_SEARCH_ONLY))
                    if w:
                        limited_wanted_list.append(w)

        self.add_backlog_item(wanted_list, standard_backlog, limited_backlog,
                              forced, any_torrent_enabled)
        if standard_backlog and not any_torrent_enabled and limited_wanted_list:
            self.add_backlog_item(limited_wanted_list, standard_backlog, True,
                                  forced, any_torrent_enabled)

        if standard_backlog and not sickbeard.BACKLOG_NOFULL and not any_torrent_enabled and not continued_backlog:
            # noinspection SqlConstantCondition
            cl = ([], [['DELETE FROM backlogparts WHERE 1=1']])[any(parts)]
            for i, l in enumerate(parts):
                if 0 == i:
                    continue
                cl += map_list(
                    lambda m: [
                        'INSERT INTO backlogparts (part, indexer, indexerid) VALUES (?,?,?)',
                        [i + 1] + TVidProdid(m).list
                    ], l)

            if 0 < len(cl):
                my_db.mass_action(cl)

        # don't consider this an actual backlog search if we only did recent eps
        # or if we only did certain shows
        if from_date == datetime.date.fromordinal(1) and standard_backlog:
            self._set_last_backlog(cur_date)
            self._get_last_backlog()

        if standard_backlog and not any_torrent_enabled:
            self._set_last_runtime(now)

        self.amActive = False
        self._reset_progress_indicator()
Code example #16
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self.url:
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        quote_fx = (lambda t: quote(t, safe='~()*!.\''))
        for mode in search_params:
            for search_string in search_params[mode]:
                search_url = self.url
                cnt = len(items[mode])
                try:
                    for token in self._get_tokens():
                        if self.should_skip():
                            return results
                        if not token:
                            continue

                        params = dict(token=token[0], ent=token[1])
                        if 'Cache' != mode:
                            params.update(
                                {'ss': quote_fx(unidecode(search_string))})

                        data_json = None
                        vals = list(range(3, 8))
                        random.SystemRandom().shuffle(vals)
                        for x in vals[0], vals[2], vals[4]:
                            time.sleep(x)
                            params.update(dict(ts=self.ts()))
                            search_url = self.urls[
                                ('search', 'browse')['Cache' == mode]] % params
                            # decode json below because the get response can false-negative to 'no data' when there are no search results
                            html_json = self.get_url(search_url)
                            if None is not html_json:
                                data_json = json.loads(html_json)
                                if data_json or 'Cache' != mode:
                                    break
                            if self.should_skip():
                                return results

                        for item in filter_iter(
                                lambda di: re.match(
                                    '(?i).*?(tv|television)',
                                    di.get('type', '') or di.get(
                                        'category', '')) and
                            (not self.confirmed or di.get('trusted') or di.get(
                                'verified')), data_json or {}):
                            seeders, leechers, size = map_list(
                                lambda arg: try_int(*([
                                    item.get(arg[0]) if None is not item.get(
                                        arg[0]) else item.get(arg[1])
                                ]) * 2),
                                (('seeder', 'seed'), ('leecher', 'leech'),
                                 ('size', 'size')))
                            if self._reject_item(seeders, leechers):
                                continue
                            title = item.get('name') or item.get('title')
                            download_url = item.get('magnet') or item.get(
                                'magnetLink')
                            if not download_url:
                                source = item.get('site') or item.get('source')
                                link = self._link(
                                    item.get('url') or item.get('pageLink'))
                                if not source or not link:
                                    continue
                                download_url = self.urls['get'] % dict(
                                    token=token[0],
                                    src=quote_fx(source),
                                    url=b64encodestring(quote_fx(link)),
                                    ts='%(ts)s')
                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders, size))

                except generic.HaltParseException:
                    pass
                except (BaseException, Exception):
                    logger.log(
                        u'Failed to parse. Traceback: %s' %
                        traceback.format_exc(), logger.ERROR)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
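The seeders/leechers/size extraction above uses try_int(*([value]) * 2), which duplicates the looked-up value so it is passed as both the thing to convert and the fallback default, i.e. try_int(value, value). A sketch with a plausible stand-in for try_int (the real helper may differ):

# try_int(*([v]) * 2) expands to try_int(v, v): convert v to int, falling
# back to v itself when the conversion fails.
def try_int(s, default=0):  # plausible stand-in for SickGear's helper
    try:
        return int(s)
    except (TypeError, ValueError):
        return default

assert 123 == try_int(*(['123']) * 2)
assert 'n/a' == try_int(*(['n/a']) * 2)  # non-numeric value falls through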