Example #1
def sortedProviderList():
    initialList = sickbeard.providerList + sickbeard.newznabProviderList + sickbeard.torrentRssProviderList
    providerDict = dict(zip([x.get_id() for x in initialList], initialList))

    newList = []

    # add all modules in the priority list, in order
    for curModule in sickbeard.PROVIDER_ORDER:
        if curModule in providerDict:
            newList.append(providerDict[curModule])

    if not sickbeard.PROVIDER_ORDER:
        nzb = filter_list(
            lambda p: p.providerType == generic.GenericProvider.NZB,
            itervalues(providerDict))
        tor = filter_list(
            lambda p: p.providerType != generic.GenericProvider.NZB,
            itervalues(providerDict))
        newList = sorted(filter_iter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \
            sorted(filter_iter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \
            sorted(filter_iter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \
            sorted(filter_iter(lambda p: p.anime_only, tor), key=lambda v: v.get_id())

    # add any modules that are missing from that list
    for curModule in providerDict:
        if providerDict[curModule] not in newList:
            newList.append(providerDict[curModule])

    return newList
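Every example on this page calls helpers such as filter_list, filter_iter and map_list that are defined elsewhere in the project. A minimal sketch of what those compatibility shims could look like (an assumption for readability; the project's own definitions may differ):

# assumed Python 2/3 compatibility shims; the names match the calls in these examples,
# but the real project implementations may differ
def filter_list(pred, iterable):
    # concrete list result on any Python version
    return list(filter(pred, iterable))


def filter_iter(pred, iterable):
    # lazy variant (an iterator on Python 3) for places where a list is not needed
    return filter(pred, iterable)


def map_list(func, iterable):
    return list(map(func, iterable))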
Example #2
def getTorrentRssProviderList(data):
    providerList = filter_list(
        lambda _x: _x, [makeTorrentRssProvider(x) for x in data.split('!!!')])

    seen_values = set()
    providerListDeduped = []
    for d in providerList:
        value = d.name
        if value not in seen_values:
            providerListDeduped.append(d)
            seen_values.add(value)

    # return only truthy, de-duplicated providers
    return filter_list(lambda _x: _x, providerListDeduped)
Example #3
    def _active_state(self, ids=None):
        # type: (Optional[list]) -> list
        """
        Fetch state of items, return items that are actually downloading or seeding
        :param ids: Optional id(s) to get state info for. None to get all
        :return: Zero or more object(s) assigned with state `down`loading or `seed`ing
        """
        tasks = self._tinf(ids)
        downloaded = (lambda item, d=0: item.get('size_downloaded') or d
                      )  # bytes
        wanted = (
            lambda item: item.get('wanted')
        )  # wanted will == tally/downloaded if all files are selected
        base_state = (lambda t, d, tx, f: dict(
            id=t['id'],
            title=t['title'],
            total_size=t.get('size') or 0,
            added_ts=d.get('create_time'),
            last_completed_ts=d.get('completed_time'),
            last_started_ts=d.get('started_time'),
            seed_elapsed_secs=d.get('seedelapsed'),
            wanted_size=sum(
                map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)
            ) or None,
            wanted_down=sum(
                map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)
            ) or None,
            tally_down=downloaded(tx),
            tally_up=tx.get('size_uploaded'),
            state='done'
            if re.search('finish', t['status']) else ('seed', 'down')[any(
                filter_list(
                    lambda tf: wanted(tf) and
                    (downloaded(tf, -1) < tf.get('size', 0)), f))]))
        # only available during "download" and "seeding"
        file_list = (lambda t: t.get('additional', {}).get('file', {}))
        valid_stat = (lambda ti: not ti.get('error') and isinstance(
            ti.get('status'), string_types) and sum(
                map_list(lambda tf: wanted(tf) and downloaded(tf) or 0,
                         file_list(ti))))
        result = map_list(
            lambda t: base_state(t,
                                 t.get('additional', {}).get('detail', {}),
                                 t.get('additional', {}).get('transfer', {}),
                                 file_list(t)),
            filter_list(
                lambda t: t['status'] in
                ('downloading', 'seeding', 'finished') and valid_stat(t),
                tasks))

        return result
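A caller could consume the state records built above, for instance to separate downloading from seeding tasks. A hypothetical sketch (the client instance is assumed; the 'state' key is the one set by base_state above, and filter_list is the helper sketched after Example #1):

# hypothetical usage of the list returned by _active_state()
states = client._active_state()
downloading = filter_list(lambda s: 'down' == s['state'], states)
seeding = filter_list(lambda s: 'seed' == s['state'], states)
print('%d downloading, %d seeding' % (len(downloading), len(seeding)))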
Example #4
    def _active_state(self, ids=None):
        # type: (Optional[Union[AnyStr, list]]) -> list
        """
        Fetch state of items, return items that are actually downloading or seeding
        :param ids: Optional id(s) to get state info for. None to get all
        :return: Zero or more object(s) assigned with state `down`loading or `seed`ing
        """
        downloaded = (lambda item: float(item.get('progress') or 0) * (item.get('size') or 0))  # bytes
        wanted = (lambda item: item.get('priority'))  # wanted will == tally/downloaded if all files are selected
        base_state = (lambda t, gp, f: dict(
            id=t['hash'], title=t['name'], total_size=gp.get('total_size') or 0,
            added_ts=gp.get('addition_date'), last_completed_ts=gp.get('completion_date'),
            last_started_ts=None, seed_elapsed_secs=gp.get('seeding_time'),
            wanted_size=sum(map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)) or None,
            wanted_down=sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)) or None,
            tally_down=sum(map_list(lambda tf: downloaded(tf) or 0, f)) or None,
            tally_up=gp.get('total_uploaded'),
            state='done' if 'pausedUP' == t.get('state') else ('down', 'seed')['up' in t.get('state').lower()]
        ))
        file_list = (lambda ti: self._client_request(
            ('torrents/files', 'query/propertiesFiles/%s' % ti['hash'])[not self.api_ns],
            params=({'hash': ti['hash']}, {})[not self.api_ns], json=True) or {})
        valid_stat = (lambda ti: not self._ignore_state(ti)
                      and sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti))))
        result = map_list(lambda t: base_state(t, self._tinf(t['hash'])[0], file_list(t)),
                          filter_list(lambda t: re.search('(?i)queue|stall|(up|down)load|pausedUP', t['state']) and
                                      valid_stat(t), self._tinf(ids, False)))

        return result
Example #5
    def get_torrents(self, view='main'):
        """Get list of all torrents in specified view

        @return: list of L{Torrent} instances

        @rtype: list

        @todo: add validity check for specified view
        """
        self.torrents = []
        retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self), torrent_methods)
        mc = rpc.Multicall(self)

        if self.method_exists('d.multicall2'):
            mc.add('d.multicall2', '', view, 'd.hash=',
                   *map_list(lambda m2: ((getattr(m2, 'aliases') or [''])[-1] or m2.rpc_call) + '=', retriever_methods))
        else:
            mc.add('d.multicall', view, 'd.get_hash=',
                   *map_list(lambda m1: m1.rpc_call + '=', retriever_methods))

        results = mc.call()[0]  # only sent one call, only need first result

        for result in results:
            self.torrents.append(
                Torrent(self, info_hash=result[0],
                        **dict((mc.varname, rpc.process_result(mc, r))
                               for (mc, r) in list(zip(retriever_methods, result[1:])))))  # result[0]=info_hash

        self._manage_torrent_cache()
        return self.torrents
Example #6
    def get_trackers(self):
        """Get list of Tracker instances for given torrent.

        @return: L{Tracker} instances
        @rtype: list

        @note: also assigns return value to self.trackers
        """
        self.trackers = []
        retriever_methods = filter_list(
            lambda m: m.is_retriever() and m.is_available(self._rt_obj),
            tracker_methods)
        mc = rpc.Multicall(self)

        # need to leave 2nd arg empty (dunno why)
        mc.add('t.multicall', self.info_hash, '',
               *[method.rpc_call + '=' for method in retriever_methods])

        results = mc.call()[0]  # only sent one call, only need first result

        for result in results:
            results_dict = {}
            # build results_dict
            for mc, r in zip(retriever_methods, result):
                results_dict[mc.varname] = rpc.process_result(mc, r)

            self.trackers.append(
                Tracker(self._rt_obj, self.info_hash, **results_dict))

        return self.trackers
Example #7
    def _add_torrent(self, cmd, data):
        # type: (AnyStr, TorrentSearchResult) -> Optional[bool]
        """
        Create client task
        :param cmd: Command for client API v6, converted up for newer API
        :param data: A populated search result object
        :return: True if created, else Falsy if nothing created
        """
        if self._tinf(data.hash):
            logger.log('Could not create task, the hash is already in use', logger.ERROR)
            return

        label = sickbeard.TORRENT_LABEL.replace(' ', '_')
        params = dict(
            ([('category', label), ('label', label)], [])[not label]
            + ([('paused', ('false', 'true')[bool(sickbeard.TORRENT_PAUSED)])], [])[not sickbeard.TORRENT_PAUSED]
            + ([('savepath', sickbeard.TORRENT_PATH)], [])[not sickbeard.TORRENT_PATH]
        )

        if 'download' == cmd:
            params.update(dict(urls=data.url))
            kwargs = dict(post_data=params)
        else:
            kwargs = dict(post_data=params, files={'torrents': ('%s.torrent' % data.name, data.content)})

        task_stamp = int(timestamp_near(datetime.now()))
        response = self._client_request(('torrents/add', 'command/%s' % cmd)[not self.api_ns], **kwargs)

        if True is response:
            for s in (1, 3, 5, 10, 15, 30, 60):
                if filter_list(lambda t: task_stamp <= t['addition_date'], self._tinf(data.hash)):
                    return data.hash
                time.sleep(s)
            return True
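The params construction above leans on the (value_if_condition_false, value_if_condition_true)[condition] indexing idiom that recurs throughout these examples. A standalone illustration with made-up values:

# illustration of the (on_false, on_true)[condition] idiom used to build params above
label = 'tv_shows'
paused = False
params = dict(
    ([('category', label), ('label', label)], [])[not label]           # pairs kept only when a label is set
    + ([('paused', ('false', 'true')[bool(paused)])], [])[not paused]  # dropped entirely when not paused
)
print(params)  # {'category': 'tv_shows', 'label': 'tv_shows'}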
Example #8
    def _find_season_quality(self, title, torrent_id, ep_number):
        """ Return the modified title of a Season Torrent with the quality found inspecting torrent file list """

        if not self.url:
            return False

        quality = Quality.UNKNOWN
        file_name = None
        data = self.get_url('%sajax_details_filelist.php?id=%s' % (self.url, torrent_id))
        if self.should_skip() or not data:
            return None

        files_list = re.findall('<td.+>(.*?)</td>', data)

        if not files_list:
            logger.log(u'Unable to get the torrent file list for ' + title, logger.ERROR)

        video_files = filter_list(lambda x: x.rpartition('.')[2].lower() in mediaExtensions, files_list)

        # Filtering SingleEpisode/MultiSeason Torrent
        if ep_number > len(video_files) or float(ep_number * 1.1) < len(video_files):
            logger.log(u'Result %s has episode %s and total episodes retrieved in torrent are %s'
                       % (title, str(ep_number), str(len(video_files))), logger.DEBUG)
            logger.log(u'Result %s seems to be a single episode or multiseason torrent, skipping result...'
                       % title, logger.DEBUG)
            return None

        if Quality.UNKNOWN != Quality.sceneQuality(title):
            return title

        for file_name in video_files:
            quality = Quality.sceneQuality(os.path.basename(file_name))
            if Quality.UNKNOWN != quality:
                break

        if None is not file_name and Quality.UNKNOWN == quality:
            quality = Quality.assumeQuality(os.path.basename(file_name))

        if Quality.UNKNOWN == quality:
            logger.log(u'Unable to obtain a Season Quality for ' + title, logger.DEBUG)
            return None

        try:
            my_parser = NameParser(show_obj=self.show_obj, indexer_lookup=False)
            parse_result = my_parser.parse(file_name)
        except (InvalidNameException, InvalidShowException):
            return None

        logger.log(u'Season quality for %s is %s' % (title, Quality.qualityStrings[quality]), logger.DEBUG)

        if parse_result.series_name and parse_result.season_number:
            title = '%s S%02d %s' % (parse_result.series_name,
                                     int(parse_result.season_number),
                                     self._reverse_quality(quality))

        return title
Example #9
    def regulate_title(item, t_param):

        if 'tags' not in item or not any(item['tags']):
            return t_param

        t = ['']
        bl = r'[*\[({]+\s*'
        br = r'\s*[})\]*]+'
        title = re.sub('(.*?)((?i)%sproper%s)(.*)' % (bl, br), r'\1\3\2',
                       item['groupName'])
        for r in (r'\s+-\s+', r'(?:19|20)\d\d(?:\-\d\d\-\d\d)?',
                  r'S\d\d+(?:E\d\d+)?'):
            m = re.findall('(.*%s)(.*)' % r, title)
            if any(m) and len(m[0][0]) > len(t[0]):
                t = m[0]
        t = (tuple(title), t)[any(t)]

        tag_str = '_'.join(item['tags'])
        tags = [
            re.findall(x, tag_str, flags=re.X)
            for x in (r'(?i)%sProper%s|\bProper\b$' % (bl, br),
                      r'(?i)\d{3,4}(?:[pi]|hd)', '''
                 (?i)(hr.ws.pdtv|blu.?ray|hddvd|
                 pdtv|hdtv|dsr|tvrip|web.?(?:dl|rip)|dvd.?rip|b[r|d]rip|mpeg-?2)
                 ''', '''
                 (?i)([hx].?26[45]|divx|xvid)
                 ''', '''
                 (?i)(avi|mkv|mp4|sub(?:b?ed|pack|s))
                 ''')
        ]

        title = (
            '%s`%s' %
            (re.sub(
                '|'.join(
                    ['|'.join([re.escape(y) for y in x])
                     for x in tags if x]).strip('|'), '', t[-1]),
             re.sub(r'(?i)(\d{3,4})hd', r'\1p', '`'.join(
                 ['`'.join(x) for x in tags[:-1]]).rstrip('`')) +
             ('', '`hdtv')[not any(tags[2])] +
             ('', '`x264')[not any(tags[3])]))
        for r in [(r'(?i)(?:\W(?:Series|Season))?\W(Repack)\W', r'`\1`'),
                  ('(?i)%s(Proper)%s' % (bl, br), r'`\1`'),
                  (r'%s\s*%s' % (bl, br), '`')]:
            title = re.sub(r[0], r[1], title)

        grp = filter_list(lambda rn: '.release' in rn.lower(), item['tags'])
        title = '%s%s-%s' % (
            ('', t[0])[1 < len(t)], title,
            (any(grp) and grp[0] or 'nogrp').upper().replace('.RELEASE', ''))

        for r in [(r'\s+[-]?\s+|\s+`|`\s+', '`'), ('`+', '.')]:
            title = re.sub(r[0], r[1], title)

        title += +any(tags[4]) and ('.%s' % tags[4][0]) or ''
        return title
Example #10
 def get_html(_resp):
     for cur_item in _resp:
         if isinstance(cur_item, list):
             _html = filter_list(
                 lambda s: isinstance(s, string_types) and
                 'password' in s, cur_item)
             if not _html:
                 _html = get_html(cur_item)
             if _html:
                 return _html
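get_html recurses into nested lists until it finds a string mentioning 'password'. A quick illustration with a made-up response structure (string_types and filter_list are assumed from the project's compatibility helpers, as sketched after Example #1):

# made-up nested response; get_html() digs down to the string containing 'password'
resp = [[0, [['<input type="password" name="pw">', 'other']]]]
print(get_html(resp))  # ['<input type="password" name="pw">']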
Example #11
def getNewznabProviderList(data):
    # type: (AnyStr) -> List
    defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')]
    providerList = make_unique_list(filter_list(lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')]),
                                    defaultList)

    providerDict = dict(zip([x.name for x in providerList], providerList))

    for curDefault in defaultList:
        if not curDefault:
            continue

        if curDefault.name not in providerDict:
            curDefault.default = True
            providerList.append(curDefault)
        else:
            providerDict[curDefault.name].default = True
            for k in ('name', 'url', 'needs_auth', 'search_mode', 'search_fallback',
                      'enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog',
                      'server_type'):
                setattr(providerDict[curDefault.name], k, getattr(curDefault, k))

    return filter_list(lambda _x: _x, providerList)
Example #12
def getNewznabProviderList(data):
    defaultList = [
        makeNewznabProvider(x)
        for x in getDefaultNewznabProviders().split('!!!')
    ]
    providerList = filter_list(
        lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')])

    seen_values = set()
    providerListDeduped = []
    for d in providerList:
        value = d.name
        if value not in seen_values:
            providerListDeduped.append(d)
            seen_values.add(value)

    providerList = providerListDeduped
    providerDict = dict(zip([x.name for x in providerList], providerList))

    for curDefault in defaultList:
        if not curDefault:
            continue

        if curDefault.name not in providerDict:
            curDefault.default = True
            providerList.append(curDefault)
        else:
            providerDict[curDefault.name].default = True
            for k in ('name', 'url', 'needs_auth', 'search_mode',
                      'search_fallback', 'enable_recentsearch',
                      'enable_backlog', 'enable_scheduled_backlog',
                      'server_type'):
                setattr(providerDict[curDefault.name], k,
                        getattr(curDefault, k))

    return filter_list(lambda _x: _x, providerList)
Example #13
 def logged_in(self, y):
     if all([
             None is y or 'logout' in y,
             bool(
                 filter_list(lambda c: 'remember_web_' in c,
                             iterkeys(self.session.cookies)))
     ]):
         if None is not y:
             self.shows = dict(
                 re.findall(r'<option value="(\d+)">(.*?)</option>', y))
             for k, v in iteritems(self.shows):
                 self.shows[k] = sanitize_scene_name(
                     html_unescape(unidecode(decode_str(v))))
         return True
     return False
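The shows dict above is built by scraping option tags from the page body; a standalone illustration of that parsing step with made-up markup (the later sanitize/unescape pass is omitted):

import re

# made-up option markup, parsed the same way as in logged_in()
y = '<option value="1">Some Show</option><option value="2">Other Show</option>'
shows = dict(re.findall(r'<option value="(\d+)">(.*?)</option>', y))
print(shows)  # {'1': 'Some Show', '2': 'Other Show'}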
Example #14
    def listPropers(self, date=None):
        """

        :param date: date
        :type date: datetime.date
        :return:
        :rtype:
        """
        my_db = self.get_db()
        sql = "SELECT * FROM provider_cache WHERE name LIKE '%.PROPER.%' OR name LIKE '%.REPACK.%' " \
              "OR name LIKE '%.REAL.%' AND provider = ?"

        if date:
            sql += ' AND time >= ' + str(int(time.mktime(date.timetuple())))

        return filter_list(lambda x: x['indexerid'] != 0, my_db.select(sql, [self.providerID]))
Example #15
def determineReleaseName(dir_name=None, nzb_name=None):
    # type: (AnyStr, AnyStr) -> Union[AnyStr, None]
    """Determine a release name from an nzb and/or folder name
    :param dir_name: dir name
    :param nzb_name: nzb name
    :return: None or release name
    """

    if None is not nzb_name:
        logger.log(u'Using nzb name for release name.')
        return nzb_name.rpartition('.')[0]

    if not dir_name or not ek.ek(os.path.isdir, dir_name):
        return None

    # try to get the release name from nzb/nfo
    file_types = ["*.nzb", "*.nfo"]

    for search in file_types:

        reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)
        files = [
            file_name for file_name in ek.ek(os.listdir, dir_name)
            if ek.ek(os.path.isfile, ek.ek(os.path.join, dir_name, file_name))
        ]
        results = filter_list(reg_expr.search, files)

        if 1 == len(results):
            found_file = ek.ek(os.path.basename, results[0])
            found_file = found_file.rpartition('.')[0]
            if pass_wordlist_checks(found_file):
                logger.log(u"Release name (" + found_file +
                           ") found from file (" + results[0] + ")")
                return found_file.rpartition('.')[0]

    # If that fails, we try the folder
    folder = ek.ek(os.path.basename, dir_name)
    if pass_wordlist_checks(folder):
        # NOTE: Multiple failed downloads will change the folder name.
        # (e.g., appending #s)
        # Should we handle that?
        logger.log(u"Folder name (" + folder +
                   ") appears to be a valid release name. Using it.")
        return folder

    return None
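The wildcard matching above uses fnmatch.translate to turn a shell-style pattern into a regex; a small standalone check with made-up file names (filter_list as sketched after Example #1):

import fnmatch
import re

# translate '*.nzb' into a case-insensitive regex, as done in determineReleaseName
reg_expr = re.compile(fnmatch.translate('*.nzb'), re.IGNORECASE)
files = ['Show.S01E01.NZB', 'Show.S01E01.nfo', 'sample.mkv']
print(filter_list(reg_expr.search, files))  # ['Show.S01E01.NZB']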
Example #16
 def _client_has(tasks, uri=None, files=None):
     # type: (list, Optional[dict], Optional[dict]) -> list
     """
     Check if uri or file exists in task list
     :param tasks: Tasks list
     :param uri: URI to check against
     :param files: File to check against
     :return: Zero or more found record(s).
     """
     result = []
     if uri or files:
         u = isinstance(uri, dict) and (uri.get('uri', '')
                                        or '').lower() or None
         f = isinstance(files, dict) and (files.get(
             'file', [''])[0]).lower() or None
         result = filter_list(
             lambda t: u and t['additional']['detail']['uri'].lower() == u
             or f and t['additional']['detail']['uri'].lower() in f, tasks)
     return result
Example #17
    def _add_torrent(self, uri=None, files=None):
        # type: (Optional[dict], Optional[dict]) -> Optional[Union[AnyStr, bool]]
        """
        Create client task
        :param uri: URI param for client API
        :param files: file param for client API
        :return: Id of task in client, True if created but no id found, else Falsy if nothing created
        """
        if self._testmode:
            # noinspection PyUnresolvedReferences
            return self._testid

        tasks = self._tinf()
        if self._client_has(tasks, uri=uri):
            return self._error(
                'Could not create task, the magnet URI is in use')
        if self._client_has(tasks, files=files):
            return self._error(
                'Could not create task, torrent file already added')

        params = dict()
        if uri:
            params.update(uri)
        if 1 < self._task_version and sickbeard.TORRENT_PATH:
            params['destination'] = re.sub(r'^/(volume\d*/)?', '',
                                           sickbeard.TORRENT_PATH)

        task_stamp = int(SGDatetime.now().totimestamp(default=0))
        response = self._client_request('create', t_params=params, files=files)
        # noinspection PyUnresolvedReferences
        if response and response.get('success'):
            for s in (1, 3, 5, 10, 15, 30, 60):
                tasks = filter_list(
                    lambda t: task_stamp <= t['additional']['detail'][
                        'create_time'], self._tinf())
                try:
                    return str(
                        self._client_has(tasks, uri, files)[0].get('id'))
                except IndexError:
                    time.sleep(s)
            return True
Example #18
    def get_files(self):
        """Get list of File instances for given torrent.

        @return: L{File} instances
        @rtype: list

        @note: also assigns return value to self.files
        """

        self.files = []
        retriever_methods = filter_list(
            lambda m: m.is_retriever() and m.is_available(self._rt_obj),
            file_methods)
        mc = rpc.Multicall(self)

        # 2nd arg can be anything, but it'll return all files in torrent
        mc.add('f.multicall', self.info_hash, '',
               *[method.rpc_call + '=' for method in retriever_methods])

        results = mc.call()[0]  # only sent one call, only need first result

        offset_method_index = retriever_methods.index(
            rpc.find_method('f.get_offset'))

        # make a list of the offsets of all the files, sort appropriately
        offset_list = sorted([r[offset_method_index] for r in results])

        for result in results:
            results_dict = {}
            # build results_dict
            for mc, r in zip(retriever_methods, result):
                results_dict[mc.varname] = rpc.process_result(mc, r)

            # get proper index positions for each file (based on the file
            # offset)
            f_index = offset_list.index(results_dict['offset'])

            self.files.append(
                File(self._rt_obj, self.info_hash, f_index, **results_dict))

        return self.files
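The per-file index above comes from sorting every file's byte offset and then looking up each offset's position; a tiny illustration with made-up offsets:

# made-up byte offsets of three files within one torrent
offsets = [2048, 0, 1024]
offset_list = sorted(offsets)  # [0, 1024, 2048]
for off in offsets:
    print(off, '-> file index', offset_list.index(off))
# 2048 -> file index 2, 0 -> file index 0, 1024 -> file index 1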
Example #19
 def _migrate_v15(self):
     try:
         neb = filter_list(lambda p: 'Nebulance' in p.name, sickbeard.providers.sortedProviderList())[0]
     except (BaseException, Exception):
         return
     # get the old settings from the file and store them in the new variable names
     old_id = 'transmithe_net'
     old_id_uc = old_id.upper()
     neb.enabled = bool(check_setting_int(self.config_obj, old_id_uc, old_id, 0))
     setattr(neb, 'username', check_setting_str(self.config_obj, old_id_uc, old_id + '_username', ''))
     neb.password = check_setting_str(self.config_obj, old_id_uc, old_id + '_password', '')
     neb.minseed = check_setting_int(self.config_obj, old_id_uc, old_id + '_minseed', 0)
     neb.minleech = check_setting_int(self.config_obj, old_id_uc, old_id + '_minleech', 0)
     neb.freeleech = bool(check_setting_int(self.config_obj, old_id_uc, old_id + '_freeleech', 0))
     neb.enable_recentsearch = bool(check_setting_int(
         self.config_obj, old_id_uc, old_id + '_enable_recentsearch', 1)) or not getattr(neb, 'supports_backlog')
     neb.enable_backlog = bool(check_setting_int(self.config_obj, old_id_uc, old_id + '_enable_backlog', 1))
     neb.search_mode = check_setting_str(self.config_obj, old_id_uc, old_id + '_search_mode', 'eponly')
     neb.search_fallback = bool(check_setting_int(self.config_obj, old_id_uc, old_id + '_search_fallback', 0))
     neb.seed_time = check_setting_int(self.config_obj, old_id_uc, old_id + '_seed_time', '')
     neb._seed_ratio = check_setting_str(self.config_obj, old_id_uc, old_id + '_seed_ratio', '')
Example #20
    def update_providers(needed=common.NeededQualities(need_all=True)):
        # type: (sickbeard.common.NeededQualities) -> None
        """

        :param needed: needed class
        :type needed: common.NeededQualities
        """
        orig_thread_name = threading.currentThread().name
        threads = []

        providers = filter_list(
            lambda x: x.is_active() and x.enable_recentsearch,
            sickbeard.providers.sortedProviderList())
        for cur_provider in providers:
            if not cur_provider.cache.should_update():
                continue

            if not threads:
                logger.log('Updating provider caches with recent upload data')

            # spawn a thread for each provider to save time waiting for slow response providers
            threads.append(
                threading.Thread(target=cur_provider.cache.updateCache,
                                 kwargs={'needed': needed},
                                 name='%s :: [%s]' %
                                 (orig_thread_name, cur_provider.name)))
            # start the thread we just created
            threads[-1].start()

        if not len(providers):
            logger.log(
                'No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes',
                logger.WARNING)

        if threads:
            # wait for all threads to finish
            for t in threads:
                t.join()

            logger.log('Finished updating provider caches')
Example #21
    def _client_request(self, method, t_id=None, t_params=None, files=None):
        # type: (AnyStr, Optional[AnyStr], Optional[dict], Optional[dict]) -> Union[bool, list, object]
        """
        Send a request to client
        :param method: Api task to invoke
        :param t_id: Optional id to perform task on
        :param t_params: Optional additional task request parameters
        :param files: Optional file to send
        :return: True if t_id success, DS API response object if t_params success, list of error items,
         else Falsy if failure
        """
        if self._testmode:
            return True

        params = dict(method=method,
                      api='SYNO.DownloadStation.Task',
                      version='1',
                      _sid=self.auth)
        if t_id:
            params['id'] = t_id
        if t_params:
            params.update(t_params)

        self._errmsg = None
        response = {}
        kw_args = (dict(method='get', params=params),
                   dict(method='post', data=params))[method in ('create', )]
        kw_args.update(dict(files=files))
        try:
            response = self._request(**kw_args).json()
            if not response.get('success'):
                raise ValueError
        except (BaseException, Exception):
            return self._error_task(response)

        if None is not t_id and None is t_params and 'create' != method:
            return filter_list(lambda r: r.get('error'),
                               response.get('data', {})) or True

        return response
Example #22
    def _tinf(self, ids=None, err=False):
        # type: (Optional[list], bool) -> list
        """
        Fetch client task information
        :param ids: Optional id(s) to get task info for. None to get all task info
        :param err: Optional return error dict instead of empty array
        :return: Zero or more task object(s) from response
        """
        result = []
        rids = (ids if isinstance(ids, (list, type(None))) else
                [x.strip() for x in ids.split(',')]) or [None]
        getinfo = None is not ids
        for rid in rids:
            try:
                if not self._testmode:
                    # noinspection PyTypeChecker
                    tasks = self._client_request(
                        ('list', 'getinfo')[getinfo],
                        t_id=rid,
                        t_params=dict(additional='detail,file,transfer'
                                      ))['data']['tasks']
                else:
                    # noinspection PyUnresolvedReferences
                    tasks = (filter_list(lambda d: d.get('id') == rid,
                                         self._testdata),
                             self._testdata)[not rid]
                result += tasks and (isinstance(tasks, list) and tasks or (isinstance(tasks, dict) and [tasks])) \
                    or ([], [{'error': True, 'id': rid}])[err]
            except (BaseException, Exception):
                if getinfo:
                    result += [dict(error=True, id=rid)]
        for t in filter_iter(
                lambda d: isinstance(d.get('title'), string_types) and d.get(
                    'title'), result):
            t['title'] = unquote_plus(t.get('title'))

        return result
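The rids expression above accepts None, a list of ids, or a comma-separated id string; a standalone illustration of that normalisation with made-up inputs:

# illustration of the id normalisation at the top of _tinf()
for ids in (None, ['1', '2'], ' 3, 4 ,5'):
    rids = (ids if isinstance(ids, (list, type(None))) else
            [x.strip() for x in ids.split(',')]) or [None]
    print(ids, '->', rids)
# None -> [None]; ['1', '2'] -> ['1', '2']; ' 3, 4 ,5' -> ['3', '4', '5']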
Example #23
    def _authorised(self, **kwargs):
        result = False
        if self.digest and 'None' not in self.digest and 'login_chk' in self.urls:
            digest = [x[::-1] for x in self.digest[::-1].rpartition('=')]
            self.digest = digest[2] + digest[1] + quote(unquote(digest[0]))
            self.session.cookies = cookiejar_from_dict(
                dict({digest[2]: quote(unquote(digest[0]))}))
            html = self.get_url(self.urls['login_chk'], skip_auth=True)
            result = html and 'RSS' in html and 'type="password"' not in html

        if not result and not self.failure_count:
            if self.url and self.digest:
                self.get_url(self.urls['logout'],
                             skip_auth=True,
                             post_data={
                                 'submit.x': 24,
                                 'submit.y': 11
                             })
            self.digest = ''
            self.session.cookies.clear()
            json = self.get_url(self.urls['login_1'],
                                skip_auth=True,
                                post_data={'username': self.username},
                                parse_json=True)
            resp = filter_list(lambda l: isinstance(l, list),
                               json.get('Fs', []))

            def get_html(_resp):
                for cur_item in _resp:
                    if isinstance(cur_item, list):
                        _html = filter_list(
                            lambda s: isinstance(s, string_types) and
                            'password' in s, cur_item)
                        if not _html:
                            _html = get_html(cur_item)
                        if _html:
                            return _html

            params = {}
            html = get_html(resp)
            if html:
                tags = re.findall(r'(?is)(<input[^>]*?name=[\'"][^\'"]+[^>]*)',
                                  html[0])
                attrs = [[(re.findall(r'(?is)%s=[\'"]([^\'"]+)' % attr, x)
                           or [''])[0] for attr in ['type', 'name', 'value']]
                         for x in tags]
                for itype, name, value in attrs:
                    if 'password' in [itype, name]:
                        params[name] = self.password
                    if name not in ('username',
                                    'password') and 'password' != itype:
                        params.setdefault(name, value)

            if params:
                html = self.get_url(self.urls['login_2'],
                                    skip_auth=True,
                                    post_data=params)
                if html and 'RSS' in html:
                    self.digest = None
                    if self.session.cookies.get('inSpeed_speedian'):
                        self.digest = 'inSpeed_speedian=%s' % self.session.cookies.get(
                            'inSpeed_speedian')
                    sickbeard.save_config()
                    result = True
                    logger.log('Cookie details for %s updated.' % self.name,
                               logger.DEBUG)
            elif not self.failure_count:
                logger.log(
                    'Invalid cookie details for %s and login failed. Check settings'
                    % self.name, logger.ERROR)
        return result
Example #24
def get_url(
        url,  # type: AnyStr
        post_data=None,  # type: Optional
        params=None,  # type: Optional
        headers=None,  # type: Optional[Dict]
        timeout=30,  # type: int
        session=None,  # type: Optional[requests.Session]
        parse_json=False,  # type: bool
        raise_status_code=False,  # type: bool
        raise_exceptions=False,  # type: bool
        as_binary=False,  # type: bool
        encoding=None,  # type: Optional[AnyStr]
        **kwargs):
    # type: (...) -> Optional[Union[AnyStr, bool, bytes, Dict, Tuple[Union[Dict, List], requests.Session]]]
    """
    Either
    1) Returns a byte-string retrieved from the provided url.
    2) Returns True/False for success when kwarg 'savename' is set to a file pathname.
    3) Returns a (response, session) tuple when kwarg 'resp_sess' is True.
    4) Returns a JSON dict if parse_json=True.

    :param url: url
    :param post_data: post data
    :param params:
    :param headers: headers to add
    :param timeout: timeout
    :param session: optional session object
    :param parse_json: return JSON Dict
    :param raise_status_code: raise exception for status codes
    :param raise_exceptions: raise exceptions
    :param as_binary: return bytes instead of text
    :param encoding: overwrite encoding return header if as_binary is False
    :param kwargs:
    :return:
    """

    response_attr = ('text', 'content')[as_binary]

    # selectively mute some errors
    mute = filter_list(lambda x: kwargs.pop(x, False), [
        'mute_connect_err', 'mute_read_timeout', 'mute_connect_timeout',
        'mute_http_error'
    ])

    # reuse or instantiate request session
    resp_sess = kwargs.pop('resp_sess', None)
    if None is session:
        session = CloudflareScraper.create_scraper()
        session.headers.update({'User-Agent': USER_AGENT})

    # download and save file or simply fetch url
    savename = kwargs.pop('savename', None)
    if savename:
        # session streaming
        session.stream = True

    if not kwargs.pop('nocache', False):
        cache_dir = CACHE_DIR or get_system_temp_dir()
        session = CacheControl(sess=session,
                               cache=caches.FileCache(
                                   ek.ek(os.path.join, cache_dir, 'sessions')))

    provider = kwargs.pop('provider', None)

    # handle legacy uses of `json` param
    if kwargs.get('json'):
        parse_json = kwargs.pop('json')

    # session master headers
    req_headers = {
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip,deflate'
    }
    if headers:
        req_headers.update(headers)
    if hasattr(session, 'reserved') and 'headers' in session.reserved:
        req_headers.update(session.reserved['headers'] or {})
    session.headers.update(req_headers)

    # session parameters
    session.params = params

    # session ssl verify
    session.verify = False

    # don't trust os environments (auth, proxies, ...)
    session.trust_env = False

    response = None
    try:
        # sanitise url
        parsed = list(urlparse(url))
        parsed[2] = re.sub('/{2,}', '/',
                           parsed[2])  # replace two or more / with one
        url = urlunparse(parsed)

        # session proxies
        if PROXY_SETTING:
            (proxy_address, pac_found) = proxy_setting(PROXY_SETTING, url)
            msg = '%sproxy for url: %s' % (('', 'PAC parsed ')[pac_found], url)
            if None is proxy_address:
                logger.debug('Proxy error, aborted the request using %s' % msg)
                return
            elif proxy_address:
                logger.debug('Using %s' % msg)
                session.proxies = {
                    'http': proxy_address,
                    'https': proxy_address
                }

        # decide if we get or post data to server
        if post_data or 'post_json' in kwargs:
            if True is post_data:
                post_data = None

            if post_data:
                kwargs.setdefault('data', post_data)

            if 'post_json' in kwargs:
                kwargs.setdefault('json', kwargs.pop('post_json'))

            response = session.post(url, timeout=timeout, **kwargs)
        else:
            response = session.get(url, timeout=timeout, **kwargs)
            if response.ok and not response.content and 'url=' in response.headers.get(
                    'Refresh', '').lower():
                url = response.headers.get('Refresh').lower().split(
                    'url=')[1].strip('/')
                if not url.startswith('http'):
                    parsed[2] = '/%s' % url
                    url = urlunparse(parsed)
                response = session.get(url, timeout=timeout, **kwargs)

        # if encoding is not in header try to use best guess
        # ignore downloads with savename
        if not savename and not as_binary:
            if encoding:
                response.encoding = encoding
            elif not response.encoding or 'charset' not in response.headers.get(
                    'Content-Type', ''):
                response.encoding = response.apparent_encoding

        # noinspection PyProtectedMember
        if provider and provider._has_signature(response.text):
            return getattr(response, response_attr)

        if raise_status_code:
            response.raise_for_status()

        if not response.ok:
            http_err_text = 'CloudFlare Ray ID' in response.text and \
                            'CloudFlare reports, "Website is offline"; ' or ''
            if response.status_code in http_error_code:
                http_err_text += http_error_code[response.status_code]
            elif response.status_code in range(520, 527):
                http_err_text += 'Origin server connection failure'
            else:
                http_err_text = 'Custom HTTP error code'
                if 'mute_http_error' not in mute:
                    logger.debug(
                        u'Response not ok. %s: %s from requested url %s' %
                        (response.status_code, http_err_text, url))
            return

    except requests.exceptions.HTTPError as e:
        if raise_status_code:
            response.raise_for_status()
        logger.warning(u'HTTP error %s while loading URL%s' %
                       (e.errno, _maybe_request_url(e)))
        return
    except requests.exceptions.ConnectionError as e:
        if 'mute_connect_err' not in mute:
            logger.warning(u'Connection error msg:%s while loading URL%s' %
                           (ex(e), _maybe_request_url(e)))
        if raise_exceptions:
            raise e
        return
    except requests.exceptions.ReadTimeout as e:
        if 'mute_read_timeout' not in mute:
            logger.warning(u'Read timed out msg:%s while loading URL%s' %
                           (ex(e), _maybe_request_url(e)))
        if raise_exceptions:
            raise e
        return
    except (requests.exceptions.Timeout, socket.timeout) as e:
        if 'mute_connect_timeout' not in mute:
            logger.warning(
                u'Connection timed out msg:%s while loading URL %s' %
                (ex(e), _maybe_request_url(e, url)))
        if raise_exceptions:
            raise e
        return
    except (BaseException, Exception) as e:
        if ex(e):
            logger.warning(
                u'Exception caught while loading URL %s\r\nDetail... %s\r\n%s'
                % (url, ex(e), traceback.format_exc()))
        else:
            logger.warning(
                u'Unknown exception while loading URL %s\r\nDetail... %s' %
                (url, traceback.format_exc()))
        if raise_exceptions:
            raise e
        return

    if parse_json:
        try:
            data_json = response.json()
            if resp_sess:
                return ({}, data_json)[isinstance(data_json,
                                                  (dict, list))], session
            return ({}, data_json)[isinstance(data_json, (dict, list))]
        except (TypeError, Exception) as e:
            logger.warning(u'JSON data issue from URL %s\r\nDetail... %s' %
                           (url, ex(e)))
            if raise_exceptions:
                raise e
            return None

    if savename:
        try:
            write_file(savename,
                       response,
                       raw=True,
                       raise_exceptions=raise_exceptions)
        except (BaseException, Exception) as e:
            if raise_exceptions:
                raise e
            return
        return True

    if resp_sess:
        return getattr(response, response_attr), session

    return getattr(response, response_attr)
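A hedged usage sketch of get_url; the URLs below are placeholders, and real calls depend on the surrounding application state (session objects, proxy settings, cache dir):

# hypothetical call: fetch and parse JSON; returns a dict/list on success, a falsy value otherwise
data = get_url('https://example.com/api/items', parse_json=True, timeout=20)
if data:
    print('fetched %d top-level keys' % len(data))

# hypothetical call: stream a download to disk; True on success, a falsy value otherwise
saved = get_url('https://example.com/file.torrent', savename='/tmp/file.torrent')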
Example #25
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict([(k, re.compile('(?i)' + v))
                   for (k, v) in iteritems({'get': 'magnet'})])
        urls = []
        for mode in search_params:
            for search_string in search_params[mode]:
                if 'Cache' == mode:
                    search_url = self.urls['browse']
                else:
                    search_string = unidecode(search_string)
                    show_name = filter_list(
                        lambda x: x.lower() == re.sub(r'\s.*', '',
                                                      search_string.lower()),
                        list_values(self.shows))
                    if not show_name:
                        continue
                    search_url = self.urls['search'] % list_keys(
                        self.shows)[list_values(self.shows).index(
                            show_name[0])]

                if search_url in urls:
                    continue
                urls += [search_url]

                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html) as soup:
                        tbl_rows = soup.select('ul.user-timeline > li')

                        if not len(tbl_rows):
                            raise generic.HaltParseException

                        for tr in tbl_rows:
                            try:
                                anchor = tr.find('a', href=rc['get'])
                                title = self.regulate_title(anchor)
                                download_url = self._link(anchor['href'])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, None, None))

                except generic.HaltParseException:
                    pass
                except (BaseException, Exception):
                    logger.log(
                        u'Failed to parse. Traceback: %s' %
                        traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Example #26
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict([(k, re.compile('(?i)' + v))
                   for (k, v) in iteritems({
                       'info': '/t/',
                       'get': 'download',
                       'fl': r'\[freeleech\]'
                   })])

        for mode in search_params:
            rc['cats'] = re.compile(
                r'(?i)browse/(?:%s)' %
                self._categories_string(mode, template='', delimiter='|'))
            for search_string in search_params[mode]:
                post_data = dict(
                    jxt=2,
                    jxw='b',
                    route='/browse/%s%s/q/%s' %
                    (self._categories_string(mode, '%s', '/'),
                     ('/freeleech', '')[not self.freeleech],
                     search_string.replace('.', ' ').replace('^@^', '.')))

                data_json = self.get_url(self.urls['search'],
                                         post_data=post_data,
                                         parse_json=True)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    html = filter_list(lambda l: isinstance(l, list),
                                       data_json.get('Fs', []))
                    while html:
                        if html and all(
                                isinstance(x, string_types) for x in html):
                            str_lengths = [len(x) for x in html]
                            html = html[str_lengths.index(max(str_lengths))]
                            break
                        html = filter_list(lambda l: isinstance(l, list), html)
                        if html and 0 < len(html):
                            html = html[0]

                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, parse_only='table') as tbl:
                        tbl_rows = [] if not tbl else tbl.find_all('tr')

                        if 2 > len(tbl_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in tbl_rows[1:]:
                            cells = tr.find_all('td')
                            if 4 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(
                                    tr)
                                seeders, leechers, size = [
                                    try_int(n, n) for n in [
                                        cells[head[x]].get_text().strip()
                                        for x in ('seed', 'leech', 'size')
                                    ]
                                ]
                                if None is tr.find(
                                        'a',
                                        href=rc['cats']) or self._reject_item(
                                            seeders, leechers, self.freeleech
                                            and (None is rc['fl'].search(
                                                cells[1].get_text()))):
                                    continue

                                info = tr.find('a', 'torrent') or tr.find(
                                    'a', href=rc['info'])
                                title = (info.attrs.get('title')
                                         or info.get_text()).strip()
                                download_url = self._link(
                                    tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders,
                                     self._bytesizer(size)))

                except (BaseException, Exception):
                    time.sleep(1.1)

                self._log_search(mode,
                                 len(items[mode]) - cnt,
                                 ('search string: ' + search_string,
                                  self.name)['Cache' == mode])

            results = self._sort_seeding(mode, results + items[mode])

        return results
Example #27
def _get_proper_list(aired_since_shows,  # type: datetime.datetime
                     recent_shows,  # type: List[Tuple[int, int]]
                     recent_anime,  # type:  List[Tuple[int, int]]
                     proper_dict=None  # type:  Dict[AnyStr, List[Proper]]
                     ):
    # type: (...) -> List[Proper]
    """

    :param aired_since_shows: date since aired
    :param recent_shows: list of recent shows
    :param recent_anime: list of recent anime shows
    :param proper_dict: dict with provider keys containing Proper objects
    :return: list of propers
    """
    propers = {}
    # make sure the episode has been downloaded before
    history_limit = datetime.datetime.now() - datetime.timedelta(days=30)

    my_db = db.DBConnection()
    # for each provider get a list of arbitrary Propers
    orig_thread_name = threading.currentThread().name
    # filter provider list for:
    # 1. from recent search: recent search enabled providers
    # 2. native proper search: active search enabled providers
    provider_list = filter_list(
        lambda p: p.is_active() and (p.enable_recentsearch, p.enable_backlog)[None is proper_dict],
        sickbeard.providers.sortedProviderList())
    search_threads = []

    if None is proper_dict:
        # if not a recent proper search create a thread per provider to search for Propers
        proper_dict = {}
        for cur_provider in provider_list:
            if not recent_anime and cur_provider.anime_only:
                continue

            provider_id = cur_provider.get_id()

            logger.log('Searching for new Proper releases at [%s]' % cur_provider.name)
            proper_dict[provider_id] = []

            search_threads.append(threading.Thread(target=_search_provider,
                                                   kwargs={'cur_provider': cur_provider,
                                                           'provider_propers': proper_dict[provider_id],
                                                           'aired_since_shows': aired_since_shows,
                                                           'recent_shows': recent_shows,
                                                           'recent_anime': recent_anime},
                                                   name='%s :: [%s]' % (orig_thread_name, cur_provider.name)))

            search_threads[-1].start()

        # wait for all searches to finish
        for cur_thread in search_threads:
            cur_thread.join()

    for cur_provider in provider_list:
        if not recent_anime and cur_provider.anime_only:
            continue

        found_propers = proper_dict.get(cur_provider.get_id(), [])
        if not found_propers:
            continue

        # if they haven't been added by a different provider than add the Proper to the list
        for cur_proper in found_propers:
            name = _generic_name(cur_proper.name)
            if name in propers:
                continue

            try:
                np = NameParser(False, show_obj=cur_proper.parsed_show_obj, indexer_lookup=False)
                parse_result = np.parse(cur_proper.name)
            except (InvalidNameException, InvalidShowException, Exception):
                continue

            # get the show object
            cur_proper.parsed_show_obj = (cur_proper.parsed_show_obj
                                          or helpers.find_show_by_id(parse_result.show_obj.tvid_prodid))
            if None is cur_proper.parsed_show_obj:
                logger.log('Skip download; cannot find show with ID [%s] at %s' %
                           (cur_proper.prodid, sickbeard.TVInfoAPI(cur_proper.tvid).name), logger.ERROR)
                continue

            cur_proper.tvid = cur_proper.parsed_show_obj.tvid
            cur_proper.prodid = cur_proper.parsed_show_obj.prodid

            if not (-1 != cur_proper.prodid and parse_result.series_name and parse_result.episode_numbers
                    and (cur_proper.tvid, cur_proper.prodid) in recent_shows + recent_anime):
                continue

            # only get anime Proper if it has release group and version
            if parse_result.is_anime and not parse_result.release_group and -1 == parse_result.version:
                logger.log('Ignored Proper with no release group and version in name [%s]' % cur_proper.name,
                           logger.DEBUG)
                continue

            if not show_name_helpers.pass_wordlist_checks(cur_proper.name, parse=False, indexer_lookup=False,
                                                          show_obj=cur_proper.parsed_show_obj):
                logger.log('Ignored unwanted Proper [%s]' % cur_proper.name, logger.DEBUG)
                continue

            re_x = dict(re_prefix='.*', re_suffix='.*')
            result = show_name_helpers.contains_any(cur_proper.name, cur_proper.parsed_show_obj.rls_ignore_words,
                                                    rx=cur_proper.parsed_show_obj.rls_ignore_words_regex, **re_x)
            if None is not result and result:
                logger.log('Ignored Proper containing ignore word [%s]' % cur_proper.name, logger.DEBUG)
                continue

            result = show_name_helpers.contains_any(cur_proper.name, cur_proper.parsed_show_obj.rls_require_words,
                                                    rx=cur_proper.parsed_show_obj.rls_require_words_regex, **re_x)
            if None is not result and not result:
                logger.log('Ignored Proper for not containing any required word [%s]' % cur_proper.name, logger.DEBUG)
                continue

            cur_size = getattr(cur_proper, 'size', None)
            if failed_history.has_failed(cur_proper.name, cur_size, cur_provider.name):
                continue

            cur_proper.season = parse_result.season_number if None is not parse_result.season_number else 1
            cur_proper.episode = parse_result.episode_numbers[0]
            # check if we actually want this Proper (if it's the right quality)
            sql_result = my_db.select(
                'SELECT release_group, status, version, release_name'
                ' FROM tv_episodes'
                ' WHERE indexer = ? AND showid = ?'
                ' AND season = ? AND episode = ?'
                ' LIMIT 1',
                [cur_proper.tvid, cur_proper.prodid,
                 cur_proper.season, cur_proper.episode])
            if not sql_result:
                continue

            # only keep the Proper if we already retrieved the same quality ep (don't get better/worse ones)
            # check if we want this release: same quality as current, current has correct status
            # restrict repacks to the same release group as the existing download
            old_status, old_quality = Quality.splitCompositeStatus(int(sql_result[0]['status']))
            cur_proper.quality = Quality.nameQuality(cur_proper.name, parse_result.is_anime)
            cur_proper.is_repack, cur_proper.properlevel = Quality.get_proper_level(
                parse_result.extra_info_no_name(), parse_result.version, parse_result.is_anime, check_is_repack=True)
            cur_proper.proper_level = cur_proper.properlevel    # local, non-global value
            if old_status in SNATCHED_ANY:
                old_release_group = ''
                # noinspection SqlResolve
                history_results = my_db.select(
                    'SELECT resource FROM history'
                    ' WHERE indexer = ? AND showid = ?'
                    ' AND season = ? AND episode = ? AND quality = ? AND date >= ?'
                    ' AND (%s) ORDER BY date DESC LIMIT 1' % ' OR '.join(
                        ['action = "%d%02d"' % (old_quality, x) for x in SNATCHED_ANY]),
                    [cur_proper.tvid, cur_proper.prodid,
                     cur_proper.season, cur_proper.episode, cur_proper.quality,
                     history_limit.strftime(history.dateFormat)])
                if len(history_results):
                    try:
                        old_release_group = np.parse(history_results[0]['resource']).release_group
                    except (BaseException, Exception):
                        pass
            else:
                old_release_group = sql_result[0]['release_group']
            try:
                same_release_group = parse_result.release_group.lower() == old_release_group.lower()
            except (BaseException, Exception):
                same_release_group = parse_result.release_group == old_release_group
            if old_status not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED] \
                    or cur_proper.quality != old_quality \
                    or (cur_proper.is_repack and not same_release_group):
                continue

            np = NameParser(False, show_obj=cur_proper.parsed_show_obj, indexer_lookup=False)
            try:
                extra_info = np.parse(sql_result[0]['release_name']).extra_info_no_name()
            except (BaseException, Exception):
                extra_info = None
            # don't take Proper of the same level we already downloaded
            old_proper_level, old_extra_no_name, old_name = \
                get_old_proper_level(cur_proper.parsed_show_obj, cur_proper.tvid, cur_proper.prodid,
                                     cur_proper.season, parse_result.episode_numbers,
                                     old_status, cur_proper.quality, extra_info,
                                     parse_result.version, parse_result.is_anime)
            if cur_proper.proper_level <= old_proper_level:
                continue

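            # treat the existing download as a web source if its quality is one of the web-dl
            # qualities, or if an SDTV release name looks like a web rip/dl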
            is_web = (old_quality in (Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.UHD4KWEB) or
                      (old_quality == Quality.SDTV and
                       isinstance(sql_result[0]['release_name'], string_types) and
                       re.search(r'\Wweb.?(dl|rip|.([hx]\W?26[45]|hevc))\W', sql_result[0]['release_name'], re.I)))

            if is_web:
                old_name = (old_name, sql_result[0]['release_name'])[old_name in ('', None)]
                old_webdl_type = get_webdl_type(old_extra_no_name, old_name)
                new_webdl_type = get_webdl_type(parse_result.extra_info_no_name(), cur_proper.name)
                if old_webdl_type != new_webdl_type:
                    logger.log('Ignored Proper webdl source [%s], does not match existing webdl source [%s] for [%s]'
                               % (new_webdl_type, old_webdl_type, cur_proper.name), logger.DEBUG)
                    continue

            # for webdls, prevent Propers from different groups
            log_same_grp = 'Ignored Proper from release group [%s], does not match existing group [%s] for [%s]' \
                           % (parse_result.release_group, old_release_group, cur_proper.name)
            if sickbeard.PROPERS_WEBDL_ONEGRP and is_web and not same_release_group:
                logger.log(log_same_grp, logger.DEBUG)
                continue

            # check if we actually want this Proper (if it's the right release group and a higher version)
            if parse_result.is_anime:
                old_version = int(sql_result[0]['version'])
                if not (-1 < old_version < parse_result.version):
                    continue
                if not same_release_group:
                    logger.log(log_same_grp, logger.DEBUG)
                    continue
                found_msg = 'Found anime Proper v%s to replace v%s' % (parse_result.version, old_version)
            else:
                found_msg = 'Found Proper [%s]' % cur_proper.name

            # noinspection SqlResolve
            history_results = my_db.select(
                'SELECT resource FROM history'
                ' WHERE indexer = ? AND showid = ?'
                ' AND season = ? AND episode = ? AND quality = ? AND date >= ?'
                ' AND (%s)' % ' OR '.join(['action LIKE "%%%02d"' % x for x in SNATCHED_ANY + [DOWNLOADED, ARCHIVED]]),
                [cur_proper.tvid, cur_proper.prodid,
                 cur_proper.season, cur_proper.episode, cur_proper.quality,
                 history_limit.strftime(history.dateFormat)])

            # skip if the episode has never been snatched or downloaded,
            # because a previous item of the same quality is required to match the Proper
            if not len(history_results):
                logger.log('Ignored Proper, cannot find a recent history item for [%s]' % cur_proper.name, logger.DEBUG)
                continue

            # make sure that none of the existing history downloads are the same Proper as the download candidate
            clean_proper_name = _generic_name(helpers.remove_non_release_groups(
                cur_proper.name, cur_proper.parsed_show_obj.is_anime))
            is_same = False
            for hitem in history_results:
                # if the result already exists in history, skip it
                if clean_proper_name == _generic_name(helpers.remove_non_release_groups(
                        ek.ek(os.path.basename, hitem['resource']))):
                    is_same = True
                    break
            if is_same:
                logger.log('Ignored Proper, already in history [%s]' % cur_proper.name)
                continue

            logger.log(found_msg, logger.DEBUG)

            # finish populating the Proper instance
            # cur_proper.show_obj = cur_proper.parsed_show_obj.prodid
            cur_proper.provider = cur_provider
            cur_proper.extra_info = parse_result.extra_info
            cur_proper.extra_info_no_name = parse_result.extra_info_no_name
            cur_proper.release_group = parse_result.release_group

            cur_proper.is_anime = parse_result.is_anime
            cur_proper.version = parse_result.version

            propers[name] = cur_proper

        cur_provider.log_result('Propers', len(propers), '%s' % cur_provider.name)

    return list_values(propers)
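
The _generic_name helper used above to de-duplicate Propers across providers is not shown in this excerpt. A minimal sketch of such a normaliser, assuming it only needs to make release names from different providers comparable:

def _generic_name(name):
    # assumed behaviour (not the original helper): flatten common separators and case
    # so that 'Show.Name.S01E01.PROPER-GRP' and 'show name s01e01 proper grp' compare equal
    return name.replace('.', ' ').replace('-', ' ').replace('_', ' ').lower()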
Ejemplo n.º 28
0
    def search_backlog(
            self,
            which_shows=None,  # type: Optional[List[TVShow]]
            force_type=NORMAL_BACKLOG,  # type: int
            force=False  # type: bool
    ):
        """
        Start a backlog search for the given list of shows, or start the next scheduled backlog.

        :param which_shows: optional list of shows to backlog search
        :param force_type: type of backlog
        :param force: force backlog
        :return: nothing
        :rtype: None
        """
        if self.amActive and not which_shows:
            logger.log(u'Backlog is still running, not starting it again',
                       logger.DEBUG)
            return

        if which_shows:
            show_list = which_shows
            standard_backlog = False
        else:
            show_list = sickbeard.showList
            standard_backlog = True

        now = datetime.datetime.now()
        any_torrent_enabled = continued_backlog = False
        if not force and standard_backlog and (
                datetime.datetime.now() - datetime.datetime.fromtimestamp(
                    self._get_last_runtime())) < datetime.timedelta(hours=23):
            any_torrent_enabled = any(map_iter(
                lambda x: x.is_active() and x.enable_backlog
                and x.providerType == GenericProvider.TORRENT,
                sickbeard.providers.sortedProviderList()))
            if not any_torrent_enabled:
                logger.log(
                    'Last scheduled backlog run was within the last day, skipping this run.',
                    logger.DEBUG)
                return

        if not self.providers_active(any_torrent_enabled, standard_backlog):
            logger.log(
                'No NZB/Torrent provider has active searching enabled in config/Media Providers,'
                ' cannot start backlog.', logger.WARNING)
            return

        self._get_last_backlog()
        self.amActive = True
        self.amPaused = False

        cur_date = datetime.date.today().toordinal()
        from_date = datetime.date.fromordinal(1)
        limited_from_date = datetime.date.today() - datetime.timedelta(
            days=sickbeard.BACKLOG_DAYS)

        limited_backlog = False
        if standard_backlog and (any_torrent_enabled
                                 or sickbeard.BACKLOG_NOFULL):
            logger.log(
                u'Running limited backlog for episodes missed during the last %s day(s)'
                % str(sickbeard.BACKLOG_DAYS))
            from_date = limited_from_date
            limited_backlog = True

        runparts = []
        if standard_backlog and not any_torrent_enabled and sickbeard.BACKLOG_NOFULL:
            logger.log(
                u'Skipping automated full backlog search because it is disabled in search settings'
            )

        my_db = db.DBConnection('cache.db')
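        # if an earlier full backlog was split into parts, resume it: pick the lowest remaining
        # part, restrict this run to its shows, and remove that part from the queue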
        if standard_backlog and not any_torrent_enabled and not sickbeard.BACKLOG_NOFULL:
            sql_result = my_db.select(
                'SELECT * FROM backlogparts WHERE part in (SELECT MIN(part) FROM backlogparts)'
            )
            if sql_result:
                sl = []
                part_nr = int(sql_result[0]['part'])
                for s in sql_result:
                    show_obj = find_show_by_id(
                        {int(s['indexer']): int(s['indexerid'])})
                    if show_obj:
                        sl.append(show_obj)
                        runparts.append(show_obj.tvid_prodid)
                show_list = sl
                continued_backlog = True
                my_db.action('DELETE FROM backlogparts WHERE part = ?',
                             [part_nr])

        forced = standard_backlog and force_type != NORMAL_BACKLOG

        wanted_list = []
        for cur_show_obj in show_list:
            if not cur_show_obj.paused:
                w = wanted_episodes(
                    cur_show_obj,
                    from_date,
                    make_dict=True,
                    unaired=(sickbeard.SEARCH_UNAIRED
                             and not sickbeard.UNAIRED_RECENT_SEARCH_ONLY))
                if w:
                    wanted_list.append(w)

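        # for a new full backlog, split the shows into parts of roughly equal wanted-episode
        # weight so that each scheduled run only has to process one part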
        parts = []
        if standard_backlog and not any_torrent_enabled and not continued_backlog and not sickbeard.BACKLOG_NOFULL:
            fullbacklogparts = sum([len(w) for w in wanted_list if w]) // sickbeard.BACKLOG_FREQUENCY
            h_part = []
            counter = 0
            for w in wanted_list:  # type: Dict
                f = False
                for season, segment in iteritems(w):  # type: int, List[TVEpisode]
                    counter += 1
                    if not f:
                        h_part.append(segment[0].show_obj.tvid_prodid)
                        f = True
                if counter > fullbacklogparts:
                    counter = 0
                    parts.append(h_part)
                    h_part = []

            if h_part:
                parts.append(h_part)

        if not runparts and parts:
            runparts = parts[0]
            wanted_list = filter_list(
                lambda wi: wi and next(itervalues(wi))[0].show_obj.tvid_prodid in runparts,
                wanted_list)

        limited_wanted_list = []
        if standard_backlog and not any_torrent_enabled and runparts:
            for cur_show_obj in sickbeard.showList:
                if not cur_show_obj.paused and cur_show_obj.tvid_prodid not in runparts:
                    w = wanted_episodes(
                        cur_show_obj,
                        limited_from_date,
                        make_dict=True,
                        unaired=(sickbeard.SEARCH_UNAIRED
                                 and not sickbeard.UNAIRED_RECENT_SEARCH_ONLY))
                    if w:
                        limited_wanted_list.append(w)

        self.add_backlog_item(wanted_list, standard_backlog, limited_backlog,
                              forced, any_torrent_enabled)
        if standard_backlog and not any_torrent_enabled and limited_wanted_list:
            self.add_backlog_item(limited_wanted_list, standard_backlog, True,
                                  forced, any_torrent_enabled)

        if standard_backlog and not sickbeard.BACKLOG_NOFULL and not any_torrent_enabled and not continued_backlog:
            # noinspection SqlConstantCondition
            cl = ([], [['DELETE FROM backlogparts WHERE 1=1']])[any(parts)]
            for i, l in enumerate(parts):
                if 0 == i:
                    continue
                cl += map_list(
                    lambda m: [
                        'INSERT INTO backlogparts (part, indexer, indexerid) VALUES (?,?,?)',
                        [i + 1] + TVidProdid(m).list
                    ], l)

            if 0 < len(cl):
                my_db.mass_action(cl)

        # don't consider this an actual backlog search if we only did recent eps
        # or if we only did certain shows
        if from_date == datetime.date.fromordinal(1) and standard_backlog:
            self._set_last_backlog(cur_date)
            self._get_last_backlog()

        if standard_backlog and not any_torrent_enabled:
            self._set_last_runtime(now)

        self.amActive = False
        self._reset_progress_indicator()
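
The part-splitting above weights each show by its number of wanted season segments, so a full backlog is spread across roughly BACKLOG_FREQUENCY scheduled runs. A standalone sketch of the same chunking idea (function and variable names here are illustrative, not from the source):

def split_backlog_parts(wanted_list, backlog_frequency):
    # wanted_list: one dict per show, mapping season -> list of wanted episodes
    # returns a list of parts, each part being a list of indices into wanted_list
    target = sum(len(w) for w in wanted_list if w) // backlog_frequency
    parts, part, counter = [], [], 0
    for idx, w in enumerate(wanted_list):
        if w:
            part.append(idx)    # each show lands in exactly one part
            counter += len(w)   # weight the part by the show's season segments
        if counter > target:
            parts.append(part)
            part, counter = [], 0
    if part:
        parts.append(part)
    return parts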
Ejemplo n.º 29
0
def available_generators():
    return filter_list(lambda x: x not in ('generic', 'helpers'), __all__)
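
For context, a hedged illustration of how this behaves, assuming __all__ lists every module in the package and filter_list is the project's list(filter(...)) shim; the module names below are made up:

__all__ = ['generic', 'helpers', 'kodi', 'mediabrowser', 'tivo']   # hypothetical package contents

def filter_list(pred, iterable):
    # assumed shim, equivalent to list(filter(pred, iterable))
    return list(filter(pred, iterable))

def available_generators():
    return filter_list(lambda x: x not in ('generic', 'helpers'), __all__)

print(available_generators())   # -> ['kodi', 'mediabrowser', 'tivo']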
Ejemplo n.º 30
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if self.show_obj and not self.show_obj.is_anime:
            return results

        items = {'Season': [], 'Episode': [], 'Propers': []}

        rc = dict([(k, re.compile('(?i)' + v))
                   for (k, v) in iteritems({
                       'nodots': r'[\.\s]+',
                       'stats': r'S:\s*?(\d+)\s*L:\s*(\d+)',
                       'size': r'size:\s*(\d+[.,]\d+\w+)'
                   })])

        for mode in search_params:
            for search_string in search_params[mode]:
                params = urlencode({
                    'terms': rc['nodots'].sub(' ', search_string).encode('utf-8'),
                    'type': 1})

                search_url = '%ssearch.php?%s' % (self.url, params)

                html = self.get_url(search_url)
                if self.should_skip():
                    return self._sort_seeding(mode, results)

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(
                            html,
                            parse_only=dict(table={
                                'class': (lambda at: at and 'listing' in at)
                            })) as tbl:
                        tbl_rows = [] if not tbl else tbl.find_all('tr')
                        if tbl_rows:
                            a = (0, 1)[None is not tbl_rows[0].find(
                                'td', class_='centertext')]

                            for top, bottom in zip(tbl_rows[a::2],
                                                   tbl_rows[a + 1::2]):
                                try:
                                    bottom_text = bottom.get_text() or ''
                                    stats = rc['stats'].findall(bottom_text)
                                    seeders, leechers = (0, 0) if not stats else [
                                        try_int(n) for n in stats[0]]

                                    size = rc['size'].findall(bottom_text)
                                    size = size and size[0] or -1

                                    info = top.find('td', class_='desc-top')
                                    title = info and re.sub(
                                        r'[ .]{2,}', '.',
                                        info.get_text().strip())
                                    links = info and map_list(
                                        lambda l: l.get('href', ''),
                                        info.find_all('a')) or None
                                    download_url = self._link(
                                        (filter_list(lambda l: 'magnet:' in l, links)
                                         or filter_list(lambda l: not re.search(r'(magnet:|\.se).+', l), links))[0])
                                except (AttributeError, TypeError, ValueError,
                                        IndexError):
                                    continue

                                if title and download_url:
                                    items[mode].append(
                                        (title, download_url, seeders,
                                         self._bytesizer(size)))

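                # note: this broad except also absorbs the HaltParseException raised above when there are no results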
                except (BaseException, Exception):
                    time.sleep(1.1)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
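
To illustrate the 'stats' and 'size' expressions above, a small self-contained example of parsing one bottom row's text; the sample string is made up, not taken from the provider:

import re

rc = {'stats': re.compile(r'(?i)S:\s*?(\d+)\s*L:\s*(\d+)'),
      'size': re.compile(r'(?i)size:\s*(\d+[.,]\d+\w+)')}

sample = 'Comment: none | S: 12 L: 3 | Size: 1.37GB'   # made-up row text
stats = rc['stats'].findall(sample)
seeders, leechers = [int(n) for n in stats[0]] if stats else (0, 0)
size = rc['size'].findall(sample)
size = size and size[0] or -1
print(seeders, leechers, size)   # -> 12 3 1.37GB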