Code Example #1
File: subtitle_list.py Project: AlinaKay/Flexget
 def _expired(cls, file, config):
     added_interval = datetime.combine(date.today(), time()) - file['added']
     if file['remove_after'] and added_interval > parse_timedelta(file['remove_after']):
         return True
     elif config.get('remove_after') and added_interval > parse_timedelta(config['remove_after']):
         return True
     return False
Code Example #2
File: subtitle_list.py Project: love12345678/Flexget
 def _expired(cls, file, config):
     added_interval = datetime.combine(date.today(), time()) - file['added']
     if file['remove_after'] and added_interval > parse_timedelta(file['remove_after']):
         return True
     elif config.get('remove_after') and added_interval > parse_timedelta(config['remove_after']):
         return True
     return False
Code Example #3
 def prepare_config(self, config):
     settings = {}
     for entry in config:
         if isinstance(entry, dict):
             if entry.get('field') and not entry.get('field').isspace():
                 key = entry.get('field')
                 settings[key] = entry
                 delta = settings[key].get('delta_distance')
                 if delta and isinstance(delta, str):
                     settings[key]['delta_distance'] = parse_timedelta(delta)
                 limit = settings[key].get('upper_limit')
                 if limit and isinstance(limit, str):
                     settings[key]['upper_limit'] = parse_timedelta(limit)
     return settings
Code Example #4
 def prepare_config(self, config):
     settings = {}
     for entry in config:
         if isinstance(entry, dict):
             if entry.get('field') and not entry.get('field').isspace():
                 key = entry.get('field')
                 settings[key] = entry
                 delta = settings[key].get('delta_distance')
                 if delta and isinstance(delta, str):
                     settings[key]['delta_distance'] = parse_timedelta(
                         delta)
                 limit = settings[key].get('upper_limit')
                 if limit and isinstance(limit, str):
                     settings[key]['upper_limit'] = parse_timedelta(limit)
     return settings
Code Example #5
        def consider_entry(_entry, _link):
            try:
                discount, seeders, leechers, hr, expired_time = NexusPHP._get_info(
                    task, _link, config)
            except plugin.PluginError as e:
                raise e
            except Exception as e:
                log.info('NexusPHP._get_info: ' + str(e))
                return

            remember = config['remember']

            if config['discount']:
                if discount not in config['discount']:
                    _entry.reject('%s does not match discount' % discount,
                                  remember=remember)  # discount info does not match
                    return

            if config['left-time'] and expired_time:
                left_time = expired_time - datetime.now()
                # actual remaining time < 'left-time'
                if left_time < parse_timedelta(config['left-time']):
                    _entry.reject('its discount time only left [%s]' %
                                  left_time,
                                  remember=remember)  # insufficient time remaining
                    return

            if config['hr'] is False and hr:
                _entry.reject('it is HR', remember=True)  # reject HR (hit-and-run) torrents

            if config['seeders']:
                seeder_max = config['seeders']['max']
                seeder_min = config['seeders']['min']
                if len(seeders) not in range(seeder_min, seeder_max + 1):
                    _entry.reject('%d is out of range of seeder' %
                                  len(seeders),
                                  remember=True)  # seeder count does not match
                    return

            if config['leechers']:
                leecher_max = config['leechers']['max']
                leecher_min = config['leechers']['min']
                if len(leechers) not in range(leecher_min, leecher_max + 1):
                    _entry.reject('%d is out of range of leecher' %
                                  len(leechers),
                                  remember=True)  # leecher count does not match
                    return

                if len(leechers) != 0:
                    max_complete = max(
                        leechers, key=lambda x: x['completed'])['completed']
                else:
                    max_complete = 0
                if max_complete > config['leechers']['max_complete']:
                    _entry.reject('%f is more than max_complete' %
                                  max_complete,
                                  remember=True)  # max completion does not match
                    return

            _entry.accept()
Code Example #6
    def on_task_urlrewrite(self, task, config):
        if config is False:
            return
        # Create the conversion target directory
        converted_path = os.path.join(task.manager.config_base, 'converted')
        # Calculate the timeout config in seconds (Py2.6 compatible, can be replaced with total_seconds in 2.7)
        timeout = 60
        if config and isinstance(config, dict):
            timeout_delta = parse_timedelta(config['timeout'])
            timeout = (timeout_delta.seconds + timeout_delta.days * 24 * 3600)
        if not os.path.isdir(converted_path):
            os.mkdir(converted_path)

        for entry in task.accepted:
            if entry['url'].startswith('magnet:'):
                entry.setdefault('urls', [entry['url']])
                try:
                    log.info('Converting entry {} magnet URI to a torrent file'.format(entry['title']))
                    torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout)
                except BaseException as e:
                    message = 'Unable to convert Magnet URI for entry {}: {}'.format(entry['title'], e)
                    log.error(message)
                    continue
                # Windows paths need an extra / prepended to them for url
                if not torrent_file.startswith('/'):
                    torrent_file = '/' + torrent_file
                entry['url'] = torrent_file
                entry['file'] = torrent_file
                entry['urls'].append('file://{}'.format(torrent_file))
Code Example #7
File: digest.py Project: Flexget/Flexget
 def on_task_input(self, task, config):
     entries = []
     with Session() as session:
         digest_entries = session.query(DigestEntry).filter(DigestEntry.list == config['list'])
         # Remove any entries older than the expire time, if defined.
         if isinstance(config.get('expire'), basestring):
             expire_time = parse_timedelta(config['expire'])
             digest_entries.filter(DigestEntry.added < datetime.now() - expire_time).delete()
         for index, digest_entry in enumerate(
             digest_entries.order_by(DigestEntry.added.desc()).all()
         ):
             # Just remove any entries past the limit, if set.
             if 0 < config.get('limit', -1) <= index:
                 session.delete(digest_entry)
                 continue
             entry = digest_entry.entry
             if config.get('restore_state') and entry.get('digest_state'):
                 # Not sure this is the best way, but we don't want hooks running on this task
                 # (like backlog hooking entry.fail)
                 entry._state = entry['digest_state']
             entries.append(entry)
             # If expire is 'True', we remove it after it is output once.
             if config.get('expire', True) is True:
                 session.delete(digest_entry)
     return entries
Code Example #8
File: retry_failed.py Project: oscarb-se/Flexget
 def retry_time(self, fail_count, config):
     """Return the timedelta an entry that has failed `fail_count` times before should wait before being retried."""
     base_retry_time = parse_timedelta(config['retry_time'])
     # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
     base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds
     retry_secs = base_retry_secs * (config['retry_time_multiplier'] ** fail_count)
     return timedelta(seconds=retry_secs)
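
For illustration, a minimal sketch of the backoff this helper produces, using assumed config values (a retry_time of '1 hour' and a retry_time_multiplier of 1.5); the detour through seconds mirrors the Python 2 limitation noted in the comment above:

    from datetime import timedelta

    base_retry_secs = 3600  # parse_timedelta('1 hour'), expressed in seconds
    multiplier = 1.5
    for fail_count in range(3):
        # The wait grows geometrically with the number of past failures.
        print(fail_count, timedelta(seconds=base_retry_secs * multiplier ** fail_count))
    # 0 1:00:00
    # 1 1:30:00
    # 2 2:15:00
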
Code Example #9
def db_cleanup(manager, session):
    value = datetime.datetime.now() - parse_timedelta('7 days')
    for discover_entry in (
        session.query(DiscoverEntry).filter(DiscoverEntry.last_execution <= value).all()
    ):
        log.debug('deleting %s', discover_entry)
        session.delete(discover_entry)
Code Example #10
File: retry_failed.py Project: ARLahan/Flexget
 def on_task_learn(self, task, config):
     if config is False:
         return
     config = self.prepare_config(config)
     base_retry_time = parse_timedelta(config['retry_time'])
     retry_time_multiplier = config['retry_time_multiplier']
     for entry in task.failed:
         item = task.session.query(FailedEntry).filter(FailedEntry.title == entry['title']).\
             filter(FailedEntry.url == entry['original_url']).first()
         if item:
             # Do not count the failure on this run when adding additional retry time
             fail_count = item.count - 1
             # Don't bother saving this if it has met max retries
             if fail_count >= config['max_retries']:
                 continue
             # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
             base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds
             retry_secs = base_retry_secs * (retry_time_multiplier ** fail_count)
             retry_time = timedelta(seconds=retry_secs)
         else:
             retry_time = base_retry_time
         if self.backlog:
             self.backlog.instance.add_backlog(task, entry, amount=retry_time)
         if retry_time:
             fail_reason = item.reason if item else entry.get('reason', 'unknown')
             entry.reject(reason='Waiting before trying failed entry again. (failure reason: %s)' %
                 fail_reason, remember_time=retry_time)
             # Cause a task rerun, to look for alternate releases
             task.rerun()
Code Example #11
    def on_task_download(self, task, config):
        if config is False:
            return
        # Create the conversion target directory
        converted_path = os.path.join(task.manager.config_base, 'converted')
        # Calculate the timeout config in seconds (Py2.6 compatible, can be replaced with total_seconds in 2.7)
        timeout = 60
        if config and isinstance(config, dict):
            timeout_delta = parse_timedelta(config['timeout'])
            timeout = (timeout_delta.seconds + timeout_delta.days * 24 * 3600)
        if not os.path.isdir(converted_path):
            os.mkdir(converted_path)

        for entry in task.accepted:
            if entry['url'].startswith('magnet:'):
                entry.setdefault('urls', [entry['url']])
                try:
                    log.info(
                        'Converting entry {} magnet URI to a torrent file'.
                        format(entry['title']))
                    torrent_file = self.magnet_to_torrent(
                        entry['url'], converted_path, timeout)
                except BaseException as e:
                    message = 'Unable to convert Magnet URI for entry {}: {}'.format(
                        entry['title'], e)
                    log.error(message)
                    continue
                # Windows paths need an extra / prepended to them for url
                if not torrent_file.startswith('/'):
                    torrent_file = '/' + torrent_file
                entry['url'] = torrent_file
                entry['file'] = torrent_file
                # make sure it's first in the list because of how download plugin works
                entry['urls'].insert(0, 'file://{}'.format(torrent_file))
Code Example #12
 def estimated(self, entries, estimation_mode):
     """
     :param dict estimation_mode: mode -> loose, strict, ignore
     :return: Entries that we have estimated to be available
     """
     estimator = get_plugin_by_name('estimate_release').instance
     result = []
     for entry in entries:
         est_date = estimator.estimate(entry)
         if est_date is None:
             log.debug('No release date could be determined for %s', entry['title'])
             if estimation_mode['mode'] == 'strict':
                 entry.reject('has no release date')
                 entry.complete()
             else:
                 result.append(entry)
             continue
         if isinstance(est_date, datetime.date):
             # If we just got a date, add a time so we can compare it to now()
             est_date = datetime.datetime.combine(est_date, datetime.time())
         if datetime.datetime.now() >= est_date:
             log.debug('%s has been released at %s', entry['title'], est_date)
             result.append(entry)
         elif datetime.datetime.now() >= est_date - parse_timedelta(estimation_mode['optimistic']):
             log.debug('%s will be released at %s. Ignoring release estimation because estimated release date is '
                       'in less than %s', entry['title'], est_date, estimation_mode['optimistic'])
             result.append(entry)
         else:
             entry.reject('has not been released')
             entry.complete()
             log.verbose("%s hasn't been released yet (Expected: %s)", entry['title'], est_date)
     return result
Code Example #13
File: retry_failed.py Project: s-m-b/Flexget
 def on_task_exit(self, task, config):
     if config is False:
         return
     config = self.prepare_config(config)
     base_retry_time = parse_timedelta(config['retry_time'])
     retry_time_multiplier = config['retry_time_multiplier']
     for entry in task.failed:
         item = task.session.query(FailedEntry).filter(FailedEntry.title == entry['title']).\
                                         filter(FailedEntry.url == entry['original_url']).first()
         if item:
             # Do not count the failure on this run when adding additional retry time
             fail_count = item.count - 1
             # Don't bother saving this if it has met max retries
             if fail_count >= config['max_retries']:
                 continue
             # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
             base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds
             retry_secs = base_retry_secs * (retry_time_multiplier ** fail_count)
             retry_time = timedelta(seconds=retry_secs)
         else:
             retry_time = base_retry_time
         if self.backlog:
             self.backlog.add_backlog(task, entry, amount=retry_time)
         if retry_time:
             fail_reason = item.reason if item else entry.get('reason', 'unknown')
             task.reject(entry, reason='Waiting before trying failed entry again. (failure reason: %s)' %
                                       fail_reason, remember_time=retry_time)
             # Cause a task rerun, to look for alternate releases
             task.rerun()
Code Example #14
File: discover.py Project: Flexget/Flexget
def db_cleanup(manager, session):
    value = datetime.datetime.now() - parse_timedelta('7 days')
    for discover_entry in (
        session.query(DiscoverEntry).filter(DiscoverEntry.last_execution <= value).all()
    ):
        log.debug('deleting %s', discover_entry)
        session.delete(discover_entry)
Code Example #15
    def on_task_urlrewrite(self, task, config):
        if config is False:
            return
        config = self.prepare_config(config)
        # Create the conversion target directory
        converted_path = os.path.join(task.manager.config_base, 'converted')

        timeout = parse_timedelta(config['timeout']).total_seconds()

        if not os.path.isdir(converted_path):
            os.mkdir(converted_path)

        for entry in task.accepted:
            if entry['url'].startswith('magnet:'):
                entry.setdefault('urls', [entry['url']])
                try:
                    log.info(
                        'Converting entry {} magnet URI to a torrent file'.
                        format(entry['title']))
                    self.process(entry, converted_path, timeout)
                except (plugin.PluginError, TypeError) as e:
                    log.error('Unable to convert Magnet URI for entry %s: %s',
                              entry['title'], e)
                    if config['force']:
                        entry.fail('Magnet URI conversion failed')
                    continue
Code Example #16
File: config_schema.py Project: sean797/Flexget
def parse_interval(interval_string):
    """Takes an interval string from the config and turns it into a :class:`datetime.timedelta` object."""
    regexp = r'^\d+ (second|minute|hour|day|week)s?$'
    if not re.match(regexp, interval_string):
        raise ValueError(
            "should be in format 'x (seconds|minutes|hours|days|weeks)'")
    return parse_timedelta(interval_string)
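
The regexp above documents the accepted interval format. For a rough feel of the behavior, here is a standalone, assumption-laden sketch written for illustration only; it is not Flexget's actual parse_timedelta (which lives in flexget.utils.tools and handles more cases):

    import re
    from datetime import timedelta

    def parse_timedelta_sketch(value):
        # Pass real timedeltas through untouched; treat empty input as zero.
        if isinstance(value, timedelta):
            return value
        if not value:
            return timedelta()
        match = re.match(r'^(\d+) (second|minute|hour|day|week)s?$', value)
        if not match:
            raise ValueError("should be in format 'x (seconds|minutes|hours|days|weeks)'")
        amount, unit = match.groups()
        return timedelta(**{unit + 's': int(amount)})

    print(parse_timedelta_sketch('2 hours'))  # 2:00:00
    print(parse_timedelta_sketch('1 week'))   # 7 days, 0:00:00
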
Code Example #17
 def on_task_exit(self, task, config):
     config = self.prepare_config(config)
     if not config['enabled'] or task.options.learn:
         return
     if not self.client:
         self.client = self.create_rpc_client(config)
     nrat = float(config['min_ratio']) if 'min_ratio' in config else None
     nfor = parse_timedelta(config['finished_for']) if 'finished_for' in config else None
     delete_files = bool(config['delete_files']) if 'delete_files' in config else False
     
     remove_ids = []
     for torrent in self.client.get_torrents():
         log.verbose('Torrent "%s": status: "%s" - ratio: %s - date done: %s' % 
                     (torrent.name, torrent.status, torrent.ratio, torrent.date_done))
         downloaded, dummy = self.torrent_info(torrent)
         if (downloaded and ((nrat is None and nfor is None) or
                             (nrat and (nrat <= torrent.ratio)) or
                             (nfor and ((torrent.date_done + nfor) <= datetime.now())))):
             if task.options.test:
                 log.info('Would remove finished torrent `%s` from transmission' % torrent.name)
                 continue
             log.info('Removing finished torrent `%s` from transmission' % torrent.name)
             remove_ids.append(torrent.id)
     if remove_ids:
         self.client.remove_torrent(remove_ids, delete_files)
Code Example #18
    def on_task_download(self, task, config):
        if config is False:
            return
        config = self.prepare_config(config)
        # Create the conversion target directory
        converted_path = os.path.join(task.manager.config_base, 'converted')

        timeout = parse_timedelta(config['timeout']).total_seconds()

        if not os.path.isdir(converted_path):
            os.mkdir(converted_path)

        for entry in task.accepted:
            if entry['url'].startswith('magnet:'):
                entry.setdefault('urls', [entry['url']])
                try:
                    log.info(
                        'Converting entry {} magnet URI to a torrent file'.
                        format(entry['title']))
                    torrent_file = self.magnet_to_torrent(
                        entry['url'], converted_path, timeout)
                except (plugin.PluginError, TypeError) as e:
                    log.error('Unable to convert Magnet URI for entry %s: %s',
                              entry['title'], e)
                    continue
                # Windows paths need an extra / prepended to them for url
                if not torrent_file.startswith('/'):
                    torrent_file = '/' + torrent_file
                entry['url'] = torrent_file
                entry['file'] = torrent_file
                # make sure it's first in the list because of how download plugin works
                entry['urls'].insert(0, 'file://{}'.format(torrent_file))
Code Example #19
File: age.py Project: AlinaKay/Flexget
    def on_task_filter(self, task, config):
        for entry in task.entries:
            field = config['field']
            if field not in entry:
                entry.fail('Field {0} does not exist'.format(field))
                continue

            field_value = entry[field]

            if isinstance(field_value, datetime):
                field_date = field_value
            elif isinstance(field_value, float):
                field_date = datetime.fromtimestamp(field_value)
            elif isinstance(field_value, str):
                try:
                    field_date = dateutil_parse(entry[field])
                except ValueError:
                    log.warning('Entry %s ignored: %s is not a valid date', entry['title'], field_value)
                    continue
            else:
                log.warning('Entry %s ignored: %s is not a valid date', entry['title'], field_value)
                continue

            age_cutoff = datetime.now() - parse_timedelta(config['age'])

            if field_date < age_cutoff:
                info_string = 'Date in field `{0}` is older than {1}'.format(field, config['age'])
                if config['action'] == 'accept':
                    entry.accept(info_string)
                else:
                    entry.reject(info_string)
                log.debug('Entry %s was %sed because date in field `%s` is older than %s', entry['title'],
                          config['action'], field, config['age'])
Code Example #20
 def on_entry_reject(self,
                     entry,
                     task=None,
                     remember=None,
                     remember_time=None,
                     **kwargs):
     # We only remember rejections that specify the remember keyword argument
     if not remember and not remember_time:
         return
     expires = None
     if remember_time:
         if isinstance(remember_time, basestring):
             remember_time = parse_timedelta(remember_time)
         expires = datetime.now() + remember_time
     if not entry.get('title') or not entry.get('original_url'):
         log.debug(
             'Can\'t remember rejection for entry without title or url.')
         return
     message = 'Remembering rejection of `%s`' % entry['title']
     if remember_time:
         message += ' for %i minutes' % (remember_time.seconds / 60)
     log.info(message)
     (remember_task_id, ) = task.session.query(
         RememberTask.id).filter(RememberTask.name == task.name).first()
     task.session.add(
         RememberEntry(title=entry['title'],
                       url=entry['original_url'],
                       task_id=remember_task_id,
                       rejected_by=task.current_plugin,
                       reason=kwargs.get('reason'),
                       expires=expires))
     # The test stops passing when this is taken out for some reason...
     task.session.flush()
Code Example #21
    def add_backlog(self, task, entry, amount='', session=None):
        """Add single entry to task backlog

        If :amount: is not specified, entry will only be injected on next execution."""
        snapshot = entry.snapshots.get('after_input')
        if not snapshot:
            if task.current_phase != 'input':
                # Not having a snapshot is normal during input phase, don't display a warning
                log.warning(
                    'No input snapshot available for `%s`, using current state'
                    % entry['title'])
            snapshot = entry
        expire_time = datetime.now() + parse_timedelta(amount)
        backlog_entry = session.query(BacklogEntry).filter(BacklogEntry.title == entry['title']).\
            filter(BacklogEntry.task == task.name).first()
        if backlog_entry:
            # If there is already a backlog entry for this, update the expiry time if necessary.
            if backlog_entry.expire < expire_time:
                log.debug('Updating expiry time for %s' % entry['title'])
                backlog_entry.expire = expire_time
        else:
            log.debug('Saving %s' % entry['title'])
            backlog_entry = BacklogEntry()
            backlog_entry.title = entry['title']
            backlog_entry.entry = snapshot
            backlog_entry.task = task.name
            backlog_entry.expire = expire_time
            session.add(backlog_entry)
Code Example #22
File: digest.py Project: cash2one/flexget
 def on_task_input(self, task, config):
     entries = []
     with Session() as session:
         digest_entries = session.query(DigestEntry).filter(
             DigestEntry.list == config['list'])
         # Remove any entries older than the expire time, if defined.
         if isinstance(config.get('expire'), basestring):
             expire_time = parse_timedelta(config['expire'])
             digest_entries.filter(
                 DigestEntry.added < datetime.now() - expire_time).delete()
         for index, digest_entry in enumerate(
                 digest_entries.order_by(DigestEntry.added.desc()).all()):
             # Just remove any entries past the limit, if set.
             if 0 < config.get('limit', -1) <= index:
                 session.delete(digest_entry)
                 continue
             entry = digest_entry.entry
             if config.get('restore_state') and entry.get('digest_state'):
                 # Not sure this is the best way, but we don't want hooks running on this task
                 # (like backlog hooking entry.fail)
                 entry._state = entry['digest_state']
             entries.append(entry)
             # If expire is 'True', we remove it after it is output once.
             if config.get('expire', True) is True:
                 session.delete(digest_entry)
     return entries
Code Example #23
File: api_tvrage.py Project: Gyran/Flexget
def lookup_series(name=None, session=None):
    series = None
    res = session.query(TVRageLookup).filter(TVRageLookup.name == name.lower()).first()

    if res:
        series = res.series
        # if too old result, clean the db and refresh it
        interval = parse_timedelta(update_interval)
        if datetime.datetime.now() > series.last_update + interval:
            log.debug('Refreshing tvrage info for %s', name)
        else:
            return series
    log.debug('Fetching tvrage info for %s' % name)
    try:
        fetched = tvrage.api.Show(name.encode('utf-8'))
    except tvrage.exceptions.ShowNotFound:
        raise LookupError('Could not find show %s' % name)
    except (timeout, AttributeError):
        # AttributeError is due to a bug in tvrage package trying to access URLError.code
        raise LookupError('Timed out while connecting to tvrage')
    if not series:
        series = session.query(TVRageSeries).filter(TVRageSeries.showid == fetched.showid).first()
    if not series:
        series = TVRageSeries(fetched)
        session.add(series)
        session.add(TVRageLookup(unicode(fetched.name), series))
    else:
        series.update(fetched)
    if name.lower() != fetched.name.lower():
        session.add(TVRageLookup(name, series))
    return series
Code Example #24
File: discover.py Project: cash2one/flexget
 def estimated(self, entries, estimation_mode):
     """
     :param dict estimation_mode: mode -> loose, strict, ignore
     :return: Entries that we have estimated to be available
     """
     estimator = get_plugin_by_name('estimate_release').instance
     result = []
     for entry in entries:
         est_date = estimator.estimate(entry)
         if est_date is None:
             log.debug('No release date could be determined for %s', entry['title'])
             if estimation_mode['mode'] == 'strict':
                 entry.reject('has no release date')
                 entry.complete()
             else:
                 result.append(entry)
             continue
         if isinstance(est_date, datetime.date):
             # If we just got a date, add a time so we can compare it to now()
             est_date = datetime.datetime.combine(est_date, datetime.time())
         if datetime.datetime.now() >= est_date:
             log.debug('%s has been released at %s', entry['title'], est_date)
             result.append(entry)
         elif datetime.datetime.now() >= est_date - parse_timedelta(estimation_mode['optimistic']):
             log.debug('%s will be released at %s. Ignoring release estimation because estimated release date is '
                       'in less than %s', entry['title'], est_date, estimation_mode['optimistic'])
             result.append(entry)
         else:
             entry.reject('has not been released')
             entry.complete()
             log.verbose("%s hasn't been released yet (Expected: %s)", entry['title'], est_date)
     return result
Code Example #25
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        nrat = float(config['min_ratio']) if 'min_ratio' in config else None
        nfor = parse_timedelta(config['finished_for']) if 'finished_for' in config else None
        delete_files = bool(config['delete_files']) if 'delete_files' in config else False
        trans_checks = bool(config['transmission_seed_limits']) if 'transmission_seed_limits' in config else False
        
        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose('Torrent "%s": status: "%s" - ratio: %s - date done: %s' %
                        (torrent.name, torrent.status, torrent.ratio, torrent.date_done))
            downloaded, dummy = self.torrent_info(torrent)
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
            if (downloaded and ((nrat is None and nfor is None and trans_checks is None) or
                                (trans_checks and ((seed_ratio_ok is None and idle_limit_ok is None) or
                                 (seed_ratio_ok is True or idle_limit_ok is True))) or
                                (nrat and (nrat <= torrent.ratio)) or
                                (nfor and ((torrent.date_done + nfor) <= datetime.now())))):
                if task.options.test:
                    log.info('Would remove finished torrent `%s` from transmission' % torrent.name)
                    continue
                log.info('Removing finished torrent `%s` from transmission' % torrent.name)
                remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, delete_files)
Code Example #26
 def on_task_exit(self, task, config):
     config = self.prepare_config(config)
     if not config["enabled"] or task.options.learn:
         return
     if not self.client:
         self.client = self.create_rpc_client(config)
     nrat = float(config["min_ratio"]) if "min_ratio" in config else None
     nfor = parse_timedelta(config["finished_for"]) if "finished_for" in config else None
     remove_ids = []
     for torrent in self.client.get_torrents():
         log.verbose(
             'Torrent "%s": status: "%s" - ratio: %s - date done: %s'
             % (torrent.name, torrent.status, torrent.ratio, torrent.date_done)
         )
         downloaded, dummy = self.torrent_info(torrent)
         if downloaded and (
             (nrat is None and nfor is None)
             or (nrat and (nrat <= torrent.ratio))
             or (nfor and ((torrent.date_done + nfor) <= datetime.now()))
         ):
             if task.options.test:
                 log.info("Would remove finished torrent `%s` from transmission" % torrent.name)
                 continue
             log.info("Removing finished torrent `%s` from transmission" % torrent.name)
             remove_ids.append(torrent.id)
     if remove_ids:
         self.client.remove_torrent(remove_ids)
Code Example #27
File: remember_rejected.py Project: EnJens/Flexget
 def on_entry_reject(self, entry, task=None, remember=None, remember_time=None, **kwargs):
     # We only remember rejections that specify the remember keyword argument
     if not remember and not remember_time:
         return
     expires = None
     if remember_time:
         if isinstance(remember_time, basestring):
             remember_time = parse_timedelta(remember_time)
         expires = datetime.now() + remember_time
     if not entry.get("title") or not entry.get("original_url"):
         log.debug("Can't remember rejection for entry without title or url.")
         return
     message = "Remembering rejection of `%s`" % entry["title"]
     if remember_time:
         message += " for %i minutes" % (remember_time.seconds / 60)
     log.info(message)
     (remember_task_id,) = task.session.query(RememberTask.id).filter(RememberTask.name == task.name).first()
     task.session.add(
         RememberEntry(
             title=entry["title"],
             url=entry["original_url"],
             task_id=remember_task_id,
             rejected_by=task.current_plugin,
             reason=kwargs.get("reason"),
             expires=expires,
         )
     )
     # The test stops passing when this is taken out for some reason...
     task.session.flush()
Code Example #28
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        tracker_re = re.compile(config['tracker'], re.IGNORECASE) if 'tracker' in config else None
        preserve_tracker_re = (
            re.compile(config['preserve_tracker'], re.IGNORECASE)
            if 'preserve_tracker' in config
            else None
        )

        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            logger.verbose(
                'Torrent "{}": status: "{}" - ratio: {} - date added: {}',
                torrent.name,
                torrent.status,
                torrent.ratio,
                torrent.date_added,
            )
            downloaded, dummy = self.torrent_info(torrent, config)
            if not downloaded:
                continue
            if config.get('transmission_seed_limits'):
                seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
                if not seed_ratio_ok or not idle_limit_ok:
                    continue
            if 'min_ratio' in config:
                if torrent.ratio < config['min_ratio']:
                    continue
            if 'finished_for' in config:
                # done date might be invalid if this torrent was added to transmission when already completed
                started_seeding = datetime.fromtimestamp(max(torrent.addedDate, torrent.doneDate))
                if started_seeding + parse_timedelta(config['finished_for']) > datetime.now():
                    continue
            tracker_hosts = (
                urlparse(tracker['announce']).hostname for tracker in torrent.trackers
            )
            if 'tracker' in config:
                if not any(tracker_re.search(tracker) for tracker in tracker_hosts):
                    continue
            if 'preserve_tracker' in config:
                if any(preserve_tracker_re.search(tracker) for tracker in tracker_hosts):
                    continue
            if config.get('directories'):
                if not any(
                    re.search(d, torrent.downloadDir, re.IGNORECASE) for d in config['directories']
                ):
                    continue
            if task.options.test:
                logger.info('Would remove finished torrent `{}` from transmission', torrent.name)
                continue
            logger.info('Removing finished torrent `{}` from transmission', torrent.name)
            remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, config.get('delete_files'))
Code Example #29
File: retry_failed.py Project: blitmaster/Flexget
 def retry_time(self, fail_count, config):
     """Return the timedelta an entry that has failed `fail_count` times before should wait before being retried."""
     base_retry_time = parse_timedelta(config['retry_time'])
     # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
     base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds
     retry_secs = base_retry_secs * (config['retry_time_multiplier'] ** fail_count)
     return timedelta(seconds=retry_secs)
Code Example #30
File: backlog.py Project: AlinaKay/Flexget
    def add_backlog(self, task, entry, amount='', session=None):
        """Add single entry to task backlog

        If :amount: is not specified, entry will only be injected on next execution."""
        snapshot = entry.snapshots.get('after_input')
        if not snapshot:
            if task.current_phase != 'input':
                # Not having a snapshot is normal during input phase, don't display a warning
                log.warning('No input snapshot available for `%s`, using current state' % entry['title'])
            snapshot = entry
        expire_time = datetime.now() + parse_timedelta(amount)
        backlog_entry = session.query(BacklogEntry).filter(BacklogEntry.title == entry['title']). \
            filter(BacklogEntry.task == task.name).first()
        if backlog_entry:
            # If there is already a backlog entry for this, update the expiry time if necessary.
            if backlog_entry.expire < expire_time:
                log.debug('Updating expiry time for %s' % entry['title'])
                backlog_entry.expire = expire_time
        else:
            log.debug('Saving %s' % entry['title'])
            backlog_entry = BacklogEntry()
            backlog_entry.title = entry['title']
            backlog_entry.entry = snapshot
            backlog_entry.task = task.name
            backlog_entry.expire = expire_time
            session.add(backlog_entry)
Code Example #31
    def on_task_download(self, task, config):
        if config is False:
            return
        config = self.prepare_config(config)
        # Create the conversion target directory
        converted_path = os.path.join(task.manager.config_base, "converted")

        timeout = parse_timedelta(config["timeout"]).total_seconds()

        if not os.path.isdir(converted_path):
            os.mkdir(converted_path)

        for entry in task.accepted:
            if entry["url"].startswith("magnet:"):
                entry.setdefault("urls", [entry["url"]])
                try:
                    log.info("Converting entry {} magnet URI to a torrent file".format(entry["title"]))
                    torrent_file = self.magnet_to_torrent(entry["url"], converted_path, timeout)
                except (plugin.PluginError, TypeError) as e:
                    log.error("Unable to convert Magnet URI for entry %s: %s", entry["title"], e)
                    continue
                # Windows paths need an extra / prepended to them for url
                if not torrent_file.startswith("/"):
                    torrent_file = "/" + torrent_file
                entry["url"] = torrent_file
                entry["file"] = torrent_file
                # make sure it's first in the list because of how download plugin works
                entry["urls"].insert(0, "file://{}".format(torrent_file))
Code Example #32
 def on_task_start(self, task, config):
     # Allow reruns
     if task.is_rerun:
         return
     if task.manager.options.learn:
         log.info('Ignoring task %s interval for --learn' % task.name)
         return
     last_time = task.simple_persistence.get('last_time')
     if not last_time:
         log.info('No previous run recorded, running now')
     elif task.manager.options.interval_ignore:
         log.info('Ignoring interval because of --now')
     else:
         log.debug('last_time: %r' % last_time)
         log.debug('interval: %s' % config)
         next_time = last_time + parse_timedelta(config)
         log.debug('next_time: %r' % next_time)
         if datetime.datetime.now() < next_time:
             log.debug('interval not met')
             log.verbose(
                 'Interval %s not met on task %s. Use --now to override.' %
                 (config, task.name))
             task.abort('Interval not met', silent=True)
             return
     log.debug('interval passed')
     task.simple_persistence['last_time'] = datetime.datetime.now()
Code Example #33
File: api_tvrage.py Project: andir/Flexget
def lookup_series(name=None, session=None):
    series = None
    res = session.query(TVRageLookup).filter(
        TVRageLookup.name == name.lower()).first()

    if res and not res.series:
        # The lookup failed in the past for this series, retry every week
        # TODO: 1.2 this should also retry with --retry or whatever flag imdb lookup is using for that
        if res.failed_time and res.failed_time > datetime.datetime.now(
        ) - datetime.timedelta(days=7):
            raise LookupError('Could not find show %s' % name)
    elif res:
        series = res.series
        # if too old result, clean the db and refresh it
        interval = parse_timedelta(UPDATE_INTERVAL)
        if datetime.datetime.now() > series.last_update + interval:
            log.debug('Refreshing tvrage info for %s', name)
        else:
            return series

    def store_failed_lookup():
        if res:
            res.series = None
            res.failed_time = datetime.datetime.now()
        else:
            session.add(
                TVRageLookup(name, None, failed_time=datetime.datetime.now()))
            session.commit()

    log.debug('Fetching tvrage info for %s' % name)
    try:
        fetched = tvrage.api.Show(name.encode('utf-8'))
    except tvrage.exceptions.ShowNotFound:
        store_failed_lookup()
        raise LookupError('Could not find show %s' % name)
    except (timeout, AttributeError):
        # AttributeError is due to a bug in tvrage package trying to access URLError.code
        raise LookupError('Timed out while connecting to tvrage')
    # Make sure the result is close enough to the search
    if difflib.SequenceMatcher(a=name, b=fetched.name).ratio() < 0.7:
        log.debug('Show result `%s` was not a close enough match for `%s`' %
                  (fetched.name, name))
        store_failed_lookup()
        raise LookupError('Could not find show %s' % name)
    if not series:
        series = session.query(TVRageSeries).filter(
            TVRageSeries.showid == fetched.showid).first()
    if not series:
        series = TVRageSeries(fetched)
        session.add(series)
        session.add(TVRageLookup(unicode(fetched.name), series))
    else:
        series.update(fetched)
    if name.lower() != fetched.name.lower():
        if res:
            res.series = series
        else:
            session.add(TVRageLookup(name, series))
    return series
Code Example #34
File: api_tvrage.py Project: DColl/Flexget
def lookup_series(name=None, session=None):
    series = None
    res = session.query(TVRageLookup).filter(TVRageLookup.name == name.lower()).first()

    if res and not res.series:
        # The lookup failed in the past for this series, retry every week
        # TODO: 1.2 this should also retry with --retry or whatever flag imdb lookup is using for that
        if res.failed_time and res.failed_time > datetime.datetime.now() - datetime.timedelta(days=7):
            raise LookupError('Could not find show %s' % name)
    elif res:
        series = res.series
        # if too old result, clean the db and refresh it
        interval = parse_timedelta(UPDATE_INTERVAL)
        if datetime.datetime.now() > series.last_update + interval:
            log.debug('Refreshing tvrage info for %s', name)
        else:
            return series

    def store_failed_lookup():
        if res:
            res.series = None
            res.failed_time = datetime.datetime.now()
        else:
            session.add(TVRageLookup(name, None, failed_time=datetime.datetime.now()))
            session.commit()

    log.debug('Fetching tvrage info for %s' % name)
    try:
        fetched = tvrage.api.Show(name.encode('utf-8'))
    except tvrage.exceptions.ShowNotFound:
        store_failed_lookup()
        raise LookupError('Could not find show %s' % name)
    except (timeout, AttributeError):
        # AttributeError is due to a bug in tvrage package trying to access URLError.code
        raise LookupError('Timed out while connecting to tvrage')
    except TypeError:
        # TODO: There should be option to pass tvrage id directly from within series plugin via "set:" (like tvdb_id)
        # and search directly for tvrage id. This is problematic, because 3rd party TVRage API does not support this.
        raise LookupError('Returned invalid data for "%s". This is often caused when TVRage is missing episode info'
                          % name)
    # Make sure the result is close enough to the search
    if difflib.SequenceMatcher(a=name, b=fetched.name).ratio() < 0.7:
        log.debug('Show result `%s` was not a close enough match for `%s`' % (fetched.name, name))
        store_failed_lookup()
        raise LookupError('Could not find show %s' % name)
    if not series:
        series = session.query(TVRageSeries).filter(TVRageSeries.showid == fetched.showid).first()
    if not series:
        series = TVRageSeries(fetched)
        session.add(series)
        session.add(TVRageLookup(unicode(fetched.name), series))
    else:
        series.update(fetched)
    if name.lower() != fetched.name.lower():
        if res:
            res.series = series
        else:
            session.add(TVRageLookup(name, series))
    return series
Code Example #35
    def test_rejected_pagination(self, api_client, link_headers):
        base_reject_entry = dict(title='test_title_',
                                 url='test_url_',
                                 rejected_by='rejected_by_',
                                 reason='reason_')
        number_of_entries = 200

        with Session() as session:
            task = RememberTask(name='rejected API test')
            session.add(task)
            session.commit()

            for i in range(number_of_entries):
                r_entry = copy.deepcopy(base_reject_entry)
                for key, value in r_entry.items():
                    r_entry[key] = value + str(i)
                expires = datetime.now() + parse_timedelta('1 hours')
                session.add(
                    RememberEntry(expires=expires, task_id=task.id, **r_entry))

        # Default values
        rsp = api_client.get('/rejected/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))

        assert len(data) == 50
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 50

        links = link_headers(rsp)
        assert links['last']['page'] == 4
        assert links['next']['page'] == 2

        # Change page size
        rsp = api_client.get('/rejected/?per_page=100')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))

        assert len(data) == 100
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 100

        links = link_headers(rsp)
        assert links['last']['page'] == 2
        assert links['next']['page'] == 2

        # Get different page
        rsp = api_client.get('/rejected/?page=2')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))

        assert len(data) == 50
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 50

        links = link_headers(rsp)
        assert links['last']['page'] == 4
        assert links['next']['page'] == 3
        assert links['prev']['page'] == 1
Code Example #36
File: requests.py Project: wicastchen/Flexget
    def set_domain_delay(self, domain, delay):
        """
        Registers a minimum interval between requests to `domain`

        :param domain: The domain to set the interval on
        :param delay: The amount of time between requests, can be a timedelta or string like '3 seconds'
        """
        self.domain_delay[domain] = {'delay': parse_timedelta(delay)}
Code Example #37
File: requests.py Project: StunMan/Flexget
    def set_domain_delay(self, domain, delay):
        """
        Registers a minimum interval between requests to `domain`

        :param domain: The domain to set the interval on
        :param delay: The amount of time between requests, can be a timedelta or string like '3 seconds'
        """
        self.domain_delay[domain] = {'delay': parse_timedelta(delay)}
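
A hypothetical usage sketch for the two examples above (assuming the enclosing class is the Session wrapper in flexget.utils.requests, which subclasses requests.Session):

    from flexget.utils.requests import Session

    session = Session()
    # Enforce a minimum of 3 seconds between consecutive requests to this host.
    session.set_domain_delay('example.com', '3 seconds')
    session.get('https://example.com/feed')  # later calls to this domain are throttled
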
Code Example #38
 def __init__(self, name, persist=None):
     # Cast name to unicode to prevent sqlalchemy warnings when filtering
     self.name = str(name)
     # Parse persist time
     self.persist = persist and parse_timedelta(persist)
     # Will be set when wrapped function is called
     self.config_hash = None
     self.cache_name = None
Code Example #39
File: cached_input.py Project: luizoti/Flexget
 def __init__(self, name: str, persist: str = None) -> None:
     # Cast name to unicode to prevent sqlalchemy warnings when filtering
     self.name = str(name)
     # Parse persist time
     self.persist: Optional[timedelta] = parse_timedelta(
         persist) if persist else None
     # Will be set when wrapped function is called
     self.config_hash = None
     self.cache_name = None
Code Example #40
File: transmission.py Project: Flexget/Flexget
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        tracker_re = re.compile(config['tracker'], re.IGNORECASE) if 'tracker' in config else None
        preserve_tracker_re = (
            re.compile(config['preserve_tracker'], re.IGNORECASE)
            if 'preserve_tracker' in config
            else None
        )

        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose(
                'Torrent "%s": status: "%s" - ratio: %s -  date added: %s'
                % (torrent.name, torrent.status, torrent.ratio, torrent.date_added)
            )
            downloaded, dummy = self.torrent_info(torrent, config)
            if not downloaded:
                continue
            if config.get('transmission_seed_limits'):
                seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
                if not seed_ratio_ok or not idle_limit_ok:
                    continue
            if 'min_ratio' in config:
                if torrent.ratio < config['min_ratio']:
                    continue
            if 'finished_for' in config:
                # done date might be invalid if this torrent was added to transmission when already completed
                started_seeding = datetime.fromtimestamp(max(torrent.addedDate, torrent.doneDate))
                if started_seeding + parse_timedelta(config['finished_for']) > datetime.now():
                    continue
            tracker_hosts = (
                urlparse(tracker['announce']).hostname for tracker in torrent.trackers
            )
            if 'tracker' in config:
                if not any(tracker_re.search(tracker) for tracker in tracker_hosts):
                    continue
            if 'preserve_tracker' in config:
                if any(preserve_tracker_re.search(tracker) for tracker in tracker_hosts):
                    continue
            if config.get('directories'):
                if not any(
                    re.search(d, torrent.downloadDir, re.IGNORECASE) for d in config['directories']
                ):
                    continue
            if task.options.test:
                log.info('Would remove finished torrent `%s` from transmission', torrent.name)
                continue
            log.info('Removing finished torrent `%s` from transmission', torrent.name)
            remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, config.get('delete_files'))
Code Example #41
    def on_task_filter(self, task, config):
        config = self.prepare_config(config)

        if not config or not config['target']:
            return

        identified_by = '{{ id }}' if config['identified_by'] == 'auto' else config['identified_by']

        grouped_entries = group_entries(task.accepted + task.undecided, identified_by)
        if not grouped_entries:
            return

        with Session() as session:
            # Prefetch Data
            existing_ids = session.query(EntryUpgrade).filter(EntryUpgrade.id.in_(grouped_entries.keys())).all()
            existing_ids = {e.id: e for e in existing_ids}

            for identifier, entries in grouped_entries.items():
                if not entries:
                    continue

                existing = existing_ids.get(identifier)
                if not existing:
                    # No existing, do_nothing
                    continue

                log.debug('Looking for upgrades for identifier %s (within %s entries)', identifier, len(entries))

                # Check if passed allowed timeframe
                if config['timeframe']:
                    expires = existing.first_seen + parse_timedelta(config['timeframe'])
                    if expires <= datetime.now():
                        # Timeframe reached, allow
                        log.debug('Skipping upgrade with identifier %s as timeframe reached', identifier)
                        continue

                # Filter out lower quality and propers
                action_on_lower = entry_actions[config['on_lower']] if config['on_lower'] != 'do_nothing' else None
                upgradeable = self.filter_entries(entries, existing, config['target'], action_on_lower)

                # Skip if we have no entries after filtering
                if not upgradeable:
                    continue

                # Sort entries in order of quality and best proper
                upgradeable.sort(key=lambda e: (e['quality'], e.get('proper_count', 0)), reverse=True)

                # First entry will be the best quality
                best = upgradeable.pop(0)
                best.accept('upgraded quality')
                log.debug('Found %s as upgraded quality for identifier %s', best['title'], identifier)

                # Process rest
                for entry in upgradeable:
                    log.debug('Skipping %s as lower quality than best %s', entry['title'], best['title'])
                    if action_on_lower:
                        action_on_lower(entry, 'lower quality than best match')
Code Example #42
File: subtitle_queue.py Project: tsnoam/Flexget
def queue_get(session=None):
    subs = session.query(QueuedSubtitle).filter(QueuedSubtitle.downloaded == False).all()
    # remove any items that have expired
    for sub_item in subs:
        if sub_item.added + parse_timedelta(sub_item.stop_after) < datetime.combine(date.today(), time()):
            log.debug('%s has expired. Removing.' % sub_item.title)
            subs.remove(sub_item)
            session.delete(sub_item)
    return subs
Code Example #43
    def estimated(self, entries, estimation_mode):
        """
        :param dict estimation_mode: mode -> loose, strict, ignore
        :return: Entries that we have estimated to be available
        """
        estimator = plugin.get('estimate_release', self)
        result = []
        for entry in entries:
            estimation = estimator.estimate(entry)

            est_date = estimation['entity_date']
            data_exists = estimation['data_exists']

            if est_date is None:
                if estimation_mode['mode'] == 'strict':
                    logger.debug('No release date could be determined for {}',
                                 entry['title'])
                    entry.reject('has no release date')
                    entry.complete()
                elif estimation_mode['mode'] == 'smart' and data_exists:
                    logger.debug(
                        'No release date could be determined for {}, but data exists',
                        entry['title'],
                    )
                    entry.reject('exists but has no release date')
                    entry.complete()
                elif estimation_mode['mode'] == 'smart' and not data_exists:
                    logger.debug(
                        'Discovering because mode is \'{}\' and no data is found for entry',
                        estimation_mode['mode'],
                    )
                    result.append(entry)
                else:
                    result.append(entry)
                continue
            if isinstance(est_date, datetime.date):
                # If we just got a date, add a time so we can compare it to now()
                est_date = datetime.datetime.combine(est_date, datetime.time())
            if datetime.datetime.now() >= est_date:
                logger.debug('{} has been released at {}', entry['title'],
                             est_date)
                result.append(entry)
            elif datetime.datetime.now() >= est_date - parse_timedelta(
                    estimation_mode['optimistic']):
                logger.debug(
                    '{} will be released at {}. Ignoring release estimation because estimated release date is in less than {}',
                    entry['title'],
                    est_date,
                    estimation_mode['optimistic'],
                )
                result.append(entry)
            else:
                entry.reject('has not been released')
                entry.complete()
                logger.verbose("{} hasn't been released yet (Expected: {})",
                               entry['title'], est_date)
        return result
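Every comparison above relies on `parse_timedelta` turning an interval string such as '7 days' into a `timedelta`. A simplified stand-in, for illustration only (the real helper lives in flexget.utils.tools and accepts more formats):

import datetime

def parse_timedelta_sketch(value):
    # Simplified: accepts only '<amount> <unit>' strings like '7 days' or '3 hours'.
    amount, unit = value.split()
    unit = unit if unit.endswith('s') else unit + 's'
    return datetime.timedelta(**{unit: float(amount)})

est_date = datetime.datetime.now() + datetime.timedelta(days=2)
# An entry is let through early when now falls inside the optimistic window.
print(datetime.datetime.now() >= est_date - parse_timedelta_sketch('7 days'))  # True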
Code example #44
File: test_rejected_api.py Project: Flexget/Flexget
    def test_rejected_pagination(self, api_client, link_headers):
        base_reject_entry = dict(
            title='test_title_', url='test_url_', rejected_by='rejected_by_', reason='reason_'
        )
        number_of_entries = 200

        with Session() as session:
            task = RememberTask(name='rejected API test')
            session.add(task)
            session.commit()

            for i in range(number_of_entries):
                r_entry = copy.deepcopy(base_reject_entry)
                for key, value in r_entry.items():
                    r_entry[key] = value + str(i)
                expires = datetime.now() + parse_timedelta('1 hours')
                session.add(RememberEntry(expires=expires, task_id=task.id, **r_entry))

        # Default values
        rsp = api_client.get('/rejected/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        data = json.loads(rsp.get_data(as_text=True))

        assert len(data) == 50
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 50

        links = link_headers(rsp)
        assert links['last']['page'] == 4
        assert links['next']['page'] == 2

        # Change page size
        rsp = api_client.get('/rejected/?per_page=100')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))

        assert len(data) == 100
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 100

        links = link_headers(rsp)
        assert links['last']['page'] == 2
        assert links['next']['page'] == 2

        # Get different page
        rsp = api_client.get('/rejected/?page=2')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))

        assert len(data) == 50
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 50

        links = link_headers(rsp)
        assert links['last']['page'] == 4
        assert links['next']['page'] == 3
        assert links['prev']['page'] == 1
Code example #47
File: discover.py Project: Flexget/Flexget
    def interval_expired(self, config, task, entries):
        """
        Maintain some limit levels so that we don't hammer search
        sites with an unreasonable amount of queries.

        :return: Entries that are up for ``config['interval']``
        """
        config.setdefault('interval', '5 hour')
        interval = parse_timedelta(config['interval'])
        if task.options.discover_now:
            log.info('Ignoring interval because of --discover-now')
        result = []
        interval_count = 0
        with Session() as session:
            for entry in entries:
                discover_entry = (
                    session.query(DiscoverEntry)
                    .filter(DiscoverEntry.title == entry['title'])
                    .filter(DiscoverEntry.task == task.name)
                    .first()
                )

                if not discover_entry:
                    log.debug('%s -> No previous run recorded', entry['title'])
                    discover_entry = DiscoverEntry(entry['title'], task.name)
                    session.add(discover_entry)
                if (
                    not task.is_rerun and task.options.discover_now
                ) or not discover_entry.last_execution:
                    # First time we execute (and on --discover-now) we randomize time to avoid clumping
                    delta = multiply_timedelta(interval, random.random())
                    discover_entry.last_execution = datetime.datetime.now() - delta
                else:
                    next_time = discover_entry.last_execution + interval
                    log.debug(
                        'last_time: %r, interval: %s, next_time: %r',
                        discover_entry.last_execution,
                        config['interval'],
                        next_time,
                    )
                    if datetime.datetime.now() < next_time:
                        log.debug('interval not met')
                        interval_count += 1
                        entry.reject('discover interval not met')
                        entry.complete()
                        continue
                    discover_entry.last_execution = datetime.datetime.now()
                log.trace('interval passed for %s', entry['title'])
                result.append(entry)
        if interval_count and not task.is_rerun:
            log.verbose(
                'Discover interval of %s not met for %s entries. Use --discover-now to override.',
                config['interval'],
                interval_count,
            )
        return result
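`multiply_timedelta` above backdates the first run by a random slice of the interval so entries don't all come due at once. A plausible implementation, assumed rather than copied from flexget.utils.tools:

import random
from datetime import timedelta

def multiply_timedelta_sketch(interval, number):
    # timedelta cannot be multiplied by a float on old Pythons; go via seconds.
    return timedelta(seconds=interval.total_seconds() * number)

interval = timedelta(hours=5)
# A random offset in [0, interval) spreads next-due times uniformly.
delta = multiply_timedelta_sketch(interval, random.random())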
Code example #48
def add_rejected_entry(entry):
    with Session() as session:
        task = RememberTask(name='rejected API test')
        session.add(task)
        session.commit()
        expires = datetime.now() + parse_timedelta('1 hours')
        session.add(
            RememberEntry(title=entry['test_title'], url=entry['test_url'], task_id=task.id,
                          rejected_by=entry['rejected_by'], reason=entry['reason'], expires=expires))
        session.commit()
Code example #49
def add_rejected_entry(entry):
    with Session() as session:
        task = RememberTask(name='rejected API test')
        session.add(task)
        session.commit()

        expires = datetime.now() + parse_timedelta('1 hours')
        session.add(
            RememberEntry(title=entry['test_title'], url=entry['test_url'], task_id=task.id,
                          rejected_by=entry['rejected_by'], reason=entry['reason'], expires=expires))
Code example #50
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        nrat = float(config['min_ratio']) if 'min_ratio' in config else None
        nfor = parse_timedelta(
            config['finished_for']) if 'finished_for' in config else None
        delete_files = bool(
            config['delete_files']) if 'delete_files' in config else False
        trans_checks = bool(
            config['transmission_seed_limits']
        ) if 'transmission_seed_limits' in config else False

        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose(
                'Torrent "%s": status: "%s" - ratio: %s -  date added: %s - date done: %s'
                % (torrent.name, torrent.status, torrent.ratio,
                   torrent.date_added, torrent.date_done))
            downloaded, dummy = self.torrent_info(torrent)
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(
                torrent, session)
            # trans_checks is always a bool here, so test against False rather than None
            is_clean_all = nrat is None and nfor is None and trans_checks is False
            is_minratio_reached = nrat and (nrat <= torrent.ratio)
            is_transmission_seedlimit_unset = trans_checks and seed_ratio_ok is None and idle_limit_ok is None
            is_transmission_seedlimit_reached = trans_checks and seed_ratio_ok is True
            is_transmission_idlelimit_reached = trans_checks and idle_limit_ok is True
            is_torrent_seed_only = torrent.date_done <= torrent.date_added
            is_torrent_idlelimit_since_added_reached = nfor and (
                torrent.date_added + nfor) <= datetime.now()
            is_torrent_idlelimit_since_finished_reached = nfor and (
                torrent.date_done + nfor) <= datetime.now()
            if (downloaded and
                (is_clean_all or is_transmission_seedlimit_unset
                 or is_transmission_seedlimit_reached
                 or is_transmission_idlelimit_reached or is_minratio_reached or
                 (is_torrent_seed_only
                  and is_torrent_idlelimit_since_added_reached) or
                 (not is_torrent_seed_only
                  and is_torrent_idlelimit_since_finished_reached))):
                if task.options.test:
                    log.info(
                        'Would remove finished torrent `%s` from transmission'
                        % torrent.name)
                    continue
                log.info('Removing finished torrent `%s` from transmission' %
                         torrent.name)
                remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, delete_files)
Code example #51
File: retry_failed.py Project: cash2one/flexget
 def retry_time(self, fail_count, config):
     """Return the timedelta an entry that has failed `fail_count` times before should wait before being retried."""
     base_retry_time = parse_timedelta(config['retry_time'])
     # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
     base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds
     retry_secs = base_retry_secs * (config['retry_time_multiplier'] ** fail_count)
     # prevent OverflowError: date value out of range, cap to 30 days
     max_secs = 60 * 60 * 24 * 30
     if retry_secs > max_secs:
         retry_secs = max_secs
     return timedelta(seconds=retry_secs)
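For a feel of the backoff curve this produces, a quick illustration with assumed config values (base retry_time of '1 hour', multiplier 1.5):

from datetime import timedelta

base_secs = timedelta(hours=1).total_seconds()
for fail_count in range(4):
    secs = min(base_secs * 1.5 ** fail_count, 60 * 60 * 24 * 30)  # 30-day cap
    print(fail_count, timedelta(seconds=secs))
# 0 1:00:00
# 1 1:30:00
# 2 2:15:00
# 3 3:22:30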
Code example #54
File: requests.py Project: JorisDeRieck/Flexget
 def __init__(self, domain, tokens, rate, wait=True):
     """
     :param int tokens: Size of bucket
     :param rate: Amount of time to accrue 1 token. Either `timedelta` or interval string.
     :param bool wait: If true, will wait for a token to be available. If false, errors when token is not available.
     """
     super(TokenBucketLimiter, self).__init__(domain)
     self.max_tokens = tokens
     self.rate = parse_timedelta(rate)
     self.wait = wait
     # Restore previous state for this domain, or establish new state cache
     self.state = self.state_cache.setdefault(domain, {"tokens": self.max_tokens, "last_update": datetime.now()})
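The constructor above only primes the shared per-domain state; the matching acquire step is not shown. A hedged sketch of what it plausibly does (illustrative, not the actual Flexget implementation): accrue whole tokens for the elapsed time, then spend one, waiting or erroring when the bucket is empty.

import time
from datetime import datetime

def acquire_sketch(state, max_tokens, rate, wait=True):
    # Accrue whole tokens for the time elapsed since the last update.
    elapsed = datetime.now() - state['last_update']
    accrued = int(elapsed.total_seconds() // rate.total_seconds())
    if accrued:
        state['tokens'] = min(max_tokens, state['tokens'] + accrued)
        state['last_update'] += rate * accrued
    if state['tokens'] < 1:
        if not wait:
            raise RuntimeError('rate limit exceeded for domain')
        # Sleep until the next token lands, then account for it.
        time.sleep(max(0.0, (state['last_update'] + rate - datetime.now()).total_seconds()))
        state['tokens'] += 1
        state['last_update'] += rate
    state['tokens'] -= 1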
Code example #55
    def on_task_exit(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        nrat = float(config['min_ratio']) if 'min_ratio' in config else None
        nfor = parse_timedelta(config['finished_for']) if 'finished_for' in config else None
        delete_files = bool(config['delete_files']) if 'delete_files' in config else False
        trans_checks = bool(config['transmission_seed_limits']) if 'transmission_seed_limits' in config else False
        tracker_re = re.compile(config['tracker'], re.IGNORECASE) if 'tracker' in config else None
        preserve_tracker_re = re.compile(config['preserve_tracker'], re.IGNORECASE) if 'preserve_tracker' in config else None
        directories_re = config.get('directories')

        session = self.client.get_session()

        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose('Torrent "%s": status: "%s" - ratio: %s -  date added: %s - date done: %s' %
                        (torrent.name, torrent.status, torrent.ratio, torrent.date_added, torrent.date_done))
            downloaded, dummy = self.torrent_info(torrent, config)
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
            # use a list, not a generator: tracker_hosts is consumed by two any() calls below
            tracker_hosts = [urlparse(tracker['announce']).hostname for tracker in torrent.trackers]
            is_clean_all = nrat is None and nfor is None and trans_checks is False
            is_minratio_reached = nrat and (nrat <= torrent.ratio)
            is_transmission_seedlimit_unset = trans_checks and seed_ratio_ok is None and idle_limit_ok is None
            is_transmission_seedlimit_reached = trans_checks and seed_ratio_ok is True
            is_transmission_idlelimit_reached = trans_checks and idle_limit_ok is True
            is_torrent_seed_only = torrent.date_done <= torrent.date_added
            is_torrent_idlelimit_since_added_reached = nfor and (torrent.date_added + nfor) <= datetime.now()
            is_torrent_idlelimit_since_finished_reached = nfor and (torrent.date_done + nfor) <= datetime.now()
            is_tracker_matching = not tracker_re or any(tracker_re.search(host) for host in tracker_hosts)
            is_preserve_tracker_matching = False
            if preserve_tracker_re is not None:
                is_preserve_tracker_matching = any(preserve_tracker_re.search(host) for host in tracker_hosts)
            is_directories_matching = not directories_re or any(
                re.compile(directory, re.IGNORECASE).search(torrent.downloadDir) for directory in directories_re)
            if (downloaded and (is_clean_all or
                                is_transmission_seedlimit_unset or
                                is_transmission_seedlimit_reached or
                                is_transmission_idlelimit_reached or
                                is_minratio_reached or
                                (is_torrent_seed_only and is_torrent_idlelimit_since_added_reached) or
                                (not is_torrent_seed_only and is_torrent_idlelimit_since_finished_reached)) and
                    is_directories_matching and (not is_preserve_tracker_matching and is_tracker_matching)):
                if task.options.test:
                    log.info('Would remove finished torrent `%s` from transmission', torrent.name)
                    continue
                log.info('Removing finished torrent `%s` from transmission', torrent.name)
                remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, delete_files)
Code example #56
File: series.py Project: askielboe/Flexget
    def process_timeframe(self, feed, config, eps, series_name):
        """
        Runs the timeframe logic to determine if we should wait for a better quality.
        Saves current best to backlog if timeframe has not expired.

        :returns: True - if we should keep the quality (or qualities) restriction
                  False - if the quality restriction should be released, due to timeframe expiring
        """

        if 'timeframe' not in config:
            return True

        best = eps[0]

        # parse options
        log.debug('timeframe: %s' % config['timeframe'])
        try:
            timeframe = parse_timedelta(config['timeframe'])
        except ValueError:
            raise PluginWarning('Invalid time format', log)

        # Make sure we only start timing from the first seen quality that matches min and max requirements.
        min_quality = config.get('min_quality') and qualities.get(config['min_quality'])
        max_quality = config.get('max_quality') and qualities.get(config['max_quality'])
        first_seen = self.get_first_seen(feed.session, best, min_quality, max_quality)
        expires = first_seen + timeframe
        log.debug('timeframe: %s, first_seen: %s, expires: %s' % (timeframe, first_seen, expires))

        stop = feed.manager.options.stop_waiting.lower() == series_name.lower()
        if expires <= datetime.now() or stop:
            # Expire timeframe, accept anything
            log.info('Timeframe expired, releasing quality restriction.')
            return False
        else:
            # verbose waiting, add to backlog
            diff = expires - datetime.now()

            hours, remainder = divmod(diff.seconds, 3600)
            hours += diff.days * 24
            minutes, seconds = divmod(remainder, 60)

            entry = self.parser2entry[best]
            log.info('Timeframe waiting %s for %sh:%smin, currently best is %s' %
                     (series_name, hours, minutes, entry['title']))

            # add best entry to backlog (backlog is able to handle duplicate adds)
            if self.backlog:
                self.backlog.add_backlog(feed, entry)
            return True
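The divmod arithmetic above folds days into the hour count. For instance, a remaining diff of 2 days, 3:05:00 is reported as 51h:5min:

from datetime import timedelta

diff = timedelta(days=2, hours=3, minutes=5)
hours, remainder = divmod(diff.seconds, 3600)  # (3, 300)
hours += diff.days * 24                        # 3 + 48 = 51
minutes, seconds = divmod(remainder, 60)       # (5, 0)
print('%sh:%smin' % (hours, minutes))          # 51h:5min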
Code example #58
 def on_entry_reject(self, entry, remember=None, remember_time=None, **kwargs):
     # We only remember rejections that specify the remember keyword argument
     if not (remember or remember_time):
         return
     if not entry.get('title') or not entry.get('original_url'):
         log.debug('Can\'t remember rejection for entry without title or url.')
         return
     if remember_time:
         if isinstance(remember_time, basestring):
             remember_time = parse_timedelta(remember_time)
     message = 'Remembering rejection of `%s`' % entry['title']
     if remember_time:
         # total_seconds() so periods of a day or more report correctly
         message += ' for %i minutes' % (remember_time.total_seconds() / 60)
     log.info(message)
     entry['remember_rejected'] = remember_time or remember
Code example #59
File: plugin_digest.py Project: FaridGaffoor/Flexget
 def on_task_input(self, task, config):
     entries = []
     with Session() as session:
         digest_entries = session.query(DigestEntry).filter(DigestEntry.list == config["list"])
         # Remove any entries older than the expire time, if defined.
         if isinstance(config.get("expire"), basestring):
             expire_time = parse_timedelta(config["expire"])
             digest_entries.filter(DigestEntry.added < datetime.now() - expire_time).delete()
         for index, digest_entry in enumerate(digest_entries.order_by(DigestEntry.added.desc()).all()):
             # Just remove any entries past the limit, if set.
             if 0 < config.get("limit", -1) <= index:
                 session.delete(digest_entry)
                 continue
             entries.append(Entry(digest_entry.entry))
             # If expire is 'True', we remove it after it is output once.
             if config.get("expire", True) is True:
                 session.delete(digest_entry)
     return entries
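From the reads above, `expire` is overloaded: an interval string prunes entries by age, while `True` (the default) deletes each entry once it has been output. Assumed config shapes, shown only to make the branches concrete:

# Hypothetical config dicts inferred from the code above.
config_age = {'list': 'my-digest', 'expire': '7 days'}  # purge entries older than 7 days
config_once = {'list': 'my-digest', 'expire': True}     # delete each entry after one output
config_cap = {'list': 'my-digest', 'limit': 100}        # keep only the 100 newest entries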