def test_register_by_class(self):
    class TestPlugin(object):
        pass

    class Oneword(object):
        pass

    class TestHTML(object):
        pass

    assert 'test_plugin' not in plugin.plugins
    plugin.register_plugin(TestPlugin)
    plugin.register_plugin(Oneword)
    plugin.register_plugin(TestHTML)
    assert 'test_plugin' in plugin.plugins
    assert 'oneword' in plugin.plugins
    assert 'test_html' in plugin.plugins
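# The asserts above imply that register_plugin derives the plugin name from
# the class name: CamelCase becomes lowercase-with-underscores, and an acronym
# run like 'HTML' stays together as one word. A minimal sketch of such a
# conversion (hypothetical helper, not FlexGet's actual implementation):
import re

def name_from_class(cls):
    # 'TestPlugin' -> 'test_plugin', 'TestHTML' -> 'test_html', 'Oneword' -> 'oneword'
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', cls.__name__).lower()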
            elif entry['series_id_type'] == 'sequence':
                lookupargs['absolutenum'] = entry['series_id'] + episode_offset
            elif entry['series_id_type'] == 'date':
                lookupargs['airdate'] = entry['series_date']
            episode = lookup_episode(**lookupargs)
            entry.update_using_map(self.episode_map, episode)
        except LookupError as e:
            log.debug('Error looking up tvdb episode information for %s: %s' % (entry['title'], e.message))
            entry.unregister_lazy_fields(self.episode_map, self.lazy_episode_lookup)
        return entry[field]

    # Run after series and metainfo series
    @priority(110)
    def on_task_metainfo(self, task, config):
        if not config:
            return
        for entry in task.entries:
            # If there is information for a series lookup, register our series lazy fields
            if entry.get('series_name') or entry.get('thetvdb_id', eval_lazy=False):
                entry.register_lazy_fields(self.series_map, self.lazy_series_lookup)
                # If there is season and ep info as well, register episode lazy fields
                if entry.get('series_id_type') in ('ep', 'sequence', 'date'):
                    entry.register_lazy_fields(self.episode_map, self.lazy_episode_lookup)
                # TODO: lookup for 'seq' and 'date' type series


register_plugin(PluginThetvdbLookup, 'thetvdb_lookup', api_ver=2)
import logging

from flexget.plugin import register_plugin

log = logging.getLogger('accept_all')


class FilterAcceptAll(object):
    """
    Just accepts all entries.

    Example::

      accept_all: true
    """

    def validator(self):
        from flexget import validator
        return validator.factory('boolean')

    def on_task_filter(self, task, config):
        if config:
            for entry in task.entries:
                task.accept(entry)


register_plugin(FilterAcceptAll, 'accept_all', api_ver=2)
    def entry_intersects(self, e1, e2, fields=None):
        """
        :param e1: First :class:`flexget.entry.Entry`
        :param e2: Second :class:`flexget.entry.Entry`
        :param fields: List of fields which are checked
        :return: List of field names in common
        """
        if fields is None:
            fields = []
        common_fields = []
        for field in fields:
            # TODO: simplify if seems to work (useless debug)
            log.trace('checking field %s' % field)
            v1 = e1.get(field, object())
            v2 = e2.get(field, object())
            log.trace('v1: %r' % v1)
            log.trace('v2: %r' % v2)
            if v1 == v2:
                common_fields.append(field)
            else:
                log.trace('not matching')
        return common_fields


register_plugin(CrossMatch, 'crossmatch', api_ver=2)
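# Note on the entry_intersects() defaults above: every object() call creates a
# fresh, unique sentinel, so two entries that both *lack* a field receive two
# different defaults and are never counted as matching. Illustration:
#
#   d1, d2 = {}, {}
#   d1.get('x', object()) == d2.get('x', object())   # False: distinct sentinels
#   d1['x'] = d2['x'] = 1
#   d1.get('x', object()) == d2.get('x', object())   # True: both present and equal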
                # use tr object for seeders/leechers
                seeders, leechers = tr.find_all('td', ["seeders", "leechers"])
                entry['torrent_seeds'] = int(seeders.contents[0])
                entry['torrent_leeches'] = int(leechers.contents[0])
                entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])

                # use tr object for size
                size = tr.find("td", text=re.compile('([\.\d]+) ([GMK])B')).contents[0]
                size = re.search('([\.\d]+) ([GMK])B', size)
                if size:
                    if size.group(2) == 'G':
                        entry['content_size'] = int(float(size.group(1)) * 1000 ** 3 / 1024 ** 2)
                    elif size.group(2) == 'M':
                        entry['content_size'] = int(float(size.group(1)) * 1000 ** 2 / 1024 ** 2)
                    else:
                        entry['content_size'] = int(float(size.group(1)) * 1000 / 1024 ** 2)
                entries.append(entry)

        if not entries:
            dashindex = query.rfind('-')
            if dashindex != -1:
                return self.search(query[:dashindex])
            else:
                raise PluginWarning('No close matches for %s' % query, log, log_once=True)

        entries.sort(reverse=True, key=lambda x: x.get('search_sort'))
        return entries


register_plugin(UrlRewriteTorrentleech, 'torrentleech', groups=['urlrewriter', 'search'])
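# The size arithmetic above converts the site's decimal GB/MB/KB figures into
# the mebibyte-based content_size value. A worked check of the 'G' branch for
# a hypothetical "1.5 GB" listing:
#
#   int(float('1.5') * 1000 ** 3 / 1024 ** 2)  # == 1430 (MiB)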
        return validator.factory('boolean')

    @priority(180)
    def on_task_filter(self, task, config):
        # Return if we are disabled.
        if config is False:
            return
        # First make sure all the torrent_info_hash fields are in upper case
        for entry in task.entries:
            if isinstance(entry.get('torrent_info_hash'), basestring):
                entry['torrent_info_hash'] = entry['torrent_info_hash'].upper()
        FilterSeen.on_task_filter(self, task, config, remember_rejected=True)

    def on_task_modify(self, task, config):
        # Return if we are disabled.
        if config is False:
            return
        # Run the filter again after the torrent plugin has populated the infohash
        self.on_task_filter(task, config)
        # Make sure no duplicates were accepted this run
        accepted_infohashes = set()
        for entry in task.accepted:
            if 'torrent_info_hash' in entry:
                infohash = entry['torrent_info_hash']
                if infohash in accepted_infohashes:
                    task.reject(entry, 'Already accepted torrent with this infohash once for this task')
                else:
                    accepted_infohashes.add(infohash)


register_plugin(FilterSeenInfoHash, 'seen_info_hash', builtin=True, api_ver=2)
                s = raw_input('--> ')
                if s == 'exit':
                    break
                if s == 'abort' or s == 'continue':
                    self.abort = True
                    break
            except EOFError:
                break

            count = 0
            for entry in task.entries:
                try:
                    match, field = self.matches(entry, s)
                    if match:
                        print 'Title: %-40s URL: %-30s From: %s' % (entry['title'], entry['url'], field)
                        count += 1
                except:
                    print 'Invalid regular expression'
                    break
            print '%s of %s entries matched' % (count, len(task.entries))
        print 'Bye!'


register_plugin(PluginTryRegexp, '--try-regexp', builtin=True)
register_parser_option('--try-regexp', action='store_true', dest='try_regexp', default=False,
                       help='Try regular expressions interactively.')
class OutputHtml:

    def validator(self):
        from flexget import validator
        root = validator.factory('dict')
        root.accept('text', key='template')
        root.accept('text', key='file', required=True)
        return root

    def on_task_output(self, task, config):
        # Use the default template if none is specified
        if not config.get('template'):
            config['template'] = 'default.template'

        filename = os.path.expanduser(config['template'])
        output = os.path.expanduser(config['file'])
        # Output to config directory if absolute path has not been specified
        if not os.path.isabs(output):
            output = os.path.join(task.manager.config_base, output)

        # create the template
        template = render_from_task(get_template(filename, PLUGIN_NAME), task)

        log.verbose('Writing output html to %s' % output)
        f = open(output, 'w')
        f.write(template.encode('utf-8'))
        f.close()


register_plugin(OutputHtml, PLUGIN_NAME, api_ver=2)
        root = validator.factory()
        # Accept one proxy for everything
        root.accept('url')
        # Accept a dict mapping protocol to proxy
        advanced = root.accept('dict')
        for prot in PROTOCOLS:
            advanced.accept('url', key=prot)
        return root

    @plugin.priority(255)
    def on_task_start(self, task, config):
        if not config:
            # If no configuration is provided, see if there are any proxy env variables
            proxies = {}
            for prot in PROTOCOLS:
                if os.environ.get(prot + '_proxy'):
                    proxies[prot] = os.environ[prot + '_proxy']
            if not proxies:
                # If there were no environment variables set, do nothing
                return
        elif isinstance(config, dict):
            proxies = config
        else:
            # Map all protocols to the configured proxy
            proxies = dict((prot, config) for prot in PROTOCOLS)
        log.verbose('Setting proxy to %s' % proxies)
        task.requests.proxies = proxies


plugin.register_plugin(Proxy, 'proxy', builtin=True, api_ver=2)
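# For reference, the mapping assigned to task.requests.proxies above follows
# the standard requests-library shape of protocol -> proxy URL, e.g.
# (addresses illustrative only):
#
#   {'http': 'http://10.0.0.1:8080', 'https': 'http://10.0.0.1:8080'}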
            check_auth()
            raise PluginError('Error getting trakt list: %s' % data['error'])
        if not data:
            check_auth()
            log.warning('No data returned from trakt.')
            return
        if url_params['data_type'] == 'custom':
            if not isinstance(data['items'], list):
                raise PluginError('Faulty custom items in response: %s' % data['items'])
            data = data['items']
        for item in data:
            if url_params['data_type'] == 'custom':
                if item['type'] == 'movie':
                    map = self.movie_map
                    item = item['movie']
                else:
                    map = self.series_map
                    item = item['show']
            entry = Entry()
            entry.update_using_map(map, item)
            if entry.isvalid():
                if config.get('strip_dates'):
                    # Remove year from end of name if present
                    entry['title'] = re.sub('\s+\(\d{4}\)$', '', entry['title'])
                entries.append(entry)
        return entries


register_plugin(TraktList, 'trakt_list', api_ver=2)
            acodec = media.getAttribute('audioCodec')
            container = media.getAttribute('container')
            resolution = media.getAttribute('videoResolution') + "p"
            for part in media.getElementsByTagName('Part'):
                key = part.getAttribute('key')
                e = Entry()
                if config['original_filename']:
                    e['title'] = basename(part.getAttribute('file'))
                else:
                    if config['strip_year']:
                        title = re.sub(r'^(.*)\(\d+\)$', r'\1', title)
                    title = re.sub(r'[\(\)]', r'', title)
                    title = re.sub(r'\&', r'And', title).strip()
                    title = re.sub(r'[^A-Za-z0-9- ]', r'', title).replace(" ", ".")
                    if config['lowercase_title']:
                        title = title.lower()
                    e['title'] = filenamemap % (title, season, episode, resolution, vcodec, acodec, container)
                e['url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, accesstoken)
                e['plex_server'] = plexserver
                e['plex_server_ip'] = config['server']
                e['plex_port'] = config['port']
                e['plex_section'] = config['section']
                e['plex_section_name'] = plexsectionname
                e['plex_path'] = key
                entries.append(e)
        else:
            raise PluginError('Selected section is not a TV section.')
        return entries


register_plugin(InputPlex, 'plex', api_ver=2)
                    if director_name in accepted:
                        log.debug('Accepting because of accept_directors %s' % director_name)
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('rt_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' % entry['rt_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('rt_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' % entry.get('rt_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % \
                    (entry.get('rt_name', None) or entry['title'], ', '.join(reasons))
                if task.manager.options.debug:
                    log.debug(msg)
                else:
                    if task.manager.options.quiet:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()


register_plugin(FilterRottenTomatoes, 'rottentomatoes', api_ver=2)
            raise PluginError('Cookies could not be loaded: %s' % sys.exc_info()[1], log)
        if cookie_file not in self.cookiejars:
            self.cookiejars[cookie_file] = cj

        # Add cookiejar to our requests session
        task.requests.add_cookiejar(cj)
        # Add handler to urllib2 default opener for backwards compatibility
        handler = urllib2.HTTPCookieProcessor(cj)
        if urllib2._opener:
            log.debug('Adding HTTPCookieProcessor to default opener')
            urllib2._opener.add_handler(handler)
        else:
            log.debug('Creating new opener and installing it')
            urllib2.install_opener(urllib2.build_opener(handler))

    def on_task_exit(self, task, config):
        """Task exiting, remove cookiejar"""
        log.debug('Removing urllib2 opener')
        urllib2.install_opener(None)

    # Task aborted, unhook the cookiejar
    on_task_abort = on_task_exit

    def on_process_end(self, task, config):
        self.cookiejars = {}


register_plugin(PluginCookies, 'cookies', api_ver=2)
                continue
            entry = Entry()
            entry['title'] = item.title
            entry['url'] = item.link
            entry['search_ratio'] = comparator.ratio()
            # TODO: parse some shit
            #entry['torrent_seeds'] = int(item.seeds)
            #entry['torrent_leeches'] = int(item.leechs)
            #entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
            #entry['content_size'] = int(item.size) / 1024 / 1024
            entries.append(entry)

        # choose torrent
        if not entries:
            raise PluginWarning('No matches for %s' % name, log, log_once=True)

        entries.sort(reverse=True, key=lambda x: x.get('search_sort'))
        return entries

    def url_rewritable(self, task, entry):
        return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')

    def url_rewrite(self, task, entry):
        entry['url'] = entry['url'].replace('torrentinfo', 'download')


register_plugin(UrlRewriteNyaa, 'nyaa', groups=['search', 'urlrewriter'])
        :param entry: Entry instance
        :param search_allowed: Allow fallback to search
        :raises PluginError: Failure reason
        """
        imdb_id = entry.get('imdb_id', eval_lazy=False) or \
            imdb.extract_id(entry.get('imdb_url', eval_lazy=False))
        if imdb_id:
            movie = lookup_movie(title=entry.get('imdb_name'),
                                 year=entry.get('imdb_year'),
                                 rottentomatoes_id=entry.get('rt_id', eval_lazy=False),
                                 imdb_id=imdb_id,
                                 only_cached=(not search_allowed))
        else:
            movie = lookup_movie(smart_match=entry['title'],
                                 rottentomatoes_id=entry.get('rt_id', eval_lazy=False),
                                 only_cached=(not search_allowed))
        log.debug(u'Got movie: %s' % movie)
        entry.update_using_map(self.field_map, movie)

    def on_task_metainfo(self, task, config):
        if not config:
            return
        for entry in task.entries:
            entry.register_lazy_fields(self.field_map, self.lazy_loader)


register_plugin(PluginRottenTomatoesLookup, 'rottentomatoes_lookup', api_ver=2)
            method = input.phase_handlers['input']
            result = method(task, input_config)
            if not result:
                log.warning('Input %s did not return anything' % input_name)
                continue

            for entry in result:
                s = series.setdefault(entry['title'], {})
                if entry.get('thetvdb_id'):
                    s['set'] = {'thetvdb_id': entry['thetvdb_id']}

        if not series:
            log.info('Did not get any series to generate series configuration')
            return

        # Make a series config with the found series
        # Turn our dict of series with settings into a list of one item dicts
        series_config = {'generated_series': [dict([s]) for s in series.iteritems()]}
        # If options were specified, add them to the series config
        if 'settings' in config:
            series_config['settings'] = {'generated_series': config['settings']}
        # Merge our series config in with the base series config
        self.merge_config(task, series_config)


register_plugin(ImportSeries, 'import_series', api_ver=2)
        from flexget import validator
        trackers = validator.factory()
        bundle = trackers.accept('list').accept('dict')
        # prevent invalid indentation level
        bundle.reject_keys(['from', 'to'],
                           'Option \'$key\' has invalid indentation level. It needs 2 more spaces.')
        edit = bundle.accept_any_key('dict')
        edit.accept('text', key='from', required=True)
        edit.accept('text', key='to', required=True)
        return trackers

    @priority(127)
    def on_task_modify(self, task, config):
        for entry in task.entries:
            if 'torrent' in entry:
                torrent = entry['torrent']
                trackers = torrent.trackers
                for item in config:
                    for replace in item.itervalues():
                        for tracker in trackers:
                            if replace.get('from') in tracker:
                                torrent.remove_multitracker(tracker)
                                trackernew = tracker.replace(replace.get('from'), replace.get('to'))
                                torrent.add_multitracker(trackernew)
                                log.info('Modified %s to %s' % (tracker, trackernew))


register_plugin(AddTrackers, 'add_trackers', api_ver=2)
register_plugin(RemoveTrackers, 'remove_trackers', api_ver=2)
register_plugin(ModifyTrackers, 'modify_trackers', api_ver=2)
                # filter only those that have matching release names
                langsubs = filter(lambda x: seqmatch(x['MovieReleaseName']), subtitles)
                if langsubs:
                    # find the best one by SubRating
                    langsubs.sort(key=lambda x: float(x['SubRating']))
                    langsubs.reverse()
                    filtered_subs.append(langsubs[0])

        # download
        for sub in filtered_subs:
            log.debug('SUBS FOUND: %s %s %s' %
                      (sub['MovieReleaseName'], sub['SubRating'], sub['SubLanguageID']))
            f = urlopener(sub['ZipDownloadLink'], log)
            subfilename = re.match('^attachment; filename="(.*)"$',
                                   f.info()['content-disposition']).group(1)
            outfile = os.path.join(config['output'], subfilename)
            # write in binary mode; the download is a zip archive
            fp = open(outfile, 'wb')
            fp.write(f.read())
            fp.close()
            f.close()

        s.LogOut(token)


register_plugin(Subtitles, 'subtitles')
        private_torrents: yes

    This would reject all public torrents. Non-torrent content is not affected.
    """

    def validator(self):
        from flexget import validator
        return validator.factory("boolean")

    @priority(127)
    def on_task_modify(self, task):
        private_torrents = task.config["private_torrents"]
        for entry in task.accepted:
            if "torrent" not in entry:
                log.debug("`%s` is not a torrent" % entry["title"])
                continue
            private = entry["torrent"].private
            if not private_torrents and private:
                entry.reject("torrent is marked as private", remember=True)
            elif private_torrents and not private:
                entry.reject("public torrent", remember=True)


register_plugin(FilterPrivateTorrents, "private_torrents")
        task.manager.disable_tasks()

        name = unicode(task.manager.options.series_forget[0])

        if len(task.manager.options.series_forget) > 1:
            # remove by id
            identifier = task.manager.options.series_forget[1].upper()
            if identifier and name:
                try:
                    forget_series_episode(name, identifier)
                    print 'Removed episode `%s` from series `%s`.' % (identifier, name.capitalize())
                except ValueError as e:
                    print e.message
        else:
            # remove whole series
            try:
                forget_series(name)
                print 'Removed series `%s` from database.' % name.capitalize()
            except ValueError as e:
                print e.message

        task.manager.config_changed()


register_plugin(SeriesReport, '--series', builtin=True)
register_plugin(SeriesForget, '--series-forget', builtin=True)
register_parser_option('--series', nargs='?', const=True, help='Display series summary.')
register_parser_option('--series-forget', nargs='1-2', metavar=('NAME', 'EP_ID'),
                       help='Remove complete series or single episode from database: <NAME> [EPISODE]')
from __future__ import unicode_literals, division, absolute_import
import logging
import random
import string

from flexget.entry import Entry
from flexget import validator
from flexget.plugin import register_plugin

log = logging.getLogger(__name__.rsplit('.')[-1])


class Generate(object):
    """Generates n number of random entries. Used for debugging purposes."""

    def validator(self):
        return validator.factory('integer')

    def on_task_input(self, task, config):
        amount = config or 0  # hackily makes sure it's an int value
        for i in range(amount):
            entry = Entry()
            entry['url'] = 'http://localhost/generate/%s/%s' % (
                i, ''.join(random.choice(string.letters + string.digits) for x in range(1, 30)))
            entry['title'] = ''.join(random.choice(string.letters + string.digits) for x in range(1, 30))
            entry['description'] = ''.join(random.choice(string.letters + string.digits) for x in range(1, 1000))
            task.entries.append(entry)


register_plugin(Generate, 'generate', api_ver=2, debug=True)
                    if director_id in accepted or director_name in accepted:
                        # parenthesized so the `or` picks a name, not the formatted string
                        log.debug('Accepting because of accept_directors %s' % (director_name or director_id))
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' % entry['imdb_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' % entry.get('imdb_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % \
                    (entry.get('imdb_name', None) or entry['title'], ', '.join(reasons))
                if task.manager.options.debug:
                    log.debug(msg)
                else:
                    if task.manager.options.quiet:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()


register_plugin(FilterImdb, 'imdb', api_ver=2)
            description = entry['title']
            log.error('Error rendering jinja description: %s' % e)

        # Open connection
        h = HTTPSConnection('prowl.weks.net')

        # Send the request
        data = {'priority': priority, 'application': application, 'apikey': apikey,
                'event': event, 'description': description}
        h.request("POST", "/publicapi/add", headers=headers, body=urlencode(data))

        # Check if it succeeded
        response = h.getresponse()
        request_status = response.status

        # error codes and messages from http://prowl.weks.net/api.php
        if request_status == 200:
            log.debug("Prowl message sent")
        elif request_status == 400:
            log.error("Bad request, the parameters you provided did not validate")
        elif request_status == 401:
            log.error("Not authorized, the API key given is not valid, and does not correspond to a user.")
        elif request_status == 406:
            log.error("Not acceptable, your IP address has exceeded the API limit.")
        elif request_status == 500:
            log.error("Internal server error, something failed to execute properly on the Prowl side.")
        else:
            log.error("Unknown error when sending Prowl message")


register_plugin(OutputProwl, 'prowl')
    item = session.query(QueuedMovie).filter(QueuedMovie.imdb_id == imdb_id).first()
    if item:
        item.quality = quality
        session.commit()
        return item.title
    else:
        raise QueueError('%s is not in the queue' % imdb_id)


@with_session
def queue_get(session=None, downloaded=False):
    """Get the current IMDb queue.

    KWArgs:
        session: new session is used if not given
        downloaded: boolean whether or not to return only downloaded

    Returns:
        List of QueuedMovie objects (detached from session)
    """
    if not downloaded:
        return session.query(QueuedMovie).filter(QueuedMovie.downloaded == None).all()
    else:
        return session.query(QueuedMovie).filter(QueuedMovie.downloaded != None).all()


register_plugin(FilterMovieQueue, 'movie_queue', api_ver=2)
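# Note: `QueuedMovie.downloaded == None` / `!= None` above is the intentional
# SQLAlchemy idiom, not a bug; the column overloads the operator so the
# expression compiles to SQL IS NULL / IS NOT NULL (a plain `is None` would
# not produce a filterable clause). In newer SQLAlchemy versions the same
# filter can be written more explicitly as:
#
#   session.query(QueuedMovie).filter(QueuedMovie.downloaded.is_(None)).all()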
"""Gets called when all of our tasks for deluge daemon are complete.""" client.disconnect() tasks = defer.DeferredList(dlist).addBoth(on_complete) def on_timeout(result): """Gets called if tasks have not completed in 30 seconds. Should only happen when something goes wrong.""" log.error('Timed out while adding torrents to deluge.') log.debug('dlist: %s' % result.resultList) client.disconnect() # Schedule a disconnect to happen if FlexGet hangs while connected to Deluge # Leave the timeout long, to give time for possible lookups to occur reactor.callLater(300, lambda: tasks.called or on_timeout(tasks)) def on_task_exit(self, task, config): """Make sure all temp files are cleaned up when task exits""" # If download plugin is enabled, it will handle cleanup. if not 'download' in task.config: download = get_plugin_by_name('download') download.instance.cleanup_temp_files(task) def on_task_abort(self, task, config): """Make sure normal cleanup tasks still happen on abort.""" DelugePlugin.on_task_abort(self, task, config) self.on_task_exit(task, config) register_plugin(InputDeluge, 'from_deluge', api_ver=2) register_plugin(OutputDeluge, 'deluge', api_ver=2)
        presets.accept('text')
        return root

    @priority(250)
    def on_task_start(self, task, config):
        if isinstance(config, basestring):
            config = [config]
        # let's disable them
        for disable in config:
            if disable in task.config:
                log.debug('disabling %s' % disable)
                del (task.config[disable])


root_config_schema = {
    'type': 'object',
    'additionalProperties': {}  # TODO: Reject keys that are plugin names
}
register_config_key('presets', root_config_schema)
register_plugin(PluginPreset, 'preset', builtin=True, api_ver=2)
register_plugin(DisablePlugin, 'disable_plugin', api_ver=2)
register_parser_option('--preset', action='store', dest='preset', default=False,
                       metavar='NAME', help='Execute tasks with given preset.')
import logging

from flexget.plugin import register_plugin, get_plugin_by_name

log = logging.getLogger('only_new')


class FilterOnlyNew(object):
    """Causes input plugins to only emit entries that haven't been seen on previous runs."""

    def validator(self):
        from flexget.validator import BooleanValidator
        return BooleanValidator()

    def on_process_start(self, task, config):
        """Make sure the remember_rejected plugin is available"""
        # Raises an error if plugin isn't available
        get_plugin_by_name('remember_rejected')

    def on_task_exit(self, task, config):
        """Reject all entries so remember_rejected will reject them next time"""
        if not config or not task.entries:
            return
        log.verbose('Rejecting entries after the task has run so they are not processed next time.')
        for entry in task.entries:
            task.reject(entry, 'Already processed entry', remember=True)


register_plugin(FilterOnlyNew, 'only_new', api_ver=2)
                'password': config['password']
            }
            result = query_api(url, "login", post)
            response = json.loads(result.read())
            if not response:
                raise PluginError('Login failed', log)
            self.session = response.replace('"', '')
        else:
            try:
                query_api(url, 'getServerVersion', {'session': self.session})
            except HTTPError as e:
                if e.code == 403:  # Forbidden
                    self.session = None
                    return self.check_login(task, config)
                else:
                    raise PluginError('HTTP Error %s' % e, log)


def query_api(url, method, post=None):
    try:
        return urlopen(url.rstrip("/") + "/" + method.strip("/"),
                       urlencode(post) if post else None)
    except HTTPError as e:
        if e.code == 500:
            raise PluginError('Internal API Error', log)
        raise


register_plugin(PluginPyLoad, 'pyload', api_ver=2)
        for link in soup.findAll('a', attrs={'href': re.compile('down.php')}):
            torrent_url = 'http://www.newtorrents.info%s' % link.get('href')
            release_name = link.parent.next.get('title')
            # quick dirty hack
            seed = link.findNext('td', attrs={'class': re.compile('s')}).renderContents()
            if seed == 'n/a':
                seed = 0
            # TODO: also parse content_size from results
            if comparator.matches(release_name):
                torrents.append(Entry(title=release_name, url=torrent_url, torrent_seeds=seed,
                                      search_ratio=comparator.ratio(),
                                      search_sort=torrent_availability(seed, 0)))
            else:
                log.debug('rejecting search result: %s !~ %s' % (release_name, name))

        # sort by seed count, in reverse order
        torrents.sort(reverse=True, key=lambda x: x.get('search_sort', 0))
        # choose the torrent
        if not torrents:
            dashindex = name.rfind('-')
            if dashindex != -1:
                return self.entries_from_search(name[:dashindex], comparator=comparator)
            else:
                raise PluginWarning('No matches for %s' % name, log, log_once=True)
        else:
            if len(torrents) == 1:
                log.debug('found only one matching search result.')
            else:
                log.debug('search result contains multiple matches, sorted %s by most seeders' % torrents)
            return torrents


register_plugin(NewTorrents, 'newtorrents', groups=['urlrewriter', 'search'])
                page = urlopener(inc_url, log)
            except HTTPError as err:
                log.warning("HTTPError when opening playlist page: %d %s" % (err.code, err.reason))
                continue

            soup = get_soup(page)
            links = soup.find_all('a', attrs={'class': 'target-quicktimeplayer',
                                              'href': re.compile(r'_h?480p\.mov$')})
            for link in links:
                url = link.get('href')
                url = url[:url.rfind('_')]
                quality = self.quality.lower()
                if quality == 'ipod':
                    url += '_i320.m4v'
                else:
                    url += '_h' + quality + '.mov'

                entry = Entry()
                entry['url'] = url
                entry['title'] = title
                match = re.search(r'.*/([^?#]*)', url)
                entry['filename'] = match.group(1)
                result.append(entry)
                log.debug('found trailer %s', url)

        return result


register_plugin(AppleTrailers, 'apple_trailers', api_ver=2)
        if not match:
            return
        if match['match'].start() > 1:
            # We start using the original title here, so we can properly ignore unwanted prefixes.
            # Look for unwanted prefixes to find out where the series title starts
            start = 0
            prefix = re.match('|'.join(parser.ignore_prefixes), title)
            if prefix:
                start = prefix.end()
            # If an episode id is found, assume everything before it is series name
            name = title[start:match['match'].start()]
            # Remove possible episode title from series name (anything after a ' - ')
            name = name.split(' - ')[0]
            # Replace some special characters with spaces
            name = re.sub('[\._\(\) ]+', ' ', name).strip(' -')
            # Normalize capitalization to title case
            name = capwords(name)
            # If we didn't get a series name, return
            if not name:
                return
            parser.name = name
            parser.data = title
            try:
                parser.parse(data=title, quality=quality)
            except ParseWarning as pw:
                log.debug('ParseWarning: %s' % pw.value)
            if parser.valid:
                return parser


register_plugin(MetainfoSeries, 'metainfo_series')
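# End to end, the normalization above walks a raw title back to a clean series
# name. With a hypothetical input 'The.Show.S02E03.720p-GRP', where the
# episode id match starts at 'S02E03':
#
#   title[start:match['match'].start()]           ->  'The.Show.'
#   re.sub('[\._\(\) ]+', ' ', name).strip(' -')  ->  'The Show'
#   capwords(name)                                ->  'The Show' (normalizes odd casing)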
                                max_seeds_from_threads(threadlist))
                    background.start()
                    threadlist = [background]
                log.debug('Started thread to scrape %s with info hash %s' % (tracker, info_hash))

            seeds = max(seeds, max_seeds_from_threads(threadlist))
            log.debug('Highest number of seeds found: %s' % seeds)
        else:
            # Single tracker
            tracker = torrent.content['announce']
            try:
                seeds = get_tracker_seeds(tracker, info_hash)
            except URLError as e:
                log.debug('Error scraping %s: %s' % (tracker, e))

        # Reject if needed
        if seeds < min_seeds:
            entry.reject(reason='Tracker(s) had < %s required seeds. (%s)' % (min_seeds, seeds),
                         remember_time=config['reject_for'])
            # Maybe there is better match that has enough seeds
            task.rerun()
        else:
            log.debug('Found %i seeds from trackers' % seeds)


register_plugin(TorrentAlive, 'torrent_alive', api_ver=2)
                log_once(pw.value, logger=log)

            if disk_parser.valid:
                log.debug('name %s is same series as %s', name, series)
                log.debug('disk_parser.identifier = %s', disk_parser.identifier)
                log.debug('disk_parser.quality = %s', disk_parser.quality)
                log.debug('disk_parser.proper_count = %s', disk_parser.proper_count)

                for entry in accepted_series[series]:
                    log.debug('series_parser.identifier = %s', entry['series_parser'].identifier)
                    if disk_parser.identifier != entry['series_parser'].identifier:
                        log.trace('wrong identifier')
                        continue
                    log.debug('series_parser.quality = %s', entry['series_parser'].quality)
                    if config.get('allow_different_qualities') == 'better':
                        if entry['series_parser'].quality > disk_parser.quality:
                            log.trace('better quality')
                            continue
                    elif config.get('allow_different_qualities'):
                        if disk_parser.quality != entry['series_parser'].quality:
                            log.trace('wrong quality')
                            continue
                    log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)
                    if disk_parser.proper_count >= entry['series_parser'].proper_count:
                        entry.reject('proper already exists')
                        continue
                    else:
                        log.trace('new one is better proper, allowing')
                        continue


register_plugin(FilterExistsSeries, 'exists_series', groups=['exists'])
                        for genre in entry['rt_genres']):
                    min_score = min_score - 5
                if score > min_score:
                    log.debug('Accepting because of rt genre accept (%s and %s > %s)' %
                              (genre, score, min_score))
                    force_accept = True
                    break

            if strict_reasons:
                reasons.append('rt genre strict (%s)' % (', '.join(strict_reasons)))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % \
                    (entry.get('rt_name', None) or entry['title'], ', '.join(reasons))
                if task.manager.options.debug:
                    log.debug(msg)
                else:
                    if score_offset != 0:
                        msg = 'Offset score by %s. %s' % (score_offset, msg)
                    if task.manager.options.quiet:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()


register_plugin(MyMovieFilter, 'my_movie_filter', api_ver=2)
                f.write(br.response().get_data())
                log.critical('I have saved the login page content to %s for you to view' % filename)
            raise PluginError('Unable to find login fields', log)

        br.form = loginform
        br.submit()

        cookiejar = br._ua_handlers["_cookies"].cookiejar

        # Add cookiejar to our requests session
        task.requests.add_cookiejar(cookiejar)
        # Add handler to urllib2 default opener for backwards compatibility
        handler = urllib2.HTTPCookieProcessor(cookiejar)
        if urllib2._opener:
            log.debug('Adding HTTPCookieProcessor to default opener')
            urllib2._opener.add_handler(handler)
        else:
            log.debug('Creating new opener and installing it')
            urllib2.install_opener(urllib2.build_opener(handler))

    def on_task_exit(self, task, config):
        """Task exiting, remove cookiejar"""
        log.debug('Removing urllib2 opener')
        urllib2.install_opener(None)

    # Task aborted, unhook the cookiejar
    on_task_abort = on_task_exit


register_plugin(FormLogin, 'form', api_ver=2)
            if isinstance(name, dict):
                # assume the name is the first/only key in the dict.
                name, search_config = name.items()[0]
            log.verbose('Searching `%s` from %s' % (entry['title'], name))
            try:
                results = plugins[name].search(entry, search_config)
                matcher = SequenceMatcher(a=entry['title'])
                for result in results:
                    matcher.set_seq2(result['title'])
                    if matcher.ratio() > 0.9:
                        log.debug('Found url: %s', result['url'])
                        entry['url'] = result['url']
                        break
                else:
                    continue
                break
            except (PluginError, PluginWarning) as pw:
                log.verbose('Failed: %s' % pw.value)
                continue
        # Search failed
        else:
            # If I don't have a URL, doesn't matter if I'm immortal...
            entry['immortal'] = False
            entry.reject('search failed')


register_plugin(PluginSearch, 'urlrewrite_search', api_ver=2)
register_plugin(SearchPlugins, '--search-plugins', builtin=True)
register_parser_option('--search-plugins', action='store_true', dest='search_plugins',
                       default=False, help='List supported search plugins.')
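# For intuition on the 0.9 cutoff above: difflib's ratio() is
# 2 * matches / total length, so even near-identical strings can fall short.
# E.g. SequenceMatcher(a='Foo S01E01 720p', b='Foo S01E01 720p x264').ratio()
# is 2 * 15 / 35, roughly 0.857, which this filter would still reject.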
            entries.append(Entry(title=title, url='', series_name=series.name,
                                 series_season=latest['season'], series_episode=latest['episode'] + 1))

            # different syntax (eg. 01x02); same next episode, so same season/episode fields
            title = '%s %02dx%02d' % (series.name, latest['season'], latest['episode'] + 1)
            entries.append(Entry(title=title, url='', series_name=series.name,
                                 series_season=latest['season'], series_episode=latest['episode'] + 1))

            # try next season
            title = '%s S%02dE%02d' % (series.name, latest['season'] + 1, 1)
            entries.append(Entry(title=title, url='', series_name=series.name,
                                 series_season=latest['season'] + 1, series_episode=1))

        return entries


register_plugin(EmitSeries, 'emit_series', api_ver=2)
                               enc.href for enc in entry.get('enclosures', [])
                               if enc.get('href') not in e['urls']])

            if not e.get('url'):
                log.debug('%s does not have link (%s) or enclosure' % (entry.title, config['link']))
                ignored += 1
                continue

            add_entry(e)

        # Save last spot in rss
        if rss.entries:
            log.debug('Saving location in rss feed.')
            task.simple_persistence['%s_last_entry' % url_hash] = (
                rss.entries[0].title + rss.entries[0].get('guid', ''))

        if ignored:
            if not config.get('silent'):
                log.warning('Skipped %s RSS-entries without required information (title, link or enclosures)' % ignored)
        return entries


register_plugin(InputRSS, 'rss', api_ver=2)
                try:
                    background.start()
                    threadlist.append(background)
                except threading.ThreadError:
                    # If we can't start a new thread, wait for current ones to complete and continue
                    log.debug('Reached max threads, finishing current threads.')
                    seeds = max(seeds, max_seeds_from_threads(threadlist))
                    background.start()
                    threadlist = [background]
                log.debug('Started thread to scrape %s with info hash %s' % (tracker, info_hash))

            seeds = max(seeds, max_seeds_from_threads(threadlist))
            log.debug('Highest number of seeds found: %s' % seeds)
        else:
            # Single tracker
            tracker = torrent.content['announce']
            try:
                seeds = get_tracker_seeds(tracker, info_hash)
            except URLError as e:
                log.debug('Error scraping %s: %s' % (tracker, e))

        # Reject if needed
        if seeds < min_seeds:
            task.reject(entry, reason='Tracker(s) had < %s required seeds. (%s)' % (min_seeds, seeds),
                        remember_time=config['reject_for'])
            task.rerun()
        else:
            log.debug('Found %i seeds from trackers' % seeds)


register_plugin(TorrentAlive, 'torrent_alive', api_ver=2)
"""Does the lookup for this entry and populates the entry fields.""" imdb_id = entry.get('imdb_id', eval_lazy=False) or \ imdb.extract_id(entry.get('imdb_url', eval_lazy=False)) try: movie = lookup(smart_match=entry['title'], tmdb_id=entry.get('tmdb_id', eval_lazy=False), imdb_id=imdb_id) entry.update_using_map(self.field_map, movie) except LookupError, e: log.debug(u'Tmdb lookup for %s failed: %s' % (entry['title'], e.message)) # Set all of our fields to None if the lookup failed entry.unregister_lazy_fields(self.field_map, self.lazy_loader) return entry[field] def lookup(self, entry): """ Populates all lazy fields to an Entry. May be called by other plugins requiring tmdb info on an Entry :param entry: Entry instance """ entry.register_lazy_fields(self.field_map, self.lazy_loader) def on_feed_metainfo(self, feed, config): if not config: return for entry in feed.entries: self.lookup(entry) register_plugin(PluginTmdbLookup, 'tmdb_lookup', api_ver=2)
import logging

from flexget.plugin import register_plugin, feed_phases

log = logging.getLogger('disable_phases')


class PluginDisablePhases(object):
    """Disables phases from feed execution.

    Mainly meant for advanced users and development.

    Example:

    disable_phases:
      - download
    """

    def validator(self):
        from flexget import validator
        root = validator.factory('list')
        root.accept('choice').accept_choices(feed_phases)
        return root

    def on_feed_start(self, feed, config):
        map(feed.disable_phase, config)


register_plugin(PluginDisablePhases, 'disable_phases', api_ver=2)
        return False

    # urlrewriter API
    def url_rewrite(self, task, entry):
        try:
            # need to fake user agent
            txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
            req = urllib2.Request(entry['url'], None, txheaders)
            page = urlopener(req, log)
            soup = get_soup(page)
            results = soup.find_all('a', attrs={'class': 'l'})
            if not results:
                raise UrlRewritingError('No results')
            for res in results:
                url = res.get('href')
                url = url.replace('/interstitial?url=', '')
                # generate match regexp from google search result title
                regexp = '.*'.join([x.contents[0] for x in res.find_all('em')])
                if re.match(regexp, entry['title']):
                    log.debug('resolved, found with %s' % regexp)
                    entry['url'] = url
                    return
            raise UrlRewritingError('Unable to resolve')
        except Exception as e:
            raise UrlRewritingError(e)


register_plugin(UrlRewriteGoogleCse, 'google_cse', groups=['urlrewriter'])
            # parenthesized so the `or` supplies the fallback, not the formatted string
            msg = 'TransmissionError: %s' % (e.message or 'N/A')
            log.error(msg)
            task.fail(entry, msg)

    def remove_finished(self, cli):
        # Get a list of active transfers
        transfers = cli.info(arguments=['id', 'hashString', 'name', 'status',
                                        'uploadRatio', 'seedRatioLimit'])
        remove_ids = []
        # Go through the list of active transfers and add finished transfers to remove_ids.
        for transfer in transfers.itervalues():
            log.debug('Transfer "%s": status: "%s" upload ratio: %.2f seed ratio: %.2f' %
                      (transfer.name, transfer.status, transfer.uploadRatio, transfer.seedRatioLimit))
            if transfer.status == 'stopped' and transfer.uploadRatio >= transfer.seedRatioLimit:
                log.info('Removing finished torrent `%s` from transmission' % transfer.name)
                remove_ids.append(transfer.id)
        # Remove finished transfers
        if remove_ids:
            cli.remove(remove_ids)

    def on_task_exit(self, task, config):
        """Make sure all temp files are cleaned up when task exits"""
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = get_plugin_by_name('download')
            download.instance.cleanup_temp_files(task)

    on_task_abort = on_task_exit


register_plugin(PluginTransmission, 'transmission', api_ver=2)
register_plugin(PluginTransmissionInput, 'from_transmission', api_ver=2)
        config = self.prepare_config(config)
        body_items = []
        for entry in task.accepted:
            try:
                body_items.append(entry.render(config['item_template']))
            except RenderError as e:
                log.error('Error rendering item for message body: %s' % e)

        log.verbose("Sending Notify-OSD notification about: %s", " - ".join(body_items))

        title = config['title_template']
        try:
            title = render_from_task(title, task)
            log.debug('Setting bubble title to: %s', title)
        except RenderError as e:
            log.error('Error rendering Notify-OSD title: %s' % e)

        if not Notify.init("Flexget"):
            log.error('Unable to init libnotify.')
            return

        n = Notify.Notification.new(title, '\n'.join(body_items), None)
        timeout = config['timeout'] * 1000
        n.set_timeout(timeout)
        if not n.show():
            log.error('Unable to send notification for %s', title)
            return


register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
    def on_feed_start(self, feed):
        self.priorities = {}
        names = []
        for name, priority in feed.config.get("plugin_priority", {}).iteritems():
            names.append(name)
            originals = self.priorities.setdefault(name, {})
            for phase, event in plugins[name].phase_handlers.iteritems():
                originals[phase] = event.priority
                log.debug("stored %s original value %s" % (phase, event.priority))
                event.priority = priority
                log.debug("set %s new value %s" % (phase, priority))
        log.debug("Changed priority for: %s" % ", ".join(names))

    def on_feed_exit(self, feed):
        if not self.priorities:
            log.debug("nothing changed, aborting restore")
            return
        names = []
        for name in feed.config.get("plugin_priority", {}).keys():
            names.append(name)
            originals = self.priorities[name]
            for phase, priority in originals.iteritems():
                plugins[name].phase_handlers[phase].priority = priority
        log.debug("Restored priority for: %s" % ", ".join(names))
        self.priorities = {}

    on_feed_abort = on_feed_exit


register_plugin(PluginPriority, "plugin_priority")
            valid_plugins.accept(plugin.instance.validator, key=plugin_name)
        return root

    def __getattr__(self, item):
        """Returns a function for all on_task_* and on_process_* events, that runs all the configured plugins."""
        for phase, method in phase_methods.iteritems():
            # TODO: Deal with entry phases
            if item == method and phase not in ['accept', 'reject', 'fail']:
                break
        else:
            raise AttributeError(item)

        def handle_phase(task, config):
            """Function that runs all of the configured plugins which act on the current phase."""
            # Keep a list of all results, for input plugin combining
            results = []
            for item in config:
                for plugin_name, plugin_config in item.iteritems():
                    if phase in get_phases_by_plugin(plugin_name):
                        method = get_plugin_by_name(plugin_name).phase_handlers[phase]
                        log.debug('Running plugin %s' % plugin_name)
                        result = method(task, plugin_config)
                        if isinstance(result, list):
                            results.extend(result)
            return results

        return handle_phase


register_plugin(PluginSequence, 'sequence', api_ver=2, debug=True)
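# A minimal standalone sketch of the __getattr__ dispatch technique used
# above (names hypothetical, not FlexGet code): attribute access for any
# undeclared handler fabricates one on the fly, so a single class can appear
# to implement every phase without defining each method explicitly.
class Dispatcher(object):

    def __getattr__(self, item):
        if not item.startswith('on_'):
            raise AttributeError(item)

        def handler(*args, **kwargs):
            # a real implementation would route to the matching phase here
            return 'handled %s' % item

        return handler

# Dispatcher().on_task_input() -> 'handled on_task_input'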
        # Successfully updated from tvdb, update the database
        log.debug("Successfully updated favorites from thetvdb.com")
        if not user_favorites:
            user_favorites = ThetvdbFavorites(account_id, favorite_ids)
        else:
            user_favorites.series_ids = favorite_ids
            user_favorites.updated = datetime.now()
        session.merge(user_favorites)
        if not user_favorites.series_ids:
            log.warning("Didn't find any thetvdb.com favorites.")
            return

        # Construct list of entries with our series names
        entries = []
        for series_id in user_favorites.series_ids:
            # Lookup the series name from the id
            try:
                series = lookup_series(tvdb_id=series_id)
            except LookupError as e:
                log.error("Error looking up %s from thetvdb: %s" % (series_id, e.message))
            else:
                series_name = series.seriesname
                if config.get("strip_dates"):
                    # Remove year from end of series name if present
                    series_name = re.sub("\s+\(\d{4}\)$", "", series_name)
                entries.append(Entry(series_name, "", imaginary=True))
        return entries


register_plugin(InputThetvdbFavorites, "thetvdb_favorites", api_ver=2)
    def validator(self):
        from flexget import validator
        return validator.factory('integer')

    def on_process_start(self, task, config):
        # Default to None so the later `if self.backlog:` check is safe
        # even when the backlog plugin is unavailable.
        self.backlog = None
        try:
            self.backlog = get_plugin_by_name('backlog').instance
        except DependencyError:
            log.warning('Unable to utilize backlog plugin, entries may slip through limit_new in some rare cases')

    @priority(-255)
    def on_task_filter(self, task, config):
        if task.manager.options.learn:
            log.info('Plugin limit_new is disabled with --learn / --reset')
            return

        amount = config
        for index, entry in enumerate(task.accepted):
            if index < amount:
                log.verbose('Allowed %s (%s)' % (entry['title'], entry['url']))
            else:
                task.reject(entry, 'limit exceeded')
                # Also save this in backlog so that it can be accepted next time.
                if self.backlog:
                    self.backlog.add_backlog(task, entry)

        log.debug('Rejected: %s Allowed: %s' % (len(task.accepted[amount:]), len(task.accepted[:amount])))


register_plugin(FilterLimitNew, 'limit_new', api_ver=2)
        return config

    @priority(130)
    def on_task_start(self, task, config):
        """Task starting"""
        # Set the headers for this task's request session
        if task.requests.headers:
            task.requests.headers.update(config)
        else:
            task.requests.headers = config
        # Set the headers in urllib2 for backwards compatibility
        if urllib2._opener:
            log.debug("Adding HTTPHeadersProcessor to default opener")
            urllib2._opener.add_handler(HTTPHeadersProcessor(config))
        else:
            log.debug("Creating new opener and installing it")
            opener = urllib2.build_opener(HTTPHeadersProcessor(config))
            urllib2.install_opener(opener)

    def on_task_exit(self, task, config):
        """Task exiting, remove additions"""
        if urllib2._opener:
            log.debug("Removing urllib2 default opener")
            # TODO: this uninstalls all other handlers as well, but does it matter?
            urllib2.install_opener(None)

    on_task_abort = on_task_exit


register_plugin(PluginHeaders, "headers", api_ver=2)
    @priority(255)
    def on_task_start(self, task, config):
        # Allow reruns
        if task.is_rerun:
            return
        if task.manager.options.learn:
            log.info('Ignoring task %s interval for --learn' % task.name)
            return
        last_time = task.simple_persistence.get('last_time')
        if not last_time:
            log.info('No previous run recorded, running now')
        elif task.manager.options.interval_ignore:
            log.info('Ignoring interval because of --now')
        else:
            log.debug('last_time: %r' % last_time)
            log.debug('interval: %s' % config)
            next_time = last_time + parse_timedelta(config)
            log.debug('next_time: %r' % next_time)
            if datetime.datetime.now() < next_time:
                log.debug('interval not met')
                log.verbose('Interval %s not met on task %s. Use --now to override.' % (config, task.name))
                task.abort('Interval not met', silent=True)
                return
        log.debug('interval passed')
        task.simple_persistence['last_time'] = datetime.datetime.now()


register_plugin(PluginInterval, 'interval', api_ver=2)
register_parser_option('--now', action='store_true', dest='interval_ignore', default=False,
                       help='Ignore interval(s)')
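# The gating above is plain datetime arithmetic: with a last run recorded at
# 12:00 and a config of e.g. '1 hours', parse_timedelta yields a one-hour
# timedelta, so next_time is 13:00 and any execution before that aborts with
# 'Interval not met' unless --now is given.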
            'nooverwrites': False,
            'retries': 10,
            'continuedl': True,
            'noprogress': False,
            'playliststart': 1,
            'playlistend': -1,
            'logtostderr': False,
            'consoletitle': False,
            'nopart': False,
            'updatetime': True,
            'writedescription': False,
            'writeinfojson': False,
            'writesubtitles': False,
            'subtitleslang': None,
            'matchtitle': None,
            'rejecttitle': None,
            'max_downloads': None,
            'prefer_free_formats': False,
            'verbose': False,
        })
        for extractor in youtube_dl.gen_extractors():
            fd.add_info_extractor(extractor)
        urls = map(lambda url: url.encode('utf-8').strip(), urls)
        fd.download(urls)
        urllib2.install_opener(urllib2.build_opener())


register_plugin(PluginYoutubeDL, 'youtube_dl', api_ver=2)
                rss_items.append(PyRSS2Gen.RSSItem(**gen))
            else:
                # no longer needed
                session.delete(db_item)

        session.commit()
        session.close()

        # make rss
        rss = PyRSS2Gen.RSS2(title='FlexGet',
                             link=config.get('rsslink', 'http://flexget.com'),
                             description='FlexGet generated RSS feed',
                             lastBuildDate=datetime.datetime.utcnow(),
                             items=rss_items)

        # write rss
        fn = os.path.expanduser(config['file'])
        try:
            log.verbose('Writing output rss to %s' % fn)
            rss.write_xml(open(fn, 'w'), encoding=config['encoding'])
        except LookupError:
            log.critical('Unknown encoding %s' % config['encoding'])
            return
        except IOError:
            # TODO: plugins cannot raise PluginWarnings in terminate event ..
            log.critical('Unable to write %s' % fn)
            return
        self.written[config['file']] = True


register_plugin(OutputRSS, 'make_rss')
""" @property def schema(self): return {'oneOf': [{'type': 'boolean'}, self.settings_schema]} # Run after series and metainfo series plugins @priority(115) def on_task_metainfo(self, task, config): if not config: # Don't run when we are disabled return # Generate the group settings for series plugin group_settings = {} if isinstance(config, dict): group_settings = config # Generate a list of unique series that metainfo_series can parse for this task metainfo_series = get_plugin_by_name('metainfo_series') guess_entry = metainfo_series.instance.guess_entry guessed_series = {} for entry in task.entries: if guess_entry(entry): guessed_series.setdefault(normalize_series_name(entry['series_name']), entry['series_name']) # Combine settings and series into series plugin config format allseries = {'settings': {'all_series': group_settings}, 'all_series': guessed_series.values()} # Merge our config in to the main series config self.merge_config(task, allseries) register_plugin(FilterAllSeries, 'all_series', api_ver=2)
        return False

    @priority(255)
    def url_rewrite(self, task, entry):
        """Rewrites given entry url. Raises UrlRewritingError if failed."""
        tries = 0
        while self.url_rewritable(task, entry):
            tries += 1
            if tries > 300:
                raise UrlRewritingError('URL rewriting was left in an infinite loop while rewriting url for %s, '
                                        'some rewriter is always returning True' % entry)
            for urlrewriter in get_plugins_by_group('urlrewriter'):
                name = urlrewriter.name
                try:
                    if urlrewriter.instance.url_rewritable(task, entry):
                        log.debug('Url rewriting %s' % entry['url'])
                        urlrewriter.instance.url_rewrite(task, entry)
                        log.info('Entry \'%s\' URL rewritten to %s (with %s)' % (entry['title'], entry['url'], name))
                except UrlRewritingError as r:
                    # increase failcount
                    #count = self.shared_cache.storedefault(entry['url'], 1)
                    #count += 1
                    raise UrlRewritingError('URL rewriting %s failed: %s' % (name, r.value))
                except PluginError as e:
                    raise UrlRewritingError('URL rewriting %s failed: %s' % (name, e.value))
                except Exception as e:
                    log.exception(e)
                    raise UrlRewritingError('%s: Internal error with url %s' % (name, entry['url']))


register_plugin(PluginUrlRewriting, 'urlrewriting', builtin=True)
register_task_phase('urlrewrite', before='download')
from flexget.plugin import register_plugin
import flexget.validator
from tests import FlexGetBase


class SearchPlugin(object):
    """Fake search plugin. Just returns the entry it was given."""

    def validator(self):
        return flexget.validator.factory('boolean')

    def search(self, entry, comparator=None, config=None):
        return [entry]


register_plugin(SearchPlugin, 'test_search', groups=['search'])


class EstRelease(object):
    """Fake release estimate plugin. Just returns 'est_release' entry field."""

    def estimate(self, entry):
        return entry.get('est_release')


register_plugin(EstRelease, 'test_release', groups=['estimate_release'])


class TestDiscover(FlexGetBase):

    __yaml__ = """
        tasks:
          test_sort: