def lookup_movie(title, session, identifiers=None):
    try:
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance.lookup
    except DependencyError:
        imdb_lookup = None
    try:
        tmdb_lookup = plugin.get_plugin_by_name('tmdb_lookup').instance.lookup
    except DependencyError:
        tmdb_lookup = None
    if not (imdb_lookup or tmdb_lookup):
        return
    entry = Entry(title=title)
    if identifiers:
        for identifier in identifiers:
            for key, value in identifier.items():
                entry[key] = value
    try:
        imdb_lookup(entry, session=session)
    # IMDB lookup raises PluginError instead of the normal ValueError
    except PluginError:
        tmdb_lookup(entry)
    # Return only if lookup was successful
    if entry.get('movie_name'):
        return entry
    return
def matches(self, task, config, entry):
    # Tell tmdb_lookup to add lazy lookup fields if not already present
    try:
        plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(entry)
    except plugin.DependencyError:
        log.debug('imdb_lookup is not available, queue will not work if movie ids are not populated')
    try:
        plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
    except plugin.DependencyError:
        log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')
    conditions = []
    # Check if a movie id is already populated before incurring a lazy lookup
    for lazy in [False, True]:
        if entry.get('imdb_id', eval_lazy=lazy):
            conditions.append(QueuedMovie.imdb_id == entry['imdb_id'])
        if entry.get('tmdb_id', eval_lazy=lazy and not conditions):
            conditions.append(QueuedMovie.tmdb_id == entry['tmdb_id'])
        if conditions:
            break
    if not conditions:
        log_once('IMDB and TMDB lookups failed for %s.' % entry['title'], log, logging.WARN)
        return
    quality = entry.get('quality', qualities.Quality())
    movie = task.session.query(QueuedMovie).filter(QueuedMovie.downloaded == None). \
        filter(or_(*conditions)).first()
    if movie and movie.quality_req.allows(quality):
        return movie
def matches(self, feed, config, entry):
    # Tell tmdb_lookup to add lazy lookup fields if not already present
    try:
        get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
    except DependencyError:
        log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')
    try:
        get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(entry)
    except DependencyError:
        log.debug('imdb_lookup is not available, queue will not work if movie ids are not populated')
    # make sure the entry has a movie id field filled
    conditions = []
    # Check if a movie id is already populated before incurring a lazy lookup
    for lazy in [False, True]:
        if entry.get('imdb_id', eval_lazy=lazy):
            conditions.append(QueuedMovie.imdb_id == entry['imdb_id'])
        if entry.get('tmdb_id', eval_lazy=lazy):
            conditions.append(QueuedMovie.tmdb_id == entry['tmdb_id'])
        if conditions:
            break
    if not conditions:
        log.verbose('IMDB and TMDB lookups failed for %s.' % entry['title'])
        return
    quality = entry.get('quality', qualities.UNKNOWN)
    return feed.session.query(QueuedMovie).filter(QueuedMovie.downloaded == None).\
        filter(or_(*conditions)).\
        filter(QueuedMovie.quality_obj <= quality).first()
def on_task_filter(self, task):
    raise PluginWarning('This plugin no longer works with the imdb, replacement will be implemented soon')
    config = task.config['imdb_rated']
    if isinstance(config, basestring):
        config = {'url': task.config['imdb_rated']}
    self.update_rated(task, config)
    for entry in task.entries:
        # if no imdb_url perform lookup
        if not 'imdb_url' in entry:
            try:
                get_plugin_by_name('imdb_lookup').instance.lookup(entry)
            except PluginError:
                pass  # ignore imdb lookup failures
        # ignore entries without imdb_url
        if not 'imdb_url' in entry:
            continue
        is_rated = task.session.query(ImdbRated).\
            filter(ImdbRated.url == config['url']).\
            filter(ImdbRated.imdb_url == entry['imdb_url']).first() is not None
        if config.get('reverse', False):
            # reversed, reject unrated
            if not is_rated:
                task.reject(entry, 'imdb rated reverse')
        else:
            # normal mode, reject rated
            if is_rated:
                task.reject(entry, 'imdb rated')
def on_task_start(self, task, config):
    # TODO: Resolve user-agent in a way that doesn't involve modifying the task config.
    # make sure we have dependencies available, will throw DependencyError if not
    get_plugin_by_name('headers')
    # configure them
    task.config['headers'] = {'User-Agent': 'QuickTime/7.6.6'}
    self.quality = str(config)
def on_task_output(self, task, config):
    # Send default values for backwards compatibility
    notify_config = {
        'to': [{__name__: config}],
        'scope': 'entries',
        'what': 'accepted'
    }
    plugin.get_plugin_by_name('notify').instance.send_notification(task, notify_config)
def on_feed_filter(self, feed):
    for entry in feed.entries:
        try:
            get_plugin_by_name("imdb_lookup").instance.lookup(entry)
        except PluginError:
            feed.reject(entry, "imdb required")
        if "imdb_url" not in entry and "imdb_id" not in entry:
            feed.reject(entry, "imdb required")
def on_feed_filter(self, feed):
    for entry in feed.entries:
        try:
            get_plugin_by_name('imdb_lookup').instance.lookup(entry)
        except PluginError:
            feed.reject(entry, 'imdb required')
        if not 'imdb_url' in entry:
            feed.reject(entry, 'imdb required')
def on_task_filter(self, task):
    for entry in task.entries:
        try:
            get_plugin_by_name('imdb_lookup').instance.lookup(entry)
        except PluginError:
            task.reject(entry, 'imdb required')
        if 'imdb_url' not in entry and 'imdb_id' not in entry:
            task.reject(entry, 'imdb required')
def on_task_start(self, task, config):
    # TODO: Fix or remove this plugin entirely
    raise plugin.PluginError('The apple_trailers plugin is currently broken, and will be removed unless someone '
                             'finds a way to fix it.')
    # TODO: Resolve user-agent in a way that doesn't involve modifying the task config.
    # make sure we have dependencies available, will throw DependencyError if not
    plugin.get_plugin_by_name('headers')
    # configure them
    task.config['headers'] = {'User-Agent': 'QuickTime/7.6.6'}
def search(self, task, entry, config):
    """
    Search for entries on RarBG
    """
    categories = config.get('category', 'all')
    # Ensure categories is a list
    if not isinstance(categories, list):
        categories = [categories]
    # Convert named category to its respective category id number
    categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
    category_url_fragment = ';'.join(str(c) for c in categories)
    entries = set()
    token = self.get_token()
    if not token:
        log.error('No token set. Exiting RARBG search.')
        return entries
    params = {'mode': 'search', 'token': token, 'ranked': int(config['ranked']),
              'min_seeders': config['min_seeders'], 'min_leechers': config['min_leechers'],
              'sort': config['sorted_by'], 'category': category_url_fragment, 'format': 'json'}
    for search_string in entry.get('search_strings', [entry['title']]):
        params.pop('search_string', None)
        params.pop('search_imdb', None)
        if entry.get('movie_name'):
            params['search_imdb'] = entry.get('imdb_id')
        else:
            query = normalize_unicode(search_string)
            query_url_fragment = query.encode('utf8')
            params['search_string'] = query_url_fragment
            if config['use_tvdb']:
                plugin.get_plugin_by_name('thetvdb_lookup').instance.lazy_series_lookup(entry)
                params['search_tvdb'] = entry.get('tvdb_id')
                log.debug('Using tvdb id %s' % entry.get('tvdb_id'))
        page = requests.get(self.base_url, params=params)
        log.debug('requesting: %s' % page.url)
        try:
            r = page.json()
        except ValueError:
            log.debug(page.text)
            continue
        for result in r:
            e = Entry()
            e['title'] = result.get('f')
            e['url'] = result.get('d')
            entries.add(e)
    return entries
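# Illustrative config for the RarBG search above (not from the original source): the keys
# mirror what the code reads ('category', 'ranked', 'min_seeders', 'min_leechers',
# 'sorted_by', 'use_tvdb'); the concrete values are assumptions, since the plugin schema
# is not shown here.
example_rarbg_config = {
    'category': 'all',        # a name resolved through CATEGORIES, or a numeric id
    'ranked': True,           # cast to int before being sent to the API
    'min_seeders': 5,
    'min_leechers': 0,
    'sorted_by': 'seeders',
    'use_tvdb': False,        # when True, thetvdb_lookup is used to fill search_tvdb
}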
def on_task_start(self, task, config):
    urlrewrite = plugin.get_plugin_by_name('urlrewriting')['instance']
    for disable in config:
        try:
            plugin.get_plugin_by_name(disable)
        except plugin.DependencyError:
            log.critical('Unknown url-rewriter %s' % disable)
            continue
        log.debug('Disabling url rewriter %s' % disable)
        urlrewrite.disabled_rewriters.append(disable)
def on_task_filter(self, task, config):
    if not config:
        return
    for entry in task.entries:
        try:
            plugin.get_plugin_by_name('imdb_lookup').instance.lookup(entry)
        except plugin.PluginError:
            entry.reject('imdb required')
        if 'imdb_id' not in entry and 'imdb_url' not in entry:
            entry.reject('imdb required')
def on_task_input(self, task, config):
    if not config:
        return
    config = self.prepare_config(config)
    entries = []
    queue_name = config.get('queue_name')
    with Session() as session:
        for queue_item in queue_get(session=session, downloaded=False, queue_name=queue_name):
            entry = Entry()
            # make sure the entry has IMDB fields filled
            entry['url'] = ''
            if queue_item.imdb_id:
                entry['imdb_id'] = queue_item.imdb_id
                entry['imdb_url'] = make_imdb_url(queue_item.imdb_id)
            if queue_item.tmdb_id:
                entry['tmdb_id'] = queue_item.tmdb_id
            # check if title is a imdb url (leftovers from old database?)
            # TODO: maybe this should be fixed at the queue_get ...
            if 'http://' in queue_item.title:
                plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
                log.debug('queue contains url instead of title')
                if entry.get('movie_name'):
                    entry['title'] = entry['movie_name']
                else:
                    log.error('Found imdb url in imdb queue, but lookup failed: %s' % entry['title'])
                    continue
            else:
                # normal title
                entry['title'] = queue_item.title
            # Add the year and quality if configured to (make sure not to double it up)
            if config.get('year') and entry.get('movie_year') \
                    and str(entry['movie_year']) not in entry['title']:
                plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
                entry['title'] += ' %s' % entry['movie_year']
            # TODO: qualities can now be ranges.. how should we handle this?
            if config.get('quality') and queue_item.quality != 'ANY':
                log.info('quality option of emit_movie_queue is disabled while we figure out how to handle ranges')
                # entry['title'] += ' %s' % queue_item.quality
            entries.append(entry)
            if entry.get('imdb_id'):
                log.debug('Added title and IMDB id to new entry: %s - %s',
                          entry['title'], entry['imdb_id'])
            elif entry.get('tmdb_id'):
                log.debug('Added title and TMDB id to new entry: %s - %s',
                          entry['title'], entry['tmdb_id'])
            else:
                # should this ever happen though?
                log.debug('Added title to new entry: %s', entry['title'])
    return entries
def on_task_abort(self, task, config):
    if task.silent_abort:
        return
    title = 'Task {{ task_name }} has aborted!'
    message = 'Reason: {{ task.abort_reason }}'
    notify_config = {'to': config['to'], 'scope': 'task', 'title': title, 'message': message}
    log.debug('sending abort notification')
    plugin.get_plugin_by_name('notify').instance.send_notification(task, notify_config)
def on_task_output(self, task, config):
    if not config:
        return
    if not isinstance(config, dict):
        config = {"action": config}
    for entry in task.accepted:
        # Tell tmdb_lookup to add lazy lookup fields if not already present
        try:
            plugin.get_plugin_by_name("tmdb_lookup").instance.lookup(entry)
        except plugin.DependencyError:
            log.debug("tmdb_lookup is not available, queue will not work if movie ids are not populated")
        # Find one or both movie id's for this entry. See if an id is already populated before incurring lazy lookup
        kwargs = {}
        for lazy in [False, True]:
            if entry.get("imdb_id", eval_lazy=lazy):
                kwargs["imdb_id"] = entry["imdb_id"]
            if entry.get("tmdb_id", eval_lazy=lazy):
                kwargs["tmdb_id"] = entry["tmdb_id"]
            if kwargs:
                break
        if not kwargs:
            log.warning("Could not determine a movie id for %s, it will not be added to queue." % entry["title"])
            continue
        # Provide movie title if it is already available, to avoid movie_queue doing a lookup
        kwargs["title"] = (
            entry.get("imdb_name", eval_lazy=False)
            or entry.get("tmdb_name", eval_lazy=False)
            or entry.get("movie_name", eval_lazy=False)
        )
        log.debug("movie_queue kwargs: %s" % kwargs)
        try:
            action = config.get("action")
            if action == "add":
                # since entries usually have unknown quality we need to ignore that ..
                if entry.get("quality_req"):
                    kwargs["quality"] = qualities.Requirements(entry["quality_req"])
                elif entry.get("quality"):
                    kwargs["quality"] = qualities.Requirements(entry["quality"].name)
                else:
                    kwargs["quality"] = qualities.Requirements(config.get("quality", "any"))
                queue_add(**kwargs)
            elif action == "remove":
                queue_del(**kwargs)
            elif action == "forget":
                queue_forget(**kwargs)
        except QueueError as e:
            # Ignore already in queue errors
            if e.errno != 1:
                entry.fail("Error adding movie to queue: %s" % e.message)
def on_task_output(self, task, config):
    if not config:
        return
    if not isinstance(config, dict):
        config = {'action': config}
    config.setdefault('queue_name', 'default')
    for entry in task.accepted:
        # Tell tmdb_lookup to add lazy lookup fields if not already present
        try:
            plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
        except plugin.DependencyError:
            log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')
        # Find one or both movie id's for this entry. See if an id is already populated before incurring lazy lookup
        kwargs = {}
        for lazy in [False, True]:
            if entry.get('imdb_id', eval_lazy=lazy):
                kwargs['imdb_id'] = entry['imdb_id']
            if entry.get('tmdb_id', eval_lazy=lazy):
                kwargs['tmdb_id'] = entry['tmdb_id']
            if kwargs:
                break
        if not kwargs:
            log.warning('Could not determine a movie id for %s, it will not be added to queue.' % entry['title'])
            continue
        # Provide movie title if it is already available, to avoid movie_queue doing a lookup
        kwargs['title'] = (entry.get('imdb_name', eval_lazy=False) or
                           entry.get('tmdb_name', eval_lazy=False) or
                           entry.get('movie_name', eval_lazy=False))
        log.debug('movie_queue kwargs: %s' % kwargs)
        kwargs['queue_name'] = config.get('queue_name')
        try:
            action = config.get('action')
            if action == 'add':
                # since entries usually have unknown quality we need to ignore that ..
                if entry.get('quality_req'):
                    kwargs['quality'] = qualities.Requirements(entry['quality_req'])
                elif entry.get('quality'):
                    kwargs['quality'] = qualities.Requirements(entry['quality'].name)
                else:
                    kwargs['quality'] = qualities.Requirements(config.get('quality', 'any'))
                queue_add(**kwargs)
            elif action == 'remove':
                queue_del(**kwargs)
            elif action == 'forget':
                queue_forget(**kwargs)
        except QueueError as e:
            # Ignore already in queue errors
            if e.errno != 1:
                entry.fail('Error adding movie to queue: %s' % e.message)
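# Hypothetical movie_queue output config for the handler above. The keys are the ones the
# code reads ('action', 'quality', 'queue_name'); the quality requirement string is an
# assumption, shown only to illustrate the shape of the config.
example_movie_queue_config = {'action': 'add', 'quality': '720p-1080p', 'queue_name': 'default'}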
def parse_what(what, lookup=True, session=None):
    """
    Determines what information was provided by the search string `what`.
    If `lookup` is true, will fill in other information from tmdb.

    :param what: Can be one of:
        <Movie Title>: Search based on title
        imdb_id=<IMDB id>: search based on imdb id
        tmdb_id=<TMDB id>: search based on tmdb id
    :param bool lookup: Whether missing info should be filled in from tmdb.
    :param session: An existing session that will be used for lookups if provided.
    :rtype: dict
    :return: A dictionary with 'title', 'imdb_id' and 'tmdb_id' keys
    """
    tmdb_lookup = plugin.get_plugin_by_name("api_tmdb").instance.lookup
    result = {"title": None, "imdb_id": None, "tmdb_id": None}
    result["imdb_id"] = extract_id(what)
    if not result["imdb_id"]:
        if isinstance(what, int):
            result["tmdb_id"] = what
        elif what.startswith("tmdb_id="):
            result["tmdb_id"] = what[8:]
        else:
            result["title"] = what
    if not lookup:
        # If not doing an online lookup we can return here
        return result
    search_entry = Entry(title=result["title"] or "")
    for field in ["imdb_id", "tmdb_id"]:
        if result.get(field):
            search_entry[field] = result[field]
    # Put lazy lookup fields on the search entry
    plugin.get_plugin_by_name("imdb_lookup").instance.register_lazy_fields(search_entry)
    plugin.get_plugin_by_name("tmdb_lookup").instance.lookup(search_entry)
    try:
        # Both ids are optional, but if movie_name was populated at least one of them will be there
        return {
            "title": search_entry["movie_name"],
            "imdb_id": search_entry.get("imdb_id"),
            "tmdb_id": search_entry.get("tmdb_id"),
        }
    except KeyError as e:
        raise QueueError(e.message)
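# Rough usage sketch for parse_what (illustrative, not part of the original module). The
# three input forms follow the docstring; the concrete titles and ids are assumptions.
def _parse_what_examples(session=None):
    parse_what('Some Movie Title', lookup=False)   # -> {'title': 'Some Movie Title', ...}
    parse_what('tmdb_id=12345', lookup=False)      # -> {'tmdb_id': '12345', ...}
    parse_what('tt0123456', lookup=False)          # imdb id picked up by extract_id()
    # With lookup=True (the default) the lazy imdb/tmdb lookups fill in the missing
    # fields, reusing `session` for the database work when one is passed in.
    return parse_what('Some Movie Title', session=session)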
def estimated(self, entries):
    """
    :return: Entries that we have estimated to be available
    """
    estimator = get_plugin_by_name('estimate_release').instance
    result = []
    for entry in entries:
        est_date = estimator.estimate(entry)
        if est_date is None:
            log.debug('No release date could be determined for %s' % entry['title'])
            result.append(entry)
            continue
        if type(est_date) == datetime.date:
            # If we just got a date, add a time so we can compare it to now()
            est_date = datetime.datetime.combine(est_date, datetime.time())
        if datetime.datetime.now() >= est_date:
            log.debug('%s has been released at %s' % (entry['title'], est_date))
            result.append(entry)
        else:
            entry.reject('has not been released')
            entry.complete()
            log.debug("%s hasn't been released yet (Expected: %s)" % (entry['title'], est_date))
    return result
def on_process_start(self, task, config):
    try:
        self.backlog = get_plugin_by_name('backlog').instance
    except DependencyError:
        log.warning(
            'Unable to utilize backlog plugin, entries may slip through limit_new in some rare cases'
        )
def on_task_input(self, task, config):
    entry_titles = set()
    entry_urls = set()
    for item in config:
        for input_name, input_config in item.items():
            input = plugin.get_plugin_by_name(input_name)
            method = input.phase_handlers['input']
            try:
                result = method(task, input_config)
            except plugin.PluginError as e:
                log.warning('Error during input plugin %s: %s' % (input_name, e))
                continue
            if not result:
                msg = 'Input %s did not return anything' % input_name
                if getattr(task, 'no_entries_ok', False):
                    log.verbose(msg)
                else:
                    log.warning(msg)
                continue
            for entry in result:
                if entry['title'] in entry_titles:
                    log.debug('Title `%s` already in entry list, skipping.' % entry['title'])
                    continue
                urls = ([entry['url']] if entry.get('url') else []) + entry.get('urls', [])
                if any(url in entry_urls for url in urls):
                    log.debug('URL for `%s` already in entry list, skipping.' % entry['title'])
                    continue
                yield entry
                entry_titles.add(entry['title'])
                entry_urls.update(urls)
def execute_searches(self, config, entries):
    """
    :param config: Discover plugin config
    :param entries: List of pseudo entries to search
    :return: List of entries found from search engines listed under `from` configuration
    """
    result = []
    if config.get('type', 'normal') == 'normal':
        comparator = StringComparator(cutoff=0.7, cleaner=clean_title)
    elif config['type'] == 'exact':
        comparator = StringComparator(cutoff=0.9)
    elif config['type'] == 'any':
        comparator = AnyComparator()
    else:
        comparator = MovieComparator()
    for item in config['from']:
        if isinstance(item, dict):
            plugin_name, plugin_config = item.items()[0]
        else:
            plugin_name, plugin_config = item, None
        search = get_plugin_by_name(plugin_name).instance
        if not callable(getattr(search, 'search')):
            log.critical('Search plugin %s does not implement search method' % plugin_name)
        for entry in entries:
            try:
                search_results = search.search(entry['title'], comparator, plugin_config)
                log.debug('Discovered %s entries from %s' % (len(search_results), plugin_name))
                result.extend(search_results[:config.get('limit')])
            except (PluginError, PluginWarning):
                log.debug('No results from %s' % plugin_name)
    return sorted(result, reverse=True, key=lambda x: x.get('search_sort'))
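# Illustrative discover config for execute_searches (not from the source). 'type', 'from'
# and 'limit' are the keys the method reads; the search plugin names and their options are
# assumptions used only to show the two accepted item shapes under 'from'.
example_discover_config = {
    'type': 'normal',                    # 'exact', 'any', or anything else -> MovieComparator
    'limit': 10,                         # keep at most 10 results per search plugin
    'from': [
        'some_search_plugin',                                # bare name -> plugin_config is None
        {'another_search_plugin': {'category': 'movies'}},   # dict form -> name plus its config
    ],
}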
def handle_phase(task, config):
    if task.name not in self.task_phases:
        log.debug('No config dict was generated for this task.')
        return
    entry_actions = {
        'accept': Entry.accept,
        'reject': Entry.reject,
        'fail': Entry.fail}
    for item in self.task_phases[task.name][phase]:
        requirement, action = item.items()[0]
        passed_entries = [e for e in task.entries if self.check_condition(requirement, e)]
        if passed_entries:
            if isinstance(action, basestring):
                # Simple entry action (accept, reject or fail) was specified as a string
                for entry in passed_entries:
                    entry_actions[action](entry, 'Matched requirement: %s' % requirement)
            else:
                # Other plugins were specified to run on this entry
                fake_task = Task(task.manager, task.name, task.config)
                fake_task.session = task.session
                # This entry still belongs to our feed, accept/reject etc. will carry through.
                fake_task.all_entries[:] = passed_entries
                try:
                    for plugin_name, plugin_config in action.iteritems():
                        plugin = get_plugin_by_name(plugin_name)
                        method = plugin.phase_handlers[phase]
                        method(fake_task, plugin_config)
                except Exception:
                    raise
def handle_phase(task, config):
    if task.name not in self.task_phases:
        log.debug('No config dict was generated for this task.')
        return
    entry_actions = {
        'accept': task.accept,
        'reject': task.reject,
        'fail': task.fail}
    for item in self.task_phases[task.name][phase]:
        requirement, action = item.items()[0]
        passed_entries = [e for e in task.entries if self.check_condition(requirement, e)]
        if passed_entries:
            if isinstance(action, basestring):
                # Simple entry action (accept, reject or fail) was specified as a string
                for entry in passed_entries:
                    entry_actions[action](entry, 'Matched requirement: %s' % requirement)
            else:
                # Other plugins were specified to run on this entry
                fake_task = Task(task.manager, task.name, task.config)
                fake_task.session = task.session
                # This entry still belongs to our feed, accept/reject etc. will carry through.
                fake_task.all_entries[:] = passed_entries
                try:
                    for plugin_name, plugin_config in action.iteritems():
                        plugin = get_plugin_by_name(plugin_name)
                        method = plugin.phase_handlers[phase]
                        method(fake_task, plugin_config)
                except Exception:
                    raise
def on_process_start(self, task, config):
    try:
        self.backlog = get_plugin_by_name('backlog').instance
    except DependencyError:
        log.warning(
            'Unable to utilize backlog plugin, failed entries may not be retried properly.'
        )
def __init__(self):
    try:
        self.backlog = plugin.get_plugin_by_name('backlog')
    except plugin.DependencyError:
        log.warning(
            'Unable to utilize backlog plugin, entries may slip through limit_new in some rare cases'
        )
def on_task_filter(self, task, config):
    fields = config['fields']
    action = config['action']
    result = []
    # TODO: xxx
    # we probably want to have common "run and combine inputs" function sometime soon .. this code is in
    # few places already (discover, inputs, ...)
    # code written so that this can be done easily ...
    for item in config['from']:
        for input_name, input_config in item.iteritems():
            input = get_plugin_by_name(input_name)
            if input.api_ver == 1:
                raise PluginError('Plugin %s does not support API v2' % input_name)
            method = input.phase_handlers['input']
            try:
                result.extend(method(task, input_config))
            except PluginError, e:
                log.warning('Error during input plugin %s: %s' % (input_name, e))
                continue
            if not result:
                log.warning('Input %s did not return anything' % input_name)
                continue
def on_task_learn(self, task, config):
    """Add downloaded movies to the database"""
    log.debug('check for learning')
    for entry in task.accepted:
        if 'imdb_id' not in entry:
            log.debug('`%s` does not have imdb_id' % entry['title'])
            continue
        parser = get_plugin_by_name('parsing').instance.parse_movie(entry['title'])
        quality = parser.quality.name
        log.debug('quality: %s' % quality)
        log.debug('imdb_id: %s' % entry['imdb_id'])
        log.debug('proper count: %s' % parser.proper_count)
        proper_movie = task.session.query(ProperMovie).\
            filter(ProperMovie.imdb_id == entry['imdb_id']).\
            filter(ProperMovie.quality == quality).\
            filter(ProperMovie.proper_count == parser.proper_count).first()
        if not proper_movie:
            pm = ProperMovie()
            pm.title = entry['title']
            pm.task = task.name
            pm.imdb_id = entry['imdb_id']
            pm.quality = quality
            pm.proper_count = parser.proper_count
            task.session.add(pm)
            log.debug('added %s' % pm)
        else:
            log.debug('%s already exists' % proper_movie)
def estimated(self, entries, estimation_mode):
    """
    :param dict estimation_mode: mode -> loose, strict, ignore
    :return: Entries that we have estimated to be available
    """
    estimator = get_plugin_by_name('estimate_release').instance
    result = []
    for entry in entries:
        est_date = estimator.estimate(entry)
        if est_date is None:
            log.debug('No release date could be determined for %s', entry['title'])
            if estimation_mode['mode'] == 'strict':
                entry.reject('has no release date')
                entry.complete()
            else:
                result.append(entry)
            continue
        if isinstance(est_date, datetime.date):
            # If we just got a date, add a time so we can compare it to now()
            est_date = datetime.datetime.combine(est_date, datetime.time())
        if datetime.datetime.now() >= est_date:
            log.debug('%s has been released at %s', entry['title'], est_date)
            result.append(entry)
        elif datetime.datetime.now() >= est_date - parse_timedelta(estimation_mode['optimistic']):
            log.debug('%s will be released at %s. Ignoring release estimation because estimated release date is '
                      'in less than %s', entry['title'], est_date, estimation_mode['optimistic'])
            result.append(entry)
        else:
            entry.reject('has not been released')
            entry.complete()
            log.verbose("%s hasn't been released yet (Expected: %s)", entry['title'], est_date)
    return result
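# Illustrative call of the estimated() variant above (not from the source). The dict keys
# match the docstring and the reads in the code; the concrete values, and the exact string
# format accepted by parse_timedelta(), are assumptions.
estimation_mode = {'mode': 'strict', 'optimistic': '7 days'}
# available_entries = self.estimated(task.entries, estimation_mode)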
def handle_phase(task, config):
    entry_actions = {
        'accept': Entry.accept,
        'reject': Entry.reject,
        'fail': Entry.fail}
    for item in config:
        requirement, action = list(item.items())[0]
        passed_entries = (e for e in task.entries if self.check_condition(requirement, e))
        if isinstance(action, str):
            if not phase == 'filter':
                continue
            # Simple entry action (accept, reject or fail) was specified as a string
            for entry in passed_entries:
                entry_actions[action](entry, 'Matched requirement: %s' % requirement)
        else:
            # Other plugins were specified to run on this entry
            fake_task = Task(task.manager, task.name, config=action, options=task.options)
            fake_task.session = task.session
            # This entry still belongs to our feed, accept/reject etc. will carry through.
            fake_task.all_entries[:] = passed_entries
            methods = {}
            for plugin_name, plugin_config in action.items():
                p = plugin.get_plugin_by_name(plugin_name)
                method = p.phase_handlers.get(phase)
                if method:
                    methods[method] = (fake_task, plugin_config)
            # Run the methods in priority order
            for method in sorted(methods, reverse=True):
                method(*methods[method])
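# Illustrative config for handle_phase (not from the source): a list of one-key dicts, each
# mapping a requirement expression (evaluated by self.check_condition) to either a plain
# string action or a dict of plugin configs. The expressions and the 'set' options shown
# here are assumptions.
example_if_config = [
    {"imdb_score > 7.5": 'accept'},
    {"quality in ['720p', '1080p']": {'set': {'path': '/downloads/hd'}}},
]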
def estimate(self, entry):
    if 'movie_name' not in entry:
        return
    movie_name = entry['movie_name']
    movie_year = entry.get('movie_year')
    if movie_year is not None and movie_year > datetime.datetime.now().year:
        log.debug('Skipping Blu-ray.com lookup since movie year is %s', movie_year)
        return
    log.debug('Searching Blu-ray.com for release date of {} ({})'.format(movie_name, movie_year))
    release_date = None
    try:
        with Session() as session:
            lookup = get_plugin_by_name('api_bluray').instance.lookup
            movie = lookup(title=movie_name, year=movie_year, session=session)
            if movie:
                release_date = movie.release_date
    except LookupError as e:
        log.debug(e)
    if release_date:
        log.debug('received release date: {0}'.format(release_date))
    return release_date
def get(self, session=None):
    """ Get TMDB movie data """
    args = tmdb_parser.parse_args()
    title = args.get('title')
    tmdb_id = args.get('tmdb_id')
    imdb_id = args.get('imdb_id')
    posters = args.pop('include_posters', False)
    backdrops = args.pop('include_backdrops', False)
    if not (title or tmdb_id or imdb_id):
        raise BadRequest(description)
    lookup = get_plugin_by_name('api_tmdb').instance.lookup
    try:
        movie = lookup(session=session, **args)
    except LookupError as e:
        raise NotFoundError(e.args[0])
    return_movie = movie.to_dict()
    if posters:
        return_movie['posters'] = [p.to_dict() for p in movie.posters]
    if backdrops:
        return_movie['backdrops'] = [p.to_dict() for p in movie.backdrops]
    return jsonify(return_movie)
def on_task_filter(self, task, config):
    for item in config['from']:
        for plugin_name, plugin_config in item.items():
            try:
                thelist = plugin.get_plugin_by_name(plugin_name).instance.get_list(plugin_config)
            except AttributeError:
                raise PluginError('Plugin %s does not support list interface' % plugin_name)
            already_accepted = []
            for entry in task.entries:
                result = thelist.get(entry)
                if not result:
                    continue
                if config['action'] == 'accept':
                    if config['single_match']:
                        if result not in already_accepted:
                            already_accepted.append(result)
                            # Add all new result data to entry
                            for key in result:
                                if key not in entry:
                                    entry[key] = result[key]
                            entry.accept()
                    else:
                        entry.accept()
                elif config['action'] == 'reject':
                    entry.reject()
def on_task_metainfo(self, task, config):
    if not config:
        # Don't run when we are disabled
        return
    # Generate the group settings for series plugin
    group_settings = {}
    allow_seasonless = False
    if isinstance(config, dict):
        allow_seasonless = config.pop('allow_seasonless', False)
        group_settings = config
    # Generate a list of unique series that have premieres
    metainfo_series = get_plugin_by_name('metainfo_series')
    guess_entry = metainfo_series.instance.guess_entry
    guessed_series = set()
    for entry in task.entries:
        if guess_entry(entry, allow_seasonless=allow_seasonless):
            if entry['series_season'] == 1 and entry['series_episode'] in (0, 1):
                guessed_series.add(entry['series_name'])
    # Reject any further episodes in those series
    for entry in task.entries:
        for series in guessed_series:
            if entry.get('series_name') == series and not (
                    entry.get('series_season') == 1 and entry.get('series_episode') in (0, 1)):
                task.reject(entry, 'Non premiere episode in a premiere series')
    # Combine settings and series into series plugin config format
    allseries = {'settings': {'series_premiere': group_settings}, 'series_premiere': list(guessed_series)}
    # Merge our config into the main series config
    self.merge_config(task, allseries)
def index():
    movie_queue = queue_get()
    tmdb_lookup = get_plugin_by_name('api_tmdb').instance.lookup
    for item in movie_queue:
        try:
            movie = tmdb_lookup(tmdb_id=item.tmdb_id, only_cached=True)
        except LookupError:
            item.overview = ('TMDb lookup was not successful, no overview available. '
                             'Lookup is being retried in the background.')
            log.debug('No themoviedb result for tmdb id %s' % item.tmdb_id)
            # this is probably not needed since non cached movies are retried also
            # in the cover function
            #
            # import thread
            # thread.start_new_thread(tmdb_lookup, (), {'imdb_id': item.imdb_id})
            continue
        # set thumb, but only if already in cache because retrieving is too slow here
        # movies without cached thumb use img tag reading /cover/<imdb_id> which will
        # retrieve the image and thus allows rendering the page immediately
        for poster in movie.posters:
            if poster.size == 'thumb':
                thumb = poster.get_file(only_cached=True)
                if thumb:
                    item.thumb = url_for('.userstatic', filename=posixpath.join(*thumb))
                break
        item.title = movie.name
        item.year = movie.released and movie.released.year
        item.overview = movie.overview
    context = {'movies': movie_queue}
    return render_template('movies/movies.html', **context)
def url_rewrite(self, task, entry):
    log.debug('Requesting %s' % entry['url'])
    page = requests.get(entry['url'])
    soup = get_soup(page.text)
    for link in soup.findAll('a', attrs={'href': re.compile(r'^/url')}):
        # Extract correct url from google internal link
        href = 'http://google.com' + link['href']
        args = parse_qs(urlparse(href).query)
        href = args['q'][0]
        # import IPython; IPython.embed()
        # import sys
        # sys.exit(1)
        # href = link['href'].lstrip('/url?q=').split('&')[0]
        # Test if entry with this url would be recognized by some urlrewriter
        log.trace('Checking if %s is known by some rewriter' % href)
        fake_entry = {'title': entry['title'], 'url': href}
        urlrewriting = plugin.get_plugin_by_name('urlrewriting')
        if urlrewriting['instance'].url_rewritable(task, fake_entry):
            log.debug('--> rewriting %s (known url pattern)' % href)
            entry['url'] = href
            return
        else:
            log.debug('<-- ignoring %s (unknown url pattern)' % href)
    raise UrlRewritingError('Unable to resolve')
def test_all_types_handled(self):
    declared_types = set(plugin_parsing.PARSER_TYPES)
    method_handlers = set(m[6:] for m in dir(get_plugin_by_name("parsing").instance) if m.startswith("parse_"))
    assert set(declared_types) == set(method_handlers), "declared parser types: %s, handled types: %s" % (
        declared_types,
        method_handlers,
    )
def __init__(self):
    try:
        self.backlog = plugin.get_plugin_by_name('backlog')
    except plugin.DependencyError:
        log.warning(
            'Unable to utilize backlog plugin, failed entries may not be retried properly.'
        )
def search(self, task, entry, config=None):
    from flexget.utils.template import environment
    from flexget.manager import manager

    search_strings = [
        urllib.quote(normalize_unicode(s).encode("utf-8"))
        for s in entry.get("search_strings", [entry["title"]])
    ]
    rss_plugin = plugin.get_plugin_by_name("rss")
    entries = set()
    rss_config = rss_plugin.instance.build_config(config)
    try:
        template = environment.from_string(rss_config["url"])
    except TemplateSyntaxError as e:
        raise plugin.PluginError("Invalid jinja template as rss url: %s" % e)
    rss_config["all_entries"] = True
    for search_string in search_strings:
        rss_config["url"] = template.render({"search_term": search_string})
        # TODO: capture some other_fields to try to find seed/peer/content_size numbers?
        try:
            results = rss_plugin.phase_handlers["input"](task, rss_config)
        except plugin.PluginError as e:
            log.error("Error attempting to get rss for %s: %s", rss_config["url"], e)
        else:
            entries.update(results)
    return entries
def on_task_metainfo(self, task, config):
    if not config:
        # Don't run when we are disabled
        return
    # Generate the group settings for series plugin
    group_settings = {}
    if isinstance(config, dict):
        group_settings = config
    # Generate a list of unique series that metainfo_series can parse for this task
    metainfo_series = get_plugin_by_name('metainfo_series')
    guess_entry = metainfo_series.instance.guess_entry
    guessed_series = {}
    for entry in task.entries:
        if guess_entry(entry):
            guessed_series.setdefault(normalize_series_name(entry['series_name']), entry['series_name'])
    # Combine settings and series into series plugin config format
    allseries = {
        'settings': {
            'all_series': group_settings
        },
        'all_series': guessed_series.values()
    }
    # Merge our config into the main series config
    self.merge_config(task, allseries)
def test_all_types_handled(self):
    declared_types = set(plugin_parsing.PARSER_TYPES)
    method_handlers = set(
        m[6:] for m in dir(get_plugin_by_name('parsing').instance) if m.startswith('parse_'))
    assert set(declared_types) == set(method_handlers), \
        'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
def parse_what(what, lookup=True, session=None):
    """
    Determines what information was provided by the search string `what`.
    If `lookup` is true, will fill in other information from tmdb.

    :param what: Can be one of:
        <Movie Title>: Search based on title
        imdb_id=<IMDB id>: search based on imdb id
        tmdb_id=<TMDB id>: search based on tmdb id
    :param bool lookup: Whether missing info should be filled in from tmdb.
    :param session: An existing session that will be used for lookups if provided.
    :rtype: dict
    :return: A dictionary with 'title', 'imdb_id' and 'tmdb_id' keys
    """
    tmdb_lookup = get_plugin_by_name('api_tmdb').instance.lookup
    result = {'title': None, 'imdb_id': None, 'tmdb_id': None}
    result['imdb_id'] = extract_id(what)
    if not result['imdb_id']:
        # Only fall back to a tmdb id or a plain title when no imdb id was found
        if what.startswith('tmdb_id='):
            result['tmdb_id'] = what[8:]
        else:
            result['title'] = what
    if not lookup:
        # If not doing an online lookup we can return here
        return result
    try:
        result['session'] = session
        movie = tmdb_lookup(**result)
    except LookupError, e:
        raise QueueError(e.message)
def on_task_metainfo(self, task, config):
    if not config:
        # Don't run when we are disabled
        return
    if task.is_rerun:
        # Since we are running after task start phase, make sure not to merge into the config again on reruns
        return
    # Generate the group settings for series plugin
    group_settings = {}
    if isinstance(config, dict):
        group_settings = config
    group_settings['identified_by'] = 'ep'
    # Generate a list of unique series that metainfo_series can parse for this task
    metainfo_series = plugin.get_plugin_by_name('metainfo_series')
    guess_entry = metainfo_series.instance.guess_entry
    guessed_series = {}
    for entry in task.entries:
        if guess_entry(entry, config=group_settings):
            guessed_series.setdefault(normalize_series_name(entry['series_name']), entry['series_name'])
    # Combine settings and series into series plugin config format
    allseries = {
        'settings': {
            'all_series': group_settings
        },
        'all_series': guessed_series.values()
    }
    # Merge our config into the main series config
    self.merge_config(task, allseries)
def parse_site(self, url, task):
    """Parse configured url and return releases array"""
    try:
        page = task.requests.get(url).content
    except RequestException as e:
        raise plugin.PluginError('Error getting input page: %s' % e)
    soup = get_soup(page)
    releases = []
    for entry in soup.find_all('div', attrs={'class': 'entry'}):
        release = {}
        title = entry.find('h2')
        if not title:
            log.debug('No h2 entrytitle')
            continue
        release['title'] = title.a.contents[0].strip()
        log.debug('Processing title %s' % (release['title']))
        for link in entry.find_all('a'):
            # no content in the link
            if not link.contents:
                continue
            link_name = link.contents[0]
            if link_name is None:
                continue
            if not isinstance(link_name, NavigableString):
                continue
            link_name = link_name.strip().lower()
            if link.has_attr('href'):
                link_href = link['href']
            else:
                continue
            log.debug('found link %s -> %s' % (link_name, link_href))
            # handle imdb link
            if link_name.lower() == 'imdb':
                log.debug('found imdb link %s' % link_href)
                release['imdb_id'] = extract_id(link_href)
            # test if entry with this url would be rewritable by known plugins (ie. downloadable)
            temp = {}
            temp['title'] = release['title']
            temp['url'] = link_href
            urlrewriting = plugin.get_plugin_by_name('urlrewriting')
            if urlrewriting['instance'].url_rewritable(task, temp):
                release['url'] = link_href
                log.trace('--> accepting %s (resolvable)' % link_href)
            else:
                log.trace('<-- ignoring %s (non-resolvable)' % link_href)
        # reject if no torrent link
        if 'url' not in release:
            from flexget.utils.log import log_once
            log_once('%s skipped due to missing or unsupported (unresolvable) download link' % (release['title']),
                     log)
        else:
            releases.append(release)
    return releases
def on_task_start(self, task, config):
    series = set()
    for input_name, input_config in config.get('from', {}).iteritems():
        input = get_plugin_by_name(input_name)
        if input.api_ver == 1:
            raise PluginError('Plugin %s does not support API v2' % input_name)
        method = input.phase_handlers['input']
        result = method(task, input_config)
        if not result:
            log.warning('Input %s did not return anything' % input_name)
            continue
        series.update([x['title'] for x in result])
    if not series:
        log.info('Did not get any series to generate series configuration')
        return
    # Make a series config with the found series
    series_config = {'generated_series': list(series)}
    # If options were specified, add them to the series config
    if 'settings' in config:
        series_config['settings'] = {'generated_series': config['settings']}
    # Merge our series config in with the base series config
    self.merge_config(task, series_config)
def search(self, task, entry, config=None):
    from flexget.utils.template import environment
    from flexget.manager import manager

    search_strings = [
        urllib.quote(normalize_unicode(s).encode('utf-8'))
        for s in entry.get('search_strings', [entry['title']])
    ]
    rss_plugin = plugin.get_plugin_by_name('rss')
    entries = set()
    rss_config = rss_plugin.instance.build_config(config)
    try:
        template = environment.from_string(rss_config['url'])
    except TemplateSyntaxError as e:
        raise plugin.PluginError('Invalid jinja template as rss url: %s' % e)
    rss_config['all_entries'] = True
    for search_string in search_strings:
        rss_config['url'] = template.render({'search_term': search_string})
        # TODO: capture some other_fields to try to find seed/peer/content_size numbers?
        try:
            results = rss_plugin.phase_handlers['input'](task, rss_config)
        except plugin.PluginError as e:
            log.error('Error attempting to get rss for %s: %s', rss_config['url'], e)
        else:
            entries.update(results)
    return entries
def cover(imdb_id):
    import os

    # TODO: return '' should be replaced with something sane, http error 404 ?
    tmdb_lookup = get_plugin_by_name('api_tmdb').instance.lookup
    try:
        movie = tmdb_lookup(imdb_id=imdb_id)
    except LookupError:
        log.error('No cached data for %s' % imdb_id)
        return ''
    filepath = None
    for poster in movie.posters:
        if poster.size == 'thumb':
            filepath = os.path.join(manager.config_base, 'userstatic', *poster.get_file())
            break
    if filepath is None:
        log.error('No cover for %s' % imdb_id)
        return ''
    elif not os.path.exists(filepath):
        log.error('File %s does not exist' % filepath)
        return ''
    log.debug('sending thumb file %s' % filepath)
    return send_file(filepath, mimetype='image/png')
def on_task_download(self, task, config): """ Call download plugin to generate the temp files we will load into deluge then verify they are valid torrents """ import deluge.ui.common config = self.prepare_config(config) if not config['enabled']: return # If the download plugin is not enabled, we need to call it to get our temp .torrent files if not 'download' in task.config: download = get_plugin_by_name('download') for entry in task.accepted: if not entry.get('deluge_id'): download.instance.get_temp_file(task, entry, handle_magnets=True) # Check torrent files are valid for entry in task.accepted: if os.path.exists(entry.get('file', '')): # Check if downloaded file is a valid torrent file try: deluge.ui.common.TorrentInfo(entry['file']) except Exception: entry.fail('Invalid torrent file') log.error('Torrent file appears invalid for: %s', entry['title'])
def on_task_download(self, task, config): """ Call download plugin to generate the temp files we will load into deluge then verify they are valid torrents """ import deluge.ui.common config = self.prepare_config(config) if not config['enabled']: return # If the download plugin is not enabled, we need to call it to get our temp .torrent files if 'download' not in task.config: download = plugin.get_plugin_by_name('download') for entry in task.accepted: if not entry.get('deluge_id'): download.instance.get_temp_file(task, entry, handle_magnets=True) # Check torrent files are valid for entry in task.accepted: if os.path.exists(entry.get('file', '')): # Check if downloaded file is a valid torrent file try: deluge.ui.common.TorrentInfo(entry['file']) except Exception: entry.fail('Invalid torrent file') log.error('Torrent file appears invalid for: %s', entry['title'])
def on_process_start(self, feed):
    """
    Register the usable set: keywords.
    """
    set_plugin = get_plugin_by_name('set')
    set_plugin.instance.register_keys({'apikey': 'text', 'application': 'text',
                                       'event': 'text', 'priority': 'integer'})
def on_task_learn(self, task, config):
    """Add downloaded movies to the database"""
    log.debug('check for learning')
    for entry in task.accepted:
        if 'imdb_id' not in entry:
            log.debug('`%s` does not have imdb_id' % entry['title'])
            continue
        parser = get_plugin_by_name('parsing').instance.parse_movie(entry['title'])
        quality = parser.quality.name
        log.debug('quality: %s' % quality)
        log.debug('imdb_id: %s' % entry['imdb_id'])
        log.debug('proper count: %s' % parser.proper_count)
        proper_movie = task.session.query(ProperMovie). \
            filter(ProperMovie.imdb_id == entry['imdb_id']). \
            filter(ProperMovie.quality == quality). \
            filter(ProperMovie.proper_count == parser.proper_count).first()
        if not proper_movie:
            pm = ProperMovie()
            pm.title = entry['title']
            pm.task = task.name
            pm.imdb_id = entry['imdb_id']
            pm.quality = quality
            pm.proper_count = parser.proper_count
            task.session.add(pm)
            log.debug('added %s' % pm)
        else:
            log.debug('%s already exists' % proper_movie)
def on_task_input(self, task, config):
    if not config:
        return
    config = self.prepare_config(config)
    entries = []
    with Session() as session:
        for queue_item in queue_get(session=session):
            entry = Entry()
            # make sure the entry has IMDB fields filled
            entry['url'] = ''
            if queue_item.imdb_id:
                entry['imdb_id'] = queue_item.imdb_id
                entry['imdb_url'] = make_imdb_url(queue_item.imdb_id)
            if queue_item.tmdb_id:
                entry['tmdb_id'] = queue_item.tmdb_id
            plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
            # check if title is a imdb url (leftovers from old database?)
            # TODO: maybe this should be fixed at the queue_get ...
            if 'http://' in queue_item.title:
                log.debug('queue contains url instead of title')
                if entry.get('movie_name'):
                    entry['title'] = entry['movie_name']
                else:
                    log.error('Found imdb url in imdb queue, but lookup failed: %s' % entry['title'])
                    continue
            else:
                # normal title
                entry['title'] = queue_item.title
            # Add the year and quality if configured to
            if config.get('year') and entry.get('movie_year'):
                entry['title'] += ' %s' % entry['movie_year']
            # TODO: qualities can now be ranges.. how should we handle this?
            if config.get('quality') and queue_item.quality != 'ANY':
                log.info('quality option of emit_movie_queue is disabled while we figure out how to handle ranges')
                # entry['title'] += ' %s' % queue_item.quality
            entries.append(entry)
            log.debug('Added title and IMDB id to new entry: %s - %s' % (entry['title'], entry['imdb_id']))
    return entries
def parse_what(what, lookup=True, session=None):
    """
    Determines what information was provided by the search string `what`.
    If `lookup` is true, will fill in other information from tmdb.

    :param what: Can be one of:
        <Movie Title>: Search based on title
        imdb_id=<IMDB id>: search based on imdb id
        tmdb_id=<TMDB id>: search based on tmdb id
    :param bool lookup: Whether missing info should be filled in from tmdb.
    :param session: An existing session that will be used for lookups if provided.
    :rtype: dict
    :return: A dictionary with 'title', 'imdb_id' and 'tmdb_id' keys
    """
    tmdb_lookup = plugin.get_plugin_by_name('api_tmdb').instance.lookup
    result = {'title': None, 'imdb_id': None, 'tmdb_id': None}
    result['imdb_id'] = extract_id(what)
    if not result['imdb_id']:
        if what.startswith('tmdb_id='):
            result['tmdb_id'] = what[8:]
        else:
            result['title'] = what
    if not lookup:
        # If not doing an online lookup we can return here
        return result
    search_entry = Entry(title=result['title'] or '')
    for field in ['imdb_id', 'tmdb_id']:
        if result.get(field):
            search_entry[field] = result[field]
    # Put lazy lookup fields on the search entry
    plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(search_entry)
    plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(search_entry)
    try:
        # Both ids are optional, but if movie_name was populated at least one of them will be there
        return {
            'title': search_entry['movie_name'],
            'imdb_id': search_entry.get('imdb_id'),
            'tmdb_id': search_entry.get('tmdb_id')
        }
    except KeyError as e:
        raise QueueError(e.message)
def parse_site(self, url, task):
    """Parse configured url and return releases array"""
    try:
        page = task.requests.get(url).content
    except RequestException as e:
        raise plugin.PluginError('Error getting input page: %s' % e)
    soup = get_soup(page)
    releases = []
    for entry in soup.find_all('div', attrs={'class': 'entry'}):
        release = {}
        title = entry.find('h2')
        if not title:
            log.debug('No h2 entrytitle')
            continue
        release['title'] = title.a.contents[0].strip()
        log.debug('Processing title %s' % (release['title']))
        for link in entry.find_all('a'):
            # no content in the link
            if not link.contents:
                continue
            link_name = link.contents[0]
            if link_name is None:
                continue
            if not isinstance(link_name, NavigableString):
                continue
            link_name = link_name.strip().lower()
            if link.has_attr('href'):
                link_href = link['href']
            else:
                continue
            log.debug('found link %s -> %s' % (link_name, link_href))
            # handle imdb link
            if link_name.lower() == 'imdb':
                log.debug('found imdb link %s' % link_href)
                release['imdb_id'] = extract_id(link_href)
            # test if entry with this url would be rewritable by known plugins (ie. downloadable)
            temp = {}
            temp['title'] = release['title']
            temp['url'] = link_href
            urlrewriting = plugin.get_plugin_by_name('urlrewriting')
            if urlrewriting['instance'].url_rewritable(task, temp):
                release['url'] = link_href
                log.trace('--> accepting %s (resolvable)' % link_href)
            else:
                log.trace('<-- ignoring %s (non-resolvable)' % link_href)
        # reject if no torrent link
        if not 'url' in release:
            from flexget.utils.log import log_once
            log_once('%s skipped due to missing or unsupported (unresolvable) download link' % (release['title']),
                     log)
        else:
            releases.append(release)
    return releases
def on_task_exit(self, task, config):
    urlrewrite = plugin.get_plugin_by_name('urlrewriting')['instance']
    for disable in config:
        log.debug('Enabling url rewriter %s' % disable)
        try:
            urlrewrite.disabled_rewriters.remove(disable)
        except ValueError:
            log.debug('%s does not exist' % disable)
def on_task_download(self, task, config):
    # If the download plugin is not enabled, we need to call it to get
    # our temp .torrent files
    if config['action'] == 'add' and 'download' not in task.config:
        download = plugin.get_plugin_by_name('download')
        download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)