def register_web_server(manager):
    """Registers the web server and loads the API (always) and the WebUI via config."""
    global web_server, config_hash

    if not manager.is_daemon:
        return

    config = manager.config.get('web_server')
    if get_config_hash(config) == config_hash:
        log.debug("web server config hasn't changed")
        return
    config_hash = get_config_hash(config)
    web_server_config = prepare_config(config)

    # Remove any existing web server instance before (re)starting
    stop_server(manager)

    if not web_server_config:
        return

    log.info('Running web server at IP %s:%s', web_server_config['bind'], web_server_config['port'])

    # Register API
    api_app.secret_key = get_secret()

    log.info('Initiating API')
    register_app('/api', api_app)

    # Register WebUI
    if web_server_config.get('web_ui'):
        log.info('Registering WebUI')
        register_web_ui(manager)

    web_server = setup_server(web_server_config)
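Every snippet in this collection calls get_config_hash(), but its definition is not included here. A minimal sketch of such a helper, assuming only what the call sites imply (it accepts an arbitrary config structure and returns a stable hash string), could look like this:

import hashlib
import json


def get_config_hash(config):
    """Return a stable hash string for a (possibly nested) config structure.

    Sketch only: the real helper may differ; this just illustrates the contract
    assumed by the call sites in these snippets.
    """
    if isinstance(config, (dict, list)):
        # sort_keys makes the JSON representation deterministic, so equal configs
        # hash equally regardless of dict ordering
        return hashlib.md5(json.dumps(config, sort_keys=True).encode('utf-8')).hexdigest()
    return hashlib.md5(str(config).encode('utf-8')).hexdigest()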
def irc_update_config(manager):
    global irc_manager, config_hash

    # Exit if we're not running daemon mode
    if not manager.is_daemon:
        return

    config = manager.config.get('irc')
    # No config, no connections
    if not config:
        logger.debug('No irc connections defined in the config')
        stop_irc(manager)
        return

    if irc_bot is None:
        logger.error(
            'ImportError: irc_bot module not found or version is too old. Shutting down daemon.'
        )
        stop_irc(manager)
        manager.shutdown(finish_queue=False)
        return

    config_hash.setdefault('names', {})
    new_config_hash = get_config_hash(config)

    if config_hash.get('config') == new_config_hash:
        logger.verbose('IRC config has not been changed. Not reloading any connections.')
        return
    # Store the new hash under the same key that is compared against above
    config_hash['config'] = new_config_hash

    if irc_manager is not None and irc_manager.is_alive():
        irc_manager.update_config(config)
    else:
        irc_manager = IRCConnectionManager(config)
def is_config_modified(self, last_hash):
    """
    Check the task's config hash. Returns whether the config has been modified,
    along with the current config hash.

    :param last_hash: persisted hash record from the previous run (or None); its
        ``hash`` attribute is compared and updated
    :return bool, str: config modified flag and config hash
    """
    # Save current config hash and set config_modified flag
    config_modified = False
    config_hash = get_config_hash(self.config)
    if self.is_rerun:
        # Restore the config to the state right after the start phase
        if self.prepared_config:
            self.config = copy.deepcopy(self.prepared_config)
        else:
            log.error("BUG: No prepared_config on rerun, please report.")
        config_modified = False
    elif not last_hash:
        config_modified = True
    elif last_hash.hash != config_hash:
        config_modified = True
        last_hash.hash = config_hash
    else:
        config_modified = False
    return config_modified, config_hash
def irc_update_config(manager):
    global irc_manager, config_hash

    # Exit if we're not running daemon mode
    if not manager.is_daemon:
        return

    config = manager.config.get('irc')
    # No config, no connections
    if not config:
        log.debug('No irc connections defined in the config')
        stop_irc(manager)
        return

    if irc_bot is None:
        log.error('ImportError: irc_bot module not found or version is too old. Shutting down daemon.')
        stop_irc(manager)
        manager.shutdown(finish_queue=False)
        return

    config_hash.setdefault('names', {})
    new_config_hash = get_config_hash(config)

    if config_hash.get('config') == new_config_hash:
        log.verbose('IRC config has not been changed. Not reloading any connections.')
        return
    # Store the new hash under the same key that is compared against above
    config_hash['config'] = new_config_hash

    if irc_manager is not None and irc_manager.is_alive():
        irc_manager.update_config(config)
    else:
        irc_manager = IRCConnectionManager(config)
def update_config(self, config):
    new_irc_connections = {}
    removed_connections = set(self.config.keys()) - set(config.keys())
    for name, conf in config.items():
        hash = get_config_hash(conf)
        if name in self.config and config_hash['names'].get(name) == hash:
            continue
        try:
            new_irc_connections[name] = IRCConnection(conf, name)
            config_hash['names'][name] = hash
        except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
            logger.error('Failed to update config. Error when updating {}: {}', name, e)
            return

    # stop connections that have been removed from config
    for name in removed_connections:
        self.stop_connection(name)
        del irc_connections[name]

    # and (re)start the new ones
    for name, connection in new_irc_connections.items():
        if name in irc_connections:
            self.stop_connection(name)
        irc_connections[name] = connection
        connection.thread.start()

    self.config = config
def update_config(self, config):
    new_irc_connections = {}
    removed_connections = set(self.config.keys()) - set(config.keys())
    for name, conf in config.items():
        hash = get_config_hash(conf)
        if name in self.config and config_hash['names'].get(name) == hash:
            continue
        try:
            new_irc_connections[name] = IRCConnection(conf, name)
            config_hash['names'][name] = hash
        except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
            log.error('Failed to update config. Error when updating %s: %s', name, e)
            return

    # stop connections that have been removed from config
    for name in removed_connections:
        self.stop_connection(name)
        del irc_connections[name]

    # and (re)start the new ones
    for name, connection in new_irc_connections.items():
        if name in irc_connections:
            self.stop_connection(name)
        irc_connections[name] = connection
        connection.thread.start()

    self.config = config
def is_config_modified(self, last_hash):
    """
    Check the task's config hash. Returns whether the config has been modified,
    along with the current config hash.

    :param last_hash: persisted hash record from the previous run (or None); its
        ``hash`` attribute is compared and updated
    :return bool, str: config modified flag and config hash
    """
    # Save current config hash and set config_modified flag
    config_modified = False
    config_hash = get_config_hash(self.config)
    if self.is_rerun:
        # Restore the config to the state right after the start phase
        if self.prepared_config:
            self.config = copy.deepcopy(self.prepared_config)
        else:
            log.error('BUG: No prepared_config on rerun, please report.')
        config_modified = False
    elif not last_hash:
        config_modified = True
    elif last_hash.hash != config_hash:
        config_modified = True
        last_hash.hash = config_hash
    else:
        config_modified = False
    return config_modified, config_hash
def on_task_start(self, task, config):
    series = {}
    for input_name, input_config in config.get('from', {}).items():
        input = plugin.get_plugin_by_name(input_name)
        if input.api_ver == 1:
            raise plugin.PluginError('Plugin %s does not support API v2' % input_name)

        method = input.phase_handlers['input']
        try:
            result = method(task, input_config)
        except PluginError as e:
            log.warning('Error during input plugin %s: %s' % (input_name, e))
            continue
        if not result:
            log.warning('Input %s did not return anything' % input_name)
            continue

        for entry in result:
            s = series.setdefault(entry['title'], {})
            if entry.get('tvdb_id'):
                s['set'] = {'tvdb_id': entry['tvdb_id']}
            # Allow configure_series to set anything available to series
            for key, schema in self.settings_schema['properties'].items():
                if 'configure_series_' + key in entry:
                    errors = process_config(entry['configure_series_' + key], schema, set_defaults=False)
                    if errors:
                        log.debug('not setting series option %s for %s. errors: %s' %
                                  (key, entry['title'], errors))
                    else:
                        s[key] = entry['configure_series_' + key]

    # Set the config_modified flag if the list of shows changed since last time
    new_hash = str(get_config_hash(series))
    with Session() as session:
        last_hash = session.query(LastHash).filter(LastHash.task == task.name).first()
        if not last_hash:
            last_hash = LastHash(task=task.name)
            session.add(last_hash)
        if last_hash.hash != new_hash:
            task.config_changed()
            last_hash.hash = new_hash

    if not series:
        log.info('Did not get any series to generate series configuration')
        return

    # Make a series config with the found series
    # Turn our dict of series with settings into a list of one item dicts
    series_config = {'generated_series': [dict([x]) for x in series.items()]}
    # If options were specified, add them to the series config
    if 'settings' in config:
        series_config['settings'] = {'generated_series': config['settings']}
    # Merge our series config in with the base series config
    self.merge_config(task, series_config)
def on_task_start(self, task, config):
    series = {}
    for input_name, input_config in config.get("from", {}).items():
        input = plugin.get_plugin_by_name(input_name)
        if input.api_ver == 1:
            raise plugin.PluginError("Plugin %s does not support API v2" % input_name)

        method = input.phase_handlers["input"]
        try:
            result = method(task, input_config)
        except PluginError as e:
            log.warning("Error during input plugin %s: %s" % (input_name, e))
            continue
        if not result:
            log.warning("Input %s did not return anything" % input_name)
            continue

        for entry in result:
            s = series.setdefault(entry["title"], {})
            if entry.get("tvdb_id"):
                s["set"] = {"tvdb_id": entry["tvdb_id"]}
            # Allow configure_series to set anything available to series
            for key, schema in self.settings_schema["properties"].items():
                if "configure_series_" + key in entry:
                    errors = process_config(
                        entry["configure_series_" + key], schema, set_defaults=False
                    )
                    if errors:
                        log.debug(
                            "not setting series option %s for %s. errors: %s"
                            % (key, entry["title"], errors)
                        )
                    else:
                        s[key] = entry["configure_series_" + key]

    # Set the config_modified flag if the list of shows changed since last time
    new_hash = str(get_config_hash(series))
    with Session() as session:
        last_hash = session.query(LastHash).filter(LastHash.task == task.name).first()
        if not last_hash:
            last_hash = LastHash(task=task.name)
            session.add(last_hash)
        if last_hash.hash != new_hash:
            task.config_changed()
            last_hash.hash = new_hash

    if not series:
        log.info("Did not get any series to generate series configuration")
        return

    # Make a series config with the found series
    # Turn our dict of series with settings into a list of one item dicts
    series_config = {"generated_series": [dict([x]) for x in series.items()]}
    # If options were specified, add them to the series config
    if "settings" in config:
        series_config["settings"] = {"generated_series": config["settings"]}
    # Merge our series config in with the base series config
    self.merge_config(task, series_config)
def wrapped_func(*args, **kwargs):
    # get task from method parameters
    task = args[1]

    self.config_hash = get_config_hash(args[2])

    logger.trace('self.name: {}', self.name)
    logger.trace('hash: {}', self.config_hash)

    self.cache_name = self.name + '_' + self.config_hash
    logger.debug(
        'cache name: {} (has: {})', self.cache_name, ', '.join(list(self.cache.keys()))
    )

    if not task.options.nocache:
        cache_value = self.cache.get(self.cache_name, None)
        if cache_value:
            # return from the cache
            logger.verbose('Restored entries from cache')
            return cache_value

        if self.persist:
            # Check database cache
            db_cache = self.load_from_db()
            if db_cache is not None:
                return db_cache

    # Nothing was restored from db or memory cache, run the function
    logger.trace('cache miss')
    # call input event
    try:
        response = func(*args, **kwargs)
    except PluginError as e:
        # If there was an error producing entries, but we have valid entries in the db cache, return those.
        if self.persist and not task.options.nocache:
            cache = self.load_from_db(load_expired=True)
            if cache is not None:
                logger.error(
                    'There was an error during {} input ({}), using cache instead.',
                    self.name,
                    e,
                )
                return cache

        # If there was nothing in the db cache, re-raise the error.
        raise

    # store results to cache
    logger.debug('storing entries to cache {}', self.cache_name)
    cache = IterableCache(response, self.store_to_db if self.persist else None)
    self.cache[self.cache_name] = cache
    return cache
def on_task_start(self, task, config):
    if not config:
        return

    files = config
    if isinstance(config, str):
        files = [config]

    for file_name in files:
        file_name = os.path.expanduser(file_name)
        if not os.path.isabs(file_name):
            file_name = os.path.join(task.manager.config_base, file_name)
        with io.open(file_name, encoding='utf-8') as inc_file:
            include = yaml.load(inc_file)
            inc_file.flush()

        errors = process_config(include, plugin.plugin_schemas(interface='task'))
        if errors:
            log.error('Included file %s has invalid config:', file_name)
            for error in errors:
                log.error('[%s] %s', error.json_pointer, error.message)
            task.abort('Invalid config in included file %s' % file_name)

        new_hash = str(get_config_hash(include))
        with Session() as session:
            last_hash = (
                session.query(LastHash)
                .filter(LastHash.task == task.name)
                .filter(LastHash.file == file_name)
                .first()
            )
            if not last_hash:
                log.debug('no config hash detected for task %s with file %s, creating', task.name, file_name)
                last_hash = LastHash(task=task.name, file=file_name)
                session.add(last_hash)
            if last_hash.hash != new_hash:
                log.debug('new hash detected, triggering config change event')
                task.config_changed()
                last_hash.hash = new_hash

        log.debug('Merging %s into task %s', file_name, task.name)
        # merge
        try:
            merge_dict_from_to(include, task.config)
        except MergeException:
            raise plugin.PluginError(
                'Failed to merge include file to task %s, incompatible datatypes' % task.name
            )
def check_config_hash(self):
    """
    Checks the task's config hash and updates the hash if necessary.
    """
    # Save current config hash and set config_modified flag
    config_hash = get_config_hash(self.config)
    if self.is_rerun:
        # Restore the config to the state right after the start phase
        if self.prepared_config:
            self.config = copy.deepcopy(self.prepared_config)
        else:
            log.error('BUG: No prepared_config on rerun, please report.')
    with Session() as session:
        last_hash = session.query(TaskConfigHash).filter(TaskConfigHash.task == self.name).first()
        if not last_hash:
            session.add(TaskConfigHash(task=self.name, hash=config_hash))
            self.config_changed()
        elif last_hash.hash != config_hash:
            last_hash.hash = config_hash
            self.config_changed()
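check_config_hash() above queries a TaskConfigHash record by task name and compares and updates its hash column. The model definition is not part of these snippets; a minimal SQLAlchemy sketch consistent with that usage (column names taken from the code above, table name and column types assumed) could be:

from sqlalchemy import Column, Integer, String, Unicode
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class TaskConfigHash(Base):
    """Sketch of the record implied by check_config_hash(); the real model may differ."""

    __tablename__ = 'task_config_hash'  # assumed table name

    id = Column(Integer, primary_key=True)
    task = Column(Unicode, index=True)  # task name, filtered on above
    hash = Column(String)  # last seen config hash for the task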
def start_connections(self):
    """
    Start all the irc connections. Stop the daemon if there are failures.

    :return:
    """
    # First we validate the config for all connections including their .tracker files
    for conn_name, config in self.config.items():
        try:
            log.info('Starting IRC connection for %s', conn_name)
            conn = IRCConnection(config, conn_name)
            irc_connections[conn_name] = conn
            config_hash['names'][conn_name] = get_config_hash(config)
        except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
            log.error(e)
            if conn_name in irc_connections:
                del irc_connections[conn_name]  # remove it from the list of connections

    # Now we can start
    for conn_name, connection in irc_connections.items():
        connection.thread.start()
def start_connections(self):
    """
    Start all the irc connections. Stop the daemon if there are failures.

    :return:
    """
    # First we validate the config for all connections including their .tracker files
    for conn_name, config in self.config.items():
        try:
            logger.info('Starting IRC connection for {}', conn_name)
            conn = IRCConnection(config, conn_name)
            irc_connections[conn_name] = conn
            config_hash['names'][conn_name] = get_config_hash(config)
        except (MissingConfigOption, TrackerFileParseError, TrackerFileError, OSError) as e:
            logger.error(e)
            if conn_name in irc_connections:
                del irc_connections[conn_name]  # remove it from the list of connections

    # Now we can start
    for conn_name, connection in irc_connections.items():
        connection.thread.start()
def check_config_hash(self):
    """
    Checks the task's config hash and updates the hash if necessary.
    """
    # Save current config hash and set config_modified flag
    config_hash = get_config_hash(self.config)
    if self.is_rerun:
        # Restore the config to the state right after the start phase
        if self.prepared_config:
            self.config = copy.deepcopy(self.prepared_config)
        else:
            log.error('BUG: No prepared_config on rerun, please report.')
    with Session() as session:
        last_hash = (
            session.query(TaskConfigHash).filter(TaskConfigHash.task == self.name).first()
        )
        if not last_hash:
            session.add(TaskConfigHash(task=self.name, hash=config_hash))
            self.config_changed()
        elif last_hash.hash != config_hash:
            last_hash.hash = config_hash
            self.config_changed()
def wrapped_func(*args, **kwargs):
    # get task from method parameters
    task = args[1]

    # detect api version
    api_ver = 1
    if len(args) == 3:
        api_ver = 2

    if api_ver == 1:
        # get name for a cache from tasks configuration
        if self.name not in task.config:
            raise Exception(
                '@cache config name %s is not configured in task %s' % (self.name, task.name))
        hash = get_config_hash(task.config[self.name])
    else:
        hash = get_config_hash(args[2])

    log.trace('self.name: %s' % self.name)
    log.trace('hash: %s' % hash)

    cache_name = self.name + '_' + hash
    log.debug('cache name: %s (has: %s)' % (cache_name, ', '.join(list(self.cache.keys()))))

    cache_value = self.cache.get(cache_name, None)
    if not task.options.nocache and cache_value:
        # return from the cache
        log.trace('cache hit')
        entries = []
        for entry in cache_value:
            fresh = copy.deepcopy(entry)
            entries.append(fresh)
        if entries:
            log.verbose('Restored %s entries from cache' % len(entries))
            return entries
    else:
        if self.persist and not task.options.nocache:
            # Check database cache
            with Session() as session:
                db_cache = (session.query(InputCache).filter(InputCache.name == self.name)
                            .filter(InputCache.hash == hash)
                            .filter(InputCache.added > datetime.now() - self.persist)
                            .first())
                if db_cache:
                    entries = [e.entry for e in db_cache.entries]
                    log.verbose('Restored %s entries from db cache' % len(entries))
                    # Store to in memory cache
                    self.cache[cache_name] = copy.deepcopy(entries)
                    return entries

    # Nothing was restored from db or memory cache, run the function
    log.trace('cache miss')
    # call input event
    try:
        response = func(*args, **kwargs)
    except PluginError as e:
        # If there was an error producing entries, but we have valid entries in the db cache, return those.
        if self.persist and not task.options.nocache:
            with Session() as session:
                db_cache = (session.query(InputCache).filter(InputCache.name == self.name)
                            .filter(InputCache.hash == hash)
                            .first())
                if db_cache and db_cache.entries:
                    log.error(
                        'There was an error during %s input (%s), using cache instead.'
                        % (self.name, e))
                    entries = [ent.entry for ent in db_cache.entries]
                    log.verbose('Restored %s entries from db cache' % len(entries))
                    # Store to in memory cache
                    self.cache[cache_name] = copy.deepcopy(entries)
                    return entries

        # If there was nothing in the db cache, re-raise the error.
        raise

    if api_ver == 1:
        response = task.entries
    if not isinstance(response, list):
        log.warning('Input %s did not return a list, cannot cache.' % self.name)
        return response

    # store results to cache
    log.debug('storing to cache %s %s entries' % (cache_name, len(response)))
    try:
        self.cache[cache_name] = copy.deepcopy(response)
    except TypeError:
        # might be caused because of backlog restoring some idiotic stuff, so not necessarily a bug
        log.critical(
            'Unable to save task content into cache, '
            'if problem persists longer than a day please report this as a bug')
    if self.persist:
        # Store to database
        log.debug('Storing cache %s to database.' % cache_name)
        with Session() as session:
            db_cache = (session.query(InputCache).filter(InputCache.name == self.name)
                        .filter(InputCache.hash == hash)
                        .first())
            if not db_cache:
                db_cache = InputCache(name=self.name, hash=hash)
            db_cache.entries = [InputCacheEntry(entry=ent) for ent in response]
            db_cache.added = datetime.now()
            session.merge(db_cache)

    return response
def wrapped_func(*args, **kwargs):
    # get task from method parameters
    task = args[1]

    # detect api version
    api_ver = 1
    if len(args) == 3:
        api_ver = 2

    if api_ver == 1:
        # get name for a cache from tasks configuration
        if self.name not in task.config:
            raise Exception(
                '@cache config name %s is not configured in task %s' % (self.name, task.name)
            )
        hash = get_config_hash(task.config[self.name])
    else:
        hash = get_config_hash(args[2])

    log.trace('self.name: %s' % self.name)
    log.trace('hash: %s' % hash)

    cache_name = self.name + '_' + hash
    log.debug(
        'cache name: %s (has: %s)' % (cache_name, ', '.join(list(self.cache.keys())))
    )

    cache_value = self.cache.get(cache_name, None)
    if not task.options.nocache and cache_value:
        # return from the cache
        log.trace('cache hit')
        entries = []
        for entry in cache_value:
            fresh = copy.deepcopy(entry)
            entries.append(fresh)
        if entries:
            log.verbose('Restored %s entries from cache' % len(entries))
            return entries
    else:
        if self.persist and not task.options.nocache:
            # Check database cache
            with Session() as session:
                db_cache = (
                    session.query(InputCache)
                    .filter(InputCache.name == self.name)
                    .filter(InputCache.hash == hash)
                    .filter(InputCache.added > datetime.now() - self.persist)
                    .first()
                )
                if db_cache:
                    entries = [e.entry for e in db_cache.entries]
                    log.verbose('Restored %s entries from db cache' % len(entries))
                    # Store to in memory cache
                    self.cache[cache_name] = copy.deepcopy(entries)
                    return entries

    # Nothing was restored from db or memory cache, run the function
    log.trace('cache miss')
    # call input event
    try:
        response = func(*args, **kwargs)
    except PluginError as e:
        # If there was an error producing entries, but we have valid entries in the db cache, return those.
        if self.persist and not task.options.nocache:
            with Session() as session:
                db_cache = (
                    session.query(InputCache)
                    .filter(InputCache.name == self.name)
                    .filter(InputCache.hash == hash)
                    .first()
                )
                if db_cache and db_cache.entries:
                    log.error(
                        'There was an error during %s input (%s), using cache instead.'
                        % (self.name, e)
                    )
                    entries = [ent.entry for ent in db_cache.entries]
                    log.verbose('Restored %s entries from db cache' % len(entries))
                    # Store to in memory cache
                    self.cache[cache_name] = copy.deepcopy(entries)
                    return entries

        # If there was nothing in the db cache, re-raise the error.
        raise

    if api_ver == 1:
        response = task.entries
    if not isinstance(response, list):
        log.warning('Input %s did not return a list, cannot cache.' % self.name)
        return response

    # store results to cache
    log.debug('storing to cache %s %s entries' % (cache_name, len(response)))
    try:
        self.cache[cache_name] = copy.deepcopy(response)
    except TypeError:
        # might be caused because of backlog restoring some idiotic stuff, so not necessarily a bug
        log.critical(
            'Unable to save task content into cache, '
            'if problem persists longer than a day please report this as a bug'
        )
    if self.persist:
        # Store to database
        log.debug('Storing cache %s to database.' % cache_name)
        with Session() as session:
            db_cache = (
                session.query(InputCache)
                .filter(InputCache.name == self.name)
                .filter(InputCache.hash == hash)
                .first()
            )
            if not db_cache:
                db_cache = InputCache(name=self.name, hash=hash)
            db_cache.entries = [InputCacheEntry(entry=ent) for ent in response]
            db_cache.added = datetime.now()
            session.merge(db_cache)

    return response
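The cached-input wrappers above persist entries through InputCache and InputCacheEntry, filtering on name, hash and added, and reading/writing an entries collection whose items carry an entry attribute. Those models are not shown in this collection; a minimal SQLAlchemy sketch consistent with that usage (table names, column types and the relationship wiring are assumptions) could be:

from datetime import datetime

from sqlalchemy import Column, DateTime, ForeignKey, Integer, PickleType, String, Unicode
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()


class InputCache(Base):
    """Sketch of the cache table implied by the wrappers above; the real model may differ."""

    __tablename__ = 'input_cache'  # assumed table name

    id = Column(Integer, primary_key=True)
    name = Column(Unicode, index=True)  # input plugin name
    hash = Column(String)  # config hash the cached entries were produced with
    added = Column(DateTime, default=datetime.now)

    entries = relationship(
        'InputCacheEntry', backref='cache', cascade='all, delete, delete-orphan'
    )


class InputCacheEntry(Base):
    """One cached entry belonging to an InputCache row."""

    __tablename__ = 'input_cache_entry'  # assumed table name

    id = Column(Integer, primary_key=True)
    cache_id = Column(Integer, ForeignKey('input_cache.id'))
    entry = Column(PickleType)  # assumed storage type for the cached entry object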