def load(module_name, name):
    log.info('application', "Importing application %s from module %s",
             name, module_name)
    module = sys.modules.get(module_name)
    if module:
        log.warning('application',
                    "Application module %s has already been loaded.",
                    module_name)
    else:
        module = reflect.named_module(module_name)
    application = getattr(module, name, None)
    if application is None:
        raise ValueError('Module %s has no attribute %s'
                         % (module_name, name))
    if not IApplication.providedBy(application):
        raise ValueError('Variable %s.%s should provide IApplication '
                         'interface' % (module_name, name))
    try:
        application.load()
    except Exception as e:
        error.handle_exception('application', e,
                               'Error loading application: %s',
                               application.name)
        application.unload()
        raise
    else:
        get_application_registry().register(application)
    log.info('application', "Loading application %s complete.", name)
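A minimal usage sketch for load(); the module path and attribute name below are hypothetical, and the snippet assumes that module defines a variable providing IApplication:

# Hypothetical names: 'myproject.application' must define 'myapp',
# an object providing IApplication.
load('myproject.application', 'myapp')

# After a successful load the application is registered, so it can be
# looked up by name later (lookup() is the same call unload() uses below):
application = get_application_registry().lookup('myapp')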
def do_cleanup(connection, host_agent_id):
    '''
    Performs cleanup after a host agent that left its descriptor in the
    database. Deletes the descriptor along with the descriptors of the
    partners it was hosting.
    '''
    desc = yield safe_get(connection, host_agent_id)
    if isinstance(desc, host.Descriptor):
        for partner in desc.partners:
            partner_desc = yield safe_get(connection, partner.recipient.key)
            if partner_desc:
                host_part = first(x for x in partner_desc.partners
                                  if x.role == 'host')
                if host_part is None:
                    log.warning('host_restart',
                                'Agent id: %s type: %s did not have any '
                                'host partner. So we are leaving it be.',
                                partner_desc.doc_id, partner_desc.type_name)
                elif host_part.recipient.key == host_agent_id:
                    log.info('host_restart',
                             "Deleting document with ID: %s",
                             partner_desc.doc_id)
                    yield connection.delete_document(partner_desc)
                else:
                    log.warning('host_restart',
                                "Not deleting descriptor of the agent id: "
                                "%s, agent_type: %s, as it seems to be "
                                "hosted by the host agent: %s. Although "
                                "keep in mind that he will not receive the "
                                "goodbye notification from us!",
                                partner_desc.doc_id, partner_desc.type_name,
                                host_part.recipient.key)
        log.info('host_restart', "Deleting document with ID: %s",
                 desc.doc_id)
        yield connection.delete_document(desc)
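do_cleanup() is a yield-based coroutine, so it presumably needs to be driven by Twisted's inlineCallbacks (or feat's equivalent wrapper). A hedged sketch with a made-up agent id:

from twisted.internet import defer

@defer.inlineCallbacks
def cleanup_after_host(connection):
    # 'some-host-agent-id' is a hypothetical descriptor id of a host
    # agent that died and left its documents behind
    yield do_cleanup(connection, 'some-host-agent-id')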
def __enter__(self):
    opts, args = parse_options()
    self.connection = create_connection(opts.db_host, opts.db_port,
                                        opts.db_name)
    log.FluLogKeeper.init()
    log.FluLogKeeper.set_debug("5")
    log.info("script", "Using host: %s, port: %s, db_name: %s",
             opts.db_host, opts.db_port, opts.db_name)
    self._deferred = defer.Deferred()
    return self._deferred, args
def __enter__(self):
    self.connection = create_connection(
        self.config.host, self.config.port, self.config.name)
    log.info('script', "Using host: %s, port: %s, db_name: %s",
             self.config.host, self.config.port, self.config.name)
    self._deferred = defer.Deferred()
    return self._deferred
def body(connection):
    if len(args) < 1:
        log.error("script", "USAGE: locate.py <agent_id>")
        return
    agent_id = args[0]
    try:
        host = yield locate(connection, agent_id)
    except Exception as e:
        log.error("script", "ERROR: %r", e)
        return
    log.info("script", "Agent runs at host: %r", host)
def body(connection):
    documents = applications.get_initial_data_registry().itervalues()
    log.info('script', "I will push %d documents.", len(list(documents)))
    d = create_db(connection)
    d.addCallback(defer.drop_param, push_initial_data, connection,
                  opts.force)
    if opts.migration:
        d.addCallback(defer.drop_param, migration_script, connection)
    return d
def __enter__(self):
    self.connection = create_connection(
        self.config.host, self.config.port, self.config.name,
        self.config.username, self.config.password, self.config.https)
    log.info('script', "Using host: %s, port: %s, db_name: %s, ssl: %r",
             self.config.host, self.config.port, self.config.name,
             self.config.https)
    self._deferred = defer.Deferred()
    return self._deferred
def unload(name):
    log.info("application", "Starting unloading application %r", name)
    r = get_application_registry()
    application = r.lookup(name)
    if not application:
        log.error("application", "Tried to unload application which is not "
                  "loaded: %r", name)
        return
    try:
        application.unload()
    except Exception as e:
        error.handle_exception("application", e, "Problem while unloading "
                               "application %r", name)
    log.info("application", "Unloading application %r complete", name)
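For symmetry, a short usage sketch pairing load() and unload(); the application name is hypothetical and must match the attribute name used at load time:

load('myproject.application', 'myapp')  # hypothetical names, as above
unload('myapp')
# unload() only logs an error and returns if the name was never loaded,
# so a second unload('myapp') is harmless.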
def load(self, obj, parent, name=None):
    try:
        parse = guistate.IGuiState(obj)
        if name is None:
            name = parse.get_name()
        else:
            name = [name, None]
        node = self.model.append(parent, name)
        for e in parse.iter_elements():
            self.load(e[1], node, e[0])
        self.view.expand_row(self.model.get_path(node), True)
    except TypeError as e:
        log.info('agent-info', 'Error adapting: %r', e)
def testDefaultLogging(self):
    keeper = DummyLogKeeper()
    log.set_default(keeper)

    log.log("foo", "1")
    log.debug("bar", "2", 42)
    log.info("spam", "3")
    log.warning("bacon", "4", 2, 3, 5)
    log.error("eggs", "4")

    self.assertEqual(keeper.entries,
                     [(LogLevel.log, None, 'foo', '1', (), 1),
                      (LogLevel.debug, None, 'bar', '2', (42, ), 1),
                      (LogLevel.info, None, 'spam', '3', (), 1),
                      (LogLevel.warning, None, 'bacon', '4', (2, 3, 5), 1),
                      (LogLevel.error, None, 'eggs', '4', (), 1)])
def __exit__(self, type, value, traceback):
    if type is not None:
        raise type(value), None, traceback

    if self.opts.agency_daemonize:
        tmp = tempfile.mktemp(suffix="feat.temp.log")
        log.info("run", "Logging will temporarily be done to: %s", tmp)
        run.daemonize(stdout=tmp, stderr=tmp)

    # dump all the log entries logged so far to the FluLogKeeper again;
    # the reason for this is that we want them to be included in the text
    # file (so far they have been printed to the console)
    tee = log.get_default()
    buff = tee.get_keeper('buffer')
    flulog = tee.get_keeper('flulog')
    buff.dump(flulog)

    # use the resolver from twisted.names instead of the default;
    # the reason for this is that ThreadedResolver behaves strangely
    # after the reconnection - raises the DNSLookupError for names
    # which have been resolved while there was no connection
    resolver.installResolver(reactor)

    reactor.run()
def migration_script(connection):
    log.info("script", "Running the migration script.")
    for application in applications.get_application_registry().itervalues():
        keys = ApplicationVersions.key_for(application.name)
        version_doc = yield connection.query_view(ApplicationVersions,
                                                  **keys)
        if not version_doc:
            to_run = application.get_migrations()
            version_doc = ApplicationVersion(name=unicode(application.name))
        else:
            version_doc = version_doc[0]
            to_run = [(version, migration)
                      for version, migration in application.get_migrations()
                      if version > version_doc.version]
        if not to_run:
            log.info("script", "There are no migrations for application %s "
                     "from version %s to %s", application.name,
                     version_doc.version, application.version)
            continue
        try:
            for version, migration in to_run:
                yield migration.run(connection._database)
                if isinstance(version, str):
                    version = unicode(version)
                version_doc.version = version
                yield connection.save_document(version_doc)
                log.info("script", "Successfully applied migration %r",
                         migration)
        except Exception as e:
            error.handle_exception("script", e,
                                   "Failed applying migration %r", migration)
            continue
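migration_script() only assumes that application.get_migrations() yields (version, migration) pairs and that each migration exposes run() returning a Deferred. A hedged sketch of a conforming migration object; the class name and body are hypothetical, since the database API is not shown in these snippets:

from twisted.internet import defer

class ExampleMigration(object):
    # Hypothetical migration shaped to fit the loop above: the script
    # calls run(database) and then stores the paired version number.

    def run(self, database):
        # would perform the asynchronous data changes here; returning
        # an already-fired Deferred keeps the sketch self-contained
        return defer.succeed(None)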
def testDefaultLogging(self):
    keeper = DummyLogKeeper()
    current = log.get_default()
    log.set_default(keeper)
    self.addCleanup(log.set_default, current)

    log.log("foo", "1")
    log.debug("bar", "2", 42)
    log.info("spam", "3")
    log.warning("bacon", "4", 2, 3, 5)
    log.error("eggs", "4")

    self.assertEqual(
        keeper.entries,
        [
            (LogLevel.log, None, "foo", "1", (), 1),
            (LogLevel.debug, None, "bar", "2", (42,), 1),
            (LogLevel.info, None, "spam", "3", (), 1),
            (LogLevel.warning, None, "bacon", "4", (2, 3, 5), 1),
            (LogLevel.error, None, "eggs", "4", (), 1),
        ],
    )
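Both tests rely on a DummyLogKeeper that is not shown. A minimal sketch, assuming the keeper interface reduces to a single hook receiving the fields the assertions check (level, object, category, format, args, depth); the method name is hypothetical and the real feat ILogKeeper interface may differ:

class DummyLogKeeper(object):
    # Test double that records every log entry as a tuple.

    def __init__(self):
        self.entries = []

    def do_log(self, level, object, category, format, args, depth=1):
        # hypothetical hook name; appends exactly the tuple shape the
        # assertions above compare against
        self.entries.append((level, object, category, format, args, depth))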
def migration_script(connection):
    log.info("script", "Running the migration script.")
    index = yield connection.query_view(view.DocumentByType,
                                        group_level=2, parse_result=False)
    try:
        for (type_name, version), count in index:
            restorator = serialization.lookup(type_name)
            if not restorator:
                log.error('script',
                          "Failed to lookup the restorator for the "
                          "type name: %s. There are %d objects like this "
                          "in the database. They will not be migrated.",
                          type_name, count)
            if (IVersionAdapter.providedBy(restorator) and
                ((version is None and restorator.version > 1) or
                 (version is not None and version < restorator.version))):
                log.info('script', "I will migrate %d documents of the "
                         "type: %s from version %s to %d",
                         count, type_name, version, restorator.version)
                migrated = 0
                while migrated < count:
                    # paginate through the documents of this type in
                    # batches of 15
                    fetched = yield connection.query_view(
                        view.DocumentByType,
                        key=(type_name, version),
                        limit=15,
                        reduce=False,
                        include_docs=True)
                    migrated += len(fetched)
                    if not fetched:
                        break
                log.info("script", "Migrated %d documents of the type %s "
                         "from %s version to %s", migrated, type_name,
                         version, restorator.version)
    except Exception:
        error.handle_exception("script", None,
                               "Failed running migration script")
        raise
def clean_all_descriptors(connection, dry_run=False):
    rows = yield connection.query_view(view.DocumentByType,
                                       group_level=1, parse_results=False)
    to_delete = list()
    for row in rows:
        type_name = row[0][0]
        restorator = serialization.lookup(type_name)
        if not restorator:
            log.info('cleanup',
                     'Could not lookup restorator for type name: %s. '
                     'There are %s documents of this type.',
                     type_name, row[1])
            continue
        if IDescriptor.implementedBy(restorator):
            log.info('cleanup',
                     'I will delete %s documents of type name: %s',
                     row[1], type_name)
            to_delete.append(type_name)

    if dry_run:
        log.info("cleanup",
                 "Not deleting anything, this is just a dry run.")
        return

    for type_name in to_delete:
        keys = view.DocumentByType.fetch(type_name)
        keys['include_docs'] = False
        rows = yield connection.query_view(
            view.DocumentByType, parse_results=False, **keys)

        for (key, value, doc_id) in rows:
            try:
                yield connection.update_document(doc_id, update.delete)
            except Exception as e:
                log.error("cleanup",
                          "Cannot delete the documents of type %s with "
                          "ID: %s. Reason: %s", type_name, doc_id, e)
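A hedged usage sketch: calling clean_all_descriptors() with dry_run=True first only logs what would be deleted, which is a cheap sanity check before the destructive pass:

from twisted.internet import defer

@defer.inlineCallbacks
def wipe_descriptors(connection):
    yield clean_all_descriptors(connection, dry_run=True)   # log only
    yield clean_all_descriptors(connection, dry_run=False)  # delete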
def handle(self, addrport='', *args, **options):
    if args:
        raise CommandError('Usage is server %s' % self.args)
    if not addrport:
        self.addr = ''
        self.port = DEFAULT_PORT
    else:
        m = re.match(naiveip_re, addrport)
        if m is None:
            raise CommandError('"%s" is not a valid port number '
                               'or address:port pair.' % addrport)
        self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
        if not self.port.isdigit():
            raise CommandError("%r is not a valid port number." %
                               self.port)
        if self.addr and _ipv6:
            raise CommandError("ipv6 is not supported")
    if not self.addr:
        self.addr = '127.0.0.1'

    if options.get('apiprefix') and not options.get('prefix'):
        raise CommandError("--apiprefix can only be used in conjunction "
                           "with --prefix.")

    logger = logging.getLogger('feat')
    if options.get('featlog'):
        log.FluLogKeeper.init(options['featlog'])
        log.set_default(log.FluLogKeeper())
        log.info('featdjango',
                 'Using feat logging: %s' % (options['featlog'], ))
    else:
        log.set_default(log.PythonLogKeeper(logger))
        from feat.extern.log import log as flulog
        flulog.setPackageScrubList('featcredex', 'featdjango', 'feat')
        log.info('featdjango', 'Using python logging')

    log.info('featdjango', "Listening on %s:%s", self.addr, self.port)

    if os.environ.get("RUN_MAIN") == 'true':
        # this is how the django autoreloader lets the child process know
        # that it is a child process
        if options.get('elflog_path'):
            stats = webserver.ELFLog(options.get('elflog_path'),
                                     options.get('elflog_fields'))
        else:
            stats = None

        site = server.Server(self.addr, int(self.port),
                             prefix=options.get('prefix'),
                             apiprefix=options.get('apiprefix'),
                             web_statistics=stats,
                             thread_stats_file=options.get('stats_file'))
        reactor.callWhenRunning(site.initiate)
        reactor.addSystemEventTrigger('before', 'shutdown', site.cleanup)

        if options.get('use_reloader'):
            task = reloader.Reloader(reactor, site)
            reactor.callWhenRunning(task.run)

        reactor.run()

        if options.get('use_reloader'):
            if task.should_reload:
                sys.exit(3)
    else:
        # in the original process we just spawn the child worker as
        # many times as it tells us to
        try:
            while autoreload.restart_with_reloader() == 3:
                pass
        except KeyboardInterrupt:
            pass
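A hedged invocation sketch, assuming this command is registered in a Django project under the name server (the addrport argument and option names come straight from the code above):

# From the shell, typically:
#   python manage.py server 127.0.0.1:8000 --featlog=/tmp/feat.log
# Programmatic equivalent through Django's management API:
from django.core.management import call_command

call_command('server', '127.0.0.1:8000', featlog='/tmp/feat.log')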
def body(connection):
    log.info("script", "I will push %d documents.", len(_documents))
    d = create_db(connection)
    d.addCallback(lambda _: push_initial_data(connection))
    return d
def body(connection):
    log.info('script', "I will push %d documents.", len(_documents))
    d = create_db(connection)
    d.addCallback(defer.drop_param, push_initial_data, connection)
    return d
def push_initial_data(connection, overwrite=False, push_design_docs=True):
    documents = applications.get_initial_data_registry().itervalues()
    for doc in documents:
        try:
            yield connection.save_document(doc)
        except ConflictError:
            fetched = yield connection.get_document(doc.doc_id)
            if fetched.compare_content(doc):
                continue
            if not overwrite:
                log.warning('script', 'Document with id %s already exists! '
                            'Use --force, Luke!', doc.doc_id)
            else:
                log.info('script', 'Updating old version of the document, '
                         'id: %s', doc.doc_id)
                rev = yield connection.get_revision(doc.doc_id)
                doc.rev = rev
                yield connection.save_document(doc)

    if not push_design_docs:
        return

    design_docs = view.generate_design_docs()
    for design_doc in design_docs:
        try:
            yield connection.save_document(design_doc)
        except ConflictError:
            fetched = yield connection.get_document(design_doc.doc_id)
            if fetched.compare_content(design_doc):
                continue

            log.warning('script', 'The design document %s changed. '
                        'Use "feat-service upgrade" to push the new '
                        'revisions and restart the service in an organised '
                        'manner.', design_doc.doc_id)

            # calculate a diff for debugging purposes
            diffs = dict()
            for what in ('views', 'filters'):
                diffs[what] = dict()
                a = getattr(design_doc, what)
                b = getattr(fetched, what)

                diff = set(a.keys()) - set(b.keys())
                for key in diff:
                    diffs[what][key] = (a[key], None)

                diff = set(b.keys()) - set(a.keys())
                for key in diff:
                    diffs[what][key] = (None, b[key])

                for name in set(a.keys()).intersection(set(b.keys())):
                    if a[name] != b[name]:
                        diffs[what][name] = (a[name], b[name])

            def strcode(x):
                if not x:
                    return ''
                if isinstance(x, (str, unicode)):
                    return x
                return "\n".join("%s: %s" % t for t in x.items())

            for what in diffs:
                for name in diffs[what]:
                    log.info('script',
                             '%s code changed. \nOLD: \n%s\n\nNEW:\n%s\n',
                             what, strcode(diffs[what][name][1]),
                             strcode(diffs[what][name][0]))
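push_initial_data() is another yield-based coroutine; a hedged driving sketch, assuming an inlineCallbacks-style wrapper and an already-created connection:

from twisted.internet import defer

@defer.inlineCallbacks
def seed_database(connection):
    # overwrite=True corresponds to the --force flag mentioned in the
    # warning above: conflicting documents get their revision bumped
    # and are saved again
    yield push_initial_data(connection, overwrite=True)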
try:
    pid = os.fork()
except OSError, e:
    sys.stderr.write("Failed to fork: (%d) %s\n" % (e.errno, e.strerror))
    os._exit(1)
if pid > 0:
    # child process just exits
    sys.exit(0)
else:
    # grandchild runs the reactor and logs to an external log file
    log.FluLogKeeper.redirect_to(logfile, logfile)
    reactor.run()
    global _exit_code
    log.info('bootstrap', 'Process exiting with %d status', _exit_code)
    sys.exit(_exit_code)


def _bootstrap_success(value, queue):
    log.info("bootstrap", "Bootstrap finished successfully")
    if queue:
        # this informs the master process that it can terminate with
        # 0 status
        queue.put((0, ""))


def _bootstrap_failure(fail, agency, queue=None):
    error.handle_failure(agency, fail, 'Agency bootstrap failed, exiting.')
    reason = error.get_failure_message(fail)
    if queue:
        queue.put((1, reason))
def _update_old(connection, doc):
    doc_id = doc.doc_id
    log.info('script', 'Updating old version of the document, id: %s',
             doc_id)
    rev = yield connection.get_revision(doc_id)
    doc.rev = rev
    yield connection.save_document(doc)
def bootstrap(parser=None, args=None, descriptors=None):
    """Bootstrap a feat process, handling command line arguments.
    @param parser: the option parser to use; more options will be
                   added to the parser; if not specified or None
                   a new one will be created
    @type  parser: optparse.OptionParser or None
    @param args: the command line arguments to parse; if not specified
                 or None, sys.argv[1:] will be used
    @type  args: [str()] or None
    @param descriptors: the descriptors of the agents to start in
                        addition to the host agent; if not specified
                        or None no additional agents will be started
    @type  descriptors: [Descriptor()] or None

    @return: the deferred of the bootstrap chain
    @rtype:  defer.Deferred()"""
    tee = log.init()
    # The purpose of having a log buffer here is to be able to dump the
    # log lines to a journal after establishing connection with it.
    # This is done in stage_configure() of the net agency Startup
    # procedure.
    tee.add_keeper('buffer', log.LogBuffer(limit=10000))

    # use the resolver from twisted.names instead of the default;
    # the reason for this is that ThreadedResolver behaves strangely
    # after the reconnection - raises the DNSLookupError for names
    # which have been resolved while there was no connection
    resolver.installResolver(reactor)

    if parser is None:
        parser = optparse.OptionParser()
        options.add_options(parser)
    try:
        opts, args = check_options(*parser.parse_args(args))
    except Exception as e:
        error.handle_exception('bootstrap', e, "Failed parsing config")
        sys.exit(1)

    if opts.standalone:
        cls = standalone.Agency
    else:
        cls = net_agency.Agency
    config = config_module.Config()
    config.load(os.environ, opts)
    agency = cls(config)

    applications.load('feat.agents.application', 'feat')
    applications.load('feat.gateway.application', 'featmodels')

    d = defer.Deferred()
    reactor.callWhenRunning(d.callback, None)

    if not opts.standalone:
        # specific to running a normal agency
        hostdef = opts.hostdef

        if opts.hostres or opts.hostcat or opts.hostports:
            from feat.agents.common import host
            hostdef = host.HostDef()
            for resdef in opts.hostres:
                parts = resdef.split(":", 1)
                name = parts[0]
                value = 1
                if len(parts) > 1:
                    try:
                        value = int(parts[1])
                    except ValueError:
                        raise OptionError(
                            "Invalid host resource: %s" % resdef), \
                            None, sys.exc_info()[2]
                hostdef.resources[name] = value

            for catdef in opts.hostcat:
                name, value = check_category(catdef)
                hostdef.categories[name] = value

            if opts.hostports:
                hostdef.ports_ranges = dict()
                for ports in opts.hostports:
                    group, start, stop = tuple(ports.split(":"))
                    hostdef.ports_ranges[group] = (int(start), int(stop))

        agency.set_host_def(hostdef)

        d.addCallback(defer.drop_param, agency.initiate)
        for desc, kwargs, name in opts.agents:
            d.addCallback(defer.drop_param, agency.add_static_agent,
                          desc, kwargs, name)
    else:
        # standalone specific
        kwargs = opts.standalone_kwargs or dict()
        to_spawn = opts.agent_id or opts.agents[0][0]
        d.addCallback(defer.drop_param, agency.initiate)
        d.addCallback(defer.drop_param, agency.spawn_agent,
                      to_spawn, **kwargs)

    queue = None
    if opts.agency_daemonize:
        import multiprocessing
        queue = multiprocessing.Queue()

    d.addCallbacks(_bootstrap_success, _bootstrap_failure,
                   callbackArgs=(queue, ), errbackArgs=(agency, queue))

    if not opts.agency_daemonize:
        reactor.run()
    else:
        logname = "%s.%s.log" % ('feat', agency.agency_id)
        logfile = os.path.join(config.agency.logdir, logname)
        log.info("bootstrap", "Daemon process will be logging to %s",
                 logfile)

        try:
            pid = os.fork()
        except OSError, e:
            sys.stderr.write("Failed to fork: (%d) %s\n"
                             % (e.errno, e.strerror))
            os._exit(1)

        if pid > 0:
            # original process waits for information about what status
            # code to use on exit
            log.info('bootstrap',
                     "Waiting for daemon process to initialize the agency")
            try:
                exit_code, reason = queue.get(timeout=20)
            except multiprocessing.queues.Empty:
                log.error('bootstrap',
                          "20 seconds timeout expired waiting for the "
                          "agency in the child process to initiate.")
                os._exit(1)
            else:
                log.info('bootstrap', "Process exiting with %d status",
                         exit_code)
                if exit_code:
                    log.info('bootstrap', 'Reason for failure: %s', reason)
                sys.exit(exit_code)
        else:
            # child process performs the second fork
            try:
                pid = os.fork()
            except OSError, e:
                sys.stderr.write("Failed to fork: (%d) %s\n"
                                 % (e.errno, e.strerror))
                os._exit(1)
            if pid > 0:
                # child process just exits
                sys.exit(0)
            else:
                # grandchild runs the reactor and logs to an external
                # log file
                log.FluLogKeeper.redirect_to(logfile, logfile)
                reactor.run()
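The daemonization branch above is the classic Unix double fork. A generic, self-contained sketch of the pattern (note the bootstrap code relies on its own forks plus FluLogKeeper redirection rather than this exact helper):

import os
import sys

def daemonize():
    # first fork: the parent exits, the child keeps running in background
    if os.fork() > 0:
        sys.exit(0)
    # start a new session so the process detaches from the terminal
    os.setsid()
    # second fork: the session leader exits, so the grandchild can never
    # reacquire a controlling terminal
    if os.fork() > 0:
        sys.exit(0)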