def do_cleanup(connection, host_agent_id):
    '''
    Performs cleanup after the host agent who left his descriptor in
    database. Deletes the descriptor and the descriptors of the partners
    he was hosting.

    @param connection: database connection used to fetch and delete
        descriptor documents
    @param host_agent_id: doc_id of the host agent whose leftovers are
        being removed

    NOTE(review): this is a yield-based coroutine (presumably
    inlineCallbacks-style — confirm with the decorator at the call site).
    '''
    desc = yield safe_get(connection, host_agent_id)
    # Only proceed if we actually fetched a host descriptor; safe_get
    # presumably returns None/other on failure — TODO confirm.
    if isinstance(desc, host.Descriptor):
        for partner in desc.partners:
            partner_desc = yield safe_get(connection, partner.recipient.key)
            if partner_desc:
                # Find the partner's own 'host' partner to decide whether
                # this agent was hosted by us or by some other host agent.
                host_part = first(x for x in partner_desc.partners
                                  if x.role == 'host')
                if host_part is None:
                    # No host partner at all: leave the descriptor alone.
                    log.warning('host_restart',
                                'Agent id: %s type: %s did not have any '
                                'host partner. So we are leaving it be.',
                                partner_desc.doc_id, partner_desc.type_name)
                elif host_part.recipient.key == host_agent_id:
                    # The partner was hosted by the departed host agent:
                    # its descriptor is orphaned, delete it.
                    log.info('host_restart',
                             "Deleting document with ID: %s",
                             partner_desc.doc_id)
                    yield connection.delete_document(partner_desc)
                else:
                    # Hosted by a different host agent: keep it, but it
                    # will never get a goodbye notification from us.
                    log.warning('host_restart',
                                "Not deleting descriptor of the agent id: %s, "
                                "agent_type: %s, as it seems to be hosted by "
                                "the host agent: %s. Although keep in mind "
                                "that he will not receive the goodbye "
                                "notification from us!",
                                partner_desc.doc_id, partner_desc.type_name,
                                host_part.recipient.key)
        # Finally remove the host agent's own descriptor.
        log.info('host_restart', "Deleting document with ID: %s", desc.doc_id)
        yield connection.delete_document(desc)
def load(module_name, name):
    """Import the application object ``name`` from ``module_name`` and
    register it in the application registry.

    Reuses the module if it is already present in ``sys.modules`` (with a
    warning), otherwise imports it.

    @raise ValueError: when the module lacks the attribute, or the
        attribute does not provide IApplication.
    @raise Exception: whatever ``application.load()`` raised; in that case
        the application is unloaded first and the error re-raised.
    """
    log.info('application',
             "Importing application %s from module %s", name, module_name)

    module = sys.modules.get(module_name)
    if not module:
        module = reflect.named_module(module_name)
    else:
        log.warning('application',
                    "Application module %s has already been loaded. ",
                    module_name)

    application = getattr(module, name, None)
    if application is None:
        raise ValueError('Module %s has no attribute %s'
                         % (module_name, name))
    if not IApplication.providedBy(application):
        raise ValueError('Variable %s.%s should provide IApplication interface'
                         % (module_name, name))

    try:
        application.load()
    except Exception as e:
        error.handle_exception('application', e,
                               'Error loading application: %s',
                               application.name)
        # Roll back the half-done load before propagating the failure.
        application.unload()
        raise
    # Reached only on a successful load() (the except branch re-raises).
    get_application_registry().register(application)
    log.info('application', "Loading application %s complete.", name)
def register(view):
    """Adapt ``view`` to IViewFactory and store it in the module-level
    registry under its name, warning when an existing entry is replaced.

    @return: the adapted view factory.
    """
    global _registry
    factory = IViewFactory(view)
    if factory.name in _registry:
        # Overwriting is allowed but noisy, so collisions are noticed.
        log.warning('view-registry', 'View with the name %s is already '
                    'registered and points to %r. Overwriting!',
                    factory.name, _registry[factory.name])
    _registry[factory.name] = factory
    return factory
def register(view):
    """Adapt ``view`` to IViewFactory and register it by name.

    Overwrites (with a warning) any previously registered view of the
    same name. Returns the adapted factory.
    """
    global _registry
    view = IViewFactory(view)
    if view.name in _registry:
        # Overwriting silently would hide collisions; warn instead.
        log.warning(
            "view-registry",
            "View with the name %s is already "
            "registered and points to %r. Overwriting!",
            view.name,
            _registry[view.name],
        )
    _registry[view.name] = view
    return view
def cancel_connector(d):
    """Abort an in-progress connection attempt tracked by the Deferred.

    Expects ``d.connector`` to reference the (presumably Twisted)
    connector; if the attribute is missing only a warning is logged.
    When the connector is still in the 'connecting' state it is stopped
    and the Deferred is errbacked with a CancelledError describing how
    long the attempt had been running.
    """
    if not hasattr(d, 'connector'):
        log.warning('httpclient', "The Deferred called with cancel_connector()"
                    " doesn't have the reference to the connector set.")
        return
    if d.connector.state == 'connecting':
        timeoutCall = d.connector.timeoutID
        # timeoutCall.getTime() is the absolute time at which the connect
        # timeout would fire; subtracting the timeout recovers the start
        # time, giving the elapsed duration of the attempt.
        msg = ('Connection to %s:%s was cancelled %.3f '
               'seconds after it was initialized'
               % (d.connector.host, d.connector.port,
                  time.time() - timeoutCall.getTime()
                  + d.connector.timeout))
        d.connector.stopConnecting()
        # NOTE(review): _suppressAlreadyCalled looks like a private
        # Twisted Deferred flag used to avoid AlreadyCalledError when the
        # connector also fires the Deferred — confirm against the Twisted
        # version in use.
        d._suppressAlreadyCalled = True
        d.errback(defer.CancelledError(msg))
def testDefaultLogging(self):
    """Check that every module-level log helper forwards level, category,
    format and arguments to the default log keeper.

    Fix: the previous version replaced the global default keeper and
    never restored it, leaking the dummy keeper into subsequent tests.
    The original default is now saved and restored via addCleanup.
    """
    keeper = DummyLogKeeper()
    # Save the current default so the global state is restored even if
    # the assertions below fail.
    current = log.get_default()
    log.set_default(keeper)
    self.addCleanup(log.set_default, current)
    log.log("foo", "1")
    log.debug("bar", "2", 42)
    log.info("spam", "3")
    log.warning("bacon", "4", 2, 3, 5)
    log.error("eggs", "4")
    self.assertEqual(keeper.entries,
                     [(LogLevel.log, None, 'foo', '1', (), 1),
                      (LogLevel.debug, None, 'bar', '2', (42, ), 1),
                      (LogLevel.info, None, 'spam', '3', (), 1),
                      (LogLevel.warning, None, 'bacon', '4', (2, 3, 5), 1),
                      (LogLevel.error, None, 'eggs', '4', (), 1)])
def stop(processName, rundir='/tmp', processType=PROCESS_TYPE): pid = get_pid(rundir, processType, processName) if not pid: print "%s %s not running" % (processType, processName) return startClock = time.clock() termClock = startClock + 20 killClock = termClock + 10 log.debug("run", 'stopping process with pid %d', pid) if not term_pid(pid): log.warning("run", 'No process with pid %d', pid) return 1 # wait for the kill while (check_pid_running(pid)): if time.clock() > termClock: log.warning("run", "Process with pid %d has not responded " "to TERM for %d seconds, killing", pid, 20) kill_pid(pid) # so it does not get triggered again termClock = killClock + 1.0 if time.clock() > killClock: log.warning("run", "Process with pid %d has not responded to " "KILL for %d seconds, stopping", pid, 10) return 1 print "%s %s with pid %d stopped" % (processType, processName, pid) return 0
def testDefaultLogging(self):
    """Verify that the module-level logging helpers deliver level,
    category, format string and extra arguments to the default keeper,
    restoring the previous default keeper on cleanup."""
    dummy = DummyLogKeeper()
    previous = log.get_default()
    log.set_default(dummy)
    # Restore the original keeper even when an assertion fails.
    self.addCleanup(log.set_default, previous)

    log.log("foo", "1")
    log.debug("bar", "2", 42)
    log.info("spam", "3")
    log.warning("bacon", "4", 2, 3, 5)
    log.error("eggs", "4")

    expected = [
        (LogLevel.log, None, "foo", "1", (), 1),
        (LogLevel.debug, None, "bar", "2", (42,), 1),
        (LogLevel.info, None, "spam", "3", (), 1),
        (LogLevel.warning, None, "bacon", "4", (2, 3, 5), 1),
        (LogLevel.error, None, "eggs", "4", (), 1),
    ]
    self.assertEqual(dummy.entries, expected)
def _verify_callback(self, connection, x509, errnum, errdepth, ok): if not ok: log.warning("ssl-context", "Invalid certificate: %s", x509.get_subject()) return False return True
def display_warning(f):
    """Log a warning describing why database creation failed.

    @param f: failure object whose ``value`` carries the reason.
    """
    reason = f.value
    log.warning('script', 'Creating of database failed, reason: %s', reason)
def cleanup(self, ctime=None):
    '''
    This method is called iteratively by the connection owning it.
    Its job is to control the size of cache and remove old entries.

    @param ctime: current time (seconds); defaults to time.time().
        Injectable presumably for testing — TODO confirm.
    '''
    ctime = ctime or time.time()
    # Track how often cleanup runs, for the rolling average.
    if self.last_cleanup:
        self.average_cleanup_time.add_point(ctime - self.last_cleanup)
    self.last_cleanup = ctime

    log.debug('couchdb', "Running cache cleanup().")
    # first remove already invalidated entries, used this iteration to
    # build up the map of usage
    expire = list()
    # [(num_accessed / time_in_cache, size, ident)]
    usage = list()
    actual_size = 0
    for ident, entry in self.iteritems():
        if entry.state is EntryState.invalid:
            # Already invalidated: always evict.
            expire.append(ident)
            continue
        elif entry.state is EntryState.waiting:
            # Still being fetched: never evict, and it has no size yet.
            continue
        else:
            # EntryState.ready
            actual_size += entry.size
            # Clamp to >= 1 second so brand-new entries don't divide by
            # a tiny interval and look infinitely "hot".
            time_in_cache = max([ctime - entry.cached_at, 1])
            # Negative size so that, at equal access density, the sort
            # places bigger entries first (they free more space).
            usage.append((
                float(entry.num_accessed) / time_in_cache,
                -entry.size, ident))
    self.average_size.add_point(actual_size)
    if self.average_size.get_value() > 3 * self.desired_size:
        log.warning("couchdb", "The average size of Cache is %.2f times "
                    "bigger than the desired size of: %s. It might be "
                    "a good idea to rethink the caching strategy.",
                    self.average_size.get_value() / self.desired_size,
                    self.desired_size)

    if actual_size > self.desired_size:
        log.debug('couchdb', "I will have to cleanup some data, "
                  "the actual size is: %s, the desired limit is %s.",
                  actual_size, self.desired_size)
        # The usage list is sorted in order of things I will
        # be removing first. The important factor is "density" of usages
        # in time.
        usage.sort()
        size_to_delete = 0
        num_to_delete = 0
        # Evict least-used entries until under the limit, but always
        # keep at least one entry (len(usage) > 1).
        while (len(usage) > 1 and
               actual_size - size_to_delete > self.desired_size):
            _, negative_size, ident = usage.pop(0)
            size_to_delete += -negative_size
            num_to_delete += 1
            expire.append(ident)
        log.debug('couchdb', "I will remove %d entries from cache of the "
                  "size of %s to compensate the size.",
                  num_to_delete, size_to_delete)

    # Actually drop everything scheduled for eviction.
    for ident in expire:
        del self[ident]
def push_initial_data(connection, overwrite=False, push_design_docs=True):
    """Save the registered initial-data documents (and optionally the
    design documents) into the database.

    Conflicting initial documents are skipped when their content matches;
    otherwise they are overwritten only when ``overwrite`` is set.
    Changed design documents are never overwritten here — a warning
    with a views/filters diff is logged instead.

    NOTE(review): yield-based coroutine (presumably inlineCallbacks —
    confirm with the decorator at the definition site).
    """
    documents = applications.get_initial_data_registry().itervalues()
    for doc in documents:
        try:
            yield connection.save_document(doc)
        except ConflictError:
            # A document with this id already exists; compare content to
            # decide whether anything actually changed.
            fetched = yield connection.get_document(doc.doc_id)
            if fetched.compare_content(doc):
                continue
            if not overwrite:
                log.warning('script', 'Document with id %s already exists! '
                            'Use --force, Luck!', doc.doc_id)
            else:
                log.info('script', 'Updating old version of the document, '
                         'id: %s', doc.doc_id)
                # Adopt the stored revision so the save is accepted.
                rev = yield connection.get_revision(doc.doc_id)
                doc.rev = rev
                yield connection.save_document(doc)

    if not push_design_docs:
        return
    design_docs = view.generate_design_docs()
    for design_doc in design_docs:
        try:
            yield connection.save_document(design_doc)
        except ConflictError:
            fetched = yield connection.get_document(design_doc.doc_id)
            if fetched.compare_content(design_doc):
                continue
            # Never force-push changed design documents from here; that
            # is the job of the upgrade procedure.
            log.warning('script', 'The design document %s changed. '
                        'Use "feat-service upgrade" to push the new revisions '
                        'and restart the service in organised manner.',
                        design_doc.doc_id)
            # calculate a diff for debugging purpose
            # diffs maps section -> {name: (new_code, old_code)}
            diffs = dict()
            for what in ('views', 'filters'):
                diffs[what] = dict()
                a = getattr(design_doc, what)
                b = getattr(fetched, what)
                # Entries only in the generated (new) document.
                diff = set(a.keys()) - set(b.keys())
                for key in diff:
                    diffs[what][key] = (a[key], None)
                # Entries only in the stored (old) document.
                diff = set(b.keys()) - set(a.keys())
                for key in diff:
                    diffs[what][key] = (None, b[key])
                # Entries present in both but with different code.
                for name in set(a.keys()).intersection(set(b.keys())):
                    if a[name] != b[name]:
                        diffs[what][name] = (a[name], b[name])

            def strcode(x):
                # Render a code entry (string or mapping) for logging.
                if not x:
                    return ''
                if isinstance(x, (str, unicode)):
                    return x
                return "\n".join("%s: %s" % t for t in x.items())

            for what in diffs:
                for name in diffs[what]:
                    # Tuple order is (new, old), hence index 1 is OLD
                    # and index 0 is NEW.
                    log.info('script',
                             '%s code changed. \nOLD: \n%s\n\nNEW:\n%s\n',
                             what, strcode(diffs[what][name][1]),
                             strcode(diffs[what][name][0]))
def display_warning(f):
    """Log a warning for a failed database creation, except when the
    failure merely says the database already exists.

    @param f: failure object whose ``value`` carries the reason.
    """
    reason = f.value
    if 'file_exists' not in str(reason):
        log.warning('script', 'Creating of database failed, reason: %s',
                    reason)
def display_warning(f):
    """Log a warning describing why database creation failed.

    @param f: failure object whose ``value`` carries the reason.
    """
    log.warning("script", "Creating of database failed, reason: %s", f.value)