def view_aterator(connection, callback, view, view_keys=dict(),
                  args=tuple(), kwargs=dict(), per_page=15,
                  consume_errors=True):
    '''
    Asynchronous iterator for the view. Downloads a view in pages
    and calls the callback for each row.
    This helps avoid transferring data in huge chunks.
    '''
    skip = 0
    while True:
        keys = dict(view_keys)
        keys.update(dict(skip=skip, limit=per_page))
        records = yield connection.query_view(view, **keys)
        log.debug('view_aterator', "Fetched %d records of the view: %s",
                  len(records), view.name)
        skip += len(records)
        for record in records:
            try:
                yield callback(connection, record, *args, **kwargs)
            except Exception as e:
                error.handle_exception(
                    'view_aterator', e,
                    "Callback %s failed its iteration on a row %r",
                    callback.__name__, record)
                if not consume_errors:
                    raise e
        if not records:
            break

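# A minimal usage sketch, not part of the original module; `MyView` and
# `connection` are hypothetical, and the generator above is assumed to be
# driven by feat's Deferred-based cursor machinery.
def _print_rows_example(connection):

    def on_row(connection, row):
        # Called once per row; exceptions raised here are logged and
        # swallowed because consume_errors defaults to True.
        print row

    # Fetch MyView 50 rows at a time instead of in one huge response.
    return view_aterator(connection, on_row, MyView, per_page=50)
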
def stop(processName, rundir='/tmp', processType=PROCESS_TYPE):
    pid = get_pid(rundir, processType, processName)
    if not pid:
        print "%s %s not running" % (processType, processName)
        return
    startClock = time.clock()
    termClock = startClock + 20
    killClock = termClock + 10

    log.debug("run", 'stopping process with pid %d', pid)
    if not term_pid(pid):
        log.warning("run", 'No process with pid %d', pid)
        return 1

    # wait for the kill
    while check_pid_running(pid):
        if time.clock() > termClock:
            log.warning("run", "Process with pid %d has not responded "
                        "to TERM for %d seconds, killing", pid, 20)
            kill_pid(pid)
            # so it does not get triggered again
            termClock = killClock + 1.0

        if time.clock() > killClock:
            log.warning("run", "Process with pid %d has not responded to "
                        "KILL for %d seconds, stopping", pid, 10)
            return 1
    print "%s %s with pid %d stopped" % (processType, processName, pid)
    return 0

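# Hedged usage sketch; 'worker', 'master' and the rundir are placeholder
# values. stop() first sends TERM, escalates to KILL after 20 seconds and
# gives up 10 seconds after that.
def _stop_worker_example():
    ret = stop('master', rundir='/var/run/feat', processType='worker')
    if ret == 1:
        print "worker did not shut down cleanly"
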
def _get_pidpath(rundir, type, name=None):
    """
    Get the full path to the pid file for the given process type and name.
    """
    path = os.path.join(rundir, '%s.pid' % type)
    if name:
        path = os.path.join(rundir, '%s.%s.pid' % (type, name))
    log.debug('common', 'get_pidpath for type %s, name %r: %s' % (
        type, name, path))
    return path

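# Illustration of the naming scheme the function implements:
# '<type>.pid' when no name is given, '<type>.<name>.pid' otherwise.
def _pidpath_example():
    assert _get_pidpath('/tmp', 'feat') == '/tmp/feat.pid'
    assert _get_pidpath('/tmp', 'feat', 'master') == '/tmp/feat.master.pid'
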
# Assumed module-level switch (hypothetical name) gating message tracing;
# the guard below must test a flag, not the function object itself.
_debug_messages = True

def debug_message(prefix, message, postfix=""):
    if not _debug_messages:
        return
    mtype = type(message).__name__
    mid = getattr(message, "message_id", None)
    mrec = getattr(message, "recipient", None)
    mrec = mrec.key if mrec is not None else None
    mrep = getattr(message, "reply_to", None)
    mrep = mrep.key if mrep is not None else None
    log.debug("messages",
              "%s Type: %s; Id: %s; Recipient: %s; Reply-To: %s; %s",
              prefix, mtype, mid, mrec, mrep, postfix)

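# Hypothetical sketch of what the tracer logs; _FakeMessage stands in for
# whatever feat message class is actually passed around.
class _FakeMessage(object):
    message_id = 'abc123'
    recipient = None
    reply_to = None

def _trace_example():
    # Logs roughly:
    # ">>> Type: _FakeMessage; Id: abc123; Recipient: None; Reply-To: None; "
    debug_message(">>>", _FakeMessage())
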
def delete_pidfile(rundir, type=PROCESS_TYPE, name=None, force=False):
    """
    Delete the pid file in the run directory, using the given process type
    and process name for the filename.

    @param force: if errors due to the file not existing should be ignored
    @type  force: bool

    @rtype:   str
    @returns: full path to the pid file that was deleted
    """
    log.debug(type, 'deleting pid file')
    path = _get_pidpath(rundir, type, name)
    try:
        os.unlink(path)
    except OSError, e:
        if e.errno == errno.ENOENT and force:
            pass
        else:
            raise
    return path

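# Usage sketch (path and type are illustrative): remove a stale pid file,
# ignoring the case where it never existed.
def _cleanup_pidfile_example():
    path = delete_pidfile('/var/run/feat', type='worker', force=True)
    print "removed (or already absent): %s" % path
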
def load(module_name, name):
    log.log("application",
            "Importing application %s from module %s", name, module_name)
    module = sys.modules.get(module_name)
    if module:
        log.log("application",
                "Application module %s has already been loaded.",
                module_name)
    else:
        module = reflect.named_module(module_name)
    application = getattr(module, name, None)
    if application is None:
        raise ValueError("Module %s has no attribute %s"
                         % (module_name, name))
    if not IApplication.providedBy(application):
        raise ValueError("Variable %s.%s should provide the IApplication "
                         "interface" % (module_name, name))
    try:
        application.load()
    except Exception as e:
        error.handle_exception("application", e,
                               "Error loading application: %s",
                               application.name)
        application.unload()
        raise
    else:
        get_application_registry().register(application)
    log.debug("application", "Loading application %s complete.", name)

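# Hedged example; 'myproject.application' and 'application' are hypothetical
# names for a module attribute providing IApplication.
def _load_application_example():
    try:
        load('myproject.application', 'application')
    except ValueError, e:
        # Raised when the module lacks the attribute or the attribute does
        # not provide IApplication.
        print "cannot load application: %s" % e
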
def getContext(self):
    ctx = _create_ssl_context(self._key_filename, self._cert_filename,
                              self._verify_ca_filename, self._p12_filename,
                              self._verify_ca_from_p12, self._key_pass,
                              self._p12_pass)
    opts = 0
    if self._verify_ca_from_p12 or self._verify_ca_filename is not None:
        log.debug("ssl-context", "getContext: setting VERIFY_PEER")
        opts |= SSL.VERIFY_PEER
    if self._enforce_cert:
        log.debug("ssl-context",
                  "getContext: setting VERIFY_FAIL_IF_NO_PEER_CERT")
        opts |= SSL.VERIFY_FAIL_IF_NO_PEER_CERT
    if opts != 0:
        ctx.set_verify(opts, self._verify_callback)
    return ctx

def __init__(self, key_filename=None, cert_filename=None,
             verify_ca_filename=None, p12_filename=None,
             verify_ca_from_p12=False, key_pass=None, p12_pass=None,
             enforce_cert=False):
    self._cert_filename = cert_filename
    self._key_filename = key_filename
    self._p12_filename = p12_filename
    self._verify_ca_filename = verify_ca_filename
    self._verify_ca_from_p12 = verify_ca_from_p12
    self._key_pass = key_pass
    self._p12_pass = p12_pass
    self._enforce_cert = enforce_cert
    log.debug("ssl-context",
              "Created BaseContextFactory, p12 %s, verify_ca_from_p12 %r",
              self._p12_filename, self._verify_ca_from_p12)

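# Hedged usage sketch; the p12 path is a placeholder. Combining
# verify_ca_from_p12 with enforce_cert makes getContext() request
# VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, i.e. a peer certificate
# becomes mandatory.
def _context_factory_example():
    factory = BaseContextFactory(p12_filename='/etc/feat/client.p12',
                                 verify_ca_from_p12=True,
                                 enforce_cert=True)
    return factory.getContext()
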
def testDefaultLogging(self):
    keeper = DummyLogKeeper()
    current = log.get_default()
    log.set_default(keeper)
    self.addCleanup(log.set_default, current)

    log.log("foo", "1")
    log.debug("bar", "2", 42)
    log.info("spam", "3")
    log.warning("bacon", "4", 2, 3, 5)
    log.error("eggs", "4")

    self.assertEqual(
        keeper.entries,
        [
            (LogLevel.log, None, "foo", "1", (), 1),
            (LogLevel.debug, None, "bar", "2", (42,), 1),
            (LogLevel.info, None, "spam", "3", (), 1),
            (LogLevel.warning, None, "bacon", "4", (2, 3, 5), 1),
            (LogLevel.error, None, "eggs", "4", (), 1),
        ],
    )

def signal(sig, action):
    """
    The point of this module and method is to decouple signal handlers
    from each other. The standard way to deal with handlers is to always
    store the old handler and call it, which creates a chain of handlers
    and makes it impossible to remove a handler later.

    This method behaves like signal.signal() from the standard python
    library. It always returns SIG_DFL, indicating that the new handler
    is not supposed to call the old one.
    """
    assert callable(action), ("Second argument of signal() needs to be a "
                              "callable, got %r instead" % (action, ))
    global _handlers
    _install_handler(sig)
    if action in _handlers[sig]:
        log.debug('signal', "Handler for signal %s already registered. %r",
                  sig, action)
        return SIG_DFL
    _handlers[sig].append(action)
    return SIG_DFL

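# Usage sketch: two independent TERM handlers registered through the
# decoupling wrapper; neither needs to know about the other. The handler
# bodies are illustrative.
import signal as _stdlib_signal

def _on_term_flush(signum, frame):
    print "flushing buffers"

def _on_term_report(signum, frame):
    print "reporting shutdown"

def _register_handlers_example():
    signal(_stdlib_signal.SIGTERM, _on_term_flush)
    signal(_stdlib_signal.SIGTERM, _on_term_report)
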
def write_error(doc, obj, *args, **kwargs):
    result = {}
    result[u"type"] = u"error"
    result[u"error"] = unicode(obj.error_type.name)
    if obj.error_code is not None:
        result[u"code"] = int(obj.error_code)
    if obj.message is not None:
        result[u"message"] = obj.message
    if obj.subjects is not None:
        result[u"subjects"] = list(obj.subjects)
    if obj.reasons:
        result[u"reasons"] = dict((k, str(v))
                                  for k, v in obj.reasons.iteritems())
    if obj.debug is not None:
        result[u"debug"] = obj.debug
    if obj.stamp:
        result[u"stamp"] = obj.stamp
        log.debug('application/json',
                  'Wrote error response with debug stamp: %s', obj.stamp)
    log.debug('application/json', 'Error: %s', result[u'error'])
    if obj.message:
        log.debug('application/json', 'Message: %s', obj.message)
    render_json(result, doc)

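# Hypothetical error object illustrating which attributes write_error()
# reads; rendering it would produce roughly:
#     {"type": "error", "error": "http", "code": 404, "message": "gone"}
class _FakeHttpError(object):

    class error_type(object):
        name = 'http'

    error_code = 404
    message = u'gone'
    subjects = None
    reasons = None
    debug = None
    stamp = None
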
def bootstrap(parser=None, args=None, descriptors=None):
    """Bootstrap a feat process, handling command line arguments.

    @param parser: the option parser to use; more options will be added
        to the parser; if not specified or None a new one will be created
    @type  parser: optparse.OptionParser or None
    @param args: the command line arguments to parse; if not specified
        or None, sys.argv[1:] will be used
    @type  args: [str()] or None
    @param descriptors: the descriptors of the agents to start in addition
        to the host agent; if not specified or None no additional agents
        will be started
    @type  descriptors: [Descriptor()] or None

    @return: the deferred of the bootstrap chain
    @rtype:  defer.Deferred()
    """
    parser = parser or optparse.OptionParser()
    add_options(parser)
    net_agency.add_options(parser)

    with _Bootstrap(parser=parser, args=args) as bootstrap:
        agency = bootstrap.agency
        opts = bootstrap.opts
        args = bootstrap.args
        opts, args = check_options(opts, args)
        descriptors = descriptors or []
        d = agency.initiate()
        if not opts.standalone:
            # specific to running a normal agency
            for name in opts.agents:
                factory = descriptor.lookup(name)
                if factory is None:
                    msg = "No descriptor factory found for agent %s" % name
                    raise OptionError(msg)
                descriptors.append(factory())

            hostdef = opts.hostdef
            if opts.hostres or opts.hostcat:
                hostdef = host.HostDef()
                for resdef in opts.hostres:
                    parts = resdef.split(":", 1)
                    name = parts[0]
                    value = 1
                    if len(parts) > 1:
                        try:
                            value = int(parts[1])
                        except ValueError:
                            raise OptionError("Invalid host resource: %s"
                                              % resdef)
                    hostdef.resources[name] = value

                for catdef in opts.hostcat:
                    name, value = check_category(catdef)
                    hostdef.categories[name] = value

            agency.set_host_def(hostdef)

            for desc in descriptors:
                log.debug("feat", "Starting agent with descriptor %r", desc)
                d.addCallback(defer.drop_param, agency.spawn_agent, desc)
        else:
            # standalone specific
            kwargs = opts.standalone_kwargs or dict()
            d.addCallback(defer.drop_param, agency.spawn_agent,
                          opts.agents[0], **kwargs)
        return d

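# Hedged sketch of a launcher built on bootstrap(); it assumes the usual
# Twisted reactor drives the returned deferred until the agency shuts down.
def _run_feat_example():
    from twisted.internet import reactor
    d = bootstrap()
    d.addErrback(lambda f: f.printTraceback())
    reactor.run()
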
def cleanup(self, ctime=None):
    '''
    This method is called iteratively by the connection owning it.
    Its job is to control the size of the cache and remove old entries.
    '''
    ctime = ctime or time.time()
    if self.last_cleanup:
        self.average_cleanup_time.add_point(ctime - self.last_cleanup)
    self.last_cleanup = ctime

    log.debug('couchdb', "Running cache cleanup().")
    # First remove the entries which have already been invalidated, and
    # use this iteration to build up the map of usage.
    expire = list()
    # [(num_accessed / time_in_cache, -size, ident)]
    usage = list()
    actual_size = 0

    for ident, entry in self.iteritems():
        if entry.state is EntryState.invalid:
            expire.append(ident)
            continue
        elif entry.state is EntryState.waiting:
            continue
        else:  # EntryState.ready
            actual_size += entry.size
            time_in_cache = max([ctime - entry.cached_at, 1])
            usage.append((
                float(entry.num_accessed) / time_in_cache,
                -entry.size,
                ident))

    self.average_size.add_point(actual_size)
    if self.average_size.get_value() > 3 * self.desired_size:
        log.warning("couchdb", "The average size of the cache is %.2f times "
                    "bigger than the desired size of: %s. It might be "
                    "a good idea to rethink the caching strategy.",
                    self.average_size.get_value() / self.desired_size,
                    self.desired_size)

    if actual_size > self.desired_size:
        log.debug('couchdb', "I will have to clean up some data, "
                  "the actual size is: %s, the desired limit is %s.",
                  actual_size, self.desired_size)
        # The usage list is sorted in the order entries will be removed.
        # The important factor is the "density" of accesses in time.
        usage.sort()
        size_to_delete = 0
        num_to_delete = 0
        # Keep at least one entry; pop the least used entries until we are
        # back under the desired size.
        while (len(usage) > 1 and
               actual_size - size_to_delete > self.desired_size):
            _, negative_size, ident = usage.pop(0)
            size_to_delete += -negative_size
            num_to_delete += 1
            expire.append(ident)
        log.debug('couchdb', "I will remove %d entries of a total size "
                  "of %s from the cache to get back under the limit.",
                  num_to_delete, size_to_delete)

    for ident in expire:
        del self[ident]

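# Worked illustration of the eviction score with made-up numbers: an entry
# accessed 30 times over 60 s in cache has a density of 0.5 accesses/s,
# one accessed 4 times over 2 s has 2.0. usage.sort() is ascending and
# entries are popped from the front, so the cold entry goes first; the
# negated size breaks density ties in favour of evicting bigger entries.
def _density_example():
    cold = (30 / 60.0, -1024, 'doc-a')  # 0.5 accesses/s, 1 KiB
    hot = (4 / 2.0, -4096, 'doc-b')     # 2.0 accesses/s, 4 KiB
    assert sorted([hot, cold])[0][2] == 'doc-a'  # evicted first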