Example #1
    def _on_close_ready(self):
        """
        On a clean shutdown this is called once all of our channels are
        closed; let the Broker know we want to close.
        """
        if self.closed:
            log.warn("%s.on_close_ready invoked while closed",
                     self.__class__.__name__)
            return

        # Send Connection.Close with the stored reply code/text on channel 0
        # and wait for the broker's Connection.CloseOk in response.
        self._rpc(0, spec.Connection.Close(self.closing[0],
                                           self.closing[1], 0, 0),
                  self._on_connection_closed,
                  [spec.Connection.CloseOk])
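This method is the middle step of a clean AMQP shutdown: the connection waits until every channel has closed, then sends Connection.Close on channel 0 and expects Connection.CloseOk back. Below is a minimal, self-contained sketch of that ordering only; _CloseSketch, its stubbed _rpc step, and the hard-coded closing tuple are illustrative stand-ins, not the real pika API.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

class _CloseSketch(object):
    """Toy model of the close-handshake ordering shown above."""

    def __init__(self):
        self.closed = False
        self.closing = (200, 'Normal shutdown')  # (reply_code, reply_text)
        self.open_channels = {1, 2}

    def close(self):
        # Step 1: close every channel; _on_close_ready fires only
        # once the set of open channels is empty.
        self.open_channels.clear()
        self._on_close_ready()

    def _on_close_ready(self):
        if self.closed:
            log.warning('_on_close_ready invoked while closed')
            return
        # Step 2: stand-in for self._rpc(0, spec.Connection.Close(...), ...).
        log.info('-> Connection.Close(%s, %r) on channel 0', *self.closing)
        self._on_connection_closed()

    def _on_connection_closed(self):
        # Step 3: broker answered with Connection.CloseOk.
        self.closed = True
        log.info('<- Connection.CloseOk, connection closed')

_CloseSketch().close()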
Example #2
	# Python 2 code; expects module-level aliases: itertools as it,
	# operator as op, functools as ft, and time() from the time module.
	def process_cluster( self, cluster, hosts,
			ts_stale_limit=graphite_min_cycle, ts=None ):
		ts_now = ts or time()
		# Warn if the cluster's own clock disagrees badly with ours.
		if abs(int(cluster['LOCALTIME']) - ts_now) > ts_stale_limit:
			log.warn(( 'Localtime for cluster {0[NAME]} ({0}) is way off'
				' the local timestamp (limit: {1})' ).format(cluster, ts_stale_limit))
		for host, metrics in hosts:
			# TN is seconds since the host last reported, REPORTED is that timestamp.
			tn, ts_host = it.imap(int, op.itemgetter('TN', 'REPORTED')(host))
			ts_host += tn
			if tn > ts_stale_limit:
				log.error(( 'Data for host {0[NAME]} ({0}) is too'
					' stale (limit: {1}), skipping metrics for host' ).format(host, ts_stale_limit))
			elif abs(ts_now - ts_host) > ts_stale_limit:
				log.error(( '"Reported" timestamp for host {0[NAME]}'
					' ({0}) is way off the local timestamp (limit: {1}),'
					' skipping metrics for host' ).format(host, ts_stale_limit))
			else:
				# Bind cluster/host names into the metric-name builder once per host.
				process_name = ft.partial( self.process_name,
					*it.imap(op.itemgetter('NAME'), [cluster, host]), ts=ts )
				for metric in metrics:
					name = process_name(metric['NAME'])
					try: yield (name,) + self.process_metric(name, host, metric, ts=ts_host)
					except self.IgnoreValue: pass
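Here cluster and host are attribute dicts of the shape yielded per cluster by gmond_xml_process in Example #3. A hypothetical Python 3 harness showing the data shapes follows; ClusterSketch, its process_name/process_metric bodies, and the graphite_min_cycle value are assumptions for illustration, not the original implementation, and the staleness checks are condensed from the method above.

import functools as ft
import time

graphite_min_cycle = 60  # assumption: poll cycle in seconds

class ClusterSketch:
    class IgnoreValue(Exception): pass

    def process_name(self, cluster_name, host_name, metric_name, ts=None):
        # Build a dotted metric path from cluster, host and metric names.
        return '.'.join([cluster_name, host_name.replace('.', '_'), metric_name])

    def process_metric(self, name, host, metric, ts=None):
        # Return (value, timestamp); raise IgnoreValue to drop a sample.
        if metric.get('TYPE') == 'string': raise self.IgnoreValue(name)
        return float(metric['VAL']), ts

    def process_cluster(self, cluster, hosts,
            ts_stale_limit=graphite_min_cycle, ts=None):
        # Python 3 re-sketch of Example #2's flow, minus the logging.
        ts_now = ts or time.time()
        for host, metrics in hosts:
            tn, ts_host = int(host['TN']), int(host['REPORTED'])
            ts_host += tn
            if tn > ts_stale_limit or abs(ts_now - ts_host) > ts_stale_limit:
                continue  # stale host, skip (the original log.error()s here)
            name_for = ft.partial(self.process_name,
                cluster['NAME'], host['NAME'], ts=ts)
            for metric in metrics:
                name = name_for(metric['NAME'])
                try:
                    yield (name,) + self.process_metric(name, host, metric, ts=ts_host)
                except self.IgnoreValue:
                    pass

now = int(time.time())
cluster = {'NAME': 'web', 'LOCALTIME': str(now)}
hosts = [({'NAME': 'host1.example.com', 'TN': '5', 'REPORTED': str(now - 5)},
          [{'NAME': 'load_one', 'VAL': '0.42', 'TYPE': 'float'}])]
print(list(ClusterSketch().process_cluster(cluster, hosts)))
# -> [('web.host1_example_com.load_one', 0.42, <timestamp>)]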
Example #3
# Python 2 code; expects: import itertools as it, operator as op,
# import logging, from io import BytesIO, from lxml import etree.
def gmond_xml_process( xml,
		validate=True, validate_strict=False,
		log=logging.getLogger('gmond_amqp.xml_parser') ):
	'Process gmond XML into (cluster_data, [(host_data, metrics), ...]) tuples.'
	# Don't see much point in iterative parsing here,
	#  since the whole XML is cached into RAM anyway.
	# The alternative is to pull it from the socket through the parser or
	#  cache it into a file, which may lead to parsing the same XML
	#  multiple times if the first link is slow.
	xml = etree.parse(BytesIO(xml.replace('\n', '')))

	# Validation is optional, but gmond ships an internal DTD, so why not?
	if validate or validate_strict:
		dtd = xml.docinfo.internalDTD
		if not dtd.validate(xml):
			err = 'XML validation failed, errors:\n{}'.format(
					'\n'.join(it.imap(bytes, dtd.error_log.filter_from_errors())) )
			if validate_strict: raise AssertionError(err)
			else: log.warn(err)

	# Yield one (cluster_attrs, [(host_attrs, [metric_attrs, ...]), ...])
	#  tuple per CLUSTER element.
	for cluster in xml.iter('CLUSTER'):
		yield cluster.attrib, list(
			(host.attrib, map(op.attrgetter('attrib'), host.iter('METRIC')))
			for host in cluster.iter('HOST') )
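For reference, gmond's XML nests METRIC elements under HOST under CLUSTER. The function above is Python 2 (it.imap, bytes-as-str), so the sketch below re-parses the same shape directly with lxml in Python 3 rather than calling it; the sample document is abridged and made up, though it follows gmond's attribute layout, and DTD validation is skipped.

from io import BytesIO
from lxml import etree

SAMPLE = b'''<GANGLIA_XML VERSION="3.1" SOURCE="gmond">
<CLUSTER NAME="web" LOCALTIME="1700000000" OWNER="ops" LATLONG="" URL="">
<HOST NAME="host1.example.com" IP="10.0.0.1" TN="5" REPORTED="1699999995">
<METRIC NAME="load_one" VAL="0.42" TYPE="float" UNITS="" TN="5"/>
</HOST>
</CLUSTER>
</GANGLIA_XML>'''

def gmond_xml_process_py3(xml_bytes):
    # Same output shape as gmond_xml_process above, without validation.
    tree = etree.parse(BytesIO(xml_bytes.replace(b'\n', b'')))
    for cluster in tree.iter('CLUSTER'):
        yield cluster.attrib, [
            (host.attrib, [m.attrib for m in host.iter('METRIC')])
            for host in cluster.iter('HOST') ]

for cluster, hosts in gmond_xml_process_py3(SAMPLE):
    print(cluster['NAME'],
          [(h['NAME'], [m['NAME'] for m in ms]) for h, ms in hosts])
# -> web [('host1.example.com', ['load_one'])]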