def update(sysname, timestamp, status, responsetime, serviceid=None, handler=""):
    """Send status and response time measurements for a device or service
    to Graphite.

    :param sysname: Sysname of the device being measured.
    :param timestamp: Timestamp of the measurements; None or 'N' means "use
                      the current time".
    :param status: Either Event.UP or Event.DOWN.
    :param responsetime: Round-trip or response time of the device/service.
    :param serviceid: Service id (db primary key) when updating a service
                      handler.
    :param handler: The service handler type when updating a service handler.
    """
    if serviceid:
        status_name = metric_path_for_service_availability(
            sysname, handler, serviceid)
        response_name = metric_path_for_service_response_time(
            sysname, handler, serviceid)
    else:
        status_name = metric_path_for_packet_loss(sysname)
        response_name = metric_path_for_roundtrip_time(sysname)

    if timestamp is None or timestamp == 'N':
        timestamp = time.time()

    # The status metric stores 0 for UP, 1 for anything else
    status_value = 0 if status == event.Event.UP else 1
    send_metrics([
        (status_name, (timestamp, status_value)),
        (response_name, (timestamp, responsetime)),
    ])
def handle(self):
    """Collects statistics counters and ships them to Graphite."""
    now = time.time()
    stats = yield self._get_stats()
    metric_tuples = list(self._make_metrics(stats, now))
    if metric_tuples:
        self._logger.debug("Counters collected")
        send_metrics(metric_tuples)
def update(netboxid, sysname, timestamp, status, responsetime, serviceid=None, handler=""):
    """Send status and response time measurements for a device or service
    to Graphite.

    :param netboxid: Netboxid. Not actually used, but preserved for
                     compatibility with old API.
    :param sysname: Sysname of the device being measured.
    :param timestamp: Timestamp of the measurements; None or 'N' means "use
                      the current time".
    :param status: Either Event.UP or Event.DOWN.
    :param responsetime: Round-trip or response time of the device/service.
    :param serviceid: Service id (db primary key) when updating a service
                      handler.
    :param handler: The service handler type when updating a service handler.
    """
    if serviceid:
        status_name = metric_path_for_service_availability(
            sysname, handler, serviceid)
        response_name = metric_path_for_service_response_time(
            sysname, handler, serviceid)
    else:
        status_name = metric_path_for_packet_loss(sysname)
        response_name = metric_path_for_roundtrip_time(sysname)

    if timestamp is None or timestamp == 'N':
        timestamp = time.time()

    # The status metric stores 0 for UP, 1 for anything else
    status_value = 0 if status == event.Event.UP else 1
    send_metrics([
        (status_name, (timestamp, status_value)),
        (response_name, (timestamp, responsetime)),
    ])
def handle(self):
    """Runs every system-statistics collector in turn and ships the
    combined result to Graphite.
    """
    collected = []
    # Collectors run sequentially, in the same order as before
    for collector in (self._collect_bandwidth, self._collect_cpu,
                      self._collect_sysuptime, self._collect_memory):
        part = yield collector()
        collected += part
    if collected:
        send_metrics(collected)
def _log_to_graphite():
    """Ships this job run's runtime to Graphite and counts its outcome."""
    prefix = metric_prefix_for_ipdevpoll_job(job_handler.netbox.sysname,
                                             job_handler.name)
    send_metrics([(prefix + ".runtime", (timestamp, duration_in_seconds))])

    suffix = ".success-count" if success else ".failure-count"
    _COUNTERS.increment(prefix + suffix)
    _COUNTERS.start()
def _log_to_graphite():
    """Records this job run's runtime metric and bumps the appropriate
    success/failure counter.
    """
    job_prefix = metric_prefix_for_ipdevpoll_job(job_handler.netbox.sysname,
                                                 job_handler.name)
    runtime_metric = (job_prefix + ".runtime",
                      (timestamp, duration_in_seconds))
    send_metrics([runtime_metric])

    if success:
        _COUNTERS.increment(job_prefix + ".success-count")
    else:
        _COUNTERS.increment(job_prefix + ".failure-count")
    _COUNTERS.start()
def _response_to_metrics(self, result, sensors):
    """Converts an SNMP query result into Graphite metric tuples and sends
    them.

    :param result: A mapping of OID -> raw value from the SNMP response.
    :param sensors: A mapping of OID -> sensor record dicts; OIDs in the
                    result that are not in this mapping are ignored.
    :returns: The list of (metric_path, (timestamp, value)) tuples that
              were sent.
    """
    metrics = []
    timestamp = time.time()
    # .items() instead of the Python-2-only .iteritems(): works on both
    # Python 2 and 3, and matches the sibling implementation in this file
    data = ((sensors[oid], value)
            for oid, value in result.items()
            if oid in sensors)
    for sensor, value in data:
        value = convert_to_precision(value, sensor)
        path = metric_path_for_sensor(self.netbox, sensor['internal_name'])
        metrics.append((path, (timestamp, value)))
    send_metrics(metrics)
    return metrics
def handle(self):
    """Collects counters for this device and any virtual instances, then
    ships them to Graphite.
    """
    if self.netbox.master:
        # Virtual instances are polled via their master; just log and bail
        yield self._log_instance_details()
        defer.returnValue(None)

    now = time.time()
    stats = yield self._get_stats()
    netboxes = yield db.run_in_thread(self._get_netbox_list)
    metric_tuples = list(
        self._make_metrics(stats, netboxes=netboxes, timestamp=now))
    if metric_tuples:
        self._logger.debug("Counters collected")
        send_metrics(metric_tuples)
def handle(self):
    """Runs every system-statistics collector for this device and any
    virtual instances, shipping the combined result to Graphite.
    """
    if self.netbox.master:
        # Virtual instances are polled via their master; nothing to do here
        defer.returnValue(None)

    netboxes = yield db.run_in_thread(self._get_netbox_list)
    collected = []
    # Collectors run sequentially, in the same order as before
    for collector in (self._collect_bandwidth, self._collect_cpu,
                      self._collect_sysuptime, self._collect_memory):
        part = yield collector(netboxes)
        collected += part
    if collected:
        send_metrics(collected)
def flush(self):
    """
    Flushes all the counters to the Carbon backend and resets them to zero
    """
    if not self:
        _logger.debug("no counters to flush yet")
        # Early return was missing: without it an empty metric list was
        # still built and sent, and "flushing 0 counters" was logged
        return
    _logger.debug("flushing %d counters to graphite", len(self))
    metrics = []
    timestamp = time.time()
    # self.items() works on both Python 2 and 3 (no six dependency);
    # resetting values in-place during iteration is safe since the dict
    # size never changes
    for counter, count in self.items():
        metrics.append((counter, (timestamp, count)))
        self[counter] = 0
    send_metrics(metrics)
def _collect_hp_multicast(self):
    """Collects per-port IPv4 multicast group statistics from an HP switch
    and ships the resulting counts to Graphite.
    """
    timestamp = time.time()
    mib = StatisticsMib(self.agent)
    groups = yield mib.get_ipv4_multicast_groups_per_port()
    if self._logger.isEnabledFor(logging.DEBUG):
        self._logger.debug("%s", pformat(groups))
    if not groups:
        return

    counts = self._count_ports_by_group(groups)
    self._logger.debug("%r", counts)
    metrics = self._make_metrics_from_counts(counts, timestamp)
    if metrics:
        send_metrics(metrics)
def flush(self):
    """
    Flushes all the counters to the Carbon backend and resets them to zero
    """
    if not self:
        _logger.debug("no counters to flush yet")
        # Early return was missing: without it an empty metric list was
        # still built and sent, and "flushing 0 counters" was logged
        return
    _logger.debug("flushing %d counters to graphite", len(self))
    metrics = []
    timestamp = time.time()
    # self.items() works on both Python 2 and 3 (self.iteritems() is
    # Python-2-only); resetting values in-place during iteration is safe
    # since the dict size never changes
    for counter, count in self.items():
        metrics.append((counter, (timestamp, count)))
        self[counter] = 0
    send_metrics(metrics)
def store_tuple(db_tuple):
    """Sends data to whisper with correct metric path

    :param db_tuple: a row from a rrd_fetchall object
    """
    prefix, when, ip_count, mac_count = db_tuple
    ip_range = find_range(prefix)
    when = get_timestamp(when)
    # One metric per named value, all stamped with the same timestamp
    metrics = [
        (metric_path_for_prefix(prefix, name), (when, value))
        for name, value in (('ip_count', ip_count),
                            ('mac_count', mac_count),
                            ('ip_range', ip_range))
    ]
    LOG.debug(metrics)
    send_metrics(metrics)
def store_tuple(db_tuple):
    """Sends data to whisper with correct metric path

    :param db_tuple: a row from a rrd_fetchall object
    """
    prefix, when, ip_count, mac_count = db_tuple
    ip_range = find_range(prefix)
    when = get_timestamp(when)
    metrics = []
    # One metric per named value, all stamped with the same timestamp
    for name, value in (('ip_count', ip_count),
                        ('mac_count', mac_count),
                        ('ip_range', ip_range)):
        metrics.append((metric_path_for_prefix(prefix, name), (when, value)))
    LOG.debug(metrics)
    send_metrics(metrics)
def _response_to_metrics(self, result, sensors, netboxes):
    """Converts an SNMP query result into Graphite metric tuples — one per
    known sensor per netbox — and sends them.

    :param result: A mapping of OID -> raw value from the SNMP response.
    :param sensors: A mapping of OID -> sensor record dicts; OIDs in the
                    result that are not in this mapping are ignored.
    :param netboxes: The netboxes to record each sensor value for.
    :returns: The list of (metric_path, (timestamp, value)) tuples sent.
    """
    timestamp = time.time()
    metrics = []
    for oid, value in result.items():
        if oid not in sensors:
            continue
        sensor = sensors[oid]
        # Attempt to support numbers-as-text values
        if isinstance(value, bytes):
            value = safestring(value)
        if isinstance(value, str):
            try:
                value = float(value)
            except ValueError:
                pass
        value = convert_to_precision(value, sensor)
        for netbox in netboxes:
            path = metric_path_for_sensor(netbox, sensor['internal_name'])
            metrics.append((path, (timestamp, value)))
    send_metrics(metrics)
    return metrics
def _log_to_graphite():
    """Ships this job run's runtime metric to Graphite."""
    prefix = metric_prefix_for_ipdevpoll_job(self.netbox.sysname, self.name)
    runtime_metric = (prefix + ".runtime", (timestamp, duration_in_seconds))
    send_metrics([runtime_metric])