def submit(self, check, metric, value, tags):
    """Route one metric to the right submission method on `check`.

    A value of -1 is treated as a "not available" sentinel and dropped.
    Cumulative GC time is converted to a temporal percentage and sent as a
    rate; any `*MemoryInMB` metric is sent as a gauge; everything else is
    delegated to the parent class.
    """
    if value == -1:
        # Sentinel for "metric unavailable" — skip submission entirely.
        return

    if metric == 'CumulativeGCTimeInSeconds':
        gc_percent = total_time_to_temporal_percent(value, scale=SECOND)
        check.rate(self.full_metric_name('GCTime'), gc_percent, tags=tags)
        return

    if metric.endswith('MemoryInMB'):
        check.gauge(self.full_metric_name(metric), value, tags=tags)
        return

    super().submit(check, metric, value, tags)
def submit(self, check, data, tags):
    """Submit every metric in `data`, dispatching on its declared category.

    Metrics listed in GAUGES become gauges, those in MONOTONIC_COUNTS become
    monotonic counts, and those in TEMPORAL_PERCENTS are converted to a
    temporal percentage (optionally renamed) and sent as rates. Metrics not
    present in any category are silently ignored.
    """
    for metric, value in data.items():
        full_name = self.full_metric_name(metric)

        if metric in self.GAUGES:
            check.gauge(full_name, value, tags=tags)
        elif metric in self.MONOTONIC_COUNTS:
            check.monotonic_count(full_name, value, tags=tags)
        elif metric in self.TEMPORAL_PERCENTS:
            config = self.TEMPORAL_PERCENTS[metric]
            # The config may override the reported metric name.
            reported_name = config.get('name', metric)
            percent = total_time_to_temporal_percent(value, scale=config['scale'])
            check.rate(self.full_metric_name(reported_name), percent, tags=tags)
def submit_metric(self, name, snmp_value, forced_type, tags):
    """
    Convert the values reported as pysnmp-Managed Objects to values and
    report them to the aggregator.

    Resolution order: an invalid reply is logged and dropped; an explicit
    `forced_type` wins over type sniffing; otherwise the pysnmp class name
    decides counter vs. gauge, then Opaque float decoding is attempted, and
    finally a plain float cast. Unsupported types are logged, not raised.
    """
    if reply_invalid(snmp_value):
        # Metrics not present in the queried object
        self.log.warning('No such Mib available: %s', name)
        return

    # Prefix the raw OID/symbol name into the check's metric namespace.
    metric_name = self.normalize(name, prefix='snmp')

    if forced_type:
        # Configuration may force how the value is interpreted, bypassing
        # the type sniffing below entirely.
        forced_type = forced_type.lower()
        if forced_type == 'gauge':
            value = int(snmp_value)
            self.gauge(metric_name, value, tags)
        elif forced_type == 'percent':
            # Value is a cumulative time in seconds; report utilization.
            value = total_time_to_temporal_percent(int(snmp_value), scale=1)
            self.rate(metric_name, value, tags)
        elif forced_type == 'counter':
            value = int(snmp_value)
            self.rate(metric_name, value, tags)
        elif forced_type == 'monotonic_count':
            value = int(snmp_value)
            self.monotonic_count(metric_name, value, tags)
        else:
            # Unknown forced type is a configuration error: surface it.
            self.warning('Invalid forced-type specified: %s in %s', forced_type, name)
            raise ConfigurationError('Invalid forced-type in config file: {}'.format(name))
        # A forced type was handled; skip all automatic detection.
        return

    # Ugly hack but couldn't find a cleaner way
    # Proper way would be to use the ASN1 method isSameTypeWith but it
    # wrongfully returns True in the case of CounterBasedGauge64
    # and Counter64 for example
    snmp_class = snmp_value.__class__.__name__
    if snmp_class in SNMP_COUNTERS:
        value = int(snmp_value)
        self.rate(metric_name, value, tags)
        return
    if snmp_class in SNMP_GAUGES:
        value = int(snmp_value)
        self.gauge(metric_name, value, tags)
        return

    if snmp_class == 'Opaque':
        # Try support for floats
        # Opaque wraps an ASN.1-encoded payload; decode and hope for a float.
        try:
            value = float(decoder.decode(bytes(snmp_value))[0])
        except Exception:
            # Decoding failed — fall through to the generic float cast below.
            pass
        else:
            self.gauge(metric_name, value, tags)
            return

    # Falls back to try to cast the value.
    try:
        value = float(snmp_value)
    except ValueError:
        pass
    else:
        self.gauge(metric_name, value, tags)
        return

    # Nothing matched: log (don't raise) so one bad OID can't kill the run.
    self.log.warning('Unsupported metric type %s for %s', snmp_class, metric_name)
def query_volume_io(self):
    """Collect per-volume disk I/O metrics (reads, writes, utilization, throughput)."""
    # https://help.sap.com/viewer/4fe29514fd584807ac9f2a04f6754767/2.0.02/en-US/20cadec8751910148bab98528e3634a9.html
    for row in self.iter_rows(queries.GlobalSystemVolumeIO):
        server = row['host']
        hostname = self.get_hana_hostname(server)

        tags = [
            'db:{}'.format(row['db_name']),
            'hana_port:{}'.format(row['port']),
            'resource_type:{}'.format(row['resource']),
            'fs_path:{}'.format(row['path']),
        ]
        tags.extend(self._tags)
        tags.append('hana_host:{}'.format(server))

        # Read
        read_count = row['reads']
        self.gauge('volume.io.read.total', read_count, tags=tags, hostname=hostname)
        self.monotonic_count('volume.io.read.count', read_count, tags=tags, hostname=hostname)

        bytes_read = row['read_size']
        self.gauge('volume.io.read.size.total', bytes_read, tags=tags, hostname=hostname)
        self.monotonic_count('volume.io.read.size.count', bytes_read, tags=tags, hostname=hostname)

        self.rate(
            'volume.io.read.utilized',
            total_time_to_temporal_percent(row['read_time'], scale=MICROSECOND),
            tags=tags,
            hostname=hostname,
        )

        # Write
        write_count = row['writes']
        self.gauge('volume.io.write.total', write_count, tags=tags, hostname=hostname)
        self.monotonic_count('volume.io.write.count', write_count, tags=tags, hostname=hostname)

        bytes_written = row['write_size']
        self.gauge('volume.io.write.size.total', bytes_written, tags=tags, hostname=hostname)
        self.monotonic_count('volume.io.write.size.count', bytes_written, tags=tags, hostname=hostname)

        self.rate(
            'volume.io.write.utilized',
            total_time_to_temporal_percent(row['write_time'], scale=MICROSECOND),
            tags=tags,
            hostname=hostname,
        )

        # Overall
        # Convert microseconds -> seconds
        busy_seconds = row['io_time'] / MICROSECOND
        self.rate(
            'volume.io.utilized',
            total_time_to_temporal_percent(busy_seconds, scale=1),
            tags=tags,
            hostname=hostname,
        )

        # Bytes moved per second of I/O time; zero when the volume was idle.
        throughput = (bytes_read + bytes_written) / busy_seconds if busy_seconds else 0
        self.gauge('volume.io.throughput', throughput, tags=tags, hostname=hostname)
def query_service_statistics(self):
    """Collect per-service request, thread, file, and CPU metrics."""
    # https://help.sap.com/viewer/4fe29514fd584807ac9f2a04f6754767/2.0.02/en-US/20c460be751910149173ac5c08d42be5.html
    for row in self.iter_rows(queries.GlobalSystemServiceStatistics):
        server = row['host']
        hostname = self.get_hana_hostname(server)

        tags = [
            'db:{}'.format(row['db_name'] or 'none'),
            'hana_port:{}'.format(row['port']),
            'service_name:{}'.format(row['service']),
        ]
        tags.extend(self._tags)
        tags.append('hana_host:{}'.format(server))

        # Request metrics
        self.gauge(
            'network.service.request.response_time', row['response_time'], tags=tags, hostname=hostname
        )
        self.gauge(
            'network.service.request.per_second', row['requests_per_second'], tags=tags, hostname=hostname
        )
        self.gauge('network.service.request.active', row['requests_active'], tags=tags, hostname=hostname)
        self.gauge('network.service.request.pending', row['requests_pending'], tags=tags, hostname=hostname)

        finished_all = row['requests_finished_total']
        self.monotonic_count(
            'network.service.request.total_finished', finished_all, tags=tags, hostname=hostname
        )

        finished_external = row['requests_finished_external']
        self.monotonic_count(
            'network.service.request.external.total_finished', finished_external, tags=tags, hostname=hostname
        )

        # Internal requests are whatever remains after external ones.
        self.monotonic_count(
            'network.service.request.internal.total_finished',
            finished_all - finished_external,
            tags=tags,
            hostname=hostname,
        )

        # Thread metrics
        thread_count = row['threads_total']
        active_threads = row['threads_active']
        self.gauge('thread.service.total', thread_count, tags=tags, hostname=hostname)
        self.gauge('thread.service.active', active_threads, tags=tags, hostname=hostname)
        self.gauge('thread.service.inactive', thread_count - active_threads, tags=tags, hostname=hostname)

        # File handles
        self.gauge('file.service.open', row['files_open'], tags=tags, hostname=hostname)

        # CPU utilization derived from cumulative CPU time.
        self.rate(
            'cpu.service.utilized',
            total_time_to_temporal_percent(row['cpu_time']),
            tags=tags,
            hostname=hostname,
        )