def test_multiple_counter(self, mock_client):
    """Two COUNTER metrics on the same path: the handler sends the raw
    value first, then the delta between successive raw values."""
    client_instance = mock_client.return_value
    client_instance._send.return_value = 1

    config = configobj.ConfigObj()
    config['host'] = 'localhost'
    config['port'] = '9999'
    config['batch'] = 1

    path = 'servers.com.example.www.cpu.total.idle'
    first = Metric(path, 5, raw_value=123, timestamp=1234567,
                   host='will-be-ignored', metric_type='COUNTER')
    second = Metric(path, 7, raw_value=128, timestamp=1234567,
                    host='will-be-ignored', metric_type='COUNTER')

    handler = StatsdHandler(config)

    handler.process(first)
    mock_client._send.assert_called_with(ANY, {path: '123|c'})

    handler.process(second)
    # 128 - 123 == 5: after the first sample, counters go out as deltas
    mock_client._send.assert_called_with(ANY, {path: '5|c'})
def test_disconnect_after_flush_enabled(self):
    """With _time_to_reconnect forced true, every processed metric
    triggers one send followed by one reconnect (new socket)."""
    handler = mod.GraphiteHandler(configobj.ConfigObj())

    send_mock = Mock()
    # have the reconnect check always return true
    # so a reconnect is done after every batch send
    check_mock = Mock(return_value=True)
    patches = [
        patch.object(handler, 'socket', Mock()),
        patch.object(handler, '_send_data', send_mock),
        patch.object(handler, '_time_to_reconnect', check_mock),
    ]
    for p in patches:
        p.start()
    try:
        for ts in (123, 124, 125):
            handler.process(Metric('foo.bar', 42, timestamp=ts))
    finally:
        for p in reversed(patches):
            p.stop()

    self.assertEqual(send_mock.call_count, 3)
    self.assertEqual(check_mock.call_count, 3)
    self.assertEqual(len(handler.config['__sockets_created']), 3)
def test_batch(self, mock_urlopen, mock_request):
    """Two metrics with batch=2 are flushed together as one JSON array."""
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['batch'] = 2

    handler = TSDBHandler(config)
    handler.process(Metric('servers.myhostname.cpu.cpu_count', 123,
                           raw_value=123, timestamp=1234567,
                           host='myhostname', metric_type='GAUGE'))
    handler.process(Metric('servers.myhostname.cpu.cpu_time', 123,
                           raw_value=456, timestamp=5678910,
                           host='myhostname', metric_type='GAUGE'))

    body = (
        '[{"timestamp": 1234567, "metric": "cpu.cpu_count", "value": '
        '123, "tags": {"hostname": "myhostname"}}, {"timestamp": 567891'
        '0, "metric": "cpu.cpu_time", "value": 123, "tags": {"hostname"'
        ': "myhostname"}}]')
    header = {'Content-Type': 'application/json'}
    mock_urlopen.assert_called_with(self.url, body, header)
def test_multi_with_batching(self):
    """Four metrics with batch=2 produce exactly two batched sends."""
    config = configobj.ConfigObj()
    config['host'] = 'graphite.example.com'
    config['batch'] = 2

    expected_data = [
        call("metricname1 0 123\nmetricname2 0 123\n"),
        call("metricname3 0 123\nmetricname4 0 123\n"),
    ]

    handler = GraphiteHandler(config)
    sendmock = Mock()
    with patch.object(handler, 'socket', True):
        with patch.object(GraphiteHandler, '_send_data', sendmock):
            for idx in range(1, 5):
                handler.process(
                    Metric('metricname%d' % idx, 0, timestamp=123))

    self.assertEqual(sendmock.call_count, len(expected_data))
    self.assertEqual(sendmock.call_args_list, expected_data)
def publish_metrics(self, gaugesList, countersList, timestamp):
    """Publish gauge and counter metrics collected at *timestamp*.

    gaugesList/countersList are iterables of (metricPath, metricValue)
    pairs.  A counter entry whose path is the literal string 'ERROR:'
    marks a collection error and is logged instead of published.
    """
    for metricPath, metricValue in gaugesList:
        metric = Metric(metricPath, metricValue, raw_value=None,
                        timestamp=timestamp)
        self.publish_metric(metric)

    for metricPath, metricValue in countersList:
        if metricPath == 'ERROR:':
            # BUG FIX: was self.log.info(MetricPath, MetricVale) --
            # undefined names that raised NameError on this branch.
            self.log.info('%s %s', metricPath, metricValue)
            continue
        try:
            metric = Metric(metricPath, metricValue, raw_value=None,
                            timestamp=timestamp)
            self.publish_metric(metric)
        except Exception:
            # narrowed from a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit
            self.log.info('ERROR collecting ' + str(metricPath))
def collect_snmp(self, device, host, port, community):
    """
    Collect stats from device
    """
    # Log
    self.log.info("Collecting ServerTech PDU statistics from: %s" % (device))

    # One timestamp for every metric in this collection pass
    timestamp = time.time()

    inputFeeds = {}

    # Collect PDU system gauge values
    for gaugeName, gaugeOid in self.PDU_SYSTEM_GAUGES.items():
        systemGauges = self.walk(gaugeOid, host, port, community)
        for o, gaugeValue in systemGauges.items():
            metricPath = '.'.join(['devices', device, 'system', gaugeName])
            # BUG FIX: timestamp and precision were passed positionally
            # (Metric(path, value, timestamp, 2)), which binds them to
            # Metric's raw_value/timestamp parameters; pass by keyword.
            metric = Metric(metricPath, float(gaugeValue),
                            timestamp=timestamp, precision=2)
            self.publish_metric(metric)

    # Collect PDU input feed names, keyed by the last two OID components
    inputFeedNames = self.walk(self.PDU_INFEED_NAMES, host, port, community)
    for o, inputFeedName in inputFeedNames.items():
        inputFeed = ".".join(o.split(".")[-2:])
        inputFeeds[inputFeed] = inputFeedName

    # Collect PDU input gauge values
    for gaugeName, gaugeOid in self.PDU_INFEED_GAUGES.items():
        inputFeedGauges = self.walk(gaugeOid, host, port, community)
        for o, gaugeValue in inputFeedGauges.items():
            inputFeed = ".".join(o.split(".")[-2:])
            # Feed names may contain '.' or '\', which would corrupt the
            # dotted metric path; replace them with '_'
            metricName = '.'.join([re.sub(r'\.|\\', '_',
                                          inputFeeds[inputFeed]),
                                   gaugeName])
            if gaugeName == "infeedVolts":
                # Note: Voltage is in "tenth volts", so divide by 10
                metricValue = float(gaugeValue) / 10.0
            elif gaugeName == "infeedAmps":
                # Note: Amps is in "hundredth amps", so divide by 100
                metricValue = float(gaugeValue) / 100.0
            else:
                metricValue = float(gaugeValue)
            metricPath = '.'.join(['devices', device, 'input', metricName])
            # keyword args here too (same positional-binding fix as above)
            metric = Metric(metricPath, metricValue,
                            timestamp=timestamp, precision=2)
            self.publish_metric(metric)
def testgetMetricPathHostNone(self):
    """getMetricPath on a default-prefixed path with no host set."""
    expected_value = 'total.idle'
    actual_value = Metric('servers.host.cpu.total.idle', 0).getMetricPath()
    message = 'Actual %s, expected %s' % (actual_value, expected_value)
    self.assertEqual(actual_value, expected_value, message)
def testgetMetricPathHostNone(self):
    """getMetricPath on a short prefixed path with no host set."""
    expected_value = 'TotalIdle'
    actual_value = Metric('prefix.cpu.TotalIdle', 0).getMetricPath()
    message = 'Actual %s, expected %s' % (actual_value, expected_value)
    self.assertEqual(actual_value, expected_value, message)
def testgetMetricPath(self):
    """getMetricPath with an explicit host not present in the path."""
    expected_value = 'TotalIdle'
    actual_value = Metric('prefix.cpu.TotalIdle', 0,
                          host='com.example.www').getMetricPath()
    message = 'Actual %s, expected %s' % (actual_value, expected_value)
    self.assertEqual(actual_value, expected_value, message)
def testgetPathPrefixCustom(self):
    """getPathPrefix returns everything before the host segment."""
    expected_value = 'custom.path.prefix'
    actual_value = Metric(
        'custom.path.prefix.com.example.www.cpu.total.idle', 0,
        host='com.example.www').getPathPrefix()
    message = 'Actual %s, expected %s' % (actual_value, expected_value)
    self.assertEqual(actual_value, expected_value, message)
def testgetMetricPath(self):
    """getMetricPath strips both the prefix and the host segment."""
    expected_value = 'total.idle'
    actual_value = Metric('servers.com.example.www.cpu.total.idle', 0,
                          host='com.example.www').getMetricPath()
    message = 'Actual %s, expected %s' % (actual_value, expected_value)
    self.assertEqual(actual_value, expected_value, message)
def publish_metrics(self, gaugesList, countersList, timestamp):
    """Publish gauges as-is and counters as rates.

    gaugesList/countersList are iterables of (metricPath, metricValue)
    pairs.  Counter paths containing 'Octets' are additionally published
    once per configured byte unit.  Rates are computed with the
    collector's derivative() method because Graphite's derivative has no
    counter-reset detection.  A counter entry whose path is the literal
    string 'ERROR:' marks a collection error and is only logged.
    """
    for metricPath, metricValue in gaugesList:
        metric = Metric(metricPath, metricValue, raw_value=None,
                        timestamp=timestamp)
        self.publish_metric(metric)

    for metricPath, metricValue in countersList:
        if metricPath == 'ERROR:':
            # BUG FIX: was self.log.info(MetricPath, MetricVale) --
            # undefined names that raised NameError on this branch.
            self.log.info('%s %s', metricPath, metricValue)
            continue

        if 'Octets' in metricPath:
            for unit in self.config['byte_unit']:
                metricPathNewUnit = metricPath.replace('Octets', unit)
                metricValueNewUnit = diamond.convertor.binary.convert(
                    value=metricValue, oldUnit='byte', newUnit=unit)
                metric = Metric(
                    metricPathNewUnit,
                    self.derivative(metricPathNewUnit, metricValueNewUnit,
                                    COUNTER_MAX_64BIT),
                    raw_value=None, timestamp=timestamp)
                self.publish_metric(metric)
        else:
            # Graphite's derivative has no counter reset detection, so
            # use the collector class method instead.
            # BUG FIX: the path was used as the logging format string;
            # pass a real format so '%' in a path can't break logging.
            self.log.info('%s %s', metricPath, metricValue)
            try:
                metric = Metric(
                    metricPath,
                    self.derivative(metricPath, metricValue,
                                    COUNTER_MAX_64BIT),
                    raw_value=None, timestamp=timestamp)
                self.publish_metric(metric)
            except Exception:
                # narrowed from a bare except:
                self.log.info('ERROR collecting ' + str(metricPath))
def test_single_metric(self):
    """A single metric with batch=1 is sent immediately as one line."""
    config = configobj.ConfigObj()
    config['host'] = 'graphite.example.com'
    config['batch'] = 1

    expected_data = [
        call("servers.com.example.www.cpu.total.idle 0 1234567\n"),
    ]

    handler = GraphiteHandler(config)
    sendmock = Mock()
    with patch.object(handler, 'socket', True):
        with patch.object(GraphiteHandler, '_send_data', sendmock):
            handler.process(
                Metric('servers.com.example.www.cpu.total.idle', 0,
                       timestamp=1234567, host='will-be-ignored'))

    self.assertEqual(sendmock.call_count, len(expected_data))
    self.assertEqual(sendmock.call_args_list, expected_data)
def test_diskspace_metrics(self, mock_urlopen, mock_request):
    """
    taghandling deactivate
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']
    config['cleanMetrics'] = False

    handler = TSDBHandler(config)
    handler.process(Metric(
        'servers.myhostname.diskspace.MOUNT_POINT.byte_percentfree',
        80, raw_value=80, timestamp=1234567, host='myhostname',
        metric_type='GAUGE'))

    body = ('[{"timestamp": 1234567, "metric": "diskspace.MOUNT_POINT'
            '.byte_percentfree", "value": 80, "tags": {"myFirstTag": '
            '"myValue", "hostname": "myhostname"}}]')
    header = {'Content-Type': 'application/json'}
    mock_urlopen.assert_called_with(self.url, body, header)
def test_haproxy_metrics(self, mock_urlopen, mock_request):
    """
    taghandling deactivate
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']
    config['cleanMetrics'] = False

    handler = TSDBHandler(config)
    handler.process(Metric(
        'servers.myhostname.haproxy.SOME-BACKEND.SOME-SERVER.bin',
        123, raw_value=123, timestamp=1234567, host='myhostname',
        metric_type='GAUGE'))

    body = ('[{"timestamp": 1234567, "metric": "haproxy.SOME-BACKEND.SOME-'
            'SERVER.bin", "value": 123, "tags": {"myFirstTag": "myValue", '
            '"hostname": "myhostname"}}]')
    header = {'Content-Type': 'application/json'}
    mock_urlopen.assert_called_with(self.url, body, header)
def test_cpu_metrics_taghandling_2(self, mock_urlopen, mock_request):
    """
    aggregate deactivate
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']
    config['cleanMetrics'] = True
    config['skipAggregates'] = False

    handler = TSDBHandler(config)
    handler.process(Metric('servers.myhostname.cpu.total.user', 123,
                           raw_value=123, timestamp=1234567,
                           host='myhostname', metric_type='GAUGE'))

    body = ('[{"timestamp": 1234567, "metric": "cpu.total.user", "value": '
            '123, "tags": {"cpuId": "total", "myFirstTag": "myValue", '
            '"hostname": "myhostname"}}]')
    header = {'Content-Type': 'application/json'}
    mock_urlopen.assert_called_with(self.url, body, header)
def test_iostat_metrics_default(self, mock_request, mock_urlopen):
    """
    taghandling default
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']

    handler = TSDBHandler(config)
    handler.process(Metric('servers.myhostname.iostat.DEV.io_in_progress',
                           80, raw_value=80, timestamp=1234567,
                           host='myhostname', metric_type='GAUGE'))

    # with default cleanMetrics, the device segment becomes a tag
    body = [{
        "timestamp": 1234567,
        "metric": "iostat.io_in_progress",
        "value": 80,
        "tags": {
            "device": "DEV",
            "myFirstTag": "myValue",
            "hostname": "myhostname",
        },
    }]
    header = {'Content-Type': 'application/json'}
    self.check_request_param(mock_request, body, header)
def collect_snmp(self, device, host, port, community):
    """
    Collect SNMP interface data from device
    """
    self.log.debug(
        'Collecting raw SNMP statistics from device \'{0}\''.format(
            device))

    dev_config = self.config['devices'][device]
    # guard clause: nothing to do for devices without configured OIDs
    if 'oids' not in dev_config:
        return

    for oid, metricName in dev_config['oids'].items():
        if (device, oid) in self.skip_list:
            self.log.debug(
                'Skipping OID \'{0}\' ({1}) on device \'{2}\''.format(
                    oid, metricName, device))
            continue

        timestamp = time.time()
        value = self._get_value(device, oid, host, port, community)
        if value is None:
            continue

        self.log.debug(
            '\'{0}\' ({1}) on device \'{2}\' - value=[{3}]'.format(
                oid, metricName, device, value))

        path = '.'.join([self.config['path_prefix'], device,
                         self.config['path_suffix'], metricName])
        # BUG FIX: timestamp/precision/host/metric_type were passed
        # positionally, which binds timestamp to Metric's raw_value
        # parameter (and shifts the rest); pass them by keyword.
        metric = Metric(path, value, timestamp=timestamp,
                        precision=self._precision(value),
                        host=None, metric_type='GAUGE')
        self.publish_metric(metric)
def test_cpu_metrics_taghandling_1(self, mock_request, mock_urlopen):
    """
    aggregate deactivate
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']
    config['cleanMetrics'] = False

    handler = TSDBHandler(config)
    handler.process(Metric('servers.myhostname.cpu.total.user', 123,
                           raw_value=123, timestamp=1234567,
                           host='myhostname', metric_type='GAUGE'))

    body = [{
        "timestamp": 1234567,
        "metric": "cpu.total.user",
        "value": 123,
        "tags": {
            "myFirstTag": "myValue",
            "hostname": "myhostname",
        },
    }]
    header = {'Content-Type': 'application/json'}
    self.check_request_param(mock_request, body, header)
def test_compression(self, mock_request, mock_urlopen):
    """With compression enabled the request body is gzipped JSON."""
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['compression'] = 1

    handler = TSDBHandler(config)
    handler.process(Metric('servers.myhostname.cpu.cpu_count', 123,
                           raw_value=123, timestamp=1234567,
                           host='myhostname', metric_type='GAUGE'))

    expected_body = [{
        "timestamp": 1234567,
        "metric": "cpu.cpu_count",
        "value": 123,
        "tags": {
            "hostname": "myhostname",
        },
    }]

    positional_args = mock_request.call_args[0]
    passed_body = positional_args[1]
    passed_headers = positional_args[2]
    assert passed_headers['Content-Encoding'] == 'gzip'
    assert passed_headers['Content-Type'] == 'application/json'
    assert json.loads(self.decompress(passed_body)) == expected_body
def publish(self, name, value, raw_value=None, precision=0,
            metric_type='GAUGE', instance=None):
    """
    Publish a metric with the given name
    """
    # Resolve the full metric path for this collector/instance
    path = self.get_metric_path(name, instance=instance)

    # TTL: collection interval scaled by the configured multiplier
    ttl = float(self.config['interval']) * \
        float(self.config['ttl_multiplier'])

    # Build and hand the metric off to the configured handlers
    self.publish_metric(Metric(path, value, raw_value=raw_value,
                               timestamp=None, precision=precision,
                               host=self.get_hostname(),
                               metric_type=metric_type, ttl=ttl))
def test_haproxy_metrics_default(self, mock_request, mock_urlopen):
    """
    taghandling default
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']

    handler = TSDBHandler(config)
    handler.process(Metric(
        'servers.myhostname.haproxy.SOME-BACKEND.SOME-SERVER.bin',
        123, raw_value=123, timestamp=1234567, host='myhostname',
        metric_type='GAUGE'))

    # with default cleanMetrics, backend/server segments become tags
    body = [{
        "timestamp": 1234567,
        "metric": "haproxy.bin",
        "value": 123,
        "tags": {
            "backend": "SOME-BACKEND",
            "myFirstTag": "myValue",
            "hostname": "myhostname",
            "server": "SOME-SERVER",
        },
    }]
    header = {'Content-Type': 'application/json'}
    self.check_request_param(mock_request, body, header)
def test_network_metrics(self, mock_request, mock_urlopen):
    """
    taghandling deactivate
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']
    config['cleanMetrics'] = False

    handler = TSDBHandler(config)
    handler.process(Metric('servers.myhostname.network.IF.rx_packets',
                           80, raw_value=80, timestamp=1234567,
                           host='myhostname', metric_type='GAUGE'))

    body = [{
        "timestamp": 1234567,
        "metric": "network.IF.rx_packets",
        "value": 80,
        "tags": {
            "myFirstTag": "myValue",
            "hostname": "myhostname",
        },
    }]
    header = {'Content-Type': 'application/json'}
    self.check_request_param(mock_request, body, header)
def test_diskspace_metrics_default(self, mock_request, mock_urlopen):
    """
    taghandling default
    """
    config = configobj.ConfigObj()
    config['host'] = '127.0.0.1'
    config['port'] = '4242'
    config['tags'] = ['myFirstTag=myValue']

    handler = TSDBHandler(config)
    handler.process(Metric(
        'servers.myhostname.diskspace.MOUNT_POINT.byte_percentfree',
        80, raw_value=80, timestamp=1234567, host='myhostname',
        metric_type='GAUGE'))

    # with default cleanMetrics, the mountpoint segment becomes a tag
    body = [{
        "timestamp": 1234567,
        "metric": "diskspace.byte_percentfree",
        "value": 80,
        "tags": {
            "mountpoint": "MOUNT_POINT",
            "myFirstTag": "myValue",
            "hostname": "myhostname",
        },
    }]
    header = {'Content-Type': 'application/json'}
    self.check_request_param(mock_request, body, header)
def publish(self, name, value, raw_value=None, precision=0,
            metric_type='GAUGE', instance=None):
    """
    Publish a metric with the given name
    """
    # THOUGHTSPOT_CUSTOMIZATION_BEGIN
    # Collect monitoring stats for total collected metrics
    self.metric_count += 1
    if self.config.get('whitelist_file', None):
        # drop the metric unless at least one configured regex matches;
        # an empty/absent regex list therefore drops everything
        if not any(re.search(rx, name, re.IGNORECASE)
                   for rx in (self.config['regex'] or [])):
            return
    # THOUGHTSPOT_CUSTOMIZATION_END

    # Check whitelist/blacklist
    whitelist = self.config['metrics_whitelist']
    blacklist = self.config['metrics_blacklist']
    if whitelist:
        if not whitelist.match(name):
            return
    elif blacklist and blacklist.match(name):
        return

    # Get metric Path
    path = self.get_metric_path(name, instance=instance)

    # Get metric TTL
    ttl = float(self.config['interval']) * \
        float(self.config['ttl_multiplier'])

    # Create Metric
    try:
        metric = Metric(path, value, raw_value=raw_value, timestamp=None,
                        precision=precision, host=self.get_hostname(),
                        metric_type=metric_type, ttl=ttl)
    except DiamondException:
        self.log.error(('Error when creating new Metric: path=%r, '
                        'value=%r'), path, value)
        raise

    # THOUGHTSPOT_CUSTOMIZATION_BEGIN
    # Collect monitoring stats for published metrics
    self.metric_publish_count += 1
    # THOUGHTSPOT_CUSTOMIZATION_END

    # Publish Metric
    self.publish_metric(metric)
def publish(self, name, value, raw_value=None, precision=2,
            metric_type='GAUGE', instance=None, timestamp=None,
            dry_run=False):
    '''
    Publish a metric with the given name (monkey patch for creating the
    metric with a timestamp)
    '''
    # Check whitelist/blacklist
    whitelist = self.config['metrics_whitelist']
    blacklist = self.config['metrics_blacklist']
    if whitelist:
        if not whitelist.match(name):
            return
    elif blacklist and blacklist.match(name):
        return

    # Get metric Path
    path = self.get_metric_path(name, instance=instance)

    # Get metric TTL
    ttl = float(self.config['interval']) * \
        float(self.config['ttl_multiplier'])

    # Create Metric
    try:
        metric = Metric(path, value, raw_value=raw_value,
                        timestamp=timestamp, precision=precision,
                        host=self.get_hostname(),
                        metric_type=metric_type, ttl=ttl)
    except DiamondException:
        self.log.error(('Error when creating new Metric: path=%r, '
                        'value=%r'), path, value)
        raise

    # Publish Metric (dry runs only log the sample)
    if dry_run:
        self.log.info('dry run sample: {}'.format(metric))
    else:
        self.publish_metric(metric)
def test_parse(self):
    """A metric's line form round-trips through Metric.parse()."""
    line = str(Metric('test.parse', 0)).strip()
    reparsed = str(Metric.parse(line)).strip()
    message = 'Actual %s, expected %s' % (line, reparsed)
    self.assertEqual(line, reparsed, message)