def test_check(self):
    """Integration test for supervisord check. Using a mocked supervisord.

    Each TEST_CASE supplies a yaml config, the instances it should yield,
    and either the metrics/service checks to expect or an error message.
    """
    for tc in self.TEST_CASES:
        check, instances = get_check('supervisord', tc['yaml'])
        self.assertTrue(check is not None, msg=check)
        self.assertEquals(tc['expected_instances'], instances)
        for instance in instances:
            name = instance['name']
            try:
                # Run the check
                check.check(instance)
            except Exception as e:  # `as` form: valid on Python 2.6+ and 3
                if 'error_message' in tc:  # expected error
                    self.assertEquals(str(e), tc['error_message'])
                else:
                    self.assertTrue(False, msg=str(e))
            else:
                # Assert that the check collected the right metrics
                expected_metrics = tc['expected_metrics'][name]
                self.assert_metrics(expected_metrics, check.get_metrics())

                # Assert that the check generated the right service checks
                expected_service_checks = tc['expected_service_checks'][name]
                self.assert_service_checks(expected_service_checks,
                                           check.get_service_checks())
def testSqlServer(self):
    """SQL Server check reports base and custom metrics, plus tagged service checks."""
    check, instances = get_check('sqlserver', CONFIG)
    check.check(instances[0])
    metrics = check.get_metrics()

    # Make sure the base metrics loaded
    base_metrics = [m[0] for m in check.METRICS]
    ret_metrics = [m[0] for m in metrics]
    for metric in base_metrics:
        assert metric in ret_metrics

    # Check our custom metrics
    assert 'sqlserver.clr.execution' in ret_metrics
    assert 'sqlserver.exec.in_progress' in ret_metrics
    assert 'sqlserver.db.commit_table_entries' in ret_metrics

    # Make sure the ALL custom metric is tagged
    tagged_metrics = [m for m in metrics
                      if m[0] == 'sqlserver.db.commit_table_entries']
    for metric in tagged_metrics:
        for tag in metric[3]['tags']:
            assert tag.startswith('db')

    # Service checks
    service_checks = check.get_service_checks()
    service_checks_count = len(service_checks)
    # isinstance instead of `type(x) == type([])`: idiomatic and subclass-safe
    self.assertTrue(isinstance(service_checks, list))
    self.assertTrue(service_checks_count > 0)
    self.assertEquals(
        len([sc for sc in service_checks
             if sc['check'] == check.SERVICE_CHECK_NAME]),
        1, service_checks)

    # Assert that all service checks have the proper tags: host and port
    self.assertEquals(
        len([sc for sc in service_checks
             if "host:127.0.0.1,1433" in sc['tags']]),
        service_checks_count, service_checks)
    self.assertEquals(
        len([sc for sc in service_checks if "db:master" in sc['tags']]),
        service_checks_count, service_checks)
def testNginxPlus(self):
    """NGINX Plus status JSON is parsed into the expected metric list."""
    test_data = read_data_from_file('nginx_plus_in.json')
    # ast.literal_eval instead of eval: the fixture contains only Python
    # literals, and literal_eval cannot execute arbitrary code.
    import ast
    expected = ast.literal_eval(read_data_from_file('nginx_plus_out.python'))
    nginx, instances = get_check('nginx', self.nginx_config)
    parsed = nginx.parse_json(test_data)
    parsed.sort()
    self.assertEquals(parsed, expected)
def testIIS(self):
    """IIS check reports every declared metric, correct tags, and one service check."""
    check, instances = get_check('iis', CONFIG)
    instance = instances[0]

    # First run primes the counters; its results are discarded.
    check.check(instance)
    check.get_metrics()
    check.get_service_checks()
    time.sleep(1)

    # Second run to get the rates
    check.check(instance)
    metrics = check.get_metrics()
    service_checks = check.get_service_checks()

    expected_names = [m[0] for m in check.METRICS]
    reported_names = [m[0] for m in metrics]

    # Make sure each metric was captured
    for name in expected_names:
        self.assertTrue(name in reported_names, "not reporting %s" % name)

    # Make sure everything is tagged correctly
    for metric in metrics:
        self.assertEquals(['mytag1', 'mytag2', 'site:Default Web Site'],
                          metric[3]['tags'])

    # Make sure that we get a service check
    self.assertEquals(len(service_checks), 1)
    self.assertEquals(check.SERVICE_CHECK, service_checks[0]['check'])
    self.assertEquals(['site:Default Web Site'], service_checks[0]['tags'])
def test_zk_stat_parsing_lt_v344(self):
    """zk check parses `stat` output from ZooKeeper < 3.4.4 (command mocked)."""
    zk, instances = get_check('zk', CONFIG)
    # Plain int literals: Python 2 compares int and long as equal, and the
    # `L` suffix is a syntax error on Python 3.
    expected = [
        ('zookeeper.latency.min', -10),
        ('zookeeper.latency.avg', 0),
        ('zookeeper.latency.max', 20007),
        ('zookeeper.bytes_received', 101032173),
        ('zookeeper.bytes_sent', 0),
        ('zookeeper.connections', 6),
        ('zookeeper.bytes_outstanding', 0),
        ('zookeeper.outstanding_requests', 0),
        ('zookeeper.zxid.epoch', 1),
        ('zookeeper.zxid.count', 55024071),
        ('zookeeper.nodes', 487),
    ]
    with patch.object(zk, '_send_command', send_command_lt_v344):
        zk.check(instances[0])

    service_checks = zk.get_service_checks()
    self.assertEquals(service_checks[0]['check'], 'zookeeper.ruok')
    self.assertEquals(service_checks[1]['check'], 'zookeeper.mode')
    self.assertEquals(service_checks[1]['status'], AgentCheck.CRITICAL)

    metrics = zk.get_metrics()
    self.assertEquals(sorted([(name, val) for name, _, val, _ in metrics]),
                      sorted(expected))
    self.assertEquals(len(service_checks), 2)
    self.assertEquals(metrics[0][3]['tags'], ['mode:leader'])
def test_build_message(self):
    """Unit test supervisord build service check message."""
    # A raw supervisord process-info dict, as returned by the XML-RPC API.
    # now/start/stop are epoch seconds; state 20 == RUNNING.
    process = {
        'now': 1414815513,
        'group': 'mysql',
        'description': 'pid 787, uptime 0:02:05',
        'pid': 787,
        'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
        'stop': 0,
        'statename': 'RUNNING',
        'start': 1414815388,
        'state': 20,
        'stdout_logfile': '/var/log/mysql/mysql.log',
        'logfile': '/var/log/mysql/mysql.log',
        'exitstatus': 0,
        'spawnerr': '',
        'name': 'mysql'
    }
    # stop == 0 renders an empty "Stop time:" line, hence the bare "\n"
    # before "Exit Status" at the end of the literal.
    expected_message = """Current time: 2014-11-01 04:18:33
Process name: mysql
Process group: mysql
Description: pid 787, uptime 0:02:05
Error log file: /var/log/supervisor/mysql-stderr---supervisor-3ATI82.log
Stdout log file: /var/log/mysql/mysql.log
Log file: /var/log/mysql/mysql.log
State: RUNNING
Start time: 2014-11-01 04:16:28
Stop time: \nExit Status: 0"""
    check, _ = get_check('supervisord', self.TEST_CASES[0]['yaml'])
    self.assertEquals(expected_message, check._build_message(process))
def test_travis_supervisord(self):
    """Integration test for supervisord check. Using a supervisord on Travis."""
    # Load yaml config; `with` guarantees the file handle is closed.
    with open(os.environ['VOLATILE_DIR'] + '/supervisor/supervisord.yaml', 'r') as f:
        config_str = f.read()
    self.assertTrue(config_str is not None and len(config_str) > 0, msg=config_str)

    # init the check and get the instances
    check, instances = get_check('supervisord', config_str)
    self.assertTrue(check is not None, msg=check)
    self.assertEquals(len(instances), 1)

    # Supervisord should run 3 programs for 30, 60 and 90 seconds
    # respectively. The tests below will ensure that the process count
    # metric is reported correctly after (roughly) 10, 40, 70 and 100 seconds
    for i in range(4):
        try:
            # Run the check
            check.check(instances[0])
        except Exception as e:  # `as` form: valid on Python 2.6+ and 3
            # Make sure that it ran successfully
            self.assertTrue(False, msg=str(e))
        else:
            up, down = 0, 0
            for name, timestamp, value, meta in check.get_metrics():
                if name == 'supervisord.process.count':
                    if 'status:up' in meta['tags']:
                        up = value
                    elif 'status:down' in meta['tags']:
                        down = value
            self.assertEquals(up, 3 - i)
            self.assertEquals(down, i)
        sleep(10)
def testApacheOldConfig(self):
    """Legacy agent config is converted into an instance with the same status URL."""
    check, _ = get_check('apache', self.apache_config)
    legacy = {
        'apache_status_url': 'http://example.com/server-status?auto'
    }
    parsed = check.parse_agent_config(legacy)
    first_instance = parsed['instances'][0]
    assert first_instance['apache_status_url'] == legacy['apache_status_url']
def run_check(name, path=None):
    """Load the YAML config for check *name*, run it on every instance,
    and pretty-print the collected events and metrics.

    :param name: check name, used to locate ``<confd>/<name>.yaml``
    :param path: optional explicit path to the YAML config file
    :raises Exception: if the config cannot be opened or yields no instances
    """
    from tests.common import get_check

    # Read the config file
    confd_path = path or os.path.join(get_confd_path(get_os()), '%s.yaml' % name)
    try:
        f = open(confd_path)
    except IOError:
        raise Exception('Unable to open configuration at %s' % confd_path)
    # `with` closes the handle even if read() fails
    with f:
        config_str = f.read()

    # Run the check
    check, instances = get_check(name, config_str)
    if not instances:
        raise Exception('YAML configuration returned no instances.')
    for instance in instances:
        check.check(instance)
        if check.has_events():
            # print() function form: valid on Python 2 and 3 for one argument
            print("Events:\n")
            pprint(check.get_events(), indent=4)
        print("Metrics:\n")
        pprint(check.get_metrics(), indent=4)
def test_check(self):
    """Empty queue/topic/subscriber XML yields zero counts for the instance URL."""
    check, instances = get_check('activemq_xml', self.config)
    check.requests = mock.Mock()

    responses = {
        '/admin/xml/topics.jsp': '<topics></topics>',
        '/admin/xml/queues.jsp': '<queues></queues>',
        '/admin/xml/subscribers.jsp': '<subscribers></subscribers>',
    }

    def fake_get(*args, **kwargs):
        # Unknown URLs fall through with text='' which triggers an XML
        # parsing error -- exactly what we want for unexpected calls.
        body = ''
        for path in responses:
            if path in args[0]:
                body = responses[path]
                break
        return mock.Mock(text=body)

    check.requests.get.side_effect = fake_get
    check.check(instances[0])

    expected = {
        'url:http://localhost:8161': {
            'activemq.queue.count': (0, 'gauge'),
            'activemq.topic.count': (0, 'gauge'),
            'activemq.subscriber.count': (0, 'gauge'),
        }
    }
    self._assert_expected_metrics(expected, check.get_metrics())
def testIIS(self):
    """Second IIS run reports all declared metrics with site tags and one service check."""
    check, instances = get_check('iis', CONFIG)

    # Prime run -- rates need two samples; discard the first results.
    check.check(instances[0])
    check.get_metrics()
    check.get_service_checks()
    time.sleep(1)

    # Second run to get the rates
    check.check(instances[0])
    metrics = check.get_metrics()
    service_checks = check.get_service_checks()

    declared = set(m[0] for m in check.METRICS)
    reported = set(m[0] for m in metrics)
    missing = [name for name in declared if name not in reported]
    for name in missing:
        self.assertTrue(False, "not reporting %s" % name)

    expected_tags = ['mytag1', 'mytag2', 'site:Default Web Site']
    for m in metrics:
        self.assertEquals(expected_tags, m[3]['tags'])

    self.assertEquals(len(service_checks), 1)
    sc = service_checks[0]
    self.assertEquals(check.SERVICE_CHECK, sc['check'])
    self.assertEquals(['site:Default Web Site'], sc['tags'])
def test_get_zpool_stats(self):
    """_get_zpool_stats shells out to `zpool get` once and normalizes the values."""
    raw_output = """NAME PROPERTY VALUE SOURCE
tank capacity 64% -
tank size 14.5T -
tank dedupratio 1.00x -
tank free 5.08T -
tank allocated 9.42T -"""
    expected = {
        'capacity': '64',
        'size': '15942918602752',
        'dedupratio': '1.00',
        'free': '5585519069102',
        'allocated': '10357399533649'
    }

    check, instances = get_check('zfs', self.config)
    popen = mock.Mock()
    popen.return_value = mock.Mock()
    popen.return_value.communicate.return_value = (raw_output, None)
    check.subprocess.Popen = popen

    stats = check._get_zpool_stats('tank')

    assert popen.call_count == 1
    assert popen.call_args == mock.call(
        'sudo zpool get capacity,size,dedupratio,free,allocated tank'.split(),
        stdout=check.subprocess.PIPE
    )
    for key in stats.keys():
        assert stats[key] == expected[key]
def test_process_zpool(self):
    """_process_zpool emits one `zpool.<property>` metric per input property."""
    zpool_metrics = {
        'capacity': '64',
        'size': '15942918602752',
        'dedupratio': '1.00',
        'free': '5585519069102',
        'allocated': '10357399533649'
    }
    zpool_checks = {
        'health': 'ONLINE'
    }
    check, instances = get_check('zfs', self.config)
    zpool = 'tank'
    check._process_zpool(zpool=zpool,
                         zpool_metrics=zpool_metrics,
                         zpool_checks=zpool_checks)

    # Map each expected metric name back to the input property: replaces
    # the long if/elif chain and fixes the "Unexpcted" typo in the message.
    expected = dict(('zpool.' + prop, value)
                    for prop, value in zpool_metrics.items())
    for metric in check.get_metrics():
        name = metric[0]
        assert name in expected, "Unexpected metric " + name
        assert metric[2] == expected[name]
def test_convert_human_to_bytes(self):
    """_convert_human_to_bytes handles plain numbers and K/M/G/T suffixes."""
    check, instances = get_check('zfs', self.config)

    # (human-readable input, expected byte count)
    cases = [
        ('300', 300),                 # bytes
        ('300K', 307200),             # kilobytes
        ('300M', 314572800),          # megabytes
        ('300G', 322122547200),       # gigabytes
        ('300T', 329853488332800),    # terabytes
    ]
    for human, expected in cases:
        assert check._convert_human_to_bytes(human) == expected

    # Garbage input is rejected
    with self.assertRaises(ValueError):
        check._convert_human_to_bytes('Pfffffft')

    # Units beyond terabytes are not implemented
    with self.assertRaises(NotImplementedError):
        check._convert_human_to_bytes('300J')
def testNginx(self):
    """First instance reports nginx.net.connections once; second instance is tagged."""
    nginx, instances = get_check('nginx', self.nginx_config)

    nginx.check(instances[0])
    results = nginx.get_metrics()
    connections = [m for m in results if m[0] == "nginx.net.connections"]
    self.assertEquals(len(connections), 1, results)

    nginx.check(instances[1])
    results = nginx.get_metrics()
    self.assertEquals(results[0][3].get('tags'), ['first_one'])
def test_check(self):
    """Smoke-test the varnish check against a locally installed varnishstat.

    Best-effort only: any failure (e.g. varnishstat not installed) is
    deliberately swallowed, so this test never fails on its own.
    """
    v, instances = get_check('varnish', self.config)
    import pprint
    try:
        for i in range(3):
            # Resolve the varnishstat binary; [:-1] strips the trailing newline.
            v.check({"varnishstat": os.popen("which varnishstat").read()[:-1]})
            pprint.pprint(v.get_metrics())
            time.sleep(1)
    except Exception:
        # NOTE(review): intentional broad swallow -- keeps the test green
        # when varnish is absent from the environment.
        pass
def testApache(self):
    """Each apache instance's metrics carry its own instance tag."""
    apache_check, instances = get_check('apache', self.apache_config)
    for instance, expected_tag in zip(instances,
                                      ['instance:first', 'instance:second']):
        apache_check.check(instance)
        metrics = apache_check.get_metrics()
        self.assertEquals(metrics[0][3].get('tags'), [expected_tag])
def testLighttpd(self):
    """Each lighttpd instance's metrics carry its own instance tag."""
    lighttpd_check, instances = get_check('lighttpd', self.lighttpd_config)
    for instance, expected_tag in zip(instances,
                                      ['instance:first', 'instance:second']):
        lighttpd_check.check(instance)
        metrics = lighttpd_check.get_metrics()
        self.assertEquals(metrics[0][3].get('tags'), [expected_tag])
def testNginx(self):
    """Nginx connection metric and dimensions (skipped: needs a live server)."""
    raise SkipTest("Requires running Lighthttpd")
    # Everything below is unreachable until the skip above is removed.
    nginx, instances = get_check('nginx', self.nginx_config)

    nginx.check(instances[0])
    results = nginx.get_metrics()
    connections = [m for m in results if m[0] == "nginx.net.connections"]
    self.assertEqual(len(connections), 1, results)

    nginx.check(instances[1])
    results = nginx.get_metrics()
    self.assertEqual(results[0][3].get('dimensions'), {'test': 'first_one'})
def testNginxOldConfig(self):
    """parse_agent_config turns numbered legacy nginx URLs into instances."""
    nginx, _ = get_check('nginx', self.nginx_config)
    config = {
        'nginx_status_url_1': 'http://www.example.com/nginx_status:first_tag',
        'nginx_status_url_2': 'http://www.example2.com/nginx_status:8080:second_tag',
        'nginx_status_url_3': 'http://www.example3.com/nginx_status:third_tag'
    }
    instances = nginx.parse_agent_config(config)['instances']
    self.assertEqual(len(instances), 3)
    # Legacy values embed a trailing ":tag"; strip it to get the URL.
    # Compare as sets: pairing config.values()[i] with instances[i] relied
    # on dict ordering (arbitrary in py2, and views are not indexable in py3).
    expected_urls = set(':'.join(v.split(':')[:-1]) for v in config.values())
    actual_urls = set(instance['nginx_status_url'] for instance in instances)
    self.assertEqual(expected_urls, actual_urls)
def testNginx(self):
    """Nginx metrics and dimensions (skipped: requires a running server)."""
    raise SkipTest("Requires running Lighthttpd")
    # Unreachable until the skip is removed.
    nginx, instances = get_check('nginx', self.nginx_config)
    nginx.check(instances[0])
    metrics = nginx.get_metrics()
    count = len([m for m in metrics if m[0] == "nginx.net.connections"])
    self.assertEqual(count, 1, metrics)
    nginx.check(instances[1])
    metrics = nginx.get_metrics()
    self.assertEqual(metrics[0][3].get('dimensions'), {'test': 'first_one'})
def testApache(self):
    """Apache per-instance dimensions (skipped: requires running apache)."""
    raise SkipTest("Requires running apache")
    # Unreachable until the skip is removed.
    apache_check, instances = get_check('apache', self.apache_config)
    for instance, name in zip(instances, ['first', 'second']):
        apache_check.check(instance)
        metrics = apache_check.get_metrics()
        self.assertEqual(metrics[0][3].get('dimensions'), {'instance': name})
def testLighttpd(self):
    """Lighttpd per-instance dimensions (skipped: requires running lighttpd)."""
    raise SkipTest("Requires running Lighthttpd")
    # Unreachable until the skip is removed.
    lighttpd_check, instances = get_check('lighttpd', self.lighttpd_config)
    for instance, name in zip(instances, ['first', 'second']):
        lighttpd_check.check(instance)
        metrics = lighttpd_check.get_metrics()
        self.assertEqual(metrics[0][3].get('dimensions'), {'instance': name})
def testNginxOldConfig(self):
    """parse_agent_config turns numbered legacy nginx URLs into instances."""
    nginx, _ = get_check('nginx', self.nginx_config)
    config = {
        'nginx_status_url_1': 'http://www.example.com/nginx_status:first_tag',
        'nginx_status_url_2': 'http://www.example2.com/nginx_status:8080:second_tag',
        'nginx_status_url_3': 'http://www.example3.com/nginx_status:third_tag'
    }
    instances = nginx.parse_agent_config(config)['instances']
    self.assertEquals(len(instances), 3)
    # Legacy values embed a trailing ":tag"; strip it to get the URL.
    # Compare as sets: pairing config.values()[i] with instances[i] relied
    # on dict ordering (arbitrary in py2, and views are not indexable in py3).
    expected_urls = set(':'.join(v.split(':')[:-1]) for v in config.values())
    actual_urls = set(instance['nginx_status_url'] for instance in instances)
    self.assertEquals(expected_urls, actual_urls)
def test_process_subscriber_data_normal(self):
    """Each <subscriber> yields per-subscriber gauges keyed by its attribute
    tags; the overall subscriber count lands under the empty tag key."""
    check, instances = get_check('activemq_xml', self.config)
    data = """
    <subscribers>
    <subscriber clientId="10" subscriptionName="subscription1" connectionId="10" destinationName="Queue1" selector="*" active="yes" >
    <stats pendingQueueSize="5" dispatchedQueueSize="15" dispatchedCounter="15" enqueueCounter="235" dequeueCounter="175"/>
    </subscriber>
    <subscriber clientId="5" subscriptionName="subscription2" connectionId="15" destinationName="Topic1" selector="*" active="no" >
    <stats pendingQueueSize="0" dispatchedQueueSize="0" dispatchedCounter="5" enqueueCounter="12" dequeueCounter="15"/>
    </subscriber>
    </subscribers>
    """
    # args appear to be (data, tags, max_detailed, detailed_items) -- TODO confirm
    check._process_subscriber_data(data, [], 300, [])
    expected = {
        'active:yes-clientId:10-connectionId:10-destinationName:Queue1-selector:*-subscriptionName:subscription1': {
            'activemq.subscriber.enqueue_counter': ('235', 'gauge'),
            'activemq.subscriber.dequeue_counter': ('175', 'gauge'),
            'activemq.subscriber.dispatched_counter': ('15', 'gauge'),
            'activemq.subscriber.dispatched_queue_size': ('15', 'gauge'),
            'activemq.subscriber.pending_queue_size': ('5', 'gauge'),
        },
        '': {
            'activemq.subscriber.count': (2, 'gauge'),
        },
        'active:no-clientId:5-connectionId:15-destinationName:Topic1-selector:*-subscriptionName:subscription2': {
            'activemq.subscriber.enqueue_counter': ('12', 'gauge'),
            'activemq.subscriber.dequeue_counter': ('15', 'gauge'),
            'activemq.subscriber.dispatched_counter': ('5', 'gauge'),
            'activemq.subscriber.dispatched_queue_size': ('0', 'gauge'),
            'activemq.subscriber.pending_queue_size': ('0', 'gauge'),
        },
    }
    self._assert_expected_metrics(expected, check.get_metrics())
def testNginx(self):
    """Nginx metrics per instance, plus tagged can_connect service checks."""
    nginx, instances = get_check('nginx', self.nginx_config)

    nginx.check(instances[0])
    results = nginx.get_metrics()
    connections = [m for m in results if m[0] == "nginx.net.connections"]
    self.assertEquals(len(connections), 1, results)

    nginx.check(instances[1])
    results = nginx.get_metrics()
    self.assertEquals(results[0][3].get('tags'), ['first_one'])

    service_checks = nginx.get_service_checks()
    expected_tags = set(['host:localhost', 'port:44441'])
    for sc in service_checks:
        if sc['check'] == 'nginx.can_connect':
            self.assertEquals(set(sc['tags']), expected_tags, service_checks)
def test_service_check(self):
    """_parse_varnishadm emits one service check per backend, tagged by name."""
    v, instances = get_check('varnish', self.config)
    v._parse_varnishadm(self.varnishadm_dump)

    service_checks = v.get_service_checks()
    self.assertEquals(len(service_checks), 2)

    # The dump contains backends b0 and b1, reported in order.
    for idx, backend in enumerate(['b0', 'b1']):
        sc = service_checks[idx]
        self.assertEquals(sc['check'], v.SERVICE_CHECK_NAME)
        self.assertEquals(sc['tags'], ['backend:%s' % backend])
def test_parsing(self):
    """Both text and XML varnishstat dumps parse into the expected metrics."""
    v, instances = get_check('varnish', self.config)

    # Plain-text parsing
    v._parse_varnishstat(self.v_dump, False)
    metrics = v.get_metrics()
    names = [m[0] for m in metrics]
    waiting_values = [m[2] for m in metrics if m[0] == "varnish.n_waitinglist"]
    self.assertEquals(waiting_values[0], 980)
    assert "varnish.fetch_length" not in names

    # XML parsing
    v._parse_varnishstat(self.xml_dump, True)
    metrics = v.get_metrics()
    names = [m[0] for m in metrics]
    g_space_values = [m[2] for m in metrics if m[0] == "varnish.SMA.s0.g_space"]
    self.assertEquals(g_space_values[0], 120606)
    assert "varnish.SMA.transient.c_bytes" not in names
def test_process_topic_data_no_data(self):
    """An empty <topics> document yields only a zero topic count."""
    check, instances = get_check('activemq_xml', self.config)
    empty_topics = """
    <topics>
    </topics>
    """
    check._process_data(empty_topics, "topic", [], 300, [])
    expected = {
        '': {'activemq.topic.count': (0, 'gauge')},
    }
    self._assert_expected_metrics(expected, check.get_metrics())
def testLighttpd(self):
    """Lighttpd metrics are tagged per instance; can_connect checks carry host/port tags."""
    l, instances = get_check('lighttpd', self.lighttpd_config)
    l.check(instances[0])
    metrics = l.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['instance:first'])

    l.check(instances[1])
    metrics = l.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['instance:second'])

    # Fix: get_service_checks() was called twice. If it flushes the
    # collected checks (as get_metrics appears to -- TODO confirm), the
    # second call returned an empty list and the loop below asserted nothing.
    service_checks = l.get_service_checks()
    can_connect = [sc for sc in service_checks
                   if sc['check'] == 'lighttpd.can_connect']
    for sc in can_connect:
        self.assertEquals(set(sc['tags']),
                          set(['host:localhost', 'port:9449']),
                          service_checks)
def test_process_subscriber_data_no_data(self):
    """An empty <subscribers> document yields only a zero subscriber count."""
    check, instances = get_check('activemq_xml', self.config)
    empty_subscribers = """
    <subscribers>
    </subscribers>
    """
    check._process_subscriber_data(empty_subscribers, [], 300, [])
    expected = {
        '': {'activemq.subscriber.count': (0, 'gauge')},
    }
    self._assert_expected_metrics(expected, check.get_metrics())
def testApache(self):
    """Apache metrics are tagged per instance; can_connect checks carry host/port tags."""
    apache_check, instances = get_check('apache', self.apache_config)
    for instance, expected_tag in zip(instances,
                                      ('instance:first', 'instance:second')):
        apache_check.check(instance)
        metrics = apache_check.get_metrics()
        self.assertEquals(metrics[0][3].get('tags'), [expected_tag])

    service_checks = apache_check.get_service_checks()
    expected_tags = set(['host:localhost', 'port:9444'])
    for sc in service_checks:
        if sc['check'] == 'apache.can_connect':
            self.assertEquals(set(sc['tags']), expected_tags, service_checks)
def test_zk_stat_parsing_lt_v344(self):
    """parse_stat handles pre-3.4.4 `stat` output (no explicit Connections line)."""
    zk, instances = get_check('zk', CONFIG)
    stat_response = """Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT
Clients:
 /10.42.114.160:32634[1](queued=0,recved=12,sent=0)
 /10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
 /10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
 /10.115.77.32:32990[1](queued=0,recved=16,sent=0)
 /10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
 /10.37.137.74:21797[1](queued=0,recved=19431,sent=0)

Latency min/avg/max: -10/0/20007
Received: 101032173
Sent: 0
Outstanding: 0
Zxid: 0x1034799c7
Mode: leader
Node count: 487
"""
    # Plain int literals: Python 2 compares int and long as equal, and the
    # `L` suffix is a syntax error on Python 3.
    expected = [
        ('zookeeper.latency.min', -10),
        ('zookeeper.latency.avg', 0),
        ('zookeeper.latency.max', 20007),
        ('zookeeper.bytes_received', 101032173),
        ('zookeeper.bytes_sent', 0),
        ('zookeeper.connections', 6),
        ('zookeeper.bytes_outstanding', 0),
        ('zookeeper.zxid.epoch', 1),
        ('zookeeper.zxid.count', 55024071),
        ('zookeeper.nodes', 487),
    ]
    buf = StringIO(stat_response)
    metrics, tags, mode = zk.parse_stat(buf)
    self.assertEquals(tags, ['mode:leader'])
    self.assertEquals(metrics, expected)

    zk.check(instances[0])
    service_checks = zk.get_service_checks()
    # On Travis only the ruok check fires; locally the mode check does too.
    expected = 1 if self.is_travis() else 2
    self.assertEquals(len(service_checks), expected)
    self.assertEquals(service_checks[0]['check'], 'zookeeper.ruok')
    # Don't check status of ruok because it can vary if ZK is running.
    if not self.is_travis():
        self.assertEquals(service_checks[1]['check'], 'zookeeper.mode')
        self.assertEquals(service_checks[1]['status'], AgentCheck.CRITICAL)
def testIIS(self):
    """win32_event_log check picks up only our source's events, filtered per instance."""
    import win32evtlog
    check, instances = get_check('win32_event_log', CONFIG)

    # Run the check against all instances to set the last_ts
    for instance in instances:
        check.check(instance)

    # Run checks again and make sure there are no events
    for instance in instances:
        check.check(instance)
        assert len(check.get_metrics()) == 0

    # Generate some events for the log
    for msg, ev_type in self.LOG_EVENTS:
        self.write_event(msg, ev_type)
    self.write_event('do not pick me', win32evtlog.EVENTLOG_INFORMATION_TYPE,
                     source_name='EVENTLOGTESTBAD')

    # Run the checks again for them to pick up the new events
    inst1, inst2 = instances
    check.check(inst1)
    ev1 = check.get_events()
    assert len(ev1) > 0
    assert len(ev1) == len([
        ev for ev in self.LOG_EVENTS
        if ev[1] == win32evtlog.EVENTLOG_WARNING_TYPE
    ])
    for ev in ev1:
        # Make sure we only picked up our source
        assert 'EVENTLOGTESTBAD' not in ev['msg_title']
        # Make sure the tags match up
        assert ev['tags'] == inst1['tags']

    check.check(inst2)
    ev2 = check.get_events()
    assert len(ev2) > 0
    assert len(ev2) == len([
        ev for ev in self.LOG_EVENTS
        if ev[1] in (win32evtlog.EVENTLOG_ERROR_TYPE,
                     win32evtlog.EVENTLOG_INFORMATION_TYPE)
    ])
    for ev in ev2:
        # Make sure we only picked up our source
        assert 'EVENTLOGTESTBAD' not in ev['msg_title']
        # Fix apparent copy-paste: these events came from checking inst2,
        # so compare against inst2's tags (was inst1).
        # TODO confirm the two instances don't intentionally share tags.
        assert ev['tags'] == inst2['tags']
def testIIS(self):
    """All declared IIS metrics are reported with the configured tags."""
    check, instances = get_check('iis', CONFIG)
    check.check(instances[0])
    metrics = check.get_metrics()

    reported_names = [m[0] for m in metrics]
    # Every metric declared on the check must be present
    for name in (m[0] for m in check.METRICS):
        assert name in reported_names

    # Every reported metric carries the configured tags
    for m in metrics:
        assert m[3]['tags'] == ['mytag1', 'mytag2']
def test_process_queue_data_normal(self):
    """Each <queue> yields per-queue gauges keyed by 'queue:<name>'; the
    overall queue count lands under the empty tag key."""
    check, instances = get_check('activemq_xml', self.config)
    data = """
    <queues>
    <queue name="Queue1">
    <stats size="0" consumerCount="6" enqueueCount="64714" dequeueCount="64714"/>
    <feed>
    <atom>queueBrowse/Queue1;jsessionid=sess_token?view=rss&feedType=atom_1.0</atom>
    <rss>queueBrowse/Queue1;jsessionid=sess_token?view=rss&feedType=rss_2.0</rss>
    </feed>
    </queue>
    <queue name="Queue2">
    <stats size="10" consumerCount="3" enqueueCount="1165" dequeueCount="1165"/>
    <feed>
    <atom>queueBrowse/Queue2;jsessionid=sess_token?view=rss&feedType=atom_1.0</atom>
    <rss>queueBrowse/Queue2;jsessionid=sess_token?view=rss&feedType=rss_2.0</rss>
    </feed>
    </queue>
    </queues>
    """
    # args appear to be (data, domain, tags, max_detailed, detailed_items) -- TODO confirm
    check._process_data(data, "queue", [], 300, [])
    expected = {
        'queue:Queue2': {
            'activemq.queue.size': ('10', 'gauge'),
            'activemq.queue.enqueue_count': ('1165', 'gauge'),
            'activemq.queue.dequeue_count': ('1165', 'gauge'),
            'activemq.queue.consumer_count': ('3', 'gauge')
        },
        '': {
            'activemq.queue.count': (2, 'gauge')
        },
        'queue:Queue1': {
            'activemq.queue.dequeue_count': ('64714', 'gauge'),
            'activemq.queue.consumer_count': ('6', 'gauge'),
            'activemq.queue.size': ('0', 'gauge'),
            'activemq.queue.enqueue_count': ('64714', 'gauge'),
        },
    }
    self._assert_expected_metrics(expected, check.get_metrics())
def testIIS(self):
    """IIS metrics and dimensions (skipped: requires IIS and WMI)."""
    raise SkipTest('Requires IIS and wmi')
    # Unreachable until the skip is removed.
    check, instances = get_check('iis', CONFIG)
    check.check(instances[0])
    metrics = check.get_metrics()

    reported_names = [m[0] for m in metrics]
    for name in (m[0] for m in check.METRICS):
        assert name in reported_names
    for m in metrics:
        assert m[3]['dimensions'] == {'dim1': 'value1', 'dim2': 'value2'}
def test_fetch_data(self):
    """_fetch_data builds the URL and forwards credentials to requests.get.

    Not concerned with the response body -- only the call arguments.
    """
    check, instances = get_check('activemq_xml', self.config)
    check.requests = mock.Mock()

    base = 'http://localhost:8171'
    path = '/admin/xml/queues.jsp'

    # No credentials -> auth=None
    check._fetch_data(base, path, None, None)
    assert check.requests.get.call_count == 1
    assert check.requests.get.call_args == mock.call(base + path, auth=None)

    # Username/password -> auth tuple
    check.requests.get.reset_mock()
    check._fetch_data(base, path, 'user', 'pass')
    assert check.requests.get.call_count == 1
    assert check.requests.get.call_args == mock.call(base + path,
                                                     auth=('user', 'pass'))
def testNginx(self):
    """Nginx per-instance metrics plus host/port-tagged can_connect checks."""
    nginx, instances = get_check('nginx', self.nginx_config)

    nginx.check(instances[0])
    metrics = nginx.get_metrics()
    connection_count = len(
        [m for m in metrics if m[0] == "nginx.net.connections"])
    self.assertEquals(connection_count, 1, metrics)

    nginx.check(instances[1])
    metrics = nginx.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['first_one'])

    service_checks = nginx.get_service_checks()
    can_connect = [sc for sc in service_checks
                   if sc['check'] == 'nginx.can_connect']
    for sc in can_connect:
        self.assertEquals(set(sc['tags']),
                          set(['host:localhost', 'port:44441']),
                          service_checks)
def testSqlServer(self):
    """SQL Server check reports base and custom metrics, plus tagged service checks."""
    check, instances = get_check('sqlserver', CONFIG)
    check.check(instances[0])
    metrics = check.get_metrics()

    # Make sure the base metrics loaded
    base_metrics = [m[0] for m in check.METRICS]
    ret_metrics = [m[0] for m in metrics]
    for metric in base_metrics:
        assert metric in ret_metrics

    # Check our custom metrics
    assert 'sqlserver.clr.execution' in ret_metrics
    assert 'sqlserver.exec.in_progress' in ret_metrics
    assert 'sqlserver.db.commit_table_entries' in ret_metrics

    # Make sure the ALL custom metric is tagged
    tagged_metrics = [
        m for m in metrics
        if m[0] == 'sqlserver.db.commit_table_entries'
    ]
    for metric in tagged_metrics:
        for tag in metric[3]['tags']:
            assert tag.startswith('db')

    # Service checks
    service_checks = check.get_service_checks()
    service_checks_count = len(service_checks)
    # isinstance instead of `type(x) == type([])`: idiomatic and subclass-safe
    self.assertTrue(isinstance(service_checks, list))
    self.assertTrue(service_checks_count > 0)
    self.assertEquals(
        len([
            sc for sc in service_checks
            if sc['check'] == check.SERVICE_CHECK_NAME
        ]), 1, service_checks)

    # Assert that all service checks have the proper tags: host and port
    self.assertEquals(
        len([
            sc for sc in service_checks
            if "host:127.0.0.1,1433" in sc['tags']
        ]), service_checks_count, service_checks)
    self.assertEquals(
        len([sc for sc in service_checks if "db:master" in sc['tags']]),
        service_checks_count, service_checks)
def testApache(self):
    """Apache per-instance metrics plus host/port-tagged can_connect checks."""
    a, instances = get_check('apache', self.apache_config)
    for instance, expected_tag in zip(instances,
                                      ['instance:first', 'instance:second']):
        a.check(instance)
        metrics = a.get_metrics()
        self.assertEquals(metrics[0][3].get('tags'), [expected_tag])

    service_checks = a.get_service_checks()
    can_connect = [sc for sc in service_checks
                   if sc['check'] == 'apache.can_connect']
    for sc in can_connect:
        self.assertEquals(set(sc['tags']),
                          set(['host:localhost', 'port:9444']),
                          service_checks)
def testLighttpd(self):
    """Lighttpd metrics are tagged per instance; can_connect checks carry host/port tags."""
    l, instances = get_check('lighttpd', self.lighttpd_config)
    l.check(instances[0])
    metrics = l.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['instance:first'])

    l.check(instances[1])
    metrics = l.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['instance:second'])

    # Fix: get_service_checks() was called twice. If it flushes the
    # collected checks (as get_metrics appears to -- TODO confirm), the
    # second call returned an empty list and the loop below asserted nothing.
    service_checks = l.get_service_checks()
    can_connect = [
        sc for sc in service_checks
        if sc['check'] == 'lighttpd.can_connect'
    ]
    for sc in can_connect:
        self.assertEquals(set(sc['tags']),
                          set(['host:localhost', 'port:9449']),
                          service_checks)
def test_process_topics_data_normal(self):
    """Each <topic> yields per-topic gauges keyed by 'topic:<name>'; the
    overall topic count lands under the empty tag key."""
    check, instances = get_check('activemq_xml', self.config)
    data = """
    <topics>
    <topic name="Topic1">
    <stats size="5" consumerCount="0" enqueueCount="24" dequeueCount="0"/>
    </topic>
    <topic name="Topic2">
    <stats size="1" consumerCount="50" enqueueCount="12" dequeueCount="1200"/>
    </topic>
    </topics>
    """
    # args appear to be (data, domain, tags, max_detailed, detailed_items) -- TODO confirm
    check._process_data(data, "topic", [], 300, [])
    expected = {
        'topic:Topic1': {
            'activemq.topic.size': ('5', 'gauge'),
            'activemq.topic.enqueue_count': ('24', 'gauge'),
            'activemq.topic.dequeue_count': ('0', 'gauge'),
            'activemq.topic.consumer_count': ('0', 'gauge')
        },
        '': {
            'activemq.topic.count': (2, 'gauge')
        },
        'topic:Topic2': {
            'activemq.topic.dequeue_count': ('1200', 'gauge'),
            'activemq.topic.consumer_count': ('50', 'gauge'),
            'activemq.topic.size': ('1', 'gauge'),
            'activemq.topic.enqueue_count': ('12', 'gauge'),
        },
    }
    self._assert_expected_metrics(expected, check.get_metrics())
def test_zk_stat_parsing_gte_v344(self):
    """parse_stat handles >= 3.4.4 `stat` output, which has an explicit
    Connections line; metrics are plain ints and a dimensions dict is
    returned instead of a tags list."""
    Zookeeper, instances = get_check('zk', CONFIG)
    stat_response = """Zookeeper version: 3.4.5--1, built on 03/16/2010 07:31 GMT
Clients:
 /10.42.114.160:32634[1](queued=0,recved=12,sent=0)
 /10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
 /10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
 /10.115.77.32:32990[1](queued=0,recved=16,sent=0)
 /10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
 /10.37.137.74:21797[1](queued=0,recved=19431,sent=0)

Latency min/avg/max: -10/0/20007
Received: 101032173
Sent: 0
Connections: 1
Outstanding: 0
Zxid: 0x1034799c7
Mode: leader
Node count: 487
"""
    # connections comes from the explicit "Connections: 1" line, not from
    # counting client entries; zxid epoch/count are unpacked from the hex Zxid.
    expected = [
        ('zookeeper.latency.min', -10),
        ('zookeeper.latency.avg', 0),
        ('zookeeper.latency.max', 20007),
        ('zookeeper.bytes_received', 101032173),
        ('zookeeper.bytes_sent', 0),
        ('zookeeper.connections', 1),
        ('zookeeper.bytes_outstanding', 0),
        ('zookeeper.zxid.epoch', 1),
        ('zookeeper.zxid.count', 55024071),
        ('zookeeper.nodes', 487),
    ]
    buf = StringIO(stat_response)
    metrics, dimensions = Zookeeper.parse_stat(buf)
    self.assertEqual(dimensions, {'mode': 'leader'})
    self.assertEqual(metrics, expected)
def testSqlServer(self):
    """Base and custom SQL Server metrics are reported; the ALL metric is db-tagged."""
    check, instances = get_check('sqlserver', CONFIG)
    check.check(instances[0])
    metrics = check.get_metrics()

    reported = [m[0] for m in metrics]

    # Every metric declared on the check must be present
    for name in (m[0] for m in check.METRICS):
        assert name in reported

    # Custom metrics from the config
    for custom in ('sqlserver.clr.execution',
                   'sqlserver.exec.in_progress',
                   'sqlserver.db.commit_table_entries'):
        assert custom in reported

    # The ALL custom metric must carry only db:* tags
    for m in metrics:
        if m[0] == 'sqlserver.db.commit_table_entries':
            for tag in m[3]['tags']:
                assert tag.startswith('db')
def _create_check(self):
    """Instantiate the jenkins check and keep the first configured instance."""
    check, instances = get_check('jenkins', self.config_yaml)
    self.check = check
    self.instance = instances[0]