def test_redis_auth(self):
    """The redis check must collect metrics with the right password and
    collect nothing when the password is wrong or empty."""
    if self.is_travis():
        return

    # Correct password: metrics should come back.
    check = load_check('redisdb', {}, {})
    good_instance = {
        'host': 'localhost',
        'port': AUTH_PORT,
        'password': '******'
    }
    check.check(good_instance)
    collected = self._sort_metrics(check.get_metrics())
    assert len(collected) > 0, "No metrics returned"

    # Wrong passwords: the check must not report any metric.
    bad_instances = [
        {'host': 'localhost', 'port': AUTH_PORT, 'password': ''},
        {'host': 'localhost', 'port': AUTH_PORT, 'password': '******'},
    ]
    for bad_instance in bad_instances:
        check = load_check('redisdb', {}, {})
        check.check(bad_instance)
        collected = self._sort_metrics(check.get_metrics())
        assert len(collected) == 0, \
            "Should have failed with bad password; got %s instead" % collected
def test_redis_auth(self):
    """Verify password handling: a correct password yields metrics, a
    wrong or missing one yields none."""
    if self.is_travis():
        return

    base = {'host': 'localhost', 'port': AUTH_PORT}

    # With the correct password the check reports metrics.
    instance = dict(base, password='******')
    r = load_check('redisdb', {}, {})
    r.check(instance)
    metrics = self._sort_metrics(r.get_metrics())
    assert len(metrics) > 0, "No metrics returned"

    # With bad credentials nothing may be reported.
    for bad_password in ('', '******'):
        r = load_check('redisdb', {}, {})
        r.check(dict(base, password=bad_password))
        metrics = self._sort_metrics(r.get_metrics())
        assert len(metrics) == 0, \
            "Should have failed with bad password; got %s instead" % metrics
def testElasticChecksD(self):
    """Legacy agent-config path for the elastic check.

    Skipped unconditionally; everything after the SkipTest is dead code
    kept for reference until the linked issue is resolved.
    """
    raise SkipTest("See https://github.com/DataDog/dd-agent/issues/825")
    agent_config = {'elasticsearch': 'http://localhost:%s' % PORT,
                    'version': '0.1', 'api_key': 'toto'}

    # Initialize the check from checks_d
    c = load_check('elastic', {'init_config': {}, 'instances': {}},
                   agent_config)
    conf = c.parse_agent_config(agent_config)
    self.check = load_check('elastic', conf, agent_config)

    self.check.check(conf['instances'][0])
    r = self.check.get_metrics()
    self.assertIsInstance(r, list)
    self.assertTrue(len(r) > 0)
    # Exactly one sample is expected per metric name.
    self.assertEqual(len([t for t in r if t[0] == "elasticsearch.get.total"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "elasticsearch.search.fetch.total"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "jvm.gc.collection_time"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "jvm.mem.heap_committed"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "jvm.mem.heap_used"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "jvm.threads.count"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "jvm.threads.peak_count"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "elasticsearch.transport.rx_count"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "elasticsearch.transport.tx_size"]), 1, r)
    self.assertEqual(
        len([t for t in r if t[0] == "elasticsearch.transport.server_open"]), 1, r)
    self.assertEqual(
        len([t for t in r if t[0] == "elasticsearch.thread_pool.snapshot.queue"]), 1, r)
    self.assertEqual(len([t for t in r if t[0] == "elasticsearch.active_shards"]), 1, r)

    # Forcing a "red" cluster status should produce exactly one event.
    self.check.cluster_status[conf['instances'][0].get('url')] = "red"
    self.check.check(conf['instances'][0])
    events = self.check.get_events()
    self.assertEqual(len(events), 1, events)
def testInit(self):
    """'mibs_folder' must be registered as a MIB source and
    'ignore_nonincreasing_oid' must be forwarded to the generator."""
    self.config = {"init_config": {'mibs_folder': '/etc/mibs'}}
    # Initialize the check from checks.d
    self.check = load_check('snmp', self.config, self.agentConfig)

    sources = self.check.cmd_generator.snmpEngine.msgAndPduDsp\
        .mibInstrumController.mibBuilder.getMibSources()
    self.assertTrue(any(src.fullPath() == '/etc/mibs' for src in sources))

    # Default: non-increasing OIDs are not ignored.
    self.assertFalse(self.check.cmd_generator.ignoreNonIncreasingOid)

    self.config = {"init_config": {"ignore_nonincreasing_oid": True}}
    self.check = load_check('snmp', self.config, self.agentConfig)
    self.assertTrue(self.check.cmd_generator.ignoreNonIncreasingOid)
def testChecks(self):
    """Run the mysql check twice via the legacy agent-config path: >=8
    metrics on the first run, >=16 (rates included) on the second."""
    if self.skip:
        return

    agent_config = {
        'mysql_server': 'localhost',
        'mysql_user': "******",
        'mysql_pass': "******",
        'version': '0.1',
        'api_key': 'toto'
    }
    # Build the instance list from the legacy agent configuration.
    parser = load_check('mysql', {'init_config': {}, 'instances': {}},
                        agent_config)
    conf = parser.parse_agent_config(agent_config)
    self.check = load_check('mysql', conf, agent_config)

    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) >= 8, metrics)

    # Wait >=1s so rate metrics have a usable interval on the next run.
    time.sleep(1)
    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) >= 16, metrics)
def test_apache(self):
    """Per-instance tags must be attached to metrics and every
    apache.can_connect service check must carry host/port tags."""
    agent_config = {'version': '0.1', 'api_key': 'toto'}
    config = {
        'init_config': {},
        'instances': [
            {'apache_status_url': 'http://localhost:8080/server-status',
             'tags': ['instance:first']},
            {'apache_status_url': 'http://localhost:8080/server-status?auto',
             'tags': ['instance:second']},
        ]
    }
    check = load_check('apache', config, agent_config)

    check.check(config['instances'][0])
    metrics = check.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['instance:first'])

    check.check(config['instances'][1])
    metrics = check.get_metrics()
    self.assertEquals(metrics[0][3].get('tags'), ['instance:second'])

    service_checks = check.get_service_checks()
    can_connect = [sc for sc in service_checks
                   if sc['check'] == 'apache.can_connect']
    for sc in can_connect:
        self.assertEquals(set(sc['tags']),
                          set(['host:localhost', 'port:8080']),
                          service_checks)
def testSNMPCheck(self):
    """First run yields only the gauge (rates need 2 points); the second
    run, >=1s later, adds the rate metric."""
    self.check = load_check('snmp', self.config, self.agentConfig)
    self.check.check(self.config['instances'][0])
    metrics = self.check.get_metrics()
    # Only the gauge shows up: the counter is submitted as a rate, which
    # cannot be computed from a single point.
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0][0], 'snmp.tcpCurrEstab')

    # Sleep so the rate interval is >= 1 second, then collect the rate.
    time.sleep(1)
    self.check.check(self.config['instances'][0])
    metrics = self.check.get_metrics()
    self.assertEqual(len(metrics), 2)

    for expected in ['snmp.udpDatagrams', 'snmp.tcpCurrEstab']:
        self.assertTrue(any(result[0] == expected for result in metrics))
def test_collector(self):
    """With check_timings enabled the payload must contain a
    datadog.agent.check_run_time metric tagged with each check's name."""
    agentConfig = {
        'api_key': 'test_apikey',
        'check_timings': True,
        'collect_ec2_tags': True,
        'collect_instance_metadata': False,
        'version': 'test',
        'tags': '',
    }
    # Run a single checks.d check as part of the collector.
    redis_config = {
        "init_config": {},
        "instances": [{"host": "localhost", "port": 6379}]
    }
    checks = [load_check('redisdb', redis_config, agentConfig)]

    c = Collector(agentConfig, [], {}, get_hostname(agentConfig))
    payload = c.run({
        'initialized_checks': checks,
        'init_failed_checks': {}
    })

    # Gather the tags of every timing metric in the payload.
    timing_tags = []
    for metric in payload['metrics']:
        if metric[0] == 'datadog.agent.check_run_time':
            timing_tags.extend(metric[3]['tags'])

    for check in checks:
        tag = "check:%s" % check.name
        assert tag in timing_tags, timing_tags
def test_nginx_plus(self):
    """parse_json on the nginx-plus fixture must yield exactly the
    expected metric list."""
    test_data = read_data_from_file('nginx_plus_in.json')
    # eval() on a checked-in fixture file; acceptable only because the
    # file is trusted test data, never external input.
    expected = eval(read_data_from_file('nginx_plus_out.python'))
    nginx = load_check('nginx', self.config, self.agent_config)
    parsed = nginx.parse_json(test_data)
    # Sort for a deterministic, order-insensitive comparison.
    parsed.sort()
    self.assertEquals(parsed, expected)
def test_check_real_process(self):
    "Check that we detect python running (at least this process)"
    config = {
        'instances': [{
            "name": "py",
            "search_string": ["python"],
            "exact_match": False,
            # NOTE(review): key looks like a typo for 'ignore_denied_access'
            # -- confirm against the process check's config parsing.
            "ignored_denied_access": True,
            "thresholds": {"warning": [1, 10], "critical": [1, 100]},
        }]
    }
    self.agentConfig = {
        'version': '0.1',
        'api_key': 'toto'
    }
    self.check = load_check('process', config, self.agentConfig)
    self.check.check(config['instances'][0])
    python_metrics = self.check.get_metrics()
    service_checks = self.check.get_service_checks()
    assert service_checks

    self.assertTrue(len(python_metrics) > 0)
    # system.process.number must be reported with a value >= 1 (at least
    # this very process).  The original asserted on the list itself
    # (`[...] >= 1`), which in Python 2 compares a list to an int and is
    # always true, so the check never actually ran.
    proc_counts = [m[2] for m in python_metrics
                   if m[0] == "system.process.number"]
    self.assertTrue(len(proc_counts) > 0, python_metrics)
    self.assertTrue(all(count >= 1 for count in proc_counts), proc_counts)

    self.assertTrue(
        len([t for t in service_checks if t['status'] == AgentCheck.OK]) > 0,
        service_checks)
    self.assertEquals(
        len([t for t in service_checks if t['status'] == AgentCheck.WARNING]),
        0, service_checks)
    self.assertEquals(
        len([t for t in service_checks if t['status'] == AgentCheck.CRITICAL]),
        0, service_checks)
def test_check(self):
    """Drive the process check through the fake PID fixtures and verify
    the aggregate service-check status counts."""
    config = {'init_config': {}, 'instances': []}
    self.agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('process', config, self.agentConfig)
    config = self.build_config(config)
    # Substitute the PID lookup with the test fixture.
    self.check.find_pids = self.find_pids

    for _ in self.nb_procs:
        for instance in config['instances']:
            self.check.check(instance)
        self.offset += 1

    service_checks = self.check.get_service_checks()
    assert service_checks
    self.assertTrue(type(service_checks) == list)
    self.assertTrue(len(service_checks) > 0)

    # Expected number of service checks per status (OK/WARNING/CRITICAL).
    for status, expected in [(0, 12), (1, 6), (2, 22)]:
        self.assertEquals(
            len([t for t in service_checks if t['status'] == status]),
            expected, service_checks)
def test_service_checks(self):
    """One OK service check for the live gearmand, one CRITICAL for the
    unreachable port."""
    config = {
        'instances': [
            {'host': '127.0.0.1', 'port': 4730},
            {'host': '127.0.0.1', 'port': 4731},
        ]
    }
    agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('gearmand', config, agentConfig)

    # First instance is reachable; the second one must raise.
    self.check.check(config['instances'][0])
    self.assertRaises(Exception, self.check.check, config['instances'][1])

    service_checks = self.check.get_service_checks()
    self.assertEqual(len(service_checks), 2)
    ok_check, critical_check = service_checks
    self.assertEqual(ok_check['check'], self.check.SERVICE_CHECK_NAME)
    self.assertEqual(ok_check['status'], AgentCheck.OK)
    self.assertEqual(critical_check['check'], self.check.SERVICE_CHECK_NAME)
    self.assertEqual(critical_check['status'], AgentCheck.CRITICAL)
def testTomcatMetrics(self):
    """Start two tomcats and run the JMX tomcat check against each
    configured instance, timing every run.  Skipped unconditionally."""
    raise SkipTest()
    agentConfig = {
        'tomcat_jmx_instance_1': 'localhost:8090:first_instance',
        'tomcat_jmx_instance_2': 'dummyurl:4444:fake_url',
        'tomcat_jmx_instance_3': 'monitorRole:tomcat@localhost:8091:second_instance_with_auth',
        'version': '0.1',
        'api_key': 'toto'
    }
    config = JmxCheck.parse_agent_config(agentConfig, 'tomcat')
    config['init_config'] = TOMCAT_CONFIG
    metrics_check = load_check('tomcat', config, agentConfig)

    tomcat6 = '/tmp/apache-tomcat-6/bin'
    tomcat7 = '/tmp/apache-tomcat-7/bin'
    self.start_tomcat(tomcat6, 8080)
    self.start_tomcat(tomcat7, 7070)

    timers_first_check = []
    for instance in config['instances']:
        try:
            start = time.time()
            metrics_check.check(instance)
            timers_first_check.append(time.time() - start)
        except Exception as e:
            # Fixed legacy "except Exception, e" syntax, which is a
            # SyntaxError on Python 3; "as e" works on 2.6+ and 3.x.
            print(e)
            continue
def testJavaMetric(self):
    """Start tomcat and solr, then run the generic JMX check against
    each configured instance, timing every run.  Skipped unconditionally."""
    raise SkipTest()
    agentConfig = {
        'java_jmx_instance_1': 'localhost:8090',
        'java_jmx_instance_2': 'dummyhost:9999:dummy',
        'java_jmx_instance_3': 'localhost:2222:second_instance',
        'version': '0.1',
        'api_key': 'toto'
    }
    config = JmxCheck.parse_agent_config(agentConfig, 'java')
    metrics_check = load_check('jmx', config, agentConfig)

    # Starting tomcat
    tomcat6 = '/tmp/apache-tomcat-6/bin'
    self.start_tomcat(tomcat6, 8080)

    # Starting solr with JMX remote enabled on port 2222.
    jmx_prefix = "-Dcom.sun.management.jmxremote"
    first_instance = "%s.port=2222 %s.authenticate=false -Djetty.port=8380" % (jmx_prefix, jmx_prefix)
    first_instance = self.start_solr(first_instance, 8983)

    timers_first_check = []
    for instance in config['instances']:
        try:
            start = time.time()
            metrics_check.check(instance)
            timers_first_check.append(time.time() - start)
        except Exception as e:
            # Fixed legacy "except Exception, e" syntax, which is a
            # SyntaxError on Python 3; "as e" works on 2.6+ and 3.x.
            print(e)
            continue
def setUp(self):
    """Build both the legacy agent config and the checks.d-style
    instance list for the memcache check."""
    self.agent_config = {
        "memcache_server": "localhost",
        "memcache_instance_1": "localhost:11211:mytag",
        "memcache_instance_2": "localhost:11211:mythirdtag",
    }
    instances = [
        {'url': "localhost"},
        {'url': "localhost", 'port': 11211, 'tags': ['instance:mytag']},
        {'url': "localhost", 'port': 11211, 'tags': ['instance:mythirdtag']},
    ]
    self.conf = {'init_config': {}, 'instances': instances}
    self.c = load_check('mcache', self.conf, self.agent_config)
def setUp(self):
    """Patch psutil so the process check sees one synthetic process with
    deterministic resource numbers (as_dict-based attribute access)."""
    self.psutil_process_patcher = mock.patch('psutil.Process')
    self.psutil_process_iter_patcher = mock.patch('psutil.process_iter')
    self.mock_process = self.psutil_process_patcher.start()
    self.mock_process_iter = self.psutil_process_iter_patcher.start()

    io_counters = mock.Mock(read_count=1, write_count=1,
                            read_bytes=1024, write_bytes=1024)
    fake_process = mock.Mock(**{
        'memory_info_ex.return_value': mock.Mock(rss=1048576),
        'num_threads.return_value': 1,
        'num_fds.return_value': 1,
        'cpu_percent.return_value': 1,
        'io_counters.return_value': io_counters,
    })
    # Identity attributes are served through as_dict().
    fake_process.as_dict.return_value = {
        'name': 'process_name',
        'pid': 1234,
        'username': '******',
        'cmdline': '/usr/bin/process_name',
    }

    self.mock_process_iter.return_value = [fake_process]
    self.mock_process.return_value = fake_process

    self.check = load_check('process', {
        'init_config': {},
        'instances': [{'name': 'test',
                       'search_string': ['process_name'],
                       'detailed': True}]
    })
def testChecks(self):
    """Run the mysql check twice: verify metric counts plus exactly one
    properly host/port-tagged service check per run."""
    if self.skip:
        return

    agentConfig = {'version': '0.1', 'api_key': 'toto'}
    conf = {
        'init_config': {},
        'instances': [{
            'server': 'localhost',
            'user': '******',
            'pass': '******',
            'options': {'replication': True},
        }]
    }
    # Initialize the check from checks.d
    self.check = load_check('mysql', conf, agentConfig)

    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) >= 8, metrics)

    # Service checks
    service_checks = self.check.get_service_checks()
    service_checks_count = len(service_checks)
    self.assertTrue(type(service_checks) == list)
    self.assertTrue(service_checks_count > 0)
    named = [sc for sc in service_checks
             if sc['check'] == self.check.SERVICE_CHECK_NAME]
    self.assertEquals(len(named), 1, service_checks)
    # Every service check must carry both the host and the port tag.
    for tag in ("host:localhost", "port:0"):
        tagged = [sc for sc in service_checks if tag in sc['tags']]
        self.assertEquals(len(tagged), service_checks_count, service_checks)

    # Wait >=1s so rate metrics have a usable interval.
    time.sleep(1)
    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) >= 16, metrics)
def test_redis_repl(self):
    """Replicate a key from master to slave, then verify the check on the
    master reports every replication metric."""
    master_instance = {
        'host': 'localhost',
        'port': NOAUTH_PORT
    }
    slave_instance = {
        'host': 'localhost',
        'port': AUTH_PORT,
        'password': '******'
    }
    repl_metrics = [
        'redis.replication.delay',
        'redis.replication.backlog_histlen',
        'redis.replication.delay',
        'redis.replication.master_repl_offset',
    ]
    master_db = redis.Redis(port=NOAUTH_PORT, db=14)
    slave_db = redis.Redis(port=AUTH_PORT,
                           password=slave_instance['password'], db=14)
    master_db.flushdb()

    # Assert that the replication works
    master_db.set('replicated:test', 'true')
    self.assertEquals(slave_db.get('replicated:test'), 'true')

    r = load_check('redisdb', {}, {})
    r.check(master_instance)
    metrics = self._sort_metrics(r.get_metrics())

    # Assert the presence of every replication metric.  The original
    # asserted on the list comprehension itself (`assert [x in keys ...]`),
    # which is always truthy for a non-empty list, so missing metrics
    # went undetected.
    keys = [m[0] for m in metrics]
    for expected in repl_metrics:
        assert expected in keys, "Missing %s in %s" % (expected, keys)
def test_redis_default(self):
    """End-to-end run of the redis check against a seeded test db:
    metric shape, tagging, per-db key counts, and rates on a second run.
    Python 2 only (uses the `long` builtin)."""
    # Base test, uses the noauth instance
    if self.is_travis():
        port = DEFAULT_PORT
    else:
        port = NOAUTH_PORT

    instance = {"host": "localhost", "port": port}

    # Seed a known state: 3 keys, one of them with an expiry.
    db = redis.Redis(port=port, db=14)  # Datadog's test db
    db.flushdb()
    db.set("key1", "value")
    db.set("key2", "value")
    # NOTE(review): argument order here is (name, value, time), which
    # matches old redis-py; newer redis-py setex is (name, time, value)
    # -- confirm against the pinned client version.
    db.setex("expirekey", "expirevalue", 1000)

    r = load_check("redisdb", {}, {})
    r.check(instance)
    metrics = self._sort_metrics(r.get_metrics())
    assert metrics, "No metrics returned"

    # Assert we have values, timestamps and tags for each metric.
    for m in metrics:
        assert isinstance(m[1], int)  # timestamp
        assert isinstance(m[2], (int, float, long))  # value
        tags = m[3]["tags"]
        expected_tags = ["redis_host:localhost", "redis_port:%s" % port]
        for e in expected_tags:
            assert e in tags

    def assert_key_present(expected, present, tolerance):
        "Assert we have the rest of the keys (with some tolerance for missing keys)"
        e = set(expected)
        p = set(present)
        assert len(e - p) < tolerance * len(e), pprint.pformat((p, e - p))

    # gauges collected?
    remaining_keys = [m[0] for m in metrics]
    expected = r.GAUGE_KEYS.values()
    assert_key_present(expected, remaining_keys, MISSING_KEY_TOLERANCE)

    # Assert that the keys metrics are tagged by db. just check db0, since
    # it's the only one we can guarantee is there.
    db_metrics = self._sort_metrics(
        [m for m in metrics
         if m[0] in ["redis.keys", "redis.expires"]
         and "redis_db:db14" in m[3]["tags"]])
    self.assertEquals(2, len(db_metrics))

    self.assertEquals("redis.expires", db_metrics[0][0])
    self.assertEquals(1, db_metrics[0][2])  # only "expirekey" expires
    self.assertEquals("redis.keys", db_metrics[1][0])
    self.assertEquals(3, db_metrics[1][2])  # key1, key2, expirekey

    # Run one more check and ensure we get total command count
    # and other rates
    time.sleep(5)
    r.check(instance)
    metrics = self._sort_metrics(r.get_metrics())
    keys = [m[0] for m in metrics]
    assert "redis.net.commands" in keys
def test_config_parser(self):
    """get_instance_config must honour explicit fields and fall back to
    defaults when they are omitted."""
    check = load_check(self.CHECK_NAME, {}, {})

    # Fully specified instance.
    full_instance = {
        "username": "******",
        "password": "******",
        "is_external": "yes",
        "url": "http://foo.bar",
        "tags": ["a", "b:c"],
    }
    parsed = check.get_instance_config(full_instance)
    self.assertEquals(parsed.username, "user")
    self.assertEquals(parsed.password, "pass")
    self.assertEquals(parsed.is_external, True)
    self.assertEquals(parsed.url, "http://foo.bar")
    self.assertEquals(parsed.tags, ["url:http://foo.bar", "a", "b:c"])
    self.assertEquals(parsed.timeout, check.DEFAULT_TIMEOUT)
    self.assertEquals(parsed.service_check_tags,
                      ["host:foo.bar", "port:None"])

    # Minimal instance: only url and timeout are given.
    minimal_instance = {
        "url": "http://192.168.42.42:12999",
        "timeout": 15
    }
    parsed = check.get_instance_config(minimal_instance)
    self.assertEquals(parsed.username, None)
    self.assertEquals(parsed.password, None)
    self.assertEquals(parsed.is_external, False)
    self.assertEquals(parsed.url, "http://192.168.42.42:12999")
    self.assertEquals(parsed.tags, ["url:http://192.168.42.42:12999"])
    self.assertEquals(parsed.timeout, 15)
    self.assertEquals(parsed.service_check_tags,
                      ["host:192.168.42.42", "port:12999"])
def test_invalid_metric(self):
    """A metric referencing a nonexistent table must make the check raise
    and emit a single CRITICAL snmp service check."""
    self.config = {
        "instances": [{
            "ip_address": "localhost",
            "port": 161,
            "community_string": "public",
            "metrics": [
                {
                    "MIB": "IF-MIB",
                    "table": "ifTable",
                    "symbols": ["ifInOctets", "ifOutOctets"],
                },
                {
                    "MIB": "IF-MIB",
                    "table": "noIdeaWhatIAmDoingHere",
                    "symbols": ["ifInOctets", "ifOutOctets"],
                },
            ]
        }]
    }
    self.check = load_check('snmp', self.config, self.agentConfig)
    # Make it fail faster.
    self.check.RETRIES = 0
    self.check.TIMEOUT = 0.5

    # We expect: No symbol IF-MIB::noIdeaWhatIAmDoingHere
    self.assertRaises(Exception, self.check.check,
                      self.config['instances'][0])

    # Exactly one CRITICAL snmp service check, tagged with the device.
    snmp_checks = [sc for sc in self.check.get_service_checks()
                   if sc['check'].startswith('snmp')]
    self.assertEquals(len(snmp_checks), 1, snmp_checks)
    for sc in snmp_checks:
        self.assertEquals(sc['status'], self.check.CRITICAL, sc)
        self.assertEquals(sc['tags'], ['snmp_device:localhost'], sc)
def testChecks(self):
    """Two runs of the mysql check: >=8 metrics on the first run and
    >=16 (rates included) on the second."""
    if self.skip:
        return

    agentConfig = {'version': '0.1', 'api_key': 'toto'}
    instance = {
        'server': 'localhost',
        'user': '******',
        'pass': '******',
        'options': {'replication': True},
    }
    # Initialize the check from checks.d
    self.check = load_check(
        'mysql', {'init_config': {}, 'instances': [instance]}, agentConfig)

    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) >= 8, metrics)

    # Wait >=1s so rate metrics have a usable interval on the next run.
    time.sleep(1)
    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) >= 16, metrics)
def test_table_SNMPCheck(self):
    """Table-based SNMP metrics must be tagged with the device, the
    interface description and the table index, and the service checks
    (one per run) must all be OK."""
    self.config = {
        "instances": [{
            "ip_address": "localhost",
            "port": 161,
            "community_string": "public",
            "metrics": [{
                "MIB": "IF-MIB",
                "table": "ifTable",
                "symbols": ["ifInOctets", "ifOutOctets"],
                "metric_tags": [{
                    "tag": "interface",
                    "column": "ifDescr"
                }, {
                    "tag": "dumbindex",
                    "index": 1
                }]
            }]
        }]
    }
    self.check = load_check('snmp', self.config, self.agentConfig)
    self.check.check(self.config['instances'][0])

    # Sleep for 1 second so the rate interval >=1
    time.sleep(1)
    # Run the check again so that we get the rates
    self.check.check(self.config['instances'][0])
    metrics = self.check.get_metrics()

    # nb of metrics depends on the nb of interfaces on the test machine
    # so it's not possible to specify an exact number
    self.assertTrue(len(metrics) > 0, "No metrics")
    for metric in metrics:
        self.assertTrue(metric[0] in ['snmp.ifInOctets', 'snmp.ifOutOctets'],
                        metric[0])
        tags = metric[3]['tags']
        # Assert that all the wanted tags are here
        self.assertEquals(len(tags), 3, tags)
        tag_group_expected = ["snmp_device", "dumbindex", "interface"]
        for tag in tags:
            tag_group = tag.split(":")[0]
            self.assertTrue(tag_group in tag_group_expected, tag_group)
            if tag_group == "interface":
                interface_type = tag.split(":")[1]
                # The ifDescr value must be pretty-printed text, not a
                # raw numeric value.
                try:
                    float(interface_type)
                except ValueError:
                    pass
                else:
                    self.fail("Tag discovered not pretty printed %s" % interface_type)

    # Service checks
    service_checks = self.check.get_service_checks()
    service_checks = [sc for sc in service_checks
                      if sc['check'].startswith('snmp')]
    service_checks_count = len(service_checks)
    # We run the check twice
    self.assertEquals(service_checks_count, 2, service_checks)
    for sc in service_checks:
        self.assertEquals(sc['status'], self.check.OK, sc)
        self.assertEquals(sc['tags'], ['snmp_device:localhost'], sc)
def test_apache(self):
    """Each instance's tags must propagate to its metrics; every
    apache.can_connect service check carries host and port tags."""
    agent_config = {'version': '0.1', 'api_key': 'toto'}
    instances = [
        {'apache_status_url': 'http://localhost:8080/server-status',
         'tags': ['instance:first']},
        {'apache_status_url': 'http://localhost:8080/server-status?auto',
         'tags': ['instance:second']},
    ]
    config = {'init_config': {}, 'instances': instances}
    check = load_check('apache', config, agent_config)

    # Each run must tag its metrics with the instance's own tags.
    for instance, expected_tags in zip(instances,
                                       (['instance:first'],
                                        ['instance:second'])):
        check.check(instance)
        metrics = check.get_metrics()
        self.assertEquals(metrics[0][3].get('tags'), expected_tags)

    service_checks = check.get_service_checks()
    can_connect = [sc for sc in service_checks
                   if sc['check'] == 'apache.can_connect']
    for sc in can_connect:
        self.assertEquals(set(sc['tags']),
                          set(['host:localhost', 'port:8080']),
                          service_checks)
def setUp(self):
    """Patch psutil with one fake 'process_name' process exposing fixed
    resource counters (method-style attribute access)."""
    self.psutil_process_patcher = mock.patch('psutil.Process')
    self.psutil_process_iter_patcher = mock.patch('psutil.process_iter')
    self.mock_process = self.psutil_process_patcher.start()
    self.mock_process_iter = self.psutil_process_iter_patcher.start()

    attrs = {
        'name.return_value': 'process_name',
        'pid': 1234,
        'username.return_value': 'user',
        'cmdline.return_value': '/usr/bin/process_name',
        'memory_info_ex.return_value': mock.Mock(rss=1048576),
        'num_threads.return_value': 1,
        'num_fds.return_value': 1,
        'cpu_percent.return_value': 1,
        'io_counters.return_value': mock.Mock(read_count=1,
                                              write_count=1,
                                              read_bytes=1024,
                                              write_bytes=1024),
    }
    fake_process = mock.Mock(**attrs)
    self.mock_process_iter.return_value = [fake_process]
    self.mock_process.return_value = fake_process

    self.check = load_check('process', {
        'init_config': {},
        'instances': [{'name': 'test',
                       'search_string': ['process_name'],
                       'detailed': True}]
    })
def test_service_checks(self):
    """One OK service check for the live couchdb, one CRITICAL for the
    unreachable server."""
    config = {
        'instances': [
            {'server': 'http://localhost:5984'},
            {'server': 'http://localhost:5985'},
        ]
    }
    agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('couch', config, agentConfig)

    # First instance is reachable; the second one must raise.
    self.check.check(config['instances'][0])
    self.assertRaises(Exception, self.check.check, config['instances'][1])

    service_checks = self.check.get_service_checks()
    self.assertEqual(len(service_checks), 2)
    ok_check, critical_check = service_checks
    self.assertEqual(ok_check['check'], self.check.SERVICE_CHECK_NAME)
    self.assertEqual(ok_check['status'], AgentCheck.OK)
    self.assertEqual(critical_check['check'], self.check.SERVICE_CHECK_NAME)
    self.assertEqual(critical_check['status'], AgentCheck.CRITICAL)
def test_build_event(self):
    """Only builds newer than the first-seen one generate events, and a
    new build generates exactly one event with the right fields."""
    agent_config = {'version': '0.1', 'api_key': 'toto'}
    check = load_check('teamcity', CONFIG, agent_config)

    # First poll just records the latest build: no metrics, no events.
    with patch('requests.get', get_mock_first_build):
        check.check(check.instances[0])
    self.assertEquals(len(check.get_metrics()), 0)
    # Nothing should have happened because we only create events
    # for newer builds
    self.assertEquals(len(check.get_events()), 0)

    # A newer build shows up: exactly one event describing it.
    with patch('requests.get', get_mock_one_more_build):
        check.check(check.instances[0])
    events = check.get_events()
    self.assertEquals(len(events), 1)
    event = events[0]
    self.assertEquals(event['msg_title'], "Build for One test build successful")
    self.assertEquals(event['msg_text'], "Build Number: 2\nDeployed To: buildhost42.dtdg.co\n\nMore Info: http://localhost:8111/viewLog.html?buildId=2&buildTypeId=TestProject_TestBuild")
    self.assertEquals(event['tags'], ['build', 'one:tag', 'one:test'])
    self.assertEquals(event['host'], "buildhost42.dtdg.co")

    # One more check should not create any more events
    with patch('requests.get', get_mock_one_more_build):
        check.check(check.instances[0])
    self.assertEquals(len(check.get_events()), 0)
def test_build_event(self):
    """The teamcity check emits one event per newly seen build and
    nothing when re-polling the same data."""
    agent_config = {'version': '0.1', 'api_key': 'toto'}
    check = load_check('teamcity', CONFIG, agent_config)
    instance = check.instances[0]

    # Initial poll: the check only memorizes the current latest build.
    with patch('requests.get', get_mock_first_build):
        check.check(instance)
    self.assertEquals(len(check.get_metrics()), 0)
    # Nothing should have happened because we only create events
    # for newer builds
    self.assertEquals(len(check.get_events()), 0)

    # Newer build: exactly one event, fully populated.
    with patch('requests.get', get_mock_one_more_build):
        check.check(instance)
    events = check.get_events()
    self.assertEquals(len(events), 1)
    self.assertEquals(events[0]['msg_title'],
                      "Build for One test build successful")
    self.assertEquals(events[0]['msg_text'], "Build Number: 2\nDeployed To: buildhost42.dtdg.co\n\nMore Info: http://localhost:8111/viewLog.html?buildId=2&buildTypeId=TestProject_TestBuild")
    self.assertEquals(events[0]['tags'], ['build', 'one:tag', 'one:test'])
    self.assertEquals(events[0]['host'], "buildhost42.dtdg.co")

    # One more check should not create any more events
    with patch('requests.get', get_mock_one_more_build):
        check.check(instance)
    self.assertEquals(len(check.get_events()), 0)
def setUp(self):
    """Build a DynamicCheckHelper over a process-check instance whose
    'mapping' section exercises gauges, rates, plain and regex-extracted
    dimensions, and a group with inherited dimensions."""
    p = psutil.Process(os.getpid())
    self._config = {
        'init_config': {},
        'instances': [{
            'name': 'test',
            # Search for this very test process so the check matches.
            'search_string': [p.name()],
            'detailed': True,
            'mapping': {
                'gauges': ['stats.(MessagesAvg)'],
                'rates': ['MessagesTotal'],
                'dimensions': {
                    'index': 'index',
                    'simple_dimension': 'simple_label',
                    # Extract the middle part of k8s-style labels.
                    'complex_dimension': {
                        'source_key': 'complex_label',
                        'regex': 'k8s_([._\-a-zA-Z0-9]*)_postfix'
                    },
                    # Same source, but keep the '_postfix' suffix.
                    'complex_dimension_rest': {
                        'source_key': 'complex_label',
                        'regex': 'k8s_([._\-a-zA-Z0-9]*_postfix)'
                    }
                },
                'groups': {
                    'testgroup': {
                        'dimensions': {
                            'user': '******'
                        },
                        'rates': ['.*\.Responses.*',
                                  '(sec_auth_.*).stats',
                                  '(io_service_bytes)_stats_Total']
                    }
                    # dimensions should be inherited from above
                }
            }
        }]
    }
    self.check = load_check('process', self._config)  # TODO mock check
    self.helper = DynamicCheckHelper(self.check, 'dynhelper')
def test_check(self):
    """Exercise the process check over the PID fixtures and count
    service checks per status."""
    config = {'init_config': {}, 'instances': []}
    self.agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('process', config, self.agentConfig)
    config = self.build_config(config)
    # Replace the real PID lookup with the fixture-driven one.
    self.check.find_pids = self.find_pids

    for _ in self.nb_procs:
        for instance in config['instances']:
            self.check.check(instance)
        self.offset += 1

    service_checks = self.check.get_service_checks()
    assert service_checks
    self.assertTrue(type(service_checks) == list)
    self.assertTrue(len(service_checks) > 0)

    status_counts = [(0, 12), (1, 6), (2, 22)]
    for status, expected in status_counts:
        matching = [t for t in service_checks if t['status'] == status]
        self.assertEquals(len(matching), expected, service_checks)
def testMongoOldConfig(self):
    """Run the mongo check twice against each of the two server
    instances and sanity-check the key metric values."""
    conf = {
        'init_config': {},
        'instances': [
            {'server': "mongodb://localhost:%s/test" % PORT1},
            {'server': "mongodb://localhost:%s/test" % PORT2},
        ]
    }
    # Sanity bounds for the key serverStatus-derived values.
    metric_val_checks = {
        'mongodb.connections.current': lambda x: x >= 1,
        'mongodb.connections.available': lambda x: x >= 1,
        'mongodb.uptime': lambda x: x >= 0,
        'mongodb.mem.resident': lambda x: x > 0,
        'mongodb.mem.virtual': lambda x: x > 0
    }

    self.check = load_check('mongo', conf, {})

    for instance in conf['instances']:
        # First run primes the rates; the second, >=1s later, reports
        # them.
        self.check.check(instance)
        time.sleep(1)
        self.check.check(instance)

        metrics = self.check.get_metrics()
        assert metrics
        self.assertTrue(type(metrics) == list)
        self.assertTrue(len(metrics) > 0)
        for m in metrics:
            validate = metric_val_checks.get(m[0])
            if validate is not None:
                self.assertTrue(validate(m[2]))
def test_types_support(self):
    """Counter OIDs become rates (two points needed) while gauge/integer
    OIDs report immediately; unsupported string OIDs are dropped."""
    self.config = {
        "instances": [{
            "ip_address": "localhost",
            "port": 11111,
            "community_string": "public",
            "metrics": [
                {
                    "OID": "1.3.6.1.2.1.7.1",  # Counter32
                    "name": "IAmACounter32"
                },
                {
                    "OID": "1.3.6.1.2.1.4.31.1.1.6.1",  # Counter32
                    "name": "IAmACounter64"
                },
                {
                    "OID": "1.3.6.1.2.1.4.24.6.0",  # Gauge32
                    "name": "IAmAGauge32"
                },
                {
                    "OID": "1.3.6.1.2.1.88.1.1.1.0",  # Integer
                    "name": "IAmAnInteger"
                },
                {
                    "OID": "1.3.6.1.2.1.25.6.3.1.2.637",  # String (not supported)
                    "name": "IAmString"
                }
            ]
        }]
    }
    self.check = load_check('snmp', self.config, self.agentConfig)

    self.check.check(self.config['instances'][0])
    metrics = self.check.get_metrics()
    # Only the gauge and the integer can be reported from a single run.
    self.assertEqual(len(metrics), 2)

    # Sleep for 1 second so the rate interval >=1
    time.sleep(1)
    # Run the check again so we get the rates
    self.check.check(self.config['instances'][0])
    metrics = self.check.get_metrics()
    self.assertEqual(len(metrics), 4)

    expected_metrics = [
        'snmp.IAmACounter32',
        'snmp.IAmACounter64',
        'snmp.IAmAGauge32',
        'snmp.IAmAnInteger'
    ]
    for metric in expected_metrics:
        # for/else: fail only when no collected result matched this name.
        for result in metrics:
            if result[0] == metric:
                break
        else:
            self.fail("Missing metric: %s" % metric)
def __init__(self, *args, **kwargs):
    """Wire the check up with a mocked connection and canned stats so no
    live server is needed."""
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.config = {"instances": [{"foo": "bar"}]}
    self.check = load_check(self.CHECK_NAME, self.config, {})
    # No real connection: return a stub carrying only the aggregation tag.
    self.check._connect = Mock(
        return_value=(None, None, ["aggregation_key:localhost:8080"]))
    canned_stats = self.check.load_json(
        read_data_from_file("riakcs_in.json"))
    self.check._get_stats = Mock(return_value=canned_stats)
def setUp(self):
    """Build the memcache check from the legacy agent config and parse
    it into checks.d-style instances."""
    self.agent_config = {
        "memcache_server": "localhost",
        "memcache_instance_1": "localhost:11211:mytag",
        "memcache_instance_2": "localhost:11211:mythirdtag",
    }
    empty_conf = {'init_config': {}, 'instances': {}}
    self.c = load_check('mcache', empty_conf, self.agent_config)
    self.conf = self.c.parse_agent_config(self.agent_config)
def test_service_checks(self):
    """An unreachable server must produce exactly one CRITICAL service
    check tagged with the aggregation key."""
    self.check = load_check(self.CHECK_NAME, self.config, {})
    # The run is expected to fail with a connection error.
    self.assertRaises(error, lambda: self.run_check(self.config))
    self.assertEqual(len(self.service_checks), 1, self.service_checks)
    self.assertServiceCheck(
        self.check.SERVICE_CHECK_NAME,
        status=AgentCheck.CRITICAL,
        tags=['aggregation_key:localhost:8080'])
def init_check(self, config, check_name):
    """Load `check_name` with a minimal agent config and append it to
    self.checks."""
    self.agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check(check_name, config, self.agentConfig)
    self.checks.append(self.check)
def __init__(self, *args, **kwargs):
    """Configure the riakcs check with dummy credentials and mock out
    the connection and the stats fetch."""
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.config = {
        "instances": [{"access_id": "foo", "access_secret": "bar"}]
    }
    self.check = load_check(self.CHECK_NAME, self.config, {})
    # Stubbed connection: only the aggregation tag matters to the tests.
    self.check._connect = Mock(
        return_value=(None, None, ["aggregation_key:localhost:8080"]))
    canned = self.check.load_json(read_data_from_file("riakcs_in.json"))
    self.check._get_stats = Mock(return_value=canned)
def test_table_SNMPCheck(self):
    """Query the IF-MIB ifTable twice and verify rate metrics and tags.

    The number of metrics depends on how many interfaces exist on the
    test machine, so only the metric names and the tag structure are
    asserted exactly.
    """
    self.config = {
        "instances": [{
            "ip_address": "localhost",
            "port": 161,
            "community_string": "public",
            "metrics": [{
                "MIB": "IF-MIB",
                "table": "ifTable",
                "symbols": ["ifInOctets", "ifOutOctets"],
                "metric_tags": [
                    {"tag": "interface", "column": "ifDescr"},
                    {"tag": "dumbindex", "index": 1},
                ],
            }]
        }]
    }
    self.check = load_check('snmp', self.config, self.agentConfig)
    self.check.check(self.config['instances'][0])
    # Sleep for 1 second so the rate interval >= 1
    time.sleep(1)
    # Run the check again so that we get the rates
    self.check.check(self.config['instances'][0])
    metrics = self.check.get_metrics()
    # nb of metrics depends on the nb of interfaces on the test machine
    # so it's not possible to specify an exact number
    self.assertTrue(len(metrics) > 0, "No metrics")
    tag_group_expected = ["snmp_device", "dumbindex", "interface"]
    for metric in metrics:
        self.assertTrue(
            metric[0] in ['snmp.ifInOctets', 'snmp.ifOutOctets'], metric[0])
        tags = metric[3]['tags']
        # Assert that all the wanted tags are here
        # (assertEqual, not the deprecated assertEquals alias)
        self.assertEqual(len(tags), 3, tags)
        for tag in tags:
            tag_group = tag.split(":")[0]
            self.assertTrue(tag_group in tag_group_expected, tag_group)
            if tag_group == "interface":
                interface_type = tag.split(":")[1]
                # ifDescr must be pretty-printed (an interface name, not
                # a raw number); float() succeeding means it was not.
                try:
                    float(interface_type)
                except ValueError:
                    pass
                else:
                    self.fail("Tag discovered not pretty printed %s"
                              % interface_type)
def testMongoCheck(self):
    """Exercise the mongo check against two live local servers.

    Skipped: requires a running MongoDB (see the SkipTest below).
    """
    raise SkipTest('Requires MongoDB')
    self.config = {
        'instances': [
            {'server': "mongodb://localhost:%s/test" % PORT1},
            {'server': "mongodb://localhost:%s/test" % PORT2},
        ]
    }
    # Test mongodb with checks_d
    self.check = load_check('mongo', self.config, self.agent_config)
    value_predicates = {
        'mongodb.connections.current': lambda x: x >= 1,
        'mongodb.connections.available': lambda x: x >= 1,
        'mongodb.uptime': lambda x: x >= 0,
        'mongodb.mem.resident': lambda x: x > 0,
        'mongodb.mem.virtual': lambda x: x > 0,
    }

    def run_and_verify(instance):
        # First run primes the rate computations.
        self.check.check(instance)
        # Sleep for 1 second so the rate interval >= 1
        time.sleep(1)
        # Second run produces the actual rate values.
        self.check.check(instance)
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)
        for m in metrics:
            predicate = value_predicates.get(m[0])
            if predicate is not None:
                self.assertTrue(predicate(m[2]))

    run_and_verify(self.config['instances'][0])
    run_and_verify(self.config['instances'][1])
def testInit(self):
    """Custom MIB folders and ignore_nonincreasing_oid must be honored."""
    self.config = {"init_config": {'mibs_folder': '/etc/mibs'}}
    # Initialize the check from checks.d
    self.check = load_check('snmp', self.config, self.agentConfig)
    builder = (self.check.cmd_generator.snmpEngine.msgAndPduDsp
               .mibInstrumController.mibBuilder)
    # The custom folder must appear among the configured MIB sources.
    self.assertTrue(
        any(source.fullPath() == '/etc/mibs'
            for source in builder.getMibSources()))
    # The flag is off by default...
    self.assertFalse(self.check.cmd_generator.ignoreNonIncreasingOid)
    # ...and on when explicitly requested in init_config.
    self.config = {"init_config": {"ignore_nonincreasing_oid": True}}
    self.check = load_check('snmp', self.config, self.agentConfig)
    self.assertTrue(self.check.cmd_generator.ignoreNonIncreasingOid)
def testElasticChecksD(self):
    """Run the elastic check against a live local ES server and verify
    metrics, service checks and the red-cluster-status event.

    Fixes: deprecated ``assertEquals`` alias and the non-idiomatic
    ``type(r) == type([])`` check (now ``assertIsInstance``), matching
    the modernized variant of this test elsewhere in the suite.
    """
    agentConfig = {
        'elasticsearch': 'http://localhost:%s' % PORT,
        'version': '0.1',
        'api_key': 'toto'
    }
    conf = {
        'init_config': {},
        'instances': [
            {'url': 'http://localhost:%s' % PORT},
        ]
    }
    # Initialize the check from checks.d
    self.check = load_check('elastic', conf, agentConfig)
    self.check.check(conf['instances'][0])
    r = self.check.get_metrics()
    self.assertIsInstance(r, list)
    self.assertTrue(len(r) > 0)
    # Each of these metrics must be reported exactly once.
    expected_once = [
        "elasticsearch.get.total",
        "elasticsearch.search.fetch.total",
        "jvm.mem.heap_committed",
        "jvm.mem.heap_used",
        "jvm.threads.count",
        "jvm.threads.peak_count",
        "elasticsearch.transport.rx_count",
        "elasticsearch.transport.tx_size",
        "elasticsearch.transport.server_open",
        "elasticsearch.thread_pool.snapshot.queue",
        "elasticsearch.active_shards",
    ]
    for name in expected_once:
        self.assertEqual(len([t for t in r if t[0] == name]), 1, r)

    # Checks enabled for specific ES versions
    version = self.check._get_es_version('http://localhost:%s' % PORT)
    if version >= [0, 90, 10]:
        # ES versions 0.90.10 and above
        pass
    else:
        # ES version 0.90.9 and below
        self.assertEqual(
            len([t for t in r if t[0] == "jvm.gc.collection_time"]), 1, r)

    # Service checks
    service_checks = self.check.get_service_checks()
    service_checks_count = len(service_checks)
    self.assertIsInstance(service_checks, list)
    self.assertTrue(service_checks_count > 0)
    self.assertEqual(
        len([sc for sc in service_checks
             if sc['check'] == self.check.SERVICE_CHECK_CLUSTER_STATUS]),
        1, service_checks)
    self.assertEqual(
        len([sc for sc in service_checks
             if sc['check'] == self.check.SERVICE_CHECK_CONNECT_NAME]),
        1, service_checks)
    # Assert that all service checks have the proper tags: host and port
    self.assertEqual(
        len([sc for sc in service_checks
             if "host:localhost" in sc['tags']]),
        service_checks_count, service_checks)
    self.assertEqual(
        len([sc for sc in service_checks
             if "port:%s" % PORT in sc['tags']]),
        service_checks_count, service_checks)

    # Forcing a red cluster status must produce exactly one event.
    self.check.cluster_status[conf['instances'][0].get('url')] = "red"
    self.check.check(conf['instances'][0])
    events = self.check.get_events()
    self.assertEqual(len(events), 1, events)
def testTokuMXCheck(self):
    """Run the tokumx check twice per instance and validate key metrics.

    Fixes the non-idiomatic ``type(metrics) == type([])`` check (now
    ``assertIsInstance``) and dedupes the two identical per-instance
    passes into one loop.
    """
    self.agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.config = {
        'instances': [
            {'server': "mongodb://localhost:%s/test" % PORT1},
            {'server': "mongodb://localhost:%s/test" % PORT2},
        ]
    }
    # Test mongodb with checks.d
    self.check = load_check('tokumx', self.config, self.agentConfig)
    metric_val_checks = {
        'mongodb.connections.current': lambda x: x >= 1,
        'mongodb.connections.available': lambda x: x >= 1,
        'mongodb.uptime': lambda x: x >= 0,
        'mongodb.ft.cachetable.size.current': lambda x: x > 0,
        'mongodb.ft.cachetable.size.limit': lambda x: x > 0,
    }
    for instance in self.config['instances']:
        # Run the check against our running server
        self.check.check(instance)
        # Sleep for 1 second so the rate interval >= 1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(instance)
        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)
        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
def testMongoCheck(self):
    """Run the mongo check twice per instance and validate key metrics.

    Skipped: requires a running MongoDB. Uses ``assertIsInstance``
    instead of ``assertTrue(isinstance(...))`` for a better failure
    message, and dedupes the two identical per-instance passes.
    """
    raise SkipTest('Requires MongoDB')
    self.config = {
        'instances': [
            {'server': "mongodb://localhost:%s/test" % PORT1},
            {'server': "mongodb://localhost:%s/test" % PORT2},
        ]
    }
    # Test mongodb with checks_d
    self.check = load_check('mongo', self.config, self.agent_config)
    metric_val_checks = {
        'mongodb.connections.current': lambda x: x >= 1,
        'mongodb.connections.available': lambda x: x >= 1,
        'mongodb.uptime': lambda x: x >= 0,
        'mongodb.mem.resident': lambda x: x > 0,
        'mongodb.mem.virtual': lambda x: x > 0,
    }
    for instance in self.config['instances']:
        # Run the check against our running server
        self.check.check(instance)
        # Sleep for 1 second so the rate interval >= 1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(instance)
        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)
        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
def testChecks(self):
    """Integration test for the postgres check: metrics, rates and
    service checks against a local ``datadog_test`` database.

    Fixes: deprecated ``assertEquals`` alias and the non-idiomatic
    ``type(service_checks) == type([])`` check (now ``assertIsInstance``).
    """
    config = {
        'instances': [
            {
                'host': 'localhost',
                'port': 5432,
                'username': '******',
                'password': '******',
                'dbname': 'datadog_test',
                'relations': ['persons'],
            }
        ]
    }
    agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('postgres', config, agentConfig)
    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(
        len([m for m in metrics if m[0] == u'postgresql.connections']) >= 1,
        pprint(metrics))
    self.assertTrue(
        len([m for m in metrics if m[0] == u'postgresql.dead_rows']) >= 1,
        pprint(metrics))
    self.assertTrue(
        len([m for m in metrics if m[0] == u'postgresql.live_rows']) >= 1,
        pprint(metrics))
    # Don't test for locks
    # self.assertTrue(len([m for m in metrics if m[0] == u'postgresql.locks']) >= 1, pprint(metrics))

    # Brittle tests
    self.assertTrue(4 <= len(metrics) <= 6, metrics)
    self.assertTrue(
        4 <= len([m for m in metrics
                  if 'db:datadog_test' in str(m[3]['tags'])]) <= 5,
        pprint(metrics))
    self.assertTrue(
        len([m for m in metrics
             if 'table:persons' in str(m[3]['tags'])]) == 2,
        pprint(metrics))

    # Rate metrics, need 2 collection rounds
    time.sleep(1)
    metrics = self.check.get_metrics()
    self.assertTrue(
        len([m for m in metrics
             if m[0] == u'postgresql.bgwriter.sync_time']) >= 1,
        pprint(metrics))

    # Service checks
    service_checks = self.check.get_service_checks()
    service_checks_count = len(service_checks)
    self.assertIsInstance(service_checks, list)
    self.assertTrue(service_checks_count > 0)
    self.assertEqual(
        len([sc for sc in service_checks
             if sc['check'] == "postgres.can_connect"]),
        1, service_checks)
    # Assert that all service checks have the proper tags: host, port and db
    self.assertEqual(
        len([sc for sc in service_checks
             if "host:localhost" in sc['tags']]),
        service_checks_count, service_checks)
    self.assertEqual(
        len([sc for sc in service_checks
             if "port:%s" % config['instances'][0]['port'] in sc['tags']]),
        service_checks_count, service_checks)
    self.assertEqual(
        len([sc for sc in service_checks
             if "db:%s" % config['instances'][0]['dbname'] in sc['tags']]),
        service_checks_count, service_checks)

    time.sleep(1)
    self.check.run()
    metrics = self.check.get_metrics()
    self.assertTrue(len(metrics) == 20, metrics)
    self.assertTrue(
        len([m for m in metrics
             if 'db:datadog_test' in str(m[3]['tags'])]) == 20,
        metrics)
    self.assertTrue(
        len([m for m in metrics
             if 'table:persons' in str(m[3]['tags'])]) == 8,
        metrics)
def testMongoCheck(self):
    """Run the tokumx check twice per instance and validate key metrics.

    Fixes the non-idiomatic ``type(metrics) == type([])`` check (now
    ``assertIsInstance``) and dedupes the two identical per-instance
    passes into one loop.
    """
    self.config = {
        'instances': [
            {'server': "mongodb://localhost:%s/test" % PORT1},
            {'server': "mongodb://localhost:%s/test" % PORT2},
        ]
    }
    # Test mongodb with checks.d
    self.check = load_check('tokumx', self.config, self.agentConfig)
    metric_val_checks = {
        'mongodb.connections.current': lambda x: x >= 1,
        'mongodb.connections.available': lambda x: x >= 1,
        'mongodb.uptime': lambda x: x >= 0,
        'mongodb.ft.cachetable.size.current': lambda x: x > 0,
        'mongodb.ft.cachetable.size.limit': lambda x: x > 0,
    }
    for instance in self.config['instances']:
        # Run the check against our running server
        self.check.check(instance)
        # Sleep for 1 second so the rate interval >= 1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(instance)
        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)
        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
def setUp(self):
    """Configure the process check to search for a 'python' process."""
    instance = {
        'name': 'test',
        'search_string': ['python'],
        'detailed': False,
    }
    config = {'init_config': {}, 'instances': [instance]}
    self.check = load_check('process', config)
def setUp(self):
    """Point the couchbase check at a local server with credentials."""
    instance = {
        'server': 'http://localhost:8091',
        'user': '******',
        'password': '******',
    }
    self.config = {'instances': [instance]}
    self.agent_config = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('couchbase', self.config, self.agent_config)
def test_redis_auth(self):
    """A correct password yields metrics; wrong/empty passwords must raise.

    Bug fix: the wrong-password branches previously asserted only inside
    ``except`` — if ``check()`` raised nothing, the test silently passed.
    An ``else: self.fail(...)`` now catches that case.
    """
    # correct password
    r = load_check('redisdb', {}, {})
    instance = {
        'host': 'localhost',
        'port': AUTH_PORT,
        'password': '******'
    }
    r.check(instance)
    metrics = self._sort_metrics(r.get_metrics())
    assert len(metrics) > 0, "No metrics returned"

    # wrong passwords
    instances = [
        {'host': 'localhost', 'port': AUTH_PORT, 'password': ''},
        {'host': 'localhost', 'port': AUTH_PORT, 'password': '******'},
    ]
    r = load_check('redisdb', {}, {})
    try:
        r.check(instances[0])
    except Exception as e:
        self.assertTrue(
            # 2.8
            'noauth authentication required' in str(e).lower()
            # previously
            or 'operation not permitted' in str(e).lower(),
            str(e))
    else:
        self.fail("Empty password did not raise an error")

    r = load_check('redisdb', {}, {})
    try:
        r.check(instances[1])
    except Exception as e:
        self.assertTrue('invalid password' in str(e).lower(), str(e))
    else:
        self.fail("Wrong password did not raise an error")
def setUp(self):
    """Configure the process check to track the current test process."""
    current_process = psutil.Process(os.getpid())
    config = {
        'init_config': {},
        'instances': [{
            'name': 'test',
            # Search for this very process by its executable name.
            'search_string': [current_process.name()],
            'detailed': False,
        }],
    }
    self.check = load_check('process', config)
def setUp(self):
    """Point the couchbase check at a local server (no credentials)."""
    self.config = {
        'instances': [{'server': 'http://localhost:8091'}]
    }
    self.agentConfig = {'version': '0.1', 'api_key': 'toto'}
    self.check = load_check('couchbase', self.config, self.agentConfig)
def testElasticChecksD(self):
    """Legacy-config path: parse the agent config into instances, run the
    elastic check and verify metrics plus the red-cluster event.

    Fixes: deprecated ``assertEquals`` alias and the non-idiomatic
    ``type(r) == type([])`` check (now ``assertIsInstance``).
    """
    agentConfig = {
        'elasticsearch': 'http://localhost:%s' % PORT,
        'version': '0.1',
        'api_key': 'toto'
    }
    # Initialize the check from checks.d
    c = load_check('elastic', {'init_config': {}, 'instances': {}},
                   agentConfig)
    conf = c.parse_agent_config(agentConfig)
    self.check = load_check('elastic', conf, agentConfig)
    self.check.check(conf['instances'][0])
    r = self.check.get_metrics()
    self.assertIsInstance(r, list)
    self.assertTrue(len(r) > 0)
    # Each of these metrics must be reported exactly once.
    expected_once = [
        "elasticsearch.get.total",
        "elasticsearch.search.fetch.total",
        "jvm.mem.heap_committed",
        "jvm.mem.heap_used",
        "jvm.threads.count",
        "jvm.threads.peak_count",
        "elasticsearch.transport.rx_count",
        "elasticsearch.transport.tx_size",
        "elasticsearch.transport.server_open",
        "elasticsearch.thread_pool.snapshot.queue",
        "elasticsearch.active_shards",
    ]
    for name in expected_once:
        self.assertEqual(len([t for t in r if t[0] == name]), 1, r)

    # Checks enabled for specific ES versions
    version = c._get_es_version('http://localhost:%s' % PORT)
    if version >= [0, 90, 10]:
        # ES versions 0.90.10 and above
        pass
    else:
        # ES version 0.90.9 and below
        self.assertEqual(
            len([t for t in r if t[0] == "jvm.gc.collection_time"]), 1, r)

    # Forcing a red cluster status must produce exactly one event.
    self.check.cluster_status[conf['instances'][0].get('url')] = "red"
    self.check.check(conf['instances'][0])
    events = self.check.get_events()
    self.assertEqual(len(events), 1, events)
def test_scalar_SNMPCheck(self):
    """Scalar OIDs: the gauge reports immediately, counters only after
    a second run provides the rate interval."""
    self.config = {
        "instances": [{
            "ip_address": "localhost",
            "port": 161,
            "community_string": "public",
            "metrics": [
                # Counter (needlessly specify the index)
                {"OID": "1.3.6.1.2.1.7.1.0", "name": "udpDatagrams"},
                # Counter
                {"OID": "1.3.6.1.2.1.6.10", "name": "tcpInSegs"},
                # Gauge
                {"MIB": "TCP-MIB", "symbol": "tcpCurrEstab"},
            ]
        }]
    }
    instance = self.config['instances'][0]
    self.check = load_check('snmp', self.config, self.agentConfig)
    self.check.check(instance)
    metrics = self.check.get_metrics()
    # Only the gauge appears on the first run: counters are submitted as
    # rates, which require two data points.
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0][0], 'snmp.tcpCurrEstab')

    # Sleep for 1 second so the rate interval >= 1
    time.sleep(1)
    # Run the check again so we get the rate
    self.check.check(instance)
    metrics = self.check.get_metrics()
    self.assertEqual(len(metrics), 3)
    reported_names = [result[0] for result in metrics]
    for expected in ('snmp.udpDatagrams', 'snmp.tcpCurrEstab',
                     'snmp.tcpInSegs'):
        if expected not in reported_names:
            self.fail("Missing metric: %s" % expected)