예제 #1
0
    def test_ntp_global_settings(self):
        """NTPUtil honours explicit instance settings and falls back to its
        built-in defaults when the instance is empty."""
        # Reset the singleton so state from other tests cannot leak in.
        NTPUtil._drop()

        custom_config = {
            'init_config': {},
            'instances': [{
                "host": "foo.com",
                "port": "bar",
                "version": 42,
                "timeout": 13.37,
            }],
        }
        agentConfig = {
            'version': '0.1',
            'agent_key': 'toto'
        }

        # Instantiating NTPUtil with this config primes the singleton.
        ntp_util = NTPUtil(custom_config)

        # default min collection interval for that check was 20sec
        check = load_check('ntp', custom_config, agentConfig)
        check.run()

        for key, expected in [("host", "foo.com"), ("port", "bar"),
                              ("version", 42), ("timeout", 13.37)]:
            self.assertEqual(ntp_util.args[key], expected)

        # Drop the singleton before loading the default (empty) config.
        NTPUtil._drop()

        default_config = {'instances': [{}], 'init_config': {}}
        ntp_util = NTPUtil(default_config)

        # default min collection interval for that check was 20sec
        check = load_check('ntp', default_config, agentConfig)
        try:
            check.run()
        except Exception:
            # The run itself may fail (e.g. no network); only args matter here.
            pass

        self.assertTrue(ntp_util.args["host"].endswith("datadog.pool.ntp.org"))
        self.assertEqual(ntp_util.args["port"], "ntp")
        self.assertEqual(ntp_util.args["version"], 3)
        self.assertEqual(ntp_util.args["timeout"], 1.0)

        NTPUtil._drop()
예제 #2
0
    def test_topology_details(self):
        """Report one topology twice and verify the spout/bolt metrics and
        their tags, including the custom task_tags from the instance config.

        Fixes: the final tag assertion previously re-checked
        bolt_workers_metric (copy-paste) instead of bolt_executed_metric, a
        leftover debug print was removed, and the unused topology_from_sum
        local was dropped.
        """
        name = 'sometopo'
        topology_details = {
            "spouts": [
                {"executors": 48, "emitted": 0, "errorLapsedSecs": 13822, "completeLatency": "2.030", "transferred": 50, "acked": 40, "errorPort": 6711, "spoutId": "somespout", "tasks": 48, "errorHost": "", "lastError": "", "errorWorkerLogLink": "", "failed": 20, "encodedSpoutId": "somespout"},
                {"executors": 48, "emitted": 0, "errorLapsedSecs": 13822, "completeLatency": "2.030", "transferred": 50, "acked": 40, "errorPort": 6711, "spoutId": "detailspout", "tasks": 48, "errorHost": "", "lastError": "", "errorWorkerLogLink": "", "failed": 20, "encodedSpoutId": "detailspout"}
            ],
            "bolts": [
                {"executors": 3, "emitted": 10, "errorLapsedSecs": None, "transferred": 12, "acked": 9, "errorPort": "", "executeLatency": "2.300", "tasks": 4, "executed": 12, "processLatency": "2.501", "boltId": "somebolt", "errorHost": "", "lastError": "", "errorWorkerLogLink": "", "capacity": "0.020", "failed": 2, "encodedBoltId": "somebolt"},
                {"executors": 3, "emitted": 10, "errorLapsedSecs": None, "transferred": 12, "acked": 3, "errorPort": "", "executeLatency": "2.300", "tasks": 4, "executed": 12, "processLatency": "2.501", "boltId": "detail::bolt", "errorHost": "", "lastError": "", "errorWorkerLogLink": "", "capacity": "0.020", "failed": 2, "encodedBoltId": "detail%3A%3Abolt"}
            ]
        }
        instance = {
            'url': 'http://localhost:8080',
            'timeout': 0,
            'topologies': '^(sometopo)_.*$',
            'executor_details_whitelist': ['detailspout', 'detail::bolt'],
            'task_tags': {
                'spout': {
                    'somespout': [
                        'is_a_great_spout:true'
                    ]
                },
                'bolt': {
                    'somebolt': [
                        'is_a_great_bolt:true'
                    ]
                }
            },
            "cache_file": "/dev/null"
        }
        conf = {
            'init_config': {},
            'instances': [
                instance
            ],
        }
        self.check = load_check('storm_rest_api', conf, {})

        # Report twice so rate-style metrics have an interval to work with.
        self.check.report_topology(self.check.instance_config(instance), name, topology_details)
        self.check.report_topology(self.check.instance_config(instance), name, topology_details)

        metrics = self.check.get_metrics()

        spout_workers_metric = self.find_metric(metrics, 'storm.rest.spout.executors_total', ['storm_task_id:somespout'])
        self.assertEqual(48, spout_workers_metric[2])
        self.assert_tags(['storm_topology:sometopo', 'storm_task_id:somespout', 'is_a_great_spout:true'], spout_workers_metric[3]['tags'])

        complete_latency_metric = self.find_metric(metrics, 'storm.rest.spout.complete_latency_us', ['storm_task_id:somespout'])
        self.assertEqual(2.030, complete_latency_metric[2])

        bolt_workers_metric = self.find_metric(metrics, 'storm.rest.bolt.executors_total', ['storm_task_id:somebolt'])
        self.assertEqual(3, bolt_workers_metric[2])
        self.assert_tags(['storm_topology:sometopo', 'storm_task_id:somebolt', 'is_a_great_bolt:true'], bolt_workers_metric[3]['tags'])

        bolt_executed_metric = self.find_metric(metrics, 'storm.rest.bolt.executed_total', ['storm_task_id:somebolt'])
        # Two identical reports -> zero delta for the counter-style metric.
        self.assertEqual(0, bolt_executed_metric[2])
        # BUGFIX: previously re-asserted bolt_workers_metric's tags here.
        self.assert_tags(['storm_topology:sometopo', 'storm_task_id:somebolt', 'is_a_great_bolt:true'], bolt_executed_metric[3]['tags'])
예제 #3
0
    def test_redis_repl(self):
        """Verify master->slave replication works and that the master reports
        replication metrics.

        Fixes: the original `assert [x in keys for x in repl_metrics]` only
        tested the truthiness of a non-empty list and could never fail; each
        metric's presence is now asserted individually. The duplicate
        'redis.replication.delay' entry was also dropped.
        """
        master_instance = {
            'host': 'localhost',
            'port': NOAUTH_PORT
        }

        slave_instance = {
            'host': 'localhost',
            'port': AUTH_PORT,
            'password': '******'
        }

        repl_metrics = [
            'redis.replication.delay',
            'redis.replication.backlog_histlen',
            'redis.replication.master_repl_offset',
        ]

        master_db = redis.Redis(port=NOAUTH_PORT, db=14)
        slave_db = redis.Redis(port=AUTH_PORT, password=slave_instance['password'], db=14)
        master_db.flushdb()

        # Assert that the replication works: a key set on the master must be
        # readable from the slave.
        master_db.set('replicated:test', 'true')
        self.assertEqual(slave_db.get('replicated:test'), 'true')

        r = load_check('redisdb', {}, {})
        r.check(master_instance)
        metrics = self._sort_metrics(r.get_metrics())

        # Assert the presence of every replication metric.
        keys = [m[0] for m in metrics]
        for expected in repl_metrics:
            self.assertIn(expected, keys,
                          "%s missing from reported metrics" % expected)
예제 #4
0
    def test_collector(self):
        """A collector run must emit one check_run_time timing metric,
        tagged with the check's name, for every check it ran."""
        agentConfig = {
            'agent_key': 'test_agentkey',
            'check_timings': True,
            'collect_ec2_tags': True,
            'collect_instance_metadata': False,
            'create_dd_check_tags': False,
            'version': 'test',
            'tags': '',
        }

        # Run just the redisdb checks.d check through the collector.
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}]
        }
        loaded_checks = [load_check('redisdb', redis_config, agentConfig)]

        collector = Collector(agentConfig, [], {}, get_hostname(agentConfig))
        payload = collector.run({'initialized_checks': loaded_checks,
                                 'init_failed_checks': {}})

        # Collect the tags attached to every timing metric in the payload.
        tags_seen = []
        for metric in payload['metrics']:
            if metric[0] == 'sd.agent.check_run_time':
                tags_seen.extend(metric[3]['tags'])

        # Each loaded check must be represented by a check:<name> tag.
        for loaded in loaded_checks:
            expected_tag = "check:%s" % loaded.name
            assert expected_tag in tags_seen, tags_seen
예제 #5
0
    def test_apptags(self):
        '''
        Checks loaded with create_dd_check_tags=True must contribute a
        dd_check:<name> entry to the payload's system host tags.
        '''
        agentConfig = {
            'agent_key': 'test_agentkey',
            'collect_ec2_tags': False,
            'collect_instance_metadata': False,
            'create_dd_check_tags': True,
            'version': 'test',
            'tags': '',
        }

        # Run a single checks.d check (redisdb) through the collector.
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}],
        }
        active = [load_check('redisdb', redis_config, agentConfig)]

        collector = Collector(agentConfig, [], {}, get_hostname(agentConfig))
        payload = collector.run({'initialized_checks': active,
                                 'init_failed_checks': {}})

        # The redis DD_CHECK_TAG must show up among the system host tags.
        system_tags = payload['host-tags']['system']
        self.assertTrue('dd_check:redisdb' in system_tags)
예제 #6
0
    def test_network_latency_checks(self):
        """Latency metrics: 19 consul.net.* in total — 3 dc-level (we only
        report source == self) and 16 node-level (2 nodes x 8 stats)."""
        self.check = load_check(self.CHECK_NAME,
                                MOCK_CONFIG_NETWORK_LATENCY_CHECKS,
                                self.DEFAULT_AGENT_CONFIG)

        mocks = self._get_consul_mocks()

        # Pretend we are (and remain) the cluster leader.
        self.check._last_known_leader = self.mock_get_cluster_leader_A(None)

        self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)

        net_metrics = sorted(m for m in self.metrics
                             if m[0].startswith('consul.net.'))
        # Make sure we have the expected number of metrics.
        self.assertEquals(19, len(net_metrics))

        # Only 3 dc-latency metrics since we only do source = self.
        dc_latency = [m for m in net_metrics if '.dc.latency.' in m[0]]
        self.assertEquals(3, len(dc_latency))
        self.assertEquals(1.6746410750238774, dc_latency[0][2])

        # 16 latency metrics: 2 nodes * 8 metrics each.
        node_latency = [m for m in net_metrics if '.node.latency.' in m[0]]
        self.assertEquals(16, len(node_latency))
        self.assertEquals(0.26577747932995816, node_latency[0][2])
예제 #7
0
    def test_config_parser(self):
        """get_instance_config: explicit settings are parsed and omitted
        settings fall back to defaults."""
        check = load_check(self.CHECK_NAME, {}, {})

        # Fully-specified instance.
        full_instance = {
            "username": "******",
            "password": "******",
            "is_external": "yes",
            "url": "http://foo.bar",
            "tags": ["a", "b:c"],
        }
        parsed = check.get_instance_config(full_instance)
        for attr, expected in [
            ("username", "user"),
            ("password", "pass"),
            ("cluster_stats", True),
            ("url", "http://foo.bar"),
            ("tags", ["url:http://foo.bar", "a", "b:c"]),
            ("timeout", check.DEFAULT_TIMEOUT),
            ("service_check_tags", ["host:foo.bar", "port:None", "a", "b:c"]),
        ]:
            self.assertEquals(getattr(parsed, attr), expected)

        # Minimal instance: only URL and timeout.
        parsed = check.get_instance_config(
            {"url": "http://192.168.42.42:12999", "timeout": 15})
        for attr, expected in [
            ("username", None),
            ("password", None),
            ("cluster_stats", False),
            ("url", "http://192.168.42.42:12999"),
            ("tags", ["url:http://192.168.42.42:12999"]),
            ("timeout", 15),
            ("service_check_tags", ["host:192.168.42.42", "port:12999"]),
        ]:
            self.assertEquals(getattr(parsed, attr), expected)
예제 #8
0
    def test_redis_repl(self):
        """Replication smoke test: a key set on the master appears on the
        slave, and the master reports its replication metrics.

        Fixes: `assert [x in keys for x in ...]` always passed because a
        non-empty list is truthy; each metric is now asserted individually.
        The duplicated 'redis.replication.delay' entry was removed.
        """
        master_instance = {'host': 'localhost', 'port': NOAUTH_PORT}

        slave_instance = {
            'host': 'localhost',
            'port': AUTH_PORT,
            'password': '******'
        }

        repl_metrics = [
            'redis.replication.delay',
            'redis.replication.backlog_histlen',
            'redis.replication.master_repl_offset',
        ]

        master_db = redis.Redis(port=NOAUTH_PORT, db=14)
        slave_db = redis.Redis(port=AUTH_PORT,
                               password=slave_instance['password'],
                               db=14)
        master_db.flushdb()

        # Assert that replication propagates the key to the slave.
        master_db.set('replicated:test', 'true')
        self.assertEqual(slave_db.get('replicated:test'), 'true')

        r = load_check('redisdb', {}, {})
        r.check(master_instance)
        metrics = self._sort_metrics(r.get_metrics())

        # Assert the presence of every replication metric.
        keys = [m[0] for m in metrics]
        for name in repl_metrics:
            self.assertIn(name, keys)
예제 #9
0
    def test_redis_replication_service_check(self):
        """master_link_status service check: OK on a healthy slave, CRITICAL
        on a broken one.

        Fixes: the unhealthy branch previously indexed check['status']
        without first asserting the service check was returned, so a missing
        check produced a confusing TypeError instead of a clear failure.
        """
        check_name = 'redis.replication.master_link_status'
        r = load_check('redisdb', {}, {})

        def extract_check(instance):
            # Return the first master_link_status service check, or None.
            r.check(instance)
            matches = [
                c for c in r.get_service_checks() if c['check'] == check_name
            ]
            return (matches and matches[0]) or None

        # Healthy host. Give time for the replication failure metrics to
        # build up before sampling.
        time.sleep(5)
        check = extract_check({
            'host': 'localhost',
            'port': SLAVE_HEALTHY_PORT
        })
        assert check, "%s service check not returned" % check_name
        self.assertEqual(check['status'], AgentCheck.OK,
                         "Value of %s service check should be OK" % check_name)

        # Unhealthy host
        check = extract_check({
            'host': 'localhost',
            'port': SLAVE_UNHEALTHY_PORT
        })
        # BUGFIX: guard against None before indexing, as in the healthy case.
        assert check, "%s service check not returned" % check_name
        self.assertEqual(
            check['status'], AgentCheck.CRITICAL,
            "Value of %s service check should be CRITICAL" % check_name)
예제 #10
0
    def test_redis_replication_link_metric(self):
        """master_link_down_since_seconds is 0 while the link is healthy and
        grows once the link is broken.

        Fixes: the unhealthy branch indexed metric[2] without asserting the
        metric was returned; the deprecated unittest alias `assert_` was
        replaced with assertTrue.
        """
        metric_name = 'redis.replication.master_link_down_since_seconds'
        r = load_check('redisdb', {}, {})

        def extract_metric(instance):
            # Return the first sample of metric_name, or None.
            r.check(instance)
            samples = [m for m in r.get_metrics() if m[0] == metric_name]
            return (samples and samples[0]) or None

        # Healthy host
        metric = extract_metric({
            'host': 'localhost',
            'port': SLAVE_HEALTHY_PORT
        })
        assert metric, "%s metric not returned" % metric_name
        self.assertEqual(metric[2], 0, "Value of %s should be 0" % metric_name)

        # Unhealthy host. Give time for the replication failure metrics to
        # build up before sampling.
        time.sleep(5)
        metric = extract_metric({
            'host': 'localhost',
            'port': SLAVE_UNHEALTHY_PORT
        })
        # BUGFIX: guard against a missing metric before indexing it.
        assert metric, "%s metric not returned" % metric_name
        self.assertTrue(metric[2] > 0,
                        "Value of %s should be greater than 0" % metric_name)
    def test_build_event(self):
        """Events are created only for builds newer than the last one seen;
        re-polling the same build must not re-fire the event."""
        agent_config = {
            'version': '0.1',
            'api_key': 'toto'
        }
        check = load_check('teamcity', CONFIG, agent_config)

        def poll_with(mock_get):
            # Run the check once with requests.get replaced by mock_get.
            with patch('requests.get', mock_get):
                check.check(check.instances[0])

        poll_with(get_mock_first_build)
        self.assertEquals(len(check.get_metrics()), 0)
        # The first poll only records the current build; no event yet.
        self.assertEquals(len(check.get_events()), 0)

        poll_with(get_mock_one_more_build)
        events = check.get_events()
        self.assertEquals(len(events), 1)
        event = events[0]
        self.assertEquals(event['msg_title'], "Build for One test build successful")
        self.assertEquals(event['msg_text'], "Build Number: 2\nDeployed To: buildhost42.dtdg.co\n\nMore Info: http://localhost:8111/viewLog.html?buildId=2&buildTypeId=TestProject_TestBuild")
        self.assertEquals(event['tags'], ['build', 'one:tag', 'one:test'])
        self.assertEquals(event['host'], "buildhost42.dtdg.co")

        # Seeing the same latest build again must not create another event.
        poll_with(get_mock_one_more_build)
        self.assertEquals(len(check.get_events()), 0)
예제 #12
0
    def test_network_latency_checks(self):
        """Network latency: expect 19 consul.net.* metrics — 3 dc-level
        (source == self only) and 16 node-level (2 nodes x 8 stats)."""
        self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_NETWORK_LATENCY_CHECKS,
                                self.DEFAULT_AGENT_CONFIG)

        mocks = self._get_consul_mocks()

        # Mark ourselves as the known leader for this instance so the
        # latency-collection path runs (we start as leader and stay leader).
        state_key = hash_mutable(MOCK_CONFIG_NETWORK_LATENCY_CHECKS['instances'][0])
        self.check._instance_states[state_key].last_known_leader = \
            self.mock_get_cluster_leader_A(None)

        self.run_check(MOCK_CONFIG_NETWORK_LATENCY_CHECKS, mocks=mocks)

        net = sorted(m for m in self.metrics if m[0].startswith('consul.net.'))
        # Make sure we have the expected number of metrics.
        self.assertEquals(19, len(net))

        # Only 3 dc-latency metrics since we only do source = self.
        dc_latency = [m for m in net if '.dc.latency.' in m[0]]
        self.assertEquals(3, len(dc_latency))
        self.assertEquals(1.6746410750238774, dc_latency[0][2])

        # 16 latency metrics: 2 nodes * 8 metrics each.
        node_latency = [m for m in net if '.node.latency.' in m[0]]
        self.assertEquals(16, len(node_latency))
        self.assertEquals(0.26577747932995816, node_latency[0][2])
예제 #13
0
    def test_build_event(self):
        """An event is emitted only when a build newer than the last seen one
        appears; repeats of the same build do not re-fire."""
        check = load_check('teamcity', CONFIG,
                           {'version': '0.1', 'api_key': 'toto'})

        with patch('requests.get', get_mock_first_build):
            check.check(check.instances[0])

        # First poll: no metrics and no events — it only primes the state.
        self.assertEquals(0, len(check.get_metrics()))
        self.assertEquals(0, len(check.get_events()))

        with patch('requests.get', get_mock_one_more_build):
            check.check(check.instances[0])

        new_events = check.get_events()
        self.assertEquals(1, len(new_events))
        build_event = new_events[0]
        self.assertEquals("Build for One test build successful",
                          build_event['msg_title'])
        self.assertEquals(
            "Build Number: 2\nDeployed To: buildhost42.dtdg.co\n\nMore Info: http://localhost:8111/viewLog.html?buildId=2&buildTypeId=TestProject_TestBuild",
            build_event['msg_text'])
        self.assertEquals(['build', 'one:tag', 'one:test'], build_event['tags'])
        self.assertEquals("buildhost42.dtdg.co", build_event['host'])

        # One more check must not create any more events.
        with patch('requests.get', get_mock_one_more_build):
            check.check(check.instances[0])
        self.assertEquals(0, len(check.get_events()))
예제 #14
0
    def test_cull_services_list(self):
        """_cull_services_list caps results at MAX_SERVICES and honours the
        whitelist when one is given."""
        self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK,
                                self.DEFAULT_AGENT_CONFIG)

        cap = self.check.MAX_SERVICES
        # Pad past the cap to kick in the truncation logic.
        padded = cap + 20

        # Whitelist bigger than the cap: result is truncated to the cap.
        services = self.mock_get_n_services_in_cluster(padded)
        whitelist = ['service_{0}'.format(i) for i in range(padded)]
        self.assertEqual(cap, len(self.check._cull_services_list(services, whitelist)))

        # Whitelist below the cap: exactly the whitelist comes back.
        services = self.mock_get_n_services_in_cluster(padded)
        whitelist = ['service_{0}'.format(i) for i in range(cap - 1)]
        self.assertEqual(set(whitelist),
                         set(self.check._cull_services_list(services, whitelist)))

        # No whitelist and too many services: truncation still applies.
        self.assertEqual(cap, len(self.check._cull_services_list(services, [])))

        # Fewer services than the cap and no whitelist: pass-through.
        smaller = cap - 1
        services = self.mock_get_n_services_in_cluster(smaller)
        self.assertEqual(smaller, len(self.check._cull_services_list(services, [])))

        # Fewer services than the cap but a whitelist: whitelist wins.
        whitelist = ['service_1', 'service_2', 'service_3']
        services = self.mock_get_n_services_in_cluster(smaller)
        self.assertEqual(set(whitelist),
                         set(self.check._cull_services_list(services, whitelist)))
 def __init__(self, *args, **kwargs):
     """Build the shared riakcs check fixture: canned config, a mocked
     connection, and a fixture-backed stats payload."""
     unittest.TestCase.__init__(self, *args, **kwargs)
     metric_names = [
         "request_pool_overflow",
         "request_pool_size",
         "request_pool_workers",
     ]
     self.config = {
         "instances": [{
             "access_id": "foo",
             "access_secret": "bar",
             "metrics": metric_names,
         }],
     }
     self.check = load_check(self.CHECK_NAME, self.config, {})
     self.collect_ok = True
     # Short-circuit the connection: hand back the canned tag list and the
     # configured metric names instead of talking to a real endpoint.
     self.check._connect = Mock(return_value=(
         None,
         None,
         ["aggregation_key:localhost:8080"],
         self.config["instances"][0]["metrics"],
     ))
     # Serve a checked-in JSON fixture instead of hitting the stats API.
     self.check._get_stats = Mock(return_value=self.check.load_json(
         Fixtures.read_file('riakcs21_in.json', sdk_dir=FIXTURE_DIR)))
예제 #16
0
    def test_register_psutil_metrics(self):
        """In dev mode, registered psutil stats surface as collector memory
        metrics with the mocked values."""
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        check._register_psutil_metrics(MOCK_STATS, MOCK_NAMES_TO_METRIC_TYPES)
        self.metrics = check.get_metrics()

        expected = [
            ('datadog.agent.collector.memory_info.rss', 16814080),
            ('datadog.agent.collector.memory_info.vms', 74522624),
        ]
        for metric_name, value in expected:
            self.assertMetric(metric_name, value=value)
예제 #17
0
 def test_nginx_plus(self):
     """Parsed nginx-plus JSON must match the golden fixture output."""
     raw = Fixtures.read_file('nginx_plus_in.json')
     # NOTE(review): eval() of a trusted, checked-in fixture file; acceptable
     # in tests but never for external input.
     expected = eval(Fixtures.read_file('nginx_plus_out.python'))
     check = load_check('nginx', self.config, self.agent_config)
     actual = check.parse_json(raw)
     actual.sort()
     self.assertEquals(actual, expected)
예제 #18
0
    def test_apptags(self):
        '''
        The dd_check:redisdb host tag must be present in the payload when
        create_dd_check_tags is enabled in the agent config.
        '''
        agentConfig = {
            'api_key': 'test_apikey',
            'collect_ec2_tags': False,
            'collect_instance_metadata': False,
            'create_dd_check_tags': True,
            'version': 'test',
            'tags': '',
        }

        # A single checks.d check (redisdb) is enough to exercise tagging.
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}],
        }
        initialized = [load_check('redisdb', redis_config, agentConfig)]

        payload = Collector(agentConfig, [], {}, get_hostname(agentConfig)).run({
            'initialized_checks': initialized,
            'init_failed_checks': {},
        })

        # The redis DD_CHECK_TAG must show up among the system host tags.
        self.assertIn('dd_check:redisdb', payload['host-tags']['system'])
예제 #19
0
    def test_no_profiling(self):
        """Profiling stats must not be collected when allow_profiling is off,
        even with developer mode enabled."""
        agentConfig = {
            'api_key': 'XXXtest_apikey',
            'developer_mode': True,
            'allow_profiling': False
        }
        # SystemExit is used because any other Exception would be eaten by the
        # check's error handling; check.run() below would blow up if the
        # profiling hook were ever invoked.
        mocks = {
            '_set_internal_profiling_stats': mock.MagicMock(side_effect=SystemExit),
        }
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}]
        }
        check = load_check('redisdb', redis_config, agentConfig)

        self.assertFalse(check.allow_profiling)
        self.assertTrue(check.in_developer_mode)

        # Install the booby-trapped mocks on any matching attributes.
        for name, fake in mocks.iteritems():
            if hasattr(check, name):
                setattr(check, name, fake)

        check.run()
예제 #20
0
    def test_cluster(self):
        """Cluster summary metrics are reported with the instance tags, and
        the nimbus uptime string is converted to seconds.

        Fixes: removed a leftover debug `print cluster_slots_used`.
        """
        uptime = "22h 41m 49s"  # 22*3600 + 41*60 + 49 == 81709 seconds
        cluster = {
            "stormVersion": "0.9.3",
            "nimbusUptime": uptime,
            "supervisors": 7,
            "slotsTotal": 147,
            "slotsUsed": 7,
            "slotsFree": 140,
            "executorsTotal": 11415,
            "tasksTotal": 11415
        }
        instance = {'url': 'http://localhost:8080', 'timeout': 0, 'tags': ['cluster_want:form'], "cache_file": "/dev/null"}
        conf = {
            'init_config': {},
            'instances': [instance]
        }
        self.check = load_check('storm_rest_api', conf, {})
        self.check.report_cluster(self.check.instance_config(instance), cluster)
        metrics = self.check.get_metrics()

        cluster_uptime = self.find_metric(metrics, 'storm.rest.cluster.nimbus_uptime_seconds')
        self.assertEqual(81709, cluster_uptime[2])
        self.assert_tags(['cluster_want:form'], cluster_uptime[3]['tags'])

        cluster_slots_used = self.find_metric(metrics, 'storm.rest.cluster.slots_used_count')
        self.assertEqual(7, cluster_slots_used[2])
        self.assert_tags(['cluster_want:form'], cluster_slots_used[3]['tags'])

        # All remaining cluster gauges carry the tags and positive values.
        for metric_name in ['supervisor_count', 'slots_total_count', 'slots_free_count', 'executors_total_count', 'tasks_total_count']:
            metric = self.find_metric(metrics, 'storm.rest.cluster.%s' % metric_name)
            self.assert_tags(['cluster_want:form'], metric[3]['tags'])
            self.assertTrue(metric[2] > 0)
예제 #21
0
    def test_collector(self):
        """Every check run through the collector must produce a
        check_run_time timing metric tagged check:<name>."""
        agentConfig = {
            'agent_key': 'test_agentkey',
            'check_timings': True,
            'collect_ec2_tags': True,
            'collect_instance_metadata': False,
            'create_dd_check_tags': False,
            'version': 'test',
            'tags': '',
        }

        # Run a single checks.d check (redisdb) as part of the collector.
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}],
        }
        active_checks = [load_check('redisdb', redis_config, agentConfig)]

        payload = Collector(agentConfig, [], {}, get_hostname(agentConfig)).run({
            'initialized_checks': active_checks,
            'init_failed_checks': {},
        })

        # Flatten the tags of every timing metric into one list.
        timing_tags = []
        for m in payload['metrics']:
            if m[0] == 'sd.agent.check_run_time':
                timing_tags += m[3]['tags']

        # Each check must be represented by its check:<name> tag.
        for active in active_checks:
            assert ("check:%s" % active.name) in timing_tags, timing_tags
예제 #22
0
    def test_apptags(self):
        '''
        With create_sd_check_tags enabled, the payload's system host tags
        must include sd_check:<name> for each loaded check.
        '''
        agentConfig = {
            'agent_key': 'test_agentkey',
            'collect_ec2_tags': False,
            'collect_orchestrator_tags': False,
            'collect_instance_metadata': False,
            'create_sd_check_tags': True,
            'version': 'test',
            'tags': '',
        }

        # A single checks.d check (disk) is enough to exercise the tagging.
        disk_config = {
            "init_config": {},
            "instances": [{}],
        }
        loaded = [load_check('disk', disk_config, agentConfig)]

        collector = Collector(agentConfig, [], {}, get_hostname(agentConfig))
        payload = collector.run({
            'initialized_checks': loaded,
            'init_failed_checks': {},
        })

        # The disk SD_CHECK_TAG must be present in the system host tags.
        self.assertIn('sd_check:disk', payload['host-tags']['system'])
예제 #23
0
    def test_config_parser(self):
        """get_instance_config honours explicit values and falls back to
        sensible defaults when keys are omitted."""
        check = load_check(self.CHECK_NAME, {}, {})

        # Fully-specified instance.
        explicit = check.get_instance_config({
            "username": "******",
            "password": "******",
            "is_external": "yes",
            "url": "http://foo.bar",
            "tags": ["a", "b:c"],
        })
        self.assertEquals("user", explicit.username)
        self.assertEquals("pass", explicit.password)
        self.assertEquals(True, explicit.cluster_stats)
        self.assertEquals("http://foo.bar", explicit.url)
        self.assertEquals(["url:http://foo.bar", "a", "b:c"], explicit.tags)
        self.assertEquals(check.DEFAULT_TIMEOUT, explicit.timeout)
        self.assertEquals(["host:foo.bar", "port:None"],
                          explicit.service_check_tags)

        # Minimal instance: only URL and timeout given.
        minimal = check.get_instance_config({
            "url": "http://192.168.42.42:12999",
            "timeout": 15,
        })
        self.assertEquals(None, minimal.username)
        self.assertEquals(None, minimal.password)
        self.assertEquals(False, minimal.cluster_stats)
        self.assertEquals("http://192.168.42.42:12999", minimal.url)
        self.assertEquals(["url:http://192.168.42.42:12999"], minimal.tags)
        self.assertEquals(15, minimal.timeout)
        self.assertEquals(["host:192.168.42.42", "port:12999"],
                          minimal.service_check_tags)
예제 #24
0
    def testMongoOldConfig(self):
        """Old-style (server URL) config: both instances produce sane metrics.

        The two verbatim-duplicated run/assert sequences were extracted into
        _run_and_verify_mongo; behavior is unchanged.
        """
        conf = {
            'init_config': {},
            'instances': [
                {
                    'server': "mongodb://localhost:%s/test" % PORT1
                },
                {
                    'server': "mongodb://localhost:%s/test" % PORT2
                },
            ]
        }

        # Sanity predicates for a handful of well-known metrics.
        metric_val_checks = {
            'mongodb.connections.current': lambda x: x >= 1,
            'mongodb.connections.available': lambda x: x >= 1,
            'mongodb.uptime': lambda x: x >= 0,
            'mongodb.mem.resident': lambda x: x > 0,
            'mongodb.mem.virtual': lambda x: x > 0
        }

        self.check = load_check('mongo', conf, {})

        for instance in conf['instances']:
            self._run_and_verify_mongo(instance, metric_val_checks)

    def _run_and_verify_mongo(self, instance, metric_val_checks):
        """Run the check twice (so rates have an interval) against `instance`
        and validate the collected metrics with `metric_val_checks`."""
        self.check.check(instance)
        # Sleep for 1 second so the rate interval is >= 1.
        time.sleep(1)
        # Run the check again so we get the rates.
        self.check.check(instance)

        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, ListType)
        self.assertTrue(len(metrics) > 0)

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
예제 #25
0
 def test_metric_name_without_prefix(self):
     """metric() prepends the default 'storm.rest' prefix to bare names."""
     instance = {'url': 'http://localhost:8080', 'timeout': 0,
                 "cache_file": "/dev/null"}
     self.check = load_check('storm_rest_api',
                             {'init_config': {}, 'instances': [instance]}, {})
     config = self.check.instance_config(instance)
     self.assertEqual('storm.rest.baz', self.check.metric(config, 'baz'))
예제 #26
0
    def init_check(self, config, check_name):
        """Load `check_name` with `config` and remember it for later cleanup."""
        self.agentConfig = {'version': AGENT_VERSION, 'api_key': 'toto'}
        self.check = load_check(check_name, config, self.agentConfig)
        self.checks.append(self.check)
예제 #27
0
        def test_service_checks(self):
            """Running against a dead endpoint raises and emits exactly one
            CRITICAL service check carrying the aggregation-key tag."""
            self.check = load_check(self.CHECK_NAME, self.config, {})
            self.assertRaises(error, self.run_check, self.config)

            self.assertEqual(1, len(self.service_checks), self.service_checks)
            self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
                                    status=AgentCheck.CRITICAL,
                                    tags=['aggregation_key:localhost:8080'])
예제 #28
0
    def init_check(self, config, check_name):
        """Instantiate the named check with `config` and register it on self.checks."""
        # Bare-bones agent config; only version and api_key are required here.
        agent_config = {'version': AGENT_VERSION, 'api_key': 'toto'}
        self.agentConfig = agent_config
        self.check = load_check(check_name, config, agent_config)
        self.checks.append(self.check)
예제 #29
0
    def test_service_checks(self):
        """Running against a dead endpoint raises and records one CRITICAL check."""
        self.check = load_check(self.CHECK_NAME, self.config, {})
        # Nothing is listening, so the run must fail with `error`.
        self.assertRaises(error, lambda: self.run_check(self.config))

        self.assertEqual(len(self.service_checks), 1, self.service_checks)
        self.assertServiceCheck(
            self.check.SERVICE_CHECK_NAME,
            status=AgentCheck.CRITICAL,
            tags=['aggregation_key:localhost:8080'])
예제 #30
0
    def test_cull_services_list(self):
        """Exercise whitelist and max-services truncation in _cull_services_list.

        NOTE(review): later sections reuse and reassign `num_services`,
        `services` and `whitelist` set by earlier ones, so the statement
        order below is significant.
        """
        self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)

        # Pad num_services to kick in truncation logic
        num_services = self.check.MAX_SERVICES + 20

        # Max services parameter (from consul.yaml) set to be bigger than MAX_SERVICES and smaller than the total of services
        max_services = num_services - 10

        # Big whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(num_services)]
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)

        # Big whitelist with max_services
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(num_services)]
        self.assertEqual(len(self.check._cull_services_list(services, whitelist, max_services)), max_services)

        # Whitelist < MAX_SERVICES should spit out the whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(self.check.MAX_SERVICES-1)]
        self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))

        # Whitelist < max_services param should spit out the whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(max_services-1)]
        self.assertEqual(set(self.check._cull_services_list(services, whitelist, max_services)), set(whitelist))

        # No whitelist, still triggers truncation
        whitelist = []
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)

        # No whitelist with max_services set, also triggers truncation
        whitelist = []
        self.assertEqual(len(self.check._cull_services_list(services, whitelist, max_services)), max_services)

        # Num. services < MAX_SERVICES should be no-op in absence of whitelist
        num_services = self.check.MAX_SERVICES - 1
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), num_services)

        # Num. services < max_services (from consul.yaml) should be no-op in absence of whitelist
        num_services = max_services - 1
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(len(self.check._cull_services_list(services, whitelist, max_services)), num_services)

        # Num. services < MAX_SERVICES should spit out only the whitelist when one is defined
        num_services = self.check.MAX_SERVICES - 1
        whitelist = ['service_1', 'service_2', 'service_3']
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))

        # Num. services < max_services should spit out only the whitelist when one is defined
        num_services = max_services - 1
        whitelist = ['service_1', 'service_2', 'service_3']
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(set(self.check._cull_services_list(services, whitelist, max_services)), set(whitelist))
예제 #31
0
    def test_cull_services_list(self):
        """Verify _cull_services_list truncation, both with and without whitelist.

        NOTE(review): this is a running scenario — `num_services`, `services`
        and `whitelist` are rebound between sections and later assertions
        depend on those reassignments; do not reorder.
        """
        self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)

        # Pad num_services to kick in truncation logic
        num_services = self.check.MAX_SERVICES + 20

        # Max services parameter (from consul.yaml) set to be bigger than MAX_SERVICES and smaller than the total of services
        max_services = num_services - 10

        # Big whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(num_services)]
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)

        # Big whitelist with max_services
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(num_services)]
        self.assertEqual(len(self.check._cull_services_list(services, whitelist, max_services)), max_services)

        # Whitelist < MAX_SERVICES should spit out the whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(self.check.MAX_SERVICES-1)]
        self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))

        # Whitelist < max_services param should spit out the whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(max_services-1)]
        self.assertEqual(set(self.check._cull_services_list(services, whitelist, max_services)), set(whitelist))

        # No whitelist, still triggers truncation
        whitelist = []
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)

        # No whitelist with max_services set, also triggers truncation
        whitelist = []
        self.assertEqual(len(self.check._cull_services_list(services, whitelist, max_services)), max_services)

        # Num. services < MAX_SERVICES should be no-op in absence of whitelist
        num_services = self.check.MAX_SERVICES - 1
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), num_services)

        # Num. services < max_services (from consul.yaml) should be no-op in absence of whitelist
        num_services = max_services - 1
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(len(self.check._cull_services_list(services, whitelist, max_services)), num_services)

        # Num. services < MAX_SERVICES should spit out only the whitelist when one is defined
        num_services = self.check.MAX_SERVICES - 1
        whitelist = ['service_1', 'service_2', 'service_3']
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))

        # Num. services < max_services should spit out only the whitelist when one is defined
        num_services = max_services - 1
        whitelist = ['service_1', 'service_2', 'service_3']
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(set(self.check._cull_services_list(services, whitelist, max_services)), set(whitelist))
예제 #32
0
 def __init__(self, *args, **kwargs):
     """Build the check once, with connection and stats stubbed out."""
     unittest.TestCase.__init__(self, *args, **kwargs)
     instance = {"access_id": "foo", "access_secret": "bar"}
     self.config = {"instances": [instance]}
     self.check = load_check(self.CHECK_NAME, self.config, {})
     # No real network access: stub the connection and the stats payload.
     self.check._connect = Mock(
         return_value=(None, None, ["aggregation_key:localhost:8080"]))
     self.check._get_stats = Mock(
         return_value=self.check.load_json(Fixtures.read_file('riakcs_in.json')))
예제 #33
0
    def test_nginx_plus(self):
        """Parsed NGINX Plus status JSON must match the expected fixture output."""
        test_data = Fixtures.read_file('nginx_plus_in.json', sdk_dir=FIXTURE_DIR)
        # NOTE: eval() here runs trusted, checked-in fixture content only;
        # never use this pattern on external input (prefer ast.literal_eval).
        expected = eval(Fixtures.read_file('nginx_plus_out.python', sdk_dir=FIXTURE_DIR))
        nginx = load_check('nginx', self.config, self.agent_config)
        parsed = nginx.parse_json(test_data)
        parsed.sort()

        # Check that the parsed test data is the same as the expected output.
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(parsed, expected)
예제 #34
0
 def __init__(self, *args, **kwargs):
     """Construct the test case with a mocked check instance."""
     unittest.TestCase.__init__(self, *args, **kwargs)
     self.config = {"instances": [{"access_id": "foo", "access_secret": "bar"}]}
     self.check = load_check(self.CHECK_NAME, self.config, {})
     # Replace network-touching internals with canned values/fixtures.
     tags = ["aggregation_key:localhost:8080"]
     self.check._connect = Mock(return_value=(None, None, tags))
     stats = self.check.load_json(Fixtures.read_file('riakcs_in.json'))
     self.check._get_stats = Mock(return_value=stats)
예제 #35
0
    def test_nginx_one_connection(self):
        """Exactly one 'nginx.net.connections' metric is emitted per run."""
        nginx = load_check('nginx', self.config, self.agent_config)

        # Testing that connection will work with instance 0
        nginx.check(self.config['instances'][0])

        # Checking that only one metric is of type 'nginx.net.connections'.
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        r = nginx.get_metrics()
        self.assertEqual(len([t for t in r if t[0] == "nginx.net.connections"]), 1, r)
예제 #36
0
    def test_nginx_ssl_validation_enabled(self):
        """With SSL validation on, a self-signed HTTPS endpoint raises SSLError.

        Note: this also triggers an InsecurePlatformWarning on Python < 2.7.9,
        whose ssl module limits the configuration urllib3 can apply
        (https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning).
        """
        nginx = load_check('nginx', self.config, self.agent_config)

        # Instance 4 points at the self-signed endpoint, so the check must fail.
        self.assertRaises(requests.exceptions.SSLError,
                          nginx.check, self.config['instances'][4])
예제 #37
0
    def test_nginx_plus(self):
        """NGINX Plus status fixture must parse into the expected metric list."""
        test_data = Fixtures.read_file('nginx_plus_in.json', sdk_dir=FIXTURE_DIR)
        # NOTE: eval() here consumes trusted fixture content only; never do
        # this with external input (prefer ast.literal_eval).
        expected = eval(Fixtures.read_file('nginx_plus_out.python', sdk_dir=FIXTURE_DIR))
        nginx = load_check('nginx', self.config, self.agent_config)
        parsed = nginx.parse_json(test_data)
        parsed.sort()

        # Check that the parsed test data is the same as the expected output.
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(parsed, expected)
예제 #38
0
    def test_nginx_one_connection(self):
        """A single run yields exactly one 'nginx.net.connections' metric."""
        nginx = load_check('nginx', self.config, self.agent_config)

        # Testing that connection will work with instance 0
        nginx.check(self.config['instances'][0])

        # Checking that only one metric is of type 'nginx.net.connections'.
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        r = nginx.get_metrics()
        self.assertEqual(len([t for t in r if t[0] == "nginx.net.connections"]), 1, r)
예제 #39
0
    def test_register_psutil_metrics(self):
        """Registered psutil stats surface as collector memory metrics."""
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        check._register_psutil_metrics(MOCK_STATS, MOCK_NAMES_TO_METRIC_TYPES)
        self.metrics = check.get_metrics()

        # Expected values come straight from the MOCK_STATS fixture.
        self.assertMetric('stackstate.agent.collector.memory_info.rss', value=16814080)
        self.assertMetric('stackstate.agent.collector.memory_info.vms', value=74522624)
예제 #40
0
    def test_nginx_ssl_validation_enabled(self):
        """Checking a self-signed HTTPS endpoint with validation on must fail.

        An SSLError is expected; on Python < 2.7.9 an InsecurePlatformWarning
        is also emitted because the ssl module there restricts what urllib3
        can configure
        (https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning).
        """
        nginx = load_check('nginx', self.config, self.agent_config)
        bad_instance = self.config['instances'][4]
        self.assertRaises(requests.exceptions.SSLError, nginx.check, bad_instance)
예제 #41
0
    def test_ntp_global_settings(self):
        """Instance settings override NTP defaults; an empty instance falls back.

        NOTE(review): get_ntp_args() appears to read module-level state
        populated by check.run(), so the two configs must be exercised in
        this exact order — confirm against the ntp check implementation.
        """
        config = {'instances': [{
            "host": "foo.com",
            "port": "bar",
            "version": 42,
            "timeout": 13.37}],
            'init_config': {}}

        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }

        # default min collection interval for that check was 20sec
        check = load_check('ntp', config, agentConfig)
        check.run()

        ntp_args = get_ntp_args()

        # All four instance settings must have been picked up verbatim.
        self.assertEqual(ntp_args["host"], "foo.com")
        self.assertEqual(ntp_args["port"], "bar")
        self.assertEqual(ntp_args["version"], 42)
        self.assertEqual(ntp_args["timeout"], 13.37)

        config = {'instances': [{}], 'init_config': {}}

        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }

        # default min collection interval for that check was 20sec
        check = load_check('ntp', config, agentConfig)
        try:
            check.run()
        except Exception:
            # The default NTP pool may be unreachable in CI; only the
            # resolved arguments matter here, not a successful query.
            pass

        ntp_args = get_ntp_args()

        # With an empty instance, the check falls back to its defaults.
        self.assertTrue(ntp_args["host"].endswith("datadog.pool.ntp.org"))
        self.assertEqual(ntp_args["port"], "ntp")
        self.assertEqual(ntp_args["version"], 3)
        self.assertEqual(ntp_args["timeout"], 1.0)
예제 #42
0
    def test_ntp_global_settings(self):
        """Explicit instance values win; defaults apply for an empty instance.

        NOTE(review): this relies on get_ntp_args() reflecting state set by
        the preceding check.run(); ordering of the two scenarios matters.
        """
        config = {'instances': [{
            "host": "foo.com",
            "port": "bar",
            "version": 42,
            "timeout": 13.37}],
            'init_config': {}}

        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }

        # default min collection interval for that check was 20sec
        check = load_check('ntp', config, agentConfig)
        check.run()

        ntp_args = get_ntp_args()

        # The configured instance values must be used as-is.
        self.assertEqual(ntp_args["host"], "foo.com")
        self.assertEqual(ntp_args["port"], "bar")
        self.assertEqual(ntp_args["version"], 42)
        self.assertEqual(ntp_args["timeout"], 13.37)

        config = {'instances': [{}], 'init_config': {}}

        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }

        # default min collection interval for that check was 20sec
        check = load_check('ntp', config, agentConfig)
        try:
            check.run()
        except Exception:
            # A live NTP query may fail in CI; we only assert on the
            # arguments the check resolved, not on the query result.
            pass

        ntp_args = get_ntp_args()

        # Empty instance: the check's built-in defaults must apply.
        self.assertTrue(ntp_args["host"].endswith("datadog.pool.ntp.org"))
        self.assertEqual(ntp_args["port"], "ntp")
        self.assertEqual(ntp_args["version"], 3)
        self.assertEqual(ntp_args["timeout"], 1.0)
예제 #43
0
    def test_ssh(self):
        """End-to-end ssh_check: good host OK, bad auth/host CRITICAL, no leaks.

        NOTE(review): the '******' credentials look like placeholders scrubbed
        from the original source — this test cannot pass as-is without real
        values. It also requires network access to io.netgarage.org.
        """
        config = {
            'instances': [
                {
                    'host': 'io.netgarage.org',
                    'port': 22,
                    'username': '******',
                    'password': '******',
                    'sftp_check': False,
                    'private_key_file': '',
                    'add_missing_keys': True
                },
                {
                    'host': 'localhost',
                    'port': 22,
                    'username': '******',
                    'password': '******',
                    'sftp_check': False,
                    'private_key_file': '',
                    'add_missing_keys': True
                },
                {
                    'host': 'wronghost',
                    'port': 22,
                    'username': '******',
                    'password': '******',
                    'sftp_check': False,
                    'private_key_file': '',
                    'add_missing_keys': True
                },
            ]
        }

        agentConfig = {}
        self.check = load_check('ssh_check', config, agentConfig)

        # Snapshot thread count before any connections, to detect leaks below.
        nb_threads = threading.active_count()

        # Testing that connection will work
        self.check.check(config['instances'][0])

        service = self.check.get_service_checks()
        self.assertEqual(service[0].get('status'), AgentCheck.OK)
        self.assertEqual(service[0].get('message'), "No errors occured")
        self.assertEqual(service[0].get('tags'),
                         ["instance:io.netgarage.org-22"])

        # Testing that bad authentication will raise exception
        self.assertRaises(Exception, self.check.check, config['instances'][1])
        # Testing that bad hostname will raise exception
        self.assertRaises(Exception, self.check.check, config['instances'][2])
        service_fail = self.check.get_service_checks()
        # Check failure status
        self.assertEqual(service_fail[0].get('status'), AgentCheck.CRITICAL)
        # Check that we've closed all connections, if not we're leaking threads
        self.assertEqual(nb_threads, threading.active_count())
예제 #44
0
    def check_and_assert(self, filename, matches,
        # Raw strings: without the r-prefix, \S / \s / \d / \. are invalid
        # escape sequences (SyntaxWarning on modern Python, error-bound).
        # The resulting string values are byte-identical to the originals.
        kernel_line_regex=r'^(?P<timestamp>.+?) (?P<host>\S+) kernel: \[\s*(?P<uptime>\d+(?:\.\d+)?)\] (?P<message>.*)$',
        kill_message_regex=r'^Out of memory: Kill process (?P<pid>\d+) \((?P<pname>.*?)\) score (?P<score>.*?) or sacrifice child'
    ):
        """Run the oom check over `filename` and compare its service checks
        against `matches` (a list of dicts with optional 'status'/'message').

        Relative filenames are resolved against self.FIXTURE_PATH.
        """
        if filename[0] != '/':
            filename = path.join(self.FIXTURE_PATH, filename)

        conf = {
            'init_config': {},
            'instances': [{
                'logfile': filename,
                'kernel_line_regex': kernel_line_regex,
                'kill_message_regex': kill_message_regex
            }]
        }

        check = load_check('oom', conf, {})
        check.check(conf['instances'][0])

        service_checks = check.get_service_checks()

        # One service check is expected per specified match.
        self.assertEqual(
            len(service_checks),
            len(matches),
            "Got %s service checks but specified %s matches" % (len(service_checks), len(matches))
        )

        for idx, obj in enumerate(service_checks):
            match = matches[idx]

            self.assertEqual(
                obj.get('check'),
                self.CHECK_NAME,
                "(%s) Service check name should be %s" % (idx, self.CHECK_NAME)
            )

            # 'status' and 'message' are only asserted when present in the match.
            if 'status' in match:
                self.assertEqual(
                    obj.get('status'),
                    match.get('status'),
                    "(%s) Status should be %s" % (idx, match.get('status'))
                )

            if 'message' in match:
                if match.get('message') is None:
                    self.assertEqual(
                        obj.get('message'),
                        None,
                        "(%s) Service check should have no message" % idx
                    )
                else:
                    # A non-None expected message is treated as a regex.
                    self.assertRegexpMatches(
                        obj.get('message'),
                        match.get('message'),
                        "(%s) Message should match %s" % (idx, match.get('message'))
                    )
예제 #45
0
    def check_with_contents(self, *contents, **instance_config):
        """Run the s3_object_exists check against a stubbed S3 listing.

        Each dict in `contents` overrides fields of a default S3 object entry;
        `instance_config` overrides fields of the default instance. Returns
        the single service check produced by the run.

        NOTE(review): assumes self.fd/self.path come from a tempfile created
        in setUp (outside this view) — confirm before reusing.
        """
        # Write the fake AWS credentials file the init_config points at.
        with os.fdopen(self.fd, 'w') as conf_file:
            json.dump({"id": "the_id", "key": "the_key"}, conf_file)
        conf = {
            "init_config": {
                "credentials_json_file_path": self.path,
                "aws_access_key_id_field_name": "id",
                "aws_secret_access_key_field_name": "key",
            },
            "instances": [
                dict(
                    {
                        "uri": "s3://my-bucket/my/path/prefix/{date_stamp}",
                        "sla_seconds": 36000,
                        "min_size_bytes": 10000,
                        "run_time": "2016-12-01 13:05:01",
                    }, **instance_config),
            ],
        }

        # Template for each listed S3 object; callers override per-field.
        content_default = {
            'Key': 'part-00000-m-00000.parquet',
            'LastModified': datetime.datetime(2015, 1, 1),
            'ETag': 'the-tag',
            'Size': 10000000,
            'StorageClass': 'STANDARD',
            'Owner': {
                'DisplayName': 'yours truly',
                'ID': 'my id'
            }
        }
        response = {
            'IsTruncated': False,
            'Contents': [dict(content_default, **c) for c in contents],
        }
        check = load_check('s3_object_exists', conf, {})

        # Stub the boto3 client so list_objects returns our canned response
        # for the expected bucket/prefix (date_stamp expands to 20161201).
        stub_s3 = stub.Stubber(check.get_client())
        stub_s3.add_response(
            'list_objects',
            response,
            {
                "Bucket": "my-bucket",
                "Prefix": "my/path/prefix/20161201"
            },
        )
        with stub_s3:
            check.check(conf["instances"][0])

        service_checks = check.get_service_checks()
        self.assertEqual(
            1,
            len(service_checks),
            "Failed to perform service checks {0!r}".format(service_checks),
        )
        return service_checks[0]
예제 #46
0
    def testMongoOldConfig(self):
        """Old-style server-URL config works against both local mongod instances.

        Requires live mongod processes on PORT1 and PORT2. Each instance is
        checked twice with a 1s sleep between runs so rate metrics have a
        non-zero interval.
        """
        conf = {
            "init_config": {},
            "instances": [
                {"server": "mongodb://localhost:%s/test" % PORT1},
                {"server": "mongodb://localhost:%s/test" % PORT2},
            ],
        }

        # Test the first mongodb instance
        self.check = load_check("mongo", conf, {})

        # Run the check against our running server
        self.check.check(conf["instances"][0])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(conf["instances"][0])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertTrue(isinstance(metrics, ListType))
        self.assertTrue(len(metrics) > 0)

        # Sanity bounds for a subset of well-known metrics; metric tuples
        # are (name, timestamp, value, ...) so m[2] is the value.
        metric_val_checks = {
            "mongodb.connections.current": lambda x: x >= 1,
            "mongodb.connections.available": lambda x: x >= 1,
            "mongodb.uptime": lambda x: x >= 0,
            "mongodb.mem.resident": lambda x: x > 0,
            "mongodb.mem.virtual": lambda x: x > 0,
        }

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))

        # Run the check against our running server
        self.check.check(conf["instances"][1])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(conf["instances"][1])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertTrue(isinstance(metrics, ListType))
        self.assertTrue(len(metrics) > 0)

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
예제 #47
0
    def testMongoOldConfig(self):
        """Old-style server-URL config works against both local mongod instances.

        Requires live mongod processes on PORT1 and PORT2; each instance is
        checked twice with a 1s sleep so rate metrics have an interval >= 1.
        """
        conf = {
            'init_config': {},
            'instances': [
                {'server': "mongodb://localhost:%s/test" % PORT1},
                {'server': "mongodb://localhost:%s/test" % PORT2},
            ]
        }

        # Test the first mongodb instance
        self.check = load_check('mongo', conf, {})

        # Run the check against our running server
        self.check.check(conf['instances'][0])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(conf['instances'][0])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        # isinstance instead of type(...) == type([]): idiomatic, and
        # consistent with the sibling copy of this test using ListType.
        self.assertTrue(isinstance(metrics, list))
        self.assertTrue(len(metrics) > 0)

        # Sanity bounds for well-known metrics; m[2] is the metric value.
        metric_val_checks = {
            'mongodb.connections.current': lambda x: x >= 1,
            'mongodb.connections.available': lambda x: x >= 1,
            'mongodb.uptime': lambda x: x >= 0,
            'mongodb.mem.resident': lambda x: x > 0,
            'mongodb.mem.virtual': lambda x: x > 0
        }

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))

        # Run the check against our running server
        self.check.check(conf['instances'][1])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(conf['instances'][1])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertTrue(isinstance(metrics, list))
        self.assertTrue(len(metrics) > 0)

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
예제 #48
0
    def test_redis_auth(self):
        """Correct password yields metrics; wrong passwords yield auth errors.

        Requires a live password-protected redis on AUTH_PORT. NOTE(review):
        the '******' passwords look scrubbed from the original source, and if
        r.check() does not raise, the except-branch assertions are silently
        skipped — consider self.fail() after the check call.
        """
        # correct password
        r = load_check('redisdb', {}, {})
        instance = {
            'host': 'localhost',
            'port': AUTH_PORT,
            'password': '******'
        }
        r.check(instance)
        metrics = self._sort_metrics(r.get_metrics())
        assert len(metrics) > 0, "No metrics returned"

        # wrong passwords
        instances = [
            {
                'host': 'localhost',
                'port': AUTH_PORT,
                'password': ''
            },
            {
                'host': 'localhost',
                'port': AUTH_PORT,
                'password': '******'
            }
        ]

        r = load_check('redisdb', {}, {})
        try:
            r.check(instances[0])
        except Exception as e:
            # Error wording differs across redis versions; accept either.
            self.assertTrue(
                # 2.8
                'noauth authentication required' in str(e).lower()
                # previously
                or 'operation not permitted' in str(e).lower(),
                str(e))

        r = load_check('redisdb', {}, {})
        try:
            r.check(instances[1])
        except Exception as e:
            self.assertTrue('invalid password' in str(e).lower(), str(e))
예제 #49
0
    def test_redis_auth(self):
        """Auth handling of the redisdb check against a protected server.

        Requires redis listening on AUTH_PORT with auth enabled. NOTE(review):
        the '******' passwords appear scrubbed; also, if r.check() succeeds
        unexpectedly the except-branch assertions never run, so a missing
        error passes silently.
        """
        # correct password
        r = load_check('redisdb', {}, {})
        instance = {
            'host': 'localhost',
            'port': AUTH_PORT,
            'password': '******'
        }
        r.check(instance)
        metrics = self._sort_metrics(r.get_metrics())
        assert len(metrics) > 0, "No metrics returned"

        # wrong passwords
        instances = [
            {
                'host': 'localhost',
                'port': AUTH_PORT,
                'password': ''
            },
            {
                'host': 'localhost',
                'port': AUTH_PORT,
                'password': '******'
            }
        ]

        r = load_check('redisdb', {}, {})
        try:
            r.check(instances[0])
        except Exception as e:
            # Message text varies with the redis server version.
            self.assertTrue(
                # 2.8
                'noauth authentication required' in str(e).lower()
                # previously
                or 'operation not permitted' in str(e).lower(),
                str(e))

        r = load_check('redisdb', {}, {})
        try:
            r.check(instances[1])
        except Exception as e:
            self.assertTrue('invalid password' in str(e).lower(), str(e))
예제 #50
0
    def check_sudo(self, sudo_value):
        """The unbound command line starts with 'sudo ' iff sudo is configured."""
        conf = {'init_config': {"sudo": sudo_value}, 'instances': [{}]}
        check = load_check('unbound', conf, {})
        # The startswith result must mirror the boolean config flag exactly.
        self.assertEqual(check.get_cmd().startswith("sudo "), sudo_value)
예제 #51
0
 def test_metric_name_without_prefix(self):
     """metric() should namespace a bare name under 'storm.rest'."""
     instance = {
         'url': 'http://localhost:8080',
         'timeout': 0,
         "cache_file": "/dev/null"
     }
     self.check = load_check(
         'storm_rest_api', {'init_config': {}, 'instances': [instance]}, {})
     result = self.check.metric(self.check.instance_config(instance), 'baz')
     self.assertEqual('storm.rest.baz', result)
예제 #52
0
 def setUp(self):
     """Point the couchbase check at a local server with a short timeout."""
     instance = {
         'server': 'http://localhost:8091',
         'user': '******',
         'password': '******',
         'timeout': 0.1,
     }
     self.config = {'instances': [instance]}
     self.agentConfig = {'version': '0.1', 'api_key': 'toto'}
     self.check = load_check('couchbase', self.config, self.agentConfig)
예제 #53
0
    def test_ssh(self):
        """End-to-end ssh_check: good host OK, bad auth and bad host CRITICAL.

        NOTE(review): the '******' credentials look like placeholders scrubbed
        from the original source, and the first instance needs network access
        to io.smashthestack.org — this cannot run as-is.
        """
        config = {
            'instances': [
                {
                    'host': 'io.smashthestack.org',
                    'port': 22,
                    'username': '******',
                    'password': '******',
                    'sftp_check': False,
                    'private_key_file': '',
                    'add_missing_keys': True
                },
                {
                    'host': 'localhost',
                    'port': 22,
                    'username': '******',
                    'password': '******',
                    'sftp_check': False,
                    'private_key_file': '',
                    'add_missing_keys': True
                },
                {
                    'host': 'wronghost',
                    'port': 22,
                    'username': '******',
                    'password': '******',
                    'sftp_check': False,
                    'private_key_file': '',
                    'add_missing_keys': True
                },
            ]
        }

        agentConfig = {}
        self.check = load_check('ssh_check', config, agentConfig)

        #Testing that connection will work
        self.check.check(config['instances'][0])

        service = self.check.get_service_checks()
        self.assertEqual(service[0].get('status'), AgentCheck.OK)
        self.assertEqual(service[0].get('message'), None)
        self.assertEqual(service[0].get('tags'),
                         ["instance:io.smashthestack.org-22"])

        #Testing that bad authentication will raise exception
        self.assertRaises(Exception, self.check.check, config['instances'][1])
        #Testing that bad hostname will raise exception
        self.assertRaises(Exception, self.check.check, config['instances'][2])
        service_fail = self.check.get_service_checks()
        #Check failure status
        self.assertEqual(service_fail[0].get('status'), AgentCheck.CRITICAL)
예제 #54
0
    def test_psutil_config_to_stats(self):
        """The instance config maps psutil stat names to metric types and stats."""
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        instance = MOCK_CONFIG.get('instances')[0]

        stats, names_to_metric_types = check._psutil_config_to_stats(instance)

        # Each configured stat must appear in both mappings with its type.
        for name, expected_type in (('memory_info', 'gauge'), ('cpu_times', 'rate')):
            self.assertIn(name, names_to_metric_types)
            self.assertEqual(names_to_metric_types[name], expected_type)
            self.assertIn(name, stats)
예제 #55
0
    def test_send_single_metric(self):
        """_send_single_metric dispatches on metric type and rejects unknowns."""
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        check.gauge = mock.MagicMock()
        check.rate = mock.MagicMock()

        name, value = 'datadog.agent.collector.memory_info.vms', 16814081

        check._send_single_metric(name, value, 'gauge')
        check.gauge.assert_called_with(name, value)

        check._send_single_metric(name, value, 'rate')
        check.rate.assert_called_with(name, value)

        # An unrecognized metric type must raise.
        self.assertRaises(Exception, check._send_single_metric, name, value, 'bogus')
예제 #56
0
    def test_psutil_config_to_stats(self):
        """Configured psutil stats yield matching type and value mappings."""
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        first_instance = MOCK_CONFIG.get('instances')[0]
        stats, types_by_name = check._psutil_config_to_stats(first_instance)

        # memory_info is reported as a gauge, cpu_times as a rate.
        self.assertIn('memory_info', types_by_name)
        self.assertEqual(types_by_name['memory_info'], 'gauge')
        self.assertIn('cpu_times', types_by_name)
        self.assertEqual(types_by_name['cpu_times'], 'rate')

        # Both stats must also carry collected values.
        self.assertIn('memory_info', stats)
        self.assertIn('cpu_times', stats)
예제 #57
0
    def test_register_psutil_metrics(self):
        """Registered psutil stats should surface as agent metrics carrying
        the tags passed at registration time."""
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        tags = ['optional:tags']
        check._register_psutil_metrics(MOCK_STATS,
                                       MOCK_NAMES_TO_METRIC_TYPES,
                                       tags=tags)
        self.metrics = check.get_metrics()

        # Both memory_info fields from MOCK_STATS must have been emitted.
        for metric_name, metric_value in (
            ('datadog.agent.collector.memory_info.rss', 16814080),
            ('datadog.agent.collector.memory_info.vms', 74522624),
        ):
            self.assertMetric(metric_name, value=metric_value, tags=tags)
    def test_query_monitoring_metrics(self):
        """Couchbase query-monitoring metrics (currently skipped: the test
        needs a configured couchbase instance)."""
        raise SkipTest("Skipped for now as it's hard to configure couchbase on travis")
        # Everything below is unreachable until the skip above is removed.
        # Add query monitoring endpoint and reload check
        self.config['instances'][0]['query_monitoring_url'] = 'http://localhost:8093'
        self.check = load_check('couchbase', self.config, self.agentConfig)
        self.check.check(self.config['instances'][0])

        collected = self.check.get_metrics()

        self.assertTrue(isinstance(collected, ListType))
        self.assertTrue(len(collected) > 3)

        query_metrics = [m for m in collected if 'query' in m[0]]
        self.assertTrue(len(query_metrics) > 1, 'Unable to fund any query metrics')
예제 #59
0
    def test_bad_process_metric_check(self):
        """Tests that a bad configuration option for `process_metrics` gets ignored.

        Loads the check with MOCK_CONFIG_2 (which carries the bogus
        `non_existent_stat` entry) and verifies that only the valid stats
        are resolved while the bad one is silently dropped.
        """
        check = load_check(self.CHECK_NAME, MOCK_CONFIG_2, AGENT_CONFIG_DEV_MODE)
        # BUG FIX: the instance must come from MOCK_CONFIG_2 — the same config
        # the check was loaded with and the one containing the bad option.
        # Previously it read MOCK_CONFIG, so the assertNotIn checks below
        # passed vacuously without exercising the bad entry.
        instance = MOCK_CONFIG_2.get('instances')[0]
        stats, names_to_metric_types = check._psutil_config_to_stats(instance)

        # The valid stat survives with its declared metric type...
        self.assertIn('memory_info', names_to_metric_types)
        self.assertEqual(names_to_metric_types['memory_info'], 'gauge')

        # ...while the bogus entry is absent from both outputs.
        self.assertNotIn('non_existent_stat', names_to_metric_types)

        self.assertIn('memory_info', stats)
        self.assertNotIn('non_existent_stat', stats)