def test_service_check_ko(aggregator, instance):
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
        # SmartConnect fails
        SmartConnect.side_effect = Exception()

        with pytest.raises(ConnectionError):
            check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.CRITICAL,
            count=1,
            tags=SERVICE_CHECK_TAGS
        )

        aggregator.reset()

        # SmartConnect succeeds, CurrentTime fails
        server = MagicMock()
        server.CurrentTime.side_effect = Exception()
        SmartConnect.side_effect = None
        SmartConnect.return_value = server

        with pytest.raises(ConnectionError):
            check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.CRITICAL,
            count=1,
            tags=SERVICE_CHECK_TAGS
        )
def test_service_check_ko(aggregator, instance):
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
        # SmartConnect fails
        SmartConnect.side_effect = Exception()

        with pytest.raises(Exception) as e:
            check.check(instance)

        # FIXME: the check should raise a more meaningful exception so we don't
        # need to check the message
        assert "Connection to None failed:" in str(e.value)

        assert len(aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)) == 1
        sc = aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)[0]
        assert sc.status == check.CRITICAL
        assert 'foo:bar' in sc.tags

        aggregator.reset()

        # SmartConnect succeeds, RetrieveContent fails
        server = MagicMock()
        server.RetrieveContent.side_effect = Exception()
        SmartConnect.side_effect = None
        SmartConnect.return_value = server

        with pytest.raises(Exception) as e:
            check.check(instance)

        assert "Connection to None died unexpectedly:" in str(e.value)

        assert len(aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)) == 1
        sc = aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)[0]
        assert sc.status == check.CRITICAL
        assert 'foo:bar' in sc.tags
def test_build_event(aggregator):
    agent_config = {'version': '0.1', 'api_key': 'toto'}
    check = TeamCityCheck('teamcity', {}, agent_config, INSTANCES)

    with patch('requests.get', get_mock_first_build):
        check.check(check.instances[0])

    assert len(aggregator.metric_names) == 0
    assert len(aggregator.events) == 0

    aggregator.reset()

    with patch('requests.get', get_mock_one_more_build):
        check.check(check.instances[0])

    events = aggregator.events
    assert len(events) == 1
    assert events[0]['msg_title'] == "Build for One test build successful"
    assert events[0]['msg_text'] == "Build Number: 2\nDeployed To: buildhost42.dtdg.co\n\nMore Info: " + \
        "http://localhost:8111/viewLog.html?buildId=2&buildTypeId=TestProject_TestBuild"
    assert events[0]['tags'] == ['build', 'one:tag', 'one:test']
    assert events[0]['host'] == "buildhost42.dtdg.co"

    aggregator.reset()

    # One more check should not create any more events
    with patch('requests.get', get_mock_one_more_build):
        check.check(check.instances[0])

    assert len(aggregator.events) == 0
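# The TeamCity test above patches `requests.get` with callables such as
# `get_mock_first_build`. Below is a minimal sketch of how such a helper can be
# written, assuming it simply returns a canned response object; the payload shape
# and field names are illustrative assumptions, not taken from the real test module.
import json

from mock import MagicMock


def _mock_response(payload):
    # Fake just enough of a `requests.Response` for the check to consume.
    response = MagicMock()
    response.status_code = 200
    response.json.return_value = payload
    response.text = json.dumps(payload)
    return response


def get_mock_first_build(url, *args, **kwargs):
    # First poll: return a single known build; per the test above, this should
    # only seed the check's state and produce no event.
    return _mock_response({'count': 1, 'build': [{'id': 1, 'number': '1', 'status': 'SUCCESS'}]})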
def test_file_metrics(aggregator):
    """
    File metric coverage
    """
    config_stubs = get_config_stubs(temp_dir, filegauges=True)

    for config in config_stubs:
        aggregator.reset()
        dir_check.check(config)
        dirtagname = config.get('dirtagname', "name")
        name = config.get('name', temp_dir)
        filetagname = config.get('filetagname', "filename")
        dir_tags = [dirtagname + ":%s" % name, 'optional:tag1']

        # File metrics
        for mname in FILE_METRICS:
            if config.get('pattern') != "file_*":
                # 2 '*.log' files in 'temp_dir'
                for i in xrange(1, 3):
                    file_tag = [filetagname + ":%s" % os.path.normpath(temp_dir + "/log_" + str(i) + ".log")]
                    aggregator.assert_metric(mname, tags=dir_tags + file_tag, count=1)

            if config.get('pattern') != "*.log":
                # Files in 'temp_dir'
                for i in xrange(0, 10):
                    file_tag = [filetagname + ":%s" % os.path.normpath(temp_dir + "/file_" + str(i))]
                    aggregator.assert_metric(mname, tags=dir_tags + file_tag, count=1)

            if not config.get('pattern'):
                # Files in 'temp_dir/subfolder'
                if config.get('recursive'):
                    for i in xrange(0, 5):
                        file_tag = [filetagname + ":%s" % os.path.normpath(temp_dir + "/subfolder" + "/file_" + str(i))]
                        aggregator.assert_metric(mname, tags=dir_tags + file_tag, count=1)

        # Common metrics
        for mname in COMMON_METRICS:
            aggregator.assert_metric(mname, tags=dir_tags, count=1)

        # Raises when coverage < 100%
        assert aggregator.metrics_asserted_pct == 100.0
def test_directory_metrics(aggregator):
    """
    Directory metric coverage
    """
    config_stubs = get_config_stubs(temp_dir)
    countonly_stubs = get_config_stubs(temp_dir)

    # Try all the configurations in countonly mode as well
    for stub in countonly_stubs:
        stub['countonly'] = True

    for config in config_stubs + countonly_stubs:
        aggregator.reset()
        dir_check.check(config)
        dirtagname = config.get('dirtagname', "name")
        name = config.get('name', temp_dir)
        dir_tags = [dirtagname + ":%s" % name, 'optional:tag1']

        # 'recursive' and 'pattern' parameters
        if config.get('pattern') == "*.log":
            # 2 '*.log' files in 'temp_dir'
            aggregator.assert_metric("system.disk.directory.files", tags=dir_tags, count=1, value=2)
        elif config.get('pattern') == "file_*":
            # 10 'file_*' files in 'temp_dir'
            aggregator.assert_metric("system.disk.directory.files", tags=dir_tags, count=1, value=10)
        elif config.get('recursive'):
            # 12 files in 'temp_dir' + 5 files in 'temp_dir/subfolder'
            aggregator.assert_metric("system.disk.directory.files", tags=dir_tags, count=1, value=17)
        else:
            # 12 files in 'temp_dir'
            aggregator.assert_metric("system.disk.directory.files", tags=dir_tags, count=1, value=12)

        # Raises when coverage < 100%
        assert aggregator.metrics_asserted_pct == 100.0
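# A hypothetical sketch of the `get_config_stubs` helper the two directory tests above
# rely on, assuming it returns one instance dict per configuration permutation. The keys
# mirror what the tests read back via `config.get(...)` ('name', 'dirtagname',
# 'filetagname', 'pattern', 'recursive') plus the 'optional:tag1' tag they assert on;
# the 'directory' key and the custom values are illustrative assumptions, not the real helper.
def get_config_stubs(directory, filegauges=False):
    return [
        {'directory': directory, 'tags': ['optional:tag1'], 'filegauges': filegauges},
        {'directory': directory, 'name': 'custom_name', 'tags': ['optional:tag1'], 'filegauges': filegauges},
        {'directory': directory, 'dirtagname': 'custom_dirtag', 'tags': ['optional:tag1'], 'filegauges': filegauges},
        {'directory': directory, 'filetagname': 'custom_filetag', 'tags': ['optional:tag1'], 'filegauges': filegauges},
        {'directory': directory, 'pattern': '*.log', 'tags': ['optional:tag1'], 'filegauges': filegauges},
        {'directory': directory, 'pattern': 'file_*', 'tags': ['optional:tag1'], 'filegauges': filegauges},
        {'directory': directory, 'recursive': True, 'tags': ['optional:tag1'], 'filegauges': filegauges},
    ]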
def test_check_auto(aggregator, spin_up_apache):
    apache_check = Apache(CHECK_NAME, {}, {})
    apache_check.check(AUTO_CONFIG)

    tags = AUTO_CONFIG['tags']
    for mname in APACHE_GAUGES + APACHE_RATES:
        aggregator.assert_metric(mname, tags=tags, count=1)

    assert aggregator.service_checks('apache.can_connect')[0].status == Apache.OK

    sc_tags = ['host:' + HOST, 'port:' + PORT] + tags
    for sc in aggregator.service_checks('apache.can_connect'):
        for tag in sc.tags:
            assert tag in sc_tags

    assert aggregator.metrics_asserted_pct == 100.0
    aggregator.reset()
def test_check(self, aggregator, mock_get):
    """
    Testing kube_dns check.
    """
    check = KubeDNSCheck('kube_dns', {}, {}, [instance])
    check.check(instance)

    # check that we then get the count metrics also
    check.check(instance)

    for metric in self.METRICS + self.COUNT_METRICS:
        aggregator.assert_metric(metric)
        aggregator.assert_metric_has_tag(metric, customtag)

    aggregator.assert_all_metrics_covered()

    # Make sure instance tags are not modified, see #3066
    aggregator.reset()
    check.check(instance)
    name = self.NAMESPACE + ".request_duration.seconds.sum"
    aggregator.assert_metric(name)
    aggregator.assert_metric(name, tags=['custom:tag', 'system:reverse'])
def test_check(mock_get_usage, mock_device_list, aggregator):
    """
    Testing Btrfs check.
    """
    with mock.patch.object(btrfs_check, 'get_unallocated_space', return_value=None):
        btrfs_check.check({})
        aggregator.assert_metric('system.disk.btrfs.unallocated', count=0)

    aggregator.reset()

    with mock.patch.object(btrfs_check, 'get_unallocated_space', return_value=0):
        btrfs_check.check({})
        aggregator.assert_metric('system.disk.btrfs.total', count=4)
        aggregator.assert_metric('system.disk.btrfs.used', count=4)
        aggregator.assert_metric('system.disk.btrfs.free', count=4)
        aggregator.assert_metric('system.disk.btrfs.usage', count=4)
        aggregator.assert_metric('system.disk.btrfs.unallocated', count=1)

    aggregator.assert_all_metrics_covered()
def aggregator():
    from datadog_checks.stubs import aggregator

    aggregator.reset()
    return aggregator
def Aggregator():
    # The stub lives in `datadog_checks.stubs`; import it locally as in the variant above.
    from datadog_checks.stubs import aggregator

    aggregator.reset()
    return aggregator
# The `__aggregator` alias referenced below needs the stub imported under that name.
from datadog_checks.stubs import aggregator as __aggregator


def aggregator():
    __aggregator.reset()
    return __aggregator
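# A minimal conftest.py-style sketch of how the stubs above are typically exposed to the
# tests, assuming the `datadog_checks.stubs` aggregator seen earlier; the alias and the
# pytest fixture wiring are the only additions here, chosen so the fixture name matches
# the `aggregator` argument the tests request.
import pytest

from datadog_checks.stubs import aggregator as aggregator_stub


@pytest.fixture
def aggregator():
    # Reset the shared stub between tests so metric and service-check assertions start clean.
    aggregator_stub.reset()
    return aggregator_stub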