def test_init():
    with pytest.raises(Exception):
        # Must define a unique 'name' per vCenter instance
        VSphereCheck('vsphere', {}, {}, [{'': ''}])

    init_config = {
        'refresh_morlist_interval': -99,
        'refresh_metrics_metadata_interval': -99,
    }
    check = VSphereCheck('disk', init_config, {}, [{'name': 'vsphere_foo'}])
    assert check.time_started > 0
    assert check.pool_started is False
    assert len(check.jobs_status) == 0
    assert len(check.server_instances) == 0
    assert len(check.cache_times) == 1
    assert 'vsphere_foo' in check.cache_times
    assert check.cache_times['vsphere_foo'][MORLIST][INTERVAL] == -99
    assert check.cache_times['vsphere_foo'][METRICS_METADATA][INTERVAL] == -99
    assert len(check.event_config) == 1
    assert 'vsphere_foo' in check.event_config
    assert len(check.registry) == 0
    assert len(check.morlist_raw) == 0
    assert len(check.morlist) == 0
    assert len(check.metrics_metadata) == 0
    assert len(check.latest_event_query) == 0


def test__init__(instance):
    with pytest.raises(BadConfigError):
        # Must define a unique 'name' per vCenter instance
        VSphereCheck('vsphere', {}, {}, [{'': ''}])

    init_config = {
        'clean_morlist_interval': 50,
        'refresh_morlist_interval': 42,
        'refresh_metrics_metadata_interval': -42,
        'batch_property_collector_size': -1,
    }
    check = VSphereCheck('vsphere', init_config, {}, [instance])
    i_key = check._instance_key(instance)
    assert check.time_started > 0
    assert not check.server_instances
    assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42
    assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42
    assert check.clean_morlist_interval == 50
    assert len(check.event_config) == 1
    assert 'vsphere_mock' in check.event_config
    assert not check.registry
    assert not check.latest_event_query
    assert check.batch_collector_size == 0
    assert check.batch_morlist_size == 50


def vsphere():
    """
    Provide a check instance with mocked parts
    """
    # create topology from a fixture file
    vcenter_topology = create_topology('vsphere_topology.json')

    # mock pyvmomi stuff
    view_mock = MockedContainer(topology=vcenter_topology)
    viewmanager_mock = MagicMock(**{'CreateContainerView.return_value': view_mock})
    event_mock = MagicMock(createdTime=datetime.now())
    eventmanager_mock = MagicMock(latestEvent=event_mock)
    content_mock = MagicMock(viewManager=viewmanager_mock, eventManager=eventmanager_mock)

    # assemble the mocked server
    server_mock = MagicMock()
    server_mock.configure_mock(**{
        'RetrieveContent.return_value': content_mock,
        'content': content_mock,
    })

    # create a check instance
    check = VSphereCheck('disk', {}, {}, [instance()])

    # patch the check instance
    check._get_server_instance = MagicMock(return_value=server_mock)

    # disable the thread pool
    check.pool = MagicMock(apply_async=lambda func, args: func(*args))
    check.pool_started = True  # otherwise the mock will be overwritten

    return check


def test__should_cache(instance):
    now = time.time()
    # do not use fixtures for the check instance, some params are set at
    # __init__ time and we need to instantiate the check multiple times
    check = VSphereCheck('vsphere', {}, {}, [instance])
    i_key = check._instance_key(instance)

    # first run should always cache
    assert check._should_cache(instance, CacheConfig.Morlist)
    assert check._should_cache(instance, CacheConfig.Metadata)

    # explicitly set cache expiration times, don't use defaults so we also test
    # configuration is properly propagated
    init_config = {
        'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL,
        'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL,
    }
    check = VSphereCheck('vsphere', init_config, {}, [instance])

    # simulate previous runs, set the last execution time in the past
    check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL))
    check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL))

    with mock.patch("time.time", return_value=now):
        assert not check._should_cache(instance, CacheConfig.Morlist)
        assert not check._should_cache(instance, CacheConfig.Metadata)


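# Based on the assertions above, `_should_cache` is assumed to compare the time elapsed
# since the last refresh against the configured interval (on the first run there is no
# previous timestamp, so it always caches). A minimal sketch of that assumed logic, not
# the actual implementation; `get_last` is assumed here as the counterpart of the
# `set_last` call exercised in the test:
def _should_cache(self, instance, config_type):
    i_key = self._instance_key(instance)
    elapsed = time.time() - self.cache_config.get_last(config_type, i_key)
    interval = self.cache_config.get_interval(config_type, i_key)
    # refresh the cache only when more than a full interval has passed
    return elapsed > interval

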
def vsphere():
    """
    Provide a check instance with mocked parts
    """
    # mock the server
    server_mock = get_mocked_server()

    # create a check instance
    check = VSphereCheck('disk', {}, {}, [instance()])

    # patch the check instance
    check._get_server_instance = MagicMock(return_value=server_mock)

    # return the check after disabling the thread pool
    return disable_thread_pool(check)


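# `disable_thread_pool` is assumed to do what the older fixture above does inline:
# replace the check's pool with a synchronous stand-in so that submitted jobs run
# immediately in the test process. A minimal sketch of that assumed helper:
def disable_thread_pool(check):
    # run submitted jobs synchronously instead of dispatching them to worker threads
    check.pool = MagicMock(apply_async=lambda func, args: func(*args))
    # mark the pool as started so the check does not replace the mock on the next run
    check.pool_started = True
    return check

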
def test_renew_rest_api_session_on_failure(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update({'collect_tags': True})
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    config = VSphereConfig(realtime_instance, {}, MagicMock())
    check.api_rest = VSphereRestAPI(config, MagicMock())
    check.api_rest.make_batch = MagicMock(side_effect=[Exception, []])
    check.api_rest.smart_connect = MagicMock()

    tags = check.collect_tags({})

    assert tags
    assert check.api_rest.make_batch.call_count == 2
    assert check.api_rest.smart_connect.call_count == 1


def test_continue_if_tag_collection_fail(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update({'collect_tags': True})
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    check.log = MagicMock()

    with mock.patch('requests.post', side_effect=Exception, autospec=True):
        dd_run_check(check)

    aggregator.assert_metric('vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE'], hostname='10.0.0.104')
    check.log.error.assert_called_once_with(
        "Cannot connect to vCenter REST API. Tags won't be collected. Error: %s", mock.ANY
    )


def test_version_metadata(aggregator, dd_run_check, realtime_instance, datadog_agent):
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    check.check_id = 'test:123'
    dd_run_check(check)

    version_metadata = {
        'version.scheme': 'semver',
        'version.major': '6',
        'version.minor': '7',
        'version.patch': '0',
        'version.build': '123456789',
        'version.raw': '6.7.0+123456789',
    }
    datadog_agent.assert_metadata('test:123', version_metadata)


def test_tags_filters_integration_tags(aggregator, dd_run_check, historical_instance):
    historical_instance['collect_tags'] = True
    historical_instance['resource_filters'] = [
        {
            'resource': 'cluster',
            'property': 'tag',
            'patterns': [
                r'vsphere_datacenter:Datacenter2',
            ],
        },
        {
            'resource': 'datastore',
            'property': 'tag',
            'patterns': [
                r'vsphere_datastore:Datastore 1',
            ],
        },
    ]
    check = VSphereCheck('vsphere', {}, [historical_instance])
    dd_run_check(check)

    aggregator.assert_metric('vsphere.cpu.usage.avg', count=1)
    aggregator.assert_metric_has_tag('vsphere.cpu.usage.avg', 'vsphere_datacenter:Datacenter2', count=1)
    aggregator.assert_metric_has_tag('vsphere.cpu.usage.avg', 'vsphere_datacenter:Dätacenter', count=0)

    aggregator.assert_metric('vsphere.disk.used.latest', count=1)
    aggregator.assert_metric_has_tag('vsphere.disk.used.latest', 'vsphere_datastore:Datastore 1', count=1)
    aggregator.assert_metric_has_tag('vsphere.disk.used.latest', 'vsphere_datastore:Datastore 2', count=0)


def test_service_check_ko(aggregator, instance):
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
        # SmartConnect fails
        SmartConnect.side_effect = Exception()

        with pytest.raises(ConnectionError):
            check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.CRITICAL,
            count=1,
            tags=SERVICE_CHECK_TAGS,
        )

        aggregator.reset()

        # SmartConnect succeeds, CurrentTime fails
        server = MagicMock()
        server.CurrentTime.side_effect = Exception()
        SmartConnect.side_effect = None
        SmartConnect.return_value = server

        with pytest.raises(ConnectionError):
            check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.CRITICAL,
            count=1,
            tags=SERVICE_CHECK_TAGS,
        )


def test_collect_metric_instance_values(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update(
        {
            'collect_per_instance_filters': {
                'vm': [r'cpu\.usagemhz\.avg', r'disk\..*'],
                'host': [r'cpu\.coreUtilization\..*', r'sys\.uptime\..*', r'disk\..*'],
            }
        }
    )
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    dd_run_check(check)

    # Following metrics should match and have instance value tag
    aggregator.assert_metric('vsphere.cpu.usagemhz.avg', tags=['cpu_core:6', 'vcenter_server:FAKE'])
    aggregator.assert_metric(
        'vsphere.cpu.coreUtilization.avg', hostname='10.0.0.104', tags=['cpu_core:16', 'vcenter_server:FAKE']
    )

    # Following metrics should NOT match and do NOT have instance value tag
    aggregator.assert_metric('vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE'])
    aggregator.assert_metric('vsphere.cpu.totalCapacity.avg', tags=['vcenter_server:FAKE'])

    # None of `vsphere.disk.usage.avg` metrics have instance values for specific metric+resource_type
    # Hence the aggregated metric IS collected
    aggregator.assert_metric('vsphere.disk.usage.avg', tags=['vcenter_server:FAKE'], hostname='VM4-1', count=1)

    # Some of `vsphere.disk.read.avg` metrics have instance values for specific metric+resource_type
    # Hence the aggregated metric IS NOT collected
    aggregator.assert_metric('vsphere.disk.read.avg', tags=['vcenter_server:FAKE'], hostname='VM4-1', count=0)
    for instance_tag in ['device_path:value-aa', 'device_path:value-bb']:
        aggregator.assert_metric(
            'vsphere.disk.read.avg', tags=['vcenter_server:FAKE'] + [instance_tag], hostname='VM4-1', count=1
        )


def test_attributes_filters(aggregator, dd_run_check, realtime_instance):
    realtime_instance['collect_attributes'] = True
    realtime_instance['attributes_prefix'] = 'vattr_'
    realtime_instance['resource_filters'] = [
        {'resource': 'vm', 'property': 'attribute', 'patterns': [r'vattr_foo:bar\d']},
        {'resource': 'host', 'property': 'name', 'type': 'blacklist', 'patterns': [r'.*']},
    ]
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    dd_run_check(check)

    # Assert that only the two matching resources were collected
    aggregator.assert_metric('vsphere.cpu.usage.avg', count=2)

    # Assert that the collected resources are the ones whose attribute matches the filter
    aggregator.assert_metric('vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE'], hostname='VM4-15')
    aggregator.assert_metric('vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE'], hostname='VM4-9')


def test_historical_metrics_no_dsc_folder(aggregator, dd_run_check, historical_instance):
    """This test performs the same check as `test_historical_metrics`, but deactivates the option
    that adds the datastore cluster folder to metric tags."""
    check = VSphereCheck('vsphere', {}, [historical_instance])
    check._config.include_datastore_cluster_folder_tag = False
    dd_run_check(check)

    fixture_file = os.path.join(HERE, 'fixtures', 'metrics_historical_values.json')
    with open(fixture_file, 'r') as f:
        data = json.load(f)
        for metric in data:
            all_tags = metric.get('tags')
            if all_tags is not None:
                # The tag 'vsphere_folder:Datastores' is not supposed to be there anymore!
                all_tags = [tag for tag in all_tags if tag != 'vsphere_folder:Datastores']
            aggregator.assert_metric(metric['name'], metric.get('value'), tags=all_tags)

    aggregator.assert_all_metrics_covered()


def test_collect_tags(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update({'collect_tags': True, 'excluded_host_tags': ['my_cat_name_1', 'my_cat_name_2']})
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    dd_run_check(check)

    aggregator.assert_metric(
        'vsphere.cpu.usage.avg',
        tags=['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2', 'vcenter_server:FAKE'],
        hostname='VM4-4',
    )
    aggregator.assert_metric(
        'vsphere.rescpu.samplePeriod.latest',
        tags=['my_cat_name_2:my_tag_name_2', 'vcenter_server:FAKE'],
        hostname='10.0.0.104',
    )
    aggregator.assert_metric(
        'vsphere.datastore.maxTotalLatency.latest',
        tags=['my_cat_name_2:my_tag_name_2', 'vcenter_server:FAKE'],
        hostname='10.0.0.104',
    )
    aggregator.assert_metric('datadog.vsphere.query_tags.time', tags=['vcenter_server:FAKE'])


def test_tags_filters_with_prefix_when_tags_are_not_yet_collected(aggregator, dd_run_check, realtime_instance):
    realtime_instance['collect_tags'] = True
    realtime_instance['resource_filters'] = [
        {'resource': 'vm', 'property': 'tag', 'patterns': [r'foo_my_cat_name_1:my_tag_name_1']},
        {'resource': 'host', 'property': 'name', 'type': 'blacklist', 'patterns': [r'.*']},
    ]
    realtime_instance['tags_prefix'] = 'foo_'
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    dd_run_check(check)

    # Assert that only a single resource was collected
    aggregator.assert_metric('vsphere.cpu.usage.avg', count=1)

    # Assert that the resource that was collected is the one with the correct tag
    aggregator.assert_metric('vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE'], hostname='VM4-4')


def test_service_check_ko(aggregator, instance):
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
        # SmartConnect fails
        SmartConnect.side_effect = Exception()

        with pytest.raises(Exception) as e:
            check.check(instance)

        # FIXME: the check should raise a more meaningful exception so we don't
        # need to check the message
        assert "Connection to None failed:" in str(e.value)
        assert len(aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)) == 1
        sc = aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)[0]
        assert sc.status == check.CRITICAL
        assert 'foo:bar' in sc.tags

        aggregator.reset()

        # SmartConnect succeeds, RetrieveContent fails
        server = MagicMock()
        server.RetrieveContent.side_effect = Exception()
        SmartConnect.side_effect = None
        SmartConnect.return_value = server

        with pytest.raises(Exception) as e:
            check.check(instance)

        assert "Connection to None died unexpectedly:" in str(e.value)
        assert len(aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)) == 1
        sc = aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)[0]
        assert sc.status == check.CRITICAL
        assert 'foo:bar' in sc.tags


def test_specs_start_time(aggregator, dd_run_check, historical_instance):
    mock_time = dt.datetime.now()

    check = VSphereCheck('vsphere', {}, [historical_instance])
    dd_run_check(check)
    check.api.server_time = mock_time

    start_times = []
    for specs in check.make_query_specs():
        for spec in specs:
            start_times.append(spec.startTime)

    assert len(start_times) != 0
    for start_time in start_times:
        assert start_time == (mock_time - dt.timedelta(hours=2))


def test_service_check_ok(aggregator, instance):
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
        SmartConnect.return_value = get_mocked_server()
        check.check(instance)

    assert len(aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)) > 0
    sc = aggregator.service_checks(VSphereCheck.SERVICE_CHECK_NAME)[0]
    assert sc.status == check.OK
    assert 'foo:bar' in sc.tags


def test_service_check_ok(aggregator, instance):
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
        with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
            SmartConnect.return_value = get_mocked_server()
            check.check(instance)

            aggregator.assert_service_check(
                VSphereCheck.SERVICE_CHECK_NAME,
                status=VSphereCheck.OK,
                tags=SERVICE_CHECK_TAGS,
            )


def test_collect_metric_instance_values_historical(aggregator, dd_run_check, historical_instance):
    historical_instance.update(
        {
            'collect_per_instance_filters': {
                'datastore': [r'disk\..*'],
                # datacenter metric group doesn't have any instance tags so this has no effect
                'datacenter': [r'cpu\.usagemhz\.avg'],
                'cluster': [r'cpu\.usagemhz\.avg'],
            }
        }
    )
    check = VSphereCheck('vsphere', {}, [historical_instance])
    dd_run_check(check)

    aggregator.assert_metric(
        'vsphere.cpu.usagemhz.avg',
        tags=[
            'cpu_core:16',
            'vcenter_server:FAKE',
            'vsphere_cluster:Cluster2',
            'vsphere_datacenter:Datacenter2',
            'vsphere_folder:Datacenters',
            'vsphere_folder:host',
            'vsphere_type:cluster',
        ],
    )
    aggregator.assert_metric(
        'vsphere.disk.used.latest',
        tags=[
            'device_path:value-aa',
            'vcenter_server:FAKE',
            'vsphere_datacenter:Datacenter2',
            'vsphere_datastore:NFS Share',
            'vsphere_folder:Datacenters',
            'vsphere_folder:datastore',
            'vsphere_type:datastore',
        ],
    )

    # Following metrics should NOT match and do NOT have instance value tag
    aggregator.assert_metric(
        'vsphere.cpu.usage.avg',
        tags=[
            'vcenter_server:FAKE',
            'vsphere_cluster:Cluster2',
            'vsphere_datacenter:Datacenter2',
            'vsphere_folder:Datacenters',
            'vsphere_folder:host',
            'vsphere_type:cluster',
        ],
    )


def test_tag_prefix(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update(
        {'collect_tags': True, 'tags_prefix': 'ABC_', 'excluded_host_tags': ['ABC_my_cat_name_1', 'ABC_my_cat_name_2']}
    )
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    dd_run_check(check)

    aggregator.assert_metric(
        'vsphere.cpu.usage.avg',
        tags=['ABC_my_cat_name_1:my_tag_name_1', 'ABC_my_cat_name_2:my_tag_name_2', 'vcenter_server:FAKE'],
        hostname='VM4-4',
    )


def test_rest_api_config(init_config, instance_config, expected_shared_rest_api_options, expected_rest_api_options):
    instance_config.update(
        {
            'name': 'abc',
            'use_legacy_check_version': False,
            'host': 'my-host',
        }
    )
    check = VSphereCheck('vsphere', init_config, [instance_config])

    assert check._config.rest_api_options == expected_rest_api_options
    assert check._config.shared_rest_api_options == expected_shared_rest_api_options


def test_historical_metrics(aggregator, dd_run_check, historical_instance):
    """This test asserts that the same api content always produces the same metrics."""
    check = VSphereCheck('vsphere', {}, [historical_instance])
    dd_run_check(check)

    fixture_file = os.path.join(HERE, 'fixtures', 'metrics_historical_values.json')
    with open(fixture_file, 'r') as f:
        data = json.load(f)
        for metric in data:
            aggregator.assert_metric(metric['name'], metric.get('value'), tags=metric.get('tags'))

    aggregator.assert_all_metrics_covered()


def test__is_excluded():
    """
    * Exclude hosts/vms not compliant with the user's `*_include` configuration.
    * Exclude "non-labeled" virtual machines when the user configuration instructs to.
    """
    # Sample(s)
    include_regexes = {'host_include': "f[o]+", 'vm_include': "f[o]+"}

    # OK
    included_host = MockedMOR(spec="HostSystem", name="foo")
    included_vm = MockedMOR(spec="VirtualMachine", name="foo")
    assert not VSphereCheck._is_excluded(included_host, {"name": included_host.name}, include_regexes, None)
    assert not VSphereCheck._is_excluded(included_vm, {"name": included_vm.name}, include_regexes, None)

    # Not OK!
    excluded_host = MockedMOR(spec="HostSystem", name="bar")
    excluded_vm = MockedMOR(spec="VirtualMachine", name="bar")
    assert VSphereCheck._is_excluded(excluded_host, {"name": excluded_host.name}, include_regexes, None)
    assert VSphereCheck._is_excluded(excluded_vm, {"name": excluded_vm.name}, include_regexes, None)

    # Sample(s)
    include_regexes = None
    include_only_marked = True

    # OK
    included_vm = MockedMOR(spec="VirtualMachine", name="foo", label=True)
    assert not VSphereCheck._is_excluded(
        included_vm, {"customValue": included_vm.customValue}, include_regexes, include_only_marked
    )

    # Not OK
    included_vm = MockedMOR(spec="VirtualMachine", name="foo")
    assert VSphereCheck._is_excluded(included_vm, {"customValue": []}, include_regexes, include_only_marked)


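# `MockedMOR` above is a test helper faking a pyVmomi Managed Object Reference (MOR).
# Based on how the test uses it, it is assumed to resolve `spec` to the matching `vim`
# class and to expose a `customValue` list that is non-empty when `label=True`. A
# minimal sketch under those assumptions; the marker value and the exact base class
# are hypothetical, not the integration's actual helper:
from unittest.mock import MagicMock

from pyVmomi import vim


class MockedMOR(MagicMock):
    def __init__(self, spec=None, name=None, label=False, **kwargs):
        # spec the mock against the real vim class so isinstance() checks pass
        super(MockedMOR, self).__init__(spec=getattr(vim, spec), **kwargs)
        self.name = name
        # a "labeled" VM carries a custom value marking it as monitored
        self.customValue = [MagicMock(value='DatadogMonitored')] if label else []

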
def test_error_disabled_tags(realtime_instance):
    realtime_instance['collect_tags'] = False
    realtime_instance['resource_filters'] = [
        {'resource': 'vm', 'property': 'name', 'patterns': [r'^\$VM5$', r'^VM4-2\d$']},
        {'resource': 'vm', 'property': 'tag', 'patterns': [r'env:production']},
    ]

    # Filtering on tags without collecting them should not be possible
    with pytest.raises(ConfigurationError):
        VSphereCheck('vsphere', {}, [realtime_instance])

    # Once tag collection is enabled, no configuration error should be raised
    realtime_instance['collect_tags'] = True
    VSphereCheck('vsphere', {}, [realtime_instance])


def test_error_disabled_attributes(realtime_instance):
    realtime_instance['collect_attributes'] = False
    realtime_instance['resource_filters'] = [
        {'resource': 'vm', 'property': 'name', 'patterns': [r'^\$VM5$', r'^VM4-2\d$']},
        {'resource': 'vm', 'property': 'attribute', 'patterns': [r'env:production']},
    ]

    # Filtering on attributes without collecting them should not be possible
    with pytest.raises(ConfigurationError):
        VSphereCheck('vsphere', {}, [realtime_instance])

    # Now that attributes are enabled, no configuration error should be raised.
    realtime_instance['collect_attributes'] = True
    VSphereCheck('vsphere', {}, [realtime_instance])


def test_realtime_metrics(aggregator, dd_run_check, realtime_instance):
    """This test asserts that the same api content always produces the same metrics."""
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    dd_run_check(check)

    fixture_file = os.path.join(HERE, 'fixtures', 'metrics_realtime_values.json')
    with open(fixture_file, 'r') as f:
        data = json.load(f)
        for metric in data:
            aggregator.assert_metric(
                metric['name'], metric.get('value'), hostname=metric.get('hostname'), tags=metric.get('tags')
            )

    aggregator.assert_metric('datadog.vsphere.collect_events.time', metric_type=aggregator.GAUGE, count=1)
    aggregator.assert_all_metrics_covered()


def test_refresh_tags_cache_should_not_raise_exception(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update({'collect_tags': True})
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    check.log = MagicMock()
    check.api_rest = MagicMock()
    check.api_rest.get_resource_tags_for_mors.side_effect = APIConnectionError("Some error")

    check.collect_tags({})

    # The error is logged, but `collect_tags` should NOT raise any exception
    check.log.error.assert_called_once_with("Failed to collect tags: %s", mock.ANY)


def test_lab(aggregator):
    """
    This test is intended to be run manually to connect to a real vSphere Instance

    It's useful for:
    - QA/testing the integration with a real vSphere instance
    - using a debugger to inspect values from a real vSphere instance
    - analysing the metrics received, see `metrics_lab.csv` below

    Example usage:
    $ export TEST_VSPHERE_USER='******' TEST_VSPHERE_PASS='******'
    $ TEST_VSPHERE_RUN_LAB=true ddev test vsphere:py38 -k test_lab
    """
    if not is_affirmative(os.environ.get('TEST_VSPHERE_RUN_LAB')):
        pytest.skip(
            "Skipped! Set TEST_VSPHERE_RUN_LAB to run this test. "
            "TEST_VSPHERE_USER and TEST_VSPHERE_PASS must also be set."
        )
    username = os.environ['TEST_VSPHERE_USER']
    password = os.environ['TEST_VSPHERE_PASS']
    instance = {
        'host': 'aws.vcenter.localdomain',
        'username': username,
        'password': password,
        'collection_level': 4,
        'collection_type': 'both',
        'use_legacy_check_version': False,
        'collect_metric_instance_values': True,
        'ssl_verify': False,
        'collect_tags': True,
        'collect_attributes': True,
        'rest_api_options': {
            'timeout': '5',
        },
    }
    check = VSphereCheck('vsphere', {}, [instance])
    check.initiate_api_connection()
    check.check(instance)

    # Basic assert
    aggregator.assert_metric('vsphere.cpu.coreUtilization.avg')
    aggregator.assert_metric_has_tag('vsphere.cpu.usage.avg', 'MyCategoryFoo:MyTagFoo')  # verify collect_tags works

    print("TOTAL metrics: {}".format(len(aggregator._metrics)))

    # Write all metrics to a file
    f = open(os.path.join(HERE, 'metrics_lab.csv'), 'w')
    f.write("name,host,type,value,tags\n")
    for metrics in aggregator._metrics.values():
        for m in metrics:
            f.write("{},{},{},{},\"{}\"\n".format(m.name, m.hostname, m.type, m.value, m.tags))


def test_events_only(aggregator, events_only_instance):
    check = VSphereCheck('vsphere', {}, [events_only_instance])
    check.initiate_api_connection()

    time1 = dt.datetime.now()
    event1 = mock_alarm_event(from_status='green', key=10, created_time=time1)
    check.api.mock_events = [event1]
    check.check(None)

    aggregator.assert_event("vCenter monitor status changed on this alarm, it was green and it's now red.", count=1)
    aggregator.assert_metric('datadog.vsphere.collect_events.time')

    # asserting that all metrics are covered verifies that no historical or realtime metrics were collected
    aggregator.assert_all_metrics_covered()