Example #1
    def __init__(self, name, init_config, instances):
        super(VSphereCheck, self).__init__(name, init_config, instances)
        self.config = VSphereConfig(self.instance, self.log)

        self.latest_event_query = datetime.now()
        self.infrastructure_cache = InfrastructureCache(
            interval_sec=self.config.refresh_infrastructure_cache_interval)
        self.metrics_metadata_cache = MetricsMetadataCache(
            interval_sec=self.config.refresh_metrics_metadata_cache_interval)
        self.api = None
        # Do not override `AgentCheck.hostname`
        self._hostname = None
        self.thread_pool = ThreadPoolExecutor(
            max_workers=self.config.threads_count)
        self.check_initializations.append(self.initiate_api_connection)
Example #2
def test_get_new_events_with_fallback(realtime_instance):
    realtime_instance['use_collect_events_fallback'] = True

    with patch('datadog_checks.vsphere.api.connect'):
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())

        event1 = vim.event.Event(key=1)
        event3 = vim.event.Event(key=3)
        event_collector = MagicMock()
        api._conn.content.eventManager.QueryEvents.side_effect = [
            SoapAdapter.ParserError("some parse error"),
            [event1],
            SoapAdapter.ParserError("event parse error"),
            [event3],
        ]
        api._conn.content.eventManager.CreateCollectorForEvents.return_value = event_collector

        event_collector.ReadNextEvents.side_effect = [
            [event1],
            SoapAdapter.ParserError("event parse error"),
            [event3],
            [],
        ]

        events = api.get_new_events(start_time=dt.datetime.now())
        assert events == [event1, event3]
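
The test above exercises an event-collection fallback: when a bulk QueryEvents call fails to parse, events are read through a collector and unparsable pages are skipped. A minimal standalone sketch of that loop, assuming events are read page by page and an empty page ends the iteration (hypothetical helper, not the integration's actual code):

def read_events_with_fallback(read_next_page, parser_errors, max_attempts=1000):
    # Hypothetical fallback loop: read one page at a time, skip pages that
    # fail to parse, and stop at the first empty page (or after max_attempts,
    # as a guard against an endlessly failing collector).
    events = []
    for _ in range(max_attempts):
        try:
            page = read_next_page()
        except parser_errors:
            continue
        if not page:
            break
        events.extend(page)
    return events
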
Example #3
def test_get_infrastructure(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect'):
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())

        container_view = api._conn.content.viewManager.CreateContainerView.return_value
        container_view.__class__ = vim.ManagedObject

        obj1 = MagicMock(missingSet=None, obj="foo")
        obj2 = MagicMock(missingSet=None, obj="bar")
        api._conn.content.propertyCollector.RetrievePropertiesEx.return_value = MagicMock(
            objects=[obj1], token=['baz'])
        api._conn.content.propertyCollector.ContinueRetrievePropertiesEx.return_value = MagicMock(
            objects=[obj2], token=None)

        root_folder = api._conn.content.rootFolder
        root_folder.name = 'root-folder'
        infrastructure_data = api.get_infrastructure()
        assert infrastructure_data == {
            'foo': {},
            'bar': {},
            root_folder: {
                'name': 'root-folder',
                'parent': None
            }
        }
        container_view.Destroy.assert_called_once()
Example #4
def test_infrastructure_cache(realtime_instance):
    cache = InfrastructureCache(float('inf'))
    config = VSphereConfig(realtime_instance, {}, logger)
    mock_api = build_rest_api_client(config, logger)

    mors = {
        MagicMock(spec=k, _moId="foo"): object()
        for k in ALL_RESOURCES_WITH_METRICS * 2
    }
    with cache.update():
        for k, v in iteritems(mors):
            cache.set_mor_props(k, v)
        cache.set_all_tags(mock_api.get_resource_tags_for_mors(mors))

    for r in ALL_RESOURCES_WITH_METRICS:
        assert len(list(cache.get_mors(r))) == 2

    for k, v in iteritems(mors):
        assert cache.get_mor_props(k) == v

    vm_mor = vim.VirtualMachine(moId='VM4-4-1')
    vm2_mor = vim.VirtualMachine(moId='i-dont-have-tags')
    datastore = vim.Datastore(moId='NFS-Share-1')

    assert cache.get_mor_tags(vm_mor) == [
        'my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2'
    ]
    assert cache.get_mor_tags(datastore) == ['my_cat_name_2:my_tag_name_2']
    assert cache.get_mor_tags(vm2_mor) == []
Example #5
    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(VSphereCheck, self).__init__(*args, **kwargs)
        instance = cast(InstanceConfig, self.instance)
        self._config = VSphereConfig(instance, self.init_config, self.log)

        self.latest_event_query = get_current_datetime()
        self.infrastructure_cache = InfrastructureCache(interval_sec=self._config.refresh_infrastructure_cache_interval)
        self.metrics_metadata_cache = MetricsMetadataCache(
            interval_sec=self._config.refresh_metrics_metadata_cache_interval
        )
        self.api = cast(VSphereAPI, None)
        self.api_rest = cast(VSphereRestAPI, None)
        # Do not override `AgentCheck.hostname`
        self._hostname = None
        self.thread_pool = ThreadPoolExecutor(max_workers=self._config.threads_count)
        self.check_initializations.append(self.initiate_api_connection)
Example #6
def test_get_new_events_failure_without_fallback(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect'):
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())

        api._conn.content.eventManager.QueryEvents.side_effect = SoapAdapter.ParserError("some parse error")

        with pytest.raises(SoapAdapter.ParserError):
            api.get_new_events(start_time=dt.datetime.now())
Example #7
def test_make_batch(realtime_instance, batch_size, number_of_batches):
    realtime_instance['batch_tags_collector_size'] = batch_size
    config = VSphereConfig(realtime_instance, logger)
    mock_api = VSphereRestAPI(config, log=logger)
    data_to_batch = list(range(1000))

    batches = list(VSphereRestAPI.make_batch(mock_api, data_to_batch))
    flat_data = [x for y in batches for x in y]
    assert flat_data == data_to_batch
    assert len(batches) == number_of_batches
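
The batching contract asserted above can be summarized with a minimal sketch (an illustration, not the integration's implementation): the data is split into consecutive chunks of at most batch_size elements, so flattening the batches restores the original list and the number of batches is ceil(len(data) / batch_size).

import math

def make_batch_sketch(data, batch_size):
    # Hypothetical chunking helper mirroring the behavior under test.
    for i in range(0, len(data), batch_size):
        yield data[i:i + batch_size]

data = list(range(1000))
batches = list(make_batch_sketch(data, 200))
assert [x for batch in batches for x in batch] == data
assert len(batches) == math.ceil(len(data) / 200)
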
Example #8
def test_get_new_events_success_without_fallback(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect'):
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())

        returned_events = [vim.event.Event(), vim.event.Event(), vim.event.Event()]
        api._conn.content.eventManager.QueryEvents.return_value = returned_events

        events = api.get_new_events(start_time=dt.datetime.now())
        assert events == returned_events
Example #9
def test_smart_retry(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect') as connect:
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())

        smart_connect = connect.SmartConnect
        query_perf_counter = api._conn.content.perfManager.QueryPerfCounterByLevel
        query_perf_counter.side_effect = [Exception('error'), 'success']
        api.get_perf_counter_by_level(None)
        assert query_perf_counter.call_count == 2
        assert smart_connect.call_count == 2
Example #10
def test_renew_rest_api_session_on_failure(aggregator, dd_run_check, realtime_instance):
    realtime_instance.update({'collect_tags': True})
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    config = VSphereConfig(realtime_instance, {}, MagicMock())
    check.api_rest = VSphereRestAPI(config, MagicMock())
    check.api_rest.make_batch = MagicMock(side_effect=[Exception, []])
    check.api_rest.smart_connect = MagicMock()

    tags = check.collect_tags({})
    assert tags
    assert check.api_rest.make_batch.call_count == 2
    assert check.api_rest.smart_connect.call_count == 1
Example #11
def test_get_max_query_metrics(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect'):
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())
        values = [12, -1]
        expected = [12, float('inf')]

        for val, expect in zip(values, expected):
            query_config = MagicMock()
            query_config.return_value = [MagicMock(value=val)]
            api._conn.content.setting.QueryOptions = query_config
            max_metrics = api.get_max_query_metrics()
            assert max_metrics == expect
            query_config.assert_called_once_with("config.vpxd.stats.maxQueryMetrics")
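
The loop above relies on vCenter reporting a negative value for an unlimited `config.vpxd.stats.maxQueryMetrics`. A minimal sketch of that normalization, assuming the raw option value is an integer (hypothetical helper, not the integration's API):

def normalize_max_query_metrics(raw_value):
    # Hypothetical: a negative setting means "no limit", which the check
    # represents as infinity so later min() comparisons still work.
    return float('inf') if raw_value < 0 else raw_value

assert normalize_max_query_metrics(12) == 12
assert normalize_max_query_metrics(-1) == float('inf')
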
Example #12
def test_tags_cache(realtime_instance):
    cache = TagsCache(float('inf'))
    config = VSphereConfig(realtime_instance, logger)
    mock_api = VSphereRestAPI(config, log=logger)

    with cache.update():
        cache.set_all_tags(mock_api.get_resource_tags())

    vm_mor = vim.VirtualMachine(moId='VM4-4-1')
    vm2_mor = vim.VirtualMachine(moId='i-dont-have-tags')
    datastore = vim.Datastore(moId='NFS-Share-1')

    assert cache.get_mor_tags(vm_mor) == ['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2']
    assert cache.get_mor_tags(datastore) == ['my_cat_name_2:my_tag_name_2']
    assert cache.get_mor_tags(vm2_mor) == []
Example #13
def test_get_resource_tags(realtime_instance):
    config = VSphereConfig(realtime_instance, {}, logger)
    mock_api = build_rest_api_client(config, logger)
    mock_mors = [MagicMock(spec=vim.VirtualMachine, _moId="foo")]

    resource_tags = mock_api.get_resource_tags_for_mors(mock_mors)

    expected_resource_tags = {
        vim.HostSystem: {'10.0.0.104-1': ['my_cat_name_2:my_tag_name_2']},
        vim.VirtualMachine: {'VM4-4-1': ['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2']},
        vim.Datacenter: {},
        vim.Datastore: {'NFS-Share-1': ['my_cat_name_2:my_tag_name_2']},
        vim.ClusterComputeResource: {},
    }
    assert expected_resource_tags == resource_tags
Example #14
def test_ssl_verify_false(realtime_instance):
    realtime_instance['ssl_verify'] = False

    with patch('datadog_checks.vsphere.api.connect') as connect, patch(
            'ssl.SSLContext.load_verify_locations') as load_verify_locations:
        smart_connect = connect.SmartConnect

        config = VSphereConfig(realtime_instance, MagicMock())
        VSphereAPI(config, MagicMock())

        actual_context = smart_connect.call_args.kwargs[
            'sslContext']  # type: ssl.SSLContext
        assert actual_context.protocol == ssl.PROTOCOL_TLS
        assert actual_context.verify_mode == ssl.CERT_NONE
        load_verify_locations.assert_not_called()
Example #15
def test_smart_retry(realtime_instance, exception, expected_calls):
    with patch('datadog_checks.vsphere.api.connect') as connect:
        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())

        smart_connect = connect.SmartConnect
        disconnect = connect.Disconnect
        query_perf_counter = api._conn.content.perfManager.QueryPerfCounterByLevel
        query_perf_counter.side_effect = [exception, 'success']
        try:
            api.get_perf_counter_by_level(None)
        except Exception:
            pass
        assert query_perf_counter.call_count == expected_calls
        assert smart_connect.call_count == expected_calls
        assert disconnect.call_count == expected_calls - 1
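
Both smart-retry tests describe the same pattern: when a vSphere API call fails, the connection is dropped, re-established, and the call is replayed exactly once. A minimal sketch under those assumptions (hypothetical wrapper, not the integration's code):

class RetryOnceClient:
    # Hypothetical client: any exception triggers a disconnect/reconnect
    # followed by a single retry of the failed call.
    def __init__(self, connect, disconnect):
        self._connect = connect
        self._disconnect = disconnect
        self._conn = self._connect()

    def call(self, fn, *args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception:
            self._disconnect(self._conn)
            self._conn = self._connect()
            return fn(*args, **kwargs)
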
Example #16
def test_should_collect_per_instance_values(metric_name, resource_type,
                                            expect_match):
    config = VSphereConfig(
        {
            'host': 'foo',
            'username': '******',
            'password': '******',
            'collect_per_instance_filters': {
                'vm': [r'cpu\..*\.sum']
            },
        },
        None,
    )

    assert expect_match == should_collect_per_instance_values(
        config, metric_name, resource_type)
Example #17
def test_ssl_cert(realtime_instance):
    realtime_instance['ssl_verify'] = True
    realtime_instance['ssl_capath'] = '/dummy/path'

    with patch('datadog_checks.vsphere.api.connect') as connect, patch(
            'ssl.SSLContext.load_verify_locations') as load_verify_locations:
        smart_connect = connect.SmartConnect

        config = VSphereConfig(realtime_instance, MagicMock())
        VSphereAPI(config, MagicMock())

        actual_context = smart_connect.call_args.kwargs[
            'sslContext']  # type: ssl.SSLContext
        assert actual_context.protocol == ssl.PROTOCOL_TLS
        assert actual_context.verify_mode == ssl.CERT_REQUIRED
        assert actual_context.check_hostname is True
        load_verify_locations.assert_called_with(capath='/dummy/path')
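
Taken together, the two SSL tests imply a context built from `ssl_verify` and `ssl_capath`. A minimal sketch of that construction, assuming those two options drive the behavior (hypothetical helper, not the integration's API):

import ssl

def build_ssl_context(ssl_verify=True, ssl_capath=None):
    # Hypothetical: disabling ssl_verify turns off certificate checks entirely,
    # while ssl_capath points verification at a custom CA directory.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    if not ssl_verify:
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    else:
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        if ssl_capath:
            context.load_verify_locations(capath=ssl_capath)
    return context
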
Example #18
def test_connect_success(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect') as connect:
        connection = MagicMock()
        smart_connect = connect.SmartConnect
        smart_connect.return_value = connection
        get_about_info = connection.content.about.version.__str__

        config = VSphereConfig(realtime_instance, MagicMock())
        api = VSphereAPI(config, MagicMock())
        smart_connect.assert_called_once_with(
            host=realtime_instance['host'],
            user=realtime_instance['username'],
            pwd=realtime_instance['password'],
            sslContext=ANY,
        )
        get_about_info.assert_called_once()

        assert api._conn == connection
Example #19
def test_external_host_tags(aggregator, realtime_instance):
    realtime_instance['collect_tags'] = True
    check = VSphereCheck('vsphere', {}, [realtime_instance])
    config = VSphereConfig(realtime_instance, MagicMock())
    check.api = MockedAPI(config)
    check.api_rest = VSphereRestAPI(config, MagicMock())
    with check.tags_cache.update():
        check.refresh_tags_cache()
    with check.infrastructure_cache.update():
        check.refresh_infrastructure_cache()

    fixture_file = os.path.join(HERE, 'fixtures', 'host_tags_values.json')
    with open(fixture_file, 'r') as f:
        expected_tags = json.load(f)

    check.set_external_tags = MagicMock()
    check.submit_external_host_tags()
    submitted_tags = check.set_external_tags.mock_calls[0].args[0]
    submitted_tags.sort(key=lambda x: x[0])
    for ex, sub in zip(expected_tags, submitted_tags):
        ex_host, sub_host = ex[0], sub[0]
        ex_tags, sub_tags = ex[1]['vsphere'], sub[1]['vsphere']
        ex_tags = [
            to_native_string(t) for t in ex_tags
        ]  # json library loads data in unicode, let's convert back to native
        assert ex_host == sub_host
        assert ex_tags == sub_tags

    check.config.excluded_host_tags = ['vsphere_host']
    check.set_external_tags = MagicMock()
    check.submit_external_host_tags()
    submitted_tags = check.set_external_tags.mock_calls[0].args[0]
    submitted_tags.sort(key=lambda x: x[0])
    for ex, sub in zip(expected_tags, submitted_tags):
        ex_host, sub_host = ex[0], sub[0]
        ex_tags, sub_tags = ex[1]['vsphere'], sub[1]['vsphere']
        ex_tags = [
            to_native_string(t) for t in ex_tags if 'vsphere_host:' not in t
        ]
        assert ex_host == sub_host
        assert ex_tags == sub_tags

    check.set_external_tags = MagicMock()
    check.submit_external_host_tags()
Example #20
def test_connect_failure(realtime_instance):
    with patch('datadog_checks.vsphere.api.connect') as connect:
        connection = MagicMock()
        smart_connect = connect.SmartConnect
        smart_connect.return_value = connection
        version_info = connection.content.about.version.__str__
        version_info.side_effect = Exception('foo')

        config = VSphereConfig(realtime_instance, MagicMock())
        with pytest.raises(APIConnectionError):
            VSphereAPI(config, MagicMock())

        smart_connect.assert_called_once_with(
            host=realtime_instance['host'],
            user=realtime_instance['username'],
            pwd=realtime_instance['password'],
            sslContext=ANY,
        )
        version_info.assert_called_once()
Example #21
def test_get_resource_tags(realtime_instance):

    config = VSphereConfig(realtime_instance, logger)
    mock_api = VSphereRestAPI(config, log=logger)
    resource_tags = mock_api.get_resource_tags()

    expected_resource_tags = {
        vim.HostSystem: {
            '10.0.0.104-1': ['my_cat_name_2:my_tag_name_2']
        },
        vim.VirtualMachine: {
            'VM4-4-1':
            ['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2']
        },
        vim.Datacenter: {},
        vim.Datastore: {
            'NFS-Share-1': ['my_cat_name_2:my_tag_name_2']
        },
        vim.ClusterComputeResource: {},
    }
    assert expected_resource_tags == resource_tags
Example #22
def test_is_realtime_resource_collected_by_filters(realtime_instance):
    realtime_instance['resource_filters'] = [
        {
            'resource': 'vm',
            'property': 'name',
            'patterns': [r'^\$VM5$', r'^VM4-2\d$']
        },
        {
            'resource': 'vm',
            'property': 'inventory_path',
            'patterns': [u'\\/D\xe4tacenter\\/vm\\/m.*']
        },
        {
            'resource': 'vm',
            'property': 'hostname',
            'patterns': [r'10\.0\.0\.103']
        },
        {
            'resource': 'vm',
            'property': 'guest_hostname',
            'patterns': [r'ubuntu-test']
        },
        {
            'resource': 'vm',
            'property': 'tag',
            'patterns': [r'env:production']
        },
        {
            'resource': 'host',
            'property': 'name',
            'patterns': [r'10\.0\.0\.103'],
            'type': 'blacklist'
        },
    ]
    realtime_instance['collect_tags'] = True

    collected_resources = [
        'VM2-1',
        '$VM3-2',
        '$VM5',
        '10.0.0.101',
        '10.0.0.102',
        '10.0.0.104',
        u'VM1-6ê',
        'VM3-1',
        'VM4-20',
        'migrationTest',
    ]

    check = VSphereCheck('vsphere', {}, [realtime_instance])

    formatted_filters = check._config.resource_filters

    config = VSphereConfig(realtime_instance, MagicMock())
    infra = MockedAPI(config).get_infrastructure()
    resources = [
        m for m in infra if m.__class__ in (vim.VirtualMachine, vim.HostSystem)
    ]
    VM2_1 = next(r for r in resources if infra.get(r).get('name') == 'VM2-1')
    check.infrastructure_cache.set_all_tags(
        {vim.VirtualMachine: {
            VM2_1._moId: ['env:production', 'tag:2']
        }})
    for resource in resources:
        is_collected = infra.get(resource).get('name') in collected_resources
        assert (is_resource_collected_by_filters(
            resource, infra, formatted_filters,
            check.infrastructure_cache.get_mor_tags(resource)) == is_collected)
Example #23
class VSphereCheck(AgentCheck):
    __NAMESPACE__ = 'vsphere'

    def __new__(cls, name, init_config, instances):
        # type: (Type[VSphereCheck], str, Dict[str, Any], List[Dict[str, Any]]) -> VSphereCheck
        """For backward compatibility reasons, there are two side-by-side implementations of the VSphereCheck.
        Instantiating this class will return an instance of the legacy integration for existing users and
        an instance of the new implementation for new users."""
        if is_affirmative(instances[0].get('use_legacy_check_version', True)):
            from datadog_checks.vsphere.legacy.vsphere_legacy import VSphereLegacyCheck

            return VSphereLegacyCheck(name, init_config,
                                      instances)  # type: ignore
        return super(VSphereCheck, cls).__new__(cls)

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(VSphereCheck, self).__init__(*args, **kwargs)
        instance = cast(InstanceConfig, self.instance)
        self.config = VSphereConfig(instance, self.log)

        self.latest_event_query = get_current_datetime()
        self.infrastructure_cache = InfrastructureCache(
            interval_sec=self.config.refresh_infrastructure_cache_interval)
        self.metrics_metadata_cache = MetricsMetadataCache(
            interval_sec=self.config.refresh_metrics_metadata_cache_interval)
        self.api = cast(VSphereAPI, None)
        self.api_rest = cast(VSphereRestAPI, None)
        # Do not override `AgentCheck.hostname`
        self._hostname = None
        self.thread_pool = ThreadPoolExecutor(
            max_workers=self.config.threads_count)
        self.check_initializations.append(self.initiate_api_connection)

    def initiate_api_connection(self):
        # type: () -> None
        try:
            self.log.debug(
                "Connecting to the vCenter API %s with username %s...",
                self.config.hostname, self.config.username)
            self.api = VSphereAPI(self.config, self.log)
            self.log.debug("Connected")
        except APIConnectionError:
            self.log.error(
                "Cannot authenticate to vCenter API. The check will not run.")
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.CRITICAL,
                               tags=self.config.base_tags,
                               hostname=None)
            raise

        if self.config.should_collect_tags:
            try:
                self.api_rest = VSphereRestAPI(self.config, self.log)
            except Exception as e:
                self.log.error(
                    "Cannot connect to vCenter REST API. Tags won't be collected. Error: %s",
                    e)

    def refresh_metrics_metadata_cache(self):
        # type: () -> None
        """
        Request the list of counters (metrics) from vSphere and store them in a cache.
        """
        self.log.debug(
            "Refreshing the metrics metadata cache. Collecting all counters metadata for collection_level=%d",
            self.config.collection_level,
        )
        t0 = Timer()
        counters = self.api.get_perf_counter_by_level(
            self.config.collection_level)
        self.gauge(
            "datadog.vsphere.refresh_metrics_metadata_cache.time",
            t0.total(),
            tags=self.config.base_tags,
            raw=True,
            hostname=self._hostname,
        )
        self.log.debug("Collected %d counters metadata in %.3f seconds.",
                       len(counters), t0.total())

        for mor_type in self.config.collected_resource_types:
            allowed_counters = []
            for c in counters:
                metric_name = format_metric_name(c)
                if metric_name in ALLOWED_METRICS_FOR_MOR[
                        mor_type] and not is_metric_excluded_by_filters(
                            metric_name, mor_type, self.config.metric_filters):
                    allowed_counters.append(c)
            metadata = {
                c.key: format_metric_name(c)
                for c in allowed_counters
            }  # type: Dict[CounterId, MetricName]
            self.metrics_metadata_cache.set_metadata(mor_type, metadata)
            self.log.debug(
                "Set metadata for mor_type %s: %s",
                mor_type,
                metadata,
            )

        # TODO: Later - Understand how much data actually changes between check runs
        # Apparently only when the server restarts?
        # https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.5.html

    def collect_tags(self, infrastructure_data):
        # type: (InfrastructureData) -> ResourceTags
        """
        Fetch all tags, build tags for each monitored resource, and store all of that in the tags cache.
        """
        if not self.api_rest:
            return {}

        # In order to be more efficient in tag collection, the infrastructure data is filtered as much as possible.
        # All filters are applied except the ones based on tags of course.
        resource_filters_without_tags = [
            f for f in self.config.resource_filters
            if not isinstance(f, TagFilter)
        ]
        filtered_infra_data = {
            mor: props
            for mor, props in iteritems(infrastructure_data)
            if isinstance(mor, tuple(self.config.collected_resource_types))
            and is_resource_collected_by_filters(
                mor, infrastructure_data, resource_filters_without_tags)
        }

        t0 = Timer()
        mors_list = list(filtered_infra_data.keys())
        try:
            mor_tags = self.api_rest.get_resource_tags_for_mors(mors_list)
        except Exception as e:
            self.log.error("Failed to collect tags: %s", e)
            return {}

        self.gauge('datadog.vsphere.query_tags.time',
                   t0.total(),
                   tags=self.config.base_tags,
                   raw=True,
                   hostname=self._hostname)

        return mor_tags

    def refresh_infrastructure_cache(self):
        # type: () -> None
        """Fetch the complete infrastructure, generate tags for each monitored resources and store all of that
        into the infrastructure_cache. It also computes the resource `hostname` property to be used when submitting
        metrics for this mor."""
        self.log.debug("Refreshing the infrastructure cache...")
        t0 = Timer()
        infrastructure_data = self.api.get_infrastructure()
        self.gauge(
            "datadog.vsphere.refresh_infrastructure_cache.time",
            t0.total(),
            tags=self.config.base_tags,
            raw=True,
            hostname=self._hostname,
        )
        self.log.debug("Infrastructure cache refreshed in %.3f seconds.",
                       t0.total())
        self.log.debug("Infrastructure cache: %s", infrastructure_data)

        all_tags = {}
        if self.config.should_collect_tags:
            all_tags = self.collect_tags(infrastructure_data)
        self.infrastructure_cache.set_all_tags(all_tags)

        for mor, properties in iteritems(infrastructure_data):
            if not isinstance(mor, tuple(
                    self.config.collected_resource_types)):
                # Do nothing for the resource types we do not collect
                continue

            if not is_resource_collected_by_filters(
                    mor, infrastructure_data, self.config.resource_filters,
                    self.infrastructure_cache.get_mor_tags(mor)):
                # The resource does not match the specified whitelist/blacklist patterns.
                continue

            mor_name = to_string(properties.get("name", "unknown"))
            mor_type_str = MOR_TYPE_AS_STRING[type(mor)]
            hostname = None
            tags = []

            if isinstance(mor, vim.VirtualMachine):
                power_state = properties.get("runtime.powerState")
                if power_state != vim.VirtualMachinePowerState.poweredOn:
                    # Skipping because the VM is not powered on
                    # TODO: Sometimes VMs are "poweredOn" but "disconnected" and thus have no metrics
                    self.log.debug("Skipping VM %s in state %s", mor_name,
                                   to_string(power_state))
                    continue

                # Hosts are not considered parents of the VMs they run; we use the `runtime.host` property
                # to get the name of the ESXi host
                runtime_host = properties.get("runtime.host")
                runtime_host_props = infrastructure_data[
                    runtime_host] if runtime_host else {}
                runtime_hostname = to_string(
                    runtime_host_props.get("name", "unknown"))
                tags.append('vsphere_host:{}'.format(runtime_hostname))

                if self.config.use_guest_hostname:
                    hostname = properties.get("guest.hostName", mor_name)
                else:
                    hostname = mor_name
            elif isinstance(mor, vim.HostSystem):
                hostname = mor_name
            else:
                tags.append('vsphere_{}:{}'.format(mor_type_str, mor_name))

            tags.extend(get_parent_tags_recursively(mor, infrastructure_data))
            tags.append('vsphere_type:{}'.format(mor_type_str))

            # Attach tags from fetched attributes.
            tags.extend(properties.get('attributes', []))

            mor_payload = {"tags": tags}  # type: Dict[str, Any]

            if hostname:
                mor_payload['hostname'] = hostname

            self.infrastructure_cache.set_mor_props(mor, mor_payload)

    def submit_metrics_callback(self, query_results):
        # type: (List[vim.PerformanceManager.EntityMetricBase]) -> None
        """
        Callback for the metric collection. This is run in the main thread!

        In practice, `query_results` currently contains results for a single resource type, but this function is
        generic and can handle results with mixed resource types.
        """

        # `have_instance_value` is used later to avoid collecting aggregated metrics
        # when instance metrics are collected.
        have_instance_value = defaultdict(
            set)  # type: Dict[Type[vim.ManagedEntity], Set[MetricName]]
        for results_per_mor in query_results:
            resource_type = type(results_per_mor.entity)
            metadata = self.metrics_metadata_cache.get_metadata(resource_type)
            for result in results_per_mor.value:
                if result.id.instance:
                    have_instance_value[resource_type].add(
                        metadata[result.id.counterId])

        for results_per_mor in query_results:
            mor_props = self.infrastructure_cache.get_mor_props(
                results_per_mor.entity)
            if mor_props is None:
                self.log.debug(
                    "Skipping results for mor %s because the integration is not yet aware of it. If this is a problem"
                    " you can increase the value of 'refresh_infrastructure_cache_interval'.",
                    results_per_mor.entity,
                )
                continue
            self.log.debug(
                "Retrieved mor props for entity %s: %s",
                results_per_mor.entity,
                mor_props,
            )
            resource_type = type(results_per_mor.entity)
            metadata = self.metrics_metadata_cache.get_metadata(resource_type)
            for result in results_per_mor.value:
                metric_name = metadata.get(result.id.counterId)
                if self.log.isEnabledFor(logging.DEBUG):
                    # Use isEnabledFor to avoid unnecessary processing
                    self.log.debug(
                        "Processing metric `%s`: resource_type=`%s`, result=`%s`",
                        metric_name,
                        resource_type,
                        str(result).replace("\n", "\\n"),
                    )
                if not metric_name:
                    # Fail-safe
                    self.log.debug(
                        "Skipping value for counter %s, because the integration doesn't have metadata about it. If this"
                        " is a problem you can increase the value of 'refresh_metrics_metadata_cache_interval'",
                        result.id.counterId,
                    )
                    continue

                if not result.value:
                    self.log.debug(
                        "Skipping metric %s because the value is empty",
                        to_string(metric_name))
                    continue

                # Get the most recent value that isn't negative
                valid_values = [v for v in result.value if v >= 0]
                if not valid_values:
                    self.log.debug(
                        "Skipping metric %s because the value returned by vCenter"
                        " is negative (i.e. the metric is not yet available). values: %s",
                        to_string(metric_name),
                        list(result.value),
                    )
                    continue

                tags = []
                if should_collect_per_instance_values(
                        self.config, metric_name, resource_type) and (
                            metric_name in have_instance_value[resource_type]):
                    instance_value = result.id.instance
                    # When collecting per-instance values, it's possible to receive both the aggregated metric and
                    # the per-instance metrics. In that case, the metric with no instance value is skipped.
                    if not instance_value:
                        continue
                    instance_tag_key = get_mapped_instance_tag(metric_name)
                    tags.append('{}:{}'.format(instance_tag_key,
                                               instance_value))

                vsphere_tags = self.infrastructure_cache.get_mor_tags(
                    results_per_mor.entity)
                mor_tags = mor_props['tags'] + vsphere_tags

                if resource_type in HISTORICAL_RESOURCES:
                    # Tags are attached to the metrics
                    tags.extend(mor_tags)
                    hostname = None
                else:
                    # Tags are (mostly) submitted as external host tags.
                    hostname = to_string(mor_props.get('hostname'))
                    if self.config.excluded_host_tags:
                        tags.extend([
                            t for t in mor_tags if t.split(":", 1)[0] in
                            self.config.excluded_host_tags
                        ])

                tags.extend(self.config.base_tags)

                value = valid_values[-1]
                if metric_name in PERCENT_METRICS:
                    # Convert the percentage to a float.
                    value /= 100.0

                self.log.debug(
                    "Submit metric: name=`%s`, value=`%s`, hostname=`%s`, tags=`%s`",
                    metric_name,
                    value,
                    hostname,
                    tags,
                )
                # vSphere "rates" should be submitted as gauges (rate is precomputed).
                self.gauge(to_string(metric_name),
                           value,
                           hostname=hostname,
                           tags=tags)

    def query_metrics_wrapper(self, query_specs):
        # type: (List[vim.PerformanceManager.QuerySpec]) -> List[vim.PerformanceManager.EntityMetricBase]
        """Just an instrumentation wrapper around the VSphereAPI.query_metrics method
        Warning: called in threads
        """
        t0 = Timer()
        metrics_values = self.api.query_metrics(query_specs)
        self.histogram(
            'datadog.vsphere.query_metrics.time',
            t0.total(),
            tags=self.config.base_tags,
            raw=True,
            hostname=self._hostname,
        )
        return metrics_values

    def make_query_specs(self):
        # type: () -> Iterable[List[vim.PerformanceManager.QuerySpec]]
        """
        Build query specs using MORs and metrics metadata.
        """
        server_current_time = self.api.get_current_time()
        self.log.debug("Server current datetime: %s", server_current_time)
        for resource_type in self.config.collected_resource_types:
            mors = self.infrastructure_cache.get_mors(resource_type)
            counters = self.metrics_metadata_cache.get_metadata(resource_type)
            metric_ids = []  # type: List[vim.PerformanceManager.MetricId]
            for counter_key, metric_name in iteritems(counters):
                # PerformanceManager.MetricId `instance` kwarg:
                # - An asterisk (*) to specify all instances of the metric for the specified counterId
                # - Double-quotes ("") to specify aggregated statistics
                # More info https://code.vmware.com/apis/704/vsphere/vim.PerformanceManager.MetricId.html
                if should_collect_per_instance_values(self.config, metric_name,
                                                      resource_type):
                    instance = "*"
                else:
                    instance = ''

                metric_ids.append(
                    vim.PerformanceManager.MetricId(counterId=counter_key,
                                                    instance=instance))

            for batch in self.make_batch(mors, metric_ids, resource_type):
                query_specs = []
                for mor, metrics in iteritems(batch):
                    query_spec = vim.PerformanceManager.QuerySpec()  # type: vim.PerformanceManager.QuerySpec
                    query_spec.entity = mor
                    query_spec.metricId = metrics
                    if resource_type in REALTIME_RESOURCES:
                        query_spec.intervalId = REALTIME_METRICS_INTERVAL_ID
                        query_spec.maxSample = 1  # Request a single datapoint
                    else:
                        # We cannot use `maxSample` for historical metrics, so let's specify a time window that will
                        # contain at least one element
                        query_spec.startTime = server_current_time - dt.timedelta(
                            hours=2)
                    query_specs.append(query_spec)
                if query_specs:
                    yield query_specs

    def collect_metrics_async(self):
        # type: () -> None
        """Run queries in multiple threads and wait for completion."""
        tasks = []
        try:
            for query_specs in self.make_query_specs():
                tasks.append(
                    self.thread_pool.submit(self.query_metrics_wrapper,
                                            query_specs))
        except Exception as e:
            self.log.warning(
                "Unable to schedule all metric collection tasks: %s", e)
        finally:
            self.log.debug("Queued all %d tasks, waiting for completion.",
                           len(tasks))
            for future in as_completed(tasks):
                future_exc = future.exception()
                if isinstance(future_exc, vmodl.fault.InvalidArgument):
                    # The query was invalid or the resource does not have values for this metric.
                    continue
                elif future_exc is not None:
                    self.log.warning(
                        "A metric collection API call failed with the following error: %s",
                        future_exc)
                    continue

                results = future.result()
                if not results:
                    self.log.debug(
                        "A metric collection API call did not return data.")
                    continue

                try:
                    # Callback is called in the main thread
                    self.submit_metrics_callback(results)
                except Exception as e:
                    self.log.exception(
                        "Exception '%s' raised during the submit_metrics_callback. "
                        "Ignoring the error and continuing execution.",
                        e,
                    )

    def make_batch(
        self,
        mors,  # type: Iterable[vim.ManagedEntity]
        metric_ids,  # type: List[vim.PerformanceManager.MetricId]
        resource_type,  # type: Type[vim.ManagedEntity]
    ):  # type: (...) -> Generator[MorBatch, None, None]
        """Iterates over mor and generate batches with a fixed number of metrics to query.
        Querying multiple resource types in the same call is error prone if we query a cluster metric. Indeed,
        cluster metrics result in an unpredicatable number of internal metric queries which all count towards
        max_query_metrics. Therefore often collecting a single cluster metric can make the whole call to fail. That's
        why we should never batch cluster metrics with anything else.
        """
        # Safeguard, let's avoid collecting multiple resources in the same call
        mors_filtered = [m for m in mors if isinstance(m, resource_type)
                         ]  # type: List[vim.ManagedEntity]

        if resource_type == vim.ClusterComputeResource:
            # Cluster metrics are unpredictable and a single call can max out the limit. Always collect them one by one.
            max_batch_size = 1  # type: float
        elif resource_type in REALTIME_RESOURCES or self.config.max_historical_metrics < 0:
            # Queries are not limited by vCenter
            max_batch_size = self.config.metrics_per_query
        else:
            # Collection is limited by the value of `max_query_metrics`
            if self.config.metrics_per_query < 0:
                max_batch_size = self.config.max_historical_metrics
            else:
                max_batch_size = min(self.config.metrics_per_query,
                                     self.config.max_historical_metrics)

        batch = defaultdict(list)  # type: MorBatch
        batch_size = 0
        for m in mors_filtered:
            for metric_id in metric_ids:
                if batch_size == max_batch_size:
                    yield batch
                    batch = defaultdict(list)
                    batch_size = 0
                batch[m].append(metric_id)
                batch_size += 1
        # Do not yield an empty batch
        if batch:
            yield batch

    def submit_external_host_tags(self):
        # type: () -> None
        """Send external host tags to the Datadog backend. This is only useful for a REALTIME instance because
        only VMs and Hosts appear as 'datadog hosts'."""
        external_host_tags = []

        for resource_type in REALTIME_RESOURCES:
            for mor in self.infrastructure_cache.get_mors(resource_type):
                mor_props = self.infrastructure_cache.get_mor_props(mor)
                mor_tags = self.infrastructure_cache.get_mor_tags(mor)
                hostname = mor_props.get('hostname')
                # Safeguard if some mors have a None hostname
                if not hostname:
                    continue

                mor_tags = mor_props['tags'] + mor_tags
                tags = [
                    t for t in mor_tags
                    if t.split(':')[0] not in self.config.excluded_host_tags
                ]
                tags.extend(self.config.base_tags)
                external_host_tags.append((hostname, {
                    self.__NAMESPACE__: tags
                }))

        if external_host_tags:
            self.set_external_tags(external_host_tags)

    def collect_events(self):
        # type: () -> None
        self.log.debug("Starting events collection (query start time: %s).",
                       self.latest_event_query)
        latest_event_time = None
        collect_start_time = get_current_datetime()
        try:
            t0 = Timer()
            new_events = self.api.get_new_events(
                start_time=self.latest_event_query)
            self.gauge(
                'datadog.vsphere.collect_events.time',
                t0.total(),
                tags=self.config.base_tags,
                raw=True,
                hostname=self._hostname,
            )
            self.log.debug("Got %s new events from the vCenter event manager",
                           len(new_events))
            event_config = {'collect_vcenter_alarms': True}
            for event in new_events:
                self.log.debug("Processing event with id:%s, type:%s: msg:%s",
                               event.key, type(event),
                               event.fullFormattedMessage)
                normalized_event = VSphereEvent(event, event_config,
                                                self.config.base_tags)
                # Can return None if the event is filtered out
                event_payload = normalized_event.get_datadog_payload()
                if event_payload is not None:
                    self.log.debug("Submit event with id:%s, type:%s: msg:%s",
                                   event.key, type(event),
                                   event.fullFormattedMessage)
                    self.event(event_payload)
                if latest_event_time is None or event.createdTime > latest_event_time:
                    latest_event_time = event.createdTime
        except Exception as e:
            # Don't get stuck on a failure to fetch an event
            # Ignore them for next pass
            self.log.warning("Unable to fetch Events %s", e)

        if latest_event_time is not None:
            self.latest_event_query = latest_event_time + dt.timedelta(
                seconds=1)
        else:
            # Let's set `self.latest_event_query` to `collect_start_time` as a safeguard in case no events are reported
            # OR something bad happened (which might happen again indefinitely).
            self.latest_event_query = collect_start_time

    def check(self, _):
        # type: (Any) -> None
        self._hostname = datadog_agent.get_hostname()
        # Assert the health of the vCenter API by getting the version, and submit the service_check accordingly
        try:
            version_info = self.api.get_version()
            if self.is_metadata_collection_enabled():
                self.set_metadata('version', version_info.version_str)
        except Exception:
            # Explicitly do not attach any host to the service checks.
            self.log.exception(
                "The vCenter API is not responding. The check will not run.")
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.CRITICAL,
                               tags=self.config.base_tags,
                               hostname=None)
            raise
        else:
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.OK,
                               tags=self.config.base_tags,
                               hostname=None)

        # Collect and submit events
        if self.config.should_collect_events:
            self.collect_events()

        if self.config.collect_events_only:
            return

        # Update the value of `max_query_metrics` if needed
        if self.config.is_historical():
            try:
                vcenter_max_hist_metrics = self.api.get_max_query_metrics()
                if vcenter_max_hist_metrics < self.config.max_historical_metrics:
                    self.log.warning(
                        "The integration was configured with `max_query_metrics: %d` but your vCenter has a"
                        "limit of %d which is lower. Ignoring your configuration in favor of the vCenter value."
                        "To update the vCenter value, please update the `%s` field",
                        self.config.max_historical_metrics,
                        vcenter_max_hist_metrics,
                        MAX_QUERY_METRICS_OPTION,
                    )
                    self.config.max_historical_metrics = vcenter_max_hist_metrics
            except Exception:
                self.config.max_historical_metrics = DEFAULT_MAX_QUERY_METRICS
                self.log.info(
                    "Could not fetch the value of %s, setting `max_historical_metrics` to %d.",
                    MAX_QUERY_METRICS_OPTION,
                    DEFAULT_MAX_QUERY_METRICS,
                )
                pass

        # Refresh the metrics metadata cache
        if self.metrics_metadata_cache.is_expired():
            with self.metrics_metadata_cache.update():
                self.refresh_metrics_metadata_cache()

        # Refresh the infrastructure cache
        if self.infrastructure_cache.is_expired():
            with self.infrastructure_cache.update():
                self.refresh_infrastructure_cache()
            # Submit host tags as soon as we have fresh data
            self.submit_external_host_tags()

        # Submit the number of VMs that are monitored
        for resource_type in self.config.collected_resource_types:
            for mor in self.infrastructure_cache.get_mors(resource_type):
                mor_props = self.infrastructure_cache.get_mor_props(mor)
                # Explicitly do not attach any host to those metrics.
                resource_tags = mor_props.get('tags', [])
                self.count(
                    '{}.count'.format(MOR_TYPE_AS_STRING[resource_type]),
                    1,
                    tags=self.config.base_tags + resource_tags,
                    hostname=None,
                )

        # Creating a thread pool and starting metric collection
        self.log.debug("Starting metric collection in %d threads.",
                       self.config.threads_count)
        self.collect_metrics_async()
        self.log.debug("Metric collection completed.")
Example #24
def test_create_session(realtime_instance):
    config = VSphereConfig(realtime_instance, logger)
    mock_api = VSphereRestAPI(config, log=logger)

    assert mock_api._client._http.options['headers'][
        'vmware-api-session-id'] == "dummy-token"