def test_should_collect_per_instance_values(metric_name, resource_type, expect_match):
    """Check that `should_collect_per_instance_values` honors the configured per-instance filters.

    Builds a minimal config whose only filter collects VM metrics matching
    ``cpu\\..*\\.sum`` and verifies the helper's verdict for the parametrized
    (metric_name, resource_type) pair.
    """
    config = VSphereConfig(
        {
            'host': 'foo',
            'username': '******',
            'password': '******',
            'collect_per_instance_filters': {'vm': [r'cpu\..*\.sum']},
        },
        None,
    )
    result = should_collect_per_instance_values(config, metric_name, resource_type)
    assert result == expect_match
def make_query_specs(self):
    # type: () -> Iterable[List[vim.PerformanceManager.QuerySpec]]
    """
    Build query specs using MORs and metrics metadata.

    Yields one list of QuerySpec objects per non-empty batch of managed
    object references, for each collected resource type.
    """
    server_current_time = self.api.get_current_time()
    self.log.debug("Server current datetime: %s", server_current_time)
    # Historical queries cannot use `maxSample`; instead request a time window
    # guaranteed to contain at least one datapoint. Computed once since the
    # reference time is fixed for this whole collection cycle.
    historical_start = server_current_time - dt.timedelta(hours=2)

    for resource_type in self._config.collected_resource_types:
        mors = self.infrastructure_cache.get_mors(resource_type)
        counters = self.metrics_metadata_cache.get_metadata(resource_type)
        # PerformanceManager.MetricId `instance` kwarg:
        # - An asterisk (*) to specify all instances of the metric for the specified counterId
        # - Double-quotes ("") to specify aggregated statistics
        # More info https://code.vmware.com/apis/704/vsphere/vim.PerformanceManager.MetricId.html
        metric_ids = [
            vim.PerformanceManager.MetricId(
                counterId=counter_key,
                instance="*"
                if should_collect_per_instance_values(self._config, metric_name, resource_type)
                else '',
            )
            for counter_key, metric_name in iteritems(counters)
        ]  # type: List[vim.PerformanceManager.MetricId]

        is_realtime = resource_type in REALTIME_RESOURCES
        for mor_batch in self.make_batch(mors, metric_ids, resource_type):
            specs = []
            for entity, entity_metrics in iteritems(mor_batch):
                spec = vim.PerformanceManager.QuerySpec()  # type: vim.PerformanceManager.QuerySpec
                spec.entity = entity
                spec.metricId = entity_metrics
                if is_realtime:
                    spec.intervalId = REALTIME_METRICS_INTERVAL_ID
                    spec.maxSample = 1  # Request a single datapoint
                else:
                    spec.startTime = historical_start
                specs.append(spec)
            if specs:
                yield specs
def make_query_specs(self):
    """
    Build query specs using MORs and metrics metadata.

    :returns a list of vim.PerformanceManager.QuerySpec:
    https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.PerformanceManager.QuerySpec.html
    """
    for resource_type in self.config.collected_resource_types:
        mors = self.infrastructure_cache.get_mors(resource_type)
        counters = self.metrics_metadata_cache.get_metadata(resource_type)
        # PerformanceManager.MetricId `instance` kwarg:
        # - An asterisk (*) to specify all instances of the metric for the specified counterId
        # - Double-quotes ("") to specify aggregated statistics
        # More info https://code.vmware.com/apis/704/vsphere/vim.PerformanceManager.MetricId.html
        metric_ids = [
            vim.PerformanceManager.MetricId(
                counterId=counter_key,
                instance="*"
                if should_collect_per_instance_values(self.config, metric_name, resource_type)
                else '',
            )
            for counter_key, metric_name in iteritems(counters)
        ]

        is_realtime = resource_type in REALTIME_RESOURCES
        for mor_batch in self.make_batch(mors, metric_ids, resource_type):
            specs = []
            for entity, entity_metrics in iteritems(mor_batch):
                spec = vim.PerformanceManager.QuerySpec()
                spec.entity = entity
                spec.metricId = entity_metrics
                if is_realtime:
                    spec.intervalId = REALTIME_METRICS_INTERVAL_ID
                    spec.maxSample = 1  # Request a single datapoint
                else:
                    # We cannot use `maxSample` for historical metrics, let's specify a timewindow that will
                    # contain at least one element.
                    # NOTE(review): this is the naive *local* client time; if the vCenter server clock or
                    # timezone differs, the window may be off — consider using the server's current time
                    # (as the typed variant of this method does). TODO confirm.
                    spec.startTime = datetime.now() - timedelta(hours=2)
                specs.append(spec)
            if specs:
                yield specs
def submit_metrics_callback(self, query_results):
    # type: (List[vim.PerformanceManager.EntityMetricBase]) -> None
    """
    Callback of the collection of metrics. This is run in the main thread!

    `query_results` currently contain results of one resource type in practice, but this function
    is generic and can handle results with mixed resource types.
    """
    # `have_instance_value` is used later to avoid collecting aggregated metrics
    # when instance metrics are collected.
    # First pass: record, per resource type, which metric names reported at least one
    # per-instance value (non-empty `result.id.instance`).
    have_instance_value = defaultdict(set)  # type: Dict[Type[vim.ManagedEntity], Set[MetricName]]
    for results_per_mor in query_results:
        resource_type = type(results_per_mor.entity)
        metadata = self.metrics_metadata_cache.get_metadata(resource_type)
        for result in results_per_mor.value:
            if result.id.instance:
                have_instance_value[resource_type].add(metadata[result.id.counterId])

    # Second pass: resolve each result to a metric name, filter invalid values,
    # build tags, and submit.
    for results_per_mor in query_results:
        mor_props = self.infrastructure_cache.get_mor_props(results_per_mor.entity)
        if mor_props is None:
            # The infrastructure cache hasn't discovered this entity yet; skip it.
            self.log.debug(
                "Skipping results for mor %s because the integration is not yet aware of it. If this is a problem"
                " you can increase the value of 'refresh_infrastructure_cache_interval'.",
                results_per_mor.entity,
            )
            continue
        self.log.debug(
            "Retrieved mor props for entity %s: %s",
            results_per_mor.entity,
            mor_props,
        )
        resource_type = type(results_per_mor.entity)
        metadata = self.metrics_metadata_cache.get_metadata(resource_type)
        for result in results_per_mor.value:
            metric_name = metadata.get(result.id.counterId)
            if self.log.isEnabledFor(logging.DEBUG):
                # Use isEnabledFor to avoid unnecessary processing
                self.log.debug(
                    "Processing metric `%s`: resource_type=`%s`, result=`%s`",
                    metric_name,
                    resource_type,
                    str(result).replace("\n", "\\n"),
                )
            if not metric_name:
                # Fail-safe: counter id not present in the metadata cache.
                self.log.debug(
                    "Skipping value for counter %s, because the integration doesn't have metadata about it. If this"
                    " is a problem you can increase the value of 'refresh_metrics_metadata_cache_interval'",
                    result.id.counterId,
                )
                continue
            if not result.value:
                self.log.debug("Skipping metric %s because the value is empty", to_string(metric_name))
                continue

            # Get the most recent value that isn't negative
            # (vCenter reports negative values for metrics not yet available).
            valid_values = [v for v in result.value if v >= 0]
            if not valid_values:
                self.log.debug(
                    "Skipping metric %s because the value returned by vCenter"
                    " is negative (i.e. the metric is not yet available). values: %s",
                    to_string(metric_name),
                    list(result.value),
                )
                continue

            tags = []
            if should_collect_per_instance_values(self.config, metric_name, resource_type) and (
                metric_name in have_instance_value[resource_type]
            ):
                instance_value = result.id.instance
                # When collecting per instance values, it's possible that both aggregated metric and per instance
                # metrics are received. In that case, the metric with no instance value is skipped.
                if not instance_value:
                    continue
                instance_tag_key = get_mapped_instance_tag(metric_name)
                tags.append('{}:{}'.format(instance_tag_key, instance_value))

            vsphere_tags = self.infrastructure_cache.get_mor_tags(results_per_mor.entity)
            mor_tags = mor_props['tags'] + vsphere_tags

            if resource_type in HISTORICAL_RESOURCES:
                # Tags are attached to the metrics
                tags.extend(mor_tags)
                hostname = None
            else:
                # Tags are (mostly) submitted as external host tags.
                hostname = to_string(mor_props.get('hostname'))
                if self.config.excluded_host_tags:
                    # Tags excluded from external host tags are re-attached to the metric itself.
                    tags.extend([t for t in mor_tags if t.split(":", 1)[0] in self.config.excluded_host_tags])

            tags.extend(self.config.base_tags)

            value = valid_values[-1]
            if metric_name in PERCENT_METRICS:
                # Convert the percentage to a float.
                value /= 100.0

            self.log.debug(
                "Submit metric: name=`%s`, value=`%s`, hostname=`%s`, tags=`%s`",
                metric_name,
                value,
                hostname,
                tags,
            )
            # vSphere "rates" should be submitted as gauges (rate is precomputed).
            self.gauge(to_string(metric_name), value, hostname=hostname, tags=tags)