def _cache_morlist_process_atomic(self, instance, mor):
    """Query the metrics available for a single MOR and register it in
    ``self.morlist`` under the instance's cache key.

    If the MOR is already cached, only its metrics list is refreshed;
    either way its ``last_seen`` timestamp is bumped.
    """
    # ## <TEST-INSTRUMENTATION>
    timer = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    perf_manager = self._get_server_instance(instance).content.perfManager
    custom_tags = instance.get('tags', [])

    self.log.debug("job_atomic: Querying available metrics for MOR %s (type=%s)", mor['mor'], mor['mor_type'])

    # Only realtime resources are sampled at the real-time interval.
    if mor['mor_type'] in REALTIME_RESOURCES:
        mor['interval'] = REAL_TIME_INTERVAL
    else:
        mor['interval'] = None

    available_metrics = perf_manager.QueryAvailablePerfMetric(mor['mor'], intervalId=mor['interval'])
    self.log.debug("Computing list of metrics to keep from %s", available_metrics)
    mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)

    mor_name = str(mor['mor'])
    instance_cache = self.morlist[i_key]
    if mor_name not in instance_cache:
        self.log.debug("Adding MOR %s to instance %s cache", mor_name, i_key)
        instance_cache[mor_name] = mor
    else:
        # Was already here last iteration: refresh the metrics list only.
        self.log.debug("MOR %s already present in instance %s cache, refreshing metrics list only", mor_name, i_key)
        instance_cache[mor_name]['metrics'] = mor['metrics']
    instance_cache[mor_name]['last_seen'] = time.time()

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', timer.total(), tags=custom_tags)
def _cache_metrics_metadata(self, instance):
    """Fetch from the server instance every performance counter's metadata
    (name/group/unit...) keyed by counter ID, and reset the per-instance
    ``metrics_metadata`` cache with it.
    """
    # ## <TEST-INSTRUMENTATION>
    timer = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    self.log.info("Warming metrics metadata cache for instance {}".format(i_key))
    perf_manager = self._get_server_instance(instance).content.perfManager
    custom_tags = instance.get('tags', [])

    new_metadata = {
        counter.key: {
            'name': "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
            'unit': counter.unitInfo.key,
            'instance_tag': 'instance',  # FIXME: replace by what we want to tag!
        }
        for counter in perf_manager.perfCounter
    }
    self.cache_config.set_last(CacheConfig.Metadata, i_key, time.time())

    self.log.info("Finished metadata collection for instance {}".format(i_key))
    # Reset metadata
    self.metrics_metadata[i_key] = new_metadata

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', timer.total(), tags=custom_tags)
def _cache_morlist_process_atomic(self, instance, query_specs):
    """Process one batch of the self.morlist_raw list by querying the
    available metrics for these MORs and then putting them in self.morlist.

    Fix: the previous version indexed ``self.morlist[i_key][mor_name]``
    without a guard — if the object had been evicted from the cache by a
    concurrent cleanup between building the query and processing the
    response, the resulting KeyError aborted the whole batch. The lookup
    is now guarded, matching the other variant of this method.
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    # With QueryPerf, we can get metric information about several MORs at once. Let's use it
    # to avoid making one API call per object, even if we also get metrics values that are useless for now.
    # See https://code.vmware.com/apis/358/vsphere#/doc/vim.PerformanceManager.html#queryStats
    # query_specs is a list of QuerySpec objects.
    # See https://code.vmware.com/apis/358/vsphere#/doc/vim.PerformanceManager.QuerySpec.html
    res = perfManager.QueryPerf(query_specs)
    for mor_perfs in res:
        mor_name = str(mor_perfs.entity)
        available_metrics = [value.id for value in mor_perfs.value]
        try:
            mor = self.morlist[i_key][mor_name]
        except KeyError:
            # The object was deleted from the cache while the query was in flight.
            self.log.error(
                "Trying to compute needed metrics from object %s deleted from the cache, skipping. "
                "Consider increasing the parameter `clean_morlist_interval` to avoid that", mor_name)
            continue
        mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)
        mor['last_seen'] = time.time()

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total(), tags=instance.get('tags', []))
def _collect_metrics_async(self, instance, query_specs):
    """Task that collects the metrics listed in the morlist for one MOR."""
    # ## <TEST-INSTRUMENTATION>
    timer = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perf_manager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])

    results = perf_manager.QueryPerf(query_specs)
    if results:
        for entity_perfs in results:
            mor_name = str(entity_perfs.entity)
            try:
                mor = self.mor_cache.get_mor(i_key, mor_name)
            except MorNotFoundError:
                self.log.error("Trying to get metrics from object %s deleted from the cache, skipping. "
                               "Consider increasing the parameter `clean_morlist_interval` to avoid that", mor_name)
                continue

            for perf_result in entity_perfs.value:
                counter_id = perf_result.id.counterId
                if not self.metadata_cache.contains(i_key, counter_id):
                    self.log.debug(
                        "Skipping value for counter {}, because there is no metadata about it".format(counter_id)
                    )
                    continue

                # Metric types are absolute, delta, and rate
                metric_name = self.metadata_cache.get_metadata(i_key, counter_id).get('name')

                if self.in_compatibility_mode(instance) and metric_name not in ALL_METRICS:
                    self.log.debug("Skipping unknown `{}` metric.".format(metric_name))
                    continue

                if not perf_result.value:
                    self.log.debug("Skipping `{}` metric because the value is empty".format(metric_name))
                    continue

                instance_name = perf_result.id.instance or "none"
                value = self._transform_value(instance, counter_id, perf_result.value[0])

                tags = ['instance:{}'.format(instance_name)]
                if not mor['hostname']:  # no host tags available
                    tags.extend(mor['tags'])
                # vsphere "rates" should be submitted as gauges (rate is
                # precomputed).
                self.gauge(
                    "vsphere.{}".format(metric_name),
                    value,
                    hostname=mor['hostname'],
                    tags=tags + custom_tags
                )

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_colection.time', timer.total(), tags=custom_tags)
def _cache_metrics_metadata(self, instance):
    """Get all the performance counters metadata meaning name/group/description...
    from the server instance, attached with the corresponding ID, and build the
    list of MetricId objects that will be queried.

    Fix: the two ``log.info`` messages were bytes literals (``b"..."``);
    ``bytes`` has no ``format`` method on Python 3, so both calls raised
    AttributeError. The ``b`` prefixes are removed.
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    self.metadata_cache.init_instance(i_key)
    self.log.info("Warming metrics metadata cache for instance {}".format(i_key))
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])

    new_metadata = {}
    metric_ids = []
    # Use old behaviour with metrics to collect defined by our constants
    if self.in_compatibility_mode(instance, log_warning=True):
        for counter in perfManager.perfCounter:
            metric_name = self.format_metric_name(counter, compatibility=True)
            new_metadata[counter.key] = {
                'name': metric_name,
                'unit': counter.unitInfo.key,
            }
            # Build the list of metrics we will want to collect
            if instance.get("all_metrics") or metric_name in BASIC_METRICS:
                metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
    else:
        collection_level = instance.get("collection_level", 1)
        for counter in perfManager.QueryPerfCounterByLevel(collection_level):
            new_metadata[counter.key] = {
                "name": self.format_metric_name(counter),
                "unit": counter.unitInfo.key
            }
            # Build the list of metrics we will want to collect
            metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))

    self.log.info("Finished metadata collection for instance {}".format(i_key))
    # Reset metadata
    self.metadata_cache.set_metadata(i_key, new_metadata)
    self.metadata_cache.set_metric_ids(i_key, metric_ids)

    self.cache_config.set_last(CacheConfig.Metadata, i_key, time.time())

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total(), tags=custom_tags)
def _collect_metrics_atomic(self, instance, query_specs):
    """Task that collects the metrics listed in the morlist for one MOR.

    Fix: the ``tags`` list (which falls back to the MOR's own tags when no
    hostname is available) was built but never passed to ``self.gauge`` —
    the call rebuilt ``['instance:...'] + custom_tags`` instead, so hostless
    objects lost their resource tags. The gauge call now submits
    ``tags + custom_tags``, matching the newer variant of this method.
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])
    results = perfManager.QueryPerf(query_specs)
    if results:
        for mor_perfs in results:
            mor_name = str(mor_perfs.entity)
            mor = self.morlist[i_key][mor_name]
            for result in mor_perfs.value:
                if result.id.counterId not in self.metrics_metadata[i_key]:
                    self.log.debug("Skipping this metric value, because there is no metadata about it")
                    continue

                # Metric types are absolute, delta, and rate
                metric_name = self.metrics_metadata[i_key].get(result.id.counterId, {}).get('name')
                if metric_name not in ALL_METRICS:
                    self.log.debug(u"Skipping unknown `%s` metric.", metric_name)
                    continue

                if not result.value:
                    self.log.debug(u"Skipping `%s` metric because the value is empty", metric_name)
                    continue

                instance_name = result.id.instance or "none"
                value = self._transform_value(instance, result.id.counterId, result.value[0])

                tags = ['instance:{}'.format(instance_name)]
                if not mor['hostname']:  # no host tags available
                    tags.extend(mor['tags'])
                # vsphere "rates" should be submitted as gauges (rate is
                # precomputed).
                self.gauge("vsphere.{}".format(metric_name),
                           value,
                           hostname=mor['hostname'],
                           # FIX: submit the computed `tags` (previously discarded)
                           tags=tags + custom_tags)

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_colection.time', t.total(), tags=custom_tags)
def _collect_metrics_atomic(self, instance, mor):
    """Task that collects the metrics listed in the morlist for one MOR.

    Fixes:
    - The ``tags`` list (which falls back to the MOR's own tags when no
      hostname is available) was built but never passed to ``self.gauge``,
      so hostless objects lost their resource tags; the gauge call now
      submits ``tags + custom_tags``.
    - The try/except KeyError around the metadata lookup was dead code:
      the membership check just above guarantees the key is present, so
      the dict is indexed directly.
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])
    query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                             entity=mor['mor'],
                                             metricId=mor['metrics'],
                                             intervalId=mor['interval'],
                                             format='normal')
    results = perfManager.QueryPerf(querySpec=[query])
    if results:
        for result in results[0].value:
            if result.id.counterId not in self.metrics_metadata[i_key]:
                self.log.debug("Skipping this metric value, because there is no metadata about it")
                continue

            # Metric types are absolute, delta, and rate
            metric_name = self.metrics_metadata[i_key][result.id.counterId]['name']
            if metric_name not in ALL_METRICS:
                self.log.debug(u"Skipping unknown `%s` metric.", metric_name)
                continue

            if not result.value:
                self.log.debug(u"Skipping `%s` metric because the value is empty", metric_name)
                continue

            instance_name = result.id.instance or "none"
            value = self._transform_value(instance, result.id.counterId, result.value[0])

            tags = ['instance:%s' % instance_name]
            if not mor['hostname']:  # no host tags available
                tags.extend(mor['tags'])
            # vsphere "rates" should be submitted as gauges (rate is
            # precomputed).
            self.gauge(
                "vsphere.%s" % metric_name,
                value,
                hostname=mor['hostname'],
                # FIX: submit the computed `tags` (previously discarded)
                tags=tags + custom_tags
            )

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_colection.time', t.total(), tags=custom_tags)
def _cache_morlist_process_atomic(self, instance, query_specs):
    """Process one batch from the mor objects cache by querying the available
    metrics for these MORs and then putting them in self.morlist.
    """
    # ## <TEST-INSTRUMENTATION>
    timer = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perf_manager = server_instance.content.perfManager

    # With QueryPerf, we can get metric information about several MORs at once. Let's use it
    # to avoid making one API call per object, even if we also get metrics values that are useless for now.
    # See https://code.vmware.com/apis/358/vsphere#/doc/vim.PerformanceManager.html#queryStats
    # query_specs is a list of QuerySpec objects.
    # See https://code.vmware.com/apis/358/vsphere#/doc/vim.PerformanceManager.QuerySpec.html
    for entity_perfs in perf_manager.QueryPerf(query_specs):
        mor_name = str(entity_perfs.entity)
        available_metrics = [metric_value.id for metric_value in entity_perfs.value]
        try:
            needed = self._compute_needed_metrics(instance, available_metrics)
            self.morlist[i_key][mor_name]['metrics'] = needed
        except KeyError:
            # The object was removed from the cache while the query was in flight.
            self.log.error(
                "Trying to compute needed metrics from object %s deleted from the cache, skipping. "
                "Consider increasing the parameter `clean_morlist_interval` to avoid that", mor_name)
            continue

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', timer.total(), tags=instance.get('tags', []))
def _cache_morlist_process_atomic(self, instance, mor):
    """Query the metrics available for one MOR and register it in
    ``self.morlist`` under the instance's cache key, bumping ``last_seen``.
    """
    # ## <TEST-INSTRUMENTATION>
    timer = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    perf_manager = self._get_server_instance(instance).content.perfManager

    self.log.debug("job_atomic: Querying available metrics"
                   " for MOR {0} (type={1})".format(mor['mor'], mor['mor_type']))

    # Only realtime resources are sampled at the real-time interval.
    if mor['mor_type'] in REALTIME_RESOURCES:
        mor['interval'] = REAL_TIME_INTERVAL
    else:
        mor['interval'] = None

    available_metrics = perf_manager.QueryAvailablePerfMetric(mor['mor'], intervalId=mor['interval'])
    mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)

    mor_name = str(mor['mor'])
    instance_cache = self.morlist[i_key]
    if mor_name not in instance_cache:
        instance_cache[mor_name] = mor
    else:
        # Was already here last iteration: refresh the metrics list only.
        instance_cache[mor_name]['metrics'] = mor['metrics']
    instance_cache[mor_name]['last_seen'] = time.time()

    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', timer.total())