def test_gke_environment(self):
    """When the GKE service-host env var is set, a GKE resource is detected."""
    env_patch = mock.patch.dict(
        os.environ, {'KUBERNETES_SERVICE_HOST': '127.0.0.1'})
    with env_patch:
        resource = MonitoredResourceUtil.get_instance()
        self.assertIsNotNone(resource)
        self.assertIsInstance(resource, GcpGkeMonitoredResource)
def test_gce_environment(self):
    """When the GCP metadata probe reports True, a GCE resource is detected."""
    gcp_patch = mock.patch(
        'opencensus.common.monitored_resource_util.'
        'gcp_metadata_config.GcpMetadataConfig.'
        'is_running_on_gcp',
        return_value=True)
    with gcp_patch:
        resource = MonitoredResourceUtil.get_instance()
        self.assertIsNotNone(resource)
        self.assertIsInstance(resource, GcpGceMonitoredResource)
def set_monitored_resource_attributes(span):
    """Copy detected monitored-resource labels onto *span*'s attribute map.

    Does nothing when no monitored resource can be detected.

    :param span: Span object to receive the resource labels.
    """
    resource = MonitoredResourceUtil.get_instance()
    if resource is None:
        return
    for key, value in resource.get_resource_labels().items():
        # AWS region values are namespaced with an 'aws:' prefix.
        if key == 'region':
            value = 'aws:' + value
        label_key = RESOURCE_LABEL % (resource.resource_type, key)
        formatted = Attributes({label_key: value}).format_attributes_json()
        _update_attr_map(span, formatted.get('attributeMap'))
def set_monitored_resource(series, option_resource_type):
    """Set a resource(type and labels) that can be used for monitoring.

    :param series: TimeSeries object based on view data
    :param option_resource_type: Resource is an optional field that
    represents the Stackdriver MonitoredResource type.
    """
    # An explicitly supplied resource type wins outright.
    if option_resource_type != "":
        series.resource.type = option_resource_type
        return

    resource_type = GLOBAL_RESOURCE_TYPE
    detected = MonitoredResourceUtil.get_instance()
    if detected is not None:
        labels = detected.get_resource_labels()
        detected_type = detected.resource_type
        if detected_type == 'gke_container':
            # GKE containers are reported as Stackdriver k8s_container.
            resource_type = 'k8s_container'
            set_attribute_label(series, labels, 'project_id')
            set_attribute_label(series, labels, 'cluster_name')
            set_attribute_label(series, labels, 'container_name')
            set_attribute_label(series, labels, 'namespace_id',
                                'namespace_name')
            set_attribute_label(series, labels, 'pod_id', 'pod_name')
            set_attribute_label(series, labels, 'zone', 'location')
        elif detected_type == 'gce_instance':
            resource_type = detected_type
            set_attribute_label(series, labels, 'project_id')
            set_attribute_label(series, labels, 'instance_id')
            set_attribute_label(series, labels, 'zone')
        elif detected_type == 'aws_ec2_instance':
            resource_type = detected_type
            set_attribute_label(series, labels, 'aws_account')
            set_attribute_label(series, labels, 'instance_id')
            set_attribute_label(series, labels, 'region',
                                label_value_prefix='aws:')
    series.resource.type = resource_type
def set_monitored_resource_attributes(span):
    """Set labels to span that can be used for tracing.

    :param span: Span object
    """
    monitored_resource = MonitoredResourceUtil.get_instance()
    if monitored_resource is None:
        return

    resource_type = monitored_resource.resource_type
    resource_labels = monitored_resource.get_resource_labels()

    if resource_type == 'gke_container':
        # GKE containers surface as the Stackdriver k8s_container type.
        resource_type = 'k8s_container'
        for label_args in (('project_id',),
                           ('cluster_name',),
                           ('container_name',),
                           ('namespace_id', 'namespace_name'),
                           ('pod_id', 'pod_name'),
                           ('zone', 'location')):
            set_attribute_label(span, resource_type, resource_labels,
                                *label_args)
    elif resource_type == 'gce_instance':
        for label_key in ('project_id', 'instance_id', 'zone'):
            set_attribute_label(span, resource_type, resource_labels,
                                label_key)
    elif resource_type == 'aws_ec2_instance':
        set_attribute_label(span, resource_type, resource_labels,
                            'aws_account')
        set_attribute_label(span, resource_type, resource_labels,
                            'instance_id')
        set_attribute_label(span, resource_type, resource_labels,
                            'region', label_value_prefix='aws:')
def create_time_series_list(self, v_data, resource_type, metric_prefix):
    """Create the TimeSeries object based on the view data.

    :param v_data: view data holding the aggregated values and the
        start/end timestamps of the aggregation window.
    :param resource_type: explicit Stackdriver monitored-resource type;
        an empty string means auto-detect from the environment, falling
        back to 'global' when nothing is detected.
    :param metric_prefix: prefix used to namespace the metric type name.
    :return: a populated ``monitoring_v3.types.TimeSeries``.
    """
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = namespaced_view_name(v_data.view.name,
                                              metric_prefix)

    # Resolve the monitored resource: an explicit type wins; otherwise
    # probe the environment and fall back to 'global'.
    if resource_type == "":
        monitor_resource = MonitoredResourceUtil.get_instance()
        if monitor_resource is not None:
            series.resource.type = monitor_resource.resource_type
            labels = monitor_resource.get_resource_labels()
            for attribute_key, attribute_value in labels.items():
                # AWS region values are namespaced with an 'aws:' prefix.
                if attribute_key == 'region':
                    attribute_value = 'aws:' + attribute_value
                series.resource.labels[attribute_key] = attribute_value
        else:
            series.resource.type = 'global'
    else:
        series.resource.type = resource_type

    # The aggregation window is the same for every point, so parse the
    # timestamps once instead of once per tag value (they were previously
    # re-parsed on every loop iteration).
    start = datetime.strptime(v_data.start_time, EPOCH_PATTERN)
    end = datetime.strptime(v_data.end_time, EPOCH_PATTERN)
    timestamp_start = (start - EPOCH_DATETIME).total_seconds()
    timestamp_end = (end - EPOCH_DATETIME).total_seconds()

    tag_agg = v_data.tag_value_aggregation_data_map
    for tag_value, agg in tag_agg.items():
        point = series.points.add()
        if type(agg) is \
                aggregation.aggregation_data.DistributionAggregationData:
            agg_data = tag_agg.get(tag_value)
            dist_value = point.value.distribution_value
            dist_value.count = agg_data.count_data
            dist_value.mean = agg_data.mean_data
            dist_value.sum_of_squared_deviation = \
                agg_data.sum_of_sqd_deviations
            # Uncomment this when stackdriver supports Range
            # point.value.distribution_value.range.min = agg_data.min
            # point.value.distribution_value.range.max = agg_data.max
            bounds = dist_value.bucket_options.explicit_buckets.bounds
            bounds.extend(list(map(float, agg_data.bounds)))
            buckets = dist_value.bucket_counts
            buckets.extend(list(map(int, agg_data.counts_per_bucket)))
        else:
            conv_float, is_float = as_float(tag_value[0])
            if is_float:  # pragma: NO COVER
                point.value.double_value = conv_float
            else:  # pragma: NO COVER
                point.value.string_value = str(tag_value[0])

        # Split fractional epoch seconds into protobuf seconds/nanos.
        end_secs = int(timestamp_end)
        point.interval.end_time.seconds = end_secs
        point.interval.end_time.nanos = int((timestamp_end - end_secs) * 1e9)

        # Gauge-like (last value) aggregations carry only an end time;
        # everything else also needs a start time.
        if type(agg) is not aggregation.aggregation_data.\
                LastValueAggregationData:  # pragma: NO COVER
            point_start = timestamp_start
            if point_start == timestamp_end:
                # Avoid start_time and end_time being equal; nudge the
                # start back one second for this point only.
                point_start = point_start - 1
            start_time = point.interval.start_time
            start_secs = int(point_start)
            start_time.seconds = start_secs
            start_time.nanos = int((point_start - start_secs) * 1e9)
    return series
def test_non_supported_environment(self, aws_util_mock, gcp_metadata_mock):
    """Outside any known cloud environment, get_instance() returns None."""
    self.assertIsNone(MonitoredResourceUtil.get_instance())
def test_aws_environment(self, aws_util_mock, gcp_metadata_mock):
    """With AWS metadata mocked in, an AwsMonitoredResource is returned."""
    resource = MonitoredResourceUtil.get_instance()
    self.assertIsNotNone(resource)
    self.assertIsInstance(resource, AwsMonitoredResource)