    def test_finding_gce_resources(self, getter):
        # The necessary env variables were not set for GKE resource detection
        # to succeed. We should be falling back to detecting GCE resources
        resource_finder = GoogleCloudResourceDetector()
        getter.return_value.json.return_value = GCE_RESOURCES_JSON_STRING
        found_resources = resource_finder.detect()
        self.assertEqual(getter.call_args_list[0][0][0], _GCP_METADATA_URL)
        self.assertEqual(
            found_resources,
            Resource(
                attributes={
                    "host.id": "instance_id",
                    "cloud.provider": "gcp",
                    "cloud.account.id": "project_id",
                    "cloud.zone": "zone",
                    "gcp.resource_type": "gce_instance",
                }),
        )
        self.assertEqual(getter.call_count, 1)

        # Found resources should be cached and not require another network call
        found_resources = resource_finder.detect()
        self.assertEqual(getter.call_count, 1)
        self.assertEqual(
            found_resources,
            Resource(
                attributes={
                    "host.id": "instance_id",
                    "cloud.provider": "gcp",
                    "cloud.account.id": "project_id",
                    "cloud.zone": "zone",
                    "gcp.resource_type": "gce_instance",
                }),
        )

    def test_finding_resources(self, getter):
        resource_finder = GoogleCloudResourceDetector()
        getter.return_value.json.return_value = RESOURCES_JSON_STRING
        found_resources = resource_finder.detect()
        self.assertEqual(getter.call_args_list[0][0][0], _GCP_METADATA_URL)
        self.assertEqual(
            found_resources,
            Resource(
                labels={
                    "host.id": "instance_id",
                    "cloud.provider": "gcp",
                    "cloud.account.id": "project_id",
                    "cloud.zone": "zone",
                    "gcp.resource_type": "gce_instance",
                }),
        )
        self.assertEqual(getter.call_count, 1)

        # Found resources should be cached and not require another network call
        found_resources = resource_finder.detect()
        self.assertEqual(getter.call_count, 1)
        self.assertEqual(
            found_resources,
            Resource(
                labels={
                    "host.id": "instance_id",
                    "cloud.provider": "gcp",
                    "cloud.account.id": "project_id",
                    "cloud.zone": "zone",
                    "gcp.resource_type": "gce_instance",
                }),
        )
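# Usage sketch, not taken from the tests above: a detector like the one under
# test is normally handed to get_aggregated_resources, which calls detect()
# on each detector and merges the results into the Resource attached to the
# TracerProvider. get_aggregated_resources and TracerProvider are standard
# opentelemetry-sdk APIs; how the detector itself is imported depends on the
# package version.
from opentelemetry.sdk.resources import get_aggregated_resources
from opentelemetry.sdk.trace import TracerProvider


def tracer_provider_with_detector(detector):
    # detect() runs here; thanks to the caching shown above, reusing the same
    # detector instance would not trigger another metadata request.
    resource = get_aggregated_resources([detector])
    return TracerProvider(resource=resource)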
    def detect(self) -> "Resource":
        if not self.cached:
            # Run every registered finder once and cache the merged attributes
            # so repeated detect() calls skip the network entirely.
            self.cached = True
            for resource_finder in _RESOURCE_FINDERS:
                found_resources = resource_finder()
                self.gcp_resources.update(found_resources)
        return Resource(self.gcp_resources)
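# Sketch of what one entry in _RESOURCE_FINDERS might look like, inferred from
# the tests above (which patch an HTTP getter and _GCP_METADATA_URL). The
# function name, the URL value and the shape of the metadata response are
# assumptions for illustration, not the package's actual code; the
# Metadata-Flavor header is the standard GCE metadata-server requirement.
import requests

_GCP_METADATA_URL = (
    "http://metadata.google.internal/computeMetadata/v1/?recursive=true"
)  # assumed value
_GCP_METADATA_URL_HEADER = {"Metadata-Flavor": "Google"}


def get_gce_resources():
    # Query the metadata server and map the reply onto the attribute keys
    # asserted in the tests above.
    all_metadata = requests.get(
        _GCP_METADATA_URL, headers=_GCP_METADATA_URL_HEADER, timeout=5
    ).json()
    return {
        "host.id": all_metadata["instance"]["id"],
        "cloud.provider": "gcp",
        "cloud.account.id": all_metadata["project"]["projectId"],
        "cloud.zone": all_metadata["instance"]["zone"].split("/")[-1],
        "gcp.resource_type": "gce_instance",
    }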
    def detect(self) -> "Resource":
        try:
            token = _get_token()
            identity_dict = json.loads(_get_identity(token))
            hostname = _get_host(token)

            return Resource({
                ResourceAttributes.CLOUD_PROVIDER:
                CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM:
                CloudPlatformValues.AWS_EC2.value,
                ResourceAttributes.CLOUD_ACCOUNT_ID:
                identity_dict["accountId"],
                ResourceAttributes.CLOUD_REGION:
                identity_dict["region"],
                ResourceAttributes.CLOUD_AVAILABILITY_ZONE:
                identity_dict["availabilityZone"],
                ResourceAttributes.HOST_ID:
                identity_dict["instanceId"],
                ResourceAttributes.HOST_TYPE:
                identity_dict["instanceType"],
                ResourceAttributes.HOST_NAME:
                hostname,
            })
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
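# Sketch of the helpers called above (_get_token, _get_identity, _get_host).
# This is an illustrative IMDSv2 implementation, not necessarily the one the
# detector ships with; the URLs and headers are the standard EC2 instance
# metadata service endpoints.
from urllib.request import Request, urlopen

_IMDS_BASE = "http://169.254.169.254/latest"


def _get_token():
    # IMDSv2 session tokens are obtained with a PUT request.
    request = Request(
        _IMDS_BASE + "/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "60"},
        method="PUT",
    )
    with urlopen(request, timeout=5) as response:
        return response.read().decode()


def _get_identity(token):
    # The instance identity document is a JSON string carrying accountId,
    # region, availabilityZone, instanceId and instanceType.
    request = Request(
        _IMDS_BASE + "/dynamic/instance-identity/document",
        headers={"X-aws-ec2-metadata-token": token},
    )
    with urlopen(request, timeout=5) as response:
        return response.read().decode()


def _get_host(token):
    request = Request(
        _IMDS_BASE + "/meta-data/hostname",
        headers={"X-aws-ec2-metadata-token": token},
    )
    with urlopen(request, timeout=5) as response:
        return response.read().decode()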
Example 5
    def detect(self) -> "Resource":
        if os.name == "nt":
            conf_file_path = (
                "C:\\Program Files\\Amazon\\XRay\\environment.conf")
        else:
            conf_file_path = "/var/elasticbeanstalk/xray/environment.conf"

        try:
            with open(conf_file_path, encoding="utf-8") as conf_file:
                parsed_data = json.load(conf_file)

            return Resource({
                ResourceAttributes.CLOUD_PROVIDER:
                CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM:
                CloudPlatformValues.AWS_ELASTIC_BEANSTALK.value,
                ResourceAttributes.SERVICE_NAME:
                CloudPlatformValues.AWS_ELASTIC_BEANSTALK.value,
                ResourceAttributes.SERVICE_INSTANCE_ID:
                parsed_data["deployment_id"],
                ResourceAttributes.SERVICE_NAMESPACE:
                parsed_data["environment_name"],
                ResourceAttributes.SERVICE_VERSION:
                parsed_data["version_label"],
            })
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
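# Sketch of how the Elastic Beanstalk detector above can be exercised without
# a real environment.conf: the file contents are hypothetical, but they carry
# exactly the keys the detector reads (deployment_id, environment_name,
# version_label). mock_open comes from the standard library.
import json
from unittest import mock

SAMPLE_ENVIRONMENT_CONF = json.dumps({
    "deployment_id": 4,
    "environment_name": "my-env",
    "version_label": "app-v1",
})


def detect_with_fake_conf(detector):
    # Patch builtins.open so the detector parses the sample JSON instead of
    # /var/elasticbeanstalk/xray/environment.conf.
    with mock.patch(
        "builtins.open", mock.mock_open(read_data=SAMPLE_ENVIRONMENT_CONF)
    ):
        return detector.detect()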
Example 6
    def detect(self) -> "Resource":
        try:
            return Resource({
                ResourceAttributes.CLOUD_PROVIDER:
                CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM:
                CloudPlatformValues.AWS_LAMBDA.value,
                ResourceAttributes.CLOUD_REGION:
                environ["AWS_REGION"],
                ResourceAttributes.FAAS_NAME:
                environ["AWS_LAMBDA_FUNCTION_NAME"],
                ResourceAttributes.FAAS_VERSION:
                environ["AWS_LAMBDA_FUNCTION_VERSION"],
                ResourceAttributes.FAAS_INSTANCE:
                environ["AWS_LAMBDA_LOG_STREAM_NAME"],
                ResourceAttributes.FAAS_MAX_MEMORY:
                int(environ["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"]),
            })
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
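# Example with made-up values: the Lambda detector above reads only the
# runtime environment variables that AWS Lambda sets, so it can be exercised
# locally by patching os.environ with mock.patch.dict.
from unittest import mock

_FAKE_LAMBDA_ENV = {
    "AWS_REGION": "us-east-1",
    "AWS_LAMBDA_FUNCTION_NAME": "my-function",
    "AWS_LAMBDA_FUNCTION_VERSION": "$LATEST",
    "AWS_LAMBDA_LOG_STREAM_NAME": "2021/01/01/[$LATEST]abcdef0123456789",
    "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "128",
}


def detect_in_fake_lambda(detector):
    # patch.dict mutates os.environ in place, so it also covers modules that
    # imported environ with `from os import environ`.
    with mock.patch.dict("os.environ", _FAKE_LAMBDA_ENV):
        return detector.detect()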
Example 7
    def test_populate_part_a_fields_default(self):
        resource = Resource({"service.name": "testServiceName"})
        tags = _utils._populate_part_a_fields(resource)
        self.assertIsNotNone(tags)
        self.assertEqual(tags.get("ai.cloud.role"), "testServiceName")
        self.assertEqual(tags.get("ai.cloud.roleInstance"), platform.node())
        self.assertEqual(tags.get("ai.internal.nodeName"),
                         tags.get("ai.cloud.roleInstance"))
Example 8
    def test_max_tag_value_length(self):
        span = trace._Span(
            name="span",
            resource=Resource(
                attributes={
                    "key_resource": "some_resource some_resource some_more_resource"
                }
            ),
            context=trace_api.SpanContext(
                trace_id=0x000000000000000000000000DEADBEEF,
                span_id=0x00000000DEADBEF0,
                is_remote=False,
            ),
        )

        span.start()
        span.set_attribute("key_bool", False)
        span.set_attribute("key_string", "hello_world hello_world hello_world")
        span.set_attribute("key_float", 111.22)
        span.set_attribute("key_int", 1100)
        span.set_attribute("key_tuple", ("tuple_element", "tuple_element2"))
        span.end()

        translate = Translate([span])

        # does not truncate by default
        # pylint: disable=protected-access
        spans = translate._translate(pb_translator.ProtobufTranslator("svc"))
        tags_by_keys = {
            tag.key: tag.v_str
            for tag in spans[0].tags
            if tag.v_type == model_pb2.ValueType.STRING
        }
        self.assertEqual(
            "hello_world hello_world hello_world", tags_by_keys["key_string"]
        )
        self.assertEqual(
            "('tuple_element', 'tuple_element2')", tags_by_keys["key_tuple"]
        )
        self.assertEqual(
            "some_resource some_resource some_more_resource",
            tags_by_keys["key_resource"],
        )

        # truncates when max_tag_value_length is passed
        # pylint: disable=protected-access
        spans = translate._translate(
            pb_translator.ProtobufTranslator("svc", max_tag_value_length=5)
        )
        tags_by_keys = {
            tag.key: tag.v_str
            for tag in spans[0].tags
            if tag.v_type == model_pb2.ValueType.STRING
        }
        self.assertEqual("hello", tags_by_keys["key_string"])
        self.assertEqual("('tup", tags_by_keys["key_tuple"])
        self.assertEqual("some_", tags_by_keys["key_resource"])
Example 9
    def test_extract_unsupported_gcp_resources(self):
        resource = Resource(
            labels={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "gcp.resource_type": "unsupported_gcp_resource",
                "cloud.provider": "gcp",
            })
        self.assertEqual(_extract_resources(resource), {})
Example 10
    def test_valid_convert_to_timeseries(self):
        test_records = [
            ExportRecord(
                Counter("testname", "testdesc", "testunit", int, None),
                None,
                SumAggregator(),
                Resource({}),
            ),
            ExportRecord(
                Counter("testname", "testdesc", "testunit", int, None),
                None,
                MinMaxSumCountAggregator(),
                Resource({}),
            ),
            ExportRecord(
                Counter("testname", "testdesc", "testunit", int, None),
                None,
                HistogramAggregator(),
                Resource({}),
            ),
            ExportRecord(
                Counter("testname", "testdesc", "testunit", int, None),
                None,
                LastValueAggregator(),
                Resource({}),
            ),
            ExportRecord(
                Counter("testname", "testdesc", "testunit", int, None),
                None,
                ValueObserverAggregator(),
                Resource({}),
            ),
        ]
        for record in test_records:
            record.aggregator.update(5)
            record.aggregator.take_checkpoint()
        data = self.exporter._convert_to_timeseries(test_records)
        self.assertIsInstance(data, list)
        self.assertEqual(len(data), 13)
        for timeseries in data:
            self.assertIsInstance(timeseries, TimeSeries)
Example 11
    def test_log_to_envelope_partA_default(self):
        exporter = self._exporter
        old_resource = self._log_data.log_record.resource
        resource = Resource({"service.name": "testServiceName"})
        self._log_data.log_record.resource = resource
        envelope = exporter._log_to_envelope(self._log_data)
        self.assertEqual(envelope.tags.get("ai.cloud.role"), "testServiceName")
        self.assertEqual(envelope.tags.get("ai.cloud.roleInstance"),
                         platform.node())
        self.assertEqual(envelope.tags.get("ai.internal.nodeName"),
                         envelope.tags.get("ai.cloud.roleInstance"))
        self._log_data.log_record.resource = old_resource