Ejemplo n.º 1
0
class PrometheusMetricsExporterWire(WaitMixin, Wire):
    """

    .. wire:: harness.wires.opentelemetry.ext.prometheus.PrometheusMetricsExporterWire
      :type: output
      :runtime: python
      :config: harness.metrics.Prometheus
      :requirements: opentelemetry-ext-prometheus==0.5b0

    """

    _config: metrics_pb2.Prometheus
    _controller: PushController

    def configure(self, value: metrics_pb2.Prometheus):
        # Only the Prometheus protobuf config message is accepted here.
        assert isinstance(value, metrics_pb2.Prometheus), type(value)
        self._config = value

    async def __aenter__(self):
        """Start the metrics push pipeline and the Prometheus scrape endpoint."""
        config = self._config
        exporter = PrometheusMetricsExporter(config.prefix)
        # Collect from the meter and hand off to the exporter every 5 seconds.
        self._controller = PushController(
            metrics.get_meter(__name__), exporter, 5
        )

        start_http_server(config.bind.port, config.bind.host)
        _log.info(
            "%s started: addr=%s:%d",
            self.__class__.__name__,
            config.bind.host,
            config.bind.port,
        )

    def close(self):
        super().close()
        # Avoid a double shutdown if the controller already finished.
        controller = self._controller
        if not controller.finished.is_set():
            controller.shutdown()
Ejemplo n.º 2
0
 def test_push_controller(self):
     """shutdown() must mark the controller finished and shut the exporter down."""
     meter = mock.Mock()
     exporter = mock.Mock()
     controller = PushController(meter, exporter, 5.0)
     # Nothing may be collected or exported before the first tick/shutdown.
     meter.collect.assert_not_called()
     exporter.export.assert_not_called()
     controller.shutdown()
     # is_set(): isSet() is the deprecated camelCase alias of
     # threading.Event.is_set() (deprecated since Python 3.10).
     self.assertTrue(controller.finished.is_set())
     exporter.shutdown.assert_any_call()
Ejemplo n.º 3
0
 def test_push_controller_suppress_instrumentation(self):
     """tick() must set the suppress_instrumentation flag while exporting."""
     meter = mock.Mock()
     exporter = mock.Mock()
     # The export callback runs while suppression is active, so the flag
     # must be visible from inside it.
     exporter.export = lambda x: self.assertIsNotNone(
         get_value("suppress_instrumentation"))
     with mock.patch(
             "opentelemetry.context._RUNTIME_CONTEXT") as context_patch:
         controller = PushController(meter, exporter, 30.0)
         controller.tick()
         # tick() attaches the flag before export and detaches it afterwards.
         self.assertTrue(context_patch.attach.called)
         self.assertTrue(context_patch.detach.called)
     # After the tick the suppression flag must be gone again.
     self.assertIsNone(get_value("suppress_instrumentation"))
Ejemplo n.º 4
0
    async def __aenter__(self):
        """Start the Prometheus export pipeline and the scrape HTTP server."""
        config = self._config
        exporter = PrometheusMetricsExporter(config.prefix)
        # Push collected metrics to the exporter every 5 seconds.
        self._controller = PushController(
            metrics.get_meter(__name__), exporter, 5
        )

        start_http_server(config.bind.port, config.bind.host)
        _log.info(
            "%s started: addr=%s:%d",
            self.__class__.__name__,
            config.bind.host,
            config.bind.port,
        )
Ejemplo n.º 5
0
    def test_push_controller(self):
        """shutdown() must flush the meter exactly once and finish the controller."""
        meter = mock.Mock()
        exporter = mock.Mock()
        controller = PushController(meter, exporter, 5.0)
        # No collection/export may happen before shutdown triggers the flush.
        meter.collect.assert_not_called()
        exporter.export.assert_not_called()

        controller.shutdown()
        # is_set(): isSet() is the deprecated camelCase alias of
        # threading.Event.is_set() (deprecated since Python 3.10).
        self.assertTrue(controller.finished.is_set())

        # shutdown should flush the meter
        self.assertEqual(meter.collect.call_count, 1)
        self.assertEqual(exporter.export.call_count, 1)
Ejemplo n.º 6
0
    def test_export(self):
        """Push one counter point through the real gRPC transport without warnings."""
        grpc_channel = grpc.insecure_channel(self.address)
        grpc_transport = metric_service_grpc_transport.MetricServiceGrpcTransport(
            channel=grpc_channel
        )
        # Wrap the real client so its calls can be inspected afterwards.
        service_client = MagicMock(
            wraps=MetricServiceClient(transport=grpc_transport)
        )
        exporter = CloudMonitoringMetricsExporter(
            self.project_id, client=service_client
        )

        gce_resource = Resource.create(
            {
                "cloud.account.id": "some_account_id",
                "cloud.provider": "gcp",
                "cloud.zone": "us-east1-b",
                "host.id": 654321,
                "gcp.resource_type": "gce_instance",
            }
        )
        meter = metrics.MeterProvider(resource=gce_resource).get_meter(__name__)
        counter = meter.create_counter(
            # TODO: remove "opentelemetry/" prefix which is a hack
            # https://github.com/GoogleCloudPlatform/opentelemetry-operations-python/issues/84
            name="opentelemetry/name",
            description="desc",
            unit="1",
            value_type=int,
        )
        # interval doesn't matter, we don't start the thread and just run
        # tick() instead
        controller = PushController(meter, exporter, 10)

        counter.add(10, {"env": "test"})

        with patch(
            "opentelemetry.exporter.cloud_monitoring.logger"
        ) as mock_logger:
            controller.tick()

            # run tox tests with `-- -log-cli-level=0` to see mock calls made
            logger.debug(service_client.create_time_series.mock_calls)
            mock_logger.warning.assert_not_called()
            mock_logger.error.assert_not_called()
Ejemplo n.º 7
0
    def __init__(self, tracer, exporter, interval):
        # Tracer used to create spans for instrumented calls.
        self._tracer = tracer

        # Metrics are optional: they are only wired up when both an exporter
        # and a push interval are provided.
        self._meter = None
        if exporter and interval:
            self._meter = metrics.get_meter(__name__)
            # NOTE(review): self.controller is only assigned on this branch,
            # so the attribute does not exist when metrics are disabled —
            # confirm that callers guard their access to it.
            self.controller = PushController(
                meter=self._meter, exporter=exporter, interval=interval
            )
        # When metrics are disabled the recorder receives meter=None —
        # presumably TimedMetricRecorder tolerates that; verify.
        self._metrics_recorder = TimedMetricRecorder(self._meter, "client")
    def setUp(self):
        """Start the test gRPC server and build an intercepted client channel."""
        super().setUp()
        self.server = create_test_server(25565)
        self.server.start()

        test_meter = metrics.get_meter(__name__)
        grpc_interceptor = client_interceptor()
        raw_channel = grpc.insecure_channel("localhost:25565")
        self.channel = intercept_channel(raw_channel, grpc_interceptor)
        self._stub = test_server_pb2_grpc.GRPCTestServerStub(self.channel)

        # Export captured metrics into the in-memory exporter every 30s.
        self._controller = PushController(
            test_meter, self.memory_metrics_exporter, 30
        )
Ejemplo n.º 9
0
    def test_histogram_stateless(self):
        """Stateless meter: histogram buckets must reset between export ticks."""
        # Use the meter type provided by the SDK package
        meter = metrics.MeterProvider(stateful=False).get_meter(__name__)
        exporter = InMemoryMetricsExporter()
        controller = PushController(meter, exporter, 30)

        requests_size = meter.create_metric(
            name="requests_size",
            description="size of requests",
            unit="1",
            value_type=int,
            metric_type=ValueRecorder,
        )

        size_view = View(
            requests_size,
            HistogramAggregator,
            aggregator_config={"bounds": [20, 40, 60, 80, 100]},
            label_keys=["environment"],
            view_config=ViewConfig.LABEL_KEYS,
        )

        meter.register_view(size_view)

        # Since this is using the HistogramAggregator, the bucket counts will
        # be reflected with each record.  The meter is stateless, so the
        # second round must produce exactly the same checkpoint as the first
        # — the two previously duplicated sections are folded into a loop.
        for _ in range(2):
            requests_size.record(25, {"environment": "staging", "test": "value"})
            requests_size.record(1, {"environment": "staging", "test": "value2"})
            requests_size.record(200, {"environment": "staging", "test": "value3"})

            controller.tick()

            metrics_list = exporter.get_exported_metrics()
            self.assertEqual(len(metrics_list), 1)
            checkpoint = metrics_list[0].aggregator.checkpoint
            self.assertEqual(
                tuple(checkpoint.items()),
                ((20, 1), (40, 1), (60, 0), (80, 0), (100, 0), (inf, 1)),
            )
            exporter.clear()
Ejemplo n.º 10
0
    def start_pipeline(
        self,
        meter: metrics_api.Meter,
        exporter: MetricsExporter = None,
        interval: float = 15.0,
    ) -> None:
        """Method to begin the collect/export pipeline.

        Args:
            meter: The meter to collect metrics from.
            exporter: The exporter to export metrics to.  A console exporter
                is used when none is given.
            interval: The collect/export interval in seconds.
        """
        exporter = exporter or ConsoleMetricsExporter()
        self._exporters.add(exporter)
        # TODO: Controller type configurable?
        self._controllers.append(PushController(meter, exporter, interval))
    def __init__(
        self,
        exporter: MetricsExporter,
        interval: int = 30,
        labels: typing.Optional[typing.Dict[str, str]] = None,
        config: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
    ):
        """Observe system and runtime metrics via psutil and push to *exporter*.

        Args:
            exporter: destination for the collected metrics.
            interval: push interval in seconds.
            labels: base labels attached to every observation.
            config: per-metric list of fields to observe; a default covering
                CPU, memory, swap, disk and network is used when omitted.
        """
        self._labels = {} if labels is None else labels
        self.accumulator = metrics.get_meter(__name__)
        # Push collected metrics every `interval` seconds.
        self.controller = PushController(accumulator=self.accumulator,
                                         exporter=exporter,
                                         interval=interval)
        self._python_implementation = python_implementation().lower()
        if config is None:
            self._config = {
                "system.cpu.time": ["idle", "user", "system", "irq"],
                "system.cpu.utilization": ["idle", "user", "system", "irq"],
                "system.memory.usage": ["used", "free", "cached"],
                "system.memory.utilization": ["used", "free", "cached"],
                "system.swap.usage": ["used", "free"],
                "system.swap.utilization": ["used", "free"],
                # system.swap.page.faults: [],
                # system.swap.page.operations: [],
                "system.disk.io": ["read", "write"],
                "system.disk.operations": ["read", "write"],
                "system.disk.time": ["read", "write"],
                "system.disk.merged": ["read", "write"],
                # "system.filesystem.usage": [],
                # "system.filesystem.utilization": [],
                "system.network.dropped.packets": ["transmit", "receive"],
                "system.network.packets": ["transmit", "receive"],
                "system.network.errors": ["transmit", "receive"],
                # NOTE(review): "trasmit" looks like a typo for "transmit",
                # but the callback that consumes it is not visible here —
                # confirm before changing the key list.
                "system.network.io": ["trasmit", "receive"],
                "system.network.connections": ["family", "type"],
                "runtime.memory": ["rss", "vms"],
                "runtime.cpu.time": ["user", "system"],
            }
        else:
            self._config = config

        self._proc = psutil.Process(os.getpid())

        # Each observer callback gets its own label dict seeded from the
        # shared base labels, so callbacks can mutate theirs independently.
        for label_attr in (
            "_system_cpu_time_labels",
            "_system_cpu_utilization_labels",
            "_system_memory_usage_labels",
            "_system_memory_utilization_labels",
            "_system_swap_usage_labels",
            "_system_swap_utilization_labels",
            # "_system_swap_page_faults" / "_system_swap_page_operations"
            # and the filesystem label dicts are intentionally not created —
            # their observers below are disabled as well.
            "_system_disk_io_labels",
            "_system_disk_operations_labels",
            "_system_disk_time_labels",
            "_system_disk_merged_labels",
            "_system_network_dropped_packets_labels",
            "_system_network_packets_labels",
            "_system_network_errors_labels",
            "_system_network_io_labels",
            "_system_network_connections_labels",
            "_runtime_memory_labels",
            "_runtime_cpu_time_labels",
            "_runtime_gc_count_labels",
        ):
            setattr(self, label_attr, self._labels.copy())

        impl = self._python_implementation
        # All observers share the same registration shape; register them from
        # a table instead of 18 near-identical call sites.  Rows are
        # (register method, callback, name, description, unit, value_type)
        # and the order matches the original call order.  The swap-page and
        # filesystem observers from the original remain disabled.
        observer_rows = (
            (self.accumulator.register_sumobserver,
             self._get_system_cpu_time,
             "system.cpu.time", "System CPU time", "seconds", float),
            (self.accumulator.register_valueobserver,
             self._get_system_cpu_utilization,
             "system.cpu.utilization", "System CPU utilization", "1", float),
            (self.accumulator.register_valueobserver,
             self._get_system_memory_usage,
             "system.memory.usage", "System memory usage", "bytes", int),
            (self.accumulator.register_valueobserver,
             self._get_system_memory_utilization,
             "system.memory.utilization", "System memory utilization", "1",
             float),
            (self.accumulator.register_valueobserver,
             self._get_system_swap_usage,
             "system.swap.usage", "System swap usage", "pages", int),
            (self.accumulator.register_valueobserver,
             self._get_system_swap_utilization,
             "system.swap.utilization", "System swap utilization", "1",
             float),
            (self.accumulator.register_sumobserver,
             self._get_system_disk_io,
             "system.disk.io", "System disk IO", "bytes", int),
            (self.accumulator.register_sumobserver,
             self._get_system_disk_operations,
             "system.disk.operations", "System disk operations",
             "operations", int),
            (self.accumulator.register_sumobserver,
             self._get_system_disk_time,
             "system.disk.time", "System disk time", "seconds", float),
            (self.accumulator.register_sumobserver,
             self._get_system_disk_merged,
             "system.disk.merged", "System disk merged", "1", int),
            (self.accumulator.register_sumobserver,
             self._get_system_network_dropped_packets,
             "system.network.dropped_packets",
             "System network dropped_packets", "packets", int),
            (self.accumulator.register_sumobserver,
             self._get_system_network_packets,
             "system.network.packets", "System network packets", "packets",
             int),
            (self.accumulator.register_sumobserver,
             self._get_system_network_errors,
             "system.network.errors", "System network errors", "errors",
             int),
            (self.accumulator.register_sumobserver,
             self._get_system_network_io,
             "system.network.io", "System network io", "bytes", int),
            (self.accumulator.register_updownsumobserver,
             self._get_system_network_connections,
             "system.network.connections", "System network connections",
             "connections", int),
            (self.accumulator.register_sumobserver,
             self._get_runtime_memory,
             "runtime.{}.memory".format(impl),
             "Runtime {} memory".format(impl), "bytes", int),
            (self.accumulator.register_sumobserver,
             self._get_runtime_cpu_time,
             "runtime.{}.cpu_time".format(impl),
             "Runtime {} CPU time".format(impl), "seconds", float),
            # NOTE(review): unit "bytes" for a GC *count* is copied verbatim
            # from the original and looks wrong — confirm before changing.
            (self.accumulator.register_sumobserver,
             self._get_runtime_gc_count,
             "runtime.{}.gc_count".format(impl),
             "Runtime {} GC count".format(impl), "bytes", int),
        )
        for register, callback, name, description, unit, value_type in \
                observer_rows:
            register(
                callback=callback,
                name=name,
                description=description,
                unit=unit,
                value_type=value_type,
            )
# Batch finished spans before handing them to the exporter.
span_processor = BatchExportSpanProcessor(span_exporter)
tracer_provider.add_span_processor(span_processor)

# OTLP metrics exporter; endpoint/credentials fall back to defaults.
metric_exporter = OTLPMetricsExporter(
    # optional
    # endpoint:="myCollectorURL:55678",
    # credentials=ChannelCredentials(credentials),
    # metadata=(("metadata", "metadata")),
)

# Meter is responsible for creating and recording metrics
metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)
# controller collects metrics created from meter and exports it via the
# exporter every interval
controller = PushController(meter, metric_exporter, 5)

# Configure the tracer to use the collector exporter
tracer = trace.get_tracer_provider().get_tracer(__name__)

# Emit one demo span so something shows up on the trace side.
with tracer.start_as_current_span("foo"):
    print("Hello world!")

requests_counter = meter.create_counter(
    name="requests",
    description="number of requests",
    unit="1",
    value_type=int,
    label_keys=("environment", ),
)
# Labels are used to identify key-values that are associated with a specific
Ejemplo n.º 13
0
"""
This example shows how to export metrics to the OT collector.
"""

from opentelemetry import metrics
from opentelemetry.ext.otcollector.metrics_exporter import (
    CollectorMetricsExporter, )
from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export.controller import PushController

exporter = CollectorMetricsExporter(service_name="basic-service",
                                    endpoint="localhost:55678")

metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)
controller = PushController(meter, exporter, 5)

requests_counter = meter.create_metric(
    name="requests",
    description="number of requests",
    unit="1",
    value_type=int,
    metric_type=Counter,
    label_keys=("environment", ),
)

staging_label_set = meter.get_label_set({"environment": "staging"})
requests_counter.add(25, staging_label_set)

print("Metrics are available now at http://localhost:9090/graph")
input("Press any key to exit...")
Ejemplo n.º 14
0
    def __init__(
        self,
        exporter: MetricsExporter,
        interval: int = 30,
        labels: typing.Optional[typing.Dict[str, str]] = None,
        config: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
    ):
        """Observe system and runtime metrics via psutil and push to *exporter*.

        Args:
            exporter: destination for the collected metrics.
            interval: push interval in seconds.
            labels: base labels attached to every observation.
            config: per-metric list of fields to observe; a default covering
                memory, CPU and network is used when omitted.
        """
        self._labels = {} if labels is None else labels
        self.meter = metrics.get_meter(__name__)
        # Push collected metrics every `interval` seconds.
        self.controller = PushController(meter=self.meter,
                                         exporter=exporter,
                                         interval=interval)
        if config is None:
            self._config = {
                "system_memory": ["total", "available", "used", "free"],
                "system_cpu": ["user", "system", "idle"],
                "network_bytes": ["bytes_recv", "bytes_sent"],
                "runtime_memory": ["rss", "vms"],
                "runtime_cpu": ["user", "system"],
            }
        else:
            self._config = config
        self._proc = psutil.Process(os.getpid())
        # One label dict per observer, each seeded from the shared base
        # labels.  BUGFIX: the previous element-wise copy loop skipped
        # self._runtime_cpu_labels, so runtime CPU observations silently
        # lost the user-supplied labels; dict.copy() covers every observer.
        self._system_memory_labels = self._labels.copy()
        self._system_cpu_labels = self._labels.copy()
        self._network_bytes_labels = self._labels.copy()
        self._runtime_memory_labels = self._labels.copy()
        self._runtime_cpu_labels = self._labels.copy()
        self._runtime_gc_labels = self._labels.copy()

        # All six observers share the same registration shape; register them
        # from a table instead of six near-identical call sites.  Rows are
        # (callback, name, description, unit, value_type) in original order.
        for callback, name, description, unit, value_type in (
            (self._get_system_memory, "system.mem", "System memory",
             "bytes", int),
            (self._get_system_cpu, "system.cpu", "System CPU",
             "seconds", float),
            (self._get_network_bytes, "system.net.bytes",
             "System network bytes", "bytes", int),
            (self._get_runtime_memory, "runtime.python.mem",
             "Runtime memory", "bytes", int),
            (self._get_runtime_cpu, "runtime.python.cpu", "Runtime CPU",
             "seconds", float),
            (self._get_runtime_gc_count, "runtime.python.gc.count",
             "Runtime: gc objects", "objects", int),
        ):
            self.meter.register_observer(
                callback=callback,
                name=name,
                description=description,
                unit=unit,
                value_type=value_type,
                observer_type=ValueObserver,
            )
from opentelemetry.sdk.metrics import LabelSet, MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher
from opentelemetry.sdk.metrics.export.controller import PushController

# Configure a stateful batcher
# NOTE(review): `batcher` is not passed to the MeterProvider below in this
# snippet — presumably it is wired in elsewhere; confirm.
batcher = UngroupedBatcher(stateful=True)

metrics.set_preferred_meter_provider_implementation(lambda _: MeterProvider())
meter = metrics.get_meter(__name__)

# Exporter to export metrics to the console
exporter = ConsoleMetricsExporter()

# Configure a push controller
# Collect from `meter` and print to the console every 2 seconds.
controller = PushController(meter=meter, exporter=exporter, interval=2)


# Callback to gather cpu usage
def get_cpu_usage_callback(observer):
    """Record the current usage of each CPU as a separately-labelled value."""
    per_cpu_usage = psutil.cpu_percent(percpu=True)
    for cpu_index, usage in enumerate(per_cpu_usage):
        labels = meter.get_label_set({"cpu_number": str(cpu_index)})
        observer.observe(usage, labels)


meter.register_observer(
    callback=get_cpu_usage_callback,
    name="cpu_percent",
    description="per-cpu usage",
    unit="1",
    value_type=float,
Ejemplo n.º 16
0
 def setUp(self):
     """Create a stateless meter wired to an in-memory exporter."""
     provider = metrics.MeterProvider(stateful=False)
     self.meter = provider.get_meter(__name__)
     self.exporter = InMemoryMetricsExporter()
     self.controller = PushController(self.meter, self.exporter, 30)
Ejemplo n.º 17
0
class TestStateless(unittest.TestCase):
    """View behaviour of a stateless meter: label reduction and grouping.

    The identical counter construction and the identical record/tick
    sequence were duplicated across all three tests; they now live in the
    private helpers ``_new_test_counter`` and ``_record_and_tick``.
    """

    def setUp(self):
        self.meter = metrics.MeterProvider(stateful=False).get_meter(__name__)
        self.exporter = InMemoryMetricsExporter()
        self.controller = PushController(self.meter, self.exporter, 30)

    def tearDown(self):
        self.controller.shutdown()

    def _new_test_counter(self):
        # Every test uses an identical counter; build it in one place.
        return self.meter.create_metric(
            name="test_counter",
            description="description",
            unit="By",
            value_type=int,
            metric_type=Counter,
        )

    def _record_and_tick(self, counter):
        # Two adds that differ only in customer_id, then one export tick;
        # returns whatever the in-memory exporter captured.
        counter.add(6, {"environment": "production", "customer_id": 123})
        counter.add(5, {"environment": "production", "customer_id": 247})
        self.controller.tick()
        return self.exporter.get_exported_metrics()

    def test_label_keys(self):
        test_counter = self._new_test_counter()
        counter_view = View(
            test_counter,
            SumAggregator,
            label_keys=["environment"],
            view_config=ViewConfig.LABEL_KEYS,
        )
        self.meter.register_view(counter_view)

        metric_data = self._record_and_tick(test_counter)
        # LABEL_KEYS drops customer_id, so both adds collapse into one series.
        self.assertEqual(len(metric_data), 1)
        self.assertEqual(metric_data[0].labels,
                         (("environment", "production"), ))
        self.assertEqual(metric_data[0].aggregator.checkpoint, 11)

    def test_ungrouped(self):
        test_counter = self._new_test_counter()
        counter_view = View(
            test_counter,
            SumAggregator,
            label_keys=["environment"],
            view_config=ViewConfig.UNGROUPED,
        )
        self.meter.register_view(counter_view)

        metric_data = self._record_and_tick(test_counter)
        data_set = {(data.labels, data.aggregator.checkpoint)
                    for data in metric_data}
        # UNGROUPED keeps every label, so the two adds stay separate.
        self.assertEqual(len(metric_data), 2)
        label1 = (("customer_id", 123), ("environment", "production"))
        label2 = (("customer_id", 247), ("environment", "production"))
        self.assertIn((label1, 6), data_set)
        self.assertIn((label2, 5), data_set)

    def test_multiple_views(self):
        test_counter = self._new_test_counter()
        counter_view = View(
            test_counter,
            SumAggregator,
            label_keys=["environment"],
            view_config=ViewConfig.UNGROUPED,
        )
        mmsc_view = View(
            test_counter,
            MinMaxSumCountAggregator,
            label_keys=["environment"],
            view_config=ViewConfig.LABEL_KEYS,
        )
        self.meter.register_view(counter_view)
        self.meter.register_view(mmsc_view)

        metric_data = self._record_and_tick(test_counter)
        sum_set = set()
        mmsc_set = set()
        for data in metric_data:
            if isinstance(data.aggregator, SumAggregator):
                sum_set.add((data.labels, data.aggregator.checkpoint))
            elif isinstance(data.aggregator, MinMaxSumCountAggregator):
                mmsc_set.add(data)
                self.assertEqual(data.labels,
                                 (("environment", "production"), ))
                self.assertEqual(data.aggregator.checkpoint.sum, 11)
        # we have to assert this way because order is unknown
        self.assertEqual(len(sum_set), 2)
        self.assertEqual(len(mmsc_set), 1)
        label1 = (("customer_id", 123), ("environment", "production"))
        label2 = (("customer_id", 247), ("environment", "production"))
        self.assertIn((label1, 6), sum_set)
        self.assertIn((label2, 5), sum_set)
Ejemplo n.º 18
0
tracer = trace.get_tracer(__name__)

# Export finished spans to Azure Monitor (Application Insights).
trace_exporter = AzureMonitorSpanExporter(
    instrumentation_key=os.environ['APPINSIGHTS_INSTRUMENTATION_KEY'])

span_processor = BatchExportSpanProcessor(trace_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)

# Auto-instrument outgoing `requests` calls so they produce spans.
RequestsInstrumentor().instrument()

# Setup metrics
metrics_exporter = AzureMonitorMetricsExporter(
    instrumentation_key=os.environ['APPINSIGHTS_INSTRUMENTATION_KEY'])
metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)
# Push metrics to Azure Monitor every 10 seconds; the controller runs in
# the background, so the handle is deliberately not kept.
PushController(meter, metrics_exporter, 10)

tfgen_counter = meter.create_metric(
    name="tfgen_counter",
    description="mydemo namespace",
    unit="1",
    value_type=int,
    metric_type=Counter,
)


# Define cloud role
def callback_function(envelope):
    """Telemetry processor: stamp the cloud role name onto every envelope."""
    role_name = os.getenv('APP_NAME')
    envelope.tags['ai.cloud.role'] = role_name
    # Returning True keeps the envelope (False would drop it).
    return True