Example #1
 def test_ungrouped_batcher_process_exists(self):
     meter = metrics.MeterProvider().get_meter(__name__)
     batcher = UngroupedBatcher(True)
     aggregator = CounterAggregator()
     aggregator2 = CounterAggregator()
     metric = metrics.Counter(
         "available memory",
         "available memory",
         "bytes",
         int,
         meter,
         ("environment", ),
     )
     label_set = metrics.LabelSet()
     _batch_map = {}
     _batch_map[(metric, label_set)] = aggregator
     aggregator2.update(1.0)
     batcher._batch_map = _batch_map
     record = metrics.Record(metric, label_set, aggregator2)
     # process() checkpoints the record's aggregator and merges that checkpoint
     # into the aggregator already stored for (metric, label_set), so the stored
     # entry ends up with current == 0 and checkpoint == 1.0
     batcher.process(record)
     self.assertEqual(len(batcher._batch_map), 1)
     self.assertIsNotNone(batcher._batch_map.get((metric, label_set)))
     self.assertEqual(
         batcher._batch_map.get((metric, label_set)).current, 0)
     self.assertEqual(
         batcher._batch_map.get((metric, label_set)).checkpoint, 1.0)
Example #2
 def test_aggregator_for_updowncounter(self):
     batcher = UngroupedBatcher(True)
     self.assertTrue(
         isinstance(
             batcher.aggregator_for(metrics.UpDownCounter),
             SumAggregator,
         ))
 def test_aggregator_for_counter(self):
     batcher = UngroupedBatcher(True)
     self.assertTrue(
         isinstance(
             batcher.aggregator_for(metrics.Counter), CounterAggregator
         )
     )
 def __init__(
     self,
     instrumentation_info: "InstrumentationInfo",
     stateful: bool,
 ):
     self.instrumentation_info = instrumentation_info
     self.metrics = set()
     self.observers = set()
     self.batcher = UngroupedBatcher(stateful)
Example #5
 def __init__(
     self,
     source: "MeterProvider",
     instrumentation_info: "InstrumentationInfo",
 ):
     self.instrumentation_info = instrumentation_info
     self.batcher = UngroupedBatcher(source.stateful)
     self.resource = source.resource
     self.metrics = set()
     self.observers = set()
     self.observers_lock = threading.Lock()
Example #6
 def __init__(
     self,
     instrumentation_info: "InstrumentationInfo",
     stateful: bool,
     resource: Resource = Resource.create_empty(),
 ):
     self.instrumentation_info = instrumentation_info
     self.metrics = set()
     self.observers = set()
     self.batcher = UngroupedBatcher(stateful)
     self.observers_lock = threading.Lock()
     self.resource = resource
Example #7
 def test_finished_collection_stateless(self):
     meter = metrics.MeterProvider().get_meter(__name__)
     batcher = UngroupedBatcher(False)
     aggregator = CounterAggregator()
     metric = metrics.Counter(
         "available memory",
         "available memory",
         "bytes",
         int,
         meter,
         ("environment", ),
     )
     aggregator.update(1.0)
     label_set = metrics.LabelSet()
     _batch_map = {}
     _batch_map[(metric, label_set)] = aggregator
     batcher._batch_map = _batch_map
     batcher.finished_collection()
     self.assertEqual(len(batcher._batch_map), 0)
Example #8
 def test_checkpoint_set(self):
     meter = metrics.MeterProvider().get_meter(__name__)
     batcher = UngroupedBatcher(True)
     aggregator = CounterAggregator()
     metric = metrics.Counter(
         "available memory",
         "available memory",
         "bytes",
         int,
         meter,
         ("environment", ),
     )
     aggregator.update(1.0)
     label_set = metrics.LabelSet()
     _batch_map = {}
     _batch_map[(metric, label_set)] = aggregator
     batcher._batch_map = _batch_map
     records = batcher.checkpoint_set()
     self.assertEqual(len(records), 1)
     self.assertEqual(records[0].metric, metric)
     self.assertEqual(records[0].label_set, label_set)
     self.assertEqual(records[0].aggregator, aggregator)
Example #9
 def test_ungrouped_batcher_process_not_stateful(self):
     meter = metrics.MeterProvider().get_meter(__name__)
     batcher = UngroupedBatcher(False)
     aggregator = SumAggregator()
     metric = metrics.Counter(
         "available memory",
         "available memory",
         "bytes",
         int,
         meter,
         ("environment", ),
     )
     labels = ()
     _batch_map = {}
     aggregator.update(1.0)
     batcher._batch_map = _batch_map
     record = metrics.Record(metric, labels, aggregator)
     batcher.process(record)
     self.assertEqual(len(batcher._batch_map), 1)
     self.assertIsNotNone(batcher._batch_map.get((metric, labels)))
     self.assertEqual(batcher._batch_map.get((metric, labels)).current, 0)
     self.assertEqual(
         batcher._batch_map.get((metric, labels)).checkpoint, 1.0)
Example #10
class Meter(metrics_api.Meter):
    """See `opentelemetry.metrics.Meter`.

    Args:
        instrumentation_info: The `InstrumentationInfo` for this meter.
        stateful: Indicates whether the meter is stateful.
    """

    def __init__(
        self, instrumentation_info: "InstrumentationInfo", stateful: bool,
    ):
        self.instrumentation_info = instrumentation_info
        self.metrics = set()
        self.batcher = UngroupedBatcher(stateful)

    def collect(self) -> None:
        """Collects all the metrics created with this `Meter` for export.

        Utilizes the batcher to create checkpoints of the current values in
        each aggregator belonging to the metrics that were created with this
        meter instance.
        """
        for metric in self.metrics:
            if metric.enabled:
                for label_set, handle in metric.handles.items():
                    # TODO: Consider storing records in memory?
                    record = Record(metric, label_set, handle.aggregator)
                    # Checkpoints the current aggregators
                    # Applies different batching logic based on type of batcher
                    self.batcher.process(record)

    def record_batch(
        self,
        label_set: LabelSet,
        record_tuples: Sequence[Tuple[metrics_api.Metric, metrics_api.ValueT]],
    ) -> None:
        """See `opentelemetry.metrics.Meter.record_batch`."""
        for metric, value in record_tuples:
            metric.UPDATE_FUNCTION(value, label_set)

    def create_metric(
        self,
        name: str,
        description: str,
        unit: str,
        value_type: Type[metrics_api.ValueT],
        metric_type: Type[metrics_api.MetricT],
        label_keys: Sequence[str] = (),
        enabled: bool = True,
    ) -> metrics_api.MetricT:
        """See `opentelemetry.metrics.Meter.create_metric`."""
        # Ignore type b/c of mypy bug in addition to missing annotations
        metric = metric_type(  # type: ignore
            name,
            description,
            unit,
            value_type,
            self,
            label_keys=label_keys,
            enabled=enabled,
        )
        self.metrics.add(metric)
        return metric

    def get_label_set(self, labels: Dict[str, str]):
        """See `opentelemetry.metrics.Meter.create_metric`.

        This implementation encodes the labels to use as a map key.

        Args:
            labels: The dictionary of label keys to label values.
        """
        if len(labels) == 0:
            return EMPTY_LABEL_SET
        return LabelSet(labels=labels)
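
The collect() docstring above describes the flow this class implements: every handle of every enabled metric is wrapped in a Record and handed to the batcher. A minimal sketch of driving this version of the Meter end to end follows; it assumes the pre-1.0 opentelemetry-sdk API used throughout these examples (default MeterProvider wiring, the meter's batcher exposed as in __init__), so treat it as illustrative rather than canonical.

from opentelemetry.sdk import metrics

meter = metrics.MeterProvider().get_meter(__name__)

# a counter with a single label key, mirroring the tests above
requests_counter = meter.create_metric(
    "requests", "requests served", "1", int, metrics.Counter,
    label_keys=("environment",),
)
label_set = meter.get_label_set({"environment": "staging"})
meter.record_batch(label_set, [(requests_counter, 25)])

meter.collect()                            # checkpoints every handle into the batcher
records = meter.batcher.checkpoint_set()   # the records an exporter would receive
meter.batcher.finished_collection()        # lets the batcher reset between exports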
Example #11
"""
This example shows how the Observer metric instrument can be used to capture
asynchronous metrics data.
"""
import psutil

from opentelemetry import metrics
from opentelemetry.sdk.metrics import LabelSet, MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher
from opentelemetry.sdk.metrics.export.controller import PushController

# Configure a stateful batcher
batcher = UngroupedBatcher(stateful=True)

metrics.set_preferred_meter_provider_implementation(lambda _: MeterProvider())
meter = metrics.get_meter(__name__)

# Exporter to export metrics to the console
exporter = ConsoleMetricsExporter()

# Configure a push controller
controller = PushController(meter=meter, exporter=exporter, interval=2)


# Callback to gather cpu usage
def get_cpu_usage_callback(observer):
    for (number, percent) in enumerate(psutil.cpu_percent(percpu=True)):
        label_set = meter.get_label_set({"cpu_number": str(number)})
        # report this cpu's usage under its label set
        observer.observe(percent, label_set)
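
The snippet ends before the observer is registered. A hedged sketch of the typical continuation follows, based on the register_observer signature shown in the Meter examples below; the metric name, description, and unit are illustrative.

meter.register_observer(
    callback=get_cpu_usage_callback,
    name="cpu_percent",
    description="per-cpu usage",
    unit="1",
    value_type=float,
    label_keys=("cpu_number",),
)

# The PushController configured above then drives collection on each interval,
# running the callback and exporting the observed values through the exporter.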
Example #12
class Meter(metrics_api.Meter):
    """See `opentelemetry.metrics.Meter`.

    Args:
        instrumentation_info: The `InstrumentationInfo` for this meter.
        stateful: Indicates whether the meter is stateful.
    """

    def __init__(
        self,
        instrumentation_info: "InstrumentationInfo",
        stateful: bool,
        resource: Resource = Resource.create_empty(),
    ):
        self.instrumentation_info = instrumentation_info
        self.metrics = set()
        self.observers = set()
        self.batcher = UngroupedBatcher(stateful)
        self.observers_lock = threading.Lock()
        self.resource = resource

    def collect(self) -> None:
        """Collects all the metrics created with this `Meter` for export.

        Utilizes the batcher to create checkpoints of the current values in
        each aggregator belonging to the metrics that were created with this
        meter instance.
        """

        self._collect_metrics()
        self._collect_observers()

    def _collect_metrics(self) -> None:
        for metric in self.metrics:
            if not metric.enabled:
                continue

            to_remove = []

            with metric.bound_instruments_lock:
                for label_set, bound_instr in metric.bound_instruments.items():
                    # TODO: Consider storing records in memory?
                    record = Record(metric, label_set, bound_instr.aggregator)
                    # Checkpoints the current aggregators
                    # Applies different batching logic based on type of batcher
                    self.batcher.process(record)

                    if bound_instr.ref_count() == 0:
                        to_remove.append(label_set)

                # Remove handles that were released
                for label_set in to_remove:
                    del metric.bound_instruments[label_set]

    def _collect_observers(self) -> None:
        with self.observers_lock:
            for observer in self.observers:
                if not observer.enabled:
                    continue

                # TODO: capture timestamp?
                if not observer.run():
                    continue

                for label_set, aggregator in observer.aggregators.items():
                    record = Record(observer, label_set, aggregator)
                    self.batcher.process(record)

    def record_batch(
        self,
        label_set: LabelSet,
        record_tuples: Sequence[Tuple[metrics_api.Metric, metrics_api.ValueT]],
    ) -> None:
        """See `opentelemetry.metrics.Meter.record_batch`."""
        for metric, value in record_tuples:
            metric.UPDATE_FUNCTION(value, label_set)

    def create_metric(
        self,
        name: str,
        description: str,
        unit: str,
        value_type: Type[metrics_api.ValueT],
        metric_type: Type[metrics_api.MetricT],
        label_keys: Sequence[str] = (),
        enabled: bool = True,
    ) -> metrics_api.MetricT:
        """See `opentelemetry.metrics.Meter.create_metric`."""
        # Ignore type b/c of mypy bug in addition to missing annotations
        metric = metric_type(  # type: ignore
            name,
            description,
            unit,
            value_type,
            self,
            label_keys=label_keys,
            enabled=enabled,
        )
        self.metrics.add(metric)
        return metric

    def register_observer(
        self,
        callback: metrics_api.ObserverCallbackT,
        name: str,
        description: str,
        unit: str,
        value_type: Type[metrics_api.ValueT],
        label_keys: Sequence[str] = (),
        enabled: bool = True,
    ) -> metrics_api.Observer:
        ob = Observer(
            callback,
            name,
            description,
            unit,
            value_type,
            self,
            label_keys,
            enabled,
        )
        with self.observers_lock:
            self.observers.add(ob)
        return ob

    def unregister_observer(self, observer: "Observer") -> None:
        with self.observers_lock:
            self.observers.remove(observer)

    def get_label_set(self, labels: Dict[str, str]):
        """See `opentelemetry.metrics.Meter.create_metric`.

        This implementation encodes the labels to use as a map key.

        Args:
            labels: The dictionary of label keys to label values.
        """
        if len(labels) == 0:
            return EMPTY_LABEL_SET
        return LabelSet(labels=labels)
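
collect() only hands records to the batcher; whether the checkpointed values survive into the next collection depends on the stateful flag passed to UngroupedBatcher (compare Example #7, whose name suggests that only stateless batchers drop their batch map). A small sketch of that difference; the aggregate import path and the stateful behaviour are assumptions consistent with these examples rather than verified API.

from opentelemetry.sdk import metrics
from opentelemetry.sdk.metrics.export.aggregate import CounterAggregator
from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher

meter = metrics.MeterProvider().get_meter(__name__)
metric = metrics.Counter(
    "requests", "requests served", "1", int, meter, ("environment",)
)
label_set = metrics.LabelSet()

aggregator = CounterAggregator()
aggregator.update(1.0)

stateless = UngroupedBatcher(False)
stateless._batch_map = {(metric, label_set): aggregator}
stateless.finished_collection()
assert len(stateless._batch_map) == 0  # stateless: batch map dropped after export

stateful = UngroupedBatcher(True)
stateful._batch_map = {(metric, label_set): aggregator}
stateful.finished_collection()
assert len(stateful._batch_map) == 1  # stateful: values kept for cumulative export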
Example #13
 def test_checkpoint_set_empty(self):
     batcher = UngroupedBatcher(True)
     records = batcher.checkpoint_set()
     self.assertEqual(len(records), 0)
Example #14
class Meter(metrics_api.Meter):
    """See `opentelemetry.metrics.Meter`.

    Args:
        source: The `MeterProvider` that created this meter.
        instrumentation_info: The `InstrumentationInfo` for this meter.
    """
    def __init__(
        self,
        source: "MeterProvider",
        instrumentation_info: "InstrumentationInfo",
    ):
        self.instrumentation_info = instrumentation_info
        self.batcher = UngroupedBatcher(source.stateful)
        self.resource = source.resource
        self.metrics = set()
        self.observers = set()
        self.observers_lock = threading.Lock()

    def collect(self) -> None:
        """Collects all the metrics created with this `Meter` for export.

        Utilizes the batcher to create checkpoints of the current values in
        each aggregator belonging to the metrics that were created with this
        meter instance.
        """

        self._collect_metrics()
        self._collect_observers()

    def _collect_metrics(self) -> None:
        for metric in self.metrics:
            if not metric.enabled:
                continue

            to_remove = []

            with metric.bound_instruments_lock:
                for labels, bound_instr in metric.bound_instruments.items():
                    # TODO: Consider storing records in memory?
                    record = Record(metric, labels, bound_instr.aggregator)
                    # Checkpoints the current aggregators
                    # Applies different batching logic based on type of batcher
                    self.batcher.process(record)

                    if bound_instr.ref_count() == 0:
                        to_remove.append(labels)

                # Remove handles that were released
                for labels in to_remove:
                    del metric.bound_instruments[labels]

    def _collect_observers(self) -> None:
        with self.observers_lock:
            for observer in self.observers:
                if not observer.enabled:
                    continue

                if not observer.run():
                    continue

                for labels, aggregator in observer.aggregators.items():
                    record = Record(observer, labels, aggregator)
                    self.batcher.process(record)

    def record_batch(
        self,
        labels: Dict[str, str],
        record_tuples: Sequence[Tuple[metrics_api.Metric, metrics_api.ValueT]],
    ) -> None:
        """See `opentelemetry.metrics.Meter.record_batch`."""
        # TODO: Avoid encoding the labels for each instrument, encode once
        # and reuse.
        for metric, value in record_tuples:
            metric.UPDATE_FUNCTION(value, labels)

    def create_metric(
        self,
        name: str,
        description: str,
        unit: str,
        value_type: Type[metrics_api.ValueT],
        metric_type: Type[metrics_api.MetricT],
        label_keys: Sequence[str] = (),
        enabled: bool = True,
    ) -> metrics_api.MetricT:
        """See `opentelemetry.metrics.Meter.create_metric`."""
        # Ignore type b/c of mypy bug in addition to missing annotations
        metric = metric_type(  # type: ignore
            name,
            description,
            unit,
            value_type,
            self,
            label_keys=label_keys,
            enabled=enabled,
        )
        self.metrics.add(metric)
        return metric

    def register_observer(
        self,
        callback: metrics_api.ObserverCallbackT,
        name: str,
        description: str,
        unit: str,
        value_type: Type[metrics_api.ValueT],
        observer_type: Type[metrics_api.ObserverT],
        label_keys: Sequence[str] = (),
        enabled: bool = True,
    ) -> metrics_api.Observer:
        ob = observer_type(
            callback,
            name,
            description,
            unit,
            value_type,
            self,
            label_keys,
            enabled,
        )
        with self.observers_lock:
            self.observers.add(ob)
        return ob

    def unregister_observer(self, observer: metrics_api.Observer) -> None:
        with self.observers_lock:
            self.observers.remove(observer)
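
record_batch above applies a single labels dict to a sequence of (metric, value) pairs, calling each metric's UPDATE_FUNCTION in turn. A brief usage sketch, with hypothetical metric names and the same assumed default MeterProvider wiring as the earlier sketches.

from opentelemetry.sdk import metrics

meter = metrics.MeterProvider().get_meter(__name__)

requests = meter.create_metric(
    "requests", "requests served", "1", int, metrics.Counter,
    label_keys=("environment",),
)
errors = meter.create_metric(
    "errors", "failed requests", "1", int, metrics.Counter,
    label_keys=("environment",),
)

# one shared labels dict is applied to every (metric, value) pair
meter.record_batch({"environment": "staging"}, [(requests, 10), (errors, 1)])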