def test_export(self):
    """A Counter record exported to the console writes the expected line."""
    provider = metrics.MeterProvider()
    exporter = ConsoleMetricsExporter()
    counter = metrics.Counter(
        "available memory",
        "available memory",
        "bytes",
        int,
        provider.get_meter(__name__),
        ("environment",),
    )
    labels = {"environment": "staging"}
    aggregator = SumAggregator()
    record = MetricRecord(counter, labels, aggregator, provider.resource)
    # Expected console line, mirroring ConsoleMetricsExporter's format.
    expected = '{}(data="{}", labels="{}", value={}, resource={})'.format(
        ConsoleMetricsExporter.__name__,
        counter,
        labels,
        aggregator.checkpoint,
        provider.resource.attributes,
    )
    # Patch stdout so the exporter's print can be asserted on.
    with mock.patch("sys.stdout") as fake_stdout:
        exporter.export([record])
        fake_stdout.write.assert_any_call(expected)
def test_export(self):
    """Exporting a (metric, label_values) pair prints the expected line."""
    exporter = ConsoleMetricsExporter()
    counter = metrics.Counter(
        "available memory",
        "available memory",
        "bytes",
        int,
        ("environment",),
    )
    values = ("staging",)
    handle = counter.get_handle(values)
    # Expected console line, mirroring ConsoleMetricsExporter's format.
    expected = '{}(data="{}", label_values="{}", metric_data={})'.format(
        ConsoleMetricsExporter.__name__, counter, values, handle
    )
    # Patch stdout so the exporter's print can be asserted on.
    with mock.patch("sys.stdout") as fake_stdout:
        exporter.export([(counter, values)])
        fake_stdout.write.assert_any_call(expected)
def start_pipeline(
    self,
    meter: metrics_api.Meter,
    exporter: MetricsExporter = None,
    interval: float = 15.0,
) -> None:
    """Method to begin the collect/export pipeline.

    Args:
        meter: The meter to collect metrics from.
        exporter: The exporter to export metrics to. Defaults to a
            ConsoleMetricsExporter when not supplied.
        interval: The collect/export interval in seconds.
    """
    # Use an identity check rather than truthiness: a caller-supplied
    # exporter object that happens to be falsy must not be discarded.
    if exporter is None:
        exporter = ConsoleMetricsExporter()
    # Track the exporter so it can be shut down with the pipeline.
    self._exporters.add(exporter)
    # TODO: Controller type configurable?
    self._controllers.append(PushController(meter, exporter, interval))
def test_export(self):
    """A Counter record with a LabelSet prints the expected console line."""
    exporter = ConsoleMetricsExporter()
    counter = metrics.Counter(
        "available memory",
        "available memory",
        "bytes",
        int,
        metrics.MeterProvider().get_meter(__name__),
        ("environment",),
    )
    label_set = metrics.LabelSet({"environment": "staging"})
    aggregator = CounterAggregator()
    record = MetricRecord(aggregator, label_set, counter)
    # Expected console line, mirroring ConsoleMetricsExporter's format.
    expected = '{}(data="{}", label_set="{}", value={})'.format(
        ConsoleMetricsExporter.__name__,
        counter,
        label_set.labels,
        aggregator.checkpoint,
    )
    # Patch stdout so the exporter's print can be asserted on.
    with mock.patch("sys.stdout") as fake_stdout:
        exporter.export([record])
        fake_stdout.write.assert_any_call(expected)
"""
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
from opentelemetry.sdk.metrics.export.aggregate import (
    HistogramAggregator,
    LastValueAggregator,
    MinMaxSumCountAggregator,
    SumAggregator,
)
from opentelemetry.sdk.metrics.view import View, ViewConfig

# Use the meter type provided by the SDK package
metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)
# Collect and export metrics to the console every 5 seconds.
metrics.get_meter_provider().start_pipeline(meter, ConsoleMetricsExporter(), 5)

# Monotonic counter of requests served.
requests_counter = meter.create_counter(
    name="requests",
    description="number of requests",
    unit="1",
    value_type=int,
)

# Recorder capturing the size of each individual request.
requests_size = meter.create_valuerecorder(
    name="requests_size",
    description="size of requests",
    unit="1",
    value_type=int,
)
import psutil

from opentelemetry import metrics
from opentelemetry.sdk.metrics import LabelSet, MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher
from opentelemetry.sdk.metrics.export.controller import PushController

# Configure a stateful batcher
# (stateful=True keeps aggregations across collection intervals)
batcher = UngroupedBatcher(stateful=True)

# NOTE(review): set_preferred_meter_provider_implementation is an older
# opentelemetry-api entry point — confirm it matches the installed version.
metrics.set_preferred_meter_provider_implementation(lambda _: MeterProvider())
meter = metrics.get_meter(__name__)

# Exporter to export metrics to the console
exporter = ConsoleMetricsExporter()

# Configure a push controller
# Pushes collected metrics to the exporter every 2 seconds.
controller = PushController(meter=meter, exporter=exporter, interval=2)


# Callback to gather cpu usage
def get_cpu_usage_callback(observer):
    # One observation per CPU core, labelled with the core's index.
    for (number, percent) in enumerate(psutil.cpu_percent(percpu=True)):
        label_set = meter.get_label_set({"cpu_number": str(number)})
        observer.observe(percent, label_set)


# NOTE(review): this register_observer call is truncated in the visible
# source — remaining keyword arguments continue beyond this chunk.
meter.register_observer(
    callback=get_cpu_usage_callback,
    name="cpu_percent",
from opentelemetry import metrics
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
from opentelemetry.exporter.otlp.metrics_exporter import OTLPMetricsExporter

# NOTE(review): Flask, requests, os, datetime, trace, Resource,
# BatchExportSpanProcessor, OTLPSpanExporter and RequestsInstrumentor are
# used below but imported outside this visible chunk — verify at file top.
app = Flask("api")


@app.route('/book/<username>')
def hello_world(username):
    # Forward the booking to the downstream service addressed by BOOK_SVC.
    status = requests.post(os.getenv("BOOK_SVC"), json={
        "card": "VISA",
        "name": username,
        "date": datetime.datetime.today().strftime('%Y-%m-%d')
    })
    if status.ok:
        # Relay the downstream JSON body to the caller on success.
        resp = status.json()
        return resp
    else:
        return 'bad request!', 400


if __name__ == '__main__':
    # Tag all telemetry from this process with the gateway service name.
    resource = Resource({"service.name": "gateway"})
    trace.get_tracer_provider().resource = resource
    # Spans go to the OTLP collector named by OTC_HOST, in batches.
    trace.get_tracer_provider().add_span_processor(BatchExportSpanProcessor(OTLPSpanExporter(os.getenv("OTC_HOST"))))
    metrics.get_meter_provider().resource = resource
    # Metrics from the requests instrumentation print to the console every second.
    metrics.get_meter_provider().start_pipeline(RequestsInstrumentor().meter, ConsoleMetricsExporter(), 1)
    app.run(debug=True, host='0.0.0.0')