def __init__(self, test_name, test_description=None):
        # The Azure exporter automatically reads the connection string from the
        # APPLICATIONINSIGHTS_CONNECTION_STRING environment variable.
        self.exporter = metrics_exporter.new_metrics_exporter()
        self.stats = stats_module.stats
        self.view_manager = self.stats.view_manager
        self.stats_recorder = self.stats.stats_recorder
        self.azure_logger = get_azure_logger(test_name)
        self.name = test_name
        self.desc = test_description

        events_measure_name = "The number of events handled by " + self.name
        events_measure_desc = ("The number of events handled by " + self.desc) if self.desc else None
        memory_measure_name = "memory usage percentage for " + self.name
        memory_measure_desc = ("memory usage percentage for " + self.desc) if self.desc else None
        cpu_measure_name = "cpu usage percentage for " + self.name
        cpu_measure_desc = ("cpu usage percentage for " + self.desc) if self.desc else None
        error_measure_name = "error count for " + self.name
        error_measure_desc = ("The number of errors that occurred while running the test for " + self.desc) if self.desc else None

        self.events_measure = measure_module.MeasureInt(
            events_measure_name, events_measure_desc, "events")
        self.memory_measure = measure_module.MeasureFloat(
            memory_measure_name, memory_measure_desc)
        self.cpu_measure = measure_module.MeasureFloat(cpu_measure_name,
                                                       cpu_measure_desc)
        self.error_measure = measure_module.MeasureInt(error_measure_name,
                                                       error_measure_desc)

        self.events_measure_view = view_module.View(
            events_measure_name, events_measure_desc, [], self.events_measure,
            aggregation_module.SumAggregation())

        self.memory_measure_view = view_module.View(
            memory_measure_name, memory_measure_desc, [], self.memory_measure,
            aggregation_module.LastValueAggregation())

        self.cpu_measure_view = view_module.View(
            cpu_measure_name, cpu_measure_desc, [], self.cpu_measure,
            aggregation_module.LastValueAggregation())

        self.error_measure_view = view_module.View(
            error_measure_name, error_measure_desc, [], self.error_measure,
            aggregation_module.CountAggregation())

        self.view_manager.register_view(self.events_measure_view)
        self.view_manager.register_view(self.memory_measure_view)
        self.view_manager.register_view(self.cpu_measure_view)
        self.view_manager.register_view(self.error_measure_view)

        self.mmap = self.stats_recorder.new_measurement_map()
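
The constructor above only wires up the measures, views and measurement map. Below is a minimal usage sketch showing how they might be used to record one test iteration; the record_iteration helper and its arguments are assumptions, not part of the original snippet.

from opencensus.tags import tag_map as tag_map_module

def record_iteration(monitor, events_handled, cpu_percent, memory_percent, error_count=0):
    # `monitor` is assumed to be an instance of the class whose __init__ is shown above.
    monitor.mmap.measure_int_put(monitor.events_measure, events_handled)
    monitor.mmap.measure_float_put(monitor.cpu_measure, cpu_percent)
    monitor.mmap.measure_float_put(monitor.memory_measure, memory_percent)
    if error_count:
        monitor.mmap.measure_int_put(monitor.error_measure, error_count)
    # Recording against an (empty) tag map feeds the registered views, which the
    # Azure metrics exporter then sends on its export interval.
    monitor.mmap.record(tag_map_module.TagMap())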
Example #2
    def report_metric(self,
                      name: str,
                      value: float,
                      description="",
                      report_to_parent: bool = False):
        """Report a metric value to the AML run and to AppInsights.
        e.g. Condensed_Binocular.report_metric(name, value)
        :param name: The name of the metric.
        :param value: The value to be reported.
        :type value: Float or integer.
        :param description: An optional description about the metric.
        :param report_to_parent: Mark True if you want to report to AML parent run.
        """
        # Report to AML
        self.run.log(name, value)
        if report_to_parent and not self.offline_run:
            self.run.parent.log(name, value)

        # Report to AppInsights
        measurement_map = stats_module.stats.stats_recorder.new_measurement_map()
        tag_map = tag_map_module.TagMap()
        measure = measure_module.MeasureFloat(name, description)
        self.set_view(name, description, measure)
        measurement_map.measure_float_put(measure, value)
        measurement_map.record(tag_map)
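
report_metric relies on a set_view helper that is not shown in this snippet. A plausible sketch follows; it is an assumption, not the original implementation, and simply registers a last-value view so the recorded measure is actually exported.

from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module

def set_view(name, description, measure):
    # Without a registered view, recorded values never reach the exporter.
    metric_view = view_module.View(name, description, [], measure,
                                   aggregation_module.LastValueAggregation())
    stats_module.stats.view_manager.register_view(metric_view)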
Example #3
    def test_constructor_defaults(self):
        name = "testName"
        description = "testMeasure"

        measure = measure_module.MeasureFloat(name=name, description=description)

        self.assertEqual(None, measure.unit)
Example #4
def setup_open_census():
    stats_stats = stats.Stats()

    app.m_response_ms = measure_module.MeasureFloat("flask_response_time",
                                                    "The request duration",
                                                    "ms")

    app.key_method = tag_key_module.TagKey("method")
    # Create the status key
    app.key_status = tag_key_module.TagKey("status")
    # Create the error key
    app.key_error = tag_key_module.TagKey("error")

    app.view_manager = stats_stats.view_manager
    app.stats_recorder = stats_stats.stats_recorder
    response_time_view = view.View(
        "response_time", "The time it took to respond",
        [app.key_method, app.key_status, app.key_error], app.m_response_ms,
        aggregation.LastValueAggregation())

    app.exporter = stackdriver.new_stats_exporter(options=stackdriver.Options(
        project_id=os.getenv('PROJECT_ID')))

    app.view_manager.register_exporter(app.exporter)
    app.view_manager.register_view(response_time_view)
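
setup_open_census only defines the measure, tag keys, view and exporter. A hedged sketch of a Flask hook that records into that view follows; the record_response_time function and the start-time attribute on flask.g are assumptions, not part of the original snippet.

import time
from flask import g, request
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module

@app.after_request
def record_response_time(response):
    # Assumes a before_request hook stored the start time on flask.g.
    elapsed_ms = (time.time() - g.request_start_time) * 1000.0
    mmap = app.stats_recorder.new_measurement_map()
    tmap = tag_map_module.TagMap()
    tmap.insert(app.key_method, tag_value_module.TagValue(request.method))
    tmap.insert(app.key_status, tag_value_module.TagValue(str(response.status_code)))
    mmap.measure_float_put(app.m_response_ms, elapsed_ms)
    mmap.record(tmap)
    return response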
Example #5
    def report_metric_with_run_tagging(self,
                                       name: str,
                                       value: float,
                                       description=""):
        """Report a metric value to the AML run and to AppInsights, and tag the parent run with the metric.
        Please note tags are mutable. By default, this method reports to AML parent run.
        e.g. Condensed_Binocular.report_metric(name, value)
        :param name: The name of the metric.
        :param value: The value to be reported.
        :type value: Float or integer.
        :param description: An optional description about the metric.
        :param report_to_parent: Mark True if you want to report to AML parent run.
        """
        # Report to AML
        self.run.log(name, value)
        if not self.offline_run:
            self.run.parent.log(name, value)
            self.run.parent.tag(name, value)

        # Report to AppInsights
        measurement_map = stats_module.stats.stats_recorder.new_measurement_map()
        tag_map = tag_map_module.TagMap()
        measure = measure_module.MeasureFloat(name, description)
        self.set_view(name, description, measure)
        measurement_map.measure_float_put(measure, value)
        measurement_map.record(tag_map)
Example #6
    def test_constructor_explicit(self):
        name = "testName"
        description = "testMeasure"
        unit = "testUnit"

        measure = measure_module.MeasureFloat(name=name, description=description, unit=unit)

        self.assertEqual("testName", measure.name)
        self.assertEqual("testMeasure", measure.description)
        self.assertEqual("testUnit", measure.unit)
Example #7
    def __init__(self, app=None, blacklist_paths=None, exporter=None):
        self.app = app
        self.blacklist_paths = blacklist_paths
        self.exporter = exporter

        self.app.m_response_ms = measure_module.MeasureFloat(
            "flask_response_time", "The request duration", "ms")

        self.app.key_method = tag_key_module.TagKey("method")
        # Create the status key
        self.app.key_status = tag_key_module.TagKey("status")
        # Create the error key
        self.app.key_error = tag_key_module.TagKey("error")

        if self.app is not None:
            self.init_app(app)
    def log_metric(
        self,
        name="",
        value="",
        description="",
        log_parent=False,
    ):
        """
        Sends a custom metric to appInsights
        :param name: name of the metric
        :param value: value of the metric
        :param description: description of the metric
        :param log_parent: not used by this logger
        :return:
        """
        measurement_map = \
            stats_module.stats.stats_recorder.new_measurement_map()
        tag_map = tag_map_module.TagMap()

        measure = measure_module.MeasureFloat(name, description)
        self.set_view(name, description, measure)
        measurement_map.measure_float_put(measure, value)
        measurement_map.record(tag_map)
Example #9
from flask import Flask, g
from opencensus.stats.exporters import prometheus_exporter as prometheus
from opencensus.stats import measure as measure_module
from opencensus.stats import aggregation
from opencensus.stats import view
from opencensus.stats import stats
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module
import time
'''A simple flask app making use of the open census lib to measure metrics in prometheus'''

m_response_ms = measure_module.MeasureFloat("flask_response_time",
                                            "The request duration", "ms")

key_method = tag_key_module.TagKey("method")
# Create the status key
key_status = tag_key_module.TagKey("status")
# Create the error key
key_error = tag_key_module.TagKey("error")

app = Flask(__name__)


def setup_open_census():
    stats_stats = stats.Stats()
    app.view_manager = stats_stats.view_manager
    app.stats_recorder = stats_stats.stats_recorder
    response_time_view = view.View("response_time",
                                   "The time it took to respond",
                                   [key_method, key_status, key_error],
Example #10
from flask import Flask
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_key as tag_key_module

from tracer import get_flask_middleware

SUPPLIER_URL = 'http://127.0.0.1:5001/get_food_vendors?target_food={}'

app = Flask(__name__)
get_flask_middleware(app)

stats_recorder = stats_module.stats.stats_recorder
# Create the tag key
key_method = tag_key_module.TagKey("method")
# Create the status key
key_status = tag_key_module.TagKey("status")
# Create the error key
key_error = tag_key_module.TagKey("error")

m_latency_ms = measure_module.MeasureFloat(
    "latency", "The latency in milliseconds per find_food request", "ms")
m_num_requests = measure_module.MeasureInt("request count",
                                           "The number of find_food requests",
                                           "By")
latency_view = view_module.View(
    "latency_graph",
    "The distribution of the latencies",
    [key_method, key_status, key_error],
    m_latency_ms,
    # Latency in buckets:
    # [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
    aggregation_module.DistributionAggregation(
        [0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]))

line_count_view = view_module.View("request_counter", "The number of requests",
                                   [key_method, key_status, key_error],
def create_metric_measure(metric_name, metric_description, metric_unit):
    # The description of our metric
    measure = measure_module.MeasureFloat(metric_name, metric_description,
                                          metric_unit)
    return measure
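
A hedged usage sketch for the helper above, showing the typical measure → view → record wiring; the names request_latency and request_latency_view are illustrative only, not from the original snippet.

from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module

latency_measure = create_metric_measure("request_latency", "Latency per request", "ms")
latency_view = view_module.View("request_latency_view", "Latency per request", [],
                                latency_measure, aggregation_module.LastValueAggregation())
stats_module.stats.view_manager.register_view(latency_view)

# Record one illustrative value against the registered view.
mmap = stats_module.stats.stats_recorder.new_measurement_map()
mmap.measure_float_put(latency_measure, 12.3)
mmap.record(tag_map_module.TagMap())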
Example #12
import time

from opencensus import tags
from opencensus.stats import aggregation
from opencensus.stats import measure
from opencensus.stats import stats
from opencensus.stats import view
from opencensus.stats.exporters import stackdriver_exporter
from opencensus.stats.exporters.base import StatsExporter
from opencensus.tags import execution_context
from opencensus.tags.propagation import binary_serializer

_logger = logging.getLogger('fireci.stats')
STATS = stats.Stats()

_m_latency = measure.MeasureFloat("latency", "The latency in milliseconds",
                                  "ms")
_m_success = measure.MeasureInt("success", "Indicates success or failure.", "1")

_key_stage = tags.TagKey("stage")

_TAGS = [
    _key_stage,
    tags.TagKey("repo_owner"),
    tags.TagKey("repo_name"),
    tags.TagKey("pull_number"),
    tags.TagKey("job_name"),
]

_METRICS_ENABLED = False
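
The module above only declares the measures, tag keys and the _METRICS_ENABLED flag. A hedged sketch of how these names might be used to time a pipeline stage follows; the measure_stage context manager is an assumption, not the original code.

import contextlib

@contextlib.contextmanager
def measure_stage(stage_name):
    if not _METRICS_ENABLED:
        yield
        return
    tag_map = tags.TagMap()
    tag_map.insert(_key_stage, tags.TagValue(stage_name))
    start = time.time()
    success = 0
    try:
        yield
        success = 1
    finally:
        # Record latency and success for the stage, tagged with its name.
        mmap = STATS.stats_recorder.new_measurement_map()
        mmap.measure_float_put(_m_latency, (time.time() - start) * 1000)
        mmap.measure_int_put(_m_success, success)
        mmap.record(tag_map)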

import json
import pytest
import time
from datetime import datetime
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.stats import view as view_module
from opencensus.stats import view_data as view_data_module
from opencensus.stats import metric_utils
from opencensus_ext_newrelic import NewRelicStatsExporter
from newrelic_telemetry_sdk import MetricClient


# The latency in milliseconds
MEASURE = measure_module.MeasureFloat("number", "A number!", "things")

GAUGE_VIEWS = {
    "last": view_module.View(
        "last",
        "A last value",
        ("tag",),
        MEASURE,
        aggregation_module.LastValueAggregation(),
    )
}
COUNT_VIEWS = {
    "count": view_module.View(
        "count", "A count", ("tag",), MEASURE, aggregation_module.CountAggregation()
    ),
    "sum": view_module.View(
from opencensus.stats import view_data as view_data_module
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module

MiB = 1 << 20
FRONTEND_KEY = tag_key_module.TagKey("myorg_keys_frontend")
FRONTEND_KEY_FLOAT = tag_key_module.TagKey("myorg_keys_frontend_FLOAT")
FRONTEND_KEY_INT = tag_key_module.TagKey("myorg_keys_frontend_INT")
FRONTEND_KEY_STR = tag_key_module.TagKey("myorg_keys_frontend_STR")

VIDEO_SIZE_MEASURE = measure_module.MeasureInt(
    "myorg_measure_video_size_test2", "size of processed videos", "By")

VIDEO_SIZE_MEASURE_FLOAT = measure_module.MeasureFloat(
    "myorg_measure_video_size_test_float", "size of processed videos float",
    "By")

VIDEO_SIZE_VIEW_NAME = "myorg_views_video_size_test2"
VIDEO_SIZE_DISTRIBUTION = aggregation_module.DistributionAggregation(
    [16.0 * MiB, 256.0 * MiB])
VIDEO_SIZE_VIEW = view_module.View(VIDEO_SIZE_VIEW_NAME,
                                   "processed video size over time",
                                   [FRONTEND_KEY], VIDEO_SIZE_MEASURE,
                                   VIDEO_SIZE_DISTRIBUTION)
REGISTERED_VIEW = {
    'test1_myorg_views_video_size_test2': {
        'documentation': 'processed video size over time',
        'labels': ['myorg_keys_frontend'],
        'name': 'test1_myorg_views_video_size_test2'
    }
Example #15
from opencensus.trace.status import Status
from opencensus.trace.tracer import noop_tracer

from opencensus.stats import stats
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module

key_method = tag_key_module.TagKey("method")
key_error  = tag_key_module.TagKey("error")
key_status = tag_key_module.TagKey("status")

m_latency_ms = measure_module.MeasureFloat("pymemcache/latency", "The latency in milliseconds per method", "ms")
m_calls = measure_module.MeasureInt("pymemcache/calls", "The number of calls made", "1")

def enable_metrics_views():
    calls_view = view_module.View("pymemcache/calls", "The number of calls",
        [key_method, key_error, key_status],
        m_calls,
        aggregation_module.CountAggregation())

    latency_view = view_module.View("pymemcache/latency", "The distribution of the latencies",
        [key_method, key_error, key_status],
        m_latency_ms,
        aggregation_module.DistributionAggregation([
            # Latency in buckets:
            # [>=0ms, >=5ms, >=10ms, >=25ms, >=40ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s, >=10s, >=20s]
            0, 5, 10, 25, 40, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000, 10000, 20000]))
Example #16
from opencensus.ext.azure import metrics_exporter
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module


    


# Create the measures
# The latency in milliseconds
m_latency_ms = measure_module.MeasureFloat("repl_latency", "The latency in milliseconds per REPL loop", "ms")

# Counts/groups the lengths of lines read in.
m_line_lengths = measure_module.MeasureInt("repl_line_lengths", "The distribution of line lengths", "By")

# The stats recorder
stats_recorder = stats_module.stats.stats_recorder

# Create the tag key
key_method = tag_key_module.TagKey("method")
# Create the status key
key_status = tag_key_module.TagKey("status")
# Create the error key
key_error = tag_key_module.TagKey("error")

latency_view = view_module.View(
    def __init__(self):
        """
        Define client and server tags 
        """
        # Client Tags
        # gRPC server status code received, e.g. OK, CANCELLED, DEADLINE_EXCEEDED
        self.grpc_client_status = tag_key.TagKey("grpc_client_status")

        # Full gRPC method name, including package, service and method,
        # e.g. google.bigtable.v2.Bigtable/CheckAndMutateRow
        self.grpc_client_method = tag_key.TagKey("grpc_client_method")

        # Server Tags
        # gRPC server status code returned, e.g. OK, CANCELLED, DEADLINE_EXCEEDED
        self.grpc_server_status = tag_key.TagKey("grpc_server_status")

        # Full gRPC method name, including package, service and method,
        # e.g. com.exampleapi.v4.BookshelfService/Checkout
        self.grpc_server_method = tag_key.TagKey("grpc_server_method")
        """
        Client Measures 
        """
        # Number of messages sent in the RPC (always 1 for non-streaming RPCs)
        self.grpc_client_sent_messages_per_rpc = measure.MeasureInt(
            name="grpc.io/client/sent_messages_per_rpc",
            description="Number of messages sent in the RPC",
            unit=self.count)

        # Total bytes sent across all request messages per RPC
        self.grpc_client_sent_bytes_per_rpc = measure.MeasureFloat(
            name="grpc.io/client/sent_bytes_per_rpc",
            description="Total bytes sent across all request messages per RPC",
            unit=self.byte)

        # Number of response messages received per RPC (always 1 for non-streaming RPCs)
        self.grpc_client_received_messages_per_rpc = measure.MeasureInt(
            name="grpc.io/client/received_messages_per_rpc",
            description="Number of response messages received per RPC",
            unit=self.count)

        # Total bytes received across all response messages per RPC
        self.grpc_client_received_bytes_per_rpc = measure.MeasureFloat(
            name="grpc.io/client/received_bytes_per_rpc",
            description=
            "Total bytes received across all response messages per RPC",
            unit=self.byte)

        # Time between first byte of request sent to last byte of response received, or terminal error
        self.grpc_client_roundtrip_latency = measure.MeasureFloat(
            name="grpc.io/client/roundtrip_latency",
            description="Time between first byte of request sent to"
            " last byte of response received or terminal error.",
            unit=self.millisecond)

        # Propagated from the server and should have the same value as "grpc.io/server/latency"
        self.grpc_client_server_latency = measure.MeasureFloat(
            name="grpc.io/client/server_latency",
            description="Server latency in msecs",
            unit=self.millisecond)

        # The total number of client RPCs ever opened, including those that have not completed
        self.grpc_client_started_rpcs = measure.MeasureInt(
            name="grpc.io/client/started_rpcs",
            description="Number of started client RPCs.",
            unit=self.count)

        # Total messages sent per method
        self.grpc_client_sent_messages_per_method = measure.MeasureInt(
            name="grpc.io/client/sent_messages_per_method",
            description="Total messages sent per method.",
            unit=self.count)

        # Total messages received per method
        self.grpc_client_received_messages_per_method = measure.MeasureInt(
            name="grpc.io/client/received_messages_per_method",
            description="Total messages received per method.",
            unit=self.count)

        # Total bytes sent per method, recorded real-time as bytes are sent
        self.grpc_client_sent_bytes_per_method = measure.MeasureFloat(
            name="grpc.io/client/sent_bytes_per_method",
            description=
            "Total bytes sent per method, recorded real-time as bytes are sent.",
            unit=self.byte)

        # Total bytes received per method, recorded real-time as bytes are received
        self.grpc_client_received_bytes_per_method = measure.MeasureFloat(
            name="grpc.io/client/received_bytes_per_method",
            description="Total bytes received per method,"
            " recorded real-time as bytes are received.",
            unit=self.byte)
        """
        Server Measures 
        """
        # Number of messages received in each RPC. Has value 1 for non-streaming RPCs
        self.grpc_server_received_messages_per_rpc = measure.MeasureInt(
            name="grpc.io/server/received_messages_per_rpc",
            description="Number of messages received in each RPC",
            unit=self.count)

        # Total bytes received across all messages per RPC
        self.grpc_server_received_bytes_per_rpc = measure.MeasureFloat(
            name="grpc.io/server/received_bytes_per_rpc",
            description="Total bytes received across all messages per RPC",
            unit=self.byte)

        # Number of messages sent in each RPC. Has value 1 for non-streaming RPCs
        self.grpc_server_sent_messages_per_rpc = measure.MeasureInt(
            name="grpc.io/server/sent_messages_per_rpc",
            description="Number of messages sent in each RPC",
            unit=self.count)

        # Total bytes sent in across all response messages per RPC
        self.grpc_server_sent_bytes_per_rpc = measure.MeasureFloat(
            name="grpc.io/server/sent_bytes_per_rpc",
            description="Total bytes sent across all response messages per RPC",
            unit=self.byte)

        # Time between first byte of request received to last byte of response sent, or terminal error
        self.grpc_server_server_latency = measure.MeasureFloat(
            name="grpc.io/server/server_latency",
            description="Time between first byte of request received"
            " to last byte of response sent or terminal error.",
            unit=self.millisecond)

        # The total number of server RPCs ever opened, including those that have not completed
        self.grpc_server_started_rpcs = measure.MeasureInt(
            name="grpc.io/server/started_rpcs",
            description="Number of started server RPCs.",
            unit=self.count)

        # Total messages sent per method
        self.grpc_server_sent_messages_per_method = measure.MeasureInt(
            name="grpc.io/server/sent_messages_per_method",
            description="Total messages sent per method.",
            unit=self.count)

        # Total messages received per method
        self.grpc_server_received_messages_per_method = measure.MeasureInt(
            name="grpc.io/server/received_messages_per_method",
            description="Total messages received per method.",
            unit=self.count)

        # Total bytes sent per method, recorded real-time as bytes are sent
        self.grpc_server_sent_bytes_per_method = measure.MeasureFloat(
            name="grpc.io/server/sent_bytes_per_method",
            description=
            "Total bytes sent per method, recorded real-time as bytes are sent.",
            unit=self.byte)

        # Total bytes received per method, recorded real-time as bytes are received
        self.grpc_server_received_bytes_per_method = measure.MeasureFloat(
            name="grpc.io/server/received_bytes_per_method",
            description=
            "Total bytes received per method, recorded real-time as bytes are received.",
            unit=self.byte)
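
The unit attributes used throughout the class above (self.count, self.byte, self.millisecond) are defined elsewhere in the original class; presumably they are the standard OpenCensus unit strings:

        # Assumed definitions (not shown in this snippet):
        self.count = "1"
        self.byte = "By"
        self.millisecond = "ms"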
Example #18
from opencensus.ext.ocagent import (
    stats_exporter,
    trace_exporter,
)
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module

# Create the measures
# The latency in milliseconds
m_latency_ms = measure_module.MeasureFloat(
    "spanner/latency", "The latency in milliseconds per method", "ms")
# The stats recorder
stats_recorder = stats_module.stats.stats_recorder

key_method = tag_key_module.TagKey("method")
key_status = tag_key_module.TagKey("status")
key_error = tag_key_module.TagKey("error")
key_service = tag_key_module.TagKey("service")
status_OK = tag_value_module.TagValue("OK")
status_ERROR = tag_value_module.TagValue("ERROR")

tag_value_DDL = tag_value_module.TagValue("DDL")
tag_value_DML = tag_value_module.TagValue("DML")
tag_value_DQL = tag_value_module.TagValue("DQL")
tag_value_SPANNER_V1 = tag_value_module.TagValue("spanner_v1")
tag_value_DBAPI = tag_value_module.TagValue("spanner_dbapi")
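
A hedged usage sketch (not part of the original snippet): record one latency sample tagged with the predefined method, status and service values.

mmap = stats_recorder.new_measurement_map()
tmap = tag_map_module.TagMap()
tmap.insert(key_method, tag_value_DQL)
tmap.insert(key_status, status_OK)
tmap.insert(key_service, tag_value_SPANNER_V1)
mmap.measure_float_put(m_latency_ms, 7.5)  # illustrative latency in ms
mmap.record(tmap)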
Example #19
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module

from prometheus_flask_exporter import PrometheusMetrics

# [END monitoring_sli_metrics_opencensus_setup]

# set up measures
# [START monitoring_sli_metrics_opencensus_measure]
m_request_count = measure_module.MeasureInt("python_request_count",
                                            "total requests", "requests")
m_failed_request_count = measure_module.MeasureInt(
    "python_failed_request_count", "failed requests", "requests")
m_response_latency = measure_module.MeasureFloat("python_response_latency",
                                                 "response latency", "s")
# [END monitoring_sli_metrics_opencensus_measure]

# set up stats recorder
stats_recorder = stats_module.stats.stats_recorder
# [START monitoring_sli_metrics_opencensus_view]
# set up views
latency_view = view_module.View(
    "python_response_latency",
    "The distribution of the latencies",
    [],
    m_response_latency,
    aggregation_module.DistributionAggregation(
        [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]),
)
Example #20
from threading import Thread
from flask import Flask, request

app = Flask(__name__)

FOOD_SUPPLIER_ADDRESS = "http://34.86.204.38:5000"
FOOD_VENDOR_ADDRESS = "http://34.86.232.249:5000"

SUBMISSION_FORM = """
    <form method="GET" action="/search-vendors" enctype="multipart/form-data">
        <input type="text" name="food_product">
        <input type="submit">
    </form>
"""

LATENCY_MEASURE = measure.MeasureFloat("request_latency",
                                       "The request latency in ms", "ms")

RPC_MEASURE = measure.MeasureInt("rpc_count", "The number of RPCs", "1")

FLOAT_AGGREGATION_DISTRIBUTION = aggregation.DistributionAggregation([
    1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0,
    5000.0
])

INT_AGGREGATION_DISTRIBUTION = aggregation.DistributionAggregation(
    [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000])

FOOD_SERVICE_LATENCY_VIEW = view.View(
    "foodservice_request_latency_distribution",
    "The distribution of the request latencies for FoodService calls", [],
    LATENCY_MEASURE, FLOAT_AGGREGATION_DISTRIBUTION)
Example #21
# See the License for the specific language governing permissions and
# limitations under the License.

# [START monitoring_opencensus_metrics_quickstart]

from random import random
import time

from opencensus.ext.stackdriver import stats_exporter
from opencensus.stats import aggregation
from opencensus.stats import measure
from opencensus.stats import stats
from opencensus.stats import view

# A measure that represents task latency in ms.
LATENCY_MS = measure.MeasureFloat("task_latency",
                                  "The task latency in milliseconds", "ms")

# A view of the task latency measure that aggregates measurements according to
# a histogram with predefined bucket boundaries. This aggregate is periodically
# exported to Stackdriver Monitoring.
LATENCY_VIEW = view.View(
    "task_latency_distribution",
    "The distribution of the task latencies",
    [],
    LATENCY_MS,
    # Latency in buckets: [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s]
    aggregation.DistributionAggregation(
        [100.0, 200.0, 400.0, 1000.0, 2000.0, 4000.0]))


def main():
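    # Hedged sketch of a typical body for main() (the original is not shown in
    # this listing): register the view and the Stackdriver exporter, then record
    # a series of random latencies so the distribution view has data to export.
    stats.stats.view_manager.register_view(LATENCY_VIEW)
    exporter = stats_exporter.new_stats_exporter()
    stats.stats.view_manager.register_exporter(exporter)

    mmap = stats.stats.stats_recorder.new_measurement_map()
    for _ in range(100):
        mmap.measure_float_put(LATENCY_MS, random() * 5 * 1000)
        mmap.record()
        time.sleep(1)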
Example #22
 def test_measure_creation(self):
     measure.MeasureFloat("task_latency",
                          "The task latency in milliseconds", "ms")
Example #23
import time
from contextvars import ContextVar
from typing import Any, Callable, Dict, List

from flask import request
from opencensus.stats import measure, view, aggregation
from opencensus.trace import (
    execution_context,
    samplers,
    span_context as span_ctx,
    tracer as tracer_module,
)

from recidiviz.utils import monitoring

m_duration_s = measure.MeasureFloat(
    "function_duration", "The time it took for this function to run", "s"
)

duration_distribution_view = view.View(
    "recidiviz/function_durations",
    "The distribution of the function durations",
    [monitoring.TagKey.REGION, monitoring.TagKey.FUNCTION],
    m_duration_s,
    aggregation.DistributionAggregation(monitoring.exponential_buckets(0.1, 5, 10)),
)
monitoring.register_views([duration_distribution_view])

# Contains a list of all the addresses of all of the functions in our stack that are currently being timed. Used to
# detect recursion.
stack: ContextVar[List[int]] = ContextVar("stack", default=[])
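
A hedged sketch of how m_duration_s and the stack ContextVar above might be used; the timed decorator is an assumption, and recording here goes straight through the global OpenCensus stats object rather than through the project's monitoring helpers.

from opencensus.stats import stats as stats_module
from opencensus.tags import tag_map as tag_map_module

def timed(func):
    def wrapper(*args, **kwargs):
        if id(func) in stack.get():
            # Already being timed higher up the call stack (recursion); skip.
            return func(*args, **kwargs)
        token = stack.set(stack.get() + [id(func)])
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            stack.reset(token)
            mmap = stats_module.stats.stats_recorder.new_measurement_map()
            mmap.measure_float_put(m_duration_s, time.perf_counter() - start)
            mmap.record(tag_map_module.TagMap())
    return wrapper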
Example #24
import time

from opencensus.ext.azure import metrics_exporter
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module

stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder

CHIPS_EATEN_MEASURE = measure_module.MeasureFloat("chips_eaten",
                                                  "number of chips eaten",
                                                  "chips")
CHIPS_EATEN_VIEW = view_module.View("chips_eaten_view",
                                    "number of chips eaten", [],
                                    CHIPS_EATEN_MEASURE,
                                    aggregation_module.SumAggregation())


def main():
    # Enable metrics
    # Set the interval in seconds in which you want to send metrics
    exporter = metrics_exporter.new_metrics_exporter(export_interval=5)
    view_manager.register_exporter(exporter)

    view_manager.register_view(CHIPS_EATEN_VIEW)
    mmap = stats_recorder.new_measurement_map()
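    # Hedged continuation (assumed, not shown in this listing): put a value on the
    # measurement map and record it so the Azure exporter has data to send.
    mmap.measure_float_put(CHIPS_EATEN_MEASURE, 1000.0)
    mmap.record(tag_map_module.TagMap())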
Example #25
# limitations under the License.

import time

from opencensus.ext.azure import metrics_exporter
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module

stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder

REQUEST_MEASURE = measure_module.MeasureFloat("Requests", "number of requests",
                                              "requests")
NUM_REQUESTS_VIEW = view_module.View("Number of Requests",
                                     "number of requests", ["url"],
                                     REQUEST_MEASURE,
                                     aggregation_module.SumAggregation())


def main():
    # Enable metrics
    # Set the interval in seconds in which you want to send metrics
    # TODO: you need to specify the instrumentation key in a connection string
    # and place it in the APPLICATIONINSIGHTS_CONNECTION_STRING
    # environment variable.
    exporter = metrics_exporter.new_metrics_exporter()
    view_manager.register_exporter(exporter)
Example #26
import os

from opencensus.ext.zenoss import stats_exporter as zenoss
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module

# Setup aliases to make working with OpenCensus easier.
stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder

# Create a measure.
m_latency_ms = measure_module.MeasureFloat("task_latency",
                                           "The task latency in milliseconds",
                                           "ms")

# Create a view using the measure.
latency_view = view_module.View(
    "task_latency_distribution",
    "The distribution of the task latencies",
    [],
    m_latency_ms,
    # Latency in buckets: [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s]
    aggregation_module.DistributionAggregation(
        [100.0, 200.0, 400.0, 1000.0, 2000.0, 4000.0]))


def main():
    address = os.environ.get("ZENOSS_ADDRESS", zenoss.DEFAULT_ADDRESS)
from opencensus.stats.exporters import stackdriver_exporter as stackdriver
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module

MiB = 1 << 20
FRONTEND_KEY = tag_key_module.TagKey("my.org/keys/frontend")
FRONTEND_KEY_FLOAT = tag_key_module.TagKey("my.org/keys/frontend-FLOAT")
FRONTEND_KEY_INT = tag_key_module.TagKey("my.org/keys/frontend-INT")
FRONTEND_KEY_STR = tag_key_module.TagKey("my.org/keys/frontend-STR")

VIDEO_SIZE_MEASURE = measure_module.MeasureInt(
    "my.org/measure/video_size_test2", "size of processed videos", "By")

VIDEO_SIZE_MEASURE_FLOAT = measure_module.MeasureFloat(
    "my.org/measure/video_size_test-float", "size of processed videos-float",
    "By")

VIDEO_SIZE_VIEW_NAME = "my.org/views/video_size_test2"
VIDEO_SIZE_DISTRIBUTION = aggregation_module.DistributionAggregation(
    [16.0 * MiB, 256.0 * MiB])
VIDEO_SIZE_VIEW = view_module.View(VIDEO_SIZE_VIEW_NAME,
                                   "processed video size over time",
                                   [FRONTEND_KEY], VIDEO_SIZE_MEASURE,
                                   VIDEO_SIZE_DISTRIBUTION)


class _Client(object):
    def __init__(self, client_info=None):
        self.client_info = client_info
from opencensus.trace.tracer import noop_tracer

from opencensus.stats import stats
from opencensus.stats import aggregation
from opencensus.stats import measure
from opencensus.stats import view
from opencensus.tags import tag_key
from opencensus.tags import tag_map
from opencensus.tags import tag_value

key_error = tag_key.TagKey("error")
key_method = tag_key.TagKey("method")
key_status = tag_key.TagKey("status")

m_latency_ms = measure.MeasureFloat("redispy/latency",
                                    "The latency per call in milliseconds",
                                    "ms")
m_key_length = measure.MeasureInt("redispy/key_length",
                                  "The length of each key", "By")
m_value_length = measure.MeasureInt("redispy/value_length",
                                    "The length of each value", "By")


def register_views():
    all_tag_keys = [key_method, key_error, key_status]
    calls_view = view.View("redispy/calls", "The number of calls",
                           all_tag_keys, m_latency_ms,
                           aggregation.CountAggregation())

    latency_view = view.View(
        "redispy/latency",