def test_record_with_attachment(self):
    boundaries = [1, 2, 3]
    distribution = {1: "test"}
    distribution_aggregation = aggregation_module.DistributionAggregation(
        boundaries=boundaries, distribution=distribution)
    name = "testName"
    description = "testMeasure"
    unit = "testUnit"
    measure = measure_module.MeasureInt(
        name=name, description=description, unit=unit)
    columns = ["key1", "key2"]
    view = view_module.View(
        name=name,
        description=description,
        columns=columns,
        measure=measure,
        aggregation=distribution_aggregation)
    start_time = datetime.utcnow()
    attachments = {"One": "one", "Two": "two"}
    end_time = datetime.utcnow()
    view_data = view_data_module.ViewData(
        view=view, start_time=start_time, end_time=end_time)
    context = mock.Mock()
    context.map = {'key1': 'val1', 'key2': 'val2'}
    time = utils.to_iso_str()
    value = 1

    view_data.record(
        context=context, value=value, timestamp=time,
        attachments=attachments)
    tag_values = view_data.get_tag_values(
        tags=context.map, columns=view.columns)
    tuple_vals = tuple(tag_values)

    self.assertEqual(['val1', 'val2'], tag_values)
    self.assertIsNotNone(view_data.tag_value_aggregation_data_map)
    self.assertTrue(tuple_vals in view_data.tag_value_aggregation_data_map)
    self.assertIsNotNone(
        view_data.tag_value_aggregation_data_map[tuple_vals])
    self.assertEqual(
        attachments,
        view_data.tag_value_aggregation_data_map[tuple_vals]
        .exemplars[1].attachments)
def test_create_timeseries_from_distribution(self):
    """Check for explicit 0-bound bucket for SD export."""
    agg = aggregation_module.DistributionAggregation(
        aggregation_type=aggregation_module.Type.DISTRIBUTION)

    view = view_module.View(
        name="example.org/test_view",
        description="example.org/test_view",
        columns=['tag_key'],
        measure=mock.Mock(),
        aggregation=agg,
    )

    v_data = view_data_module.ViewData(
        view=view,
        start_time=TEST_TIME_STR,
        end_time=TEST_TIME_STR,
    )

    # Aggregation over (10 * range(10)) for buckets [2, 4, 6, 8]
    dad = aggregation_data_module.DistributionAggregationData(
        mean_data=4.5,
        count_data=100,
        sum_of_sqd_deviations=825,
        counts_per_bucket=[20, 20, 20, 20, 20],
        bounds=[2, 4, 6, 8],
        exemplars={mock.Mock() for ii in range(5)})
    v_data._tag_value_aggregation_data_map = {('tag_value', ): dad}

    v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

    exporter = stackdriver.StackdriverStatsExporter()
    time_series_list = exporter.create_time_series_list(v_data)

    self.assertEqual(len(time_series_list), 1)
    [time_series] = time_series_list

    self.check_labels(
        time_series.metric.labels, {'tag_key': 'tag_value'},
        include_opencensus=True)
    self.assertEqual(len(time_series.points), 1)
    [point] = time_series.points
    dv = point.value.distribution_value
    self.assertEqual(100, dv.count)
    self.assertEqual(825.0, dv.sum_of_squared_deviation)
    self.assertEqual([0, 20, 20, 20, 20, 20], dv.bucket_counts)
    self.assertEqual([0, 2, 4, 6, 8],
                     dv.bucket_options.explicit_buckets.bounds)
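# The transformation this test asserts can be sketched as follows. This is a
# simplified illustration of how an exporter might prepend the explicit
# 0 bound (and an empty underflow bucket) that Stackdriver requires, not the
# library's actual implementation; the function name is hypothetical.
def prepend_zero_bound(bounds, counts_per_bucket):
    """Return (bounds, counts) with an explicit 0 lower bound prepended."""
    if not bounds or bounds[0] != 0:
        # e.g. [2, 4, 6, 8] / [20, 20, 20, 20, 20]
        # ->   [0, 2, 4, 6, 8] / [0, 20, 20, 20, 20, 20]
        return [0] + list(bounds), [0] + list(counts_per_bucket)
    return list(bounds), list(counts_per_bucket)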
def test_get_metrics(self):
    """Test that Stats converts recorded values into metrics."""
    stats = stats_module.stats

    # Check that metrics are empty before view registration
    initial_metrics = list(stats.get_metrics())
    self.assertEqual(initial_metrics, [])

    mock_measure = Mock(spec=measure_oc.MeasureFloat)
    mock_md = Mock(spec=metric_descriptor.MetricDescriptor)
    mock_md.type = \
        metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION
    mock_view = Mock(spec=view.View)
    mock_view.measure = mock_measure
    mock_view.get_metric_descriptor.return_value = mock_md
    mock_view.columns = ['k1']

    stats.view_manager.measure_to_view_map.register_view(mock_view, Mock())

    # Check that metrics are still empty until we record
    empty_metrics = list(stats.get_metrics())
    self.assertEqual(empty_metrics, [])

    mm = stats.stats_recorder.new_measurement_map()
    mm._measurement_map = {mock_measure: 1.0}
    mock_view.aggregation = aggregation.DistributionAggregation()
    mock_view.new_aggregation_data.return_value = \
        mock_view.aggregation.new_aggregation_data()
    tm = tag_map.TagMap()
    tm.insert('k1', 'v1')
    mm.record(tm)

    metrics = list(stats.get_metrics())
    self.assertEqual(len(metrics), 1)
    [metric] = metrics
    self.assertEqual(len(metric.time_series), 1)
    [ts] = metric.time_series
    self.assertEqual(len(ts.points), 1)
    [point] = ts.points
    self.assertTrue(isinstance(point.value, value.ValueDistribution))
def enable_metrics_views():
    calls_view = view_module.View(
        "pymemcache/calls", "The number of calls",
        [key_method, key_error, key_status],
        m_calls,
        aggregation_module.CountAggregation())

    latency_view = view_module.View(
        "pymemcache/latency", "The distribution of the latencies",
        [key_method, key_error, key_status],
        m_latency_ms,
        aggregation_module.DistributionAggregation([
            # Latency in buckets:
            # [>=0ms, >=5ms, >=10ms, >=25ms, >=40ms, >=50ms, >=75ms,
            #  >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s,
            #  >=4s, >=6s, >=10s, >=20s]
            0, 5, 10, 25, 40, 50, 75, 100, 200, 400, 600, 800,
            1000, 2000, 4000, 6000, 10000, 20000
        ]))

    view_manager = stats.Stats().view_manager
    view_manager.register_view(calls_view)
    view_manager.register_view(latency_view)
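# A minimal sketch of recording against the views registered above, assuming
# m_calls is a MeasureInt, m_latency_ms is a MeasureFloat, and the tag
# modules are imported as tag_map_module / tag_value_module; record_call and
# its arguments are hypothetical.
def record_call(method_name, latency_ms):
    mmap = stats.Stats().stats_recorder.new_measurement_map()
    mmap.measure_int_put(m_calls, 1)
    mmap.measure_float_put(m_latency_ms, latency_ms)
    tmap = tag_map_module.TagMap()
    tmap.insert(key_method, tag_value_module.TagValue(method_name))
    mmap.record(tmap)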
def test_new_aggregation_data_defaults(self):
    distribution_aggregation = aggregation_module.DistributionAggregation()
    agg_data = distribution_aggregation.new_aggregation_data()
    self.assertEqual([], agg_data.bounds)
def test_init_bad_boundaries(self):
    """Check that boundaries must be sorted and unique."""
    with self.assertRaises(ValueError):
        aggregation_module.DistributionAggregation([1, 3, 2])
    with self.assertRaises(ValueError):
        aggregation_module.DistributionAggregation([1, 1, 2])
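# The validation these tests exercise amounts to requiring strictly
# increasing boundaries. A rough equivalent of that check (not the library's
# exact code; check_boundaries is a hypothetical name):
def check_boundaries(boundaries):
    if any(b2 <= b1 for b1, b2 in zip(boundaries, boundaries[1:])):
        raise ValueError("boundaries must be sorted and unique")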
}

COUNT_VIEWS = {
    "count": view_module.View(
        "count", "A count", ("tag",), MEASURE,
        aggregation_module.CountAggregation()
    ),
    "sum": view_module.View(
        "sum", "A sum", ("tag",), MEASURE,
        aggregation_module.SumAggregation()
    ),
}

DISTRIBUTION_VIEWS = {
    "distribution": view_module.View(
        "distribution", "A distribution", ("tag",), MEASURE,
        aggregation_module.DistributionAggregation([50.0, 200.0]),
    )
}

VIEWS = {}
VIEWS.update(GAUGE_VIEWS)
VIEWS.update(COUNT_VIEWS)
VIEWS.update(DISTRIBUTION_VIEWS)

TEST_TIME = time.time()
EXPECTED_TIMESTAMP = int(TEST_TIME * 1000.0)
TEST_TIMESTAMP = datetime.utcfromtimestamp(TEST_TIME)


class InvalidPoint(object):
    value = "invalid"
def test_new_aggregation_data_explicit(self):
    boundaries = [1, 2]
    distribution_aggregation = aggregation_module.DistributionAggregation(
        boundaries=boundaries)
    agg_data = distribution_aggregation.new_aggregation_data()
    self.assertEqual(boundaries, agg_data.bounds)
# Create the tag key
key_method = tag_key_module.TagKey("method")
# Create the status key
key_status = tag_key_module.TagKey("status")
# Create the error key
key_error = tag_key_module.TagKey("error")

latency_view = view_module.View(
    "demo_latency",
    "The distribution of the latencies",
    [key_method, key_status, key_error],
    m_latency_ms,
    # Latency in buckets:
    # [>=1ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms,
    #  >=800ms, >=1s, >=2s, >=4s, >=6s]
    aggregation_module.DistributionAggregation(
        [1, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]))

line_count_view = view_module.View(
    "demo_lines_in",
    "The number of lines from standard input",
    [key_method, key_status, key_error],
    m_line_lengths,
    aggregation_module.CountAggregation())

line_length_view = view_module.View(
    "demo_line_lengths",
    "Groups the lengths of keys in buckets",
    [key_method, key_status, key_error],
    m_line_lengths,
    # Lengths in buckets:
    # [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80B, >=100B,
    #  >=200B, >=400B, >=600B, >=800B, >=1000B]
    aggregation_module.DistributionAggregation(
        [0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000]))
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder

# Create a measure.
m_latency_ms = measure_module.MeasureFloat(
    "task_latency", "The task latency in milliseconds", "ms")

# Create a view using the measure.
latency_view = view_module.View(
    "task_latency_distribution",
    "The distribution of the task latencies",
    [],
    m_latency_ms,
    # Latency in buckets: [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s]
    aggregation_module.DistributionAggregation(
        [100.0, 200.0, 400.0, 1000.0, 2000.0, 4000.0]))


def main():
    address = os.environ.get("ZENOSS_ADDRESS", zenoss.DEFAULT_ADDRESS)
    api_key = os.environ.get("ZENOSS_API_KEY")
    if not api_key:
        sys.exit("ZENOSS_API_KEY must be set")

    # Create Zenoss exporter.
    exporter = zenoss.new_stats_exporter(
        options=zenoss.Options(
            address=address, api_key=api_key, source="app.example.com"),
        interval=10)

    # Register Zenoss exporter.
    view_manager.register_exporter(exporter)
# Create the error key
key_error = tag_key_module.TagKey("error")

m_latency_ms = measure_module.MeasureFloat(
    "latency", "The latency in milliseconds per find_food request", "ms")

m_num_requests = measure_module.MeasureInt(
    "request count", "The number of find_food requests", "By")

latency_view = view_module.View(
    "latency_graph",
    "The distribution of the latencies",
    [key_method, key_status, key_error],
    m_latency_ms,
    # Latency in buckets:
    # [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms,
    #  >=800ms, >=1s, >=2s, >=4s, >=6s]
    aggregation_module.DistributionAggregation(
        [0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]))

line_count_view = view_module.View(
    "request_counter",
    "The number of requests",
    [key_method, key_status, key_error],
    m_num_requests,
    aggregation_module.CountAggregation())


@app.route('/')
def target_food_input():
    return render_template('food_input_form.html')


@app.route('/', methods=['POST'])
def target_food_input_post():
    target_food = request.form['target_food']
def test_stats_record_sync(self):
    # Use a suffix to avoid collisions with cached objects.
    suffix = str(os.getgid())
    tag_key = "SampleKeySyncTest%s" % suffix
    measure_name = "SampleMeasureNameSyncTest%s" % suffix
    measure_description = "SampleDescriptionSyncTest%s" % suffix
    view_name = "SampleViewNameSyncTest%s" % suffix
    view_description = "SampleViewDescriptionSyncTest%s" % suffix

    FRONTEND_KEY = tag_key_module.TagKey(tag_key)
    VIDEO_SIZE_MEASURE = measure_module.MeasureInt(
        measure_name, measure_description, "By")
    VIDEO_SIZE_VIEW_NAME = view_name
    VIDEO_SIZE_DISTRIBUTION = aggregation_module.DistributionAggregation(
        [0.0, 16.0 * MiB, 256.0 * MiB])
    VIDEO_SIZE_VIEW = view_module.View(
        VIDEO_SIZE_VIEW_NAME, view_description, [FRONTEND_KEY],
        VIDEO_SIZE_MEASURE, VIDEO_SIZE_DISTRIBUTION)

    stats = stats_module.Stats()
    view_manager = stats.view_manager
    stats_recorder = stats.stats_recorder

    client = monitoring_v3.MetricServiceClient()
    exporter = stackdriver.StackdriverStatsExporter(
        options=stackdriver.Options(project_id=PROJECT),
        client=client,
        transport=sync.SyncTransport)
    view_manager.register_exporter(exporter)

    # Register view.
    view_manager.register_view(VIDEO_SIZE_VIEW)

    # Sleep for [1, 10] milliseconds to fake work.
    time.sleep(random.randint(1, 10) / 1000.0)

    # Process video.
    # Record the processed video size.
    tag_value = tag_value_module.TagValue("1200")
    tag_map = tag_map_module.TagMap()
    tag_map.insert(FRONTEND_KEY, tag_value)
    measure_map = stats_recorder.new_measurement_map()
    measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 25 * MiB)
    measure_map.record(tag_map)

    # Sleep for [1, 10] milliseconds to fake wait.
    time.sleep(random.randint(1, 10) / 1000.0)

    @retry(wait_fixed=RETRY_WAIT_PERIOD,
           stop_max_attempt_number=RETRY_MAX_ATTEMPT)
    def get_metric_descriptors(self, exporter, view_description):
        name = exporter.client.project_path(PROJECT)
        list_metrics_descriptors = exporter.client.list_metric_descriptors(
            name)
        element = next(
            (element for element in list_metrics_descriptors
             if element.description == view_description), None)
        self.assertIsNotNone(element)
        self.assertEqual(element.description, view_description)
        self.assertEqual(element.unit, "By")

    get_metric_descriptors(self, exporter, view_description)
    span_context as span_ctx,
    tracer as tracer_module,
)
from recidiviz.utils import monitoring

m_duration_s = measure.MeasureFloat(
    "function_duration", "The time it took for this function to run", "s")

duration_distribution_view = view.View(
    "recidiviz/function_durations",
    "The distribution of the function durations",
    [monitoring.TagKey.REGION, monitoring.TagKey.FUNCTION],
    m_duration_s,
    aggregation.DistributionAggregation(
        monitoring.exponential_buckets(0.1, 5, 10)),
)
monitoring.register_views([duration_distribution_view])

# Contains the addresses of all of the functions in our stack that are
# currently being timed. Used to detect recursion.
stack: ContextVar[List[int]] = ContextVar("stack", default=[])


def span(func: Callable) -> Callable:
    """Creates a new span for this function in the trace.

    This allows us to visualize how much of the processing time of a given
    request is spent inside of this function without relying on log entries.
    Additionally, the duration of the function call is recorded as a metric.
    """
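# monitoring.exponential_buckets is project-specific and not shown here; a
# plausible sketch, assuming the signature (start, factor, count) implied by
# the call exponential_buckets(0.1, 5, 10) above:
def exponential_buckets(start: float, factor: float, count: int) -> List[float]:
    """Return `count` boundaries growing geometrically from `start`."""
    # e.g. 0.1, 0.5, 2.5, 12.5, ... for exponential_buckets(0.1, 5, 10)
    return [start * factor ** i for i in range(count)]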
def register_views():
    all_tag_keys = [key_method, key_error, key_status]

    calls_view = view.View(
        "redispy/calls", "The number of calls", all_tag_keys,
        m_latency_ms, aggregation.CountAggregation())

    latency_view = view.View(
        "redispy/latency",
        "The distribution of the latencies per method",
        all_tag_keys,
        m_latency_ms,
        aggregation.DistributionAggregation([
            # Latency in buckets:
            # [
            #   >=0ms, >=5ms, >=10ms, >=25ms, >=40ms, >=50ms, >=75ms,
            #   >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s,
            #   >=4s, >=6s, >=10s, >=20s, >=50s, >=100s
            # ]
            0, 5, 10, 25, 40, 50, 75, 1e2, 2e2, 4e2, 6e2, 8e2,
            1e3, 2e3, 4e3, 6e3, 1e4, 2e4, 5e4, 1e5
        ]))

    key_lengths_view = view.View(
        "redispy/key_lengths",
        "The distribution of the key lengths",
        all_tag_keys,
        m_key_length,
        aggregation.DistributionAggregation([
            # Key length buckets:
            # [
            #   0B, 5B, 10B, 20B, 50B, 100B, 200B, 500B, 1000B, 2000B, 5000B
            # ]
            0, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000
        ]))

    value_lengths_view = view.View(
        "redispy/value_lengths",
        "The distribution of the value lengths",
        all_tag_keys,
        m_value_length,
        aggregation.DistributionAggregation([
            # Value length buckets:
            # [
            #   0B, 5B, 10B, 20B, 50B, 100B, 200B, 500B, 1000B, 2000B,
            #   5000B, 10000B, 20000B
            # ]
            0, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000
        ]))

    view_manager = stats.stats.view_manager
    for each_view in [
            calls_view, latency_view, key_lengths_view, value_lengths_view
    ]:
        view_manager.register_view(each_view)
from opencensus.stats import view

# A measure that represents task latency in ms.
LATENCY_MS = measure.MeasureFloat(
    "task_latency", "The task latency in milliseconds", "ms")

# A view of the task latency measure that aggregates measurements according to
# a histogram with predefined bucket boundaries. This aggregate is
# periodically exported to Stackdriver Monitoring.
LATENCY_VIEW = view.View(
    "task_latency_distribution",
    "The distribution of the task latencies",
    [],
    LATENCY_MS,
    # Latency in buckets: [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s]
    aggregation.DistributionAggregation(
        [100.0, 200.0, 400.0, 1000.0, 2000.0, 4000.0]))


def main():
    # Register the view. Measurements are only aggregated and exported if
    # they're associated with a registered view.
    stats.stats.view_manager.register_view(LATENCY_VIEW)

    # Create the Stackdriver stats exporter and start exporting metrics in
    # the background, once every 60 seconds by default.
    exporter = stats_exporter.new_stats_exporter()
    print('Exporting stats to project "{}"'.format(
        exporter.options.project_id))

    # Record 100 fake latency values between 0 and 5 seconds.
    for num in range(100):
        ms = random.random() * 5 * 1000
        mmap = stats.stats.stats_recorder.new_measurement_map()
        mmap.measure_float_put(LATENCY_MS, ms)
        mmap.record()
from opencensus.tags import TagMap

LATENCY_MS = measure.MeasureFloat(
    "task_latency", "The task latency in milliseconds", "ms"
)

# A view of the task latency measure that aggregates measurements according to
# a histogram with predefined bucket boundaries. This aggregate is
# periodically exported to Stackdriver Monitoring.
LATENCY_VIEW = view.View(
    "task_latency_distribution",
    "The distribution of the task latencies",
    ["mylabel"],
    LATENCY_MS,
    # Latency in buckets:
    # [>=0ms, >=100ms, >=120ms, >=400ms, >=1s, >=2.5s, >=5s]
    aggregation.DistributionAggregation(
        [100.0, 120.0, 400.0, 1000.0, 2500.0, 5000.0]),
)


def main():
    # Register the view. Measurements are only aggregated and exported if
    # they're associated with a registered view.
    stats.stats.view_manager.register_view(LATENCY_VIEW)

    # Create the Stackdriver stats exporter and start exporting metrics in
    # the background, once every 60 seconds by default.
    exporter = stats_exporter.new_stats_exporter()
    print('Exporting stats to project "{}"'.format(exporter.options.project_id))

    # Register exporter to the view manager.
    stats.stats.view_manager.register_exporter(exporter)
m_failed_request_count = measure_module.MeasureInt(
    "python_failed_request_count", "failed requests", "requests")
m_response_latency = measure_module.MeasureFloat(
    "python_response_latency", "response latency", "s")
# [END monitoring_sli_metrics_opencensus_measure]

# set up stats recorder
stats_recorder = stats_module.stats.stats_recorder

# [START monitoring_sli_metrics_opencensus_view]
# set up views
latency_view = view_module.View(
    "python_response_latency",
    "The distribution of the latencies",
    [],
    m_response_latency,
    aggregation_module.DistributionAggregation(
        [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]),
)

request_count_view = view_module.View(
    "python_request_count",
    "total requests",
    [],
    m_request_count,
    aggregation_module.CountAggregation(),
)

failed_request_count_view = view_module.View(
    "python_failed_request_count",
    "failed requests",
    [],
    m_failed_request_count,
    aggregation_module.CountAggregation(),
)
import random

from opencensus.stats import aggregation as aggregation_module
from opencensus.stats.exporters import stackdriver_exporter as stackdriver
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_key as tag_key_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module

MiB = 1 << 20
FRONTEND_KEY = tag_key_module.TagKey("my.org/keys/frontend")
VIDEO_SIZE_MEASURE = measure_module.MeasureInt(
    "my.org/measure/video_size_test2", "size of processed videos", "By")
VIDEO_SIZE_VIEW_NAME = "my.org/views/video_size_test2"
VIDEO_SIZE_DISTRIBUTION = aggregation_module.DistributionAggregation(
    [0.0, 16.0 * MiB, 256.0 * MiB])
VIDEO_SIZE_VIEW = view_module.View(
    VIDEO_SIZE_VIEW_NAME, "processed video size over time",
    [FRONTEND_KEY], VIDEO_SIZE_MEASURE, VIDEO_SIZE_DISTRIBUTION)

stats = stats_module.Stats()
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder

exporter = stackdriver.new_stats_exporter(
    stackdriver.Options(project_id="opencensus-node"))
view_manager.register_exporter(exporter)

# Register view.
view_manager.register_view(VIDEO_SIZE_VIEW)
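# A minimal sketch of recording one value against VIDEO_SIZE_VIEW, mirroring
# the synchronous test above; the "1200" tag value and the 25 MiB size are
# placeholder data.
tag_map = tag_map_module.TagMap()
tag_map.insert(FRONTEND_KEY, tag_value_module.TagValue("1200"))
measure_map = stats_recorder.new_measurement_map()
measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 25 * MiB)
measure_map.record(tag_map)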
FOOD_VENDOR_ADDRESS = "http://34.86.232.249:5000"
SUBMISSION_FORM = """
<form method="GET" action="/search-vendors" enctype="multipart/form-data">
    <input type="text" name="food_product">
    <input type="submit">
</form>
"""

LATENCY_MEASURE = measure.MeasureFloat(
    "request_latency", "The request latency in ms", "ms")
RPC_MEASURE = measure.MeasureInt(
    "rpc_count", "The number of RPCs", "1")

FLOAT_AGGREGATION_DISTRIBUTION = aggregation.DistributionAggregation([
    1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0,
    1000.0, 2000.0, 5000.0
])
INT_AGGREGATION_DISTRIBUTION = aggregation.DistributionAggregation(
    [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000])

FOOD_SERVICE_LATENCY_VIEW = view.View(
    "foodservice_request_latency_distribution",
    "The distribution of the request latencies for FoodService calls",
    [],
    LATENCY_MEASURE,
    FLOAT_AGGREGATION_DISTRIBUTION)

FOOD_VENDOR_LATENCY_VIEW = view.View(
    "foodvendor_request_latency_distribution",
    "The distribution of the request latencies for FoodVendor calls",
    [],
    LATENCY_MEASURE,
    FLOAT_AGGREGATION_DISTRIBUTION)