def PercentageDistribution(
    name, num_buckets=1000, reset_after=False, description=None,
    field_spec=_MISSING):
  """Returns a metric handle for a cumulative distribution for percentage.

  The distribution handle returned by this method is better suited for
  reporting percentage values than the default one. The bucketing is
  optimized for values in [0,100].

  Args:
    name: The name of this metric.
    num_buckets: This metric buckets the percentage values before
      reporting. This argument controls the number of the bucket the range
      [0,100] is divided in. The default gives you 0.1% resolution.
    reset_after: Should the metric be reset after reporting.
    description: A string description of the metric.
    field_spec: A sequence of ts_mon.Field objects to specify the field
      schema.
  """
  # Use float division explicitly: under Python 2 integer division,
  # 100 / num_buckets would be 0 for any num_buckets > 100 (including the
  # default of 1000), collapsing every sample into the first bucket.
  bucket_width = 100.0 / num_buckets
  # The last bucket actually covers [100, 100 + 100.0/num_buckets), so it
  # corresponds to values that exactly match 100%.
  b = ts_mon.FixedWidthBucketer(bucket_width, num_buckets)
  return ts_mon.CumulativeDistributionMetric(
      name, bucketer=b, description=description, field_spec=field_spec)
def CumulativeSmallIntegerDistribution(name, reset_after=False):
  """Returns a cumulative-distribution metric handle named |name|.

  Unlike CumulativeDistribution, the underlying metric buckets values
  uniformly (width-1 buckets) rather than geometrically, which suits
  distributions of nonnegative integers in roughly the range 0 to 100.
  """
  uniform_bucketer = ts_mon.FixedWidthBucketer(1)
  return ts_mon.CumulativeDistributionMetric(
      name, bucketer=uniform_bucketer)
def SecondsDistribution(name):
  """Returns a cumulative-distribution metric handle named |name|.

  The returned handle uses geometric bucketing tuned for recording
  handling times in seconds, covering roughly 1 second to 32 days —
  better suited for that purpose than the default bucketing.
  """
  seconds_bucketer = ts_mon.GeometricBucketer(
      growth_factor=_SECONDS_BUCKET_FACTOR)
  return ts_mon.CumulativeDistributionMetric(name, bucketer=seconds_bucketer)
def CumulativeSecondsDistribution(name, scale=1, reset_after=False,
                                  description=None, field_spec=_MISSING):
  """Returns a cumulative-distribution metric handle named |name|.

  The returned handle uses geometric bucketing tuned for recording
  handling times in seconds, covering roughly 1 second to 32 days at the
  default scale. |scale| shifts the covered range (e.g. scale=0.1 covers
  .1 seconds to 3.2 days) and sets the size of the first bucket.

  Args:
    name: string name of metric
    scale: scaling factor of buckets, and size of the first bucket.
      default: 1
    reset_after: Should the metric be reset after reporting.
    description: A string description of the metric.
    field_spec: A sequence of ts_mon.Field objects to specify the field
      schema.
  """
  seconds_bucketer = ts_mon.GeometricBucketer(
      growth_factor=_SECONDS_BUCKET_FACTOR, scale=scale)
  return ts_mon.CumulativeDistributionMetric(
      name,
      bucketer=seconds_bucketer,
      units=ts_mon.MetricsDataUnits.SECONDS,
      description=description,
      field_spec=field_spec)
def CumulativeDistribution(name, reset_after=False):
  """Returns a cumulative-distribution metric handle named |name|."""
  # Uses the library's default (geometric) bucketing.
  return ts_mon.CumulativeDistributionMetric(name)
# Metric declarations for frontend events reported from the browser via the
# ts_mon JavaScript handler.
from gae_ts_mon.handlers import TSMonJSHandler
from google.appengine.api import users

from infra_libs import ts_mon

# Fields attached to every client-reported metric below.
STANDARD_FIELDS = [
    ts_mon.StringField('client_id'),
    ts_mon.StringField('host_name'),
    ts_mon.BooleanField('document_visible'),
]

# User action metrics.
ISSUE_CREATE_LATENCY_METRIC = ts_mon.CumulativeDistributionMetric(
    'monorail/frontend/issue_create_latency',
    ('Latency between Issue Entry form submission and page load of '
     'the subsequent issue page.'),
    field_spec=STANDARD_FIELDS,
    units=ts_mon.MetricsDataUnits.MILLISECONDS)
ISSUE_UPDATE_LATENCY_METRIC = ts_mon.CumulativeDistributionMetric(
    'monorail/frontend/issue_update_latency',
    ('Latency between Issue Update form submission and page load of '
     'the subsequent issue page.'),
    field_spec=STANDARD_FIELDS,
    units=ts_mon.MetricsDataUnits.MILLISECONDS)
AUTOCOMPLETE_POPULATE_LATENCY_METRIC = ts_mon.CumulativeDistributionMetric(
    'monorail/frontend/autocomplete_populate_latency',
    ('Latency between page load and autocomplete options loading.'),
    field_spec=STANDARD_FIELDS,
    units=ts_mon.MetricsDataUnits.MILLISECONDS)
# NOTE(review): declaration continues past the end of this chunk.
CHARTS_SWITCH_DATE_RANGE_METRIC = ts_mon.CounterMetric(
    'monorail/frontend/charts/switch_date_range',
def CumulativeDistributionMetric(name, reset_after=False, description=None,
                                 bucketer=None, field_spec=_MISSING):
  """Returns a cumulative-distribution metric handle named |name|."""
  # Thin wrapper that forwards straight to the underlying ts_mon metric
  # class of the same name.
  metric = ts_mon.CumulativeDistributionMetric(
      name,
      description=description,
      bucketer=bucketer,
      field_spec=field_spec)
  return metric
# Metric declarations for buildbot master step reporting.
from infra_libs import ts_mon

# Fields attached to the per-step metrics below.
step_field_spec = [
    ts_mon.StringField('builder'),
    ts_mon.StringField('master'),
    ts_mon.StringField('project_id'),
    ts_mon.StringField('result'),
    ts_mon.StringField('slave'),
    ts_mon.StringField('step_name'),
    ts_mon.StringField('subproject_tag'),
]

step_durations = ts_mon.CumulativeDistributionMetric(
    'buildbot/master/builders/steps/durations',
    'Time (in seconds) from step start to step end',
    step_field_spec,
    units=ts_mon.MetricsDataUnits.SECONDS,
    # Use fixed-width bucketer up to 2.7 hours with 10-second precision.
    bucketer=ts_mon.FixedWidthBucketer(10, 1000))
step_counts = ts_mon.CounterMetric(
    'buildbot/master/builders/steps/count',
    'Count of step results, per builder and step',
    step_field_spec)

# NOTE(review): this field list continues past the end of this chunk.
field_spec = [
    ts_mon.StringField('builder'),
    ts_mon.StringField('master'),
    ts_mon.StringField('project_id'),
    ts_mon.StringField('result'),
    ts_mon.StringField('slave'),
# issue comments, stars, spam verdicts and spam verdict history in parallel # with promises. cnxn_pool = ConnectionPool(settings.db_cnxn_pool_size) # MonorailConnection maintains a dictionary of connections to SQL databases. # Each is identified by an int shard ID. # And there is one connection to the master DB identified by key MASTER_CNXN. MASTER_CNXN = 'master_cnxn' CONNECTION_COUNT = ts_mon.CounterMetric( 'monorail/sql/connection_count', 'Count of connections made to the SQL database.', [ts_mon.BooleanField('success')]) DB_CNXN_LATENCY = ts_mon.CumulativeDistributionMetric( 'monorail/sql/db_cnxn_latency', 'Time needed to establish a DB connection.', None) DB_QUERY_LATENCY = ts_mon.CumulativeDistributionMetric( 'monorail/sql/db_query_latency', 'Time needed to make a DB query.', [ts_mon.StringField('type')]) DB_COMMIT_LATENCY = ts_mon.CumulativeDistributionMetric( 'monorail/sql/db_commit_latency', 'Time needed to make a DB commit.', None) DB_ROLLBACK_LATENCY = ts_mon.CumulativeDistributionMetric( 'monorail/sql/db_rollback_latency', 'Time needed to make a DB rollback.', None) DB_RETRY_COUNT = ts_mon.CounterMetric('monorail/sql/db_retry_count', 'Count of queries retried.', None)
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from gae_ts_mon.handlers import TSMonJSHandler from infra_libs import ts_mon FIELDS = [ ts_mon.IntegerField('fe_version'), ts_mon.BooleanField('signed_in'), ] METRICS = [ ts_mon.CumulativeDistributionMetric( 'chromeperf/load/page', 'page loadEventEnd - fetchStart', units=ts_mon.MetricsDataUnits.MILLISECONDS, field_spec=FIELDS), ts_mon.CumulativeDistributionMetric( 'chromeperf/load/chart', 'chart load latency', units=ts_mon.MetricsDataUnits.MILLISECONDS, field_spec=FIELDS), ts_mon.CumulativeDistributionMetric( 'chromeperf/load/alerts', 'alerts load latency', units=ts_mon.MetricsDataUnits.MILLISECONDS, field_spec=FIELDS), ts_mon.CumulativeDistributionMetric( 'chromeperf/action/triage', 'alert triage latency',
import datetime
import logging
import random
import re
import threading
import time

from infra_libs import ts_mon

from contextlib import contextmanager

from google.appengine.api import app_identity

# Distribution of time spent in each named profiler phase, in milliseconds.
PHASE_TIME = ts_mon.CumulativeDistributionMetric(
    'monorail/servlet/phase_time',
    'Time spent in profiler phases, in ms',
    [ts_mon.StringField('phase')])

# trace_service does not like very long names.
MAX_PHASE_NAME_LENGTH = 200


# NOTE(review): class body continues past the end of this chunk.
class Profiler(object):
  """Object to record and help display request processing profiling info.

  The Profiler class holds a list of phase objects, which can hold
  additional phase objects (which are subphases).  Each phase or
  subphase represents some meaningful part of this application's HTTP
  request processing.
  """

  _COLORS = [
# True on no errors or if all failed attempts were successfully retried. 'success', # Total number of errors seen (some may have been fixed with retries). 'error_count', ], ) count_metric = ts_mon.CounterMetric( 'proc/outer_loop/count', 'Counter of loop iterations for this process, by success or failure', [ts_mon.StringField('status')]) success_metric = ts_mon.BooleanMetric('proc/outer_loop/success', 'Set immediately before the loop exits', None) durations_metric = ts_mon.CumulativeDistributionMetric( 'proc/outer_loop/durations', 'Times (in seconds) taken to execute the task', None) def loop(task, sleep_timeout, duration=None, max_errors=None, time_mod=time): """Runs the task in a loop for a given duration. Handles and logs all uncaught exceptions. ``task`` callback should return True on success, and False (or raise an exception) in error. Doesn't leak any exceptions (including KeyboardInterrupt). Args: @param task: Callable with no arguments returning True or False. @param sleep_timeout: A function returning how long to sleep between task invocations (sec), called once per loop.