Example #1
    def test_reset_cumulative_metrics(self):
        gauge = gae_ts_mon.GaugeMetric('gauge', 'foo', None)
        counter = gae_ts_mon.CounterMetric('counter', 'foo', None)
        gauge.set(5)
        counter.increment()
        self.assertEqual(5, gauge.get())
        self.assertEqual(1, counter.get())

        config._reset_cumulative_metrics()
        self.assertEqual(5, gauge.get())
        self.assertIsNone(counter.get())
Example #2
  def test_flush_metrics_successfully(self, mock_flush):
    # We have a task_num and are due for sending metrics.
    time_now = 10000
    datetime_now = datetime.datetime.utcfromtimestamp(time_now)
    more_than_min_ago = datetime_now - datetime.timedelta(seconds=61)
    interface.state.last_flushed = more_than_min_ago
    entity = shared.get_instance_entity()
    entity.task_num = 2
    # Global metrics must be erased after a flush.
    test_global_metric = gae_ts_mon.GaugeMetric('test', 'foo', None)
    test_global_metric.set(42)
    interface.register_global_metrics([test_global_metric])
    self.assertEqual(42, test_global_metric.get())
    self.assertTrue(config.flush_metrics_if_needed(time_now))
    self.assertIsNone(test_global_metric.get())
    mock_flush.assert_called_once_with()
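
# The test above exercises two gating conditions: the instance has been
# assigned a task_num, and more than a minute has passed since the last
# flush. A minimal, hypothetical restatement of that gate follows (the real
# config.flush_metrics_if_needed may differ in detail):
import datetime


def _should_flush(time_now, last_flushed, task_num, min_interval_sec=60):
  if task_num is None or task_num < 0:
    return False  # Without a task_num, metrics cannot be attributed yet.
  now = datetime.datetime.utcfromtimestamp(time_now)
  return now - last_flushed > datetime.timedelta(seconds=min_interval_sec)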
Example #3
add_build_cycle_duration = _duration_adder(  # pragma: no branch
    'cycle_durations', 'Duration between build creation and completion',
    lambda b: _ts_delta_sec(b.create_time, b.end_time))

# requires the argument to have start_time and end_time.
add_build_run_duration = _duration_adder(  # pragma: no branch
    'run_durations', 'Duration between build start and completion',
    lambda b: _ts_delta_sec(b.start_time, b.end_time))

# requires the argument to have create_time and start_time.
add_build_scheduling_duration = _duration_adder(  # pragma: no branch
    'scheduling_durations', 'Duration between build creation and start',
    lambda b: _ts_delta_sec(b.create_time, b.start_time))

BUILD_COUNT_PROD = gae_ts_mon.GaugeMetric(
    _METRIC_PREFIX_PROD + 'count', 'Number of pending/running prod builds',
    _build_fields('bucket', 'builder', 'status'))
BUILD_COUNT_EXPERIMENTAL = gae_ts_mon.GaugeMetric(
    _METRIC_PREFIX_EXPERIMENTAL + 'count',
    'Number of pending/running experimental builds',
    _build_fields('bucket', 'builder', 'status'))

MAX_AGE_SCHEDULED = gae_ts_mon.FloatMetric(
    'buildbucket/builds/max_age_scheduled',
    'Age of the oldest SCHEDULED build.',
    (_build_fields('bucket', 'builder') +
     [gae_ts_mon.BooleanField('must_be_never_leased')]),
    units=gae_ts_mon.MetricsDataUnits.SECONDS)
SEQUENCE_NUMBER_GEN_DURATION_MS = gae_ts_mon.CumulativeDistributionMetric(
    'buildbucket/sequence_number/gen_duration',
    'Duration of a sequence number generation in ms',
Example #4
        gae_ts_mon.StringField('subproject_id'),
        gae_ts_mon.StringField('pool'),
    ])

# Global metric. Metric fields:
# - project_id: e.g. 'chromium'
# - subproject_id: e.g. 'blink'. Set to empty string if not used.
# - pool: e.g. 'Chrome'
# - spec_name: name of a job specification, e.g. '<master>:<builder>'
#     for buildbot jobs.
# - status: 'pending' or 'running'.
_jobs_active = gae_ts_mon.GaugeMetric(
    'jobs/active',
    'Number of running, pending or otherwise active jobs.', [
        gae_ts_mon.StringField('spec_name'),
        gae_ts_mon.StringField('project_id'),
        gae_ts_mon.StringField('subproject_id'),
        gae_ts_mon.StringField('pool'),
        gae_ts_mon.StringField('status'),
    ])
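
# A hedged usage sketch for the gauge above: the field names come from the
# definition, while the count and field values are hypothetical. Being a
# global metric, it is normally set by the instance computing the value.
_jobs_active.set(
    12,
    fields={
        'spec_name': 'master.tryserver:linux_rel',  # '<master>:<builder>'.
        'project_id': 'chromium',
        'subproject_id': '',  # Empty string when not used.
        'pool': 'Chrome',
        'status': 'pending',
    })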


# Global metric. Target field: hostname = 'autogen:<executor_id>' (bot id).
_executors_pool = gae_ts_mon.StringMetric(
    'executors/pool',
    'Pool name for a given job executor.',
    None)


# Global metric. Target fields:
# - hostname = 'autogen:<executor_id>' (bot id).
Example #5
        bucketer=BUCKETER_48_HR,
        units=gae_ts_mon.MetricsDataUnits.SECONDS), lambda b:
    (b.complete_time - b.create_time).total_seconds())

# requires the argument to have non-None start_time and complete_time.
add_build_run_duration = _adder(  # pragma: no branch
    gae_ts_mon.NonCumulativeDistributionMetric(
        'buildbucket/builds/run_durations',
        'Duration between build start and completion',
        _BUILD_DURATION_FIELDS,
        bucketer=BUCKETER_48_HR,
        units=gae_ts_mon.MetricsDataUnits.SECONDS), lambda b:
    (b.complete_time - b.start_time).total_seconds())

CURRENTLY_PENDING = gae_ts_mon.GaugeMetric('buildbucket/builds/pending',
                                           'Number of pending builds',
                                           _string_fields('bucket'))
CURRENTLY_RUNNING = gae_ts_mon.GaugeMetric('buildbucket/builds/running',
                                           'Number of running builds',
                                           _string_fields('bucket'))
LEASE_LATENCY_SEC = gae_ts_mon.NonCumulativeDistributionMetric(
    'buildbucket/builds/never_leased_duration',
    'Duration between build creation and the first time it is leased',
    _string_fields('bucket'),
    bucketer=BUCKETER_24_HR,
    units=gae_ts_mon.MetricsDataUnits.SECONDS)
SCHEDULING_LATENCY_SEC = gae_ts_mon.NonCumulativeDistributionMetric(
    'buildbucket/builds/scheduling_duration',
    'Duration of a build remaining in SCHEDULED state',
    _string_fields('bucket'),
    bucketer=BUCKETER_48_HR,
Example #6
import instance_group_managers

# Overrides to create app-global metrics.
GLOBAL_TARGET_FIELDS = {
    # Name of the module reporting the metric.
    'job_name': '',
    # Version of the app reporting the metric.
    'hostname': '',
    # ID of the instance reporting the metric.
    'task_num': 0,
}

GLOBAL_METRICS = {
    'config_max_instances':
    gae_ts_mon.GaugeMetric(
        'machine_provider/gce_backend/config/instances/max',
        'Maximum number of instances currently configured.',
        [gae_ts_mon.StringField('instance_template')]),
    'config_min_instances':
    gae_ts_mon.GaugeMetric(
        'machine_provider/gce_backend/config/instances/min',
        'Minimum number of instances currently configured.',
        [gae_ts_mon.StringField('instance_template')]),
    'instances':
    gae_ts_mon.GaugeMetric('machine_provider/gce_backend/instances',
                           'Current count of the number of instances.',
                           [gae_ts_mon.StringField('instance_template')]),
}
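
# A hedged sketch of reporting one of these app-global gauges, assuming
# Metric.set accepts a target_fields= override; fields= carries the metric
# field declared above, and GLOBAL_TARGET_FIELDS supplies the target override
# (the template name and count below are hypothetical; compare
# compute_global_metrics in Example #11).
GLOBAL_METRICS['config_max_instances'].set(
    100,
    fields={'instance_template': 'base-template'},
    target_fields=GLOBAL_TARGET_FIELDS)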

config_valid = gae_ts_mon.BooleanMetric(
    'machine_provider/gce_backend/config/valid',
    'Whether or not the current config is valid.',
Example #7
    ])

code_coverage_cq_errors = gae_ts_mon.CounterMetric(
    'code-coverage/cq-bot-errors',
    'Number of cq builds with coverage data step failures', [
        gae_ts_mon.StringField('project'),
        gae_ts_mon.StringField('bucket'),
        gae_ts_mon.StringField('builder'),
    ])

code_coverage_full_reports = gae_ts_mon.CounterMetric(
    'code-coverage/full-reports',
    'Number of whole-codebase coverage reports',
    [
        gae_ts_mon.StringField('host'),
        gae_ts_mon.StringField('project'),  # Gerrit project.
        gae_ts_mon.StringField('ref'),
        gae_ts_mon.StringField('builder'),  # <luci_project>/<bucket>/<builder>
    ])

code_coverage_report_timestamp = gae_ts_mon.GaugeMetric(
    'code-coverage/report_timestamp',
    'Timestamp of the completion of the last report',
    [
        gae_ts_mon.StringField('host'),
        gae_ts_mon.StringField('project'),  # Gerrit project.
        gae_ts_mon.StringField('ref'),
        gae_ts_mon.StringField('builder'),  # <luci_project>/<bucket>/<builder>
        gae_ts_mon.BooleanField('is_success'),
    ])
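
# A hedged usage sketch for the counters above; the field values are
# hypothetical, and the builder string follows the
# <luci_project>/<bucket>/<builder> convention noted in the definition.
code_coverage_full_reports.increment(fields={
    'host': 'chromium.googlesource.com',
    'project': 'chromium/src',
    'ref': 'refs/heads/main',
    'builder': 'chromium/ci/linux-code-coverage',
})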
Example #8
        gae_ts_mon.StringField('project_id'),
        gae_ts_mon.StringField('subproject_id'),
        gae_ts_mon.StringField('pool'),
    ])

# Global metric. Metric fields:
# - project_id: e.g. 'chromium'
# - subproject_id: e.g. 'blink'. Set to empty string if not used.
# - pool: e.g. 'Chrome'
# - spec_name: name of a job specification, e.g. '<master>:<builder>'
#     for buildbot jobs.
# - status: 'pending' or 'running'.
_jobs_active = gae_ts_mon.GaugeMetric(
    'jobs/active', 'Number of running, pending or otherwise active jobs.', [
        gae_ts_mon.StringField('spec_name'),
        gae_ts_mon.StringField('project_id'),
        gae_ts_mon.StringField('subproject_id'),
        gae_ts_mon.StringField('pool'),
        gae_ts_mon.StringField('status'),
    ])

# Global metric. Target field: hostname = 'autogen:<executor_id>' (bot id).
_executors_pool = gae_ts_mon.StringMetric(
    'executors/pool', 'Pool name for a given job executor.', None)

# Global metric. Target fields:
# - hostname = 'autogen:<executor_id>' (bot id).
# Status value must be 'ready', 'running', or anything else, possibly
# swarming-specific, when it cannot run a job. E.g. 'quarantined' or
# 'dead'.
_executors_status = gae_ts_mon.StringMetric('executors/status',
                                            'Status of a job executor.', None)
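
# A hedged sketch of reporting per-executor values, assuming Metric.set
# accepts a target_fields= override carrying the 'autogen:<executor_id>'
# hostname described above (the bot id here is hypothetical).
bot_id = 'swarm42-m4'
_executors_pool.set(
    'Chrome', target_fields={'hostname': 'autogen:' + bot_id})
_executors_status.set(
    'ready', target_fields={'hostname': 'autogen:' + bot_id})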
Example #9
reports_processed = gae_ts_mon.CounterMetric(
    'predator/reports_count',
    'Metric counting the number of crash reports that Predator has processed. '
    'Contains fields describing whether Predator was successful at finding a '
    'regression range, components, or suspect changes for each report.', [
        gae_ts_mon.BooleanField('found_suspects'),
        gae_ts_mon.BooleanField('found_components'),
        gae_ts_mon.BooleanField('has_regression_range'),
        gae_ts_mon.StringField('client_id'),
        gae_ts_mon.BooleanField('success')
    ])
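
# A hedged usage sketch: one increment per processed report, with hypothetical
# field values describing the outcome; client_id is e.g. 'clusterfuzz' or
# 'cracas', as the metrics below note.
reports_processed.increment(fields={
    'found_suspects': True,
    'found_components': False,
    'has_regression_range': True,
    'client_id': 'clusterfuzz',
    'success': True,
})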

wrong_cls = gae_ts_mon.GaugeMetric(
    'predator/wrong_cls',
    'Number of wrong suspected CLs found by Predator per day. Contains a '
    'field describing which client this wrong CL comes from; it can be '
    'clusterfuzz or cracas.', [gae_ts_mon.StringField('client_id')])

wrong_components = gae_ts_mon.GaugeMetric(
    'predator/wrong_components',
    'Number of wrong suspected components found by Predator per day. '
    'Contains a field describing which client this wrong component comes '
    'from; it can be clusterfuzz or cracas.',
    [gae_ts_mon.StringField('client_id')])

clusterfuzz_reports = gae_ts_mon.CounterMetric(
    'predator/clusterfuzz_reports',
    'Metric counting the number of clusterfuzz crash reports that Predator '
    'has processed. Contains fields that describe the crash', [
        gae_ts_mon.BooleanField('found_suspects'),
Example #10
# - hostname: 'autogen:<executor_id>': name of the bot that executed a job,
#     or an empty string. e.g. 'autogen:swarm42-m4'.
# Value should be 'pending' or 'running'. Completed / canceled jobs should not
# send this metric.
jobs_running = gae_ts_mon.BooleanMetric(
    'jobs/running',
    description='Presence metric for a running job.')

# Global metric. Metric fields:
# - project_id: e.g. 'chromium'
# - subproject_id: e.g. 'blink'. Set to empty string if not used.
# - spec_name: name of a job specification, e.g. '<master>:<builder>:<test>'
#     for buildbot jobs.
# - status: 'pending' or 'running'.
jobs_active = gae_ts_mon.GaugeMetric(
    'jobs/active',
    description='Number of running, pending or otherwise active jobs.')


# Global metric. Target field: hostname = 'autogen:<executor_id>' (bot id).
executors_pool = gae_ts_mon.StringMetric(
    'executors/pool',
    description='Pool name for a given job executor.')


# Global metric. Target fields:
# - hostname = 'autogen:<executor_id>' (bot id).
# Status value must be 'ready', 'running', or anything else, possibly
# swarming-specific, when it cannot run a job. E.g. 'quarantined' or
# 'dead'.
executors_status = gae_ts_mon.StringMetric(
Example #11

# Overrides to create app-global metrics.
GLOBAL_TARGET_FIELDS = {
  # Name of the module reporting the metric.
  'job_name': '',
  # Version of the app reporting the metric.
  'hostname': '',
  # ID of the instance reporting the metric.
  'task_num': 0,
}


GLOBAL_METRICS = {
    'instances': gae_ts_mon.GaugeMetric(
        'machine_provider/gce_backend/instances',
        description='Current count of the number of instances.',
    ),
}


config_valid = gae_ts_mon.BooleanMetric(
    'machine_provider/gce_backend/config/valid',
    description='Whether or not the current config is valid.',
)


def compute_global_metrics():
  orphaned, total = instances.count_instances()
  GLOBAL_METRICS['instances'].set(
      orphaned,
      fields={