Example #1
0
    def test_metric_bounds_fixed_value(self, comparison, threshold_value,
                                       expected_bounds):
        # A `fixed_value` threshold derives its bounds from the threshold
        # alone, so an empty value history must not affect the result.
        threshold = metrics.Threshold('fixed_value', threshold_value)
        computed_bounds = metrics.metric_bounds([], threshold, comparison)

        self.assertSequenceAlmostEqual(computed_bounds, expected_bounds)
Example #2
0
    def test_metric_bounds_stddevs_from_mean(self, comparison, threshold_value,
                                             expected_bounds):
        # Five evenly spaced points: mean = 3, stddev = sqrt(2) ~= 1.414.
        history = [
            metrics.MetricPoint(metric_value=value, wall_time=wall_time)
            for value, wall_time in zip((1, 2, 3, 4, 5), (10, 20, 30, 40, 50))
        ]

        threshold = metrics.Threshold('stddevs_from_mean', threshold_value)
        computed_bounds = metrics.metric_bounds(history, threshold, comparison)

        self.assertSequenceAlmostEqual(computed_bounds, expected_bounds,
                                       places=3)
Example #3
0
    def skip_oob_alerting(self, job_status, value_history, threshold,
                          comparison):
        """Decide whether to skip alerts for any out-of-bounds metrics.

    Args:
      job_status (string): Final state of the most recent run of the test.
        Should be one of the status constants found in job_status_handler.py.
      value_history (list[MetricValue]): Full history of values for this metric.
      threshold (metrics.Threshold): Threshold to use for comparison.
      comparison (string): Comparison type to use for metrics.

    Returns:
      bool: True if alerting should be skipped for any oob metrics.
    """
        # Alerting may be disabled entirely via the regression test config.
        if not self.regression_test_config.get('alert_for_oob_metrics', True):
            return True
        # Skip alerts if job failed since metrics are unreliable for partial runs.
        if job_status != job_status_handler.SUCCESS:
            return True
        # Optionally require 2 consecutive out-of-bounds runs before alerting.
        if self.regression_test_config.get('alert_after_second_test_failure',
                                           True):
            try:
                previous_value = value_history[-2].metric_value
                # Bounds for the previous run exclude the newest point.
                previous_lower_bound, previous_upper_bound = metrics.metric_bounds(
                    value_history[:-1], threshold, comparison)
                previous_within_bounds = metrics.within_bounds(
                    previous_value,
                    previous_lower_bound,
                    previous_upper_bound,
                    inclusive=('equal' in comparison))
                if previous_within_bounds:
                    # Since the previous run was OK, there is no chance of triggering an
                    # alert since we are guaranteed to not have 2 consecutive oob runs.
                    return True
            except Exception as e:
                # Best-effort: e.g. IndexError when history has < 2 points.
                # Fall through and allow alerting rather than crash.
                # BUGFIX: the two implicitly-concatenated message fragments had
                # no separator, producing "...run: {}.value_history: ...".
                self.logger.warning(
                    'Failed to find metric status of previous run: {}. '
                    'value_history: {}'.format(e, value_history))
        return False
Example #4
0
  def compute_bounds_and_report_errors(self, metrics_history, new_metrics,
                                       job_status):
    """Compute the bounds for metrics and report abnormal values.

    Any metric that is currently outside the expected bounds is reported to
    Stackdriver Error Reporting unless `alert_for_oob_metrics` is set to
    False in the regression test config. Even if this reporting is turned off,
    this method computes the upper and lower bounds for each metric to provide
    to BigQuery as a visual aid when rendering metrics history into charts.

    Args:
      metrics_history(dict): Historic values of each metric. Not mutated.
      new_metrics(dict): Key is metric name and value is MetricPoint containing
        the latest aggregated value for that metric.
      job_status(string): Final state of the job, should be one of the status
        constants found in job_status_handler.py.

    Returns:
      metric_name_to_visual_bounds (dict): Key is metric name and value is a
        tuple of floats of the form (lower_bound, upper_bound).
    """
    if not self.regression_test_config:
      return {}
    success_conditions = self.regression_test_config.get(
        'metric_success_conditions')
    if not success_conditions:
      return {}

    # BUGFIX: a plain dict.copy() is shallow, so appending below would mutate
    # the caller's history lists. Copy each inner list as well.
    metrics_history = {name: list(history)
                       for name, history in metrics_history.items()}

    # Add the metrics from the latest run. These aren't in Bigquery yet.
    # BUGFIX: `setdefault` covers a metric appearing for the first time, which
    # previously raised KeyError.
    for metric_name, metric_value in new_metrics.items():
      metrics_history.setdefault(metric_name, []).append(metric_value)

    metric_name_to_visual_bounds = {}
    metric_subset_to_report = set(
        self.regression_test_config.get('metric_subset_to_alert', []))
    for metric_name, value_history in metrics_history.items():
      if metric_subset_to_report and metric_name not in metric_subset_to_report:
        # BUGFIX: the message named a key (`metric_subset_to_report`) that does
        # not exist in the config; the real key is `metric_subset_to_alert`.
        self.logger.info(
            'Skipping alerts and bounds for metric `{}` since '
            'it does not appear in `metric_subset_to_alert` in your '
            'regression test config.'.format(metric_name))
        continue
      success_condition = success_conditions.get(metric_name) or \
        success_conditions.get('default')
      if not success_condition:
        self.logger.warning(
            'metric: `{}` has an empty success condition in the '
            '`metric_success_conditions` dict in the regression_test_config '
            'but there is no default condition provided. No bounds or '
            'alerts will be computed. See README for config details.'.format(
                metric_name))
        continue
      elif len(value_history) <= success_condition.get(
        'wait_for_n_points_of_history', -1):
        self.logger.info(
            'Metric: {} had only {} points of history. Skipping bounds '
            'enforcement. Success condition: {}'.format(
                metric_name, len(value_history), success_condition))
        continue

      # `success_threshold` is a single-entry mapping of type -> value.
      threshold_type, threshold_value = next(
          iter(success_condition.get('success_threshold').items()))
      threshold = metrics.Threshold(threshold_type, threshold_value)
      comparison = success_condition.get('comparison')
      lower_bound, upper_bound = metrics.metric_bounds(
          value_history, threshold, comparison)
      metric_name_to_visual_bounds[metric_name] = (lower_bound, upper_bound)

      metric_value = value_history[-1].metric_value
      within_bounds = metrics.within_bounds(
          metric_value, lower_bound, upper_bound,
          inclusive=('equal' in comparison))

      # Generate an alert unless one of these is True:
      #   1. metrics are within bounds.
      #   2. alerting is disabled by config.
      #   3. the job failed and therefore metrics are unreliable.
      if within_bounds or not self.regression_test_config.get(
          'alert_for_oob_metrics', True) or \
              job_status != job_status_handler.SUCCESS:
        continue
      self.logger.error(
          'Metric `{}` was out of bounds for test `{}`. Bounds were '
          '({}, {}) and value was {:.2f}'.format(
              metric_name, self.test_name, lower_bound, upper_bound,
              metric_value),
          debug_info=self.debug_info)

    return metric_name_to_visual_bounds