Example #1
def testMatchFailed(self):
  clt = SheriffConfigClient()
  clt._session = self._Session(
      self._Response(False, 'some error message'))
  res, err_msg = clt.Match('Foo2/a/Bar2/b')
  self.assertIsNone(res)
  self.assertIn('some error message', err_msg)
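The test above relies on _Session and _Response helpers defined elsewhere in the test class. A minimal sketch of what such test doubles could look like, assuming SheriffConfigClient only inspects the ok and text attributes of whatever the session call returns (the helper names, attributes, and the post method are inferred from this test, not taken from the original suite):

class _Response(object):
  """Bare-bones stand-in for an HTTP response: just ok and text."""

  def __init__(self, ok, text):
    self.ok = ok
    self.text = text


class _Session(object):
  """Fake session that hands back one canned response for any call."""

  def __init__(self, response):
    self._response = response

  def post(self, *_args, **_kwargs):
    # Ignore URL and payload; the client only looks at the response.
    return self._response

  # Mirror post in case the client issues GET requests instead.
  get = post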
Example #2
def _GetSheriffList():
  """Returns a list of sheriff names for all sheriffs in the datastore."""
  clt = SheriffConfigClient()
  subscriptions, err_msg = clt.List()
  if err_msg:
    raise InternalServerError(err_msg)
  return [s.name for s in subscriptions]
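_GetSheriffList turns a failed List() call into an InternalServerError and otherwise returns only the subscription names. A small sketch of how the error path could be unit-tested by patching the client (the patch target path is a placeholder; point it at wherever SheriffConfigClient is imported in the module under test):

from unittest import mock


def testGetSheriffListRaisesOnError(self):
  # Hypothetical patch target, for illustration only.
  with mock.patch('dashboard.module_under_test.SheriffConfigClient') as client_cls:
    client_cls.return_value.List.return_value = (None, 'some error message')
    with self.assertRaises(InternalServerError):
      _GetSheriffList()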
Example #3
def _ProcessTestStat(config, test, stat, rows, ref_rows):
  test_key = test.key

  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    logging.error('No rows fetched for %s', test.test_path)
    raise ndb.Return(None)

  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)

  if ref_rows:
    ref_change_points = FindChangePointsForTest(ref_rows, config)

    # Filter using any jumps in ref
    change_points = _FilterAnomaliesFoundInRef(
        change_points, ref_change_points, test_key)

  anomalies = yield [
      _MakeAnomalyEntity(c, test, stat, rows) for c in change_points]

  # If no new anomalies were found, then we're done.
  if not anomalies:
    raise ndb.Return(None)

  logging.info('Created %d anomalies', len(anomalies))
  logging.info(' Test: %s', test_key.id())
  logging.info(' Stat: %s', stat)

  # Get all the sheriffs from sheriff-config that match the path.
  client = SheriffConfigClient()
  subscriptions, err_msg = client.Match(test.test_path)
  subscription_names = [s.name for s in subscriptions or []]

  # Break the process when Match fails so that find_anomaly makes a best
  # effort to find the subscriber. Leave retrying to upstream.
  if err_msg is not None:
    raise RuntimeError(err_msg)

  if not subscriptions:
    logging.warning('No subscription for %s', test.test_path)

  for a in anomalies:
    a.subscriptions = subscriptions
    a.subscription_names = subscription_names
    a.internal_only = (any([s.visibility != subscription.VISIBILITY.PUBLIC
                            for s in subscriptions]) or test.internal_only)
    a.groups = alert_group.AlertGroup.GetGroupsForAnomaly(a)

  yield ndb.put_multi_async(anomalies)

  # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
  # code will run serially.
  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement):
      deferred.defer(_EmailSheriff, anomaly_entity.subscriptions, test.key,
                     anomaly_entity.key)
Example #4
def _GetSheriffForTest(test):
    """Gets the Sheriff for a test, or None if no sheriff."""
    # Get all the sheriffs from sheriff-config that match the path.
    # Currently just used for logging.
    client = SheriffConfigClient()
    match_res = client.Match(test.test_path)
    # Old one. Get the sheriff from TestMetadata
    sheriff = None
    if test.sheriff:
        sheriff = yield test.sheriff.get_async()
    raise ndb.Return((sheriff, match_res))
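Because _GetSheriffForTest uses yield and ndb.Return, it is an ndb tasklet (the @ndb.tasklet decorator is implied by the excerpt), so callers have to yield it from another tasklet and unpack the (sheriff, match_res) pair it returns. A hypothetical caller, for illustration only:

@ndb.tasklet
def _LogSheriffForTest(test):
  # Hypothetical helper: drive the tasklet and unpack its result.
  sheriff, match_res = yield _GetSheriffForTest(test)
  logging.info('Sheriff: %s, match result: %s', sheriff, match_res)
  raise ndb.Return(sheriff)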
Example #5
def _ProcessTestStat(config, sheriff, test, stat, rows, ref_rows):
    test_key = test.key

    # If there were no rows fetched, then there's nothing to analyze.
    if not rows:
        logging.error('No rows fetched for %s', test.test_path)
        raise ndb.Return(None)

    # Get anomalies and check if they happen in ref build also.
    change_points = FindChangePointsForTest(rows, config)

    if ref_rows:
        ref_change_points = FindChangePointsForTest(ref_rows, config)

        # Filter using any jumps in ref
        change_points = _FilterAnomaliesFoundInRef(change_points,
                                                   ref_change_points, test_key)

    anomalies = yield [
        _MakeAnomalyEntity(c, test, stat, rows) for c in change_points
    ]

    # If no new anomalies were found, then we're done.
    if not anomalies:
        raise ndb.Return(None)

    logging.info('Created %d anomalies', len(anomalies))
    logging.info(' Test: %s', test_key.id())
    logging.info(' Stat: %s', stat)
    logging.info(' Sheriff: %s', test.sheriff.id())

    yield ndb.put_multi_async(anomalies)

    # Get all the sheriffs from sheriff-config that match the path.
    # Currently just used for logging.
    client = SheriffConfigClient()
    new_sheriffs, err_msg = client.Match(test.test_path)
    new_sheriffs_keys = [s.key.string_id() for s in new_sheriffs or []]
    logging.info('Sheriff for %s: old: %s, new: %s', test.test_path,
                 'None' if sheriff is None else sheriff.key.string_id(),
                 err_msg if new_sheriffs is None else new_sheriffs_keys)
    if sheriff and sheriff.key.string_id() not in new_sheriffs_keys:
        logging.warn('Sheriff do not match: %s', test_key.string_id())

    # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
    # code will run serially.
    # Email sheriff about any new regressions.
    for anomaly_entity in anomalies:
        if (anomaly_entity.bug_id is None and not anomaly_entity.is_improvement
                and not sheriff.summarize):
            deferred.defer(_EmailSheriff, sheriff.key, test.key,
                           anomaly_entity.key)
Example #6
def testMatch(self):
  clt = SheriffConfigClient()
  response_text = """
  {
    "subscriptions": [
      {
        "config_set": "projects/catapult",
        "revision": "c9d4943dc832e448f9786e244f918fdabc1e5303",
        "subscription": {
          "name": "Public Team1",
          "rotation_url": "https://some/url",
          "notification_email": "*****@*****.**",
          "bug_labels": [
            "Lable1",
            "Lable2"
          ],
          "bug_components": [
            "foo>bar"
          ],
          "visibility": "PUBLIC",
          "patterns": [
            {
              "glob": "Foo2/*/Bar2/*"
            },
            {
              "regex": ".*"
            }
          ]
        }
      }
    ]
  }
  """
  clt._session = self._Session(self._Response(True, response_text))
  expected = [
      Subscription(
          revision='c9d4943dc832e448f9786e244f918fdabc1e5303',
          name='Public Team1',
          rotation_url='https://some/url',
          notification_email='*****@*****.**',
          visibility=VISIBILITY.PUBLIC,
          bug_labels=['Lable1', 'Lable2'],
          bug_components=['foo>bar']
      ),
  ]
  self.assertEqual(clt.Match('Foo2/a/Bar2/b'), (expected, None))
Example #7
def get(self):
  client = SheriffConfigClient()
  ok, err_msg = client.Update()
  if not ok:
    return webapp2.Response('FAILED: %s' % err_msg)
  return webapp2.Response('OK')
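This handler just forwards the (ok, err_msg) pair from Update() into a plain-text response. Reusing the fake-session pattern from Example #1, a failure-path test could look roughly like this (the helper names are assumptions, not taken from the original suite):

def testUpdateFailed(self):
  clt = SheriffConfigClient()
  clt._session = self._Session(self._Response(False, 'some error message'))
  ok, err_msg = clt.Update()
  self.assertFalse(ok)
  self.assertIsNotNone(err_msg)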
Example #8
    def get(self):
        """Displays UI for debugging the anomaly detection function.

    Request parameters:
      test_path: Full test path (Master/bot/suite/chart) for test with alert.
      rev: A revision (Row id number) to center the graph on.
      num_before: Maximum number of points after the given revision to get.
      num_after: Maximum number of points before the given revision.
      config: Config parameters for in JSON form.

    Outputs:
      A HTML page with a chart (if test_path is given) and a form.
    """
        try:
            test = self._GetTest()
            num_before, num_after = self._GetNumBeforeAfter()
            config_name = self._GetConfigName(test)
            config_dict = anomaly_config.CleanConfigDict(
                self._GetConfigDict(test))
        except QueryParameterError as e:
            self.RenderHtml('debug_alert.html', {'error': e.message})
            return

        revision = self.request.get('rev')
        if revision:
            rows = _FetchRowsAroundRev(test, int(revision), num_before,
                                       num_after)
        else:
            rows = _FetchLatestRows(test, num_before)

        chart_series = _ChartSeries(rows)
        lookup = _RevisionList(rows)

        # Get the anomaly data from the new anomaly detection module. This will
        # also be passed to the template so that it can be shown on the page.
        change_points = SimulateAlertProcessing(chart_series, **config_dict)
        anomaly_indexes = [c.x_value for c in change_points]
        anomaly_points = [(i, chart_series[i][1]) for i in anomaly_indexes]
        anomaly_segments = _AnomalySegmentSeries(change_points)

        plot_data = _GetPlotData(chart_series, anomaly_points,
                                 anomaly_segments)
        subscriptions, err_msg = SheriffConfigClient().Match(test.test_path)
        subscription_names = ','.join([s.name for s in subscriptions or []])
        if err_msg is not None:
            self.RenderHtml('debug_alert.html', {'error': err_msg})
            return

        # Render the debug_alert page with all of the parameters filled in.
        self.RenderHtml(
            'debug_alert.html', {
                'test_path': test.test_path,
                'rev': revision or '',
                'num_before': num_before,
                'num_after': num_after,
                'sheriff_name': subscription_names,
                'config_name': config_name,
                'config_json': json.dumps(
                    config_dict, indent=2, sort_keys=True),
                'plot_data': json.dumps(plot_data),
                'lookup': json.dumps(lookup),
                'anomalies': json.dumps([c.AsDict() for c in change_points]),
                'csv_url': _CsvUrl(test.test_path, rows),
                'graph_url': _GraphUrl(test, revision),
                'stored_anomalies': _FetchStoredAnomalies(test, lookup),
            })
Example #9
def _ProcessTestStat(config, test, stat, rows, ref_rows):
  test_key = test.key

  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    logging.error('No rows fetched for %s', test.test_path)
    raise ndb.Return(None)

  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)

  if ref_rows:
    ref_change_points = FindChangePointsForTest(ref_rows, config)

    # Filter using any jumps in ref
    change_points = _FilterAnomaliesFoundInRef(
        change_points, ref_change_points, test_key)

  anomalies = yield [
      _MakeAnomalyEntity(c, test, stat, rows) for c in change_points]

  # If no new anomalies were found, then we're done.
  if not anomalies:
    raise ndb.Return(None)

  logging.info('Created %d anomalies', len(anomalies))
  logging.info(' Test: %s', test_key.id())
  logging.info(' Stat: %s', stat)

  # Get all the sheriffs from sheriff-config that match the path.
  legacy_sheriff = yield _GetSheriffForTest(test)
  client = SheriffConfigClient()
  subscriptions, err_msg = client.Match(test.test_path)
  subscription_names = [s.name for s in subscriptions or []]
  if legacy_sheriff is not None:
    logging.info('Sheriff for %s: old: %s, new: %s', test.test_path,
                 legacy_sheriff.key.string_id(),
                 err_msg if subscriptions is None else subscription_names)
    if legacy_sheriff.key.string_id() not in subscription_names:
      logging.warn('Sheriff do not match: %s', test_key.string_id())

  # We still check the legacy sheriff for backward compatibility, so during
  # the migration both the legacy and the new sheriff_config should be
  # updated to ensure a config takes effect.
  if not legacy_sheriff:
    logging.error('No sheriff for %s', test_key)
    raise ndb.Return(None)

  for a in anomalies:
    a.sheriff = legacy_sheriff.key
    a.subscriptions = subscriptions
    a.subscription_names = subscription_names
    a.internal_only = (any([s.visibility != subscription.VISIBILITY.PUBLIC
                            for s in subscriptions]) or test.internal_only)

  yield ndb.put_multi_async(anomalies)

  # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
  # code will run serially.
  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement):
      deferred.defer(_EmailSheriff, legacy_sheriff, test.key,
                     anomaly_entity.key)