Example #1
def testMatchFailed(self):
  # A failed sheriff-config response should surface the server's error
  # message to the caller instead of a result.
  clt = SheriffConfigClient()
  clt._session = self._Session(
      self._Response(False, 'some error message'))
  res, err_msg = clt.Match('Foo2/a/Bar2/b')
  self.assertIsNone(res)
  self.assertIn('some error message', err_msg)
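This test (and testMatch in Example #5) injects canned HTTP results through two small test doubles, _Session and _Response, which are not shown in the excerpt. A minimal sketch of what they could look like, assuming the client reads only an ok flag and the body text from the response and issues a single request on the session:

class _Response(object):
  # Fake HTTP response exposing only the fields the client is assumed to read.

  def __init__(self, ok, text):
    self.ok = ok      # True when the request succeeded.
    self.text = text  # JSON body on success, error message on failure.


class _Session(object):
  # Fake session that returns the same canned response for every request.

  def __init__(self, response):
    self._response = response

  def get(self, *_args, **_kwargs):
    return self._response

  # Whether the client calls get() or post() is an assumption; support both.
  post = get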
Example #2
def _ProcessTestStat(config, test, stat, rows, ref_rows):
  test_key = test.key

  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    logging.error('No rows fetched for %s', test.test_path)
    raise ndb.Return(None)

  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)

  if ref_rows:
    ref_change_points = FindChangePointsForTest(ref_rows, config)

    # Filter using any jumps in ref
    change_points = _FilterAnomaliesFoundInRef(
        change_points, ref_change_points, test_key)

  anomalies = yield [
      _MakeAnomalyEntity(c, test, stat, rows) for c in change_points]

  # If no new anomalies were found, then we're done.
  if not anomalies:
    raise ndb.Return(None)

  logging.info('Created %d anomalies', len(anomalies))
  logging.info(' Test: %s', test_key.id())
  logging.info(' Stat: %s', stat)

  # Get all the sheriffs from sheriff-config that match the test path.
  client = SheriffConfigClient()
  subscriptions, err_msg = client.Match(test.test_path)
  subscription_names = [s.name for s in subscriptions or []]

  # Break the process when Match fails so that find_anomaly makes a best
  # effort to find the subscriber; retrying is left to the upstream caller.
  if err_msg is not None:
    raise RuntimeError(err_msg)

  if not subscriptions:
    logging.warning('No subscription for %s', test.test_path)

  for a in anomalies:
    a.subscriptions = subscriptions
    a.subscription_names = subscription_names
    a.internal_only = (any([s.visibility != subscription.VISIBILITY.PUBLIC
                            for s in subscriptions]) or test.internal_only)
    a.groups = alert_group.AlertGroup.GetGroupsForAnomaly(a)

  yield ndb.put_multi_async(anomalies)

  # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
  # code will run serially.
  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement):
      deferred.defer(_EmailSheriff, anomaly_entity.subscriptions, test.key,
                     anomaly_entity.key)
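Because _ProcessTestStat yields futures and exits via raise ndb.Return(...), it only runs when wrapped as an ndb tasklet; the @ndb.tasklet decorator and the caller are outside this excerpt. A rough sketch of how such a coroutine is typically declared and composed, where the _ProcessTest wrapper and the stats argument are hypothetical:

from google.appengine.ext import ndb

@ndb.tasklet
def _ProcessTest(config, test, stats, rows, ref_rows):
  # Fan out one _ProcessTestStat call per stat and wait for all of them;
  # yielding a list of tasklet futures runs them concurrently.
  # (Assumes _ProcessTestStat above also carries the @ndb.tasklet decorator.)
  yield [_ProcessTestStat(config, test, stat, rows, ref_rows)
         for stat in stats]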
Example #3
def _GetSheriffForTest(test):
    """Gets the Sheriff for a test, or None if no sheriff."""
    # Get all the sheriffs from sheriff-config that match the test path.
    # Currently this is only used for logging.
    client = SheriffConfigClient()
    match_res = client.Match(test.test_path)
    # Legacy path: get the sheriff from TestMetadata.
    sheriff = None
    if test.sheriff:
        sheriff = yield test.sheriff.get_async()
    raise ndb.Return((sheriff, match_res))
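Since _GetSheriffForTest is also generator-style, a caller consumes it with yield inside another tasklet and unpacks the (sheriff, match_res) pair delivered by ndb.Return. A minimal, hypothetical caller sketch:

import logging

from google.appengine.ext import ndb

@ndb.tasklet
def _LogSheriffForTest(test):
    # Hypothetical helper: resolve the legacy sheriff and the sheriff-config
    # match result for a test, then log both.
    # (Assumes _GetSheriffForTest carries the @ndb.tasklet decorator.)
    sheriff, match_res = yield _GetSheriffForTest(test)
    logging.info('Sheriff for %s: %s (sheriff-config match: %s)',
                 test.test_path, sheriff, match_res)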
Example #4
def _ProcessTestStat(config, sheriff, test, stat, rows, ref_rows):
    test_key = test.key

    # If there were no rows fetched, then there's nothing to analyze.
    if not rows:
        logging.error('No rows fetched for %s', test.test_path)
        raise ndb.Return(None)

    # Get anomalies and check if they happen in ref build also.
    change_points = FindChangePointsForTest(rows, config)

    if ref_rows:
        ref_change_points = FindChangePointsForTest(ref_rows, config)

        # Filter using any jumps in ref
        change_points = _FilterAnomaliesFoundInRef(change_points,
                                                   ref_change_points, test_key)

    anomalies = yield [
        _MakeAnomalyEntity(c, test, stat, rows) for c in change_points
    ]

    # If no new anomalies were found, then we're done.
    if not anomalies:
        raise ndb.Return(None)

    logging.info('Created %d anomalies', len(anomalies))
    logging.info(' Test: %s', test_key.id())
    logging.info(' Stat: %s', stat)
    logging.info(' Sheriff: %s', test.sheriff.id())

    yield ndb.put_multi_async(anomalies)

    # Get all the sheriffs from sheriff-config that match the test path.
    # Currently this is only used for logging.
    client = SheriffConfigClient()
    new_sheriffs, err_msg = client.Match(test.test_path)
    new_sheriffs_keys = [s.key.string_id() for s in new_sheriffs or []]
    logging.info('Sheriff for %s: old: %s, new: %s', test.test_path,
                 'None' if sheriff is None else sheriff.key.string_id(),
                 err_msg if new_sheriffs is None else new_sheriffs_keys)
    if sheriff and sheriff.key.string_id() not in new_sheriffs_keys:
        logging.warning('Sheriff does not match: %s', test_key.string_id())

    # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
    # code will run serially.
    # Email sheriff about any new regressions.
    for anomaly_entity in anomalies:
        if (anomaly_entity.bug_id is None and not anomaly_entity.is_improvement
                and not sheriff.summarize):
            deferred.defer(_EmailSheriff, sheriff.key, test.key,
                           anomaly_entity.key)
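deferred.defer(_EmailSheriff, sheriff.key, test.key, anomaly_entity.key) enqueues a task that later invokes _EmailSheriff with those same arguments, so the handler has to resolve the keys back into entities before emailing. A hypothetical skeleton matching this call site; the entity lookups and the email_sheriff.EmailSheriff call are assumptions rather than code from the excerpt:

def _EmailSheriff(sheriff_key, test_key, anomaly_key):
    # Runs later on the task queue: fetch the entities behind the keys and
    # hand them to the (assumed) email_sheriff helper.
    sheriff_entity = sheriff_key.get()
    test_entity = test_key.get()
    anomaly_entity = anomaly_key.get()
    email_sheriff.EmailSheriff(sheriff_entity, test_entity, anomaly_entity)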
Example #5
def testMatch(self):
  # A successful sheriff-config response should be parsed into the expected
  # Subscription objects with no error message.
  clt = SheriffConfigClient()
  response_text = """
   {
     "subscriptions": [
       {
         "config_set": "projects/catapult",
         "revision": "c9d4943dc832e448f9786e244f918fdabc1e5303",
         "subscription": {
           "name": "Public Team1",
           "rotation_url": "https://some/url",
           "notification_email": "*****@*****.**",
           "bug_labels": [
             "Lable1",
             "Lable2"
           ],
           "bug_components": [
             "foo>bar"
           ],
           "visibility": "PUBLIC",
           "patterns": [
             {
               "glob": "Foo2/*/Bar2/*"
             },
             {
               "regex": ".*"
             }
           ]
         }
       }
     ]
   }
   """
  clt._session = self._Session(self._Response(True, response_text))
  expected = [
      Subscription(
          revision='c9d4943dc832e448f9786e244f918fdabc1e5303',
          name='Public Team1',
          rotation_url='https://some/url',
          notification_email='*****@*****.**',
          visibility=VISIBILITY.PUBLIC,
          bug_labels=['Lable1', 'Lable2'],
          bug_components=['foo>bar']
      ),
  ]
  self.assertEqual(clt.Match('Foo2/a/Bar2/b'), (expected, None))
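Together with testMatchFailed in Example #1, this pins down the Match() contract: a failed response yields (None, error_text), while a successful one yields a list of Subscription objects and None. A rough sketch of an implementation that would satisfy both tests; the endpoint URL, request shape, .ok/.text response fields, and JSON-to-Subscription field mapping are all assumptions here, with Subscription and VISIBILITY coming from the same module the test imports them from:

import json


class SheriffConfigClient(object):

  def __init__(self):
    # The tests replace this attribute; a real client would build an
    # authenticated requests-style session here.
    self._session = None

  def Match(self, path):
    # Hypothetical endpoint and payload; only the (result, error) return
    # contract is taken from the tests above.
    response = self._session.post(
        'https://sheriff-config.example.com/subscriptions/match',
        json={'path': path})
    if not response.ok:
      return None, response.text
    subscriptions = []
    for match in json.loads(response.text).get('subscriptions', []):
      s = match['subscription']
      subscriptions.append(Subscription(
          revision=match['revision'],
          name=s['name'],
          rotation_url=s.get('rotation_url'),
          notification_email=s.get('notification_email'),
          # Assumes VISIBILITY member names mirror the JSON strings.
          visibility=getattr(VISIBILITY, s.get('visibility', 'PUBLIC')),
          bug_labels=s.get('bug_labels', []),
          bug_components=s.get('bug_components', [])))
    return subscriptions, None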
Example #6
def _ProcessTestStat(config, test, stat, rows, ref_rows):
  test_key = test.key

  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    logging.error('No rows fetched for %s', test.test_path)
    raise ndb.Return(None)

  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)

  if ref_rows:
    ref_change_points = FindChangePointsForTest(ref_rows, config)

    # Filter using any jumps in ref
    change_points = _FilterAnomaliesFoundInRef(
        change_points, ref_change_points, test_key)

  anomalies = yield [
      _MakeAnomalyEntity(c, test, stat, rows) for c in change_points]

  # If no new anomalies were found, then we're done.
  if not anomalies:
    raise ndb.Return(None)

  logging.info('Created %d anomalies', len(anomalies))
  logging.info(' Test: %s', test_key.id())
  logging.info(' Stat: %s', stat)

  # Get all the sheriffs from sheriff-config that match the test path.
  legacy_sheriff = yield _GetSheriffForTest(test)
  client = SheriffConfigClient()
  subscriptions, err_msg = client.Match(test.test_path)
  subscription_names = [s.name for s in subscriptions or []]
  if legacy_sheriff is not None:
    logging.info('Sheriff for %s: old: %s, new: %s', test.test_path,
                 legacy_sheriff.key.string_id(),
                 err_msg if subscriptions is None else subscription_names)
    if legacy_sheriff.key.string_id() not in subscription_names:
      logging.warning('Sheriff does not match: %s', test_key.string_id())

  # We still check the legacy sheriff for backward compatibility, so during
  # the migration both the legacy sheriff and the new sheriff_config must be
  # updated for a config to take effect.
  if not legacy_sheriff:
    logging.error('No sheriff for %s', test_key)
    raise ndb.Return(None)

  for a in anomalies:
    a.sheriff = legacy_sheriff.key
    a.subscriptions = subscriptions
    a.subscription_names = subscription_names
    a.internal_only = (any([s.visibility != subscription.VISIBILITY.PUBLIC
                            for s in subscriptions]) or test.internal_only)

  yield ndb.put_multi_async(anomalies)

  # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
  # code will run serially.
  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement):
      deferred.defer(_EmailSheriff, legacy_sheriff, test.key,
                     anomaly_entity.key)