def CreateStoppageAlert(test, row):
  """Creates a new StoppageAlert entity.

  Args:
    test: A TestMetadata entity.
    row: A Row entity; the last Row that was put before the stoppage.

  Returns:
    A new StoppageAlert entity which has not been put, or None, if we don't
    want to create a new StoppageAlert.
  """
  # For ClankInternal data, the revision displayed to users should be the
  # commit position from the row, when the row carries one.
  display_rev = None
  if test.master_name == 'ClankInternal' and hasattr(row, 'r_commit_pos'):
    display_rev = row.r_commit_pos
  stoppage_alert = StoppageAlert(
      parent=ndb.Key('StoppageAlertParent', test.test_path),
      id=row.revision,
      internal_only=test.internal_only,
      sheriff=test.sheriff,
      last_row_timestamp=row.timestamp,
      display_start=display_rev,
      display_end=display_rev)
  alert_group.GroupAlerts([stoppage_alert], test.suite_name, 'StoppageAlert')
  keys_in_group = StoppageAlert.query(
      StoppageAlert.group == stoppage_alert.group).fetch(keys_only=True)
  if len(keys_in_group) >= _MAX_GROUP_SIZE:
    # Too many stoppage alerts in this group; we don't want to put any more.
    return None
  test.stoppage_alert = stoppage_alert.key
  test.put()
  return stoppage_alert
def CreateStoppageAlert(test, row):
  """Creates a new StoppageAlert entity.

  Args:
    test: A Test entity.
    row: A Row entity; the last Row that was put before the stoppage.

  Returns:
    A new StoppageAlert entity which has not been put, or None, if we don't
    want to create a new StoppageAlert.
  """
  stoppage_alert = StoppageAlert(
      parent=ndb.Key('StoppageAlertParent', test.test_path),
      id=row.revision,
      internal_only=test.internal_only,
      sheriff=test.sheriff)
  alert_group.GroupAlerts([stoppage_alert], test.suite_name, 'StoppageAlert')
  keys_in_group = StoppageAlert.query(
      StoppageAlert.group == stoppage_alert.group).fetch(keys_only=True)
  if len(keys_in_group) >= _MAX_GROUP_SIZE:
    # Too many stoppage alerts in this group; we don't want to put any more.
    return None
  test.stoppage_alert = stoppage_alert.key
  test.put()
  return stoppage_alert
def ProcessTest(test_key):
  """Processes a test to find new anomalies.

  Fetches recent rows for the test, runs change-point detection on them,
  filters out changes also present in the ref build, groups the resulting
  Anomaly entities, emails the sheriff about new regressions, and puts the
  anomalies.

  Args:
    test_key: The ndb.Key for a TestMetadata.
  """
  test = test_key.get()
  config = anomaly_config.GetAnomalyConfigDict(test)
  max_num_rows = config.get('max_window_size', DEFAULT_NUM_POINTS)
  rows = GetRowsToAnalyze(test, max_num_rows)
  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    # In some cases (e.g. if some points are deleted) it might be possible
    # that last_alerted_revision is incorrect. In this case, reset it.
    highest_rev = _HighestRevision(test_key)
    # Guard against None on either side; comparing None with an int raises
    # TypeError on Python 3.
    if (test.last_alerted_revision is not None and highest_rev is not None
        and test.last_alerted_revision > highest_rev):
      logging.error(
          'last_alerted_revision %d is higher than highest rev %d '
          'for test %s; setting last_alerted_revision to None.',
          test.last_alerted_revision, highest_rev, test.test_path)
      test.last_alerted_revision = None
      test.put()
    logging.error('No rows fetched for %s', test.test_path)
    return

  # Note: `test` was already fetched above and nothing on this path has
  # modified it, so there is no need for a second test_key.get() RPC here.
  sheriff = _GetSheriffForTest(test)
  if not sheriff:
    logging.error('No sheriff for %s', test_key)
    return

  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)
  change_points = _FilterAnomaliesFoundInRef(
      change_points, test_key, len(rows))

  anomalies = [_MakeAnomalyEntity(c, test, rows) for c in change_points]

  # If no new anomalies were found, then we're done.
  if not anomalies:
    return

  logging.info('Found at least one anomaly in: %s', test.test_path)

  # Update the last_alerted_revision property of the test.
  test.last_alerted_revision = anomalies[-1].end_revision
  test.put()

  alert_group.GroupAlerts(anomalies, utils.TestSuiteName(test.key), 'Anomaly')

  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement and
        not sheriff.summarize):
      email_sheriff.EmailSheriff(sheriff, test, anomaly_entity)

  ndb.put_multi(anomalies)
def testGroupAlerts_WithExistingGroupThatHasDifferentKind_DoesntGroup(
    self):
  """Alerts only join an existing group when the alert kinds match."""
  sheriffs = self._AddSheriffs()
  tests = self._AddTests()
  other_kind_group_key = alert_group.AlertGroup(
      bug_id=None,
      start_revision=3000,
      end_revision=6000,
      alert_kind='OtherAlert',
      test_suites=['tab_capture']).put()
  anomaly_alert = self._CreateAnomalyForTests(
      revision_range=(4000, 5000),
      test=tests[1],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=False)

  # Kind mismatch: the 'Anomaly' alert must not join the 'OtherAlert' group.
  alert_group.GroupAlerts([anomaly_alert], 'tab_capture', 'Anomaly')
  self.assertNotEqual(other_kind_group_key, anomaly_alert.group)
  self.assertEqual('Anomaly', anomaly_alert.group.get().alert_kind)

  # If the alert kind that's passed when calling GroupAlerts matches
  # the alert kind of the existing group, then it will be grouped.
  alert_group.GroupAlerts([anomaly_alert], 'tab_capture', 'OtherAlert')
  self.assertEqual(other_kind_group_key, anomaly_alert.group)
  self.assertEqual('OtherAlert', anomaly_alert.group.get().alert_kind)
def testGroupAlerts_WithNoAssociation_MakesNewGroup(self):
  """An alert overlapping no existing group gets a brand-new group."""
  sheriffs = self._AddSheriffs()
  tests = self._AddTests()

  # Add some anomaly groups, neither of which overlaps (1000, 2000).
  alert_group.AlertGroup(
      bug_id=None,
      start_revision=3000,
      end_revision=6000,
      alert_kind='Anomaly',
      test_suites=['scrolling_benchmark']).put()
  alert_group.AlertGroup(
      bug_id=104,
      start_revision=7000,
      end_revision=9000,
      alert_kind='Anomaly',
      test_suites=['tab_capture']).put()

  improvement_anomaly = self._CreateAnomalyForTests(
      revision_range=(1000, 2000),
      test=tests[0],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=True)
  regression_anomaly = self._CreateAnomalyForTests(
      revision_range=(1000, 2000),
      test=tests[0],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=False)

  test_suite = 'scrolling_benchmark'
  alert_group.GroupAlerts(
      [regression_anomaly, improvement_anomaly], test_suite, 'Anomaly')

  # The regression Anomaly was not grouped with a group that has a bug ID,
  # so the bug ID is not changed.
  self.assertIsNone(regression_anomaly.bug_id)

  # Improvement Anomaly should not be auto-triaged.
  self.assertIsNone(improvement_anomaly.group)

  # A third group was created for the regression, covering its revision
  # range, with no bug and the given suite.
  alert_groups = alert_group.AlertGroup.query().fetch()
  self.assertEqual(3, len(alert_groups))
  new_group = alert_groups[2]
  self.assertEqual(
      (1000, 2000), (new_group.start_revision, new_group.end_revision))
  self.assertIsNone(new_group.bug_id)
  self.assertEqual(new_group.test_suites, [test_suite])
def testGroupAlerts_WithExistingGroup(self):
  """A regression overlapping an existing group joins it and gets its bug."""
  sheriffs = self._AddSheriffs()
  tests = self._AddTests()

  # Add some anomaly groups; only the tab_capture one overlaps (6000, 8000).
  alert_group.AlertGroup(
      bug_id=None,
      start_revision=3000,
      end_revision=6000,
      alert_kind='Anomaly',
      test_suites=['scrolling_benchmark']).put()
  tab_capture_group = alert_group.AlertGroup(
      bug_id=104,
      start_revision=7000,
      end_revision=9000,
      alert_kind='Anomaly',
      test_suites=['tab_capture']).put()

  improvement_anomaly = self._CreateAnomalyForTests(
      revision_range=(6000, 8000),
      test=tests[1],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=True)
  regression_anomaly = self._CreateAnomalyForTests(
      revision_range=(6000, 8000),
      test=tests[1],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=False)

  alert_group.GroupAlerts(
      [regression_anomaly, improvement_anomaly], 'tab_capture', 'Anomaly')

  # The regression Anomaly's bug ID is changed because it has been grouped.
  self.assertEqual(104, regression_anomaly.bug_id)
  self.assertEqual(tab_capture_group, regression_anomaly.group)

  # Improvement Anomaly should not be grouped.
  self.assertIsNone(improvement_anomaly.group)

  # No new group was created; the existing group's range was narrowed to
  # the intersection with the new alert.
  alert_groups = alert_group.AlertGroup.query().fetch()
  self.assertEqual(2, len(alert_groups))
  self.assertEqual(
      (7000, 8000),
      (alert_groups[1].start_revision, alert_groups[1].end_revision))
def CreateStoppageAlert(test, row):
  """Creates a new StoppageAlert entity.

  Args:
    test: A Test entity.
    row: A Row entity; the last Row that was put before the stoppage.

  Returns:
    A new StoppageAlert entity, which has not yet been put.
  """
  stoppage_alert = StoppageAlert(
      parent=ndb.Key('StoppageAlertParent', test.test_path),
      id=row.revision,
      internal_only=test.internal_only,
      sheriff=test.sheriff)
  test.stoppage_alert = stoppage_alert.key
  test.put()
  alert_group.GroupAlerts([stoppage_alert], test.suite_name, 'StoppageAlert')
  return stoppage_alert