def _AddAnomalies(self):
  """Adds a set of sample data used in the tests below."""
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'],
                          {'scrolling_benchmark': {
                              'first_paint': {}
                          }})
  first_paint_key = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/first_paint')
  first_paint_test = first_paint_key.get()
  first_paint_test.improvement_direction = anomaly.DOWN
  first_paint_test.put()

  group_keys = [
      alert_group.AlertGroup(
          start_revision=3000,
          end_revision=4000,
          alert_kind='Anomaly',
          test_suites=['scrolling_benchmark']).put(),
      alert_group.AlertGroup(
          start_revision=6000,
          end_revision=8000,
          alert_kind='Anomaly',
          test_suites=['scrolling_benchmark']).put(),
  ]
  anomaly_keys = [
      anomaly.Anomaly(
          start_revision=2000,
          end_revision=4000,
          bug_id=12345,
          test=first_paint_key).put(),
      anomaly.Anomaly(
          start_revision=3000,
          end_revision=5000,
          bug_id=12345,
          test=first_paint_key).put(),
      anomaly.Anomaly(
          start_revision=6000,
          end_revision=8000,
          bug_id=None,
          test=first_paint_key).put(),
  ]
  anomalies = ndb.get_multi(anomaly_keys)

  # Add these anomalies to groups and put them again. When anomalies are
  # put for the second time onward, the pre-put hook will be called and
  # the groups of the anomalies will be updated.
  anomalies[0].group = group_keys[0]
  anomalies[0].put()
  anomalies[1].group = group_keys[0]
  anomalies[1].put()
  anomalies[2].group = group_keys[1]
  anomalies[2].put()

  # Note that after these anomalies are added, the state of the two groups
  # is updated. Also, the first two anomalies are in the same group.
  self.assertEqual(anomalies[0].group, anomalies[1].group)
  self.assertNotEqual(anomalies[0].group, anomalies[2].group)
  return anomalies
def _AddAlertGroup(anomaly_key,
                   subscription_name=None,
                   issue=None,
                   anomalies=None,
                   status=None,
                   project_id=None,
                   bisection_ids=None):
  """Adds an AlertGroup entity for the given anomaly and returns its key."""
  anomaly_entity = anomaly_key.get()
  group = alert_group.AlertGroup(
      id=str(uuid.uuid4()),
      name=anomaly_entity.benchmark_name,
      subscription_name=subscription_name or 'sheriff',
      status=alert_group.AlertGroup.Status.untriaged,
      project_id=project_id or 'chromium',
      active=True,
      revision=alert_group.RevisionRange(
          repository='chromium',
          start=anomaly_entity.start_revision,
          end=anomaly_entity.end_revision,
      ),
      bisection_ids=bisection_ids or [],
  )
  if issue:
    group.bug = alert_group.BugInfo(
        bug_id=issue.get('id'),
        project=issue.get('projectId', 'chromium'),
    )
    group.project_id = issue.get('projectId', 'chromium')
  if anomalies:
    group.anomalies = anomalies
  if status:
    group.status = status
  return group.put()
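# A minimal usage sketch for the _AddAlertGroup helper above (not part of the
# original tests). The wrapper name _ExampleAddAlertGroupUsage, the test key
# argument, and the issue id 42 are hypothetical and only illustrate the
# expected call shape.
def _ExampleAddAlertGroupUsage(some_test_key):
  # Seed one anomaly for the group to reference.
  anomaly_key = anomaly.Anomaly(
      start_revision=1000, end_revision=2000, test=some_test_key).put()
  # Create a triaged group attached to a (hypothetical) issue 42.
  return _AddAlertGroup(
      anomaly_key,
      issue={'id': 42, 'projectId': 'chromium'},
      status=alert_group.AlertGroup.Status.triaged)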
def _AddAnomalyEntities(self,
                        revision_ranges,
                        test_key,
                        subscriptions,
                        bug_id=None,
                        group_id=None):
  """Adds a group of Anomaly entities to the datastore."""
  urlsafe_keys = []
  keys = []
  for start_rev, end_rev in revision_ranges:
    subscription_names = [s.name for s in subscriptions]
    anomaly_key = anomaly.Anomaly(
        start_revision=start_rev,
        end_revision=end_rev,
        test=test_key,
        bug_id=bug_id,
        subscription_names=subscription_names,
        subscriptions=subscriptions,
        median_before_anomaly=100,
        median_after_anomaly=200).put()
    urlsafe_keys.append(anomaly_key.urlsafe())
    keys.append(anomaly_key)
  if group_id:
    alert_group.AlertGroup(
        id=group_id,
        anomalies=keys,
    ).put()
  return urlsafe_keys
def _ProcessUngroupedAlerts():
  groups = alert_group.AlertGroup.GetAll()

  # TODO(fancl): This is an inefficient algorithm, as it's linear in the number
  # of groups. We should instead create an interval tree so that it's
  # logarithmic in the number of unique revision ranges.
  def FindGroup(group):
    for g in groups:
      if group.IsOverlapping(g):
        return g.key
    groups.append(group)
    return None

  logging.info('Processing un-grouped alerts.')
  ungrouped_list = alert_group.AlertGroup.Get('Ungrouped', None)
  if not ungrouped_list:
    alert_group.AlertGroup(name='Ungrouped', active=True).put()
    return
  ungrouped = ungrouped_list[0]
  ungrouped_anomalies = ndb.get_multi(ungrouped.anomalies)

  # Scan all ungrouped anomalies and create any missing groups. An anomaly
  # being ungrouped does not mean none of its groups exist yet, so we still
  # check whether each group has already been created. There are two cases:
  # 1. When an anomaly maps to multiple groups, only some of them may be
  #    missing.
  # 2. Groups may be created while we iterate.
  # Newly created groups won't be updated until the next iteration.
  for anomaly_entity in ungrouped_anomalies:
    anomaly_entity.groups = [
        FindGroup(g) or g.put()
        for g in alert_group.AlertGroup.GenerateAllGroupsForAnomaly(
            anomaly_entity)
    ]

  logging.info('Persisting anomalies')
  ndb.put_multi(ungrouped_anomalies)
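# The TODO above suggests an interval tree so that finding an overlapping
# group is logarithmic rather than linear. Below is a minimal sketch of that
# idea, not the dashboard's implementation: the class name _IntervalTree is
# hypothetical, it only covers the revision-range-overlap part of FindGroup,
# and it assumes each group can be reduced to a closed integer range
# (start, end) plus an arbitrary payload such as the group key.
class _IntervalTree(object):
  """Static centered interval tree over (start, end, payload) tuples."""

  def __init__(self, intervals):
    self._center = None
    self._here = []     # Intervals that contain self._center.
    self._left = None   # Subtree of intervals entirely left of the center.
    self._right = None  # Subtree of intervals entirely right of the center.
    if not intervals:
      return
    endpoints = sorted(p for i in intervals for p in (i[0], i[1]))
    self._center = endpoints[len(endpoints) // 2]
    left, right = [], []
    for interval in intervals:
      start, end, _ = interval
      if end < self._center:
        left.append(interval)
      elif start > self._center:
        right.append(interval)
      else:
        self._here.append(interval)
    self._left = _IntervalTree(left) if left else None
    self._right = _IntervalTree(right) if right else None

  def FindOverlap(self, start, end):
    """Returns the payload of some interval overlapping [start, end], or None."""
    if self._center is None:
      return None
    for s, e, payload in self._here:
      if s <= end and e >= start:
        return payload
    # Intervals left of the center all end before it, so they can only
    # overlap queries that start before the center (and symmetrically on
    # the right).
    if self._left and start < self._center:
      found = self._left.FindOverlap(start, end)
      if found is not None:
        return found
    if self._right and end > self._center:
      return self._right.FindOverlap(start, end)
    return None


# Hypothetical use inside _ProcessUngroupedAlerts: build the tree once from
# the existing groups, then probe it per candidate instead of scanning.
# Groups appended during the loop would still need a separate insert step,
# which is one reason the simple linear FindGroup above is used today.
#   tree = _IntervalTree([(g.revision.start, g.revision.end, g.key)
#                         for g in groups])
#   existing_key = tree.FindOverlap(group.revision.start, group.revision.end)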
def testGroupAlerts_WithNoAssociation_MakesNewGroup(self):
  sheriffs = self._AddSheriffs()
  tests = self._AddTests()

  # Add some anomaly groups.
  alert_group.AlertGroup(
      bug_id=None,
      start_revision=3000,
      end_revision=6000,
      alert_kind='Anomaly',
      test_suites=['scrolling_benchmark']).put()
  alert_group.AlertGroup(
      bug_id=104,
      start_revision=7000,
      end_revision=9000,
      alert_kind='Anomaly',
      test_suites=['tab_capture']).put()

  improvement_anomaly = self._CreateAnomalyForTests(
      revision_range=(1000, 2000),
      test=tests[0],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=True)
  regression_anomaly = self._CreateAnomalyForTests(
      revision_range=(1000, 2000),
      test=tests[0],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=False)

  test_suite = 'scrolling_benchmark'
  alert_group.GroupAlerts([regression_anomaly, improvement_anomaly],
                          test_suite, 'Anomaly')

  # The regression Anomaly was not grouped with a group that has a bug ID,
  # so the bug ID is not changed.
  self.assertIsNone(regression_anomaly.bug_id)

  # Improvement Anomaly should not be auto-triaged.
  self.assertIsNone(improvement_anomaly.group)

  alert_groups = alert_group.AlertGroup.query().fetch()
  self.assertEqual(3, len(alert_groups))
  self.assertEqual(
      (1000, 2000),
      (alert_groups[2].start_revision, alert_groups[2].end_revision))
  self.assertIsNone(alert_groups[2].bug_id)
  self.assertEqual(alert_groups[2].test_suites, [test_suite])
def testGroupAlerts_WithExistingGroup(self):
  sheriffs = self._AddSheriffs()
  tests = self._AddTests()

  # Add some anomaly groups.
  alert_group.AlertGroup(
      bug_id=None,
      start_revision=3000,
      end_revision=6000,
      alert_kind='Anomaly',
      test_suites=['scrolling_benchmark']).put()
  tab_capture_group = alert_group.AlertGroup(
      bug_id=104,
      start_revision=7000,
      end_revision=9000,
      alert_kind='Anomaly',
      test_suites=['tab_capture']).put()

  improvement_anomaly = self._CreateAnomalyForTests(
      revision_range=(6000, 8000),
      test=tests[1],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=True)
  regression_anomaly = self._CreateAnomalyForTests(
      revision_range=(6000, 8000),
      test=tests[1],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=False)

  alert_group.GroupAlerts([regression_anomaly, improvement_anomaly],
                          'tab_capture', 'Anomaly')

  # The regression Anomaly's bug ID is changed because it has been grouped.
  self.assertEqual(104, regression_anomaly.bug_id)
  self.assertEqual(tab_capture_group, regression_anomaly.group)

  # Improvement Anomaly should not be grouped.
  self.assertIsNone(improvement_anomaly.group)

  alert_groups = alert_group.AlertGroup.query().fetch()
  self.assertEqual(2, len(alert_groups))
  self.assertEqual(
      (7000, 8000),
      (alert_groups[1].start_revision, alert_groups[1].end_revision))
def get(self):
  groups = alert_group.AlertGroup.GetAll()
  for group in groups:
    group.Update()
    if group.status == alert_group.AlertGroup.Status.untriaged:
      group.TryTriage()
    elif group.status == alert_group.AlertGroup.Status.triaged:
      group.TryBisect()
    else:
      deadline = group.updated + datetime.timedelta(days=7)
      past_due = deadline < datetime.datetime.now()
      closed = (group.status == alert_group.AlertGroup.Status.closed)
      untriaged = (group.status == alert_group.AlertGroup.Status.untriaged)
      if past_due and (closed or untriaged):
        group.Archive()
  ndb.put_multi(groups)

  def FindGroup(group):
    for g in groups:
      if group.revision.IsOverlapping(g.revision):
        return g.key
    groups.append(group)
    return None

  ungrouped_list = alert_group.AlertGroup.Get('Ungrouped', None)
  if not ungrouped_list:
    alert_group.AlertGroup(name='Ungrouped', active=True).put()
    return
  ungrouped = ungrouped_list[0]
  ungrouped_anomalies = ndb.get_multi(ungrouped.anomalies)

  # Scan all ungrouped anomalies and create any missing groups. An anomaly
  # being ungrouped does not mean none of its groups exist yet, so we still
  # check whether each group has already been created. There are two cases:
  # 1. When an anomaly maps to multiple groups, only some of them may be
  #    missing.
  # 2. Groups may be created while we iterate.
  # Newly created groups won't be updated until the next iteration.
  for anomaly_entity in ungrouped_anomalies:
    anomaly_entity.groups = [
        FindGroup(g) or g.put()
        for g in alert_group.AlertGroup.GenerateAllGroupsForAnomaly(
            anomaly_entity)
    ]
  ndb.put_multi(ungrouped_anomalies)
def testGroupAlerts_WithExistingGroupThatHasDifferentKind_DoesntGroup(self):
  sheriffs = self._AddSheriffs()
  tests = self._AddTests()
  group_key = alert_group.AlertGroup(
      bug_id=None,
      start_revision=3000,
      end_revision=6000,
      alert_kind='OtherAlert',
      test_suites=['tab_capture']).put()
  my_alert = self._CreateAnomalyForTests(
      revision_range=(4000, 5000),
      test=tests[1],
      sheriff_key=sheriffs[0],
      bug_id=None,
      is_improvement=False)

  alert_group.GroupAlerts([my_alert], 'tab_capture', 'Anomaly')
  self.assertNotEqual(group_key, my_alert.group)
  self.assertEqual('Anomaly', my_alert.group.get().alert_kind)

  # If the alert kind that's passed when calling GroupAlerts matches
  # the alert kind of the existing group, then it will be grouped.
  alert_group.GroupAlerts([my_alert], 'tab_capture', 'OtherAlert')
  self.assertEqual(group_key, my_alert.group)
  self.assertEqual('OtherAlert', my_alert.group.get().alert_kind)
def _AddAlertGroup(anomaly_key, issue=None, anomalies=None, status=None):
  """Adds an AlertGroup entity for the given anomaly and returns its key."""
  anomaly_entity = anomaly_key.get()
  group = alert_group.AlertGroup(
      id=str(uuid.uuid4()),
      name=anomaly_entity.benchmark_name,
      project_id='chromium',
      status=alert_group.AlertGroup.Status.untriaged,
      active=True,
      revision=alert_group.RevisionRange(
          repository='chromium',
          start=anomaly_entity.start_revision,
          end=anomaly_entity.end_revision,
      ),
  )
  if issue:
    group.bug = alert_group.BugInfo(
        bug_id=issue.get('id'),
        project='chromium',
    )
  if anomalies:
    group.anomalies = anomalies
  if status:
    group.status = status
  return group.put()
def testProcessTest(self, mock_email_sheriff):
  self._AddDataForTests()
  test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
  test = utils.TestKey(test_path).get()
  test.UpdateSheriff()
  test.put()

  alert_group_key1 = alert_group.AlertGroup(
      name='scrolling_benchmark',
      subscription_name='sheriff1',
      status=alert_group.AlertGroup.Status.untriaged,
      active=True,
      revision=alert_group.RevisionRange(
          repository='chromium', start=10000, end=10070),
  ).put()
  alert_group_key2 = alert_group.AlertGroup(
      name='scrolling_benchmark',
      subscription_name='sheriff2',
      status=alert_group.AlertGroup.Status.untriaged,
      active=True,
      revision=alert_group.RevisionRange(
          repository='chromium', start=10000, end=10070),
  ).put()

  s1 = Subscription(name='sheriff1', visibility=VISIBILITY.PUBLIC)
  s2 = Subscription(name='sheriff2', visibility=VISIBILITY.PUBLIC)
  with mock.patch.object(SheriffConfigClient, 'Match',
                         mock.MagicMock(return_value=([s1, s2], None))) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.key.id())])
  self.ExecuteDeferredTasks('default')

  expected_calls = [
      mock.call([ModelMatcher('sheriff1'),
                 ModelMatcher('sheriff2')],
                ModelMatcher(
                    'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
                EndRevisionMatcher(10011)),
      mock.call([ModelMatcher('sheriff1'),
                 ModelMatcher('sheriff2')],
                ModelMatcher(
                    'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
                EndRevisionMatcher(10041)),
      mock.call([ModelMatcher('sheriff1'),
                 ModelMatcher('sheriff2')],
                ModelMatcher(
                    'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
                EndRevisionMatcher(10061))
  ]
  self.assertEqual(expected_calls, mock_email_sheriff.call_args_list)

  anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(len(anomalies), 3)
  for a in anomalies:
    self.assertEqual(a.groups, [alert_group_key1, alert_group_key2])

  def AnomalyExists(anomalies, test, percent_changed, direction,
                    start_revision, end_revision, subscription_names,
                    internal_only, units, absolute_delta, statistic):
    for a in anomalies:
      if (a.test == test and a.percent_changed == percent_changed and
          a.direction == direction and a.start_revision == start_revision and
          a.end_revision == end_revision and
          a.subscription_names == subscription_names and
          a.internal_only == internal_only and a.units == units and
          a.absolute_delta == absolute_delta and a.statistic == statistic):
        return True
    return False

  self.assertTrue(
      AnomalyExists(
          anomalies,
          test.key,
          percent_changed=100,
          direction=anomaly.UP,
          start_revision=10007,
          end_revision=10011,
          subscription_names=['sheriff1', 'sheriff2'],
          internal_only=False,
          units='ms',
          absolute_delta=50,
          statistic='avg'))

  self.assertTrue(
      AnomalyExists(
          anomalies,
          test.key,
          percent_changed=-50,
          direction=anomaly.DOWN,
          start_revision=10037,
          end_revision=10041,
          subscription_names=['sheriff1', 'sheriff2'],
          internal_only=False,
          units='ms',
          absolute_delta=-100,
          statistic='avg'))

  self.assertTrue(
      AnomalyExists(
          anomalies,
          test.key,
          percent_changed=sys.float_info.max,
          direction=anomaly.UP,
          start_revision=10057,
          end_revision=10061,
          internal_only=False,
          units='ms',
          subscription_names=['sheriff1', 'sheriff2'],
          absolute_delta=100,
          statistic='avg'))

  # This is here just to verify that AnomalyExists returns False sometimes.
  self.assertFalse(
      AnomalyExists(
          anomalies,
          test.key,
          percent_changed=100,
          direction=anomaly.DOWN,
          start_revision=10037,
          end_revision=10041,
          subscription_names=['sheriff1', 'sheriff2'],
          internal_only=False,
          units='ms',
          absolute_delta=500,
          statistic='avg'))