def testPost_BugHasNoAlerts_NotMarkRecovered(self, close_recovered_bug_mock):
  bug_id = 1234
  bug_data.Bug(id=bug_id).put()
  self.testapp.post('/auto_triage')
  self.ExecuteTaskQueueTasks('/auto_triage', auto_triage._TASK_QUEUE_NAME)
  bug = ndb.Key('Bug', bug_id).get()
  self.assertEqual(bug.status, bug_data.BUG_STATUS_CLOSED)
  self.assertFalse(close_recovered_bug_mock.called)

def testPost_WithBugIdParameter_ListsStoppageAlerts(self, mock_oauth):
  self._SetGooglerOAuth(mock_oauth)
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  row = testing_common.AddRows(utils.TestPath(test_keys[0]), {100})[0]
  alert = stoppage_alert.CreateStoppageAlert(test_keys[0].get(), row)
  alert.bug_id = 123
  alert.put()
  response = self.testapp.post('/api/alerts/bug_id/123')
  stoppage_alerts = self.GetJsonValue(response, 'stoppage_alerts')
  self.assertEqual(1, len(stoppage_alerts))

def testGet(self):
  # Put succeeded, failed, and not-yet-finished jobs in the datastore.
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  try_job.TryJob(
      bug_id=54321, rietveld_issue_id=302304, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  try_job.TryJob(
      bug_id=99999, rietveld_issue_id=100001, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  try_job.TryJob(
      bug_id=77777, buildbucket_job_id='1234567', use_buildbucket=True,
      status='started', bot='win_perf').put()
  # Create the bugs referenced by the try jobs.
  bug_data.Bug(id=12345).put()
  bug_data.Bug(id=54321).put()
  bug_data.Bug(id=99999).put()
  bug_data.Bug(id=77777).put()

  self.testapp.get('/update_bug_with_results')

  pending_jobs = try_job.TryJob.query().fetch()
  # Expect the failed and the not-yet-finished bisect jobs to remain in the
  # datastore; the succeeded job is removed.
  self.assertEqual(3, len(pending_jobs))
  self.assertEqual(54321, pending_jobs[0].bug_id)
  self.assertEqual('failed', pending_jobs[0].status)
  self.assertEqual(99999, pending_jobs[1].bug_id)
  self.assertEqual(77777, pending_jobs[2].bug_id)
  self.assertEqual('started', pending_jobs[1].status)
  self.assertEqual('started', pending_jobs[2].status)
  self.assertEqual('bisect', pending_jobs[0].job_type)
  self.assertEqual('bisect', pending_jobs[1].job_type)
  self.assertEqual('bisect', pending_jobs[2].job_type)

def testPost_WithBugIdParameter(self):
  sheriff_key = self._AddSheriff()
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  self._AddAnomalyEntities(
      [(200, 300), (100, 200), (400, 500)], test_keys[0], sheriff_key,
      bug_id=123)
  self._AddAnomalyEntities([(150, 250)], test_keys[0], sheriff_key)
  response = self.testapp.post('/group_report?bug_id=123')
  alert_list = self.GetJsonValue(response, 'alert_list')
  self.assertEqual(3, len(alert_list))

def _CreateBug(self, summary, description, labels, components, urlsafe_keys):
  """Creates a bug, associates it with the alerts, and sends an HTML response.

  Args:
    summary: The new bug summary string.
    description: The new bug description string.
    labels: List of label strings for the new bug.
    components: List of component strings for the new bug.
    urlsafe_keys: Comma-separated alert keys in urlsafe format.
  """
  alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
  alerts = ndb.get_multi(alert_keys)

  if not description:
    description = 'See the link to graphs below.'

  milestone_label = _MilestoneLabel(alerts)
  if milestone_label:
    labels.append(milestone_label)

  # Only project members (@chromium.org accounts) can be owners of bugs.
  owner = self.request.get('owner')
  if owner and not owner.endswith('@chromium.org'):
    self.RenderHtml('bug_result.html', {
        'error': 'Owner email address must end with @chromium.org.'
    })
    return

  http = oauth2_decorator.DECORATOR.http()
  service = issue_tracker_service.IssueTrackerService(http=http)

  bug_id = service.NewBug(
      summary, description, labels=labels, components=components, owner=owner)
  if not bug_id:
    self.RenderHtml('bug_result.html', {'error': 'Error creating bug!'})
    return

  bug_data.Bug(id=bug_id).put()
  for alert_entity in alerts:
    alert_entity.bug_id = bug_id
  ndb.put_multi(alerts)

  comment_body = _AdditionalDetails(bug_id, alerts)
  service.AddBugComment(bug_id, comment_body)

  template_params = {'bug_id': bug_id}
  if all(k.kind() == 'Anomaly' for k in alert_keys):
    bisect_result = auto_bisect.StartNewBisectForBug(bug_id)
    if 'error' in bisect_result:
      template_params['bisect_error'] = bisect_result['error']
    else:
      template_params.update(bisect_result)
  self.RenderHtml('bug_result.html', template_params)

def testPost_WithProjectIdMissing(self):
  subscription = self._Subscription()
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  self._AddAnomalyEntities(
      [(200, 300), (100, 200), (400, 500)], test_keys[0], [subscription],
      bug_id=123, project_id='chromium')
  self._AddAnomalyEntities([(150, 250)], test_keys[0], [subscription])
  response = self.testapp.post('/group_report?bug_id=123')
  alert_list = self.GetJsonValue(response, 'alert_list')
  self.assertEqual(3, len(alert_list))

def testPost_WithBugIdParameter(self):
  self.SetCurrentUserOAuth(testing_common.INTERNAL_USER)
  sheriff_key = self._AddSheriff()
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  self._AddAnomalyEntities(
      [(200, 300), (100, 200), (400, 500)], test_keys[0], sheriff_key,
      bug_id=123)
  self._AddAnomalyEntities([(150, 250)], test_keys[0], sheriff_key)
  response = self.Post('/api/alerts/bug_id/123')
  anomalies = self.GetJsonValue(response, 'anomalies')
  self.assertEqual(3, len(anomalies))

def testPost_WithBugIdParameter(self, mock_oauth):
  self._SetGooglerOAuth(mock_oauth)
  sheriff_key = self._AddSheriff()
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  self._AddAnomalyEntities(
      [(200, 300), (100, 200), (400, 500)], test_keys[0], sheriff_key,
      bug_id=123)
  self._AddAnomalyEntities([(150, 250)], test_keys[0], sheriff_key)
  response = self.testapp.post('/api/alerts/bug_id/123')
  anomalies = self.GetJsonValue(response, 'anomalies')
  self.assertEqual(3, len(anomalies))

def testPost_WithBugIdForBugThatHasOwner_ShowsOwnerInfo(self):
  sheriff_key = self._AddSheriff()
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  test_key = test_keys[0]
  test_path_parts = utils.TestPath(test_key).split('/')
  test_suite_path = '%s/%s' % (test_path_parts[0], test_path_parts[2])
  test_owner.AddOwnerFromDict({test_suite_path: ['*****@*****.**']})
  self._AddAnomalyEntities([(150, 250)], test_key, sheriff_key, bug_id=123)
  response = self.testapp.post('/group_report?bug_id=123')
  owner_info = self.GetJsonValue(response, 'owner_info')
  self.assertEqual('*****@*****.**', owner_info[0]['email'])

def _AddAnomalyForTest(self, sheriff_key, test_key, revision, median_before,
                       median_after, bug_id=None):
  """Adds a sample Anomaly and returns the key."""
  # Only create a Bug entity for real (positive) bug IDs; bug_id may be None
  # or a negative placeholder value.
  if bug_id and bug_id > 0:
    bug = ndb.Key('Bug', bug_id).get()
    if not bug:
      bug_data.Bug(id=bug_id).put()
  return anomaly.Anomaly(
      start_revision=revision,
      end_revision=revision,
      test=test_key,
      median_before_anomaly=median_before,
      median_after_anomaly=median_after,
      bug_id=bug_id,
      sheriff=sheriff_key).put()

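# A minimal usage sketch (not part of the original suite) for the helper
# above. It assumes fixture helpers like _AddSheriff/_AddTests (seen in other
# tests in this section) are available on the same test case; the revision,
# medians, and bug ID below are illustrative values.
def testAddAnomalyForTest_Sketch(self):
  sheriff_key = self._AddSheriff()
  test_keys = self._AddTests()
  anomaly_key = self._AddAnomalyForTest(
      sheriff_key, test_keys[0], revision=10000,
      median_before=100, median_after=200, bug_id=12345)
  self.assertEqual(12345, anomaly_key.get().bug_id)
  # The helper also creates the referenced Bug entity on demand.
  self.assertIsNotNone(ndb.Key('Bug', 12345).get())
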
def testGet_BisectJobWithPartialResults(self, mock_update_bug):
  # Put a bisect job that will fail with partial results in the datastore.
  try_job.TryJob(
      bug_id=54321, rietveld_issue_id=200039, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  # Create bug.
  bug_data.Bug(id=54321).put()

  self.testapp.get('/update_bug_with_results')

  pending_jobs = try_job.TryJob.query().fetch()
  self.assertEqual(1, len(pending_jobs))
  self.assertEqual('failed', pending_jobs[0].status)
  mock_update_bug.assert_called_once_with(
      54321, _EXPECTED_BISECT_LOG_PARTIAL_RESULT, labels=None)

def testGet_InternalOnlyTryJob_AddsInternalOnlyBugLabel(
    self, mock_update_bug):
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
      status='started', bot='win_perf', internal_only=True).put()
  # Create bug.
  bug_data.Bug(id=12345).put()

  self.testapp.get('/update_bug_with_results')

  mock_update_bug.assert_called_once_with(
      mock.ANY, mock.ANY,
      cc_list=mock.ANY,
      merge_issue=None,
      labels=['Restrict-View-Google'],
      owner=mock.ANY)

def testGet_FailedRevisionResponse(self, mock_add_bug):
  # When a Rietveld CL fails to return revision data, only the CL owner is
  # added to the CC list.
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200038, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  # Create bug.
  bug_data.Bug(id=12345).put()

  self.testapp.get('/update_bug_with_results')

  mock_add_bug.assert_called_once_with(
      mock.ANY, mock.ANY,
      cc_list=['*****@*****.**'],
      merge_issue=None,
      labels=None,
      owner='*****@*****.**')
  pending_jobs = try_job.TryJob.query().fetch()
  self.assertEqual(0, len(pending_jobs))

def testGet_BotInfoInBisectResults(self, mock_update_bug):
  # The bisect results posted to the bug should include information about
  # the bisect bot.
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  # Create bug.
  bug_data.Bug(id=12345).put()

  self.testapp.get('/update_bug_with_results')

  mock_update_bug.assert_called_once_with(
      12345, _EXPECTED_BISECT_RESULTS_ON_BUG,
      cc_list=['*****@*****.**', '*****@*****.**'],
      merge_issue=None,
      labels=None,
      owner='*****@*****.**')

def testGet_BisectCulpritHasMultipleAuthors_NoneCCd(self, mock_update_bug):
  # When a bisect finds multiple culprits by different authors for a perf
  # regression, none of the CL owners should be cc'ed on the issue update.
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200035, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  bug_data.Bug(id=12345).put()

  self.testapp.get('/update_bug_with_results')

  mock_update_bug.assert_called_once_with(
      mock.ANY, mock.ANY,
      cc_list=[],
      merge_issue=None,
      labels=None,
      owner=None)
  pending_jobs = try_job.TryJob.query().fetch()
  self.assertEqual(0, len(pending_jobs))

def testPost_WithBugIdParameterExternalUser_ExternalData(
    self, mock_oauth, mock_utils):
  mock_oauth.get_current_user.return_value = NON_GOOGLE_USER
  mock_oauth.get_client_id.return_value = (
      api_auth.OAUTH_CLIENT_ID_WHITELIST[0])
  mock_utils.return_value = False
  datastore_hooks.InstallHooks()
  sheriff_key = self._AddSheriff()
  test_keys = self._AddTests()
  bug_data.Bug(id=123).put()
  self._AddAnomalyEntities(
      [(200, 300), (100, 200), (400, 500)], test_keys[0], sheriff_key,
      bug_id=123, internal_only=True)
  self._AddAnomalyEntities(
      [(150, 250)], test_keys[0], sheriff_key, bug_id=123)
  response = self.testapp.post('/api/alerts/bug_id/123')
  anomalies = self.GetJsonValue(response, 'anomalies')
  self.assertEqual(1, len(anomalies))

def testGet_MultipleCulpritsSameAuthor_AssignsAuthor(
    self, mock_update_bug):
  # When a bisect finds multiple culprits by the same author for a perf
  # regression, that author should be cc'ed and assigned as the owner.
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200036, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  bug_data.Bug(id=12345).put()

  self.testapp.get('/update_bug_with_results')

  mock_update_bug.assert_called_once_with(
      mock.ANY, mock.ANY,
      cc_list=['*****@*****.**', '*****@*****.**'],
      merge_issue=None,
      labels=None,
      owner='*****@*****.**')
  pending_jobs = try_job.TryJob.query().fetch()
  self.assertEqual(0, len(pending_jobs))

def testGet_BisectCulpritHasSingleAuthor_AssignsAuthor(
    self, mock_update_bug):
  # When a bisect finds a single culprit for a perf regression, the author
  # and reviewer of the CL should be cc'ed on the issue update.
  try_job.TryJob(
      bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
      status='started', bot='win_perf').put()
  # Create bug.
  bug_data.Bug(id=12345).put()

  self.testapp.get('/update_bug_with_results')

  mock_update_bug.assert_called_once_with(
      mock.ANY, mock.ANY,
      cc_list=['*****@*****.**', '*****@*****.**'],
      merge_issue=None,
      labels=None,
      owner='*****@*****.**')
  pending_jobs = try_job.TryJob.query().fetch()
  self.assertEqual(0, len(pending_jobs))

def testPerformBisect(self):
  self.SetCurrentUser('*****@*****.**')

  # Fake Rietveld auth info.
  cfg = rietveld_service.RietveldConfig(
      id='default_rietveld_config',
      client_email='*****@*****.**',
      service_account_key='Fake Account Key',
      server_url='https://test-rietveld.appspot.com')
  cfg.put()

  # Create bug.
  bug_data.Bug(id=12345).put()

  query_parameters = {
      'bisect_bot': 'win_perf_bisect',
      'suite': 'dromaeo.jslibstylejquery',
      'metric': 'jslib/jslib',
      'good_revision': '215806',
      'bad_revision': '215828',
      'repeat_count': '20',
      'max_time_minutes': '20',
      'truncate_percent': '25',
      'bug_id': 12345,
      'use_archive': '',
      'step': 'perform-bisect',
  }
  global _EXPECTED_CONFIG_DIFF
  global _TEST_EXPECTED_BOT
  global _TEST_EXPECTED_CONFIG_CONTENTS
  _EXPECTED_CONFIG_DIFF = _EXPECTED_BISECT_CONFIG_DIFF
  _TEST_EXPECTED_BOT = 'win_perf_bisect'
  _TEST_EXPECTED_CONFIG_CONTENTS = _BISECT_CONFIG_CONTENTS

  response = self.testapp.post('/start_try_job', query_parameters)
  self.assertEqual(
      json.dumps({'issue_id': '33001',
                  'issue_url': 'https://test-rietveld.appspot.com/33001'}),
      response.body)

def testPerformBisectWithArchive(self, _):
  self.SetCurrentUser('*****@*****.**')

  # Create bug.
  bug_data.Bug(id=12345).put()

  query_parameters = {
      'bisect_bot': 'linux_perf_tester',
      'suite': 'dromaeo.jslibstylejquery',
      'metric': 'jslib/jslib',
      'good_revision': '215806',
      'bad_revision': '215828',
      'repeat_count': '20',
      'max_time_minutes': '20',
      'bug_id': 12345,
      'bisect_mode': 'mean',
      'step': 'perform-bisect',
  }
  response = self.testapp.post('/start_try_job', query_parameters)
  self.assertEqual(
      json.dumps({'issue_id': '1234567',
                  'issue_url': ('https://my-dashboard.appspot.com'
                                '/buildbucket_job_status/1234567')}),
      response.body)

def _AddAlertsToDataStore(self):
  """Adds sample data, including triaged and non-triaged alerts."""
  key_map = {}

  sheriff_key = sheriff.Sheriff(
      id='Chromium Perf Sheriff', email='*****@*****.**').put()
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'mean_frame_time': {},
      }
  })
  first_paint = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling-benchmark/first_paint')
  mean_frame_time = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time')

  # By default, all Test entities have an improvement_direction of UNKNOWN,
  # meaning that neither direction is considered an improvement.
  # Here we set the improvement direction so that some anomalies are
  # considered improvements.
  for test_key in [first_paint, mean_frame_time]:
    test = test_key.get()
    test.improvement_direction = anomaly.DOWN
    test.put()

  # Add some (12) non-triaged alerts.
  for end_rev in range(10000, 10120, 10):
    test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
    anomaly_entity = anomaly.Anomaly(
        start_revision=end_rev - 5,
        end_revision=end_rev,
        test=test_key,
        median_before_anomaly=100,
        median_after_anomaly=200,
        sheriff=sheriff_key)
    anomaly_entity.SetIsImprovement()
    anomaly_key = anomaly_entity.put()
    key_map[end_rev] = anomaly_key.urlsafe()

  # Add some (2) already-triaged alerts.
  for end_rev in range(10120, 10140, 10):
    test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
    bug_id = -1 if end_rev % 20 == 0 else 12345
    anomaly_entity = anomaly.Anomaly(
        start_revision=end_rev - 5,
        end_revision=end_rev,
        test=test_key,
        median_before_anomaly=100,
        median_after_anomaly=200,
        bug_id=bug_id,
        sheriff=sheriff_key)
    anomaly_entity.SetIsImprovement()
    anomaly_key = anomaly_entity.put()
    key_map[end_rev] = anomaly_key.urlsafe()
    if bug_id > 0:
      bug_data.Bug(id=bug_id).put()

  # Add some (6) non-triaged improvements.
  for end_rev in range(10140, 10200, 10):
    test_key = mean_frame_time
    anomaly_entity = anomaly.Anomaly(
        start_revision=end_rev - 5,
        end_revision=end_rev,
        test=test_key,
        median_before_anomaly=200,
        median_after_anomaly=100,
        sheriff=sheriff_key)
    anomaly_entity.SetIsImprovement()
    anomaly_key = anomaly_entity.put()
    self.assertTrue(anomaly_entity.is_improvement)
    key_map[end_rev] = anomaly_key.urlsafe()

  return key_map

def _AddTryJob(self, bug_id, status, bot, **kwargs):
  job = try_job.TryJob(bug_id=bug_id, status=status, bot=bot, **kwargs)
  job.put()
  bug_data.Bug(id=bug_id).put()
  return job

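# A minimal usage sketch (not part of the original tests) for the helper
# above; the bug IDs, statuses, and bot name are illustrative assumptions.
def testAddTryJob_Sketch(self):
  self._AddTryJob(11111, 'started', 'win_perf')
  self._AddTryJob(22222, 'failed', 'win_perf', use_buildbucket=True)
  jobs = try_job.TryJob.query().fetch()
  self.assertEqual(2, len(jobs))
  self.assertEqual({'started', 'failed'}, {j.status for j in jobs})
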
def _CreateBug(self, summary, description, labels, components, urlsafe_keys):
  """Creates a bug, associates it with the alerts, and sends an HTML response.

  Args:
    summary: The new bug summary string.
    description: The new bug description string.
    labels: List of label strings for the new bug.
    components: List of component strings for the new bug.
    urlsafe_keys: Comma-separated alert keys in urlsafe format.
  """
  # Only project members (@chromium.org accounts) can be owners of bugs.
  owner = self.request.get('owner')
  if owner and not owner.endswith('@chromium.org'):
    self.RenderHtml(
        'bug_result.html',
        {'error': 'Owner email address must end with @chromium.org.'})
    return

  alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
  alerts = ndb.get_multi(alert_keys)

  if not description:
    description = 'See the link to graphs below.'

  milestone_label = _MilestoneLabel(alerts)
  if milestone_label:
    labels.append(milestone_label)

  cc = self.request.get('cc')

  http = oauth2_decorator.DECORATOR.http()
  user_issue_tracker_service = issue_tracker_service.IssueTrackerService(http)

  new_bug_response = user_issue_tracker_service.NewBug(
      summary, description, labels=labels, components=components, owner=owner,
      cc=cc)
  if 'error' in new_bug_response:
    self.RenderHtml('bug_result.html', {'error': new_bug_response['error']})
    return

  bug_id = new_bug_response['bug_id']
  bug_data.Bug(id=bug_id).put()

  alert_group.ModifyAlertsAndAssociatedGroups(alerts, bug_id=bug_id)

  comment_body = _AdditionalDetails(bug_id, alerts)
  # Add the bug comment with the service account, so that there are no
  # permissions issues.
  dashboard_issue_tracker_service = issue_tracker_service.IssueTrackerService(
      utils.ServiceAccountHttp())
  dashboard_issue_tracker_service.AddBugComment(bug_id, comment_body)

  template_params = {'bug_id': bug_id}
  if all(k.kind() == 'Anomaly' for k in alert_keys):
    logging.info('Kicking off bisect for bug %s', bug_id)
    bisect_result = auto_bisect.StartNewBisectForBug(bug_id)
    if 'error' in bisect_result:
      logging.info('Failed to kick off bisect for bug %s', bug_id)
      template_params['bisect_error'] = bisect_result['error']
    else:
      logging.info('Successfully kicked off bisect for bug %s', bug_id)
      template_params.update(bisect_result)
  else:
    kinds = {k.kind() for k in alert_keys}
    logging.info(
        'Didn\'t kick off bisect for bug id %s because alerts had kinds %s',
        bug_id, list(kinds))
  self.RenderHtml('bug_result.html', template_params)

def FileBug(http, owner, cc, summary, description, labels, components,
            urlsafe_keys, needs_bisect=True):
  """Files a new bug, associates the alerts with it, and maybe starts a bisect."""
  alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
  alerts = ndb.get_multi(alert_keys)

  if not description:
    description = 'See the link to graphs below.'

  milestone_label = _MilestoneLabel(alerts)
  if milestone_label:
    labels.append(milestone_label)

  user_issue_tracker_service = issue_tracker_service.IssueTrackerService(http)
  new_bug_response = user_issue_tracker_service.NewBug(
      summary, description, labels=labels, components=components, owner=owner,
      cc=[email for email in cc.split(',') if email.strip()])
  if 'error' in new_bug_response:
    return {'error': new_bug_response['error']}

  bug_id = new_bug_response['bug_id']
  bug_data.Bug(id=bug_id).put()

  for a in alerts:
    a.bug_id = bug_id
  ndb.put_multi(alerts)

  comment_body = _AdditionalDetails(bug_id, alerts)
  # Add the bug comment with the service account, so that there are no
  # permissions issues.
  dashboard_issue_tracker_service = issue_tracker_service.IssueTrackerService(
      utils.ServiceAccountHttp())
  dashboard_issue_tracker_service.AddBugComment(bug_id, comment_body)

  template_params = {'bug_id': bug_id}
  if all(k.kind() == 'Anomaly' for k in alert_keys):
    logging.info('Kicking off bisect for bug %s', bug_id)
    culprit_rev = _GetSingleCLForAnomalies(alerts)
    if culprit_rev is not None:
      commit_info = _GetCommitInfoForAlert(alerts[0])
      if commit_info:
        # A single commit was identified: assign the bug to its author and
        # skip the bisect unless the commit is an autoroll.
        author = commit_info['author']['email']
        message = commit_info['message']
        if not utils.GetSheriffForAutorollCommit(author, message):
          needs_bisect = False
        _AssignBugToCLAuthor(bug_id, commit_info,
                             dashboard_issue_tracker_service)
    if needs_bisect:
      bisect_result = auto_bisect.StartNewBisectForBug(bug_id)
      if 'error' in bisect_result:
        logging.info('Failed to kick off bisect for bug %s', bug_id)
        template_params['bisect_error'] = bisect_result['error']
      else:
        logging.info('Successfully kicked off bisect for bug %s', bug_id)
        template_params.update(bisect_result)
  else:
    kinds = {k.kind() for k in alert_keys}
    logging.info(
        'Didn\'t kick off bisect for bug id %s because alerts had kinds %s',
        bug_id, list(kinds))
  return template_params

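# A minimal caller sketch (not from the original module), assuming the caller
# already holds an authorized `http` object and a list of alert entities; the
# owner, label, and component values below are illustrative only.
def _FileBugForAlerts(http, alerts):
  urlsafe_keys = [a.key.urlsafe() for a in alerts]
  return FileBug(
      http=http,
      owner='someone@chromium.org',
      cc='',  # No extra CCs; FileBug filters out empty entries.
      summary='Performance regression in scrolling-benchmark',
      description='',  # FileBug substitutes a default description.
      labels=['Type-Bug-Regression'],
      components=['Blink>Scroll'],
      urlsafe_keys=urlsafe_keys)
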
def _AddAlertsToDataStore(self):
  """Adds sample data, including triaged and non-triaged alerts."""
  key_map = {}

  subscription = Subscription(
      name='Chromium Perf Sheriff',
      notification_email='*****@*****.**',
  )
  # We still need sheriff information here until sheriff-config provides a
  # way to fetch the subscriber list.
  sheriff.Sheriff(
      id='Chromium Perf Sheriff', email='*****@*****.**').put()
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'first_paint_ref': {},
          'mean_frame_time': {},
          'mean_frame_time_ref': {},
      }
  })
  first_paint = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling-benchmark/first_paint')
  mean_frame_time = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time')

  # By default, all TestMetadata entities have an improvement_direction of
  # UNKNOWN, meaning that neither direction is considered an improvement.
  # Here we set the improvement direction so that some anomalies are
  # considered improvements.
  for test_key in [first_paint, mean_frame_time]:
    test = test_key.get()
    test.improvement_direction = anomaly.DOWN
    test.put()

  # Add some (12) non-triaged alerts.
  for end_rev in range(10000, 10120, 10):
    test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
    ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
    anomaly_entity = anomaly.Anomaly(
        start_revision=end_rev - 5,
        end_revision=end_rev,
        test=test_key,
        median_before_anomaly=100,
        median_after_anomaly=200,
        ref_test=ref_test_key,
        subscriptions=[subscription],
        subscription_names=[subscription.name],
    )
    anomaly_entity.SetIsImprovement()
    anomaly_key = anomaly_entity.put()
    key_map[end_rev] = anomaly_key.urlsafe()

  # Add some (2) already-triaged alerts.
  for end_rev in range(10120, 10140, 10):
    test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
    ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
    bug_id = -1 if end_rev % 20 == 0 else 12345
    anomaly_entity = anomaly.Anomaly(
        start_revision=end_rev - 5,
        end_revision=end_rev,
        test=test_key,
        median_before_anomaly=100,
        median_after_anomaly=200,
        ref_test=ref_test_key,
        bug_id=bug_id,
        subscriptions=[subscription],
        subscription_names=[subscription.name],
    )
    anomaly_entity.SetIsImprovement()
    anomaly_key = anomaly_entity.put()
    key_map[end_rev] = anomaly_key.urlsafe()
    if bug_id > 0:
      bug_data.Bug(id=bug_id).put()

  # Add some (6) non-triaged improvements.
  for end_rev in range(10140, 10200, 10):
    test_key = mean_frame_time
    ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
    anomaly_entity = anomaly.Anomaly(
        start_revision=end_rev - 5,
        end_revision=end_rev,
        test=test_key,
        median_before_anomaly=200,
        median_after_anomaly=100,
        ref_test=ref_test_key,
        subscriptions=[subscription],
        subscription_names=[subscription.name],
    )
    anomaly_entity.SetIsImprovement()
    anomaly_key = anomaly_entity.put()
    self.assertTrue(anomaly_entity.is_improvement)
    key_map[end_rev] = anomaly_key.urlsafe()

  return key_map