def _EditEntity(self):
  """Updates an existing entity based on the request parameters.

  Raises:
    request_handler.InvalidInputError: If no name was given, or no entity
        with that name exists.
  """
  entity_name = self.request.get('edit-name')
  if not entity_name:
    raise request_handler.InvalidInputError('No name given.')
  existing = self._model_class.get_by_id(entity_name)
  if not existing:
    raise request_handler.InvalidInputError(
        'Entity "%s" does not exist, cannot edit.' % entity_name)
  self._UpdateAndReportResults(existing)
def _AddEntity(self):
  """Adds a new entity according to the request parameters.

  Raises:
    request_handler.InvalidInputError: If no name was given, or an entity
        with that name already exists.
  """
  name = self.request.get('add-name')
  if not name:
    # The previous message ended mid-sentence ('...adding new '); completed
    # so the user sees a coherent error.
    raise request_handler.InvalidInputError(
        'No name given when adding new entity.')
  if self._model_class.get_by_id(name):
    raise request_handler.InvalidInputError(
        'Entity "%s" already exists, cannot add.' % name)
  entity = self._model_class(id=name)
  self._UpdateAndReportResults(entity)
def GetAlertsForKeys(keys):
  """Get alerts for |keys|, plus overlapping-revision alerts.

  Queries for anomalies whose revision range overlaps the minimum range of
  the requested alerts. (Each key corresponds to an alert.)

  Args:
    keys: A list of urlsafe strings for Anomaly keys (the function indexes
        this sequence directly; it does not split a comma-separated string).

  Returns:
    list of anomaly.Anomaly
  """
  urlsafe_keys = keys
  try:
    ndb_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
  # Errors that can be thrown here include ProtocolBufferDecodeError
  # in google.net.proto.ProtocolBuffer. We want to catch any errors here
  # because they're almost certainly urlsafe key decoding errors.
  except Exception:
    raise request_handler.InvalidInputError('Invalid Anomaly key given.')
  requested_anomalies = utils.GetMulti(ndb_keys)
  for i, entity in enumerate(requested_anomalies):
    if entity is None:
      raise request_handler.InvalidInputError(
          'No Anomaly found for key %s.' % urlsafe_keys[i])
  if not requested_anomalies:
    raise request_handler.InvalidInputError('No anomalies found.')
  sheriff_key = requested_anomalies[0].sheriff
  min_range = utils.MinimumAlertRange(requested_anomalies)
  if not min_range:
    return requested_anomalies
  overlapping, _, _ = anomaly.Anomaly.QueryAsync(
      sheriff=sheriff_key.id(), limit=_QUERY_LIMIT).get_result()
  # Drop anomalies already requested, and those marked invalid or ignored
  # (negative bug IDs); keep untriaged alerts and those with real bugs.
  requested_key_set = {a.key for a in requested_anomalies}
  overlapping = [
      a for a in overlapping
      if a.key not in requested_key_set
      and (a.bug_id is None or a.bug_id > 0)
  ]
  overlapping = _GetOverlaps(overlapping, min_range[0], min_range[1])
  return requested_anomalies + overlapping
def _ShowAlertsForKeys(self, keys):
  """Shows alerts for |keys|, plus alerts with overlapping revision ranges.

  The |keys| parameter for group_report is a comma-separated list of urlsafe
  strings for Keys for Anomaly entities. (Each key corresponds to an alert)

  Args:
    keys: Comma-separated list of urlsafe strings for Anomaly keys.
  """
  urlsafe_keys = keys.split(',')
  try:
    anomaly_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
  # Errors that can be thrown here include ProtocolBufferDecodeError
  # in google.net.proto.ProtocolBuffer. We want to catch any errors here
  # because they're almost certainly urlsafe key decoding errors.
  except Exception:
    raise request_handler.InvalidInputError('Invalid Anomaly key given.')
  requested_anomalies = utils.GetMulti(anomaly_keys)
  for i, entity in enumerate(requested_anomalies):
    if entity is None:
      raise request_handler.InvalidInputError(
          'No Anomaly found for key %s.' % urlsafe_keys[i])
  if not requested_anomalies:
    raise request_handler.InvalidInputError('No anomalies found.')
  sheriff_key = requested_anomalies[0].sheriff
  min_range = utils.MinimumAlertRange(requested_anomalies)
  if min_range:
    overlapping = anomaly.Anomaly.query(
        anomaly.Anomaly.sheriff == sheriff_key).order(
            -anomaly.Anomaly.timestamp).fetch(limit=_QUERY_LIMIT)
    # Drop alerts marked invalid or ignored (negative bug IDs); keep
    # untriaged alerts and those associated with real bugs.
    overlapping = [
        a for a in overlapping if a.bug_id is None or a.bug_id > 0
    ]
    overlapping = _GetOverlaps(overlapping, min_range[0], min_range[1])
    # Make sure alerts in specified param "keys" are included.
    shown_keys = {a.key for a in overlapping}
    overlapping.extend(
        a for a in requested_anomalies if a.key not in shown_keys)
    anomalies = overlapping
  else:
    anomalies = requested_anomalies
  self._ShowAlerts(anomalies)
def _GetAndValidateConfigContents(self):
  """Parses the 'config' request parameter as a JSON dict.

  Returns:
    The parsed config dict.

  Raises:
    request_handler.InvalidInputError: If the parameter is missing, is not
        valid JSON, or does not parse to a dict.
  """
  raw_config = self.request.get('config')
  if not raw_config:
    raise request_handler.InvalidInputError('No config contents given.')
  try:
    parsed = json.loads(raw_config)
  except (ValueError, TypeError) as json_parse_error:
    raise request_handler.InvalidInputError(str(json_parse_error))
  if not isinstance(parsed, dict):
    raise request_handler.InvalidInputError('Config was not a dict.')
  return parsed
def GetAlertsAroundRevision(rev):
  """Gets the alerts whose revision range includes the given revision.

  Args:
    rev: A revision number, as a string.

  Returns:
    tuple (alerts, extra_columns)
  """
  if not _IsInt(rev):
    raise request_handler.InvalidInputError('Invalid rev "%s".' % rev)
  revision = int(rev)
  # The datastore forbids two inequality filters on different properties
  # (start_revision and end_revision), so query by end_revision and filter
  # by start_revision in memory.
  candidates = anomaly.Anomaly.query(
      anomaly.Anomaly.end_revision >= revision).order(
          anomaly.Anomaly.end_revision).fetch(limit=_QUERY_LIMIT)
  anomalies = [a for a in candidates if a.start_revision <= revision]
  stoppage_alerts = stoppage_alert.StoppageAlert.query(
      stoppage_alert.StoppageAlert.end_revision == revision).fetch(
          limit=_DISPLAY_LIMIT)
  # Always show anomalies extra_columns for alerts around revision.
  return anomalies + stoppage_alerts, 'anomalies'
def post(self):
  """Updates the user-selected anomaly threshold configuration.

  Request parameters:
    add-edit: Either 'add' if adding a new config, or 'edit'.
    add-name: A new anomaly config name, if adding one.
    edit-name: An existing anomaly config name, if editing one.
    patterns: Newline-separated list of test path patterns to monitor.

  Depending on the specific sub-class, this will also take other parameters
  for specific properties of the entity being edited.
  """
  try:
    action = self.request.get('add-edit')
    if action == 'add':
      self._AddEntity()
    elif action == 'edit':
      self._EditEntity()
    else:
      raise request_handler.InvalidInputError('Invalid value for add-edit.')
  except request_handler.InvalidInputError as error:
    # Surface the error on the results page, tagged with the model class so
    # the user knows which kind of entity the edit was for.
    message = '%s Model class: %s' % (str(error), self._model_class.__name__)
    self.RenderHtml('result.html', {'errors': [message]})
def GetAlertsWithBugId(bug_id):
  """Get alerts for |bug_id|.

  Args:
    bug_id: A bug ID (as an int or string). Could be also be a pseudo-bug ID,
        such as -1 or -2 indicating invalid or ignored.

  Returns:
    tuple (alerts, extra_columns)
  """
  if not _IsInt(bug_id):
    raise request_handler.InvalidInputError('Invalid bug ID "%s".' % bug_id)
  bug_id = int(bug_id)
  anomalies = anomaly.Anomaly.query(
      anomaly.Anomaly.bug_id == bug_id).fetch(limit=_DISPLAY_LIMIT)
  stoppage_alerts = stoppage_alert.StoppageAlert.query(
      stoppage_alert.StoppageAlert.bug_id == bug_id).fetch(
          limit=_DISPLAY_LIMIT)
  # If there are any anomalies on the bug, use anomaly extra columns;
  # otherwise fall back to stoppage-alert columns, or None if neither.
  if anomalies:
    extra_columns = 'anomalies'
  elif stoppage_alerts:
    extra_columns = 'stoppage_alerts'
  else:
    extra_columns = None
  return anomalies + stoppage_alerts, extra_columns
def _MakeBuildbucketBisectJob(bisect_job):
  """Creates a bisect job object that the buildbucket service can use.

  Args:
    bisect_job: The entity (try_job.TryJob) off of which to create the
        buildbucket job.

  Returns:
    A buildbucket_job.BisectJob object populated with the necessary
    attributes to pass it to the buildbucket service to start the job.

  Raises:
    request_handler.InvalidInputError: If the try job is not a bisect job.
  """
  config = bisect_job.GetConfigDict()
  if bisect_job.job_type not in ('bisect', 'bisect-fyi'):
    raise request_handler.InvalidInputError(
        'Recipe only supports bisect jobs at this time.')
  # Recipe bisect supports 'perf' and 'return_code' test types only.
  # TODO (prasadv): Update bisect form on dashboard to support test_types.
  test_type = ('return_code'
               if config.get('bisect_mode') == 'return_code' else 'perf')
  return buildbucket_job.BisectJob(
      try_job_id=bisect_job.key.id(),
      good_revision=config['good_revision'],
      bad_revision=config['bad_revision'],
      test_command=config['command'],
      metric=config['metric'],
      repeats=config['repeat_count'],
      timeout_minutes=config['max_time_minutes'],
      bug_id=bisect_job.bug_id,
      gs_bucket='chrome-perf',
      recipe_tester_name=config['recipe_tester_name'],
      test_type=test_type,
      required_initial_confidence=config.get('required_initial_confidence'))
def post(self):
  """Returns dynamic data for /group_report with some set of alerts.

  The set of alerts is determined by the keys, bug ID or revision given.

  Request parameters:
    keys: A comma-separated list of urlsafe Anomaly keys (optional).
    bug_id: A bug number on the Chromium issue tracker (optional).
    rev: A revision number (optional).

  Outputs:
    JSON for the /group_report page XHR request.
  """
  bug_id = self.request.get('bug_id')
  keys = self.request.get('keys')
  rev = self.request.get('rev')
  try:
    # Dispatch to the first handler whose parameter was supplied;
    # precedence is bug_id, then keys, then rev.
    dispatch = (
        (bug_id, self._ShowAlertsWithBugId),
        (keys, self._ShowAlertsForKeys),
        (rev, self._ShowAlertsAroundRevision),
    )
    for value, handler in dispatch:
      if value:
        handler(value)
        break
    else:
      # TODO(qyearsley): Instead of just showing an error here, show a form
      # where the user can input a bug ID or revision.
      raise request_handler.InvalidInputError('No anomalies specified.')
  except request_handler.InvalidInputError as error:
    self.response.out.write(json.dumps({'error': str(error)}))
def post(self):
  """Returns dynamic data for /group_report with some set of alerts.

  The set of alerts is determined by the sid, keys, bug ID, or revision
  given.

  Request parameters:
    keys: A comma-separated list of urlsafe Anomaly keys (optional).
    bug_id: A bug number on the Chromium issue tracker (optional).
    rev: A revision number (optional).
    sid: A hash of a group of keys from /short_uri (optional).

  Outputs:
    JSON for the /group_report page XHR request.
  """
  bug_id = self.request.get('bug_id')
  rev = self.request.get('rev')
  keys = self.request.get('keys')
  sid = self.request.get('sid')
  # sid takes precedence.
  if sid:
    page = ndb.Key(page_state.PageState, sid).get()
    if page:
      keys = json.loads(page.value)
  elif keys:
    keys = keys.split(',')
  try:
    if bug_id:
      alert_list = GetAlertsWithBugId(bug_id)
    elif keys:
      alert_list = GetAlertsForKeys(keys)
    elif rev:
      alert_list = GetAlertsAroundRevision(rev)
    else:
      # TODO(qyearsley): Instead of just showing an error here, show a form
      # where the user can input a bug ID or revision.
      raise request_handler.InvalidInputError('No anomalies specified.')
    # Only Anomaly entities are converted to dicts for the response.
    anomaly_dicts = alerts.AnomalyDicts(
        [a for a in alert_list if a.key.kind() == 'Anomaly'])
    values = {
        'alert_list': anomaly_dicts[:_DISPLAY_LIMIT],
        'test_suites': update_test_suites.FetchCachedTestSuites(),
    }
    if bug_id:
      values['bug_id'] = bug_id
    if keys:
      values['selected_keys'] = keys
    self.GetDynamicVariables(values)
    self.response.out.write(json.dumps(values))
  except request_handler.InvalidInputError as error:
    self.response.out.write(json.dumps({'error': str(error)}))
def testPost_RunCount1_ExceptionInPerformBisect_CustomMetricNotTicked(
    self, mock_tick, mock_perform_bisect):
  # When PerformBisect raises InvalidInputError for a failed job that has
  # already run once, the custom metric must not be ticked.
  # (mock_tick and mock_perform_bisect are injected by patch decorators
  # outside this view.)
  mock_perform_bisect.side_effect = request_handler.InvalidInputError()
  try_job.TryJob(
      bug_id=222,
      status='failed',
      last_ran_timestamp=datetime.datetime.now(),
      run_count=1).put()
  self.testapp.post('/auto_bisect')
  self.assertEqual(0, mock_tick.call_count)
def GetAlertsForGroupID(group_id):
  """Gets all alerts belonging to an AlertGroup.

  Args:
    group_id: AlertGroup ID

  Returns:
    list of anomaly.Anomaly
  """
  group = alert_group.AlertGroup.GetByID(group_id)
  if not group:
    raise request_handler.InvalidInputError(
        'Invalid AlertGroup ID "%s".' % group_id)
  return ndb.get_multi(group.anomalies)
def GetAlertsWithBugId(bug_id):
  """Get alerts for |bug_id|.

  Args:
    bug_id: A bug ID (as an int or string). Could be also be a pseudo-bug ID,
        such as -1 or -2 indicating invalid or ignored.

  Returns:
    list of anomaly.Anomaly
  """
  if not _IsInt(bug_id):
    raise request_handler.InvalidInputError('Invalid bug ID "%s".' % bug_id)
  query = anomaly.Anomaly.query(anomaly.Anomaly.bug_id == int(bug_id))
  return query.fetch(limit=_DISPLAY_LIMIT)
def _ShowAlertsWithBugId(self, bug_id):
  """Shows alerts associated with |bug_id|.

  Args:
    bug_id: A bug ID (as an int or string). Could be also be a pseudo-bug ID,
        such as -1 or -2 indicating invalid or ignored.
  """
  if not _IsInt(bug_id):
    raise request_handler.InvalidInputError('Invalid bug ID "%s".' % bug_id)
  bug_id = int(bug_id)
  anomalies = anomaly.Anomaly.query(
      anomaly.Anomaly.bug_id == bug_id).fetch(limit=_DISPLAY_LIMIT)
  stoppage_alerts = stoppage_alert.StoppageAlert.query(
      stoppage_alert.StoppageAlert.bug_id == bug_id).fetch(
          limit=_DISPLAY_LIMIT)
  self._ShowAlerts(anomalies + stoppage_alerts, bug_id)
def GetAlertsAroundRevision(rev):
  """Gets the alerts whose revision range includes the given revision.

  Args:
    rev: A revision number, as a string.

  Returns:
    list of anomaly.Anomaly
  """
  if not _IsInt(rev):
    raise request_handler.InvalidInputError('Invalid rev "%s".' % rev)
  revision = int(rev)
  # The datastore forbids two inequality filters on different properties
  # (start_revision and end_revision), so query by end_revision and filter
  # by start_revision in memory.
  query = anomaly.Anomaly.query(anomaly.Anomaly.end_revision >= revision)
  query = query.order(anomaly.Anomaly.end_revision)
  return [
      a for a in query.fetch(limit=_QUERY_LIMIT)
      if a.start_revision <= revision
  ]
def post(self):
  """Returns dynamic data for /group_report with some set of alerts.

  The set of alerts is determined by the sid, keys, bug ID, or revision
  given.

  Request parameters:
    keys: A comma-separated list of urlsafe Anomaly keys (optional).
    bug_id: A bug number on the Chromium issue tracker (optional).
    rev: A revision number (optional).
    sid: A hash of a group of keys from /short_uri (optional).

  Outputs:
    JSON for the /group_report page XHR request.
  """
  bug_id = self.request.get('bug_id')
  rev = self.request.get('rev')
  keys = self.request.get('keys')
  sid = self.request.get('sid')
  # sid takes precedence.
  if sid:
    page = ndb.Key(page_state.PageState, sid).get()
    if page:
      keys = json.loads(page.value)
  elif keys:
    keys = keys.split(',')
  try:
    # Dispatch to the first handler whose parameter was supplied;
    # precedence is bug_id, then keys, then rev.
    dispatch = (
        (bug_id, self._ShowAlertsWithBugId),
        (keys, self._ShowAlertsForKeys),
        (rev, self._ShowAlertsAroundRevision),
    )
    for value, handler in dispatch:
      if value:
        handler(value)
        break
    else:
      # TODO(qyearsley): Instead of just showing an error here, show a form
      # where the user can input a bug ID or revision.
      raise request_handler.InvalidInputError('No anomalies specified.')
  except request_handler.InvalidInputError as error:
    self.response.out.write(json.dumps({'error': str(error)}))
def GetAlertsForKeys(keys):
  """Get alerts for |keys|.

  Query for anomalies with overlapping revision. The |keys| parameter for
  group_report is a comma-separated list of urlsafe strings for Keys for
  Anomaly entities. (Each key corresponds to an alert)

  Args:
    keys: Comma-separated list of urlsafe strings for Anomaly keys.

  Returns:
    tuple (alerts, extra_columns)
  """
  # NOTE(review): |keys| is iterated and indexed directly (never split), so
  # callers appear to pass a list of urlsafe strings despite the docstring's
  # "comma-separated" wording -- TODO confirm against callers.
  urlsafe_keys = keys
  try:
    keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
  # Errors that can be thrown here include ProtocolBufferDecodeError
  # in google.net.proto.ProtocolBuffer. We want to catch any errors here
  # because they're almost certainly urlsafe key decoding errors.
  except Exception:
    raise request_handler.InvalidInputError('Invalid Anomaly key given.')
  requested_anomalies = utils.GetMulti(keys)
  # Pick extra_columns from the kinds of entities actually fetched;
  # 'anomalies' wins if any Anomaly is present, otherwise 'stoppage_alerts'.
  extra_columns = None
  for i, anomaly_entity in enumerate(requested_anomalies):
    if isinstance(anomaly_entity, anomaly.Anomaly):
      extra_columns = 'anomalies'
    elif (isinstance(anomaly_entity, stoppage_alert.StoppageAlert) and
          extra_columns is None):
      extra_columns = 'stoppage_alerts'
    if anomaly_entity is None:
      raise request_handler.InvalidInputError(
          'No Anomaly found for key %s.' % urlsafe_keys[i])
  if not requested_anomalies:
    raise request_handler.InvalidInputError('No anomalies found.')
  sheriff_key = requested_anomalies[0].sheriff
  min_range = utils.MinimumAlertRange(requested_anomalies)
  if min_range:
    query = anomaly.Anomaly.query(anomaly.Anomaly.sheriff == sheriff_key)
    query = query.order(-anomaly.Anomaly.timestamp)
    anomalies = query.fetch(limit=_QUERY_LIMIT)
    # Filter out anomalies that have been marked as invalid or ignore.
    # Include all anomalies with an overlapping revision range that have
    # been associated with a bug, or are not yet triaged.
    requested_anomalies_set = set([a.key for a in requested_anomalies])
    def _IsValidAlert(a):
      # Requested alerts are excluded here and re-added below so they end
      # up at the front of the returned list.
      if a.key in requested_anomalies_set:
        return False
      return a.bug_id is None or a.bug_id > 0
    anomalies = [a for a in anomalies if _IsValidAlert(a)]
    anomalies = _GetOverlaps(anomalies, min_range[0], min_range[1])
    # Make sure alerts in specified param "keys" are included.
    # We actually only send the first _DISPLAY_LIMIT alerts to the UI, so we
    # need to include those keys at the start of the list.
    anomalies = requested_anomalies + anomalies
  else:
    anomalies = requested_anomalies
  return anomalies, extra_columns
class StartNewBisectForBugTest(testing_common.TestCase):
  """Tests for auto_bisect.StartNewBisectForBug."""

  def setUp(self):
    super(StartNewBisectForBugTest, self).setUp()
    self.SetCurrentUser('*****@*****.**')

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_StartsBisect(self, mock_perform_bisect):
    # A suite-level test path should start a bisect without a story filter.
    testing_common.AddTests(
        ['ChromiumPerf'], ['linux-release'],
        {'sunspider': {
            'score': {
                'page_1': {},
                'page_2': {}
            }
        }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    anomaly.Anomaly(bug_id=111, test=test_key,
                    start_revision=300100, end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(111)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
    self.assertNotIn('--story-filter', job.config)
    mock_perform_bisect.assert_called_once_with(job)

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_StartsBisectWithStoryFilter(
      self, mock_perform_bisect):
    # A story-level test path (".../page_2") should produce a bisect config
    # that includes a --story-filter argument.
    testing_common.AddTests(
        ['ChromiumPerf'], ['linux-release'],
        {'sunspider': {
            'score': {
                'page_1': {},
                'page_2': {}
            }
        }})
    test_key = utils.TestKey(
        'ChromiumPerf/linux-release/sunspider/score/page_2')
    anomaly.Anomaly(bug_id=111, test=test_key,
                    start_revision=300100, end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(111)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
    self.assertIn('--story-filter', job.config)
    mock_perform_bisect.assert_called_once_with(job)

  def testStartNewBisectForBug_RevisionTooLow_ReturnsError(self):
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    anomaly.Anomaly(bug_id=222, test=test_key,
                    start_revision=1200, end_revision=1250,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(222)
    self.assertEqual({'error': 'Invalid "good" revision: 1199.'}, result)

  @mock.patch.object(
      auto_bisect.start_try_job, 'PerformBisect',
      mock.MagicMock(
          side_effect=request_handler.InvalidInputError('Some reason')))
  def testStartNewBisectForBug_InvalidInputErrorRaised_ReturnsError(self):
    # An InvalidInputError from PerformBisect is converted into an error
    # dict carrying the exception message.
    testing_common.AddTests(['Foo'], ['bar'], {'sunspider': {'score': {}}})
    test_key = utils.TestKey('Foo/bar/sunspider/score')
    anomaly.Anomaly(bug_id=345, test=test_key,
                    start_revision=300100, end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(345)
    self.assertEqual({'error': 'Some reason'}, result)

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_WithDefaultRevs_StartsBisect(
      self, mock_perform_bisect):
    # Rows supply a_default_rev/r_foo mappings used to resolve revisions.
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-release/sunspider/score', {
            11990: {
                'a_default_rev': 'r_foo',
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_foo',
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(333)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 333).get()
    mock_perform_bisect.assert_called_once_with(job)

  def testStartNewBisectForBug_UnbisectableTest_ReturnsError(self):
    testing_common.AddTests(['V8'], ['x86'], {'v8': {'sunspider': {}}})
    # The test suite "v8" is in the black-list of test suite names.
    test_key = utils.TestKey('V8/x86/v8/sunspider')
    anomaly.Anomaly(bug_id=444, test=test_key,
                    start_revision=155000, end_revision=155100,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(444)
    self.assertEqual({'error': 'Could not select a test.'}, result)
def post(self):
  """Returns dynamic data for /group_report with some set of alerts.

  The set of alerts is determined by the sid, keys, bug ID, AlertGroup ID,
  or revision given.

  Request parameters:
    keys: A comma-separated list of urlsafe Anomaly keys (optional).
    bug_id: A bug number on the Chromium issue tracker (optional).
    rev: A revision number (optional).
    sid: A hash of a group of keys from /short_uri (optional).
    group_id: An AlertGroup ID (optional).

  Outputs:
    JSON for the /group_report page XHR request.
  """
  bug_id = self.request.get('bug_id')
  rev = self.request.get('rev')
  keys = self.request.get('keys')
  sid = self.request.get('sid')
  group_id = self.request.get('group_id')
  # sid takes precedence over an explicit keys parameter.
  if sid:
    page = ndb.Key(page_state.PageState, sid).get()
    if page:
      keys = json.loads(page.value)
  elif keys:
    keys = keys.split(',')
  try:
    if bug_id:
      try:
        alert_list, _, _ = anomaly.Anomaly.QueryAsync(
            bug_id=bug_id, limit=_QUERY_LIMIT).get_result()
      except ValueError:
        raise request_handler.InvalidInputError(
            'Invalid bug ID "%s".' % bug_id)
    elif keys:
      alert_list = GetAlertsForKeys(keys)
    elif rev:
      alert_list = GetAlertsAroundRevision(rev)
    elif group_id:
      alert_list = GetAlertsForGroupID(group_id)
    else:
      raise request_handler.InvalidInputError('No anomalies specified.')
    # Only Anomaly entities are converted to dicts for the response.
    anomaly_dicts = alerts.AnomalyDicts(
        [a for a in alert_list if a.key.kind() == 'Anomaly'])
    values = {
        'alert_list': anomaly_dicts,
        'test_suites': update_test_suites.FetchCachedTestSuites(),
    }
    if bug_id:
      values['bug_id'] = bug_id
    if keys:
      values['selected_keys'] = keys
    self.GetDynamicVariables(values)
    self.response.out.write(json.dumps(values))
  except request_handler.InvalidInputError as error:
    self.response.out.write(json.dumps({'error': str(error)}))
class StartNewBisectForBugTest(testing_common.TestCase):
  """Tests for auto_bisect.StartNewBisectForBug, including Pinpoint paths."""

  def setUp(self):
    super(StartNewBisectForBugTest, self).setUp()
    self.SetCurrentUser('*****@*****.**')
    # Route 'linux-pinpoint' bots through the Pinpoint code path.
    auto_bisect._PINPOINT_BOTS = ['linux-pinpoint']

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_StartsBisect(self, mock_perform_bisect):
    # A suite-level test path should start a bisect without a story filter.
    testing_common.AddTests(
        ['ChromiumPerf'], ['linux-release'],
        {'sunspider': {
            'score': {
                'page_1': {},
                'page_2': {}
            }
        }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    anomaly.Anomaly(bug_id=111, test=test_key,
                    start_revision=300100, end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(111)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
    self.assertNotIn('--story-filter', job.config)
    mock_perform_bisect.assert_called_once_with(job)

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_StartsBisectWithStoryFilter(
      self, mock_perform_bisect):
    # A story-level test path (".../page_2") should produce a bisect config
    # that includes a --story-filter argument.
    testing_common.AddTests(
        ['ChromiumPerf'], ['linux-release'],
        {'sunspider': {
            'score': {
                'page_1': {},
                'page_2': {}
            }
        }})
    test_key = utils.TestKey(
        'ChromiumPerf/linux-release/sunspider/score/page_2')
    anomaly.Anomaly(bug_id=111, test=test_key,
                    start_revision=300100, end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(111)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
    self.assertIn('--story-filter', job.config)
    mock_perform_bisect.assert_called_once_with(job)

  def testStartNewBisectForBug_RevisionTooLow_ReturnsError(self):
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    anomaly.Anomaly(bug_id=222, test=test_key,
                    start_revision=1200, end_revision=1250,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(222)
    self.assertEqual({'error': 'Invalid "good" revision: 1199.'}, result)

  def testStartNewBisectForBug_RevisionsEqual_ReturnsError(self):
    # When good and bad resolve to the same revision, no bisect is started.
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-release/sunspider/score', {
            11990: {
                'a_default_rev': 'r_foo',
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_foo',
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=222, test=test_key,
                    start_revision=12500, end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(222)
    self.assertEqual(
        {'error': 'Same "good"/"bad" revisions, bisect skipped'}, result)

  @mock.patch.object(
      auto_bisect.start_try_job, 'PerformBisect',
      mock.MagicMock(
          side_effect=request_handler.InvalidInputError('Some reason')))
  def testStartNewBisectForBug_InvalidInputErrorRaised_ReturnsError(self):
    # An InvalidInputError from PerformBisect is converted into an error
    # dict carrying the exception message.
    testing_common.AddTests(['Foo'], ['bar'], {'sunspider': {'score': {}}})
    test_key = utils.TestKey('Foo/bar/sunspider/score')
    anomaly.Anomaly(bug_id=345, test=test_key,
                    start_revision=300100, end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(345)
    self.assertEqual({'error': 'Some reason'}, result)

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_WithDefaultRevs_StartsBisect(
      self, mock_perform_bisect):
    # Rows supply a_default_rev/r_foo mappings used to resolve revisions.
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-release/sunspider/score', {
            11990: {
                'a_default_rev': 'r_foo',
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_foo',
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(333)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 333).get()
    mock_perform_bisect.assert_called_once_with(job)

  def testStartNewBisectForBug_UnbisectableTest_ReturnsError(self):
    testing_common.AddTests(['V8'], ['x86'], {'v8': {'sunspider': {}}})
    # The test suite "v8" is in the black-list of test suite names.
    test_key = utils.TestKey('V8/x86/v8/sunspider')
    anomaly.Anomaly(bug_id=444, test=test_key,
                    start_revision=155000, end_revision=155100,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(444)
    self.assertEqual({'error': 'Could not select a test.'}, result)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(auto_bisect.pinpoint_service, 'NewJob',
                     mock.MagicMock(return_value={
                         'jobId': 123,
                         'jobUrl': 'http://pinpoint/123'
                     }))
  @mock.patch.object(auto_bisect.start_try_job, 'GuessStoryFilter')
  def testStartNewBisectForBug_Pinpoint_Succeeds(self, mock_guess):
    # Pinpoint bot with full configuration; should return the new job's
    # id/url and consult GuessStoryFilter for the test path.
    namespaced_stored_object.Set('bot_dimensions_map', {
        'linux-pinpoint': [{
            'key': 'foo',
            'value': 'bar'
        }],
    })
    namespaced_stored_object.Set('repositories', {
        'chromium': {
            'some': 'params'
        },
    })
    testing_common.AddTests(['ChromiumPerf'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-pinpoint/sunspider/score', {
            11999: {
                'a_default_rev': 'r_chromium',
                'r_chromium': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_chromium',
                'r_chromium': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    self.assertEqual({
        'issue_id': 123,
        'issue_url': 'http://pinpoint/123'
    }, result)
    mock_guess.assert_called_once_with(
        'ChromiumPerf/linux-pinpoint/sunspider/score')

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(auto_bisect.pinpoint_service, 'NewJob',
                     mock.MagicMock(return_value={
                         'jobId': 123,
                         'jobUrl': 'http://pinpoint/123'
                     }))
  def testStartNewBisectForBug_Pinpoint_No_a_default_rev_Succeeds(self):
    # Rows without a_default_rev still resolve via the r_chromium column.
    namespaced_stored_object.Set('bot_dimensions_map', {
        'linux-pinpoint': [{
            'key': 'foo',
            'value': 'bar'
        }],
    })
    namespaced_stored_object.Set('repositories', {
        'chromium': {
            'some': 'params'
        },
    })
    testing_common.AddTests(['ChromiumPerf'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-pinpoint/sunspider/score', {
            11999: {
                'r_chromium': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'r_chromium': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    self.assertEqual({
        'issue_id': 123,
        'issue_url': 'http://pinpoint/123'
    }, result)

  def testStartNewBisectForBug_Pinpoint_UnsupportedRepo_Error(self):
    # Rows whose default rev column is not r_chromium cannot be bisected
    # through Pinpoint.
    testing_common.AddTests(['ChromiumPerf'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-pinpoint/sunspider/score', {
            11999: {
                'a_default_rev': 'r_foo',
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_foo',
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    self.assertEqual({'error': 'Row has no r_chromium'}, result)

  def testStartNewBisectForBug_Pinpoint_InvalidRow_Error(self):
    # The anomaly's end_revision (12501) has no corresponding row.
    testing_common.AddTests(['ChromiumPerf'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-pinpoint/sunspider/score', {
            11999: {
                'a_default_rev': 'r_chromium',
                'r_chromium': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_chromium',
                'r_chromium': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12501,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    self.assertEqual(
        {
            'error': 'No row ChromiumPerf/linux-pinpoint/'
                     'sunspider/score: 12501'
        }, result)

  def testStartNewBisectForBug_Pinpoint_UnsupportedMaster_Error(self):
    testing_common.AddTests(['SomeOtherMaster'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey(
        'SomeOtherMaster/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'SomeOtherMaster/linux-pinpoint/sunspider/score', {
            11999: {
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333, test=test_key,
                    start_revision=12000, end_revision=12501,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    self.assertEqual({'error': 'Unsupported master: SomeOtherMaster'},
                     result)
class StartNewBisectForBugTest(testing_common.TestCase):
  """Tests for auto_bisect.StartNewBisectForBug.

  Each test seeds the datastore with test/row/anomaly entities, calls
  StartNewBisectForBug with a bug id, and then checks either that a bisect
  try job was created (legacy path), that a Pinpoint job was requested, or
  that the expected error dict was returned.
  """

  def setUp(self):
    super(StartNewBisectForBugTest, self).setUp()
    # Starting bisects requires a signed-in user.
    self.SetCurrentUser('*****@*****.**')
    namespaced_stored_object.Set('bot_configurations', {
        'linux-pinpoint': {},
    })

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_StartsBisect(self, mock_perform_bisect):
    testing_common.AddTests(
        ['ChromiumPerf'], ['linux-release'],
        {'sunspider': {
            'score': {
                'page_1': {},
                'page_2': {}
            }
        }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    anomaly.Anomaly(bug_id=111,
                    test=test_key,
                    start_revision=300100,
                    end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(111)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
    # The alert is on the suite-level test path (no story component), so
    # the generated config must not narrow the bisect to one story.
    self.assertNotIn('--story-filter', job.config)
    mock_perform_bisect.assert_called_once_with(job)

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_StartsBisectWithStoryFilter(
      self, mock_perform_bisect):
    testing_common.AddTests(
        ['ChromiumPerf'], ['linux-release'],
        {'sunspider': {
            'score': {
                'page_1': {},
                'page_2': {}
            }
        }})
    # The alert is on a story-level test path ('.../page_2'), so the config
    # is expected to include a --story-filter.
    test_key = utils.TestKey(
        'ChromiumPerf/linux-release/sunspider/score/page_2')
    anomaly.Anomaly(bug_id=111,
                    test=test_key,
                    start_revision=300100,
                    end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(111)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
    self.assertIn('--story-filter', job.config)
    mock_perform_bisect.assert_called_once_with(job)

  def testStartNewBisectForBug_RevisionTooLow_ReturnsError(self):
    # No Row entities are added, so the revision range cannot be resolved
    # to a usable "good" revision.
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    anomaly.Anomaly(bug_id=222,
                    test=test_key,
                    start_revision=1200,
                    end_revision=1250,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(222)
    self.assertEqual({'error': 'Invalid "good" revision: 1199.'}, result)

  def testStartNewBisectForBug_RevisionsEqual_ReturnsError(self):
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-release/sunspider/score', {
            11990: {
                'a_default_rev': 'r_foo',
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_foo',
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    # start_revision == end_revision == 12500, so "good" and "bad" resolve
    # to the same revision and the bisect should be skipped.
    anomaly.Anomaly(bug_id=222,
                    test=test_key,
                    start_revision=12500,
                    end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(222)
    self.assertEqual(
        {'error': 'Same "good"/"bad" revisions, bisect skipped'}, result)

  @mock.patch.object(
      auto_bisect.start_try_job, 'PerformBisect',
      mock.MagicMock(
          side_effect=request_handler.InvalidInputError('Some reason')))
  def testStartNewBisectForBug_InvalidInputErrorRaised_ReturnsError(self):
    testing_common.AddTests(['Foo'], ['bar'], {'sunspider': {'score': {}}})
    test_key = utils.TestKey('Foo/bar/sunspider/score')
    anomaly.Anomaly(bug_id=345,
                    test=test_key,
                    start_revision=300100,
                    end_revision=300200,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(345)
    # The InvalidInputError message raised by PerformBisect is surfaced as
    # the 'error' value.
    self.assertEqual({'error': 'Some reason'}, result)

  @mock.patch.object(auto_bisect.start_try_job, 'PerformBisect')
  def testStartNewBisectForBug_WithDefaultRevs_StartsBisect(
      self, mock_perform_bisect):
    # Rows use 'a_default_rev' to name a custom revision column ('r_foo');
    # a bisect job should still be created and performed.
    testing_common.AddTests(['ChromiumPerf'], ['linux-release'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-release/sunspider/score', {
            11990: {
                'a_default_rev': 'r_foo',
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_foo',
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333,
                    test=test_key,
                    start_revision=12000,
                    end_revision=12500,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    auto_bisect.StartNewBisectForBug(333)
    job = try_job.TryJob.query(try_job.TryJob.bug_id == 333).get()
    mock_perform_bisect.assert_called_once_with(job)

  def testStartNewBisectForBug_UnbisectableTest_ReturnsError(self):
    testing_common.AddTests(['V8'], ['x86'], {'v8': {'sunspider': {}}})
    # The test suite "v8" is in the black-list of test suite names.
    test_key = utils.TestKey('V8/x86/v8/sunspider')
    anomaly.Anomaly(bug_id=444,
                    test=test_key,
                    start_revision=155000,
                    end_revision=155100,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(444)
    self.assertEqual({'error': 'Could not select a test.'}, result)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(auto_bisect.pinpoint_service, 'NewJob')
  @mock.patch.object(auto_bisect.start_try_job, 'GuessStoryFilter')
  @mock.patch.object(auto_bisect.pinpoint_request, 'ResolveToGitHash',
                     mock.MagicMock(return_value='abc123'))
  def testStartNewBisectForBug_Pinpoint_Succeeds(self, mock_guess, mock_new):
    # Decorators apply bottom-up: GuessStoryFilter -> mock_guess,
    # NewJob -> mock_new (MagicMock-valued patches inject no argument).
    namespaced_stored_object.Set('bot_configurations', {
        'linux-pinpoint': {
            'dimensions': [{
                'key': 'foo',
                'value': 'bar'
            }]
        },
    })
    namespaced_stored_object.Set('repositories', {
        'chromium': {
            'some': 'params'
        },
    })
    mock_new.return_value = {'jobId': 123, 'jobUrl': 'http://pinpoint/123'}
    testing_common.AddTests(['ChromiumPerf'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-pinpoint/sunspider/score', {
            11999: {
                'a_default_rev': 'r_chromium',
                'r_chromium': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'a_default_rev': 'r_chromium',
                'r_chromium': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    # 'a' is the key returned by put(); used below to re-fetch the anomaly
    # and inspect side effects of starting the job.
    a = anomaly.Anomaly(bug_id=333,
                        test=test_key,
                        start_revision=12000,
                        end_revision=12500,
                        median_before_anomaly=100,
                        median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    self.assertEqual({
        'issue_id': 123,
        'issue_url': 'http://pinpoint/123'
    }, result)
    mock_guess.assert_called_once_with(
        'ChromiumPerf/linux-pinpoint/sunspider/score')
    # The new Pinpoint job id is recorded on the anomaly entity.
    self.assertEqual('123', a.get().pinpoint_bisects[0])
    # The job request is tagged (JSON-encoded) with alert key and test path.
    self.assertEqual({
        'alert': a.urlsafe(),
        'test_path': test_key.id()
    }, json.loads(mock_new.call_args[0][0]['tags']))
    anomaly_entity = a.get()
    # comparison_magnitude passed to Pinpoint is the median delta.
    anomaly_magnitude = (anomaly_entity.median_after_anomaly -
                         anomaly_entity.median_before_anomaly)
    self.assertEqual(anomaly_magnitude,
                     mock_new.call_args[0][0]['comparison_magnitude'])

  @mock.patch.object(auto_bisect.pinpoint_request,
                     'PinpointParamsFromBisectParams',
                     mock.MagicMock(side_effect=auto_bisect.pinpoint_request.
                                    InvalidParamsError('Some reason')))
  def testStartNewBisectForBug_Pinpoint_ParamsRaisesError(self):
    testing_common.AddTests(['ChromiumPerf'], ['linux-pinpoint'],
                            {'sunspider': {
                                'score': {}
                            }})
    test_key = utils.TestKey('ChromiumPerf/linux-pinpoint/sunspider/score')
    testing_common.AddRows(
        'ChromiumPerf/linux-pinpoint/sunspider/score', {
            11999: {
                'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857'
            },
            12500: {
                'r_foo': 'fc34e5346446854637311ad7793a95d56e314042'
            }
        })
    anomaly.Anomaly(bug_id=333,
                    test=test_key,
                    start_revision=12000,
                    end_revision=12501,
                    median_before_anomaly=100,
                    median_after_anomaly=200).put()
    result = auto_bisect.StartNewBisectForBug(333)
    # The InvalidParamsError message is surfaced as the 'error' value.
    self.assertEqual({'error': 'Some reason'}, result)
def _ValidatePatterns(test_path_patterns): """Raises an exception if any test path patterns are invalid.""" for pattern in test_path_patterns: if not _IsValidTestPathPattern(pattern): raise request_handler.InvalidInputError( 'Invalid test path pattern: "%s"' % pattern)