def _ChooseTest(anomalies):
  """Chooses a test to use for a bisect job.

  The particular TestMetadata chosen determines the command and metric name
  that is chosen. The test to choose could depend on which of the anomalies
  has the largest regression size.

  Ideally, the choice of bisect bot to use should be based on bisect bot
  queue length, and the choice of metric should be based on regression size
  and noise level.

  However, we don't choose bisect bot and metric independently, since some
  regressions only happen for some tests on some platforms; we should
  generally only bisect with a given bisect bot on a given metric if we know
  that the regression showed up on that platform for that metric.

  Args:
    anomalies: A non-empty list of Anomaly entities.

  Returns:
    An Anomaly entity, or None if no valid entity could be chosen.
  """
  import functools  # Function-scope: no shared import block in this chunk.
  if not anomalies:
    return None
  # list.sort(cmp=...) only exists on Python 2; cmp_to_key produces the same
  # ordering from the same comparator and also works on Python 3.
  anomalies.sort(key=functools.cmp_to_key(_CompareAnomalyBisectability))
  for anomaly_entity in anomalies:
    if can_bisect.IsValidTestForBisect(
        utils.TestPath(anomaly_entity.GetTestMetadataKey())):
      return anomaly_entity
  return None
def _ChooseTest(anomalies, index=0):
  """Chooses a test to use for a bisect job.

  The particular Test chosen determines the command and metric name that is
  chosen. The test to choose could depend on which of the anomalies has the
  largest regression size.

  Ideally, the choice of bisect bot to use should be based on bisect bot
  queue length, and the choice of metric should be based on regression size
  and noise level.

  However, we don't choose bisect bot and metric independently, since some
  regressions only happen for some tests on some platforms; we should
  generally only bisect with a given bisect bot on a given metric if we know
  that the regression showed up on that platform for that metric.

  Args:
    anomalies: A non-empty list of Anomaly entities.
    index: Index of the first Anomaly entity to look at. If this is greater
        than the number of Anomalies, it will wrap around. This is used to
        make it easier to get different suggestions for what test to use
        given the same list of alerts.

  Returns:
    A Test entity, or None if no valid Test could be chosen.
  """
  import functools  # Function-scope: no shared import block in this chunk.
  if not anomalies:
    return None
  index %= len(anomalies)
  # list.sort(cmp=...) only exists on Python 2; cmp_to_key produces the same
  # ordering from the same comparator and also works on Python 3.
  anomalies.sort(key=functools.cmp_to_key(_CompareAnomalyBisectability))
  for anomaly_entity in anomalies[index:]:
    if can_bisect.IsValidTestForBisect(utils.TestPath(anomaly_entity.test)):
      return anomaly_entity.test.get()
  return None
def _StartBisectForBug(bug_id):
  """Starts a bisect for the alerts on a bug, via Pinpoint or a recipe job.

  Args:
    bug_id: A bug ID which some Anomaly alerts are associated with.

  Returns:
    Whatever _StartPinpointBisect or _StartRecipeBisect returns.

  Raises:
    NotBisectableError: No alerts were found, or no valid test could be
        selected from them.
  """
  alerts = anomaly.Anomaly.query(anomaly.Anomaly.bug_id == bug_id).fetch()
  if not alerts:
    raise NotBisectableError('No Anomaly alerts found for this bug.')
  chosen_anomaly = _ChooseTest(alerts)
  chosen_test = None
  if chosen_anomaly is not None:
    chosen_test = chosen_anomaly.GetTestMetadataKey().get()
  valid = (chosen_test is not None
           and can_bisect.IsValidTestForBisect(chosen_test.test_path))
  if not valid:
    raise NotBisectableError('Could not select a test.')
  # Pinpoint-capable bots go through Pinpoint; everything else uses recipes.
  if chosen_test.bot_name in _PINPOINT_BOTS:
    return _StartPinpointBisect(bug_id, chosen_anomaly, chosen_test)
  return _StartRecipeBisect(bug_id, chosen_anomaly, chosen_test)
def _ChooseTest(anomalies):
  """Chooses a test to use for a bisect job.

  The particular TestMetadata chosen determines the command and metric name
  that is chosen. The test to choose could depend on which of the anomalies
  has the largest regression size.

  Ideally, the choice of bisect bot to use should be based on bisect bot
  queue length, and the choice of metric should be based on regression size
  and noise level.

  However, we don't choose bisect bot and metric independently, since some
  regressions only happen for some tests on some platforms; we should
  generally only bisect with a given bisect bot on a given metric if we know
  that the regression showed up on that platform for that metric.

  Args:
    anomalies: A non-empty list of Anomaly entities.

  Returns:
    An Anomaly entity, or None if no valid entity could be chosen.

  Raises:
    NotBisectableError: The only matching tests are on domains that have
        been excluded for automatic bisects on alert triage.
  """
  import functools  # Function-scope: no shared import block in this chunk.
  if not anomalies:
    return None
  # list.sort(cmp=...) only exists on Python 2; cmp_to_key produces the same
  # ordering from the same comparator and also works on Python 3.
  anomalies.sort(key=functools.cmp_to_key(_CompareAnomalyBisectability))
  found_excluded_domain = False
  for anomaly_entity in anomalies:
    if can_bisect.IsValidTestForBisect(
        utils.TestPath(anomaly_entity.GetTestMetadataKey())):
      if can_bisect.DomainIsExcludedFromTriageBisects(
          anomaly_entity.master_name):
        # Remember that a valid test existed so we can report a more
        # specific error than just returning None.
        found_excluded_domain = True
        continue
      return anomaly_entity
  if found_excluded_domain:
    raise NotBisectableError(
        'Did not kick off bisect because only available domains are '
        'excluded from automatic bisects on triage.')
  return None
def _StartBisectForBug(bug_id):
  """Starts a Pinpoint bisect for the alerts associated with a bug.

  Args:
    bug_id: A bug ID which some Anomaly alerts are associated with.

  Returns:
    Whatever _StartPinpointBisect returns.

  Raises:
    NotBisectableError: No alerts were found, no valid test could be
        selected, or the test's bot has no Pinpoint configuration.
  """
  anomalies, _, _ = anomaly.Anomaly.QueryAsync(bug_id=bug_id,
                                               limit=500).get_result()
  if not anomalies:
    raise NotBisectableError('No Anomaly alerts found for this bug.')
  test_anomaly = _ChooseTest(anomalies)
  test = None
  if test_anomaly:
    test = test_anomaly.GetTestMetadataKey().get()
  if not test or not can_bisect.IsValidTestForBisect(test.test_path):
    raise NotBisectableError('Could not select a test.')
  bot_configurations = namespaced_stored_object.Get('bot_configurations')
  # Membership test directly on the dict; building .keys() first is
  # unnecessary work.
  if test.bot_name not in bot_configurations:
    raise NotBisectableError('Bot: %s has no corresponding Pinpoint bot.' %
                             test.bot_name)
  return _StartPinpointBisect(bug_id, test_anomaly, test)
def _GetSeriesAnnotations(tests):
  """Makes metadata about each series (i.e. each test).

  Args:
    tests: List of TestMetadata entities.

  Returns:
    A dict mapping each test's index in `tests` to a dict of metadata about
    that test. (Note: despite being index-keyed, this is a dict, not a list.)
  """
  return {
      i: {
          'name': test.test_name,
          'path': test.test_path,
          'units': test.units,
          'better': _BETTER_DICT[test.improvement_direction],
          'description': test.description,
          'can_bisect': can_bisect.IsValidTestForBisect(test.test_path),
      }
      for i, test in enumerate(tests)
  }
def testIsValidTestForBisect_UnsupportedDomain_ReturnsFalse(self):
  # 'X' is not a supported domain, so the path must be rejected.
  result = can_bisect.IsValidTestForBisect('X/b/t/foo')
  self.assertFalse(result)
def testIsValidTestForBisect_RefTest_ReturnsFalse(self):
  # Reference ('ref') series must not be selected for bisect.
  result = can_bisect.IsValidTestForBisect('SupportedDomain/b/t/ref')
  self.assertFalse(result)
def testIsValidTestForBisect_V8_IsSupported(self):
  # A V8 benchmark path on a supported domain is bisectable.
  path = 'SupportedDomain/Pixel2/v8/JSTests/Array/Total'
  self.assertTrue(can_bisect.IsValidTestForBisect(path))
def testIsValidTestForBisect_Supported_ReturnsTrue(self):
  # An ordinary test path on a supported domain is bisectable.
  result = can_bisect.IsValidTestForBisect('SupportedDomain/b/t/foo')
  self.assertTrue(result)
def testIsValidTestForBisect_BisectableTests_ReturnsTrue(self):
  # assertTrue gives a clearer failure message than assertEqual(..., True)
  # and matches the style of the sibling tests.
  self.assertTrue(
      can_bisect.IsValidTestForBisect(
          'SupportedDomain/mac/blink_perf.parser/simple-url'))
def _MakeBisectTryJob(bug_id, run_count=0):
  """Tries to automatically select parameters for a bisect job.

  Args:
    bug_id: A bug ID which some alerts are associated with.
    run_count: An integer; this is supposed to represent the number of times
        that a bisect has been tried for this bug; it is used to try
        different config parameters on different re-try attempts.

  Returns:
    A TryJob entity, which has not yet been put in the datastore.

  Raises:
    NotBisectableError: A valid bisect config could not be created.
  """
  alerts = anomaly.Anomaly.query(anomaly.Anomaly.bug_id == bug_id).fetch()
  if not alerts:
    raise NotBisectableError('No Anomaly alerts found for this bug.')

  # Establish and validate the revision range implied by the alerts.
  good_revision, bad_revision = _ChooseRevisionRange(alerts)
  if not can_bisect.IsValidRevisionForBisect(good_revision):
    raise NotBisectableError('Invalid "good" revision: %s.' % good_revision)
  if not can_bisect.IsValidRevisionForBisect(bad_revision):
    raise NotBisectableError('Invalid "bad" revision: %s.' % bad_revision)

  # run_count varies the chosen test across re-try attempts.
  test = _ChooseTest(alerts, run_count)
  if not test or not can_bisect.IsValidTestForBisect(test.test_path):
    raise NotBisectableError('Could not select a test.')

  metric = start_try_job.GuessMetric(test.test_path)
  bisect_bot = start_try_job.GuessBisectBot(test.master_name, test.bot_name)
  if not bisect_bot or '_' not in bisect_bot:
    raise NotBisectableError('Could not select a bisect bot.')
  use_recipe = bool(start_try_job.GetBisectDirectorForTester(bisect_bot))

  new_bisect_config = start_try_job.GetBisectConfig(
      bisect_bot=bisect_bot,
      master_name=test.master_name,
      suite=test.suite_name,
      metric=metric,
      good_revision=good_revision,
      bad_revision=bad_revision,
      repeat_count=10,
      max_time_minutes=20,
      bug_id=bug_id,
      use_archive='true',
      use_buildbucket=use_recipe)
  if 'error' in new_bisect_config:
    raise NotBisectableError('Could not make a valid config.')

  return try_job.TryJob(
      bot=bisect_bot,
      config=utils.BisectConfigPythonString(new_bisect_config),
      bug_id=bug_id,
      master_name=test.master_name,
      internal_only=test.internal_only,
      job_type='bisect',
      use_buildbucket=use_recipe)
def _MakeBisectTryJob(bug_id):
  """Tries to automatically select parameters for a bisect job.

  Args:
    bug_id: A bug ID which some alerts are associated with.

  Returns:
    A TryJob entity, which has not yet been put in the datastore.

  Raises:
    NotBisectableError: A valid bisect config could not be created.
  """
  alerts = anomaly.Anomaly.query(anomaly.Anomaly.bug_id == bug_id).fetch()
  if not alerts:
    raise NotBisectableError('No Anomaly alerts found for this bug.')

  chosen_anomaly = _ChooseTest(alerts)
  test = None
  if chosen_anomaly is not None:
    test = chosen_anomaly.GetTestMetadataKey().get()
  if not test or not can_bisect.IsValidTestForBisect(test.test_path):
    raise NotBisectableError('Could not select a test.')

  # "good" is just before the anomaly's range; "bad" is at its end.
  good_revision = _GetRevisionForBisect(chosen_anomaly.start_revision - 1,
                                        test)
  bad_revision = _GetRevisionForBisect(chosen_anomaly.end_revision, test)
  if not can_bisect.IsValidRevisionForBisect(good_revision):
    raise NotBisectableError('Invalid "good" revision: %s.' % good_revision)
  if not can_bisect.IsValidRevisionForBisect(bad_revision):
    raise NotBisectableError('Invalid "bad" revision: %s.' % bad_revision)
  if chosen_anomaly.start_revision == chosen_anomaly.end_revision:
    raise NotBisectableError('Same "good"/"bad" revisions, bisect skipped')

  metric = start_try_job.GuessMetric(test.test_path)
  story_filter = start_try_job.GuessStoryFilter(test.test_path)
  bisect_bot = start_try_job.GuessBisectBot(test.master_name, test.bot_name)
  if not bisect_bot:
    raise NotBisectableError(
        'Could not select a bisect bot: %s for (%s, %s)' % (
            bisect_bot, test.master_name, test.bot_name))

  new_bisect_config = start_try_job.GetBisectConfig(
      bisect_bot=bisect_bot,
      master_name=test.master_name,
      suite=test.suite_name,
      metric=metric,
      story_filter=story_filter,
      good_revision=good_revision,
      bad_revision=bad_revision,
      repeat_count=10,
      max_time_minutes=20,
      bug_id=bug_id)
  if 'error' in new_bisect_config:
    raise NotBisectableError('Could not make a valid config.')

  return try_job.TryJob(
      bot=bisect_bot,
      config=utils.BisectConfigPythonString(new_bisect_config),
      bug_id=bug_id,
      master_name=test.master_name,
      internal_only=test.internal_only,
      job_type='bisect')