def testGenerateCommitIDsForBoundingTargetsQueryGit(self, mock_revisions):
  data_points = [
      DataPoint.Create(commit_position=1010, git_hash='r1010'),
      DataPoint.Create(commit_position=1000, git_hash='r1000'),
  ]
  mock_revisions.return_value = {
      1003: 'r1003',
      1004: 'r1004',
      1005: 'r1005',
      1006: 'r1006',
      1007: 'r1007',
      1008: 'r1008',
      1009: 'r1009',
      1010: 'r1010'
  }
  lower_bound_target = IsolatedTarget.Create(67890, '', '', 'm', 'b', '', '',
                                             '', '', '', '', 1003, None)
  upper_bound_target = IsolatedTarget.Create(67890, '', '', 'm', 'b', '', '',
                                             '', '', '', '', 1008, None)
  lower_bound_commit_id = CommitID(commit_position=1003, revision='r1003')
  upper_bound_commit_id = CommitID(commit_position=1008, revision='r1008')
  self.assertEqual(
      (lower_bound_commit_id, upper_bound_commit_id),
      next_commit_position_utils.GenerateCommitIDsForBoundingTargets(
          data_points, lower_bound_target, upper_bound_target))
  mock_revisions.assert_called_once_with('r1010', 1010, 1003)

def testNextCommitPositionPipelineFoundCulprit(self, mock_next_commit):
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  step_name = 's'
  test_name = 't'
  start_commit_position = 1000
  culprit_commit_id = CommitID(commit_position=1000, revision='r1000')
  mock_next_commit.return_value = (None, culprit_commit_id)

  analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                        build_number, step_name, test_name)
  analysis.Save()

  next_commit_position_input = NextCommitPositionInput(
      analysis_urlsafe_key=analysis.key.urlsafe(),
      commit_position_range=IntRange(lower=None, upper=start_commit_position),
      step_metadata=None)

  pipeline_job = NextCommitPositionPipeline(next_commit_position_input)
  pipeline_job.start()
  self.execute_queued_tasks()

  pipeline_job = pipelines.pipeline.Pipeline.from_id(pipeline_job.pipeline_id)
  next_commit_position_output = pipeline_job.outputs.default.value

  self.assertFalse(pipeline_job.was_aborted)
  self.assertEqual(culprit_commit_id.ToSerializable(),
                   next_commit_position_output['culprit_commit_id'])
  self.assertIsNone(next_commit_position_output['next_commit_id'])

def testBisectFinished(self):
  regression_range = CommitIDRange(
      lower=CommitID(commit_position=90, revision='rev_90'),
      upper=CommitID(commit_position=91, revision='rev_91'))
  self.assertEqual(
      (None, CommitID(commit_position=91, revision='rev_91')),
      lookback_algorithm._Bisect(regression_range))

def testAnalyzeFlakePipelineStartTaskAfterDelay(self, mocked_delay, _):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  # Random date in the past, for coverage.
  analysis.request_time = datetime.datetime(2015, 1, 1, 1, 1, 1)
  analysis.Save()

  start_commit_position = 1000
  start_revision = 'r1000'
  delay = 60

  step_metadata = StepMetadata(
      canonical_step_name='s',
      dimensions=None,
      full_step_name='s',
      patched=False,
      swarm_task_ids=None,
      waterfall_buildername='b',
      waterfall_mastername='w',
      isolate_target_name='s')

  mocked_delay.return_value = delay

  analyze_flake_input = AnalyzeFlakeInput(
      analysis_urlsafe_key=analysis.key.urlsafe(),
      analyze_commit_position_parameters=NextCommitPositionOutput(
          next_commit_id=CommitID(
              commit_position=start_commit_position, revision=start_revision),
          culprit_commit_id=None),
      commit_position_range=IntRange(lower=None, upper=None),
      dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
      manually_triggered=False,
      rerun=False,
      retries=0,
      step_metadata=step_metadata)

  expected_retried_analyze_flake_input = AnalyzeFlakeInput(
      analysis_urlsafe_key=analysis.key.urlsafe(),
      analyze_commit_position_parameters=NextCommitPositionOutput(
          next_commit_id=CommitID(
              commit_position=start_commit_position, revision=start_revision),
          culprit_commit_id=None),
      commit_position_range=IntRange(lower=None, upper=None),
      dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
      manually_triggered=False,
      rerun=False,
      retries=1,
      step_metadata=step_metadata)

  self.MockAsynchronousPipeline(DelayPipeline, delay, delay)
  self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                             expected_retried_analyze_flake_input, None)

  pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
  pipeline_job.start()
  self.execute_queued_tasks()

def testBisectNextCommitPosition(self, mock_get_revision):
  regression_range = CommitIDRange(
      lower=CommitID(commit_position=90, revision='rev_90'),
      upper=CommitID(commit_position=100, revision='rev_100'))
  mock_get_revision.return_value = 'rev_95'
  self.assertEqual(
      (CommitID(commit_position=95, revision='rev_95'), None),
      lookback_algorithm._Bisect(regression_range))

def GenerateCommitIDsForBoundingTargets(data_points, lower_bound_target,
                                        upper_bound_target):
  """Uses data_point info to generate commit_ids for bounding commit positions.

  Args:
    data_points ([DataPoint]): A list of DataPoints, sorted by commit_position
      in descending order.
    lower_bound_target (IsolatedTarget): The isolated target at the lower
      bound.
    upper_bound_target (IsolatedTarget): The isolated target at the upper
      bound.

  Returns:
    (CommitID, CommitID): CommitIDs for the lower and upper bound targets'
      commits.
  """
  lower_bound_commit_position = lower_bound_target.commit_position
  lower_bound_revision = lower_bound_target.revision
  upper_bound_commit_position = upper_bound_target.commit_position
  upper_bound_revision = upper_bound_target.revision

  if not lower_bound_revision or not upper_bound_revision:
    # Looks for the data_point whose commit_position is closest to, but larger
    # than, the upper bound's commit_position to use as the right bound when
    # querying gitiles.
    closest_data_point = None
    for data_point in data_points:
      if data_point.commit_position < lower_bound_commit_position:
        break

      if data_point.commit_position == lower_bound_commit_position:
        lower_bound_revision = data_point.git_hash
      elif data_point.commit_position == upper_bound_commit_position:
        upper_bound_revision = data_point.git_hash
      elif data_point.commit_position > upper_bound_commit_position:
        closest_data_point = data_point

    if not lower_bound_revision or not upper_bound_revision:
      assert closest_data_point, (
          'Failed to find a data_point with a larger commit_position.')
      revisions = git.MapCommitPositionsToGitHashes(
          closest_data_point.git_hash, closest_data_point.commit_position,
          lower_bound_commit_position)
      lower_bound_revision = lower_bound_revision or revisions.get(
          lower_bound_commit_position)
      upper_bound_revision = upper_bound_revision or revisions.get(
          upper_bound_commit_position)

  return (CommitID(
      commit_position=lower_bound_commit_position,
      revision=lower_bound_revision),
          CommitID(
              commit_position=upper_bound_commit_position,
              revision=upper_bound_revision))

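# Illustrative sketch only (not part of the production module): a simplified,
# dependency-free version of how the loop above picks the data point whose
# commit position is closest to, but above, the upper bound when a bounding
# target is missing its revision. _closest_position_above and the inline
# example are invented for this sketch; the production code additionally fills
# in the missing revisions via git.MapCommitPositionsToGitHashes.
def _closest_position_above(descending_positions, upper_bound_position):
  """Returns the smallest position strictly greater than the upper bound."""
  closest = None
  for position in descending_positions:
    if position <= upper_bound_position:
      break
    closest = position
  return closest


# Mirrors testGenerateCommitIDsForBoundingTargetsQueryGit: with data points at
# 1010 and 1000 and an upper bound of 1008, the data point at 1010 is the one
# used as the starting commit for the gitiles query.
assert _closest_position_above([1010, 1000], 1008) == 1010
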
def testGetNextCommitIDFromBuildRangeReturnUpperBoundCloser(self):
  calculated_commit_id = CommitID(commit_position=1007, revision='r1007')
  lower = CommitID(commit_position=1000, revision='r1000')
  upper = CommitID(commit_position=1010, revision='r1010')
  build_range = CommitIDRange(lower=lower, upper=upper)
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.data_points = [
      DataPoint.Create(commit_position=1020),  # Doesn't have either.
  ]
  self.assertEqual(
      upper,
      next_commit_position_utils.GetNextCommitIDFromBuildRange(
          analysis, build_range, calculated_commit_id))

def testGetNextCommitIDFromBuildRangeAlreadyHasLowerReturnUpper(self):
  calculated_commit_id = CommitID(commit_position=1002, revision='r1002')
  lower = CommitID(commit_position=1000, revision='r1000')
  upper = CommitID(commit_position=1010, revision='r1010')
  build_range = CommitIDRange(lower=lower, upper=upper)
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.data_points = [
      DataPoint.Create(commit_position=1000),  # Already has lower bound.
  ]
  self.assertEqual(
      upper,
      next_commit_position_utils.GetNextCommitIDFromBuildRange(
          analysis, build_range, calculated_commit_id))

def testGetLatestRegressionRange(self):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.data_points = [
      DataPoint.Create(commit_position=91, pass_rate=0.9, git_hash='rev91'),
      DataPoint.Create(commit_position=90, pass_rate=1.0, git_hash='rev90'),
  ]
  self.assertEqual(
      CommitIDRange(
          lower=CommitID(commit_position=90, revision='rev90'),
          upper=CommitID(commit_position=91, revision='rev91')),
      analysis.GetLatestRegressionRange())

def _Bisect(regression_range):
  """Bisects a regression range or returns a culprit.

  Args:
    regression_range (CommitIDRange): The latest regression range to perform
      the bisection on.

  Returns:
    (CommitID, CommitID): The next commit to run and the suspected commit. If
      a next commit is identified, there will be no suspected commit, and vice
      versa.
  """
  lower_bound = regression_range.lower
  upper_bound = regression_range.upper
  assert lower_bound is not None, 'Cannot bisect without lower bound'
  assert upper_bound is not None, 'Cannot bisect without upper bound'

  next_commit_position = BisectPoint(lower_bound.commit_position,
                                     upper_bound.commit_position)

  if next_commit_position == lower_bound.commit_position:
    return None, upper_bound

  return CommitID(
      commit_position=next_commit_position,
      revision=git.GetRevisionForCommitPositionByAnotherCommit(
          upper_bound.revision, upper_bound.commit_position,
          next_commit_position)), None

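# Illustrative sketch only (not part of the production module): _Bisect above
# delegates the midpoint math to BisectPoint, which is assumed here to return
# the integer midpoint of the lower and upper commit positions. The helper
# below mirrors that assumed behavior so the culprit-detection condition in
# _Bisect is easier to follow; _assumed_bisect_point is a name invented for
# this sketch.
def _assumed_bisect_point(lower_commit_position, upper_commit_position):
  """Returns the assumed integer midpoint between the two bounds."""
  return (lower_commit_position + upper_commit_position) // 2


# With bounds 90 and 100 the next commit position to test is 95 (matching
# testBisectNextCommitPosition); with adjacent bounds 90 and 91 the midpoint
# equals the lower bound, which is the condition _Bisect uses to report the
# upper bound as the culprit (matching testBisectFinished).
assert _assumed_bisect_point(90, 100) == 95
assert _assumed_bisect_point(90, 91) == 90
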
def testGenerateCommitIDsForBoundingTargets(self):
  data_points = []
  lower_bound_target = IsolatedTarget.Create(67890, '', '', 'm', 'b', '', '',
                                             '', '', '', '', 1000, 'r1000')
  upper_bound_target = IsolatedTarget.Create(67890, '', '', 'm', 'b', '', '',
                                             '', '', '', '', 1010, 'r1010')
  lower_bound_commit_id = CommitID(commit_position=1000, revision='r1000')
  upper_bound_commit_id = CommitID(commit_position=1010, revision='r1010')
  self.assertEqual(
      (lower_bound_commit_id, upper_bound_commit_id),
      next_commit_position_utils.GenerateCommitIDsForBoundingTargets(
          data_points, lower_bound_target, upper_bound_target))

def testGetNextCommitIdExponentialSearch(self, mock_get_revision):
  regression_range = CommitIDRange(
      lower=CommitID(commit_position=90, revision='rev_90'),
      upper=CommitID(commit_position=100, revision='rev_100'))
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.9, git_hash='rev_100'),
      DataPoint.Create(commit_position=90, pass_rate=1.0, git_hash='rev_90'),
  ]
  mock_get_revision.return_value = 'rev_99'
  self.assertEqual(
      (CommitID(commit_position=99, revision='rev_99'), None),
      lookback_algorithm.GetNextCommitId(data_points, False, regression_range))

def testNextCommitPositionPipelineWithHeuristicResults(
    self, mock_heuristic_result, mock_run_heuristic, mock_next_commit):
  master_name = 'm'
  builder_name = 'b'
  build_number = 105
  step_name = 's'
  test_name = 't'
  start_commit_position = 1000
  suspect_commit_position = 95
  expected_next_commit_id = CommitID(commit_position=94, revision='r94')

  suspect = FlakeCulprit.Create('repo', 'revision', suspect_commit_position)
  suspect.commit_position = suspect_commit_position
  suspect.put()

  analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                        build_number, step_name, test_name)
  analysis.suspect_urlsafe_keys.append(suspect.key.urlsafe())
  analysis.put()

  mock_run_heuristic.return_value = False
  mock_heuristic_result.return_value = expected_next_commit_id

  calculated_next_commit_id = CommitID(commit_position=999, revision='r999')
  mock_next_commit.return_value = (calculated_next_commit_id, None)

  next_commit_position_input = NextCommitPositionInput(
      analysis_urlsafe_key=analysis.key.urlsafe(),
      commit_position_range=IntRange(lower=None, upper=start_commit_position),
      step_metadata=None)

  pipeline_job = NextCommitPositionPipeline(next_commit_position_input)
  pipeline_job.start()
  self.execute_queued_tasks()

  pipeline_job = pipelines.pipeline.Pipeline.from_id(pipeline_job.pipeline_id)
  next_commit_position_output = pipeline_job.outputs.default.value

  self.assertFalse(pipeline_job.was_aborted)
  self.assertIsNone(next_commit_position_output['culprit_commit_id'])
  self.assertEqual(expected_next_commit_id.ToSerializable(),
                   next_commit_position_output['next_commit_id'])
  mock_heuristic_result.assert_called_once_with(analysis.key.urlsafe())

def testGetLatestRegressionRangeMultipleDataPoints(self):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.data_points = [
      DataPoint.Create(commit_position=96, pass_rate=0.8),
      DataPoint.Create(commit_position=95, pass_rate=0.9, git_hash='rev95'),
      DataPoint.Create(commit_position=94, pass_rate=0.0, git_hash='rev94'),
      DataPoint.Create(commit_position=93, pass_rate=0.6),
      DataPoint.Create(commit_position=92, pass_rate=1.0),
      DataPoint.Create(commit_position=91, pass_rate=0.9),
      DataPoint.Create(commit_position=90, pass_rate=1.0),
  ]
  self.assertEqual(
      CommitIDRange(
          lower=CommitID(commit_position=94, revision='rev94'),
          upper=CommitID(commit_position=95, revision='rev95')),
      analysis.GetLatestRegressionRange())

def testGetLatestRegressionRangeRangeNoLowerBound(self):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.data_points = [
      DataPoint.Create(commit_position=100, pass_rate=1.0, git_hash='rev100')
  ]
  self.assertEqual(
      CommitIDRange(
          lower=CommitID(commit_position=100, revision='rev100'), upper=None),
      analysis.GetLatestRegressionRange())

def testGetNextCommitIdFromHeuristicResultsNoDataPoints(self, _):
  suspect = FlakeCulprit.Create('repo', 'revision', 1000)
  suspect.put()

  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.suspect_urlsafe_keys.append(suspect.key.urlsafe())
  analysis.put()

  next_commit_id = CommitID(commit_position=999, revision='r999')
  self.assertEqual(
      next_commit_id,
      next_commit_position_utils.GetNextCommitIdFromHeuristicResults(
          analysis.key.urlsafe()))

def testLookbackAlgorithmSingleFlakyDataPoint(self, _):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100')
  ]
  next_commit_id = CommitID(commit_position=99, revision='r99')
  self.assertEqual(
      (next_commit_id, None),  # Begins with step size 1.
      lookback_algorithm._DetermineNextCommitPosition(data_points))

def testGetLatestRegressionRangeNoUpperBoundMultipleDataPoints(self):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5),
      DataPoint.Create(commit_position=90, pass_rate=0.5, git_hash='rev90')
  ]
  self.assertEqual(
      CommitIDRange(
          lower=None, upper=CommitID(commit_position=90, revision='rev90')),
      analysis.GetLatestRegressionRange())

def testRerunAnalysisWithAnalyzeFlakePipeline(
    self, mocked_analysis, mocked_pipeline, mocked_need_analysis,
    mocked_build_info, mock_dimensions, *_):
  buildbucket_id = 'id'
  mock_dimensions.return_value = ['os:Mac', 'cpu:x86']
  start_commit_position = 1000
  start_build_info = BuildInfo('m', 'b 1', 123)
  start_build_info.commit_position = start_commit_position
  start_build_info.chromium_revision = 'r1000'
  start_build_info.buildbucket_id = buildbucket_id
  mocked_build_info.return_value = start_build_info
  mocked_analysis.pipeline_status_path.return_value = 'status'
  mocked_analysis.key.urlsafe.return_value = 'urlsafe_key'
  mocked_need_analysis.return_value = (True, mocked_analysis)

  test = TestInfo('m', 'b 1', 123, 's', 't')
  manually_triggered = False
  flake = Flake.Create('chromium', 's', 't', 'l')

  analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
      test,
      test,
      flake.key,
      bug_id=None,
      allow_new_analysis=True,
      manually_triggered=manually_triggered,
      force=True,
      queue_name=constants.DEFAULT_QUEUE)

  self.assertIsNotNone(analysis)
  self.assertEqual(buildbucket_id, analysis.build_id)
  self.assertEqual(buildbucket_id, analysis.original_build_id)

  analyze_flake_input = AnalyzeFlakeInput(
      analysis_urlsafe_key='urlsafe_key',
      analyze_commit_position_parameters=NextCommitPositionOutput(
          culprit_commit_id=None,
          next_commit_id=CommitID(
              commit_position=start_commit_position,
              revision=start_build_info.chromium_revision)),
      commit_position_range=IntRange(lower=None, upper=start_commit_position),
      dimensions=ListOfBasestring.FromSerializable(
          ['os:Mac', 'cpu:x86', 'pool:luci.chromium.findit']),
      manually_triggered=manually_triggered,
      rerun=True,
      retries=0,
      step_metadata=StepMetadata.FromSerializable({}))

  mocked_pipeline.assert_has_calls([
      mock.call(analyze_flake_input),
      mock.call().start(queue_name=constants.DEFAULT_QUEUE)
  ])

def testLookbackAlgorithmExponentialLookback(self, mock_git):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=90, pass_rate=0.5, git_hash='r90'),
  ]
  # Step size 10, rounded up to the next square == 16.
  next_commit_id = CommitID(commit_position=74, revision='r74')
  self.assertEqual(
      (next_commit_id, None),
      lookback_algorithm._DetermineNextCommitPosition(data_points))
  mock_git.assert_called_once_with('r90', 90, 74)

def testLookbackAlgorithmWithRegressionRangeRestartExponential(
    self, mock_git):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=90, pass_rate=1.0, git_hash='r90'),
  ]
  # 100 flaky, 90 stable. Restart the search from 99.
  next_commit_id = CommitID(commit_position=99, revision='r99')
  self.assertEqual(
      (next_commit_id, None),
      lookback_algorithm._DetermineNextCommitPosition(data_points))
  mock_git.assert_called_once_with('r100', 100, 99)

def testLookbackAlgorithmBisectWhenTestDoesNotExist(self, _):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(
          commit_position=50,
          pass_rate=flake_constants.PASS_RATE_TEST_NOT_FOUND,
          git_hash='r50'),
  ]
  next_commit_id = CommitID(commit_position=75, revision='r75')
  self.assertEqual(
      (next_commit_id, None),  # 100 flaky, 50 non-existent. Bisect.
      lookback_algorithm._DetermineNextCommitPosition(data_points))

def testLookbackAlgorithmCulpritFoundExistingTest(self):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=71, pass_rate=0.5, git_hash='r71'),
      DataPoint.Create(commit_position=70, pass_rate=1.0, git_hash='r70'),
  ]
  # 70 stable, 71 flaky. 71 must be the culprit.
  culprit_commit_id = CommitID(commit_position=71, revision='r71')
  self.assertEqual(
      (None, culprit_commit_id),
      lookback_algorithm._DetermineNextCommitPosition(data_points))

def testLookbackAlgorithmRestartExponentialLandsOnExistingDataPoint(
    self, _):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=32, pass_rate=0.5, git_hash='r32'),
      DataPoint.Create(commit_position=20, pass_rate=0.5, git_hash='r20'),
      DataPoint.Create(commit_position=4, pass_rate=1.0, git_hash='r4'),
  ]
  next_commit_id = CommitID(commit_position=19, revision='r19')
  self.assertEqual(
      (next_commit_id, None),
      lookback_algorithm._DetermineNextCommitPosition(data_points))

def testLookbackAlgorithmWithRegressionRangeContinueExponential(
    self, mock_git):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=90, pass_rate=0.5, git_hash='r90'),
      DataPoint.Create(commit_position=70, pass_rate=1.0, git_hash='r70'),
  ]
  next_commit_id = CommitID(commit_position=74, revision='r74')
  self.assertEqual(
      (next_commit_id, None),
      lookback_algorithm._DetermineNextCommitPosition(data_points))
  mock_git.assert_called_once_with('r90', 90, 74)

def testLookbackAlgorithmWithRegressionRangeRestartExponentialLargeStep(
    self, _):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=90, pass_rate=0.5, git_hash='r90'),
      DataPoint.Create(commit_position=20, pass_rate=0.5, git_hash='r20'),
      DataPoint.Create(commit_position=10, pass_rate=1.0, git_hash='r10'),
  ]
  next_commit_id = CommitID(commit_position=19, revision='r19')
  self.assertEqual(
      (next_commit_id, None),
      lookback_algorithm._DetermineNextCommitPosition(data_points))

def testOnFinalizedNoError(self):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.Save()

  analyze_flake_input = AnalyzeFlakeInput(
      analysis_urlsafe_key=analysis.key.urlsafe(),
      analyze_commit_position_parameters=NextCommitPositionOutput(
          next_commit_id=CommitID(commit_position=1000, revision='rev'),
          culprit_commit_id=None),
      commit_position_range=IntRange(lower=None, upper=None),
      dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
      manually_triggered=False,
      rerun=False,
      retries=0,
      step_metadata=None)

  pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
  pipeline_job.OnFinalized(analyze_flake_input)
  self.assertEqual(analysis_status.COMPLETED, analysis.status)

def testLookbackAlgorithmCulpritFoundNewlyAddedTest(self):
  data_points = [
      DataPoint.Create(commit_position=100, pass_rate=0.5, git_hash='r100'),
      DataPoint.Create(commit_position=71, pass_rate=0.5, git_hash='r71'),
      DataPoint.Create(
          commit_position=70,
          pass_rate=flake_constants.PASS_RATE_TEST_NOT_FOUND,
          git_hash='r70')
  ]
  # 70 nonexistent, 71 flaky. 71 must be the culprit.
  culprit_commit_id = CommitID(commit_position=71, revision='r71')
  self.assertEqual(
      (None, culprit_commit_id),
      lookback_algorithm._DetermineNextCommitPosition(data_points))

def testRecursiveAnalyzeFlakePipeline(self):
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.Save()

  analyze_flake_input = AnalyzeFlakeInput(
      analysis_urlsafe_key=analysis.key.urlsafe(),
      analyze_commit_position_parameters=NextCommitPositionOutput(
          next_commit_id=CommitID(commit_position=1000, revision='rev'),
          culprit_commit_id=None),
      commit_position_range=IntRange(lower=None, upper=None),
      dimensions=ListOfBasestring.FromSerializable([]),
      manually_triggered=False,
      rerun=False,
      retries=0,
      step_metadata=None)

  self.MockGeneratorPipeline(AnalyzeFlakePipeline, analyze_flake_input, None)

  pipeline_job = RecursiveAnalyzeFlakePipeline(analyze_flake_input)
  pipeline_job.start()
  self.execute_queued_tasks()

def GetNextCommitIdFromHeuristicResults(analysis_urlsafe_key):
  """Returns a commit_id based on heuristic results.

  Checks an analysis for suspect_urlsafe_keys, each of which corresponds to a
  commit position. Both those commit positions and the positions immediately
  preceding them are considered, depending on what has already been run and
  whether it is already flaky. suspect_urlsafe_keys are expected to be in
  chronological order.

  Args:
    analysis_urlsafe_key (str): The url-safe key to a MasterFlakeAnalysis.

  Returns:
    (CommitID): A suggested commit_id based on heuristic results, or None if
      not applicable.
  """
  analysis = ndb.Key(urlsafe=analysis_urlsafe_key).get()
  assert analysis

  suspect_urlsafe_keys = analysis.suspect_urlsafe_keys

  if not suspect_urlsafe_keys:
    # No heuristic results.
    return None

  # Suspects are expected to be in chronological order.
  for suspect_urlsafe_key in suspect_urlsafe_keys:
    # Go through each suspect to see if it has already been analyzed. For each
    # suspect, check the commit position immediately before it to verify it is
    # stable, and the suspect itself to verify it is flaky.
    suspect = ndb.Key(urlsafe=suspect_urlsafe_key).get()
    assert suspect
    assert suspect.commit_position is not None
    assert suspect.revision is not None

    previous_commit_position_data_point = (
        analysis.FindMatchingDataPointWithCommitPosition(
            suspect.commit_position - 1))

    if not previous_commit_position_data_point:
      # Return the commit position right before the suspect, for the caller to
      # verify it is stable.
      return CommitID(
          commit_position=suspect.commit_position - 1,
          revision=git.GetRevisionForCommitPositionByAnotherCommit(
              suspect.revision, suspect.commit_position,
              suspect.commit_position - 1))

    if not pass_rate_util.IsStableDefaultThresholds(
        previous_commit_position_data_point.pass_rate):
      # The test is already confirmed to be flaky before any of the heuristic
      # results.
      return None

    suspected_data_point = analysis.FindMatchingDataPointWithCommitPosition(
        suspect.commit_position)

    # Suspect has not yet been run. Return it next.
    if suspected_data_point is None:
      return CommitID(
          commit_position=suspect.commit_position, revision=suspect.revision)

  # Heuristic results and their 1-previous commit positions have all been run.
  return None

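# Illustrative sketch only (not part of the production module): a simplified,
# dependency-free rendering of the decision flow above, using plain dicts in
# place of ndb entities and data points. The names _next_heuristic_position,
# suspect_positions, analyzed_pass_rates and is_stable are invented for this
# sketch; the production function additionally resolves revisions via gitiles.
def _next_heuristic_position(suspect_positions, analyzed_pass_rates,
                             is_stable):
  """Returns the next commit position suggested by heuristic results."""
  for position in suspect_positions:  # Expected in chronological order.
    previous_pass_rate = analyzed_pass_rates.get(position - 1)
    if previous_pass_rate is None:
      # Check the commit right before the suspect first, to confirm stability.
      return position - 1
    if not is_stable(previous_pass_rate):
      # Already flaky before any heuristic result; heuristics don't apply.
      return None
    if position not in analyzed_pass_rates:
      # The preceding commit is stable and the suspect itself is unanalyzed.
      return position
  # All heuristic results and their preceding commits have been run.
  return None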