Example #1
  def testAnalyzeFlakePipelineStartTaskAfterDelay(self, mocked_delay, _):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    # Random date in the past, for coverage.
    analysis.request_time = datetime.datetime(2015, 1, 1, 1, 1, 1)
    analysis.Save()

    start_commit_position = 1000
    start_revision = 'r1000'
    delay = 60

    step_metadata = StepMetadata(
        canonical_step_name='s',
        dimensions=None,
        full_step_name='s',
        patched=False,
        swarm_task_ids=None,
        waterfall_buildername='b',
        waterfall_mastername='w',
        isolate_target_name='s')

    mocked_delay.return_value = delay

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    expected_retried_analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=1,
        step_metadata=step_metadata)

    self.MockAsynchronousPipeline(DelayPipeline, delay, delay)

    self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                               expected_retried_analyze_flake_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
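
# This test pins down the delayed-start path: when the mocked delay is
# nonzero, AnalyzeFlakePipeline first runs DelayPipeline with that delay and
# then re-enters analysis via RecursiveAnalyzeFlakePipeline with retries
# bumped from 0 to 1, leaving every other AnalyzeFlakeInput field unchanged.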
Example #2
  def testNextCommitPositionPipelineNotReproducible(self, mock_next_commit):
    master_name = 'm'
    builder_name = 'b'
    build_number = 100
    step_name = 's'
    test_name = 't'
    start_commit_position = 1000

    mock_next_commit.return_value = (None, None)

    analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                          build_number, step_name, test_name)
    analysis.Save()

    next_commit_position_input = NextCommitPositionInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        step_metadata=None)

    pipeline_job = NextCommitPositionPipeline(next_commit_position_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    pipeline_job = pipelines.pipeline.Pipeline.from_id(pipeline_job.pipeline_id)
    next_commit_position_output = pipeline_job.outputs.default.value

    self.assertFalse(pipeline_job.was_aborted)
    self.assertIsNone(next_commit_position_output['culprit_commit_id'])
    self.assertIsNone(next_commit_position_output['next_commit_id'])
Example #3
  def testAnalyzeFlakePipelineAnalysisFinishedNoFindings(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=None, culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    expected_report_event_input = ReportEventInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    self.MockGeneratorPipeline(ReportAnalysisEventPipeline,
                               expected_report_event_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    self.assertIsNone(analysis.culprit_urlsafe_key)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
    self.assertEqual(result_status.NOT_FOUND_UNTRIAGED, analysis.result_status)
Example #4
  def testGetDataPointsWithinCommitPositionRange(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.data_points = [
        DataPoint.Create(commit_position=1000),
        DataPoint.Create(commit_position=1005),
        DataPoint.Create(commit_position=1007),
        DataPoint.Create(commit_position=1010)
    ]
    self.assertEqual(
        analysis.data_points[-2:],
        analysis.GetDataPointsWithinCommitPositionRange(
            IntRange(lower=1007, upper=2000)))
    self.assertEqual([analysis.data_points[0]],
                     analysis.GetDataPointsWithinCommitPositionRange(
                         IntRange(lower=None, upper=1000)))
    self.assertEqual([analysis.data_points[-1]],
                     analysis.GetDataPointsWithinCommitPositionRange(
                         IntRange(lower=1010, upper=None)))
    self.assertEqual(
        analysis.data_points,
        analysis.GetDataPointsWithinCommitPositionRange(
            IntRange(lower=None, upper=None)))
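
# The assertions above pin down GetDataPointsWithinCommitPositionRange's
# semantics: both bounds are inclusive, and None leaves that side unbounded.
# A minimal standalone sketch of that filtering logic (a hypothetical helper
# for illustration, not the production method on MasterFlakeAnalysis):
def data_points_within_range_sketch(data_points, lower=None, upper=None):
  low = lower if lower is not None else float('-inf')
  high = upper if upper is not None else float('inf')
  return [dp for dp in data_points if low <= dp.commit_position <= high]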
Example #5
  def testRerunAnalysisWithAnalyzeFlakePipeline(
      self, mocked_analysis, mocked_pipeline, mocked_need_analysis,
      mocked_build_info, mock_dimensions, *_):
    buildbucket_id = 'id'
    mock_dimensions.return_value = ['os:Mac', 'cpu:x86']
    start_commit_position = 1000
    start_build_info = BuildInfo('m', 'b 1', 123)
    start_build_info.commit_position = start_commit_position
    start_build_info.chromium_revision = 'r1000'
    start_build_info.buildbucket_id = buildbucket_id
    mocked_build_info.return_value = start_build_info
    mocked_analysis.pipeline_status_path.return_value = 'status'
    mocked_analysis.key.urlsafe.return_value = 'urlsafe_key'
    mocked_need_analysis.return_value = (True, mocked_analysis)
    test = TestInfo('m', 'b 1', 123, 's', 't')
    manually_triggered = False
    flake = Flake.Create('chromium', 's', 't', 'l')

    analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
        test,
        test,
        flake.key,
        bug_id=None,
        allow_new_analysis=True,
        manually_triggered=manually_triggered,
        force=True,
        queue_name=constants.DEFAULT_QUEUE)

    self.assertIsNotNone(analysis)
    self.assertEqual(buildbucket_id, analysis.build_id)
    self.assertEqual(buildbucket_id, analysis.original_build_id)

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key='urlsafe_key',
        analyze_commit_position_parameters=NextCommitPositionOutput(
            culprit_commit_id=None,
            next_commit_id=CommitID(
                commit_position=start_commit_position,
                revision=start_build_info.chromium_revision)),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        dimensions=ListOfBasestring.FromSerializable(
            ['os:Mac', 'cpu:x86', 'pool:luci.chromium.findit']),
        manually_triggered=manually_triggered,
        rerun=True,
        retries=0,
        step_metadata=StepMetadata.FromSerializable({}))

    mocked_pipeline.assert_has_calls([
        mock.call(analyze_flake_input),
        mock.call().start(queue_name=constants.DEFAULT_QUEUE)
    ])
Example #6
  def testNextCommitPositionPipelineWithHeuristicResults(
      self, mock_heuristic_result, mock_run_heuristic, mock_next_commit):
    master_name = 'm'
    builder_name = 'b'
    build_number = 105
    step_name = 's'
    test_name = 't'
    start_commit_position = 1000
    suspect_commit_position = 95
    expected_next_commit_id = CommitID(commit_position=94, revision='r94')

    suspect = FlakeCulprit.Create('repo', 'revision', suspect_commit_position)
    suspect.commit_position = suspect_commit_position
    suspect.put()

    analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                          build_number, step_name, test_name)
    analysis.suspect_urlsafe_keys.append(suspect.key.urlsafe())
    analysis.put()

    mock_run_heuristic.return_value = False
    mock_heuristic_result.return_value = expected_next_commit_id

    calculated_next_commit_id = CommitID(commit_position=999, revision='r999')
    mock_next_commit.return_value = (calculated_next_commit_id, None)

    next_commit_position_input = NextCommitPositionInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        step_metadata=None)

    pipeline_job = NextCommitPositionPipeline(next_commit_position_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    pipeline_job = pipelines.pipeline.Pipeline.from_id(pipeline_job.pipeline_id)
    next_commit_position_output = pipeline_job.outputs.default.value

    self.assertFalse(pipeline_job.was_aborted)
    self.assertIsNone(next_commit_position_output['culprit_commit_id'])
    self.assertEqual(expected_next_commit_id.ToSerializable(),
                     next_commit_position_output['next_commit_id'])
    mock_heuristic_result.assert_called_once_with(analysis.key.urlsafe())
Example #7
  def testOnFinalizedNoError(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(commit_position=1000, revision='rev'),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.OnFinalized(analyze_flake_input)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
Example #8
  def testRecursiveAnalyzeFlakePipeline(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(commit_position=1000, revision='rev'),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    self.MockGeneratorPipeline(AnalyzeFlakePipeline, analyze_flake_input, None)

    pipeline_job = RecursiveAnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Example #9
    def RunImpl(self, parameters):
        """Determines the next commit position to analyze."""

        analysis_urlsafe_key = parameters.analysis_urlsafe_key
        analysis = ndb.Key(urlsafe=analysis_urlsafe_key).get()
        assert analysis

        master_name = analysis.master_name
        builder_name = analysis.builder_name
        specified_lower_bound = parameters.commit_position_range.lower
        specified_upper_bound = parameters.commit_position_range.upper

        data_points = analysis.GetDataPointsWithinCommitPositionRange(
            IntRange(lower=specified_lower_bound, upper=specified_upper_bound))

        # Data points must be sorted in reverse order by commit position
        # before being passed to the lookback algorithm.
        data_points = sorted(data_points,
                             key=lambda k: k.commit_position,
                             reverse=True)

        # A suspected build id is available when there is a regression range
        # that spans a single build cycle. In that case, bisect is preferred
        # to exponential search.
        use_bisect = (analysis.suspected_flake_build_number is not None
                      or analysis.suspected_build_id is not None)
        latest_regression_range = analysis.GetLatestRegressionRange()

        calculated_next_commit_id, culprit_commit_id = (
            lookback_algorithm.GetNextCommitId(data_points, use_bisect,
                                               latest_regression_range))

        if calculated_next_commit_id is None:
            # The analysis is finished according to the lookback algorithm.
            return NextCommitPositionOutput(
                next_commit_id=None, culprit_commit_id=culprit_commit_id)

        cutoff_commit_position = (
            next_commit_position_utils.GetEarliestCommitPosition(
                specified_lower_bound, specified_upper_bound))

        if calculated_next_commit_id.commit_position < cutoff_commit_position:
            # Long-standing flake. Do not continue the analysis.
            return NextCommitPositionOutput(next_commit_id=None,
                                            culprit_commit_id=None)

        # Try the analysis' heuristic results first, if any.
        next_commit_id = (
            next_commit_position_utils.GetNextCommitIdFromHeuristicResults(
                analysis_urlsafe_key))

        if next_commit_id is not None:
            # Heuristic results are available and should be tried first.
            assert not analysis.FindMatchingDataPointWithCommitPosition(
                next_commit_id.commit_position
            ), ('Existing heuristic results suggest commit position {} which has '
                'already been run'.format(next_commit_id.commit_position))
            return NextCommitPositionOutput(next_commit_id=next_commit_id,
                                            culprit_commit_id=None)

        # Round off the next calculated commit position to the nearest builds on
        # both sides.
        reference_build_info = build_util.GetBuildInfo(master_name,
                                                       builder_name,
                                                       analysis.build_number)
        parent_mastername = reference_build_info.parent_mastername or master_name
        parent_buildername = (reference_build_info.parent_buildername
                              or builder_name)
        target_name = parameters.step_metadata.isolate_target_name

        try:
            lower_bound_target, upper_bound_target = (
                step_util.GetBoundingIsolatedTargets(
                    parent_mastername, parent_buildername, target_name,
                    calculated_next_commit_id.commit_position))

            # Update the analysis' suspected build cycle if identified.
            analysis.UpdateSuspectedBuild(lower_bound_target,
                                          upper_bound_target)

            lower_bound_commit_id, upper_bound_commit_id = (
                next_commit_position_utils.GenerateCommitIDsForBoundingTargets(
                    data_points, lower_bound_target, upper_bound_target))
        except AssertionError as e:
            # Fallback to searching buildbot in case builds aren't indexed as
            # IsolatedTargets.
            # TODO(crbug.com/872992): Remove fallback logic.
            analysis.LogError(e.message)
            analysis.LogWarning((
                'Failed to determine isolated targets surrounding {}. Falling back '
                'to searching buildbot').format(
                    calculated_next_commit_id.commit_position))
            upper_bound_build_number = analysis.GetLowestUpperBoundBuildNumber(
                calculated_next_commit_id)
            lower_bound_build, upper_bound_build = (
                step_util.GetValidBoundingBuildsForStep(
                    master_name, builder_name, analysis.step_name, None,
                    upper_bound_build_number,
                    calculated_next_commit_id.commit_position))

            # Update the analysis' suspected build cycle if identified.
            analysis.UpdateSuspectedBuildUsingBuildInfo(
                lower_bound_build, upper_bound_build)
            lower_bound_commit_id = CommitID(
                commit_position=lower_bound_build.commit_position,
                revision=lower_bound_build.chromium_revision
            ) if lower_bound_build else None
            upper_bound_commit_id = CommitID(
                commit_position=upper_bound_build.commit_position,
                revision=upper_bound_build.chromium_revision
            ) if upper_bound_build else None

        # When identifying the neighboring builds of the requested commit position,
        # heuristic analysis may become eligible if the neighboring builds are
        # adjacent to one another.
        if analysis.CanRunHeuristicAnalysis():
            # Run heuristic analysis if eligible and not yet already done.
            heuristic_analysis.RunHeuristicAnalysis(analysis)

            # Try the newly computed heuristic results if any were identified.
            next_commit_id = (
                next_commit_position_utils.GetNextCommitIdFromHeuristicResults(
                    analysis_urlsafe_key))
            if next_commit_id is not None:  # pragma: no branch
                assert not analysis.FindMatchingDataPointWithCommitPosition(
                    next_commit_id.commit_position
                ), ('Newly run heuristic results suggest commit position {} which has '
                    'already been run'.format(next_commit_id.commit_position))
                return NextCommitPositionOutput(next_commit_id=next_commit_id,
                                                culprit_commit_id=None)

        # Pick the commit position of the returned neighboring builds that has not
        # yet been analyzed if possible, or the commit position itself when not.
        build_range = CommitIDRange(lower=lower_bound_commit_id,
                                    upper=upper_bound_commit_id)
        actual_next_commit_id = (
            next_commit_position_utils.GetNextCommitIDFromBuildRange(
                analysis, build_range, calculated_next_commit_id))
        assert not analysis.FindMatchingDataPointWithCommitPosition(
            actual_next_commit_id.commit_position), (
                'Rounded-off commit position {} has already been run'.format(
                    actual_next_commit_id.commit_position))
        return NextCommitPositionOutput(next_commit_id=actual_next_commit_id,
                                        culprit_commit_id=culprit_commit_id)
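
# RunImpl delegates the core decision to lookback_algorithm.GetNextCommitId.
# A simplified, hypothetical sketch of that decision over bare commit
# positions (assumed behavior for illustration, not the production
# algorithm): bisect the regression range when one spanning a single build
# cycle is known, otherwise step back with exponentially widening jumps.
def sketch_next_commit_position(analyzed_positions, use_bisect,
                                regression_range):
    # Assumes at least one commit position has already been analyzed.
    if use_bisect and regression_range is not None:
        lower, upper = regression_range
        if upper - lower <= 1:
            return None  # Bounds are adjacent; upper is the culprit candidate.
        return (lower + upper) // 2
    # Exponential lookback: jump back twice the span covered so far.
    ordered = sorted(analyzed_positions, reverse=True)
    span = ordered[0] - ordered[-1] if len(ordered) > 1 else 1
    return ordered[-1] - max(2 * span, 1)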
Example #10
def ScheduleAnalysisIfNeeded(
        normalized_test,
        original_test,
        flake_key,
        bug_id=None,
        allow_new_analysis=False,
        force=False,
        manually_triggered=False,
        user_email=None,
        triggering_source=triggering_sources.FINDIT_PIPELINE,
        queue_name=constants.DEFAULT_QUEUE):
    """Schedules an analysis if needed and returns the MasterFlakeAnalysis.

  When the build failure was already analyzed and a new analysis is scheduled,
  the returned WfAnalysis will still have the result of last completed analysis.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
      a CQ trybot step to a Waterfall buildbot step, striping prefix "PRE_"
      from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    flake_key (ndb.Key): The key to the Flake responsible for triggering this
      analysis.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    manually_triggered (bool): True if the analysis was requested manually,
      such as by a Chromium sheriff.
    user_email (str): The email of the user requesting the analysis.
    triggering_source (int): From where this analysis was triggered, such as
      through Findit pipeline, UI, or through Findit API.
    queue_name (str): The App Engine queue to run the analysis.

  Returns:
    A MasterFlakeAnalysis instance.
    None if no analysis was scheduled and the user has no permission to.
  """
    need_new_analysis, analysis = _NeedANewAnalysis(
        normalized_test,
        original_test,
        flake_key,
        bug_id=bug_id,
        allow_new_analysis=allow_new_analysis,
        force=force,
        user_email=user_email,
        triggering_source=triggering_source)

    if need_new_analysis:
        # _NeedANewAnalysis just created master_flake_analysis. Use the latest
        # version number and pass that along to the other pipelines for updating
        # results and data.
        logging.info(
            'A new master flake analysis was successfully saved for %s (%s) and '
            'will be captured in version %s', repr(normalized_test),
            repr(original_test), analysis.version_number)

        step_metadata = (step_util.LegacyGetStepMetadata(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number,
            normalized_test.step_name) or step_util.LegacyGetStepMetadata(
                original_test.master_name, original_test.builder_name,
                original_test.build_number, original_test.step_name))

        logging.info('Initializing flake analysis pipeline for key: %s',
                     analysis.key)

        starting_build_info = build_util.GetBuildInfo(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number)

        original_build_info = build_util.GetBuildInfo(
            original_test.master_name, original_test.builder_name,
            original_test.build_number)

        assert starting_build_info, (
            'Failed to get starting build for flake analysis')
        starting_commit_position = starting_build_info.commit_position

        assert starting_commit_position is not None, (
            'Cannot analyze flake without a starting commit position')

        assert original_build_info, 'Failed to get original build info'

        # Get the dimensions of the bot for when try jobs are needed to compile.
        dimensions = try_job_service.GetDimensionsFromBuildInfo(
            starting_build_info)

        analyze_flake_input = AnalyzeFlakeInput(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            analyze_commit_position_parameters=NextCommitPositionOutput(
                culprit_commit_id=None,
                next_commit_id=CommitID(
                    commit_position=starting_commit_position,
                    revision=starting_build_info.chromium_revision)),
            commit_position_range=IntRange(lower=None,
                                           upper=starting_commit_position),
            dimensions=ListOfBasestring.FromSerializable(dimensions),
            manually_triggered=manually_triggered,
            retries=0,
            rerun=force,
            step_metadata=StepMetadata.FromSerializable(step_metadata))

        pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)

        pipeline_job.target = appengine_util.GetTargetNameForModule(
            constants.WATERFALL_BACKEND)
        pipeline_job.start(queue_name=queue_name)
        analysis.pipeline_status_path = pipeline_job.pipeline_status_path
        analysis.root_pipeline_id = pipeline_job.root_pipeline_id
        analysis.build_id = starting_build_info.buildbucket_id
        analysis.original_build_id = original_build_info.buildbucket_id
        analysis.put()
        analysis.LogInfo((
            'A flake analysis was scheduled using commit-based pipelines with '
            'path {}').format(pipeline_job.pipeline_status_path))
    else:
        logging.info('A flake analysis is not necessary for build %s, %s, %s, %s',
                     normalized_test.master_name, normalized_test.builder_name,
                     normalized_test.build_number, normalized_test.step_name)

    return analysis
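
# A usage sketch for ScheduleAnalysisIfNeeded, mirroring the rerun test in
# Example #5 above (the TestInfo/Flake values are placeholders):
#
#   test = TestInfo('m', 'b 1', 123, 's', 't')
#   flake = Flake.Create('chromium', 's', 't', 'l')
#   analysis = ScheduleAnalysisIfNeeded(
#       test, test, flake.key,
#       bug_id=None,
#       allow_new_analysis=True,
#       force=True,
#       queue_name=constants.DEFAULT_QUEUE)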
Example #11
  def testNextCommitPositionPipelineContinueAnalysisFallbackToBuildInfo(
      self, mock_update, mock_heuristic, mock_targets, mock_bounding_builds,
      mock_next_commit, mock_reference_build):
    master_name = 'm'
    builder_name = 'b'
    build_number = 100
    step_name = 's'
    test_name = 't'
    start_commit_position = 1000
    expected_next_commit_id = CommitID(commit_position=990, revision='r990')

    mock_heuristic.return_value = False

    calculated_next_commit_id = CommitID(commit_position=999, revision='r999')
    mock_next_commit.return_value = (calculated_next_commit_id, None)

    target_name = 'browser_tests'
    step_metadata = StepMetadata(
        canonical_step_name=None,
        dimensions=None,
        full_step_name=None,
        isolate_target_name=target_name,
        patched=True,
        swarm_task_ids=None,
        waterfall_buildername=None,
        waterfall_mastername=None)

    reference_build = BuildInfo(master_name, builder_name, build_number)
    reference_build.commit_position = start_commit_position
    mock_reference_build.return_value = reference_build

    lower_bound_build = BuildInfo(master_name, builder_name, build_number - 1)
    lower_bound_build.commit_position = expected_next_commit_id.commit_position
    lower_bound_build.chromium_revision = expected_next_commit_id.revision
    upper_bound_build = BuildInfo(master_name, builder_name, build_number)
    upper_bound_build.commit_position = start_commit_position
    mock_bounding_builds.return_value = (lower_bound_build, upper_bound_build)

    mock_targets.side_effect = AssertionError()

    analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                          build_number, step_name, test_name)
    analysis.data_points = [
        DataPoint.Create(commit_position=start_commit_position)
    ]
    analysis.Save()

    next_commit_position_input = NextCommitPositionInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        step_metadata=step_metadata)

    pipeline_job = NextCommitPositionPipeline(next_commit_position_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    mock_update.assert_called_once_with(lower_bound_build, upper_bound_build)
    pipeline_job = pipelines.pipeline.Pipeline.from_id(pipeline_job.pipeline_id)
    next_commit_position_output = pipeline_job.outputs.default.value

    self.assertFalse(pipeline_job.was_aborted)
    self.assertIsNone(next_commit_position_output['culprit_commit_id'])
    self.assertEqual(expected_next_commit_id.ToSerializable(),
                     next_commit_position_output['next_commit_id'])
Example #12
  def testNextCommitPositionPipelineContinueAnalysis(
      self, mock_reference_build, mock_heuristic, mock_next_commit,
      mock_bound_commits):
    master_name = 'm'
    builder_name = 'b'
    parent_mastername = 'p_m'
    parent_buildername = 'p_b'
    build_number = 100
    build_id = 10000
    step_name = 's'
    test_name = 't'
    start_commit_position = 1000
    expected_next_commit_id = CommitID(commit_position=990, revision='r990')

    reference_build = BuildInfo(master_name, builder_name, build_number)
    reference_build.commit_position = start_commit_position
    reference_build.parent_mastername = parent_mastername
    reference_build.parent_buildername = parent_buildername
    mock_reference_build.return_value = reference_build
    mock_heuristic.return_value = False

    calculated_next_commit_id = CommitID(commit_position=999, revision='r999')
    mock_next_commit.return_value = (calculated_next_commit_id, None)

    target_name = 'browser_tests'
    step_metadata = StepMetadata(
        canonical_step_name=None,
        dimensions=None,
        full_step_name=None,
        isolate_target_name=target_name,
        patched=True,
        swarm_task_ids=None,
        waterfall_buildername=None,
        waterfall_mastername=None)

    luci_name = 'chromium'
    bucket_name = 'ci'
    gitiles_host = 'chromium.googlesource.com'
    gitiles_project = 'chromium/src'
    gitiles_ref = 'refs/heads/master'
    gerrit_patch = ''

    lower_bound_target = IsolatedTarget.Create(
        build_id - 1, luci_name, bucket_name, parent_mastername,
        parent_buildername, gitiles_host, gitiles_project, gitiles_ref,
        gerrit_patch, target_name, 'hash_1',
        expected_next_commit_id.commit_position, None)
    lower_bound_target.put()

    upper_bound_target = IsolatedTarget.Create(
        build_id, luci_name, bucket_name, parent_mastername, parent_buildername,
        gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
        'hash_2', start_commit_position, None)
    upper_bound_target.put()
    mock_bound_commits.return_value = (
        expected_next_commit_id,
        CommitID(commit_position=start_commit_position, revision='r1000'))

    analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                          build_number, step_name, test_name)
    analysis.data_points = [
        DataPoint.Create(commit_position=start_commit_position)
    ]
    analysis.Save()

    next_commit_position_input = NextCommitPositionInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        step_metadata=step_metadata)

    pipeline_job = NextCommitPositionPipeline(next_commit_position_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    pipeline_job = pipelines.pipeline.Pipeline.from_id(pipeline_job.pipeline_id)
    next_commit_position_output = pipeline_job.outputs.default.value

    self.assertFalse(pipeline_job.was_aborted)
    self.assertIsNone(next_commit_position_output['culprit_commit_id'])
    self.assertEqual(expected_next_commit_id.ToSerializable(),
                     next_commit_position_output['next_commit_id'])
    mock_bound_commits.assert_called_once_with(
        analysis.data_points, lower_bound_target, upper_bound_target)
Example #13
  def testAnalyzeFlakePipelineCanStartAnalysisImmediately(self, _):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    start_commit_position = 1000
    start_revision = 'r1000'
    isolate_sha = 'sha1'
    next_commit_position = 999
    pass_rate = 0.5
    build_url = 'url'
    try_job_url = None

    get_sha_output = GetIsolateShaOutput(
        isolate_sha=isolate_sha, build_url=build_url, try_job_url=try_job_url)

    step_metadata = StepMetadata(
        canonical_step_name='s',
        dimensions=None,
        full_step_name='s',
        patched=False,
        swarm_task_ids=None,
        waterfall_buildername='b',
        waterfall_mastername='w',
        isolate_target_name='s')

    expected_flakiness = Flakiness(
        build_url=build_url,
        commit_position=start_commit_position,
        revision=start_revision,
        pass_rate=pass_rate)

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    expected_isolate_sha_input = GetIsolateShaForCommitPositionParameters(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position=start_commit_position,
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        revision=start_revision,
        step_metadata=step_metadata,
        upper_bound_build_number=analysis.build_number)

    expected_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=analysis.builder_name,
        commit_position=start_commit_position,
        flakiness_thus_far=None,
        get_isolate_sha_output=get_sha_output,
        master_name=analysis.master_name,
        previous_swarming_task_output=None,
        reference_build_number=analysis.build_number,
        revision=start_revision,
        step_name=analysis.step_name,
        test_name=analysis.test_name)

    expected_update_data_points_input = UpdateFlakeAnalysisDataPointsInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        flakiness=expected_flakiness)

    expected_next_commit_position_input = NextCommitPositionInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position_range=IntRange(lower=None, upper=None),
        step_metadata=step_metadata)

    next_commit_id = CommitID(
        commit_position=next_commit_position, revision='r999')
    expected_next_commit_position_output = NextCommitPositionOutput(
        next_commit_id=next_commit_id, culprit_commit_id=None)

    expected_recursive_analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=expected_next_commit_position_output,
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    self.MockGeneratorPipeline(GetIsolateShaForCommitPositionPipeline,
                               expected_isolate_sha_input, get_sha_output)

    self.MockGeneratorPipeline(DetermineApproximatePassRatePipeline,
                               expected_pass_rate_input, expected_flakiness)

    self.MockSynchronousPipeline(UpdateFlakeAnalysisDataPointsPipeline,
                                 expected_update_data_points_input, None)

    self.MockSynchronousPipeline(NextCommitPositionPipeline,
                                 expected_next_commit_position_input,
                                 expected_next_commit_position_output)

    self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                               expected_recursive_analyze_flake_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
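
# The mocks above pin down one full iteration of AnalyzeFlakePipeline:
# GetIsolateShaForCommitPositionPipeline feeds
# DetermineApproximatePassRatePipeline, whose flakiness result is stored via
# UpdateFlakeAnalysisDataPointsPipeline; NextCommitPositionPipeline then
# yields commit position 999, so the analysis recurses through
# RecursiveAnalyzeFlakePipeline with the new commit parameters.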
Example #14
  def testAnalyzeFlakePipelineAnalysisFinishedWithCulprit(
      self, mocked_confidence, mocked_culprit, _):
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 's'
    test_name = 't'
    culprit_commit_position = 999

    analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                          build_number, step_name, test_name)
    analysis.data_points = [
        DataPoint.Create(commit_position=culprit_commit_position)
    ]
    analysis.original_master_name = master_name
    analysis.original_builder_name = builder_name
    analysis.original_build_number = build_number
    analysis.original_step_name = step_name
    analysis.original_test_name = test_name
    analysis.Save()

    culprit_revision = 'r999'
    confidence_score = 0.85
    culprit = FlakeCulprit.Create('chromium', culprit_revision,
                                  culprit_commit_position)
    culprit.put()

    mocked_confidence.return_value = confidence_score
    mocked_culprit.return_value = culprit

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=None,
            culprit_commit_id=CommitID(
                commit_position=culprit_commit_position,
                revision=culprit_revision)),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    expected_recent_flakiness_input = AnalyzeRecentFlakinessInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    expected_auto_action_input = _PerformAutoActionsInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    expected_report_event_input = ReportEventInput(
        analysis_urlsafe_key=analysis.key.urlsafe())

    self.MockGeneratorPipeline(AnalyzeRecentFlakinessPipeline,
                               expected_recent_flakiness_input, None)
    self.MockGeneratorPipeline(_PerformAutoActionsPipeline,
                               expected_auto_action_input, None)
    self.MockGeneratorPipeline(ReportAnalysisEventPipeline,
                               expected_report_event_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    self.assertEqual(analysis_status.COMPLETED, analysis.status)
    self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)