Example #1
  def testAnalyzeFlakePipelineStartTaskAfterDelay(self, mocked_delay, _):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    # Random date in the past, for coverage.
    analysis.request_time = datetime.datetime(2015, 1, 1, 1, 1, 1)
    analysis.Save()

    start_commit_position = 1000
    start_revision = 'r1000'
    delay = 60

    step_metadata = StepMetadata(
        canonical_step_name='s',
        dimensions=None,
        full_step_name='s',
        patched=False,
        swarm_task_ids=None,
        waterfall_buildername='b',
        waterfall_mastername='w',
        isolate_target_name='s')

    mocked_delay.return_value = delay

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    expected_retried_analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=1,
        step_metadata=step_metadata)

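    # A nonzero delay means the pipeline should pause via DelayPipeline, then
    # retry itself through RecursiveAnalyzeFlakePipeline with retries + 1.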
    self.MockAsynchronousPipeline(DelayPipeline, delay, delay)

    self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                               expected_retried_analyze_flake_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Example #2
  def testAnalyzeFlakePipelineAnalysisFinishedNoFindings(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=None, culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

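    # With no next commit and no culprit, the analysis completes with no
    # findings and only reports the analysis event.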
    expected_report_event_input = ReportEventInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    self.MockGeneratorPipeline(ReportAnalysisEventPipeline,
                               expected_report_event_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    self.assertIsNone(analysis.culprit_urlsafe_key)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
    self.assertEqual(result_status.NOT_FOUND_UNTRIAGED, analysis.result_status)
Example #3
  def testRerunAnalysisWithAnalyzeFlakePipeline(
      self, mocked_analysis, mocked_pipeline, mocked_need_analysis,
      mocked_build_info, mock_dimensions, *_):
    buildbucket_id = 'id'
    mock_dimensions.return_value = ['os:Mac', 'cpu:x86']
    start_commit_position = 1000
    start_build_info = BuildInfo('m', 'b 1', 123)
    start_build_info.commit_position = start_commit_position
    start_build_info.chromium_revision = 'r1000'
    start_build_info.buildbucket_id = buildbucket_id
    mocked_build_info.return_value = start_build_info
    mocked_analysis.pipeline_status_path.return_value = 'status'
    mocked_analysis.key.urlsafe.return_value = 'urlsafe_key'
    mocked_need_analysis.return_value = (True, mocked_analysis)
    test = TestInfo('m', 'b 1', 123, 's', 't')
    manually_triggered = False
    flake = Flake.Create('chromium', 's', 't', 'l')

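    # force=True should schedule a fresh analysis even though one already
    # exists, producing a pipeline input with rerun=True.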
    analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
        test,
        test,
        flake.key,
        bug_id=None,
        allow_new_analysis=True,
        manually_triggered=manually_triggered,
        force=True,
        queue_name=constants.DEFAULT_QUEUE)

    self.assertIsNotNone(analysis)
    self.assertEqual(buildbucket_id, analysis.build_id)
    self.assertEqual(buildbucket_id, analysis.original_build_id)

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key='urlsafe_key',
        analyze_commit_position_parameters=NextCommitPositionOutput(
            culprit_commit_id=None,
            next_commit_id=CommitID(
                commit_position=start_commit_position,
                revision=start_build_info.chromium_revision)),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        dimensions=ListOfBasestring.FromSerializable(
            ['os:Mac', 'cpu:x86', 'pool:luci.chromium.findit']),
        manually_triggered=manually_triggered,
        rerun=True,
        retries=0,
        step_metadata=StepMetadata.FromSerializable({}))

    mocked_pipeline.assert_has_calls([
        mock.call(analyze_flake_input),
        mock.call().start(queue_name=constants.DEFAULT_QUEUE)
    ])
Example #4
  def testOnFinalizedNoError(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(commit_position=1000, revision='rev'),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

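    # Finalizing without a pipeline error should mark the analysis COMPLETED.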
    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.OnFinalized(analyze_flake_input)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
Example #5
  def testRecursiveAnalyzeFlakePipeline(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(commit_position=1000, revision='rev'),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

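    # The recursive wrapper should simply re-spawn AnalyzeFlakePipeline with
    # the same input.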
    self.MockGeneratorPipeline(AnalyzeFlakePipeline, analyze_flake_input, None)

    pipeline_job = RecursiveAnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Example #6
def ScheduleAnalysisIfNeeded(
        normalized_test,
        original_test,
        flake_key,
        bug_id=None,
        allow_new_analysis=False,
        force=False,
        manually_triggered=False,
        user_email=None,
        triggering_source=triggering_sources.FINDIT_PIPELINE,
        queue_name=constants.DEFAULT_QUEUE):
    """Schedules an analysis if needed and returns the MasterFlakeAnalysis.

  When the build failure was already analyzed and a new analysis is scheduled,
  the returned WfAnalysis will still have the result of last completed analysis.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
      a CQ trybot step to a Waterfall buildbot step, striping prefix "PRE_"
      from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    flake_key (ndb.Key): The key to the Flake responsible for triggering this
      analysis.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    manually_triggered (bool): True if the analysis was requested manually,
      such as by a Chromium sheriff.
    user_email (str): The email of the user requesting the analysis.
    triggering_source (int): From where this analysis was triggered, such as
      through Findit pipeline, UI, or through Findit API.
    queue_name (str): The App Engine queue to run the analysis.

  Returns:
    A MasterFlakeAnalysis instance.
    None if no analysis was scheduled and the user has no permission to.
  """
    need_new_analysis, analysis = _NeedANewAnalysis(
        normalized_test,
        original_test,
        flake_key,
        bug_id=bug_id,
        allow_new_analysis=allow_new_analysis,
        force=force,
        user_email=user_email,
        triggering_source=triggering_source)

    if need_new_analysis:
        # _NeedANewAnalysis just created master_flake_analysis. Use the latest
        # version number and pass that along to the other pipelines for updating
        # results and data.
        logging.info(
            'A new master flake analysis was successfully saved for %s (%s) and '
            'will be captured in version %s', repr(normalized_test),
            repr(original_test), analysis.version_number)

        step_metadata = (step_util.LegacyGetStepMetadata(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number,
            normalized_test.step_name) or step_util.LegacyGetStepMetadata(
                original_test.master_name, original_test.builder_name,
                original_test.build_number, original_test.step_name))

        logging.info('Initializing flake analysis pipeline for key: %s',
                     analysis.key)

        starting_build_info = build_util.GetBuildInfo(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number)

        original_build_info = build_util.GetBuildInfo(
            original_test.master_name, original_test.builder_name,
            original_test.build_number)

        assert starting_build_info, (
            'Failed to get starting build for flake analysis')
        starting_commit_position = starting_build_info.commit_position

        assert starting_commit_position is not None, (
            'Cannot analyze flake without a starting commit position')

        assert original_build_info, 'Failed to get original build info'

        # Get the dimensions of the bot in case try jobs are needed to compile.
        dimensions = try_job_service.GetDimensionsFromBuildInfo(
            starting_build_info)

        analyze_flake_input = AnalyzeFlakeInput(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            analyze_commit_position_parameters=NextCommitPositionOutput(
                culprit_commit_id=None,
                next_commit_id=CommitID(
                    commit_position=starting_commit_position,
                    revision=starting_build_info.chromium_revision)),
            commit_position_range=IntRange(lower=None,
                                           upper=starting_commit_position),
            dimensions=ListOfBasestring.FromSerializable(dimensions),
            manually_triggered=manually_triggered,
            retries=0,
            rerun=force,
            step_metadata=StepMetadata.FromSerializable(step_metadata))

        pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)

        pipeline_job.target = appengine_util.GetTargetNameForModule(
            constants.WATERFALL_BACKEND)
        pipeline_job.start(queue_name=queue_name)
        analysis.pipeline_status_path = pipeline_job.pipeline_status_path
        analysis.root_pipeline_id = pipeline_job.root_pipeline_id
        analysis.build_id = starting_build_info.buildbucket_id
        analysis.original_build_id = original_build_info.buildbucket_id
        analysis.put()
        analysis.LogInfo((
            'A flake analysis was scheduled using commit-based pipelines with '
            'path {}').format(pipeline_job.pipeline_status_path))
    else:
        logging.info(
            'A flake analysis is not necessary for build %s, %s, %s, %s',
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number, normalized_test.step_name)

    return analysis
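
A minimal call sketch, modeled on the rerun test in Example #3; the TestInfo
arguments, the Flake key fields, and the queue name are illustrative values,
not required ones:

    test = TestInfo('m', 'b', 123, 's', 't')
    flake = Flake.Create('chromium', 's', 't', 'l')
    analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
        test,
        test,
        flake.key,
        bug_id=None,
        allow_new_analysis=True,
        manually_triggered=False,
        force=True,
        queue_name=constants.DEFAULT_QUEUE)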
Example #7
  def testAnalyzeFlakePipelineCanStartAnalysisImmediately(self, _):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    start_commit_position = 1000
    start_revision = 'r1000'
    isolate_sha = 'sha1'
    next_commit_position = 999
    pass_rate = 0.5
    build_url = 'url'
    try_job_url = None

    get_sha_output = GetIsolateShaOutput(
        isolate_sha=isolate_sha, build_url=build_url, try_job_url=try_job_url)

    step_metadata = StepMetadata(
        canonical_step_name='s',
        dimensions=None,
        full_step_name='s',
        patched=False,
        swarm_task_ids=None,
        waterfall_buildername='b',
        waterfall_mastername='w',
        isolate_target_name='s')

    expected_flakiness = Flakiness(
        build_url=build_url,
        commit_position=start_commit_position,
        revision=start_revision,
        pass_rate=pass_rate)

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    expected_isolate_sha_input = GetIsolateShaForCommitPositionParameters(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position=start_commit_position,
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        revision=start_revision,
        step_metadata=step_metadata,
        upper_bound_build_number=analysis.build_number)

    expected_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=analysis.builder_name,
        commit_position=start_commit_position,
        flakiness_thus_far=None,
        get_isolate_sha_output=get_sha_output,
        master_name=analysis.master_name,
        previous_swarming_task_output=None,
        reference_build_number=analysis.build_number,
        revision=start_revision,
        step_name=analysis.step_name,
        test_name=analysis.test_name)

    expected_update_data_points_input = UpdateFlakeAnalysisDataPointsInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        flakiness=expected_flakiness)

    expected_next_commit_position_input = NextCommitPositionInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        commit_position_range=IntRange(lower=None, upper=None),
        step_metadata=step_metadata)

    next_commit_id = CommitID(
        commit_position=next_commit_position, revision='r999')
    expected_next_commit_position_output = NextCommitPositionOutput(
        next_commit_id=next_commit_id, culprit_commit_id=None)

    expected_recursive_analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=expected_next_commit_position_output,
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

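    # One full iteration: resolve the isolate sha, measure the pass rate,
    # record the data point, compute the next commit, then recurse.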
    self.MockGeneratorPipeline(GetIsolateShaForCommitPositionPipeline,
                               expected_isolate_sha_input, get_sha_output)

    self.MockGeneratorPipeline(DetermineApproximatePassRatePipeline,
                               expected_pass_rate_input, expected_flakiness)

    self.MockSynchronousPipeline(UpdateFlakeAnalysisDataPointsPipeline,
                                 expected_update_data_points_input, None)

    self.MockSynchronousPipeline(NextCommitPositionPipeline,
                                 expected_next_commit_position_input,
                                 expected_next_commit_position_output)

    self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                               expected_recursive_analyze_flake_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Example #8
  def testAnalyzeFlakePipelineAnalysisFinishedWithCulprit(
      self, mocked_confidence, mocked_culprit, _):
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 's'
    test_name = 't'
    culprit_commit_position = 999

    analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                          build_number, step_name, test_name)
    analysis.data_points = [
        DataPoint.Create(commit_position=culprit_commit_position)
    ]
    analysis.original_master_name = master_name
    analysis.original_builder_name = builder_name
    analysis.original_build_number = build_number
    analysis.original_step_name = step_name
    analysis.original_test_name = test_name
    analysis.Save()

    culprit_revision = 'r999'
    confidence_score = 0.85
    culprit = FlakeCulprit.Create('chromium', culprit_revision,
                                  culprit_commit_position)
    culprit.put()

    mocked_confidence.return_value = confidence_score
    mocked_culprit.return_value = culprit

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=None,
            culprit_commit_id=CommitID(
                commit_position=culprit_commit_position,
                revision=culprit_revision)),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    expected_recent_flakiness_input = AnalyzeRecentFlakinessInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    expected_auto_action_input = _PerformAutoActionsInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    expected_report_event_input = ReportEventInput(
        analysis_urlsafe_key=analysis.key.urlsafe())

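    # A culprit was identified, so the pipeline checks recent flakiness, runs
    # auto actions, and reports the analysis event before completing.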
    self.MockGeneratorPipeline(AnalyzeRecentFlakinessPipeline,
                               expected_recent_flakiness_input, None)
    self.MockGeneratorPipeline(_PerformAutoActionsPipeline,
                               expected_auto_action_input, None)
    self.MockGeneratorPipeline(ReportAnalysisEventPipeline,
                               expected_report_event_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    self.assertEqual(analysis_status.COMPLETED, analysis.status)
    self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)